path: root/library/alloc
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
commit    5363f350887b1e5b5dd21a86f88c8af9d7fea6da (patch)
tree      35ca005eb6e0e9a1ba3bb5dbc033209ad445dc17 /library/alloc
parent    Adding debian version 1.66.0+dfsg1-1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc')
-rw-r--r--  library/alloc/benches/lib.rs                            |    2
-rw-r--r--  library/alloc/benches/str.rs                            |   65
-rw-r--r--  library/alloc/src/alloc.rs                              |   23
-rw-r--r--  library/alloc/src/alloc/tests.rs                        |    1
-rw-r--r--  library/alloc/src/boxed.rs                              |   33
-rw-r--r--  library/alloc/src/collections/btree/map.rs              |    4
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs       |    1
-rw-r--r--  library/alloc/src/collections/mod.rs                    |    2
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs        |  193
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs         |  178
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs     |  149
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs          | 1283
-rw-r--r--  library/alloc/src/collections/vec_deque/pair_slices.rs  |   67
-rw-r--r--  library/alloc/src/collections/vec_deque/ring_slices.rs  |   56
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_extend.rs  |   81
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs        |  250
-rw-r--r--  library/alloc/src/lib.rs                                |    3
-rw-r--r--  library/alloc/src/rc.rs                                 |   66
-rw-r--r--  library/alloc/src/slice.rs                              |    2
-rw-r--r--  library/alloc/src/string.rs                             |    4
-rw-r--r--  library/alloc/src/sync.rs                               |   70
-rw-r--r--  library/alloc/src/vec/mod.rs                            |   67
-rw-r--r--  library/alloc/src/vec/set_len_on_drop.rs                |    5
-rw-r--r--  library/alloc/src/vec/spec_extend.rs                    |   34
-rw-r--r--  library/alloc/tests/boxed.rs                            |   41
-rw-r--r--  library/alloc/tests/fmt.rs                              |   13
-rw-r--r--  library/alloc/tests/lib.rs                              |    2
-rw-r--r--  library/alloc/tests/str.rs                              |   38
-rw-r--r--  library/alloc/tests/vec.rs                              |    3
-rw-r--r--  library/alloc/tests/vec_deque.rs                        |   21
30 files changed, 1322 insertions, 1435 deletions
diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs
index d418965cd..b25d63d83 100644
--- a/library/alloc/benches/lib.rs
+++ b/library/alloc/benches/lib.rs
@@ -5,7 +5,9 @@
#![feature(iter_next_chunk)]
#![feature(repr_simd)]
#![feature(slice_partition_dedup)]
+#![feature(strict_provenance)]
#![feature(test)]
+#![deny(fuzzy_provenance_casts)]
extern crate test;
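The two new attributes opt the benchmark crate into strict provenance and make fuzzy (integer-to-pointer) casts a hard error. A minimal sketch of what that rules in and out, assuming the unstable strict-provenance pointer helpers (`addr`, `with_addr`); the function itself is hypothetical and not part of this patch:

    #![feature(strict_provenance)]
    #![deny(fuzzy_provenance_casts)]

    fn round_trip(x: &u32) -> u32 {
        let p = x as *const u32;
        let addr = p.addr();            // strict-provenance way to inspect the address
        // let q = addr as *const u32;  // rejected: a plain int-to-ptr cast loses provenance
        let q = p.with_addr(addr);      // rebuilds a pointer that keeps p's provenance
        unsafe { *q }
    }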
diff --git a/library/alloc/benches/str.rs b/library/alloc/benches/str.rs
index 391475bc0..54af389de 100644
--- a/library/alloc/benches/str.rs
+++ b/library/alloc/benches/str.rs
@@ -1,3 +1,4 @@
+use core::iter::Iterator;
use test::{black_box, Bencher};
#[bench]
@@ -122,14 +123,13 @@ fn bench_contains_short_short(b: &mut Bencher) {
let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
let needle = "sit";
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(haystack.contains(needle));
+ assert!(black_box(haystack).contains(black_box(needle)));
})
}
-#[bench]
-fn bench_contains_short_long(b: &mut Bencher) {
- let haystack = "\
+static LONG_HAYSTACK: &str = "\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
@@ -164,10 +164,48 @@ feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, i
vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
malesuada sollicitudin quam eu fermentum.";
+
+#[bench]
+fn bench_contains_2b_repeated_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "::";
+
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_short_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
let needle = "english";
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_16b_in_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "english language";
+
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_32b_in_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "the english language sample text";
+
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(!haystack.contains(needle));
+ assert!(!black_box(haystack).contains(black_box(needle)));
})
}
@@ -176,8 +214,20 @@ fn bench_contains_bad_naive(b: &mut Bencher) {
let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let needle = "aaaaaaaab";
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_bad_simd(b: &mut Bencher) {
+ let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ let needle = "aaabaaaa";
+
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(!haystack.contains(needle));
+ assert!(!black_box(haystack).contains(black_box(needle)));
})
}
@@ -186,8 +236,9 @@ fn bench_contains_equal(b: &mut Bencher) {
let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
let needle = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(haystack.contains(needle));
+ assert!(black_box(haystack).contains(black_box(needle)));
})
}
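The pattern applied throughout this file: wrap both haystack and needle in `black_box` so the substring search cannot be const-folded away, and set `b.bytes` so the harness reports throughput over the haystack. A sketch of the same shape with a hypothetical needle that is not one of the added cases:

    #[bench]
    fn bench_contains_sketch(b: &mut Bencher) {
        let haystack = LONG_HAYSTACK;
        let needle = "dolor sit"; // hypothetical needle, present near the start of the haystack

        b.bytes = haystack.len() as u64; // lets the harness report MB/s
        b.iter(|| {
            // black_box hides both inputs from the optimizer, so the search
            // really runs on every iteration
            assert!(black_box(haystack).contains(black_box(needle)));
        })
    }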
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 8187517cc..e5fbfc557 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -28,20 +28,16 @@ extern "Rust" {
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_deallocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
#[rustc_reallocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
#[rustc_allocator_zeroed]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
@@ -402,19 +398,18 @@ pub use std::alloc::handle_alloc_error;
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
- use crate::alloc::Layout;
-
- // called via generated `__rust_alloc_error_handler`
-
- // if there is no `#[alloc_error_handler]`
+ // called via generated `__rust_alloc_error_handler` if there is no
+ // `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
panic!("memory allocation of {size} bytes failed")
}
- // if there is an `#[alloc_error_handler]`
+ #[cfg(bootstrap)]
#[rustc_std_internal_symbol]
pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
+ use crate::alloc::Layout;
+
let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
#[lang = "oom"]
diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
index b2f019459..1a5938fd3 100644
--- a/library/alloc/src/alloc/tests.rs
+++ b/library/alloc/src/alloc/tests.rs
@@ -22,7 +22,6 @@ fn allocate_zeroed() {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = Box::new(10);
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index d6681a317..e5f6b0c0c 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -158,6 +158,8 @@ use core::hash::{Hash, Hasher};
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::iter::{FusedIterator, Iterator};
+#[cfg(not(bootstrap))]
+use core::marker::Tuple;
use core::marker::{Destruct, Unpin, Unsize};
use core::mem;
use core::ops::{
@@ -185,7 +187,7 @@ pub use thin::ThinBox;
mod thin;
-/// A pointer type for heap allocation.
+/// A pointer type that uniquely owns a heap allocation of type `T`.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
@@ -1979,6 +1981,7 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
@@ -1988,6 +1991,17 @@ impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+ type Output = <F as FnOnce<Args>>::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ <F as FnOnce<Args>>::call_once(*self, args)
+ }
+}
+
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
@@ -1995,6 +2009,15 @@ impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ <F as FnMut<Args>>::call_mut(self, args)
+ }
+}
+
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
@@ -2002,6 +2025,14 @@ impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output {
+ <F as Fn<Args>>::call(self, args)
+ }
+}
+
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
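These impls are what make a boxed closure directly callable; the only change here is gating the old impls to the bootstrap compiler and adding the new `Tuple` bound on `Args` otherwise. A minimal caller-side sketch (plain stable Rust, not from the patch):

    fn apply(f: Box<dyn Fn(i32) -> i32>, x: i32) -> i32 {
        // resolves through the `Fn<Args> for Box<F, A>` impl above, with Args = (i32,)
        f(x)
    }

    fn main() {
        let double: Box<dyn Fn(i32) -> i32> = Box::new(|x| x * 2);
        assert_eq!(apply(double, 21), 42);
    }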
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 8a7719347..1d9c4460e 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -46,8 +46,8 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
/// is done is *very* inefficient for modern computer architectures. In particular, every element
/// is stored in its own individually heap-allocated node. This means that every single insertion
/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
-/// are both notably expensive things to do in practice, we are forced to at very least reconsider
-/// the BST strategy.
+/// are both notably expensive things to do in practice, we are forced to, at the very least,
+/// reconsider the BST strategy.
///
/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index aadb0dc9c..64bce0ff8 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -94,6 +94,7 @@ fn test_partial_eq() {
#[test]
#[cfg(target_arch = "x86_64")]
+#[cfg_attr(miri, ignore)] // We'd like to run Miri with layout randomization
fn test_sizes() {
assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 161a37573..3e0b0f735 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -139,7 +139,7 @@ impl Display for TryReserveError {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
- " because the memory allocator returned a error"
+ " because the memory allocator returned an error"
}
};
fmt.write_str(reason)
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 41baa7102..89feb361d 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,12 +1,12 @@
-use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
-use core::mem::{self, MaybeUninit};
-use core::ptr::{self, NonNull};
+use core::mem::{self, SizedTypeProperties};
+use core::ptr::NonNull;
+use core::{fmt, ptr};
use crate::alloc::{Allocator, Global};
-use super::{count, wrap_index, VecDeque};
+use super::VecDeque;
/// A draining iterator over the elements of a `VecDeque`.
///
@@ -20,26 +20,70 @@ pub struct Drain<
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
- after_tail: usize,
- after_head: usize,
- ring: NonNull<[T]>,
- tail: usize,
- head: usize,
+ // We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
+ // and we want it to be covariant instead
deque: NonNull<VecDeque<T, A>>,
- _phantom: PhantomData<&'a T>,
+ // drain_start is stored in deque.len
+ drain_len: usize,
+ // index into the logical array, not the physical one (always lies in [0..deque.len))
+ idx: usize,
+ // number of elements after the drain range
+ tail_len: usize,
+ remaining: usize,
+ // Needed to make Drain covariant over T
+ _marker: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
- after_tail: usize,
- after_head: usize,
- ring: &'a [MaybeUninit<T>],
- tail: usize,
- head: usize,
- deque: NonNull<VecDeque<T, A>>,
+ deque: &'a mut VecDeque<T, A>,
+ drain_start: usize,
+ drain_len: usize,
) -> Self {
- let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
- Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
+ let orig_len = mem::replace(&mut deque.len, drain_start);
+ let tail_len = orig_len - drain_start - drain_len;
+ Drain {
+ deque: NonNull::from(deque),
+ drain_len,
+ idx: drain_start,
+ tail_len,
+ remaining: drain_len,
+ _marker: PhantomData,
+ }
+ }
+
+ // Only returns pointers to the slices, as that's
+ // all we need to drop them. May only be called if `self.remaining != 0`.
+ unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
+ unsafe {
+ let deque = self.deque.as_ref();
+ // FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
+ // Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
+ // just `drain_start`, so the range check would (almost) always panic. Between temporarily
+ // adjusting the deques `len` to call `slice_ranges`, and just copy pasting the `slice_ranges`
+ // implementation, this seemed like the less hacky solution, though it might be good to
+ // find a better one in the future.
+
+ // because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
+ // logical index.
+ let wrapped_start = deque.to_physical_idx(self.idx);
+
+ let head_len = deque.capacity() - wrapped_start;
+
+ let (a_range, b_range) = if head_len >= self.remaining {
+ (wrapped_start..wrapped_start + self.remaining, 0..0)
+ } else {
+ let tail_len = self.remaining - head_len;
+ (wrapped_start..deque.capacity(), 0..tail_len)
+ };
+
+ // SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
+ // the range `0..deque.original_len`. because of this, and because of the fact
+ // that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
+ // it's guaranteed that `a_range` and `b_range` represent valid ranges into
+ // the deques buffer.
+ (deque.buffer_range(a_range), deque.buffer_range(b_range))
+ }
}
}
@@ -47,11 +91,10 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain")
- .field(&self.after_tail)
- .field(&self.after_head)
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
+ .field(&self.drain_len)
+ .field(&self.idx)
+ .field(&self.tail_len)
+ .field(&self.remaining)
.finish()
}
}
@@ -68,57 +111,81 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
- self.0.for_each(drop);
+ if self.0.remaining != 0 {
+ unsafe {
+ // SAFETY: We just checked that `self.remaining != 0`.
+ let (front, back) = self.0.as_slices();
+ ptr::drop_in_place(front);
+ ptr::drop_in_place(back);
+ }
+ }
let source_deque = unsafe { self.0.deque.as_mut() };
- // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
- //
- // T t h H
- // [. . . o o x x o o . . .]
- //
- let orig_tail = source_deque.tail;
- let drain_tail = source_deque.head;
- let drain_head = self.0.after_tail;
- let orig_head = self.0.after_head;
+ let drain_start = source_deque.len();
+ let drain_len = self.0.drain_len;
+ let drain_end = drain_start + drain_len;
+
+ let orig_len = self.0.tail_len + drain_end;
- let tail_len = count(orig_tail, drain_tail, source_deque.cap());
- let head_len = count(drain_head, orig_head, source_deque.cap());
+ if T::IS_ZST {
+ // no need to copy around any memory if T is a ZST
+ source_deque.len = orig_len - drain_len;
+ return;
+ }
- // Restore the original head value
- source_deque.head = orig_head;
+ let head_len = drain_start;
+ let tail_len = self.0.tail_len;
- match (tail_len, head_len) {
+ match (head_len, tail_len) {
(0, 0) => {
source_deque.head = 0;
- source_deque.tail = 0;
+ source_deque.len = 0;
}
(0, _) => {
- source_deque.tail = drain_head;
+ source_deque.head = source_deque.to_physical_idx(drain_len);
+ source_deque.len = orig_len - drain_len;
}
(_, 0) => {
- source_deque.head = drain_tail;
+ source_deque.len = orig_len - drain_len;
}
_ => unsafe {
- if tail_len <= head_len {
- source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
- source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
+ if head_len <= tail_len {
+ source_deque.wrap_copy(
+ source_deque.head,
+ source_deque.to_physical_idx(drain_len),
+ head_len,
+ );
+ source_deque.head = source_deque.to_physical_idx(drain_len);
+ source_deque.len = orig_len - drain_len;
} else {
- source_deque.head = source_deque.wrap_add(drain_tail, head_len);
- source_deque.wrap_copy(drain_tail, drain_head, head_len);
+ source_deque.wrap_copy(
+ source_deque.to_physical_idx(head_len + drain_len),
+ source_deque.to_physical_idx(head_len),
+ tail_len,
+ );
+ source_deque.len = orig_len - drain_len;
}
},
}
}
}
- while let Some(item) = self.next() {
- let guard = DropGuard(self);
- drop(item);
- mem::forget(guard);
+ let guard = DropGuard(self);
+ if guard.0.remaining != 0 {
+ unsafe {
+ // SAFETY: We just checked that `self.remaining != 0`.
+ let (front, back) = guard.0.as_slices();
+ // since idx is a logical index, we don't need to worry about wrapping.
+ guard.0.idx += front.len();
+ guard.0.remaining -= front.len();
+ ptr::drop_in_place(front);
+ guard.0.remaining = 0;
+ ptr::drop_in_place(back);
+ }
}
- DropGuard(self);
+ // Dropping `guard` handles moving the remaining elements into place.
}
}
@@ -128,20 +195,18 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.tail == self.head {
+ if self.remaining == 0 {
return None;
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
- // Safety:
- // - `self.tail` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
+ let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
+ self.idx += 1;
+ self.remaining -= 1;
+ Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.remaining;
(len, Some(len))
}
}
@@ -150,14 +215,12 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
- if self.tail == self.head {
+ if self.remaining == 0 {
return None;
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
- // Safety:
- // - `self.head` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
+ self.remaining -= 1;
+ let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
+ Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
}
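The rewritten `Drain` keeps a logical index plus a count of remaining elements, and its drop guard moves the shorter of the two retained runs back into place. Observable behaviour is unchanged; a small caller-side sketch (not from the patch):

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = (0..8).collect();

        // Drain the logical range 2..5; the iterator yields 2, 3, 4.
        let drained: Vec<i32> = dq.drain(2..5).collect();
        assert_eq!(drained, [2, 3, 4]);

        // On drop, the guard stitches the head (0, 1) and tail (5, 6, 7) back together.
        assert_eq!(dq, [0, 1, 5, 6, 7]);
    }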
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
index e696d7ed6..d9f393714 100644
--- a/library/alloc/src/collections/vec_deque/iter.rs
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -1,9 +1,6 @@
-use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
-use core::mem::MaybeUninit;
use core::ops::Try;
-
-use super::{count, wrap_index, RingSlices};
+use core::{fmt, mem, slice};
/// An iterator over the elements of a `VecDeque`.
///
@@ -13,30 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter`]: super::VecDeque::iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
- ring: &'a [MaybeUninit<T>],
- tail: usize,
- head: usize,
+ i1: slice::Iter<'a, T>,
+ i2: slice::Iter<'a, T>,
}
impl<'a, T> Iter<'a, T> {
- pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self {
- Iter { ring, tail, head }
+ pub(super) fn new(i1: slice::Iter<'a, T>, i2: slice::Iter<'a, T>) -> Self {
+ Self { i1, i2 }
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- f.debug_tuple("Iter")
- .field(&MaybeUninit::slice_assume_init_ref(front))
- .field(&MaybeUninit::slice_assume_init_ref(back))
- .finish()
- }
+ f.debug_tuple("Iter").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@@ -44,7 +31,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
- Iter { ring: self.ring, tail: self.tail, head: self.head }
+ Iter { i1: self.i1.clone(), i2: self.i2.clone() }
}
}
@@ -54,72 +41,50 @@ impl<'a, T> Iterator for Iter<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a T> {
- if self.tail == self.head {
- return None;
+ match self.i1.next() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the first one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i1 is empty anyways
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.next()
+ }
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
- // Safety:
- // - `self.tail` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
+ }
+
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i1.advance_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n - m).map_err(|o| o + m)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.len();
(len, Some(len))
}
- fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
- MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
- }
+ let accum = self.i1.fold(accum, &mut f);
+ self.i2.fold(accum, &mut f)
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
- Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- let (mut iter, final_res);
- if self.tail <= self.head {
- // Safety: single slice self.ring[self.tail..self.head] is initialized.
- iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
- .iter();
- final_res = iter.try_fold(init, &mut f);
- } else {
- // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
- let (front, back) = self.ring.split_at(self.tail);
-
- let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
- let res = back_iter.try_fold(init, &mut f);
- let len = self.ring.len();
- self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
- iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
- final_res = iter.try_fold(res?, &mut f);
- }
- self.tail = self.head - iter.len();
- final_res
- }
-
- fn nth(&mut self, n: usize) -> Option<Self::Item> {
- if n >= count(self.tail, self.head, self.ring.len()) {
- self.tail = self.head;
- None
- } else {
- self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
- self.next()
- }
+ let acc = self.i1.try_fold(init, &mut f)?;
+ self.i2.try_fold(acc, &mut f)
}
#[inline]
@@ -132,8 +97,12 @@ impl<'a, T> Iterator for Iter<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
- let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
- self.ring.get_unchecked(idx).assume_init_ref()
+ let i1_len = self.i1.len();
+ if idx < i1_len {
+ self.i1.__iterator_get_unchecked(idx)
+ } else {
+ self.i2.__iterator_get_unchecked(idx - i1_len)
+ }
}
}
}
@@ -142,63 +111,56 @@ impl<'a, T> Iterator for Iter<'a, T> {
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a T> {
- if self.tail == self.head {
- return None;
+ match self.i2.next_back() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the second one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i2 is empty anyways
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.next_back()
+ }
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
- // Safety:
- // - `self.head` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
}
- fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i2.advance_back_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ }
+
+ fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
- MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
- }
+ let accum = self.i2.rfold(accum, &mut f);
+ self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
- Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- let (mut iter, final_res);
- if self.tail <= self.head {
- // Safety: single slice self.ring[self.tail..self.head] is initialized.
- iter = unsafe {
- MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
- };
- final_res = iter.try_rfold(init, &mut f);
- } else {
- // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
- let (front, back) = self.ring.split_at(self.tail);
-
- let mut front_iter =
- unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
- let res = front_iter.try_rfold(init, &mut f);
- self.head = front_iter.len();
- iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
- final_res = iter.try_rfold(res?, &mut f);
- }
- self.head = self.tail + iter.len();
- final_res
+ let acc = self.i2.try_rfold(init, &mut f)?;
+ self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.i1.len() + self.i2.len()
+ }
+
fn is_empty(&self) -> bool {
- self.head == self.tail
+ self.i1.is_empty() && self.i2.is_empty()
}
}
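The new `Iter` is simply a pair of `slice::Iter`s over the deque's two contiguous halves, the same split that `VecDeque::as_slices` exposes, so the slice iterator's specializations carry over for free. A caller-side sketch of that equivalence (illustration only):

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = VecDeque::with_capacity(4);
        dq.extend([1, 2, 3]);
        dq.pop_front();
        dq.push_back(4);
        dq.push_back(5); // the buffer has likely wrapped around by now

        // Iteration walks the front slice, then the back slice,
        // exactly like chaining the two halves of as_slices().
        let (front, back) = dq.as_slices();
        let via_slices: Vec<i32> = front.iter().chain(back).copied().collect();
        let via_iter: Vec<i32> = dq.iter().copied().collect();
        assert_eq!(via_iter, via_slices);
        assert_eq!(via_iter, [2, 3, 4, 5]);
    }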
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
index b78c0d5e1..2c59d95cd 100644
--- a/library/alloc/src/collections/vec_deque/iter_mut.rs
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -1,8 +1,6 @@
-use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
-use core::marker::PhantomData;
-
-use super::{count, wrap_index, RingSlices};
+use core::ops::Try;
+use core::{fmt, mem, slice};
/// A mutable iterator over the elements of a `VecDeque`.
///
@@ -12,39 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter_mut`]: super::VecDeque::iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
- // Internal safety invariant: the entire slice is dereferenceable.
- ring: *mut [T],
- tail: usize,
- head: usize,
- phantom: PhantomData<&'a mut [T]>,
+ i1: slice::IterMut<'a, T>,
+ i2: slice::IterMut<'a, T>,
}
impl<'a, T> IterMut<'a, T> {
- pub(super) unsafe fn new(
- ring: *mut [T],
- tail: usize,
- head: usize,
- phantom: PhantomData<&'a mut [T]>,
- ) -> Self {
- IterMut { ring, tail, head, phantom }
+ pub(super) fn new(i1: slice::IterMut<'a, T>, i2: slice::IterMut<'a, T>) -> Self {
+ Self { i1, i2 }
}
}
-// SAFETY: we do nothing thread-local and there is no interior mutability,
-// so the usual structural `Send`/`Sync` apply.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Send> Send for IterMut<'_, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
-
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&*front, &*back) };
- f.debug_tuple("IterMut").field(&front).field(&back).finish()
+ f.debug_tuple("IterMut").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@@ -54,44 +33,50 @@ impl<'a, T> Iterator for IterMut<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a mut T> {
- if self.tail == self.head {
- return None;
+ match self.i1.next() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the first one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i1 is empty anyways
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.next()
+ }
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ }
- unsafe {
- let elem = self.ring.get_unchecked_mut(tail);
- Some(&mut *elem)
- }
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i1.advance_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n - m).map_err(|o| o + m)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.len();
(len, Some(len))
}
- fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&mut *front, &mut *back) };
- accum = front.iter_mut().fold(accum, &mut f);
- back.iter_mut().fold(accum, &mut f)
+ let accum = self.i1.fold(accum, &mut f);
+ self.i2.fold(accum, &mut f)
}
- fn nth(&mut self, n: usize) -> Option<Self::Item> {
- if n >= count(self.tail, self.head, self.ring.len()) {
- self.tail = self.head;
- None
- } else {
- self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
- self.next()
- }
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let acc = self.i1.try_fold(init, &mut f)?;
+ self.i2.try_fold(acc, &mut f)
}
#[inline]
@@ -104,8 +89,12 @@ impl<'a, T> Iterator for IterMut<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
- let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
- &mut *self.ring.get_unchecked_mut(idx)
+ let i1_len = self.i1.len();
+ if idx < i1_len {
+ self.i1.__iterator_get_unchecked(idx)
+ } else {
+ self.i2.__iterator_get_unchecked(idx - i1_len)
+ }
}
}
}
@@ -114,34 +103,56 @@ impl<'a, T> Iterator for IterMut<'a, T> {
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut T> {
- if self.tail == self.head {
- return None;
+ match self.i2.next_back() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the first one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i2 is empty anyways
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.next_back()
+ }
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ }
- unsafe {
- let elem = self.ring.get_unchecked_mut(self.head);
- Some(&mut *elem)
- }
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i2.advance_back_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n - m).map_err(|o| m + o)
}
- fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&mut *front, &mut *back) };
- accum = back.iter_mut().rfold(accum, &mut f);
- front.iter_mut().rfold(accum, &mut f)
+ let accum = self.i2.rfold(accum, &mut f);
+ self.i1.rfold(accum, &mut f)
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let acc = self.i2.try_rfold(init, &mut f)?;
+ self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn len(&self) -> usize {
+ self.i1.len() + self.i2.len()
+ }
+
fn is_empty(&self) -> bool {
- self.head == self.tail
+ self.i1.is_empty() && self.i2.is_empty()
}
}
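Both `Iter` and `IterMut` rely on the same trick: once the first slice iterator runs dry it is swapped with the second, so the common keep-calling-`next()` path stays a single branch. A stripped-down sketch of the idea on plain slice iterators (illustration only, not the library code):

    fn next_across<'a, T>(
        i1: &mut std::slice::Iter<'a, T>,
        i2: &mut std::slice::Iter<'a, T>,
    ) -> Option<&'a T> {
        match i1.next() {
            Some(val) => Some(val),
            None => {
                // i1 is exhausted: swap so future calls hit the first branch again
                std::mem::swap(i1, i2);
                i1.next()
            }
        }
    }

    fn main() {
        let (a, b) = ([1, 2], [3, 4]);
        let (mut i1, mut i2) = (a.iter(), b.iter());
        let mut out = Vec::new();
        while let Some(v) = next_across(&mut i1, &mut i2) {
            out.push(*v);
        }
        assert_eq!(out, [1, 2, 3, 4]);
    }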
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 2a57dad89..4866c53e7 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -10,11 +10,10 @@
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{repeat_with, FromIterator};
-use core::marker::PhantomData;
-use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::iter::{repeat_n, repeat_with, ByRefSized, FromIterator};
+use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
-use core::ptr::{self, NonNull};
+use core::ptr;
use core::slice;
// This is used in a bunch of intra-doc links.
@@ -52,14 +51,6 @@ pub use self::iter::Iter;
mod iter;
-use self::pair_slices::PairSlices;
-
-mod pair_slices;
-
-use self::ring_slices::RingSlices;
-
-mod ring_slices;
-
use self::spec_extend::SpecExtend;
mod spec_extend;
@@ -67,11 +58,6 @@ mod spec_extend;
#[cfg(test)]
mod tests;
-const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
-const MINIMUM_CAPACITY: usize = 1; // 2 - 1
-
-const MAXIMUM_ZST_CAPACITY: usize = 1 << (usize::BITS - 1); // Largest possible power of two
-
/// A double-ended queue implemented with a growable ring buffer.
///
/// The "default" usage of this type as a queue is to use [`push_back`] to add to
@@ -105,13 +91,13 @@ pub struct VecDeque<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
- // tail and head are pointers into the buffer. Tail always points
- // to the first element that could be read, Head always points
- // to where data should be written.
- // If tail == head the buffer is empty. The length of the ringbuffer
- // is defined as the distance between the two.
- tail: usize,
+ // `self[0]`, if it exists, is `buf[head]`.
+ // `head < buf.capacity()`, unless `buf.capacity() == 0` when `head == 0`.
head: usize,
+ // the number of initialized elements, starting from the one at `head` and potentially wrapping around.
+ // if `len == 0`, the exact value of `head` is unimportant.
+ // if `T` is zero-Sized, then `self.len <= usize::MAX`, otherwise `self.len <= isize::MAX as usize`.
+ len: usize,
buf: RawVec<T, A>,
}
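With this representation there is no reserved empty slot and no power-of-two capacity requirement: `head` is the physical position of the first element, `len` counts the initialized elements, and logical indices map into the buffer with a wrapping add. A free-standing sketch of that mapping, a stand-in for the `to_physical_idx`/`wrap_add` methods defined later in this diff, using a plain modulo that is equivalent under their preconditions:

    fn to_physical_idx(head: usize, logical: usize, capacity: usize) -> usize {
        // head < capacity and logical < len <= capacity, so one wrap suffices
        (head + logical) % capacity
    }

    fn main() {
        // capacity 8, first element stored at physical index 6
        assert_eq!(to_physical_idx(6, 0, 8), 6);
        assert_eq!(to_physical_idx(6, 1, 8), 7);
        assert_eq!(to_physical_idx(6, 2, 8), 0); // wrapped around
        assert_eq!(to_physical_idx(6, 4, 8), 2);
    }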
@@ -124,18 +110,8 @@ impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
}
fn clone_from(&mut self, other: &Self) {
- self.truncate(other.len());
-
- let mut iter = PairSlices::from(self, other);
- while let Some((dst, src)) = iter.next() {
- dst.clone_from_slice(&src);
- }
-
- if iter.has_remainder() {
- for remainder in iter.remainder() {
- self.extend(remainder.iter().cloned());
- }
- }
+ self.clear();
+ self.extend(other.iter().cloned());
}
}
@@ -180,41 +156,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.buf.ptr()
}
- /// Marginally more convenient
- #[inline]
- fn cap(&self) -> usize {
- if T::IS_ZST {
- // For zero sized types, we are always at maximum capacity
- MAXIMUM_ZST_CAPACITY
- } else {
- self.buf.capacity()
- }
- }
-
- /// Turn ptr into a slice, since the elements of the backing buffer may be uninitialized,
- /// we will return a slice of [`MaybeUninit<T>`].
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
- /// incorrect usage of this method.
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[inline]
- unsafe fn buffer_as_slice(&self) -> &[MaybeUninit<T>] {
- unsafe { slice::from_raw_parts(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
- }
-
- /// Turn ptr into a mut slice, since the elements of the backing buffer may be uninitialized,
- /// we will return a slice of [`MaybeUninit<T>`].
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
- /// incorrect usage of this method.
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[inline]
- unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
- unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
- }
-
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
@@ -229,51 +170,58 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
}
- /// Returns `true` if the buffer is at full capacity.
+ /// Returns a slice pointer into the buffer.
+ /// `range` must lie inside `0..self.capacity()`.
#[inline]
- fn is_full(&self) -> bool {
- self.cap() - self.len() == 1
+ unsafe fn buffer_range(&self, range: Range<usize>) -> *mut [T] {
+ unsafe {
+ ptr::slice_from_raw_parts_mut(self.ptr().add(range.start), range.end - range.start)
+ }
}
- /// Returns the index in the underlying buffer for a given logical element
- /// index.
+ /// Returns `true` if the buffer is at full capacity.
#[inline]
- fn wrap_index(&self, idx: usize) -> usize {
- wrap_index(idx, self.cap())
+ fn is_full(&self) -> bool {
+ self.len == self.capacity()
}
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
- wrap_index(idx.wrapping_add(addend), self.cap())
+ wrap_index(idx.wrapping_add(addend), self.capacity())
+ }
+
+ #[inline]
+ fn to_physical_idx(&self, idx: usize) -> usize {
+ self.wrap_add(self.head, idx)
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
- wrap_index(idx.wrapping_sub(subtrahend), self.cap())
+ wrap_index(idx.wrapping_sub(subtrahend).wrapping_add(self.capacity()), self.capacity())
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
- unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
+ unsafe fn copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- dst + len <= self.cap(),
+ dst + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
debug_assert!(
- src + len <= self.cap(),
+ src + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
unsafe {
ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
@@ -282,22 +230,22 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Copies a contiguous block of memory len long from src to dst
#[inline]
- unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
+ unsafe fn copy_nonoverlapping(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- dst + len <= self.cap(),
+ dst + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
debug_assert!(
- src + len <= self.cap(),
+ src + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
unsafe {
ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
@@ -305,30 +253,28 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Copies a potentially wrapping block of memory len long from src to dest.
- /// (abs(dst - src) + len) must be no larger than cap() (There must be at
+ /// (abs(dst - src) + len) must be no larger than capacity() (There must be at
/// most one continuous overlapping region between src and dest).
- unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
- #[allow(dead_code)]
- fn diff(a: usize, b: usize) -> usize {
- if a <= b { b - a } else { a - b }
- }
+ unsafe fn wrap_copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
+ cmp::min(src.abs_diff(dst), self.capacity() - src.abs_diff(dst)) + len
+ <= self.capacity(),
"wrc dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
- if src == dst || len == 0 {
+ // If T is a ZST, don't do any copying.
+ if T::IS_ZST || src == dst || len == 0 {
return;
}
let dst_after_src = self.wrap_sub(dst, src) < len;
- let src_pre_wrap_len = self.cap() - src;
- let dst_pre_wrap_len = self.cap() - dst;
+ let src_pre_wrap_len = self.capacity() - src;
+ let dst_pre_wrap_len = self.capacity() - dst;
let src_wraps = src_pre_wrap_len < len;
let dst_wraps = dst_pre_wrap_len < len;
@@ -342,7 +288,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst, src, len);
+ self.copy(src, dst, len);
}
}
(false, false, true) => {
@@ -355,8 +301,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// . . D .
//
unsafe {
- self.copy(dst, src, dst_pre_wrap_len);
- self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ self.copy(src, dst, dst_pre_wrap_len);
+ self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
}
}
(true, false, true) => {
@@ -369,8 +315,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// . . D .
//
unsafe {
- self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
- self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
+ self.copy(src, dst, dst_pre_wrap_len);
}
}
(false, true, false) => {
@@ -383,8 +329,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst, src, src_pre_wrap_len);
- self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
}
}
(true, true, false) => {
@@ -397,8 +343,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
- self.copy(dst, src, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
}
}
(false, true, true) => {
@@ -414,9 +360,9 @@ impl<T, A: Allocator> VecDeque<T, A> {
debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
let delta = dst_pre_wrap_len - src_pre_wrap_len;
unsafe {
- self.copy(dst, src, src_pre_wrap_len);
- self.copy(dst + src_pre_wrap_len, 0, delta);
- self.copy(0, delta, len - dst_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, delta);
+ self.copy(delta, 0, len - dst_pre_wrap_len);
}
}
(true, true, true) => {
@@ -432,9 +378,9 @@ impl<T, A: Allocator> VecDeque<T, A> {
debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
let delta = src_pre_wrap_len - dst_pre_wrap_len;
unsafe {
- self.copy(delta, 0, len - src_pre_wrap_len);
- self.copy(0, self.cap() - delta, delta);
- self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(0, delta, len - src_pre_wrap_len);
+ self.copy(self.capacity() - delta, 0, delta);
+ self.copy(src, dst, dst_pre_wrap_len);
}
}
}
@@ -444,8 +390,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Assumes capacity is sufficient.
#[inline]
unsafe fn copy_slice(&mut self, dst: usize, src: &[T]) {
- debug_assert!(src.len() <= self.cap());
- let head_room = self.cap() - dst;
+ debug_assert!(src.len() <= self.capacity());
+ let head_room = self.capacity() - dst;
if src.len() <= head_room {
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len());
@@ -478,48 +424,100 @@ impl<T, A: Allocator> VecDeque<T, A> {
});
}
+ /// Writes all values from `iter` to `dst`, wrapping
+ /// at the end of the buffer and returns the number
+ /// of written values.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `iter` yields at most `len` items.
+ /// Assumes capacity is sufficient.
+ unsafe fn write_iter_wrapping(
+ &mut self,
+ dst: usize,
+ mut iter: impl Iterator<Item = T>,
+ len: usize,
+ ) -> usize {
+ struct Guard<'a, T, A: Allocator> {
+ deque: &'a mut VecDeque<T, A>,
+ written: usize,
+ }
+
+ impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
+ fn drop(&mut self) {
+ self.deque.len += self.written;
+ }
+ }
+
+ let head_room = self.capacity() - dst;
+
+ let mut guard = Guard { deque: self, written: 0 };
+
+ if head_room >= len {
+ unsafe { guard.deque.write_iter(dst, iter, &mut guard.written) };
+ } else {
+ unsafe {
+ guard.deque.write_iter(
+ dst,
+ ByRefSized(&mut iter).take(head_room),
+ &mut guard.written,
+ );
+ guard.deque.write_iter(0, iter, &mut guard.written)
+ };
+ }
+
+ guard.written
+ }
+
/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_capacity.
#[inline]
unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
- let new_capacity = self.cap();
+ let new_capacity = self.capacity();
+ debug_assert!(new_capacity >= old_capacity);
// Move the shortest contiguous section of the ring buffer
- // T H
+ //
+ // H := head
+ // L := last element (`self.to_physical_idx(self.len - 1)`)
+ //
+ // H L
// [o o o o o o o . ]
- // T H
+ // H L
// A [o o o o o o o . . . . . . . . . ]
- // H T
- // [o o . o o o o o ]
- // T H
+ // L H
+ // [o o o o o o o o ]
+ // H L
// B [. . . o o o o o o o . . . . . . ]
- // H T
- // [o o o o o . o o ]
- // H T
+ // L H
+ // [o o o o o o o o ]
+ // L H
// C [o o o o o . . . . . . . . . o o ]
- if self.tail <= self.head {
+ // can't use is_contiguous() because the capacity is already updated.
+ if self.head <= old_capacity - self.len {
// A
// Nop
- } else if self.head < old_capacity - self.tail {
- // B
- unsafe {
- self.copy_nonoverlapping(old_capacity, 0, self.head);
- }
- self.head += old_capacity;
- debug_assert!(self.head > self.tail);
} else {
- // C
- let new_tail = new_capacity - (old_capacity - self.tail);
- unsafe {
- self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+ let head_len = old_capacity - self.head;
+ let tail_len = self.len - head_len;
+ if head_len > tail_len && new_capacity - old_capacity >= tail_len {
+ // B
+ unsafe {
+ self.copy_nonoverlapping(0, old_capacity, tail_len);
+ }
+ } else {
+ // C
+ let new_head = new_capacity - head_len;
+ unsafe {
+ // can't use copy_nonoverlapping here, because if e.g. head_len = 2
+ // and new_capacity = old_capacity + 1, then the heads overlap.
+ self.copy(self.head, new_head, head_len);
+ }
+ self.head = new_head;
}
- self.tail = new_tail;
- debug_assert!(self.head < self.tail);
}
- debug_assert!(self.head < self.cap());
- debug_assert!(self.tail < self.cap());
- debug_assert!(self.cap().count_ones() == 1);
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
}
}
@@ -533,6 +531,7 @@ impl<T> VecDeque<T> {
///
/// let deque: VecDeque<u32> = VecDeque::new();
/// ```
+ // FIXME: This should probably be const
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
@@ -569,8 +568,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
- pub fn new_in(alloc: A) -> VecDeque<T, A> {
- VecDeque::with_capacity_in(INITIAL_CAPACITY, alloc)
+ pub const fn new_in(alloc: A) -> VecDeque<T, A> {
+ VecDeque { head: 0, len: 0, buf: RawVec::new_in(alloc) }
}
/// Creates an empty deque with space for at least `capacity` elements.
@@ -584,11 +583,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
- assert!(capacity < 1_usize << usize::BITS - 1, "capacity overflow");
- // +1 since the ringbuffer always leaves one space empty
- let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
-
- VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) }
+ VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
}
/// Provides a reference to the element at the given index.
@@ -608,8 +603,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
- if index < self.len() {
- let idx = self.wrap_add(self.tail, index);
+ if index < self.len {
+ let idx = self.to_physical_idx(index);
unsafe { Some(&*self.ptr().add(idx)) }
} else {
None
@@ -637,8 +632,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
- if index < self.len() {
- let idx = self.wrap_add(self.tail, index);
+ if index < self.len {
+ let idx = self.to_physical_idx(index);
unsafe { Some(&mut *self.ptr().add(idx)) }
} else {
None
@@ -672,8 +667,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
pub fn swap(&mut self, i: usize, j: usize) {
assert!(i < self.len());
assert!(j < self.len());
- let ri = self.wrap_add(self.tail, i);
- let rj = self.wrap_add(self.tail, j);
+ let ri = self.to_physical_idx(i);
+ let rj = self.to_physical_idx(j);
unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
}
@@ -691,7 +686,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
- self.cap() - 1
+ if T::IS_ZST { usize::MAX } else { self.buf.capacity() }
}
/// Reserves the minimum capacity for at least `additional` more elements to be inserted in the
@@ -718,7 +713,15 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// [`reserve`]: VecDeque::reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
- self.reserve(additional);
+ let new_cap = self.len.checked_add(additional).expect("capacity overflow");
+ let old_cap = self.capacity();
+
+ if new_cap > old_cap {
+ self.buf.reserve_exact(self.len, additional);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
@@ -739,15 +742,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
- let old_cap = self.cap();
- let used_cap = self.len() + 1;
- let new_cap = used_cap
- .checked_add(additional)
- .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
- .expect("capacity overflow");
+ let new_cap = self.len.checked_add(additional).expect("capacity overflow");
+ let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.reserve_exact(used_cap, new_cap - used_cap);
+ // we don't need to reserve_exact(), as the size doesn't have
+ // to be a power of 2.
+ self.buf.reserve(self.len, additional);
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -793,7 +794,17 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.try_reserve(additional)
+ let new_cap =
+ self.len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let old_cap = self.capacity();
+
+ if new_cap > old_cap {
+ self.buf.try_reserve_exact(self.len, additional)?;
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ Ok(())
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -831,15 +842,12 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- let old_cap = self.cap();
- let used_cap = self.len() + 1;
- let new_cap = used_cap
- .checked_add(additional)
- .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
- .ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let new_cap =
+ self.len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
+ self.buf.try_reserve(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
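// Usage sketch for the fallible path above: `try_reserve` reports capacity overflow or
// allocator failure instead of panicking, so callers can degrade gracefully.
use std::collections::{TryReserveError, VecDeque};

fn build(estimate: usize) -> Result<VecDeque<u8>, TryReserveError> {
    let mut buf = VecDeque::new();
    buf.try_reserve(estimate)?;
    buf.extend(std::iter::repeat(0u8).take(estimate.min(1024)));
    Ok(buf)
}

fn main() {
    assert!(build(64).is_ok());
    // A request this large cannot be satisfied and comes back as an error, not a panic.
    assert!(build(usize::MAX).is_err());
}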
@@ -890,13 +898,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
- let min_capacity = cmp::min(min_capacity, self.capacity());
- // We don't have to worry about an overflow as neither `self.len()` nor `self.capacity()`
- // can ever be `usize::MAX`. +1 as the ringbuffer always leaves one space empty.
- let target_cap = cmp::max(cmp::max(min_capacity, self.len()) + 1, MINIMUM_CAPACITY + 1)
- .next_power_of_two();
+ let target_cap = min_capacity.max(self.len);
- if target_cap < self.cap() {
+ // never shrink ZSTs
+ if T::IS_ZST || self.capacity() <= target_cap {
+ return;
+ }
+
+ if target_cap < self.capacity() {
// There are three cases of interest:
// All elements are out of desired bounds
// Elements are contiguous, and head is out of desired bounds
@@ -905,49 +914,55 @@ impl<T, A: Allocator> VecDeque<T, A> {
// At all other times, element positions are unaffected.
//
// Indicates that elements at the head should be moved.
- let head_outside = self.head == 0 || self.head >= target_cap;
+
+ let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
// Move elements from out of desired bounds (positions after target_cap)
- if self.tail >= target_cap && head_outside {
- // T H
+ if self.len == 0 {
+ self.head = 0;
+ } else if self.head >= target_cap && tail_outside {
+ // H := head
+ // L := last element
+ // H L
// [. . . . . . . . o o o o o o o . ]
- // T H
+ // H L
// [o o o o o o o . ]
unsafe {
- self.copy_nonoverlapping(0, self.tail, self.len());
+ // nonoverlapping because self.head >= target_cap >= self.len
+ self.copy_nonoverlapping(self.head, 0, self.len);
}
- self.head = self.len();
- self.tail = 0;
- } else if self.tail != 0 && self.tail < target_cap && head_outside {
- // T H
+ self.head = 0;
+ } else if self.head < target_cap && tail_outside {
+ // H := head
+ // L := last element
+ // H L
// [. . . o o o o o o o . . . . . . ]
- // H T
+ // L H
// [o o . o o o o o ]
- let len = self.wrap_sub(self.head, target_cap);
+ let len = self.head + self.len - target_cap;
unsafe {
- self.copy_nonoverlapping(0, target_cap, len);
+ self.copy_nonoverlapping(target_cap, 0, len);
}
- self.head = len;
- debug_assert!(self.head < self.tail);
- } else if self.tail >= target_cap {
- // H T
+ } else if self.head >= target_cap {
+ // H := head
+ // L := last element
+ // L H
// [o o o o o . . . . . . . . . o o ]
- // H T
+ // L H
// [o o o o o . o o ]
- debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
- let len = self.cap() - self.tail;
- let new_tail = target_cap - len;
+ let len = self.capacity() - self.head;
+ let new_head = target_cap - len;
unsafe {
- self.copy_nonoverlapping(new_tail, self.tail, len);
+ // can't use copy_nonoverlapping here for the same reason
+ // as in `handle_capacity_increase()`
+ self.copy(self.head, new_head, len);
}
- self.tail = new_tail;
- debug_assert!(self.head < self.tail);
+ self.head = new_head;
}
self.buf.shrink_to_fit(target_cap);
- debug_assert!(self.head < self.cap());
- debug_assert!(self.tail < self.cap());
- debug_assert!(self.cap().count_ones() == 1);
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
+ debug_assert!(self.len <= self.capacity());
}
}
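// Usage sketch for `shrink_to` above: the requested capacity is clamped to at least
// `len`, and the elements stay in logical order even when a slice has to be moved into
// the smaller buffer. The exact resulting capacity is not guaranteed, only the bounds.
use std::collections::VecDeque;

fn main() {
    let mut d: VecDeque<i32> = VecDeque::with_capacity(64);
    d.extend(0..8);
    d.shrink_to(16);
    assert!(d.capacity() >= 16);
    d.shrink_to(0);
    // Never shrinks below the current length, and the contents are untouched.
    assert!(d.capacity() >= d.len());
    assert_eq!(d, [0, 1, 2, 3, 4, 5, 6, 7]);
}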
@@ -992,20 +1007,20 @@ impl<T, A: Allocator> VecDeque<T, A> {
// * The head of the VecDeque is moved before calling `drop_in_place`,
// so no value is dropped twice if `drop_in_place` panics
unsafe {
- if len > self.len() {
+ if len >= self.len {
return;
}
- let num_dropped = self.len() - len;
+
let (front, back) = self.as_mut_slices();
if len > front.len() {
let begin = len - front.len();
let drop_back = back.get_unchecked_mut(begin..) as *mut _;
- self.head = self.wrap_sub(self.head, num_dropped);
+ self.len = len;
ptr::drop_in_place(drop_back);
} else {
let drop_back = back as *mut _;
let drop_front = front.get_unchecked_mut(len..) as *mut _;
- self.head = self.wrap_sub(self.head, num_dropped);
+ self.len = len;
// Make sure the second half is dropped even when a destructor
// in the first one panics.
@@ -1039,7 +1054,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, T> {
- Iter::new(unsafe { self.buffer_as_slice() }, self.tail, self.head)
+ let (a, b) = self.as_slices();
+ Iter::new(a.iter(), b.iter())
}
/// Returns a front-to-back iterator that returns mutable references.
@@ -1061,11 +1077,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
- // SAFETY: The internal `IterMut` safety invariant is established because the
- // `ring` we create is a dereferenceable slice for lifetime '_.
- let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
-
- unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) }
+ let (a, b) = self.as_mut_slices();
+ IterMut::new(a.iter_mut(), b.iter_mut())
}
/// Returns a pair of slices which contain, in order, the contents of the
@@ -1097,14 +1110,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- let buf = self.buffer_as_slice();
- let (front, back) = RingSlices::ring_slices(buf, self.head, self.tail);
- (MaybeUninit::slice_assume_init_ref(front), MaybeUninit::slice_assume_init_ref(back))
- }
+ let (a_range, b_range) = self.slice_ranges(..);
+ // SAFETY: `slice_ranges` always returns valid ranges into
+ // the physical buffer.
+ unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
}
/// Returns a pair of slices which contain, in order, the contents of the
@@ -1135,16 +1144,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- let head = self.head;
- let tail = self.tail;
- let buf = self.buffer_as_mut_slice();
- let (front, back) = RingSlices::ring_slices(buf, head, tail);
- (MaybeUninit::slice_assume_init_mut(front), MaybeUninit::slice_assume_init_mut(back))
- }
+ let (a_range, b_range) = self.slice_ranges(..);
+ // SAFETY: `slice_ranges` always returns valid ranges into
+ // the physical buffer.
+ unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
}
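// Usage sketch for the slice accessors above: the contents may wrap around the end of
// the physical buffer, so they are exposed as a (front, back) pair in logical order.
// The exact split point depends on the internal layout, so only the concatenation is checked.
use std::collections::VecDeque;

fn main() {
    let mut d = VecDeque::with_capacity(4);
    d.push_back(2);
    d.push_back(3);
    d.push_front(1);
    let (front, back) = d.as_slices();
    let mut all = front.to_vec();
    all.extend_from_slice(back);
    assert_eq!(all, [1, 2, 3]);
}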
/// Returns the number of elements in the deque.
@@ -1161,7 +1164,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
- count(self.tail, self.head, self.cap())
+ self.len
}
/// Returns `true` if the deque is empty.
@@ -1178,17 +1181,41 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
- self.tail == self.head
+ self.len == 0
}
- fn range_tail_head<R>(&self, range: R) -> (usize, usize)
+ /// Given a range into the logical buffer of the deque, this function
+ /// return two ranges into the physical buffer that correspond to
+ /// the given range.
+ fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
where
R: RangeBounds<usize>,
{
- let Range { start, end } = slice::range(range, ..self.len());
- let tail = self.wrap_add(self.tail, start);
- let head = self.wrap_add(self.tail, end);
- (tail, head)
+ let Range { start, end } = slice::range(range, ..self.len);
+ let len = end - start;
+
+ if len == 0 {
+ (0..0, 0..0)
+ } else {
+ // `slice::range` guarantees that `start <= end <= self.len`.
+ // because `len != 0`, we know that `start < end`, so `start < self.len`
+ // and the indexing is valid.
+ let wrapped_start = self.to_physical_idx(start);
+
+ // this subtraction can never overflow because `wrapped_start` is
+ // at most `self.capacity()` (and if `self.capacity != 0`, then `wrapped_start` is strictly less
+ // than `self.capacity`).
+ let head_len = self.capacity() - wrapped_start;
+
+ if head_len >= len {
+ // we know that `len + wrapped_start <= self.capacity <= usize::MAX`, so this addition can't overflow
+ (wrapped_start..wrapped_start + len, 0..0)
+ } else {
+ // can't overflow because of the if condition
+ let tail_len = len - head_len;
+ (wrapped_start..self.capacity(), 0..tail_len)
+ }
+ }
}
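// Standalone sketch of the split performed by `slice_ranges` above (a hypothetical free
// function, not the private method): a logical `start..end` becomes at most two ranges
// into the physical buffer, the second one starting at index 0 after the wrap.
use std::ops::Range;

fn split_ranges(head: usize, capacity: usize, start: usize, end: usize) -> (Range<usize>, Range<usize>) {
    // Assumes start <= end <= len <= capacity and head < capacity (or everything is 0).
    let len = end - start;
    if len == 0 {
        return (0..0, 0..0);
    }
    let wrapped_start = if head + start >= capacity { head + start - capacity } else { head + start };
    let head_len = capacity - wrapped_start;
    if head_len >= len {
        (wrapped_start..wrapped_start + len, 0..0)
    } else {
        (wrapped_start..capacity, 0..len - head_len)
    }
}

fn main() {
    // head = 6 in a capacity-8 buffer: logical 0..5 covers physical 6..8 and 0..3.
    assert_eq!(split_ranges(6, 8, 0, 5), (6..8, 0..3));
    // A range that ends before the wrap point stays in one piece.
    assert_eq!(split_ranges(6, 8, 0, 2), (6..8, 0..0));
}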
/// Creates an iterator that covers the specified range in the deque.
@@ -1217,9 +1244,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (tail, head) = self.range_tail_head(range);
- // The shared reference we have in &self is maintained in the '_ of Iter.
- Iter::new(unsafe { self.buffer_as_slice() }, tail, head)
+ let (a_range, b_range) = self.slice_ranges(range);
+ // SAFETY: The ranges returned by `slice_ranges`
+ // are valid ranges into the physical buffer, so
+ // it's ok to pass them to `buffer_range` and
+ // dereference the result.
+ let a = unsafe { &*self.buffer_range(a_range) };
+ let b = unsafe { &*self.buffer_range(b_range) };
+ Iter::new(a.iter(), b.iter())
}
/// Creates an iterator that covers the specified mutable range in the deque.
@@ -1252,13 +1284,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (tail, head) = self.range_tail_head(range);
-
- // SAFETY: The internal `IterMut` safety invariant is established because the
- // `ring` we create is a dereferenceable slice for lifetime '_.
- let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
-
- unsafe { IterMut::new(ring, tail, head, PhantomData) }
+ let (a_range, b_range) = self.slice_ranges(range);
+ // SAFETY: The ranges returned by `slice_ranges`
+ // are valid ranges into the physical buffer, so
+ // it's ok to pass them to `buffer_range` and
+ // dereference the result.
+ let a = unsafe { &mut *self.buffer_range(a_range) };
+ let b = unsafe { &mut *self.buffer_range(b_range) };
+ IterMut::new(a.iter_mut(), b.iter_mut())
}
/// Removes the specified range from the deque in bulk, returning all
@@ -1310,39 +1343,30 @@ impl<T, A: Allocator> VecDeque<T, A> {
// When finished, the remaining data will be copied back to cover the hole,
// and the head/tail values will be restored correctly.
//
- let (drain_tail, drain_head) = self.range_tail_head(range);
+ let Range { start, end } = slice::range(range, ..self.len);
+ let drain_start = start;
+ let drain_len = end - start;
// The deque's elements are parted into three segments:
- // * self.tail -> drain_tail
- // * drain_tail -> drain_head
- // * drain_head -> self.head
+ // * 0 -> drain_start
+ // * drain_start -> drain_start+drain_len
+ // * drain_start+drain_len -> self.len
//
- // T = self.tail; H = self.head; t = drain_tail; h = drain_head
+ // H = self.head; T = self.head+self.len; h = self.head+drain_start; t = self.head+drain_start+drain_len
//
- // We store drain_tail as self.head, and drain_head and self.head as
- // after_tail and after_head respectively on the Drain. This also
+ // We store drain_start as self.len, and drain_len and self.len as
+ // drain_len and orig_len respectively on the Drain. This also
// truncates the effective array such that if the Drain is leaked, we
// have forgotten about the potentially moved values after the start of
// the drain.
//
- // T t h H
+ // H h t T
// [. . . o o x x o o . . .]
//
- let head = self.head;
-
// "forget" about the values after the start of the drain until after
// the drain is complete and the Drain destructor is run.
- self.head = drain_tail;
- let deque = NonNull::from(&mut *self);
- unsafe {
- // Crucially, we only create shared references from `self` here and read from
- // it. We do not write to `self` nor reborrow to a mutable reference.
- // Hence the raw pointer we created above, for `deque`, remains valid.
- let ring = self.buffer_as_slice();
-
- Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
- }
+ unsafe { Drain::new(self, drain_start, drain_len) }
}
/// Clears the deque, removing all values.
@@ -1361,6 +1385,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
pub fn clear(&mut self) {
self.truncate(0);
+ // Not strictly necessary, but leaves things in a more consistent/predictable state.
+ self.head = 0;
}
/// Returns `true` if the deque contains an element equal to the
@@ -1455,7 +1481,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
- self.get(self.len().wrapping_sub(1))
+ self.get(self.len.wrapping_sub(1))
}
/// Provides a mutable reference to the back element, or `None` if the
@@ -1479,7 +1505,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
- self.get_mut(self.len().wrapping_sub(1))
+ self.get_mut(self.len.wrapping_sub(1))
}
/// Removes the first element and returns it, or `None` if the deque is
@@ -1503,9 +1529,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
if self.is_empty() {
None
} else {
- let tail = self.tail;
- self.tail = self.wrap_add(self.tail, 1);
- unsafe { Some(self.buffer_read(tail)) }
+ let old_head = self.head;
+ self.head = self.to_physical_idx(1);
+ self.len -= 1;
+ Some(unsafe { self.buffer_read(old_head) })
}
}
@@ -1528,9 +1555,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
if self.is_empty() {
None
} else {
- self.head = self.wrap_sub(self.head, 1);
- let head = self.head;
- unsafe { Some(self.buffer_read(head)) }
+ self.len -= 1;
+ Some(unsafe { self.buffer_read(self.to_physical_idx(self.len)) })
}
}
@@ -1552,10 +1578,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- self.tail = self.wrap_sub(self.tail, 1);
- let tail = self.tail;
+ self.head = self.wrap_sub(self.head, 1);
+ self.len += 1;
+
unsafe {
- self.buffer_write(tail, value);
+ self.buffer_write(self.head, value);
}
}
@@ -1577,16 +1604,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- let head = self.head;
- self.head = self.wrap_add(self.head, 1);
- unsafe { self.buffer_write(head, value) }
+ unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
+ self.len += 1;
}
#[inline]
fn is_contiguous(&self) -> bool {
- // FIXME: Should we consider `head == 0` to mean
- // that `self` is contiguous?
- self.tail <= self.head
+ // Do the calculation like this to avoid overflowing if len + head > usize::MAX
+ self.head <= self.capacity() - self.len
}
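// Worked check of the overflow reasoning in `is_contiguous` above. For zero-sized
// element types the deque reports `capacity() == usize::MAX`, so computing `head + len`
// could overflow; `capacity - len` never can, because `len <= capacity` always holds.
// The concrete numbers below are purely illustrative.
fn main() {
    let (head, len, capacity) = (3usize, usize::MAX - 1, usize::MAX);
    // The naive form `head + len <= capacity` would overflow while computing the sum...
    assert!(head.checked_add(len).is_none());
    // ...while the rewritten comparison yields the correct answer (false) without overflowing.
    assert!(!(head <= capacity - len));
}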
/// Removes an element from anywhere in the deque and returns it,
@@ -1615,8 +1640,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
- let length = self.len();
- if length > 0 && index < length && index != 0 {
+ let length = self.len;
+ if index < length && index != 0 {
self.swap(index, 0);
} else if index >= length {
return None;
@@ -1650,7 +1675,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
- let length = self.len();
+ let length = self.len;
if length > 0 && index < length - 1 {
self.swap(index, length - 1);
} else if index >= length {
@@ -1689,198 +1714,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- // Move the least number of elements in the ring buffer and insert
- // the given object
- //
- // At most len/2 - 1 elements will be moved. O(min(n, n-i))
- //
- // There are three main cases:
- // Elements are contiguous
- // - special case when tail is 0
- // Elements are discontiguous and the insert is in the tail section
- // Elements are discontiguous and the insert is in the head section
- //
- // For each of those there are two more cases:
- // Insert is closer to tail
- // Insert is closer to head
- //
- // Key: H - self.head
- // T - self.tail
- // o - Valid element
- // I - Insertion element
- // A - The element that should be after the insertion point
- // M - Indicates element was moved
-
- let idx = self.wrap_add(self.tail, index);
-
- let distance_to_tail = index;
- let distance_to_head = self.len() - index;
-
- let contiguous = self.is_contiguous();
-
- match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
- (true, true, _) if index == 0 => {
- // push_front
- //
- // T
- // I H
- // [A o o o o o o . . . . . . . . .]
- //
- // H T
- // [A o o o o o o o . . . . . I]
- //
-
- self.tail = self.wrap_sub(self.tail, 1);
- }
- (true, true, _) => {
- unsafe {
- // contiguous, insert closer to tail:
- //
- // T I H
- // [. . . o o A o o o o . . . . . .]
- //
- // T H
- // [. . o o I A o o o o . . . . . .]
- // M M
- //
- // contiguous, insert closer to tail and tail is 0:
- //
- //
- // T I H
- // [o o A o o o o . . . . . . . . .]
- //
- // H T
- // [o I A o o o o o . . . . . . . o]
- // M M
-
- let new_tail = self.wrap_sub(self.tail, 1);
-
- self.copy(new_tail, self.tail, 1);
- // Already moved the tail, so we only copy `index - 1` elements.
- self.copy(self.tail, self.tail + 1, index - 1);
-
- self.tail = new_tail;
- }
- }
- (true, false, _) => {
- unsafe {
- // contiguous, insert closer to head:
- //
- // T I H
- // [. . . o o o o A o o . . . . . .]
- //
- // T H
- // [. . . o o o o I A o o . . . . .]
- // M M M
-
- self.copy(idx + 1, idx, self.head - idx);
- self.head = self.wrap_add(self.head, 1);
- }
- }
- (false, true, true) => {
- unsafe {
- // discontiguous, insert closer to tail, tail section:
- //
- // H T I
- // [o o o o o o . . . . . o o A o o]
- //
- // H T
- // [o o o o o o . . . . o o I A o o]
- // M M
-
- self.copy(self.tail - 1, self.tail, index);
- self.tail -= 1;
- }
- }
- (false, false, true) => {
- unsafe {
- // discontiguous, insert closer to head, tail section:
- //
- // H T I
- // [o o . . . . . . . o o o o o A o]
- //
- // H T
- // [o o o . . . . . . o o o o o I A]
- // M M M M
-
- // copy elements up to new head
- self.copy(1, 0, self.head);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(0, self.cap() - 1, 1);
-
- // move elements from idx to end forward not including ^ element
- self.copy(idx + 1, idx, self.cap() - 1 - idx);
-
- self.head += 1;
- }
- }
- (false, true, false) if idx == 0 => {
- unsafe {
- // discontiguous, insert is closer to tail, head section,
- // and is at index zero in the internal buffer:
- //
- // I H T
- // [A o o o o o o o o o . . . o o o]
- //
- // H T
- // [A o o o o o o o o o . . o o o I]
- // M M M
-
- // copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(self.cap() - 1, 0, 1);
-
- self.tail -= 1;
- }
- }
- (false, true, false) => {
- unsafe {
- // discontiguous, insert closer to tail, head section:
- //
- // I H T
- // [o o o A o o o o o o . . . o o o]
- //
- // H T
- // [o o I A o o o o o o . . o o o o]
- // M M M M M M
-
- // copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(self.cap() - 1, 0, 1);
-
- // move elements from idx-1 to end forward not including ^ element
- self.copy(0, 1, idx - 1);
-
- self.tail -= 1;
- }
+ let k = self.len - index;
+ if k < index {
+ // `index + 1` can't overflow, because if index was usize::MAX, then either the
+ // assert would've failed, or the deque would've tried to grow past usize::MAX
+ // and panicked.
+ unsafe {
+ // see `remove()` for explanation why this wrap_copy() call is safe.
+ self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
}
- (false, false, false) => {
- unsafe {
- // discontiguous, insert closer to head, head section:
- //
- // I H T
- // [o o o o A o o . . . . . . o o o]
- //
- // H T
- // [o o o o I A o o . . . . . o o o]
- // M M M
-
- self.copy(idx + 1, idx, self.head - idx);
- self.head += 1;
- }
+ } else {
+ let old_head = self.head;
+ self.head = self.wrap_sub(self.head, 1);
+ unsafe {
+ self.wrap_copy(old_head, self.head, index);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
}
}
-
- // tail might've been changed so we need to recalculate
- let new_idx = self.wrap_add(self.tail, index);
- unsafe {
- self.buffer_write(new_idx, value);
- }
}
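// Usage sketch for the rewritten `insert` above: whichever side of the insertion point
// holds fewer elements is shifted by a single `wrap_copy`, so at most len / 2 elements move.
use std::collections::VecDeque;

fn main() {
    let mut d: VecDeque<_> = (0..6).collect();
    d.insert(1, 100); // near the front: only the one leading element is shifted
    d.insert(6, 200); // near the back: only the one trailing element is shifted
    assert_eq!(d, [0, 100, 1, 2, 3, 4, 200, 5]);
}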
/// Removes and returns the element at `index` from the deque.
@@ -1906,156 +1759,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
- if self.is_empty() || self.len() <= index {
+ if self.len <= index {
return None;
}
- // There are three main cases:
- // Elements are contiguous
- // Elements are discontiguous and the removal is in the tail section
- // Elements are discontiguous and the removal is in the head section
- // - special case when elements are technically contiguous,
- // but self.head = 0
- //
- // For each of those there are two more cases:
- // Insert is closer to tail
- // Insert is closer to head
- //
- // Key: H - self.head
- // T - self.tail
- // o - Valid element
- // x - Element marked for removal
- // R - Indicates element that is being removed
- // M - Indicates element was moved
-
- let idx = self.wrap_add(self.tail, index);
-
- let elem = unsafe { Some(self.buffer_read(idx)) };
+ let wrapped_idx = self.to_physical_idx(index);
- let distance_to_tail = index;
- let distance_to_head = self.len() - index;
+ let elem = unsafe { Some(self.buffer_read(wrapped_idx)) };
- let contiguous = self.is_contiguous();
-
- match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
- (true, true, _) => {
- unsafe {
- // contiguous, remove closer to tail:
- //
- // T R H
- // [. . . o o x o o o o . . . . . .]
- //
- // T H
- // [. . . . o o o o o o . . . . . .]
- // M M
-
- self.copy(self.tail + 1, self.tail, index);
- self.tail += 1;
- }
- }
- (true, false, _) => {
- unsafe {
- // contiguous, remove closer to head:
- //
- // T R H
- // [. . . o o o o x o o . . . . . .]
- //
- // T H
- // [. . . o o o o o o . . . . . . .]
- // M M
-
- self.copy(idx, idx + 1, self.head - idx - 1);
- self.head -= 1;
- }
- }
- (false, true, true) => {
- unsafe {
- // discontiguous, remove closer to tail, tail section:
- //
- // H T R
- // [o o o o o o . . . . . o o x o o]
- //
- // H T
- // [o o o o o o . . . . . . o o o o]
- // M M
-
- self.copy(self.tail + 1, self.tail, index);
- self.tail = self.wrap_add(self.tail, 1);
- }
- }
- (false, false, false) => {
- unsafe {
- // discontiguous, remove closer to head, head section:
- //
- // R H T
- // [o o o o x o o . . . . . . o o o]
- //
- // H T
- // [o o o o o o . . . . . . . o o o]
- // M M
-
- self.copy(idx, idx + 1, self.head - idx - 1);
- self.head -= 1;
- }
- }
- (false, false, true) => {
- unsafe {
- // discontiguous, remove closer to head, tail section:
- //
- // H T R
- // [o o o . . . . . . o o o o o x o]
- //
- // H T
- // [o o . . . . . . . o o o o o o o]
- // M M M M
- //
- // or quasi-discontiguous, remove next to head, tail section:
- //
- // H T R
- // [. . . . . . . . . o o o o o x o]
- //
- // T H
- // [. . . . . . . . . o o o o o o .]
- // M
-
- // draw in elements in the tail section
- self.copy(idx, idx + 1, self.cap() - idx - 1);
-
- // Prevents underflow.
- if self.head != 0 {
- // copy first element into empty spot
- self.copy(self.cap() - 1, 0, 1);
-
- // move elements in the head section backwards
- self.copy(0, 1, self.head - 1);
- }
-
- self.head = self.wrap_sub(self.head, 1);
- }
- }
- (false, true, false) => {
- unsafe {
- // discontiguous, remove closer to tail, head section:
- //
- // R H T
- // [o o x o o o o o o o . . . o o o]
- //
- // H T
- // [o o o o o o o o o o . . . . o o]
- // M M M M M
-
- // draw in elements up to idx
- self.copy(1, 0, idx);
-
- // copy last element into empty spot
- self.copy(0, self.cap() - 1, 1);
-
- // move elements from tail to end forward, excluding the last one
- self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
-
- self.tail = self.wrap_add(self.tail, 1);
- }
- }
+ let k = self.len - index - 1;
+ // safety: due to the nature of the if-condition, whichever wrap_copy gets called,
+ // its length argument will be at most `self.len / 2`, so there can't be more than
+ // one overlapping area.
+ if k < index {
+ unsafe { self.wrap_copy(self.wrap_add(wrapped_idx, 1), wrapped_idx, k) };
+ self.len -= 1;
+ } else {
+ let old_head = self.head;
+ self.head = self.to_physical_idx(1);
+ unsafe { self.wrap_copy(old_head, self.head, index) };
+ self.len -= 1;
}
elem
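// Usage sketch for the rewritten `remove` above: the gap is closed by shifting whichever
// side of `index` is shorter, and an out-of-bounds index is reported as `None` rather
// than panicking.
use std::collections::VecDeque;

fn main() {
    let mut d: VecDeque<_> = (0..5).collect();
    assert_eq!(d.remove(1), Some(1));
    assert_eq!(d.remove(10), None);
    assert_eq!(d, [0, 2, 3, 4]);
}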
@@ -2091,7 +1814,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
A: Clone,
{
- let len = self.len();
+ let len = self.len;
assert!(at <= len, "`at` out of bounds");
let other_len = len - at;
@@ -2128,8 +1851,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
// Cleanup where the ends of the buffers are
- self.head = self.wrap_sub(self.head, other_len);
- other.head = other.wrap_index(other_len);
+ self.len = at;
+ other.len = other_len;
other
}
@@ -2154,17 +1877,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
- self.reserve(other.len());
+ if T::IS_ZST {
+ self.len += other.len;
+ other.len = 0;
+ other.head = 0;
+ return;
+ }
+
+ self.reserve(other.len);
unsafe {
let (left, right) = other.as_slices();
- self.copy_slice(self.head, left);
- self.copy_slice(self.wrap_add(self.head, left.len()), right);
+ self.copy_slice(self.to_physical_idx(self.len), left);
+ // no overflow, because self.capacity() >= self.len + other.len() >= self.len + left.len()
+ self.copy_slice(self.to_physical_idx(self.len + left.len()), right);
}
// SAFETY: Update pointers after copying to avoid leaving doppelganger
// in case of panics.
- self.head = self.wrap_add(self.head, other.len());
- // Silently drop values in `other`.
- other.tail = other.head;
+ self.len += other.len;
+ // Now that we own its values, forget everything in `other`.
+ other.len = 0;
+ other.head = 0;
}
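// Usage sketch for `append` above: the other deque's two slices are copied to the end of
// `self`, after which `other` is logically emptied (it keeps its allocation).
use std::collections::VecDeque;

fn main() {
    let mut a: VecDeque<_> = (0..3).collect();
    let mut b: VecDeque<_> = (3..6).collect();
    a.append(&mut b);
    assert_eq!(a, [0, 1, 2, 3, 4, 5]);
    assert!(b.is_empty());
}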
/// Retains only the elements specified by the predicate.
@@ -2232,7 +1964,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
F: FnMut(&mut T) -> bool,
{
- let len = self.len();
+ let len = self.len;
let mut idx = 0;
let mut cur = 0;
@@ -2270,9 +2002,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// Extend or possibly remove this assertion when valid use-cases for growing the
// buffer without it being full emerge
debug_assert!(self.is_full());
- let old_cap = self.cap();
- self.buf.reserve_exact(old_cap, old_cap);
- assert!(self.cap() == old_cap * 2);
+ let old_cap = self.capacity();
+ self.buf.reserve_for_push(old_cap);
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -2306,7 +2037,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "vec_resize_with", since = "1.33.0")]
pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
- let len = self.len();
+ let len = self.len;
if new_len > len {
self.extend(repeat_with(generator).take(new_len - len))
@@ -2372,110 +2103,129 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_make_contiguous", since = "1.48.0")]
pub fn make_contiguous(&mut self) -> &mut [T] {
+ if T::IS_ZST {
+ self.head = 0;
+ }
+
if self.is_contiguous() {
- let tail = self.tail;
- let head = self.head;
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- return unsafe {
- MaybeUninit::slice_assume_init_mut(
- RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
- )
- };
+ unsafe { return slice::from_raw_parts_mut(self.ptr().add(self.head), self.len) }
}
- let buf = self.buf.ptr();
- let cap = self.cap();
- let len = self.len();
+ let &mut Self { head, len, .. } = self;
+ let ptr = self.ptr();
+ let cap = self.capacity();
- let free = self.tail - self.head;
- let tail_len = cap - self.tail;
+ let free = cap - len;
+ let head_len = cap - head;
+ let tail = len - head_len;
+ let tail_len = tail;
- if free >= tail_len {
- // there is enough free space to copy the tail in one go,
- // this means that we first shift the head backwards, and then
- // copy the tail to the correct position.
+ if free >= head_len {
+ // there is enough free space to copy the head in one go,
+ // this means that we first shift the tail backwards, and then
+ // copy the head to the correct position.
//
// from: DEFGH....ABC
// to: ABCDEFGH....
unsafe {
- ptr::copy(buf, buf.add(tail_len), self.head);
+ self.copy(0, head_len, tail_len);
// ...DEFGH.ABC
- ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+ self.copy_nonoverlapping(head, 0, head_len);
// ABCDEFGH....
-
- self.tail = 0;
- self.head = len;
}
- } else if free > self.head {
- // FIXME: We currently do not consider ....ABCDEFGH
- // to be contiguous because `head` would be `0` in this
- // case. While we probably want to change this it
- // isn't trivial as a few places expect `is_contiguous`
- // to mean that we can just slice using `buf[tail..head]`.
- // there is enough free space to copy the head in one go,
- // this means that we first shift the tail forwards, and then
- // copy the head to the correct position.
+ self.head = 0;
+ } else if free >= tail_len {
+ // there is enough free space to copy the tail in one go,
+ // this means that we first shift the head forwards, and then
+ // copy the tail to the correct position.
//
// from: FGH....ABCDE
// to: ...ABCDEFGH.
unsafe {
- ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+ self.copy(head, tail, head_len);
// FGHABCDE....
- ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+ self.copy_nonoverlapping(0, tail + head_len, tail_len);
// ...ABCDEFGH.
-
- self.tail = self.head;
- self.head = self.wrap_add(self.tail, len);
}
+
+ self.head = tail;
} else {
- // free is smaller than both head and tail,
- // this means we have to slowly "swap" the tail and the head.
+ // `free` is smaller than both `head_len` and `tail_len`.
+ // the general algorithm for this first moves the slices
+ // right next to each other and then uses `slice::rotate`
+ // to rotate them into place:
//
- // from: EFGHI...ABCD or HIJK.ABCDEFG
- // to: ABCDEFGHI... or ABCDEFGHIJK.
- let mut left_edge: usize = 0;
- let mut right_edge: usize = self.tail;
- unsafe {
- // The general problem looks like this
- // GHIJKLM...ABCDEF - before any swaps
- // ABCDEFM...GHIJKL - after 1 pass of swaps
- // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
- // - then restart the algorithm with a new (smaller) store
- // Sometimes the temp store is reached when the right edge is at the end
- // of the buffer - this means we've hit the right order with fewer swaps!
- // E.g
- // EF..ABCD
- // ABCDEF.. - after four only swaps we've finished
- while left_edge < len && right_edge != cap {
- let mut right_offset = 0;
- for i in left_edge..right_edge {
- right_offset = (i - left_edge) % (cap - right_edge);
- let src = right_edge + right_offset;
- ptr::swap(buf.add(i), buf.add(src));
+ // initially: HIJK..ABCDEFG
+ // step 1: ..HIJKABCDEFG
+ // step 2: ..ABCDEFGHIJK
+ //
+ // or:
+ //
+ // initially: FGHIJK..ABCDE
+ // step 1: FGHIJKABCDE..
+ // step 2: ABCDEFGHIJK..
+
+ // pick the shorter of the 2 slices to reduce the amount
+ // of memory that needs to be moved around.
+ if head_len > tail_len {
+ // tail is shorter, so:
+ // 1. copy tail forwards
+ // 2. rotate used part of the buffer
+ // 3. update head to point to the new beginning (which is just `free`)
+
+ unsafe {
+ // if there is no free space in the buffer, then the slices are already
+ // right next to each other and we don't need to move any memory.
+ if free != 0 {
+ // because we only move the tail forward as much as there's free space
+ // behind it, we don't overwrite any elements of the head slice, and
+ // the slices end up right next to each other.
+ self.copy(0, free, tail_len);
}
- let n_ops = right_edge - left_edge;
- left_edge += n_ops;
- right_edge += right_offset + 1;
+
+ // We just copied the tail right next to the head slice,
+ // so all of the elements in the range are initialized
+ let slice = &mut *self.buffer_range(free..self.capacity());
+
+ // because the deque wasn't contiguous, we know that `tail_len < self.len == slice.len()`,
+ // so this will never panic.
+ slice.rotate_left(tail_len);
+
+ // the used part of the buffer now is `free..self.capacity()`, so set
+ // `head` to the beginning of that range.
+ self.head = free;
}
+ } else {
+ // head is shorter so:
+ // 1. copy head backwards
+ // 2. rotate used part of the buffer
+ // 3. update head to point to the new beginning (which is the beginning of the buffer)
+
+ unsafe {
+ // if there is no free space in the buffer, then the slices are already
+ // right next to each other and we don't need to move any memory.
+ if free != 0 {
+ // copy the head slice to lie right behind the tail slice.
+ self.copy(self.head, tail_len, head_len);
+ }
- self.tail = 0;
- self.head = len;
+ // because we copied the head slice so that both slices lie right
+ // next to each other, all the elements in the range are initialized.
+ let slice = &mut *self.buffer_range(0..self.len);
+
+ // because the deque wasn't contiguous, we know that `head_len < self.len == slice.len()`
+ // so this will never panic.
+ slice.rotate_right(head_len);
+
+ // the used part of the buffer now is `0..self.len`, so set
+ // `head` to the beginning of that range.
+ self.head = 0;
+ }
}
}
- let tail = self.tail;
- let head = self.head;
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- MaybeUninit::slice_assume_init_mut(
- RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
- )
- }
+ unsafe { slice::from_raw_parts_mut(ptr.add(self.head), self.len) }
}
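// Usage sketch for `make_contiguous` above: when the contents wrap around, either one
// slice is slid into the free space, or (when neither slice fits into the free space)
// both are packed together and rotated into order. The result is a single mutable slice
// over all elements in logical order.
use std::collections::VecDeque;

fn main() {
    let mut d = VecDeque::with_capacity(4);
    d.push_back(2);
    d.push_back(3);
    d.push_front(1); // the deque now wraps around the end of its buffer
    d.make_contiguous().sort();
    // After the call, `as_slices` returns everything in the first slice.
    assert_eq!(d.as_slices(), (&[1, 2, 3] as &[_], &[] as &[_]));
}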
/// Rotates the double-ended queue `mid` places to the left.
@@ -2513,7 +2263,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
- let k = self.len() - mid;
+ let k = self.len - mid;
if mid <= k {
unsafe { self.rotate_left_inner(mid) }
} else {
@@ -2556,7 +2306,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
- let mid = self.len() - k;
+ let mid = self.len - k;
if k <= mid {
unsafe { self.rotate_right_inner(k) }
} else {
@@ -2567,26 +2317,24 @@ impl<T, A: Allocator> VecDeque<T, A> {
// SAFETY: the following two methods require that the rotation amount
// be less than half the length of the deque.
//
- // `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
- // but than `min` is never more than half the capacity, regardless of x,
+ // `wrap_copy` requires that `min(x, capacity() - x) + copy_len <= capacity()`,
+ // but then `min` is never more than half the capacity, regardless of x,
// so it's sound to call here because we're calling with something
// less than half the length, which is never above half the capacity.
unsafe fn rotate_left_inner(&mut self, mid: usize) {
debug_assert!(mid * 2 <= self.len());
unsafe {
- self.wrap_copy(self.head, self.tail, mid);
+ self.wrap_copy(self.head, self.to_physical_idx(self.len), mid);
}
- self.head = self.wrap_add(self.head, mid);
- self.tail = self.wrap_add(self.tail, mid);
+ self.head = self.to_physical_idx(mid);
}
unsafe fn rotate_right_inner(&mut self, k: usize) {
debug_assert!(k * 2 <= self.len());
self.head = self.wrap_sub(self.head, k);
- self.tail = self.wrap_sub(self.tail, k);
unsafe {
- self.wrap_copy(self.tail, self.head, k);
+ self.wrap_copy(self.to_physical_idx(self.len), self.head, k);
}
}
@@ -2833,29 +2581,30 @@ impl<T: Clone, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn resize(&mut self, new_len: usize, value: T) {
- self.resize_with(new_len, || value.clone());
+ if new_len > self.len() {
+ let extra = new_len - self.len();
+ self.extend(repeat_n(value, extra))
+ } else {
+ self.truncate(new_len);
+ }
}
}
/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
-fn wrap_index(index: usize, size: usize) -> usize {
- // size is always a power of 2
- debug_assert!(size.is_power_of_two());
- index & (size - 1)
-}
-
-/// Calculate the number of elements left to be read in the buffer
-#[inline]
-fn count(tail: usize, head: usize, size: usize) -> usize {
- // size is always a power of 2
- (head.wrapping_sub(tail)) & (size - 1)
+fn wrap_index(logical_index: usize, capacity: usize) -> usize {
+ debug_assert!(
+ (logical_index == 0 && capacity == 0)
+ || logical_index < capacity
+ || (logical_index - capacity) < capacity
+ );
+ if logical_index >= capacity { logical_index - capacity } else { logical_index }
}
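// Quick check of the new `wrap_index` contract above: the input either is already in
// range or overshoots by less than one full capacity, so a single conditional
// subtraction replaces the old power-of-two bitmask. This mirrors the function as added
// by the diff, reproduced here outside of its module for illustration.
fn wrap_index(logical_index: usize, capacity: usize) -> usize {
    if logical_index >= capacity { logical_index - capacity } else { logical_index }
}

fn main() {
    assert_eq!(wrap_index(5, 7), 5); // already in range
    assert_eq!(wrap_index(9, 7), 2); // overshoots by 2, wrapped once
    // Unlike `index & (size - 1)`, this also works for capacities such as 7
    // that are not powers of two.
}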
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialEq, A: Allocator> PartialEq for VecDeque<T, A> {
fn eq(&self, other: &Self) -> bool {
- if self.len() != other.len() {
+ if self.len != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
@@ -2919,7 +2668,7 @@ impl<T: Ord, A: Allocator> Ord for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash, A: Allocator> Hash for VecDeque<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
- state.write_length_prefix(self.len());
+ state.write_length_prefix(self.len);
// It's not possible to use Hash::hash_slice on slices
// returned by as_slices method as their length can vary
// in otherwise identical deques.
@@ -2950,12 +2699,18 @@ impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for VecDeque<T> {
+ #[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
- let mut deq = VecDeque::with_capacity(lower);
- deq.extend(iterator);
- deq
+ // Since converting is O(1) now, might as well re-use that logic
+ // (including things like the `vec::IntoIter`→`Vec` specialization)
+ // especially as that could save us some monomorphization work
+ // if one uses the same iterators (like slice ones) with both.
+ return from_iter_via_vec(iter.into_iter());
+
+ #[inline]
+ fn from_iter_via_vec<U>(iter: impl Iterator<Item = U>) -> VecDeque<U> {
+ Vec::from_iter(iter).into()
+ }
}
}
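// Usage sketch for the `FromIterator` shortcut above: collecting builds a `Vec` first
// and then relies on the now-cheap `Vec -> VecDeque` conversion, so the `Vec` iterator
// specializations keep applying.
use std::collections::VecDeque;

fn main() {
    let d: VecDeque<i32> = (0..5).map(|x| x * x).collect();
    assert_eq!(d, [0, 1, 4, 9, 16]);

    // The same conversion is also available directly.
    let d2 = VecDeque::from(vec![1, 2, 3]);
    assert_eq!(d2, [1, 2, 3]);
}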
@@ -3028,7 +2783,7 @@ impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for VecDeque<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_list().entries(self).finish()
+ f.debug_list().entries(self.iter()).finish()
}
}
@@ -3039,31 +2794,13 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
- /// This avoids reallocating where possible, but the conditions for that are
- /// strict, and subject to change, and so shouldn't be relied upon unless the
- /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
- fn from(mut other: Vec<T, A>) -> Self {
- let len = other.len();
- if T::IS_ZST {
- // There's no actual allocation for ZSTs to worry about capacity,
- // but `VecDeque` can't handle as much length as `Vec`.
- assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
- } else {
- // We need to resize if the capacity is not a power of two, too small or
- // doesn't have at least one free space. We do this while it's still in
- // the `Vec` so the items will drop on panic.
- let min_cap = cmp::max(MINIMUM_CAPACITY, len) + 1;
- let cap = cmp::max(min_cap, other.capacity()).next_power_of_two();
- if other.capacity() != cap {
- other.reserve_exact(cap - len);
- }
- }
-
- unsafe {
- let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc();
- let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc);
- VecDeque { tail: 0, head: len, buf }
- }
+ /// In its current implementation, this is a very cheap
+ /// conversion. This isn't yet a guarantee though, and
+ /// shouldn't be relied on.
+ #[inline]
+ fn from(other: Vec<T, A>) -> Self {
+ let (ptr, len, cap, alloc) = other.into_raw_parts_with_alloc();
+ Self { head: 0, len, buf: unsafe { RawVec::from_raw_parts_in(ptr, cap, alloc) } }
}
}
@@ -3105,11 +2842,11 @@ impl<T, A: Allocator> From<VecDeque<T, A>> for Vec<T, A> {
let other = ManuallyDrop::new(other);
let buf = other.buf.ptr();
let len = other.len();
- let cap = other.cap();
+ let cap = other.capacity();
let alloc = ptr::read(other.allocator());
- if other.tail != 0 {
- ptr::copy(buf.add(other.tail), buf, len);
+ if other.head != 0 {
+ ptr::copy(buf.add(other.head), buf, len);
}
Vec::from_raw_parts_in(buf, len, cap, alloc)
}
@@ -3136,8 +2873,8 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
}
}
- deq.tail = 0;
- deq.head = N;
+ deq.head = 0;
+ deq.len = N;
deq
}
}
diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs
deleted file mode 100644
index 6735424a3..000000000
--- a/library/alloc/src/collections/vec_deque/pair_slices.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use core::cmp::{self};
-use core::mem::replace;
-
-use crate::alloc::Allocator;
-
-use super::VecDeque;
-
-/// PairSlices pairs up equal length slice parts of two deques
-///
-/// For example, given deques "A" and "B" with the following division into slices:
-///
-/// A: [0 1 2] [3 4 5]
-/// B: [a b] [c d e]
-///
-/// It produces the following sequence of matching slices:
-///
-/// ([0 1], [a b])
-/// (\[2\], \[c\])
-/// ([3 4], [d e])
-///
-/// and the uneven remainder of either A or B is skipped.
-pub struct PairSlices<'a, 'b, T> {
- a0: &'a mut [T],
- a1: &'a mut [T],
- b0: &'b [T],
- b1: &'b [T],
-}
-
-impl<'a, 'b, T> PairSlices<'a, 'b, T> {
- pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
- let (a0, a1) = to.as_mut_slices();
- let (b0, b1) = from.as_slices();
- PairSlices { a0, a1, b0, b1 }
- }
-
- pub fn has_remainder(&self) -> bool {
- !self.b0.is_empty()
- }
-
- pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
- IntoIterator::into_iter([self.b0, self.b1])
- }
-}
-
-impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
- type Item = (&'a mut [T], &'b [T]);
- fn next(&mut self) -> Option<Self::Item> {
- // Get next part length
- let part = cmp::min(self.a0.len(), self.b0.len());
- if part == 0 {
- return None;
- }
- let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
- let (q0, q1) = self.b0.split_at(part);
-
- // Move a1 into a0, if it's empty (and b1, b0 the same way).
- self.a0 = p1;
- self.b0 = q1;
- if self.a0.is_empty() {
- self.a0 = replace(&mut self.a1, &mut []);
- }
- if self.b0.is_empty() {
- self.b0 = replace(&mut self.b1, &[]);
- }
- Some((p0, q0))
- }
-}
diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs
deleted file mode 100644
index dd0fa7d60..000000000
--- a/library/alloc/src/collections/vec_deque/ring_slices.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use core::ptr::{self};
-
-/// Returns the two slices that cover the `VecDeque`'s valid range
-pub trait RingSlices: Sized {
- fn slice(self, from: usize, to: usize) -> Self;
- fn split_at(self, i: usize) -> (Self, Self);
-
- fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
- let contiguous = tail <= head;
- if contiguous {
- let (empty, buf) = buf.split_at(0);
- (buf.slice(tail, head), empty)
- } else {
- let (mid, right) = buf.split_at(tail);
- let (left, _) = mid.split_at(head);
- (right, left)
- }
- }
-}
-
-impl<T> RingSlices for &[T] {
- fn slice(self, from: usize, to: usize) -> Self {
- &self[from..to]
- }
- fn split_at(self, i: usize) -> (Self, Self) {
- (*self).split_at(i)
- }
-}
-
-impl<T> RingSlices for &mut [T] {
- fn slice(self, from: usize, to: usize) -> Self {
- &mut self[from..to]
- }
- fn split_at(self, i: usize) -> (Self, Self) {
- (*self).split_at_mut(i)
- }
-}
-
-impl<T> RingSlices for *mut [T] {
- fn slice(self, from: usize, to: usize) -> Self {
- assert!(from <= to && to < self.len());
- // Not using `get_unchecked_mut` to keep this a safe operation.
- let len = to - from;
- ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
- }
-
- fn split_at(self, mid: usize) -> (Self, Self) {
- let len = self.len();
- let ptr = self.as_mut_ptr();
- assert!(mid <= len);
- (
- ptr::slice_from_raw_parts_mut(ptr, mid),
- ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
- )
- }
-}
diff --git a/library/alloc/src/collections/vec_deque/spec_extend.rs b/library/alloc/src/collections/vec_deque/spec_extend.rs
index 97ff8b765..dccf40ccb 100644
--- a/library/alloc/src/collections/vec_deque/spec_extend.rs
+++ b/library/alloc/src/collections/vec_deque/spec_extend.rs
@@ -1,6 +1,6 @@
use crate::alloc::Allocator;
use crate::vec;
-use core::iter::{ByRefSized, TrustedLen};
+use core::iter::TrustedLen;
use core::slice;
use super::VecDeque;
@@ -17,19 +17,33 @@ where
default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of:
//
- // for item in iter {
- // self.push_back(item);
- // }
- while let Some(element) = iter.next() {
- if self.len() == self.capacity() {
- let (lower, _) = iter.size_hint();
- self.reserve(lower.saturating_add(1));
- }
+ // for item in iter {
+ // self.push_back(item);
+ // }
+
+ // May only be called if `deque.len() < deque.capacity()`
+ unsafe fn push_unchecked<T, A: Allocator>(deque: &mut VecDeque<T, A>, element: T) {
+ // SAFETY: Because of the precondition, it's guaranteed that there is space
+ // in the logical array after the last element.
+ unsafe { deque.buffer_write(deque.to_physical_idx(deque.len), element) };
+ // This can't overflow because `deque.len() < deque.capacity() <= usize::MAX`.
+ deque.len += 1;
+ }
- let head = self.head;
- self.head = self.wrap_add(self.head, 1);
- unsafe {
- self.buffer_write(head, element);
+ while let Some(element) = iter.next() {
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+
+ // SAFETY: We just reserved space for at least one element.
+ unsafe { push_unchecked(self, element) };
+
+ // Inner loop to avoid repeatedly calling `reserve`.
+ while self.len < self.capacity() {
+ let Some(element) = iter.next() else {
+ return;
+ };
+ // SAFETY: The loop condition guarantees that `self.len() < self.capacity()`.
+ unsafe { push_unchecked(self, element) };
}
}
}
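// The same amortization idea as the specialized `spec_extend` above, sketched with only
// public API (the real code writes into the buffer directly via `buffer_write`):
// reserve based on the size hint, then fill up to capacity before consulting the hint again.
use std::collections::VecDeque;

fn extend_amortized<I: Iterator<Item = u32>>(deque: &mut VecDeque<u32>, mut iter: I) {
    while let Some(element) = iter.next() {
        let (lower, _) = iter.size_hint();
        deque.reserve(lower.saturating_add(1));
        deque.push_back(element);
        // Inner loop: the size hint is not re-checked until the reserved space is used up.
        while deque.len() < deque.capacity() {
            match iter.next() {
                Some(element) => deque.push_back(element),
                None => return,
            }
        }
    }
}

fn main() {
    let mut d = VecDeque::new();
    extend_amortized(&mut d, 0..10u32);
    assert_eq!(d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}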
@@ -39,7 +53,7 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: TrustedLen<Item = T>,
{
- default fn spec_extend(&mut self, mut iter: I) {
+ default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint();
if let Some(additional) = high {
@@ -51,35 +65,12 @@ where
);
self.reserve(additional);
- struct WrapAddOnDrop<'a, T, A: Allocator> {
- vec_deque: &'a mut VecDeque<T, A>,
- written: usize,
- }
-
- impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
- fn drop(&mut self) {
- self.vec_deque.head =
- self.vec_deque.wrap_add(self.vec_deque.head, self.written);
- }
- }
-
- let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
-
- let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
- unsafe {
- wrapper.vec_deque.write_iter(
- wrapper.vec_deque.head,
- ByRefSized(&mut iter).take(head_room),
- &mut wrapper.written,
- );
-
- if additional > head_room {
- wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
- }
- }
+ let written = unsafe {
+ self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
+ };
debug_assert_eq!(
- additional, wrapper.written,
+ additional, written,
"The number of items written to VecDeque doesn't match the TrustedLen size hint"
);
} else {
@@ -99,8 +90,8 @@ impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
self.reserve(slice.len());
unsafe {
- self.copy_slice(self.head, slice);
- self.head = self.wrap_add(self.head, slice.len());
+ self.copy_slice(self.to_physical_idx(self.len), slice);
+ self.len += slice.len();
}
iterator.forget_remaining_elements();
}
@@ -125,8 +116,8 @@ where
self.reserve(slice.len());
unsafe {
- self.copy_slice(self.head, slice);
- self.head = self.wrap_add(self.head, slice.len());
+ self.copy_slice(self.to_physical_idx(self.len), slice);
+ self.len += slice.len();
}
}
}
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 1f2daef21..220ad71be 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -3,7 +3,6 @@ use core::iter::TrustedLen;
use super::*;
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -11,12 +10,11 @@ fn bench_push_back_100(b: &mut test::Bencher) {
deq.push_back(i);
}
deq.head = 0;
- deq.tail = 0;
+ deq.len = 0;
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -24,18 +22,21 @@ fn bench_push_front_100(b: &mut test::Bencher) {
deq.push_front(i);
}
deq.head = 0;
- deq.tail = 0;
+ deq.len = 0;
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_back_100(b: &mut test::Bencher) {
- let mut deq = VecDeque::<i32>::with_capacity(101);
+ let size = 100;
+ let mut deq = VecDeque::<i32>::with_capacity(size + 1);
+ // We'll mess with private state to pretend like `deq` is filled.
+ // Make sure the buffer is initialized so that we don't read uninit memory.
+ unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
- deq.head = 100;
- deq.tail = 0;
+ deq.head = 0;
+ deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
@@ -43,9 +44,9 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_whole_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@@ -54,9 +55,9 @@ fn bench_retain_whole_10000(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_odd_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@@ -65,24 +66,27 @@ fn bench_retain_odd_10000(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_half_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
- v.retain(|x| *x > 50000)
+ v.retain(|x| *x > size / 2)
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_front_100(b: &mut test::Bencher) {
- let mut deq = VecDeque::<i32>::with_capacity(101);
+ let size = 100;
+ let mut deq = VecDeque::<i32>::with_capacity(size + 1);
+ // We'll mess with private state to pretend like `deq` is filled.
+ // Make sure the buffer is initialized so that we don't read uninit memory.
+ unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
- deq.head = 100;
- deq.tail = 0;
+ deq.head = 0;
+ deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
@@ -101,9 +105,9 @@ fn test_swap_front_back_remove() {
for len in 0..final_len {
let expected: VecDeque<_> =
if back { (0..len).collect() } else { (0..len).rev().collect() };
- for tail_pos in 0..usable_cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..usable_cap {
+ tester.head = head_pos;
+ tester.len = 0;
if back {
for i in 0..len * 2 {
tester.push_front(i);
@@ -120,8 +124,8 @@ fn test_swap_front_back_remove() {
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -146,18 +150,18 @@ fn test_insert() {
for len in minlen..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
+ for head_pos in 0..cap {
for to_insert in 0..len {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -253,13 +257,14 @@ fn test_swap_panic() {
#[test]
fn test_reserve_exact() {
let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
- assert!(tester.capacity() == 1);
+ assert_eq!(tester.capacity(), 1);
tester.reserve_exact(50);
- assert!(tester.capacity() >= 51);
+ assert_eq!(tester.capacity(), 50);
tester.reserve_exact(40);
- assert!(tester.capacity() >= 51);
+ // reserving won't shrink the buffer
+ assert_eq!(tester.capacity(), 50);
tester.reserve_exact(200);
- assert!(tester.capacity() >= 200);
+ assert_eq!(tester.capacity(), 200);
}
#[test]
@@ -319,6 +324,7 @@ fn test_contains() {
#[test]
fn test_rotate_left_right() {
let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.reserve(1);
assert_eq!(tester.len(), 10);
@@ -459,7 +465,7 @@ fn test_binary_search_key() {
}
#[test]
-fn make_contiguous_big_tail() {
+fn make_contiguous_big_head() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..3 {
@@ -474,14 +480,14 @@ fn make_contiguous_big_tail() {
assert_eq!(tester.capacity(), 15);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
- let expected_start = tester.head;
+ let expected_start = tester.as_slices().1.len();
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
-fn make_contiguous_big_head() {
+fn make_contiguous_big_tail() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..8 {
@@ -495,44 +501,46 @@ fn make_contiguous_big_head() {
// 01234567......98
let expected_start = 0;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_small_free() {
- let mut tester = VecDeque::with_capacity(15);
+ let mut tester = VecDeque::with_capacity(16);
- for i in 'A' as u8..'I' as u8 {
+ for i in b'A'..b'I' {
tester.push_back(i as char);
}
- for i in 'I' as u8..'N' as u8 {
+ for i in b'I'..b'N' {
tester.push_front(i as char);
}
+ assert_eq!(tester, ['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']);
+
// ABCDEFGH...MLKJI
let expected_start = 0;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!(
(&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
tester.as_slices()
);
tester.clear();
- for i in 'I' as u8..'N' as u8 {
+ for i in b'I'..b'N' {
tester.push_back(i as char);
}
- for i in 'A' as u8..'I' as u8 {
+ for i in b'A'..b'I' {
tester.push_front(i as char);
}
// IJKLM...HGFEDCBA
- let expected_start = 0;
+ let expected_start = 3;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!(
(&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
tester.as_slices()
@@ -541,16 +549,55 @@ fn make_contiguous_small_free() {
#[test]
fn make_contiguous_head_to_end() {
- let mut dq = VecDeque::with_capacity(3);
- dq.push_front('B');
- dq.push_front('A');
- dq.push_back('C');
- dq.make_contiguous();
- let expected_tail = 0;
- let expected_head = 3;
- assert_eq!(expected_tail, dq.tail);
- assert_eq!(expected_head, dq.head);
- assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
+ let mut tester = VecDeque::with_capacity(16);
+
+ for i in b'A'..b'L' {
+ tester.push_back(i as char);
+ }
+
+ for i in b'L'..b'Q' {
+ tester.push_front(i as char);
+ }
+
+ assert_eq!(
+ tester,
+ ['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
+ );
+
+ // ABCDEFGHIJKPONML
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.head, expected_start);
+ assert_eq!(
+ (
+ &['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
+ as &[_],
+ &[] as &[_]
+ ),
+ tester.as_slices()
+ );
+
+ tester.clear();
+ for i in b'L'..b'Q' {
+ tester.push_back(i as char);
+ }
+
+ for i in b'A'..b'L' {
+ tester.push_front(i as char);
+ }
+
+ // LMNOPKJIHGFEDCBA
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.head, expected_start);
+ assert_eq!(
+ (
+ &['K', 'J', 'I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'L', 'M', 'N', 'O', 'P']
+ as &[_],
+ &[] as &[_]
+ ),
+ tester.as_slices()
+ );
}
#[test]
@@ -584,10 +631,10 @@ fn test_remove() {
for len in minlen..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
+ for head_pos in 0..cap {
for to_remove in 0..=len {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
@@ -598,8 +645,8 @@ fn test_remove() {
tester.push_back(1234);
}
tester.remove(to_remove);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -613,11 +660,11 @@ fn test_range() {
let cap = tester.capacity();
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..=cap {
- for tail in 0..=cap {
+ for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@@ -638,17 +685,17 @@ fn test_range_mut() {
let cap = tester.capacity();
for len in 0..=cap {
- for tail in 0..=cap {
+ for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let head_was = tester.head;
- let tail_was = tester.tail;
+ let len_was = tester.len;
// Check that we iterate over the correct values
let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
@@ -658,8 +705,8 @@ fn test_range_mut() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
- assert_eq!(tester.tail, tail_was);
assert_eq!(tester.head, head_was);
+ assert_eq!(tester.len, len_was);
}
}
}
@@ -672,11 +719,11 @@ fn test_drain() {
let cap = tester.capacity();
for len in 0..=cap {
- for tail in 0..=cap {
+ for head in 0..cap {
for drain_start in 0..=len {
for drain_end in drain_start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@@ -689,8 +736,8 @@ fn test_drain() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
@@ -717,17 +764,18 @@ fn test_shrink_to_fit() {
for len in 0..=cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..=max_cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..=max_cap {
+ tester.reserve(head_pos);
+ tester.head = head_pos;
+ tester.len = 0;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -754,17 +802,17 @@ fn test_split_off() {
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..cap {
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
- assert!(result.tail < result.cap());
- assert!(result.head < result.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
+ assert!(result.head <= result.capacity());
+ assert!(result.len <= result.capacity());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
@@ -781,16 +829,10 @@ fn test_from_vec() {
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
- assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
-
- let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
- let vd = VecDeque::from(vec.clone());
- assert!(vd.cap().is_power_of_two());
- assert_eq!(vd.len(), vec.len());
}
#[test]
@@ -842,10 +884,6 @@ fn test_extend_impl(trusted_len: bool) {
}
assert_eq!(self.test, self.expected);
- let (a1, b1) = self.test.as_slices();
- let (a2, b2) = self.expected.as_slices();
- assert_eq!(a1, a2);
- assert_eq!(b1, b2);
}
fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
@@ -868,7 +906,7 @@ fn test_extend_impl(trusted_len: bool) {
let mut tester = VecDequeTester::new(trusted_len);
// Initial capacity
- tester.test_extend(0..tester.remaining_capacity() - 1);
+ tester.test_extend(0..tester.remaining_capacity());
// Grow
tester.test_extend(1024..2048);
@@ -876,7 +914,7 @@ fn test_extend_impl(trusted_len: bool) {
// Wrap around
tester.drain(..128);
- tester.test_extend(0..tester.remaining_capacity() - 1);
+ tester.test_extend(0..tester.remaining_capacity());
// Continue
tester.drain(256..);
@@ -889,16 +927,6 @@ fn test_extend_impl(trusted_len: bool) {
}
#[test]
-#[should_panic = "capacity overflow"]
-fn test_from_vec_zst_overflow() {
- use crate::vec::Vec;
- let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
- let vd = VecDeque::from(vec.clone()); // no room for +1
- assert!(vd.cap().is_power_of_two());
- assert_eq!(vd.len(), vec.len());
-}
-
-#[test]
fn test_from_array() {
fn test<const N: usize>() {
let mut array: [usize; N] = [0; N];
@@ -913,7 +941,6 @@ fn test_from_array() {
assert_eq!(deq[i], i);
}
- assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), N);
}
test::<0>();
@@ -921,11 +948,6 @@ fn test_from_array() {
test::<2>();
test::<32>();
test::<35>();
-
- let array = [(); MAXIMUM_ZST_CAPACITY - 1];
- let deq = VecDeque::from(array);
- assert!(deq.cap().is_power_of_two());
- assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
}
#[test]
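The test changes above follow the VecDeque rewrite from a `tail`/`head` ring (with a power-of-two buffer) to a `head`/`len` representation, which is why the `cap().is_power_of_two()` assertions are gone and `test_reserve_exact` can expect an exact capacity. A minimal sketch of what user code can now observe, assuming the new representation; exact capacities remain type- and allocator-dependent, so only the documented lower bound is asserted:

```rust
use std::collections::VecDeque;

fn main() {
    // After the rewrite the buffer is no longer rounded up to a power of two,
    // so reserve_exact(50) may leave capacity() at exactly 50.
    let mut dq: VecDeque<i32> = VecDeque::with_capacity(1);
    dq.reserve_exact(50);
    assert!(dq.capacity() >= 50);
    println!("capacity = {}", dq.capacity());
}
```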
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index ce36b116f..96960d43f 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -82,6 +82,7 @@
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
@@ -124,6 +125,7 @@
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
+#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
@@ -150,6 +152,7 @@
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
+#![cfg_attr(not(bootstrap), feature(tuple_trait))]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 006d813e5..38e31b180 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -293,6 +293,15 @@ struct RcBox<T: ?Sized> {
value: T,
}
+/// Calculate layout for `RcBox<T>` using the inner value's layout
+fn rcbox_layout_for_value_layout(layout: Layout) -> Layout {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ Layout::new::<RcBox<()>>().extend(layout).unwrap().0.pad_to_align()
+}
+
/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
/// Counted'.
///
@@ -1082,10 +1091,11 @@ impl<T: ?Sized> Rc<T> {
///
/// # Safety
///
- /// Any other `Rc` or [`Weak`] pointers to the same allocation must not be dereferenced
- /// for the duration of the returned borrow.
- /// This is trivially the case if no such pointers exist,
- /// for example immediately after `Rc::new`.
+ /// If any other `Rc` or [`Weak`] pointers to the same allocation exist, then
+ /// they must not be dereferenced or have active borrows for the duration
+ /// of the returned borrow, and their inner type must be exactly the same as the
+ /// inner type of this Rc (including lifetimes). This is trivially the case if no
+ /// such pointers exist, for example immediately after `Rc::new`.
///
/// # Examples
///
@@ -1100,6 +1110,38 @@ impl<T: ?Sized> Rc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
+ /// Other `Rc` pointers to the same allocation must be to the same type.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<str> = Rc::from("Hello, world!");
+ /// let mut y: Rc<[u8]> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type is str, not [u8]
+ /// Rc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
+ /// }
+ /// println!("{}", &*x); // Invalid UTF-8 in a str
+ /// ```
+ /// Other `Rc` pointers to the same allocation must be to the exact same type, including lifetimes.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<&str> = Rc::new("Hello, world!");
+ /// {
+ /// let s = String::from("Oh, no!");
+ /// let mut y: Rc<&str> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type
+ /// // is &'long str, not &'short str
+ /// *Rc::get_mut_unchecked(&mut y) = &s;
+ /// }
+ /// }
+ /// println!("{}", &*x); // Use-after-free
+ /// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@@ -1334,11 +1376,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> *mut RcBox<T> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = rcbox_layout_for_value_layout(value_layout);
unsafe {
Rc::try_allocate_for_layout(value_layout, allocate, mem_to_rcbox)
.unwrap_or_else(|_| handle_alloc_error(layout))
@@ -1357,11 +1395,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> Result<*mut RcBox<T>, AllocError> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = rcbox_layout_for_value_layout(value_layout);
// Allocate for the layout.
let ptr = allocate(layout)?;
@@ -1428,7 +1462,7 @@ impl<T> Rc<[T]> {
}
}
- /// Copy elements from slice into newly allocated Rc<\[T\]>
+ /// Copy elements from slice into newly allocated `Rc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`
#[cfg(not(no_global_oom_handling))]
@@ -1968,10 +2002,8 @@ impl<T> From<Vec<T>> for Rc<[T]> {
fn from(mut v: Vec<T>) -> Rc<[T]> {
unsafe {
let rc = Rc::copy_from_slice(&v);
-
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
-
rc
}
}
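The new `rcbox_layout_for_value_layout` helper centralizes the layout computation that previously appeared inline (and once produced the misaligned reference in #54908). A standalone sketch of the same `Layout::extend` + `pad_to_align` pattern, using a hypothetical `Header` type in place of `RcBox`'s reference counts:

```rust
use std::alloc::Layout;

// Hypothetical stand-in for RcBox's strong/weak counters.
#[repr(C)]
struct Header {
    strong: usize,
    weak: usize,
}

// Prepend the header to an arbitrary value layout, then pad so the combined
// size is a multiple of the alignment, as allocators require.
fn header_layout_for_value_layout(value: Layout) -> Layout {
    Layout::new::<Header>().extend(value).unwrap().0.pad_to_align()
}

fn main() {
    let value = Layout::array::<u32>(5).unwrap();
    let combined = header_layout_for_value_layout(value);
    assert!(combined.size() >= std::mem::size_of::<Header>() + value.size());
    println!("{combined:?}");
}
```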
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index a5e7bf2a1..1b61ede34 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -458,7 +458,7 @@ impl<T> [T] {
hack::into_vec(self)
}
- /// Creates a vector by repeating a slice `n` times.
+ /// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
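The doc line above belongs to `<[T]>::repeat`, which builds the result by copying the slice end to end; for reference (stable API, not part of the diff):

```rust
fn main() {
    assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
}
```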
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index c436adf70..7a8e6f088 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -362,8 +362,8 @@ use crate::vec::Vec;
/// [`Deref`]: core::ops::Deref "ops::Deref"
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
-#[cfg_attr(not(test), rustc_diagnostic_item = "String")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(all(not(bootstrap), not(test)), lang = "String")]
pub struct String {
vec: Vec<u8>,
}
@@ -949,7 +949,7 @@ impl String {
/// assert_eq!(string, "abcdecdeabecde");
/// ```
#[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "string_extend_from_within", issue = "none")]
+ #[unstable(feature = "string_extend_from_within", issue = "103806")]
pub fn extend_from_within<R>(&mut self, src: R)
where
R: RangeBounds<usize>,
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 81cd77074..f7dc4d109 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -333,6 +333,15 @@ struct ArcInner<T: ?Sized> {
data: T,
}
+/// Calculate layout for `ArcInner<T>` using the inner value's layout
+fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
+}
+
unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
@@ -1154,11 +1163,7 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = arcinner_layout_for_value_layout(value_layout);
unsafe {
Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
.unwrap_or_else(|_| handle_alloc_error(layout))
@@ -1176,11 +1181,7 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> Result<*mut ArcInner<T>, AllocError> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = arcinner_layout_for_value_layout(value_layout);
let ptr = allocate(layout)?;
@@ -1246,7 +1247,7 @@ impl<T> Arc<[T]> {
}
}
- /// Copy elements from slice into newly allocated Arc<\[T\]>
+ /// Copy elements from slice into newly allocated `Arc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`.
#[cfg(not(no_global_oom_handling))]
@@ -1586,10 +1587,11 @@ impl<T: ?Sized> Arc<T> {
///
/// # Safety
///
- /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
- /// for the duration of the returned borrow.
- /// This is trivially the case if no such pointers exist,
- /// for example immediately after `Arc::new`.
+ /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
+ /// they must not be dereferenced or have active borrows for the duration
+ /// of the returned borrow, and their inner type must be exactly the same as the
+ /// inner type of this Arc (including lifetimes). This is trivially the case if no
+ /// such pointers exist, for example immediately after `Arc::new`.
///
/// # Examples
///
@@ -1604,6 +1606,38 @@ impl<T: ?Sized> Arc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
+ /// Other `Arc` pointers to the same allocation must be to the same type.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<str> = Arc::from("Hello, world!");
+ /// let mut y: Arc<[u8]> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type is str, not [u8]
+ /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
+ /// }
+ /// println!("{}", &*x); // Invalid UTF-8 in a str
+ /// ```
+ /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<&str> = Arc::new("Hello, world!");
+ /// {
+ /// let s = String::from("Oh, no!");
+ /// let mut y: Arc<&str> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type
+ /// // is &'long str, not &'short str
+ /// *Arc::get_mut_unchecked(&mut y) = &s;
+ /// }
+ /// }
+ /// println!("{}", &*x); // Use-after-free
+ /// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@@ -2573,12 +2607,10 @@ impl<T> From<Vec<T>> for Arc<[T]> {
#[inline]
fn from(mut v: Vec<T>) -> Arc<[T]> {
unsafe {
- let arc = Arc::copy_from_slice(&v);
-
+ let rc = Arc::copy_from_slice(&v);
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
-
- arc
+ rc
}
}
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index bbbdc3aa2..ba34ab680 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -1070,7 +1070,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// Converts the vector into [`Box<[T]>`][owned slice].
///
- /// Note that this will drop any excess capacity.
+ /// If the vector has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
///
/// [owned slice]: Box
///
@@ -2163,7 +2164,7 @@ impl<T, A: Allocator> Vec<T, A> {
{
let len = self.len();
if new_len > len {
- self.extend_with(new_len - len, ExtendFunc(f));
+ self.extend_trusted(iter::repeat_with(f).take(new_len - len));
} else {
self.truncate(new_len);
}
@@ -2491,16 +2492,6 @@ impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
}
}
-struct ExtendFunc<F>(F);
-impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
- fn next(&mut self) -> T {
- (self.0)()
- }
- fn last(mut self) -> T {
- (self.0)()
- }
-}
-
impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` values, using the given generator.
@@ -2588,7 +2579,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
// SAFETY:
- // - caller guaratees that src is a valid index
+ // - caller guarantees that src is a valid index
let to_clone = unsafe { this.get_unchecked(src) };
iter::zip(to_clone, spare)
@@ -2607,7 +2598,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (init, spare) = self.split_at_spare_mut();
// SAFETY:
- // - caller guaratees that `src` is a valid index
+ // - caller guarantees that `src` is a valid index
let source = unsafe { init.get_unchecked(src) };
// SAFETY:
@@ -2780,7 +2771,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
/// assert_eq!(v_iter.next(), None);
/// ```
#[inline]
- fn into_iter(self) -> IntoIter<T, A> {
+ fn into_iter(self) -> Self::IntoIter {
unsafe {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
@@ -2808,7 +2799,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
- fn into_iter(self) -> slice::Iter<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
@@ -2818,7 +2809,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
type Item = &'a mut T;
type IntoIter = slice::IterMut<'a, T>;
- fn into_iter(self) -> slice::IterMut<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
@@ -2870,6 +2861,40 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
+ // specific extend for `TrustedLen` iterators, called both by the specializations
+ // and internal places where resolving specialization makes compilation slower
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let ptr = self.as_mut_ptr();
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr.add(local_len.current_len()), element);
+ // Since the loop executes user code which can panic we have to update
+ // the length every step to correctly drop what we've written.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
@@ -3199,6 +3224,14 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
/// ```
/// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
/// ```
+ ///
+ /// Any excess capacity is removed:
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
+ /// ```
fn from(v: Vec<T, A>) -> Self {
v.into_boxed_slice()
}
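The comments inside `extend_trusted` above describe the panic-safety pattern: elements are written through a raw pointer while a drop guard tracks how many are initialized, so a panic in the iterator still leaves the vector's length covering exactly the written prefix. A simplified sketch of that pattern outside the standard library (hypothetical `SetLenGuard`/`extend_counted` names, not the std implementation):

```rust
struct SetLenGuard<'a, T> {
    vec: &'a mut Vec<T>,
    len: usize,
}

impl<T> Drop for SetLenGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: `len` only ever counts elements that were actually written.
        unsafe { self.vec.set_len(self.len) }
    }
}

fn extend_counted<T>(vec: &mut Vec<T>, additional: usize, iter: impl Iterator<Item = T>) {
    vec.reserve(additional);
    let start = vec.len();
    let mut guard = SetLenGuard { len: start, vec };
    for (i, item) in iter.take(additional).enumerate() {
        // SAFETY: `reserve` guaranteed room for `additional` more elements.
        unsafe { guard.vec.as_mut_ptr().add(start + i).write(item) }
        // Bump the recorded length only after the write, so a panic while
        // producing the next item drops exactly the initialized prefix.
        guard.len = start + i + 1;
    }
}

fn main() {
    let mut v = vec![1, 2];
    extend_counted(&mut v, 3, 10..13);
    assert_eq!(v, [1, 2, 10, 11, 12]);
}
```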
diff --git a/library/alloc/src/vec/set_len_on_drop.rs b/library/alloc/src/vec/set_len_on_drop.rs
index 8b66bc812..6ce5a3a9f 100644
--- a/library/alloc/src/vec/set_len_on_drop.rs
+++ b/library/alloc/src/vec/set_len_on_drop.rs
@@ -18,6 +18,11 @@ impl<'a> SetLenOnDrop<'a> {
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
+
+ #[inline]
+ pub(super) fn current_len(&self) -> usize {
+ self.local_len
+ }
}
impl Drop for SetLenOnDrop<'_> {
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 1ea9c827a..56065ce56 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -1,9 +1,8 @@
use crate::alloc::Allocator;
use core::iter::TrustedLen;
-use core::ptr::{self};
use core::slice::{self};
-use super::{IntoIter, SetLenOnDrop, Vec};
+use super::{IntoIter, Vec};
// Specialization trait used for Vec::extend
pub(super) trait SpecExtend<T, I> {
@@ -24,36 +23,7 @@ where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, iterator: I) {
- // This is the case for a TrustedLen iterator.
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.reserve(additional);
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr, element);
- ptr = ptr.add(1);
- // Since the loop executes user code which can panic we have to bump the pointer
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- } else {
- // Per TrustedLen contract a `None` upper bound means that the iterator length
- // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
- // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
- // This avoids additional codegen for a fallback code path which would eventually
- // panic anyway.
- panic!("capacity overflow");
- }
+ self.extend_trusted(iterator)
}
}
diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs
index 9e5123be9..af49826ff 100644
--- a/library/alloc/tests/boxed.rs
+++ b/library/alloc/tests/boxed.rs
@@ -102,8 +102,18 @@ unsafe impl const Allocator for ConstAllocator {
let new_ptr = self.allocate(new_layout)?;
if new_layout.size() > 0 {
- new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
- self.deallocate(ptr, old_layout);
+ // Safety: `new_ptr` is valid for writes and `ptr` for reads of
+ // `old_layout.size()`, because `new_layout.size() >=
+ // old_layout.size()` (which is an invariant that must be upheld by
+ // callers).
+ unsafe {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
+ }
+ // Safety: that `ptr` is never used again is also an invariant which must
+ // be upheld by callers.
+ unsafe {
+ self.deallocate(ptr, old_layout);
+ }
}
Ok(new_ptr)
}
@@ -114,12 +124,21 @@ unsafe impl const Allocator for ConstAllocator {
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
- let new_ptr = self.grow(ptr, old_layout, new_layout)?;
+ // Safety: Invariants of `grow_zeroed` and `grow` are the same, and must
+ // be enforced by callers.
+ let new_ptr = unsafe { self.grow(ptr, old_layout, new_layout)? };
if new_layout.size() > 0 {
let old_size = old_layout.size();
let new_size = new_layout.size();
let raw_ptr = new_ptr.as_mut_ptr();
- raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ // Safety:
+ // - `grow` returned Ok, so the returned pointer must be valid for
+ // `new_size` bytes
+ // - `new_size` must be larger than `old_size`, which is an
+ // invariant which must be upheld by callers.
+ unsafe {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
}
Ok(new_ptr)
}
@@ -137,8 +156,18 @@ unsafe impl const Allocator for ConstAllocator {
let new_ptr = self.allocate(new_layout)?;
if new_layout.size() > 0 {
- new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), new_layout.size());
- self.deallocate(ptr, old_layout);
+ // Safety: `new_ptr` and `ptr` are valid for reads/writes of
+ // `new_layout.size()` because of the invariants of shrink, which
+ // include `new_layout.size()` being smaller than (or equal to)
+ // `old_layout.size()`.
+ unsafe {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), new_layout.size());
+ }
+ // Safety: that `ptr` is never used again is also an invariant which must
+ // be upheld by callers.
+ unsafe {
+ self.deallocate(ptr, old_layout);
+ }
}
Ok(new_ptr)
}
diff --git a/library/alloc/tests/fmt.rs b/library/alloc/tests/fmt.rs
index 5ee6db43f..04da95bbb 100644
--- a/library/alloc/tests/fmt.rs
+++ b/library/alloc/tests/fmt.rs
@@ -2,6 +2,7 @@
use std::cell::RefCell;
use std::fmt::{self, Write};
+use std::ptr;
#[test]
fn test_format() {
@@ -76,14 +77,14 @@ fn test_format_macro_interface() {
t!(format!("{}", "foo"), "foo");
t!(format!("{}", "foo".to_string()), "foo");
if cfg!(target_pointer_width = "32") {
- t!(format!("{:#p}", 0x1234 as *const isize), "0x00001234");
- t!(format!("{:#p}", 0x1234 as *mut isize), "0x00001234");
+ t!(format!("{:#p}", ptr::invalid::<isize>(0x1234)), "0x00001234");
+ t!(format!("{:#p}", ptr::invalid_mut::<isize>(0x1234)), "0x00001234");
} else {
- t!(format!("{:#p}", 0x1234 as *const isize), "0x0000000000001234");
- t!(format!("{:#p}", 0x1234 as *mut isize), "0x0000000000001234");
+ t!(format!("{:#p}", ptr::invalid::<isize>(0x1234)), "0x0000000000001234");
+ t!(format!("{:#p}", ptr::invalid_mut::<isize>(0x1234)), "0x0000000000001234");
}
- t!(format!("{:p}", 0x1234 as *const isize), "0x1234");
- t!(format!("{:p}", 0x1234 as *mut isize), "0x1234");
+ t!(format!("{:p}", ptr::invalid::<isize>(0x1234)), "0x1234");
+ t!(format!("{:p}", ptr::invalid_mut::<isize>(0x1234)), "0x1234");
t!(format!("{A:x}"), "aloha");
t!(format!("{B:X}"), "adios");
t!(format!("foo {} ☃☃☃☃☃☃", "bar"), "foo bar ☃☃☃☃☃☃");
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index ffc5ca7a5..d6d2b055b 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -47,6 +47,8 @@
#![feature(strict_provenance)]
#![feature(once_cell)]
#![feature(drain_keep_rest)]
+#![deny(fuzzy_provenance_casts)]
+#![deny(unsafe_op_in_unsafe_fn)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index e30329aa1..4d182be02 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1590,11 +1590,27 @@ fn test_bool_from_str() {
assert_eq!("not even a boolean".parse::<bool>().ok(), None);
}
-fn check_contains_all_substrings(s: &str) {
- assert!(s.contains(""));
- for i in 0..s.len() {
- for j in i + 1..=s.len() {
- assert!(s.contains(&s[i..j]));
+fn check_contains_all_substrings(haystack: &str) {
+ let mut modified_needle = String::new();
+
+ for i in 0..haystack.len() {
+ // check different haystack lengths since we special-case short haystacks.
+ let haystack = &haystack[0..i];
+ assert!(haystack.contains(""));
+ for j in 0..haystack.len() {
+ for k in j + 1..=haystack.len() {
+ let needle = &haystack[j..k];
+ assert!(haystack.contains(needle));
+ modified_needle.clear();
+ modified_needle.push_str(needle);
+ modified_needle.replace_range(0..1, "\0");
+ assert!(!haystack.contains(&modified_needle));
+
+ modified_needle.clear();
+ modified_needle.push_str(needle);
+ modified_needle.replace_range(needle.len() - 1..needle.len(), "\0");
+ assert!(!haystack.contains(&modified_needle));
+ }
}
}
}
@@ -1616,6 +1632,18 @@ fn strslice_issue_16878() {
}
#[test]
+fn strslice_issue_104726() {
+ // Edge-case in the simd_contains impl.
+ // The first and last byte are the same so it backtracks by one byte
+ // which aligns with the end of the string. Previously, incorrect offset calculations
+ // led to out-of-bounds slicing.
+ #[rustfmt::skip]
+ let needle = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaba";
+ let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab";
+ assert!(!haystack.contains(needle));
+}
+
+#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_strslice_contains() {
let x = "There are moments, Jeeves, when one asks oneself, 'Do trousers matter?'";
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index e02711870..7ebed0d5c 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -1089,7 +1089,8 @@ fn test_into_iter_drop_allocator() {
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
- System.deallocate(ptr, layout)
+ // Safety: invariants are passed through to the caller.
+ unsafe { System.deallocate(ptr, layout) }
}
}
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 019d73c0b..d04de5a07 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -465,7 +465,6 @@ fn test_drain() {
for i in 6..9 {
d.push_front(i);
}
-
assert_eq!(d.drain(..).collect::<Vec<_>>(), [8, 7, 6, 0, 1, 2, 3, 4]);
assert!(d.is_empty());
}
@@ -1142,7 +1141,7 @@ fn test_reserve_exact_2() {
v.push_back(16);
v.reserve_exact(16);
- assert!(v.capacity() >= 48)
+ assert!(v.capacity() >= 33)
}
#[test]
@@ -1157,7 +1156,7 @@ fn test_try_reserve() {
// * overflow may trigger when adding `len` to `cap` (in number of elements)
// * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
- const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
{
@@ -1248,7 +1247,7 @@ fn test_try_reserve_exact() {
// This is exactly the same as test_try_reserve with the method changed.
// See that test for comments.
- const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
{
@@ -1391,7 +1390,8 @@ fn test_rotate_nop() {
#[test]
fn test_rotate_left_parts() {
- let mut v: VecDeque<_> = (1..=7).collect();
+ let mut v: VecDeque<_> = VecDeque::with_capacity(8);
+ v.extend(1..=7);
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..]));
v.rotate_left(2);
@@ -1410,7 +1410,8 @@ fn test_rotate_left_parts() {
#[test]
fn test_rotate_right_parts() {
- let mut v: VecDeque<_> = (1..=7).collect();
+ let mut v: VecDeque<_> = VecDeque::with_capacity(8);
+ v.extend(1..=7);
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..]));
v.rotate_right(2);
@@ -1727,3 +1728,11 @@ fn test_from_zero_sized_vec() {
let queue = VecDeque::from(v);
assert_eq!(queue.len(), 100);
}
+
+#[test]
+fn test_resize_keeps_reserved_space_from_item() {
+ let v = Vec::<i32>::with_capacity(1234);
+ let mut d = VecDeque::new();
+ d.resize(1, v);
+ assert_eq!(d[0].capacity(), 1234);
+}