author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
commit     5363f350887b1e5b5dd21a86f88c8af9d7fea6da (patch)
tree       35ca005eb6e0e9a1ba3bb5dbc033209ad445dc17 /library
parent     Adding debian version 1.66.0+dfsg1-1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library')
-rw-r--r--  library/alloc/benches/lib.rs | 2
-rw-r--r--  library/alloc/benches/str.rs | 65
-rw-r--r--  library/alloc/src/alloc.rs | 23
-rw-r--r--  library/alloc/src/alloc/tests.rs | 1
-rw-r--r--  library/alloc/src/boxed.rs | 33
-rw-r--r--  library/alloc/src/collections/btree/map.rs | 4
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs | 1
-rw-r--r--  library/alloc/src/collections/mod.rs | 2
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs | 193
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs | 178
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs | 149
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs | 1283
-rw-r--r--  library/alloc/src/collections/vec_deque/pair_slices.rs | 67
-rw-r--r--  library/alloc/src/collections/vec_deque/ring_slices.rs | 56
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_extend.rs | 81
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs | 250
-rw-r--r--  library/alloc/src/lib.rs | 3
-rw-r--r--  library/alloc/src/rc.rs | 66
-rw-r--r--  library/alloc/src/slice.rs | 2
-rw-r--r--  library/alloc/src/string.rs | 4
-rw-r--r--  library/alloc/src/sync.rs | 70
-rw-r--r--  library/alloc/src/vec/mod.rs | 67
-rw-r--r--  library/alloc/src/vec/set_len_on_drop.rs | 5
-rw-r--r--  library/alloc/src/vec/spec_extend.rs | 34
-rw-r--r--  library/alloc/tests/boxed.rs | 41
-rw-r--r--  library/alloc/tests/fmt.rs | 13
-rw-r--r--  library/alloc/tests/lib.rs | 2
-rw-r--r--  library/alloc/tests/str.rs | 38
-rw-r--r--  library/alloc/tests/vec.rs | 3
-rw-r--r--  library/alloc/tests/vec_deque.rs | 21
-rw-r--r--  library/core/benches/iter.rs | 23
-rw-r--r--  library/core/benches/lib.rs | 2
-rw-r--r--  library/core/src/alloc/global.rs | 8
-rw-r--r--  library/core/src/alloc/layout.rs | 60
-rw-r--r--  library/core/src/alloc/mod.rs | 10
-rw-r--r--  library/core/src/arch.rs | 30
-rw-r--r--  library/core/src/array/mod.rs | 88
-rw-r--r--  library/core/src/async_iter/async_iter.rs | 2
-rw-r--r--  library/core/src/cell.rs | 6
-rw-r--r--  library/core/src/cell/lazy.rs | 4
-rw-r--r--  library/core/src/cell/once.rs | 10
-rw-r--r--  library/core/src/char/convert.rs | 1
-rw-r--r--  library/core/src/char/methods.rs | 8
-rw-r--r--  library/core/src/char/mod.rs | 6
-rw-r--r--  library/core/src/clone.rs | 9
-rw-r--r--  library/core/src/cmp.rs | 11
-rw-r--r--  library/core/src/const_closure.rs | 30
-rw-r--r--  library/core/src/convert/mod.rs | 8
-rw-r--r--  library/core/src/convert/num.rs | 2
-rw-r--r--  library/core/src/default.rs | 2
-rw-r--r--  library/core/src/error.md | 2
-rw-r--r--  library/core/src/error.rs | 5
-rw-r--r--  library/core/src/ffi/c_str.rs | 7
-rw-r--r--  library/core/src/fmt/mod.rs | 3
-rw-r--r--  library/core/src/future/mod.rs | 28
-rw-r--r--  library/core/src/hash/mod.rs | 83
-rw-r--r--  library/core/src/hash/sip.rs | 36
-rw-r--r--  library/core/src/hint.rs | 3
-rw-r--r--  library/core/src/intrinsics.rs | 146
-rw-r--r--  library/core/src/intrinsics/mir.rs | 289
-rw-r--r--  library/core/src/iter/adapters/array_chunks.rs | 75
-rw-r--r--  library/core/src/iter/adapters/take.rs | 21
-rw-r--r--  library/core/src/iter/mod.rs | 2
-rw-r--r--  library/core/src/iter/sources.rs | 4
-rw-r--r--  library/core/src/iter/sources/repeat_n.rs | 195
-rw-r--r--  library/core/src/iter/sources/repeat_with.rs | 17
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 2
-rw-r--r--  library/core/src/lib.rs | 45
-rw-r--r--  library/core/src/macros/mod.rs | 47
-rw-r--r--  library/core/src/marker.rs | 14
-rw-r--r--  library/core/src/mem/maybe_uninit.rs | 4
-rw-r--r--  library/core/src/mem/mod.rs | 73
-rw-r--r--  library/core/src/num/flt2dec/strategy/dragon.rs | 2
-rw-r--r--  library/core/src/num/int_macros.rs | 63
-rw-r--r--  library/core/src/num/mod.rs | 5
-rw-r--r--  library/core/src/num/nonzero.rs | 39
-rw-r--r--  library/core/src/num/uint_macros.rs | 49
-rw-r--r--  library/core/src/ops/control_flow.rs | 4
-rw-r--r--  library/core/src/ops/deref.rs | 2
-rw-r--r--  library/core/src/ops/function.rs | 332
-rw-r--r--  library/core/src/ops/index.rs | 4
-rw-r--r--  library/core/src/option.rs | 68
-rw-r--r--  library/core/src/panic.rs | 1
-rw-r--r--  library/core/src/panicking.rs | 31
-rw-r--r--  library/core/src/pin.rs | 59
-rw-r--r--  library/core/src/prelude/v1.rs | 15
-rw-r--r--  library/core/src/primitive_docs.rs | 2
-rw-r--r--  library/core/src/ptr/alignment.rs | 24
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 338
-rw-r--r--  library/core/src/ptr/metadata.rs | 1
-rw-r--r--  library/core/src/ptr/mod.rs | 111
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 350
-rw-r--r--  library/core/src/ptr/non_null.rs | 8
-rw-r--r--  library/core/src/slice/index.rs | 21
-rw-r--r--  library/core/src/slice/iter.rs | 32
-rw-r--r--  library/core/src/slice/mod.rs | 173
-rw-r--r--  library/core/src/str/converts.rs | 2
-rw-r--r--  library/core/src/str/mod.rs | 16
-rw-r--r--  library/core/src/str/pattern.rs | 234
-rw-r--r--  library/core/src/task/poll.rs | 1
-rw-r--r--  library/core/src/tuple.rs | 11
-rw-r--r--  library/core/tests/any.rs | 18
-rw-r--r--  library/core/tests/fmt/float.rs | 124
-rw-r--r--  library/core/tests/hash/mod.rs | 38
-rw-r--r--  library/core/tests/hash/sip.rs | 15
-rw-r--r--  library/core/tests/iter/adapters/array_chunks.rs | 3
-rw-r--r--  library/core/tests/iter/adapters/take.rs | 20
-rw-r--r--  library/core/tests/iter/sources.rs | 49
-rw-r--r--  library/core/tests/lib.rs | 10
-rw-r--r--  library/core/tests/mem.rs | 20
-rw-r--r--  library/core/tests/num/flt2dec/mod.rs | 4
-rw-r--r--  library/core/tests/option.rs | 2
-rw-r--r--  library/core/tests/ptr.rs | 291
-rw-r--r--  library/core/tests/slice.rs | 60
-rw-r--r--  library/panic_abort/Cargo.toml | 2
-rw-r--r--  library/panic_unwind/Cargo.toml | 2
-rw-r--r--  library/portable-simd/crates/core_simd/src/intrinsics.rs | 2
-rw-r--r--  library/portable-simd/crates/core_simd/src/ops.rs | 2
-rw-r--r--  library/std/Cargo.toml | 10
-rw-r--r--  library/std/src/collections/hash/map.rs | 9
-rw-r--r--  library/std/src/f32.rs | 14
-rw-r--r--  library/std/src/f32/tests.rs | 8
-rw-r--r--  library/std/src/f64.rs | 14
-rw-r--r--  library/std/src/f64/tests.rs | 8
-rw-r--r--  library/std/src/fs.rs | 7
-rw-r--r--  library/std/src/lib.rs | 5
-rw-r--r--  library/std/src/net/ip_addr.rs | 2
-rw-r--r--  library/std/src/net/mod.rs | 2
-rw-r--r--  library/std/src/os/android/net.rs | 9
-rw-r--r--  library/std/src/os/linux/net.rs | 9
-rw-r--r--  library/std/src/os/net/linux_ext/addr.rs | 64
-rw-r--r--  library/std/src/os/net/linux_ext/mod.rs | 12
-rw-r--r--  library/std/src/os/net/linux_ext/tcp.rs (renamed from library/std/src/os/net/tcp.rs) | 0
-rw-r--r--  library/std/src/os/net/linux_ext/tests.rs (renamed from library/std/src/os/net/tests.rs) | 3
-rw-r--r--  library/std/src/os/net/mod.rs | 9
-rw-r--r--  library/std/src/os/unix/net/addr.rs | 93
-rw-r--r--  library/std/src/os/unix/net/tests.rs | 36
-rw-r--r--  library/std/src/os/wasi/io/mod.rs | 4
-rw-r--r--  library/std/src/os/windows/io/socket.rs | 1
-rw-r--r--  library/std/src/panicking.rs | 4
-rw-r--r--  library/std/src/path.rs | 59
-rw-r--r--  library/std/src/personality/dwarf/eh.rs | 7
-rw-r--r--  library/std/src/personality/gcc.rs | 2
-rw-r--r--  library/std/src/prelude/v1.rs | 16
-rw-r--r--  library/std/src/primitive_docs.rs | 2
-rw-r--r--  library/std/src/sync/condvar.rs | 2
-rw-r--r--  library/std/src/sync/lazy_lock.rs | 4
-rw-r--r--  library/std/src/sync/mod.rs | 1
-rw-r--r--  library/std/src/sync/mpmc/array.rs | 513
-rw-r--r--  library/std/src/sync/mpmc/context.rs | 155
-rw-r--r--  library/std/src/sync/mpmc/counter.rs | 137
-rw-r--r--  library/std/src/sync/mpmc/error.rs | 46
-rw-r--r--  library/std/src/sync/mpmc/list.rs | 638
-rw-r--r--  library/std/src/sync/mpmc/mod.rs | 430
-rw-r--r--  library/std/src/sync/mpmc/select.rs | 71
-rw-r--r--  library/std/src/sync/mpmc/utils.rs | 143
-rw-r--r--  library/std/src/sync/mpmc/waker.rs | 204
-rw-r--r--  library/std/src/sync/mpmc/zero.rs | 318
-rw-r--r--  library/std/src/sync/mpsc/blocking.rs | 82
-rw-r--r--  library/std/src/sync/mpsc/cache_aligned.rs | 25
-rw-r--r--  library/std/src/sync/mpsc/mod.rs | 473
-rw-r--r--  library/std/src/sync/mpsc/mpsc_queue.rs | 117
-rw-r--r--  library/std/src/sync/mpsc/mpsc_queue/tests.rs | 47
-rw-r--r--  library/std/src/sync/mpsc/oneshot.rs | 315
-rw-r--r--  library/std/src/sync/mpsc/shared.rs | 501
-rw-r--r--  library/std/src/sync/mpsc/spsc_queue.rs | 236
-rw-r--r--  library/std/src/sync/mpsc/spsc_queue/tests.rs | 102
-rw-r--r--  library/std/src/sync/mpsc/stream.rs | 457
-rw-r--r--  library/std/src/sync/mpsc/sync.rs | 495
-rw-r--r--  library/std/src/sync/mpsc/sync_tests.rs | 8
-rw-r--r--  library/std/src/sync/mpsc/tests.rs | 15
-rw-r--r--  library/std/src/sync/mutex.rs | 16
-rw-r--r--  library/std/src/sync/once_lock.rs | 6
-rw-r--r--  library/std/src/sync/rwlock.rs | 12
-rw-r--r--  library/std/src/sys/common/alloc.rs | 12
-rw-r--r--  library/std/src/sys/hermit/fs.rs | 4
-rw-r--r--  library/std/src/sys/hermit/mod.rs | 6
-rw-r--r--  library/std/src/sys/hermit/thread.rs | 3
-rw-r--r--  library/std/src/sys/itron/condvar.rs | 13
-rw-r--r--  library/std/src/sys/itron/mutex.rs | 8
-rw-r--r--  library/std/src/sys/itron/thread.rs | 48
-rw-r--r--  library/std/src/sys/sgx/condvar.rs | 29
-rw-r--r--  library/std/src/sys/sgx/mod.rs | 1
-rw-r--r--  library/std/src/sys/sgx/mutex.rs | 24
-rw-r--r--  library/std/src/sys/sgx/rwlock.rs | 78
-rw-r--r--  library/std/src/sys/sgx/rwlock/tests.rs | 16
-rw-r--r--  library/std/src/sys/solid/io.rs | 4
-rw-r--r--  library/std/src/sys/solid/os.rs | 3
-rw-r--r--  library/std/src/sys/solid/rwlock.rs | 10
-rw-r--r--  library/std/src/sys/unix/locks/fuchsia_mutex.rs | 18
-rw-r--r--  library/std/src/sys/unix/locks/futex_condvar.rs | 6
-rw-r--r--  library/std/src/sys/unix/locks/futex_mutex.rs | 6
-rw-r--r--  library/std/src/sys/unix/locks/futex_rwlock.rs | 10
-rw-r--r--  library/std/src/sys/unix/locks/mod.rs | 18
-rw-r--r--  library/std/src/sys/unix/locks/pthread_condvar.rs | 179
-rw-r--r--  library/std/src/sys/unix/locks/pthread_mutex.rs | 134
-rw-r--r--  library/std/src/sys/unix/locks/pthread_rwlock.rs | 148
-rw-r--r--  library/std/src/sys/unix/time.rs | 25
-rw-r--r--  library/std/src/sys/unix/weak.rs | 37
-rw-r--r--  library/std/src/sys/unsupported/locks/condvar.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/mod.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/mutex.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/rwlock.rs | 10
-rw-r--r--  library/std/src/sys/wasi/net.rs | 10
-rw-r--r--  library/std/src/sys/wasm/mod.rs | 6
-rw-r--r--  library/std/src/sys/windows/args.rs | 54
-rw-r--r--  library/std/src/sys/windows/c.rs | 59
-rw-r--r--  library/std/src/sys/windows/locks/condvar.rs | 10
-rw-r--r--  library/std/src/sys/windows/locks/mod.rs | 6
-rw-r--r--  library/std/src/sys/windows/locks/mutex.rs | 13
-rw-r--r--  library/std/src/sys/windows/locks/rwlock.rs | 18
-rw-r--r--  library/std/src/sys/windows/mod.rs | 4
-rw-r--r--  library/std/src/sys/windows/pipe.rs | 9
-rw-r--r--  library/std/src/sys/windows/process.rs | 34
-rw-r--r--  library/std/src/sys/windows/stdio_uwp.rs | 87
-rw-r--r--  library/std/src/sys_common/condvar.rs | 57
-rw-r--r--  library/std/src/sys_common/condvar/check.rs | 58
-rw-r--r--  library/std/src/sys_common/mod.rs | 4
-rw-r--r--  library/std/src/sys_common/mutex.rs | 50
-rw-r--r--  library/std/src/sys_common/once/generic.rs | 1
-rw-r--r--  library/std/src/sys_common/remutex.rs | 10
-rw-r--r--  library/std/src/sys_common/rwlock.rs | 71
-rw-r--r--  library/std/src/sys_common/wstr.rs | 59
-rw-r--r--  library/std/src/thread/local/tests.rs | 66
-rw-r--r--  library/std/src/thread/mod.rs | 5
-rw-r--r--  library/std/src/thread/scoped.rs | 2
-rw-r--r--  library/std/src/time/tests.rs | 8
-rw-r--r--  library/test/Cargo.toml | 2
-rw-r--r--  library/test/src/cli.rs | 5
-rw-r--r--  library/test/src/console.rs | 8
-rw-r--r--  library/test/src/event.rs | 2
-rw-r--r--  library/test/src/lib.rs | 168
-rw-r--r--  library/test/src/options.rs | 7
-rw-r--r--  library/test/src/tests.rs | 27
-rw-r--r--  library/unwind/Cargo.toml | 4
-rw-r--r--  library/unwind/build.rs | 24
-rw-r--r--  library/unwind/src/lib.rs | 25
-rw-r--r--  library/unwind/src/libunwind.rs | 17
238 files changed, 8713 insertions, 5961 deletions
diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs
index d418965cd..b25d63d83 100644
--- a/library/alloc/benches/lib.rs
+++ b/library/alloc/benches/lib.rs
@@ -5,7 +5,9 @@
#![feature(iter_next_chunk)]
#![feature(repr_simd)]
#![feature(slice_partition_dedup)]
+#![feature(strict_provenance)]
#![feature(test)]
+#![deny(fuzzy_provenance_casts)]
extern crate test;
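The two attributes added above opt the alloc benchmarks into the strict-provenance experiment: with `#![deny(fuzzy_provenance_casts)]`, a plain integer-to-pointer cast becomes a hard error, and pointers have to be derived from other pointers instead. A minimal sketch of what the lint rejects and what the (then-unstable) strict-provenance API offers as a replacement; the `tag` helper is a hypothetical illustration, not code from this patch:

#![feature(strict_provenance)]
#![deny(fuzzy_provenance_casts)]

// Hypothetical helper: set a tag in a pointer's lowest bit.
fn tag(p: *mut u8) -> *mut u8 {
    // ((p as usize) | 1) as *mut u8   // int-to-ptr cast: denied by the lint
    p.map_addr(|addr| addr | 1) // derives the result from `p`, keeping its provenance
}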
diff --git a/library/alloc/benches/str.rs b/library/alloc/benches/str.rs
index 391475bc0..54af389de 100644
--- a/library/alloc/benches/str.rs
+++ b/library/alloc/benches/str.rs
@@ -1,3 +1,4 @@
+use core::iter::Iterator;
use test::{black_box, Bencher};
#[bench]
@@ -122,14 +123,13 @@ fn bench_contains_short_short(b: &mut Bencher) {
let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
let needle = "sit";
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(haystack.contains(needle));
+ assert!(black_box(haystack).contains(black_box(needle)));
})
}
-#[bench]
-fn bench_contains_short_long(b: &mut Bencher) {
- let haystack = "\
+static LONG_HAYSTACK: &str = "\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
@@ -164,10 +164,48 @@ feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, i
vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
malesuada sollicitudin quam eu fermentum.";
+
+#[bench]
+fn bench_contains_2b_repeated_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "::";
+
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_short_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
let needle = "english";
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_16b_in_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "english language";
+
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_32b_in_long(b: &mut Bencher) {
+ let haystack = LONG_HAYSTACK;
+ let needle = "the english language sample text";
+
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(!haystack.contains(needle));
+ assert!(!black_box(haystack).contains(black_box(needle)));
})
}
@@ -176,8 +214,20 @@ fn bench_contains_bad_naive(b: &mut Bencher) {
let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let needle = "aaaaaaaab";
+ b.bytes = haystack.len() as u64;
+ b.iter(|| {
+ assert!(!black_box(haystack).contains(black_box(needle)));
+ })
+}
+
+#[bench]
+fn bench_contains_bad_simd(b: &mut Bencher) {
+ let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ let needle = "aaabaaaa";
+
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(!haystack.contains(needle));
+ assert!(!black_box(haystack).contains(black_box(needle)));
})
}
@@ -186,8 +236,9 @@ fn bench_contains_equal(b: &mut Bencher) {
let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
let needle = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
+ b.bytes = haystack.len() as u64;
b.iter(|| {
- assert!(haystack.contains(needle));
+ assert!(black_box(haystack).contains(black_box(needle)));
})
}
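Every benchmark above gets the same two changes: both inputs pass through `black_box` so the optimizer cannot fold the `contains` call into a constant, and `b.bytes` is set so libtest reports throughput in addition to ns/iter. A minimal sketch of the pattern, with hypothetical inputs:

#[bench]
fn bench_contains_pattern(b: &mut Bencher) {
    let haystack = "hello, world"; // hypothetical inputs
    let needle = "world";

    b.bytes = haystack.len() as u64; // lets libtest print MB/s
    b.iter(|| {
        // black_box hides both values from constant propagation
        assert!(black_box(haystack).contains(black_box(needle)));
    })
}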
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 8187517cc..e5fbfc557 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -28,20 +28,16 @@ extern "Rust" {
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_deallocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
#[rustc_reallocator]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
#[rustc_allocator_zeroed]
- #[cfg_attr(not(bootstrap), rustc_nounwind)]
- #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+ #[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
@@ -402,19 +398,18 @@ pub use std::alloc::handle_alloc_error;
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
- use crate::alloc::Layout;
-
- // called via generated `__rust_alloc_error_handler`
-
- // if there is no `#[alloc_error_handler]`
+ // called via generated `__rust_alloc_error_handler` if there is no
+ // `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
panic!("memory allocation of {size} bytes failed")
}
- // if there is an `#[alloc_error_handler]`
+ #[cfg(bootstrap)]
#[rustc_std_internal_symbol]
pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
+ use crate::alloc::Layout;
+
let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
#[lang = "oom"]
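`__rdl_oom` above is the default handler a failed allocation reaches when no `#[alloc_error_handler]` is registered. From user code, the same path can be exercised deliberately through the stable `handle_alloc_error`; a small sketch (it diverges and aborts the process):

use std::alloc::{handle_alloc_error, Layout};

fn main() {
    let layout = Layout::from_size_align(1 << 40, 8).unwrap();
    // Reports "memory allocation of 1099511627776 bytes failed" and aborts.
    handle_alloc_error(layout);
}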
diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
index b2f019459..1a5938fd3 100644
--- a/library/alloc/src/alloc/tests.rs
+++ b/library/alloc/src/alloc/tests.rs
@@ -22,7 +22,6 @@ fn allocate_zeroed() {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = Box::new(10);
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index d6681a317..e5f6b0c0c 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -158,6 +158,8 @@ use core::hash::{Hash, Hasher};
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::iter::{FusedIterator, Iterator};
+#[cfg(not(bootstrap))]
+use core::marker::Tuple;
use core::marker::{Destruct, Unpin, Unsize};
use core::mem;
use core::ops::{
@@ -185,7 +187,7 @@ pub use thin::ThinBox;
mod thin;
-/// A pointer type for heap allocation.
+/// A pointer type that uniquely owns a heap allocation of type `T`.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
@@ -1979,6 +1981,7 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
@@ -1988,6 +1991,17 @@ impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+ type Output = <F as FnOnce<Args>>::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ <F as FnOnce<Args>>::call_once(*self, args)
+ }
+}
+
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
@@ -1995,6 +2009,15 @@ impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ <F as FnMut<Args>>::call_mut(self, args)
+ }
+}
+
+#[cfg(bootstrap)]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
@@ -2002,6 +2025,14 @@ impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output {
+ <F as Fn<Args>>::call(self, args)
+ }
+}
+
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
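The three impls are duplicated only to swap the old unbounded `Args` generic for `Args: Tuple` during the bootstrap window; what they provide is unchanged: boxed closures, including `Box<dyn Fn(..)>` trait objects, stay directly callable. A small sketch of what these impls enable:

fn apply(f: Box<dyn Fn(i32) -> i32>, x: i32) -> i32 {
    // The call dispatches through the `Fn<(i32,)>` impl for Box above.
    f(x)
}

fn main() {
    assert_eq!(apply(Box::new(|x| x + 1), 2), 3);
}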
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 8a7719347..1d9c4460e 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -46,8 +46,8 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
/// is done is *very* inefficient for modern computer architectures. In particular, every element
/// is stored in its own individually heap-allocated node. This means that every single insertion
/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
-/// are both notably expensive things to do in practice, we are forced to at very least reconsider
-/// the BST strategy.
+/// are both notably expensive things to do in practice, we are forced to, at the very least,
+/// reconsider the BST strategy.
///
/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index aadb0dc9c..64bce0ff8 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -94,6 +94,7 @@ fn test_partial_eq() {
#[test]
#[cfg(target_arch = "x86_64")]
+#[cfg_attr(miri, ignore)] // We'd like to run Miri with layout randomization
fn test_sizes() {
assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
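For reference, the expected sizes follow from the node layout the map documentation describes: the standard library's B-tree uses a branching factor of B = 6, so CAPACITY = 2B - 1 = 11 key-value pairs per node, and a `LeafNode<i64, i64>` is a 16-byte header plus 11 * 2 * 8 = 176 bytes of keys and values, 192 bytes in total.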
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 161a37573..3e0b0f735 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -139,7 +139,7 @@ impl Display for TryReserveError {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
- " because the memory allocator returned a error"
+ " because the memory allocator returned an error"
}
};
fmt.write_str(reason)
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 41baa7102..89feb361d 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,12 +1,12 @@
-use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
-use core::mem::{self, MaybeUninit};
-use core::ptr::{self, NonNull};
+use core::mem::{self, SizedTypeProperties};
+use core::ptr::NonNull;
+use core::{fmt, ptr};
use crate::alloc::{Allocator, Global};
-use super::{count, wrap_index, VecDeque};
+use super::VecDeque;
/// A draining iterator over the elements of a `VecDeque`.
///
@@ -20,26 +20,70 @@ pub struct Drain<
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
- after_tail: usize,
- after_head: usize,
- ring: NonNull<[T]>,
- tail: usize,
- head: usize,
+ // We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
+ // and we want it to be covariant instead
deque: NonNull<VecDeque<T, A>>,
- _phantom: PhantomData<&'a T>,
+ // drain_start is stored in deque.len
+ drain_len: usize,
+ // index into the logical array, not the physical one (always lies in [0..deque.len))
+ idx: usize,
+ // number of elements after the drain range
+ tail_len: usize,
+ remaining: usize,
+ // Needed to make Drain covariant over T
+ _marker: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
- after_tail: usize,
- after_head: usize,
- ring: &'a [MaybeUninit<T>],
- tail: usize,
- head: usize,
- deque: NonNull<VecDeque<T, A>>,
+ deque: &'a mut VecDeque<T, A>,
+ drain_start: usize,
+ drain_len: usize,
) -> Self {
- let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
- Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
+ let orig_len = mem::replace(&mut deque.len, drain_start);
+ let tail_len = orig_len - drain_start - drain_len;
+ Drain {
+ deque: NonNull::from(deque),
+ drain_len,
+ idx: drain_start,
+ tail_len,
+ remaining: drain_len,
+ _marker: PhantomData,
+ }
+ }
+
+ // Only returns pointers to the slices, as that's
+ // all we need to drop them. May only be called if `self.remaining != 0`.
+ unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
+ unsafe {
+ let deque = self.deque.as_ref();
+ // FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
+ // Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
+ // just `drain_start`, so the range check would (almost) always panic. Between temporarily
+ // adjusting the deque's `len` to call `slice_ranges`, and just copy-pasting the `slice_ranges`
+ // implementation, this seemed like the less hacky solution, though it might be good to
+ // find a better one in the future.
+
+ // because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
+ // logical index.
+ let wrapped_start = deque.to_physical_idx(self.idx);
+
+ let head_len = deque.capacity() - wrapped_start;
+
+ let (a_range, b_range) = if head_len >= self.remaining {
+ (wrapped_start..wrapped_start + self.remaining, 0..0)
+ } else {
+ let tail_len = self.remaining - head_len;
+ (wrapped_start..deque.capacity(), 0..tail_len)
+ };
+
+ // SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
+ // the range `0..deque.original_len`. Because of this, and because of the fact
+ // that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
+ // it's guaranteed that `a_range` and `b_range` represent valid ranges into
+ // the deque's buffer.
+ (deque.buffer_range(a_range), deque.buffer_range(b_range))
+ }
}
}
@@ -47,11 +91,10 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain")
- .field(&self.after_tail)
- .field(&self.after_head)
- .field(&self.ring)
- .field(&self.tail)
- .field(&self.head)
+ .field(&self.drain_len)
+ .field(&self.idx)
+ .field(&self.tail_len)
+ .field(&self.remaining)
.finish()
}
}
@@ -68,57 +111,81 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
- self.0.for_each(drop);
+ if self.0.remaining != 0 {
+ unsafe {
+ // SAFETY: We just checked that `self.remaining != 0`.
+ let (front, back) = self.0.as_slices();
+ ptr::drop_in_place(front);
+ ptr::drop_in_place(back);
+ }
+ }
let source_deque = unsafe { self.0.deque.as_mut() };
- // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
- //
- // T t h H
- // [. . . o o x x o o . . .]
- //
- let orig_tail = source_deque.tail;
- let drain_tail = source_deque.head;
- let drain_head = self.0.after_tail;
- let orig_head = self.0.after_head;
+ let drain_start = source_deque.len();
+ let drain_len = self.0.drain_len;
+ let drain_end = drain_start + drain_len;
+
+ let orig_len = self.0.tail_len + drain_end;
- let tail_len = count(orig_tail, drain_tail, source_deque.cap());
- let head_len = count(drain_head, orig_head, source_deque.cap());
+ if T::IS_ZST {
+ // no need to copy around any memory if T is a ZST
+ source_deque.len = orig_len - drain_len;
+ return;
+ }
- // Restore the original head value
- source_deque.head = orig_head;
+ let head_len = drain_start;
+ let tail_len = self.0.tail_len;
- match (tail_len, head_len) {
+ match (head_len, tail_len) {
(0, 0) => {
source_deque.head = 0;
- source_deque.tail = 0;
+ source_deque.len = 0;
}
(0, _) => {
- source_deque.tail = drain_head;
+ source_deque.head = source_deque.to_physical_idx(drain_len);
+ source_deque.len = orig_len - drain_len;
}
(_, 0) => {
- source_deque.head = drain_tail;
+ source_deque.len = orig_len - drain_len;
}
_ => unsafe {
- if tail_len <= head_len {
- source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
- source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
+ if head_len <= tail_len {
+ source_deque.wrap_copy(
+ source_deque.head,
+ source_deque.to_physical_idx(drain_len),
+ head_len,
+ );
+ source_deque.head = source_deque.to_physical_idx(drain_len);
+ source_deque.len = orig_len - drain_len;
} else {
- source_deque.head = source_deque.wrap_add(drain_tail, head_len);
- source_deque.wrap_copy(drain_tail, drain_head, head_len);
+ source_deque.wrap_copy(
+ source_deque.to_physical_idx(head_len + drain_len),
+ source_deque.to_physical_idx(head_len),
+ tail_len,
+ );
+ source_deque.len = orig_len - drain_len;
}
},
}
}
}
- while let Some(item) = self.next() {
- let guard = DropGuard(self);
- drop(item);
- mem::forget(guard);
+ let guard = DropGuard(self);
+ if guard.0.remaining != 0 {
+ unsafe {
+ // SAFETY: We just checked that `self.remaining != 0`.
+ let (front, back) = guard.0.as_slices();
+ // since idx is a logical index, we don't need to worry about wrapping.
+ guard.0.idx += front.len();
+ guard.0.remaining -= front.len();
+ ptr::drop_in_place(front);
+ guard.0.remaining = 0;
+ ptr::drop_in_place(back);
+ }
}
- DropGuard(self);
+ // Dropping `guard` handles moving the remaining elements into place.
}
}
@@ -128,20 +195,18 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.tail == self.head {
+ if self.remaining == 0 {
return None;
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
- // Safety:
- // - `self.tail` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
+ let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
+ self.idx += 1;
+ self.remaining -= 1;
+ Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.remaining;
(len, Some(len))
}
}
@@ -150,14 +215,12 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
- if self.tail == self.head {
+ if self.remaining == 0 {
return None;
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
- // Safety:
- // - `self.head` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
+ self.remaining -= 1;
+ let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
+ Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
}
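The rewritten `Drain` keeps the old observable contract: elements in the drained range are yielded (or dropped when the iterator is dropped early), and the shorter of the two remaining sections is shifted across the gap. A minimal sketch of that contract, with hypothetical values:

use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = (0..8).collect();
    let mut d = dq.drain(2..6);    // drain_len = 4, tail_len = 2
    assert_eq!(d.next(), Some(2)); // remaining = 3
    drop(d); // the DropGuard drops 3, 4, 5 and closes the gap
    assert_eq!(dq, VecDeque::from(vec![0, 1, 6, 7]));
}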
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
index e696d7ed6..d9f393714 100644
--- a/library/alloc/src/collections/vec_deque/iter.rs
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -1,9 +1,6 @@
-use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
-use core::mem::MaybeUninit;
use core::ops::Try;
-
-use super::{count, wrap_index, RingSlices};
+use core::{fmt, mem, slice};
/// An iterator over the elements of a `VecDeque`.
///
@@ -13,30 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter`]: super::VecDeque::iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
- ring: &'a [MaybeUninit<T>],
- tail: usize,
- head: usize,
+ i1: slice::Iter<'a, T>,
+ i2: slice::Iter<'a, T>,
}
impl<'a, T> Iter<'a, T> {
- pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self {
- Iter { ring, tail, head }
+ pub(super) fn new(i1: slice::Iter<'a, T>, i2: slice::Iter<'a, T>) -> Self {
+ Self { i1, i2 }
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- f.debug_tuple("Iter")
- .field(&MaybeUninit::slice_assume_init_ref(front))
- .field(&MaybeUninit::slice_assume_init_ref(back))
- .finish()
- }
+ f.debug_tuple("Iter").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@@ -44,7 +31,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
- Iter { ring: self.ring, tail: self.tail, head: self.head }
+ Iter { i1: self.i1.clone(), i2: self.i2.clone() }
}
}
@@ -54,72 +41,50 @@ impl<'a, T> Iterator for Iter<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a T> {
- if self.tail == self.head {
- return None;
+ match self.i1.next() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the first one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i1 is empty anyway
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.next()
+ }
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
- // Safety:
- // - `self.tail` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
+ }
+
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i1.advance_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n - m).map_err(|o| o + m)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.len();
(len, Some(len))
}
- fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
- MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
- }
+ let accum = self.i1.fold(accum, &mut f);
+ self.i2.fold(accum, &mut f)
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
- Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- let (mut iter, final_res);
- if self.tail <= self.head {
- // Safety: single slice self.ring[self.tail..self.head] is initialized.
- iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
- .iter();
- final_res = iter.try_fold(init, &mut f);
- } else {
- // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
- let (front, back) = self.ring.split_at(self.tail);
-
- let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
- let res = back_iter.try_fold(init, &mut f);
- let len = self.ring.len();
- self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
- iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
- final_res = iter.try_fold(res?, &mut f);
- }
- self.tail = self.head - iter.len();
- final_res
- }
-
- fn nth(&mut self, n: usize) -> Option<Self::Item> {
- if n >= count(self.tail, self.head, self.ring.len()) {
- self.tail = self.head;
- None
- } else {
- self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
- self.next()
- }
+ let acc = self.i1.try_fold(init, &mut f)?;
+ self.i2.try_fold(acc, &mut f)
}
#[inline]
@@ -132,8 +97,12 @@ impl<'a, T> Iterator for Iter<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
- let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
- self.ring.get_unchecked(idx).assume_init_ref()
+ let i1_len = self.i1.len();
+ if idx < i1_len {
+ self.i1.__iterator_get_unchecked(idx)
+ } else {
+ self.i2.__iterator_get_unchecked(idx - i1_len)
+ }
}
}
}
@@ -142,63 +111,56 @@ impl<'a, T> Iterator for Iter<'a, T> {
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a T> {
- if self.tail == self.head {
- return None;
+ match self.i2.next_back() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the second one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i2 is empty anyway
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.next_back()
+ }
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
- // Safety:
- // - `self.head` in a ring buffer is always a valid index.
- // - `self.head` and `self.tail` equality is checked above.
- unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
}
- fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i2.advance_back_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ }
+
+ fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
- MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
- }
+ let accum = self.i2.rfold(accum, &mut f);
+ self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
- Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
- let (mut iter, final_res);
- if self.tail <= self.head {
- // Safety: single slice self.ring[self.tail..self.head] is initialized.
- iter = unsafe {
- MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
- };
- final_res = iter.try_rfold(init, &mut f);
- } else {
- // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
- let (front, back) = self.ring.split_at(self.tail);
-
- let mut front_iter =
- unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
- let res = front_iter.try_rfold(init, &mut f);
- self.head = front_iter.len();
- iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
- final_res = iter.try_rfold(res?, &mut f);
- }
- self.head = self.tail + iter.len();
- final_res
+ let acc = self.i2.try_rfold(init, &mut f)?;
+ self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.i1.len() + self.i2.len()
+ }
+
fn is_empty(&self) -> bool {
- self.head == self.tail
+ self.i1.is_empty() && self.i2.is_empty()
}
}
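The rewrite reduces `Iter` to a pair of plain slice iterators over the two contiguous sections of the ring, so it inherits the slice iterator's optimized `fold`, `try_fold`, and `advance_by`. The swap-on-exhaustion trick is generic; a standalone sketch of the same pattern (hypothetical type, not the patch's code):

use std::{mem, slice};

struct TwoSlices<'a, T> {
    i1: slice::Iter<'a, T>, // front (unwrapped) section
    i2: slice::Iter<'a, T>, // back (wrapped) section
}

impl<'a, T> Iterator for TwoSlices<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        self.i1.next().or_else(|| {
            // Once the front runs dry, swap so the hot path keeps
            // hitting a single non-empty slice iterator.
            mem::swap(&mut self.i1, &mut self.i2);
            self.i1.next()
        })
    }
}

fn main() {
    let (front, back) = (&[1, 2][..], &[3][..]);
    let it = TwoSlices { i1: front.iter(), i2: back.iter() };
    assert_eq!(it.copied().collect::<Vec<_>>(), vec![1, 2, 3]);
}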
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
index b78c0d5e1..2c59d95cd 100644
--- a/library/alloc/src/collections/vec_deque/iter_mut.rs
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -1,8 +1,6 @@
-use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
-use core::marker::PhantomData;
-
-use super::{count, wrap_index, RingSlices};
+use core::ops::Try;
+use core::{fmt, mem, slice};
/// A mutable iterator over the elements of a `VecDeque`.
///
@@ -12,39 +10,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter_mut`]: super::VecDeque::iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
- // Internal safety invariant: the entire slice is dereferenceable.
- ring: *mut [T],
- tail: usize,
- head: usize,
- phantom: PhantomData<&'a mut [T]>,
+ i1: slice::IterMut<'a, T>,
+ i2: slice::IterMut<'a, T>,
}
impl<'a, T> IterMut<'a, T> {
- pub(super) unsafe fn new(
- ring: *mut [T],
- tail: usize,
- head: usize,
- phantom: PhantomData<&'a mut [T]>,
- ) -> Self {
- IterMut { ring, tail, head, phantom }
+ pub(super) fn new(i1: slice::IterMut<'a, T>, i2: slice::IterMut<'a, T>) -> Self {
+ Self { i1, i2 }
}
}
-// SAFETY: we do nothing thread-local and there is no interior mutability,
-// so the usual structural `Send`/`Sync` apply.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Send> Send for IterMut<'_, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
-
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&*front, &*back) };
- f.debug_tuple("IterMut").field(&front).field(&back).finish()
+ f.debug_tuple("IterMut").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@@ -54,44 +33,50 @@ impl<'a, T> Iterator for IterMut<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a mut T> {
- if self.tail == self.head {
- return None;
+ match self.i1.next() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the first one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i1 is empty anyway
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.next()
+ }
}
- let tail = self.tail;
- self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ }
- unsafe {
- let elem = self.ring.get_unchecked_mut(tail);
- Some(&mut *elem)
- }
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i1.advance_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n - m).map_err(|o| o + m)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let len = count(self.tail, self.head, self.ring.len());
+ let len = self.len();
(len, Some(len))
}
- fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&mut *front, &mut *back) };
- accum = front.iter_mut().fold(accum, &mut f);
- back.iter_mut().fold(accum, &mut f)
+ let accum = self.i1.fold(accum, &mut f);
+ self.i2.fold(accum, &mut f)
}
- fn nth(&mut self, n: usize) -> Option<Self::Item> {
- if n >= count(self.tail, self.head, self.ring.len()) {
- self.tail = self.head;
- None
- } else {
- self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
- self.next()
- }
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let acc = self.i1.try_fold(init, &mut f)?;
+ self.i2.try_fold(acc, &mut f)
}
#[inline]
@@ -104,8 +89,12 @@ impl<'a, T> Iterator for IterMut<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
- let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
- &mut *self.ring.get_unchecked_mut(idx)
+ let i1_len = self.i1.len();
+ if idx < i1_len {
+ self.i1.__iterator_get_unchecked(idx)
+ } else {
+ self.i2.__iterator_get_unchecked(idx - i1_len)
+ }
}
}
}
@@ -114,34 +103,56 @@ impl<'a, T> Iterator for IterMut<'a, T> {
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut T> {
- if self.tail == self.head {
- return None;
+ match self.i2.next_back() {
+ Some(val) => Some(val),
+ None => {
+ // most of the time, the iterator will either always
+ // call next(), or always call next_back(). By swapping
+ // the iterators once the second one is empty, we ensure
+ // that the first branch is taken as often as possible,
+ // without sacrificing correctness, as i2 is empty anyway
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.next_back()
+ }
}
- self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ }
- unsafe {
- let elem = self.ring.get_unchecked_mut(self.head);
- Some(&mut *elem)
- }
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let m = match self.i2.advance_back_by(n) {
+ Ok(_) => return Ok(()),
+ Err(m) => m,
+ };
+
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n - m).map_err(|o| m + o)
}
- fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
- let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
- // The `IterMut` invariant also ensures everything is dereferenceable.
- let (front, back) = unsafe { (&mut *front, &mut *back) };
- accum = back.iter_mut().rfold(accum, &mut f);
- front.iter_mut().rfold(accum, &mut f)
+ let accum = self.i2.rfold(accum, &mut f);
+ self.i1.rfold(accum, &mut f)
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let acc = self.i2.try_rfold(init, &mut f)?;
+ self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn len(&self) -> usize {
+ self.i1.len() + self.i2.len()
+ }
+
fn is_empty(&self) -> bool {
- self.head == self.tail
+ self.i1.is_empty() && self.i2.is_empty()
}
}
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 2a57dad89..4866c53e7 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -10,11 +10,10 @@
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{repeat_with, FromIterator};
-use core::marker::PhantomData;
-use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::iter::{repeat_n, repeat_with, ByRefSized, FromIterator};
+use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
-use core::ptr::{self, NonNull};
+use core::ptr;
use core::slice;
// This is used in a bunch of intra-doc links.
@@ -52,14 +51,6 @@ pub use self::iter::Iter;
mod iter;
-use self::pair_slices::PairSlices;
-
-mod pair_slices;
-
-use self::ring_slices::RingSlices;
-
-mod ring_slices;
-
use self::spec_extend::SpecExtend;
mod spec_extend;
@@ -67,11 +58,6 @@ mod spec_extend;
#[cfg(test)]
mod tests;
-const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
-const MINIMUM_CAPACITY: usize = 1; // 2 - 1
-
-const MAXIMUM_ZST_CAPACITY: usize = 1 << (usize::BITS - 1); // Largest possible power of two
-
/// A double-ended queue implemented with a growable ring buffer.
///
/// The "default" usage of this type as a queue is to use [`push_back`] to add to
@@ -105,13 +91,13 @@ pub struct VecDeque<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
- // tail and head are pointers into the buffer. Tail always points
- // to the first element that could be read, Head always points
- // to where data should be written.
- // If tail == head the buffer is empty. The length of the ringbuffer
- // is defined as the distance between the two.
- tail: usize,
+ // `self[0]`, if it exists, is `buf[head]`.
+ // `head < buf.capacity()`, unless `buf.capacity() == 0` when `head == 0`.
head: usize,
+ // the number of initialized elements, starting from the one at `head` and potentially wrapping around.
+ // if `len == 0`, the exact value of `head` is unimportant.
+ // if `T` is zero-sized, then `self.len <= usize::MAX`, otherwise `self.len <= isize::MAX as usize`.
+ len: usize,
buf: RawVec<T, A>,
}
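This hunk is the heart of the `VecDeque` rewrite: the old `(tail, head)` pair of buffer indices, which reserved one empty slot and forced power-of-two capacities, becomes a `(head, len)` pair, so empty is `len == 0` and full is `len == capacity()`. A sketch of the logical-to-physical mapping the new comments describe, written as a free function for illustration (the patch implements it as the `wrap_add`/`to_physical_idx` pair further down):

// `head < cap` and `logical < len <= cap`, so `head + logical < 2 * cap`
// and a single conditional subtraction is enough to wrap (the real code
// uses wrapping arithmetic to also cover the ZST case).
fn to_physical_idx(head: usize, logical: usize, cap: usize) -> usize {
    let i = head + logical;
    if i >= cap { i - cap } else { i }
}
// element `i` of the deque lives at `buf[to_physical_idx(head, i, cap)]`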
@@ -124,18 +110,8 @@ impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
}
fn clone_from(&mut self, other: &Self) {
- self.truncate(other.len());
-
- let mut iter = PairSlices::from(self, other);
- while let Some((dst, src)) = iter.next() {
- dst.clone_from_slice(&src);
- }
-
- if iter.has_remainder() {
- for remainder in iter.remainder() {
- self.extend(remainder.iter().cloned());
- }
- }
+ self.clear();
+ self.extend(other.iter().cloned());
}
}
@@ -180,41 +156,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.buf.ptr()
}
- /// Marginally more convenient
- #[inline]
- fn cap(&self) -> usize {
- if T::IS_ZST {
- // For zero sized types, we are always at maximum capacity
- MAXIMUM_ZST_CAPACITY
- } else {
- self.buf.capacity()
- }
- }
-
- /// Turn ptr into a slice, since the elements of the backing buffer may be uninitialized,
- /// we will return a slice of [`MaybeUninit<T>`].
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
- /// incorrect usage of this method.
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[inline]
- unsafe fn buffer_as_slice(&self) -> &[MaybeUninit<T>] {
- unsafe { slice::from_raw_parts(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
- }
-
- /// Turn ptr into a mut slice, since the elements of the backing buffer may be uninitialized,
- /// we will return a slice of [`MaybeUninit<T>`].
- ///
- /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
- /// incorrect usage of this method.
- ///
- /// [zeroed]: mem::MaybeUninit::zeroed
- #[inline]
- unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
- unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
- }
-
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
@@ -229,51 +170,58 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
}
- /// Returns `true` if the buffer is at full capacity.
+ /// Returns a slice pointer into the buffer.
+ /// `range` must lie inside `0..self.capacity()`.
#[inline]
- fn is_full(&self) -> bool {
- self.cap() - self.len() == 1
+ unsafe fn buffer_range(&self, range: Range<usize>) -> *mut [T] {
+ unsafe {
+ ptr::slice_from_raw_parts_mut(self.ptr().add(range.start), range.end - range.start)
+ }
}
- /// Returns the index in the underlying buffer for a given logical element
- /// index.
+ /// Returns `true` if the buffer is at full capacity.
#[inline]
- fn wrap_index(&self, idx: usize) -> usize {
- wrap_index(idx, self.cap())
+ fn is_full(&self) -> bool {
+ self.len == self.capacity()
}
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
- wrap_index(idx.wrapping_add(addend), self.cap())
+ wrap_index(idx.wrapping_add(addend), self.capacity())
+ }
+
+ #[inline]
+ fn to_physical_idx(&self, idx: usize) -> usize {
+ self.wrap_add(self.head, idx)
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
- wrap_index(idx.wrapping_sub(subtrahend), self.cap())
+ wrap_index(idx.wrapping_sub(subtrahend).wrapping_add(self.capacity()), self.capacity())
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
- unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
+ unsafe fn copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- dst + len <= self.cap(),
+ dst + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
debug_assert!(
- src + len <= self.cap(),
+ src + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
unsafe {
ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
@@ -282,22 +230,22 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Copies a contiguous block of memory len long from src to dst
#[inline]
- unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
+ unsafe fn copy_nonoverlapping(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- dst + len <= self.cap(),
+ dst + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
debug_assert!(
- src + len <= self.cap(),
+ src + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
unsafe {
ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
@@ -305,30 +253,28 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Copies a potentially wrapping block of memory len long from src to dest.
- /// (abs(dst - src) + len) must be no larger than cap() (There must be at
+ /// (abs(dst - src) + len) must be no larger than capacity() (There must be at
/// most one continuous overlapping region between src and dest).
- unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
- #[allow(dead_code)]
- fn diff(a: usize, b: usize) -> usize {
- if a <= b { b - a } else { a - b }
- }
+ unsafe fn wrap_copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
- cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
+ cmp::min(src.abs_diff(dst), self.capacity() - src.abs_diff(dst)) + len
+ <= self.capacity(),
"wrc dst={} src={} len={} cap={}",
dst,
src,
len,
- self.cap()
+ self.capacity()
);
- if src == dst || len == 0 {
+ // If T is a ZST, don't do any copying.
+ if T::IS_ZST || src == dst || len == 0 {
return;
}
let dst_after_src = self.wrap_sub(dst, src) < len;
- let src_pre_wrap_len = self.cap() - src;
- let dst_pre_wrap_len = self.cap() - dst;
+ let src_pre_wrap_len = self.capacity() - src;
+ let dst_pre_wrap_len = self.capacity() - dst;
let src_wraps = src_pre_wrap_len < len;
let dst_wraps = dst_pre_wrap_len < len;
@@ -342,7 +288,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst, src, len);
+ self.copy(src, dst, len);
}
}
(false, false, true) => {
@@ -355,8 +301,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// . . D .
//
unsafe {
- self.copy(dst, src, dst_pre_wrap_len);
- self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ self.copy(src, dst, dst_pre_wrap_len);
+ self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
}
}
(true, false, true) => {
@@ -369,8 +315,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// . . D .
//
unsafe {
- self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
- self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
+ self.copy(src, dst, dst_pre_wrap_len);
}
}
(false, true, false) => {
@@ -383,8 +329,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst, src, src_pre_wrap_len);
- self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
}
}
(true, true, false) => {
@@ -397,8 +343,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// D . . .
//
unsafe {
- self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
- self.copy(dst, src, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
}
}
(false, true, true) => {
@@ -414,9 +360,9 @@ impl<T, A: Allocator> VecDeque<T, A> {
debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
let delta = dst_pre_wrap_len - src_pre_wrap_len;
unsafe {
- self.copy(dst, src, src_pre_wrap_len);
- self.copy(dst + src_pre_wrap_len, 0, delta);
- self.copy(0, delta, len - dst_pre_wrap_len);
+ self.copy(src, dst, src_pre_wrap_len);
+ self.copy(0, dst + src_pre_wrap_len, delta);
+ self.copy(delta, 0, len - dst_pre_wrap_len);
}
}
(true, true, true) => {
@@ -432,9 +378,9 @@ impl<T, A: Allocator> VecDeque<T, A> {
debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
let delta = src_pre_wrap_len - dst_pre_wrap_len;
unsafe {
- self.copy(delta, 0, len - src_pre_wrap_len);
- self.copy(0, self.cap() - delta, delta);
- self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(0, delta, len - src_pre_wrap_len);
+ self.copy(self.capacity() - delta, 0, delta);
+ self.copy(src, dst, dst_pre_wrap_len);
}
}
}
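The method itself works on the deque's raw, partially uninitialized buffer, but its contract can be modeled in safe code: element `i` moves from physical slot `(src + i) % cap` to `(dst + i) % cap`. A hypothetical `wrap_copy_model` showing the wrap-around:

    /// Safe model of a wrapping copy on a ring buffer of `buf.len()` slots.
    /// A temporary buffer makes overlap between the two regions harmless.
    fn wrap_copy_model<T: Copy>(buf: &mut [T], src: usize, dst: usize, len: usize) {
        let cap = buf.len();
        let tmp: Vec<T> = (0..len).map(|i| buf[(src + i) % cap]).collect();
        for (i, v) in tmp.into_iter().enumerate() {
            buf[(dst + i) % cap] = v;
        }
    }

    fn main() {
        let mut ring = [0, 1, 2, 3, 4, 5, 6, 7];
        // Copy 3 slots starting at physical index 6 to physical index 1;
        // the source region wraps past the end of the buffer.
        wrap_copy_model(&mut ring, 6, 1, 3);
        assert_eq!(ring, [0, 6, 7, 0, 4, 5, 6, 7]);
    }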
@@ -444,8 +390,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Assumes capacity is sufficient.
#[inline]
unsafe fn copy_slice(&mut self, dst: usize, src: &[T]) {
- debug_assert!(src.len() <= self.cap());
- let head_room = self.cap() - dst;
+ debug_assert!(src.len() <= self.capacity());
+ let head_room = self.capacity() - dst;
if src.len() <= head_room {
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len());
@@ -478,48 +424,100 @@ impl<T, A: Allocator> VecDeque<T, A> {
});
}
+ /// Writes all values from `iter` to `dst`, wrapping
+ /// around at the end of the buffer, and returns the
+ /// number of values written.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `iter` yields at most `len` items.
+ /// Assumes capacity is sufficient.
+ unsafe fn write_iter_wrapping(
+ &mut self,
+ dst: usize,
+ mut iter: impl Iterator<Item = T>,
+ len: usize,
+ ) -> usize {
+ struct Guard<'a, T, A: Allocator> {
+ deque: &'a mut VecDeque<T, A>,
+ written: usize,
+ }
+
+ impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
+ fn drop(&mut self) {
+ self.deque.len += self.written;
+ }
+ }
+
+ let head_room = self.capacity() - dst;
+
+ let mut guard = Guard { deque: self, written: 0 };
+
+ if head_room >= len {
+ unsafe { guard.deque.write_iter(dst, iter, &mut guard.written) };
+ } else {
+ unsafe {
+ guard.deque.write_iter(
+ dst,
+ ByRefSized(&mut iter).take(head_room),
+ &mut guard.written,
+ );
+ guard.deque.write_iter(0, iter, &mut guard.written)
+ };
+ }
+
+ guard.written
+ }
+
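The `Guard` above is the standard drop-guard pattern: `len` is bumped only by the number of items actually written, so a panicking iterator cannot leave the deque claiming uninitialized slots as initialized. The same idea in isolation, with a hypothetical `CountGuard`:

    struct CountGuard<'a> {
        written: usize,
        total: &'a mut usize,
    }

    impl Drop for CountGuard<'_> {
        fn drop(&mut self) {
            // Runs on normal exit *and* during unwinding, so `total`
            // always reflects exactly the work that completed.
            *self.total += self.written;
        }
    }

    fn main() {
        let mut total = 0;
        {
            let mut guard = CountGuard { written: 0, total: &mut total };
            for _ in 0..3 {
                // ... one unit of possibly-panicking work ...
                guard.written += 1;
            }
        } // guard drops here, committing the count
        assert_eq!(total, 3);
    }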
/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_capacity.
#[inline]
unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
- let new_capacity = self.cap();
+ let new_capacity = self.capacity();
+ debug_assert!(new_capacity >= old_capacity);
// Move the shortest contiguous section of the ring buffer
- // T H
+ //
+ // H := head
+ // L := last element (`self.to_physical_idx(self.len - 1)`)
+ //
+ // H L
// [o o o o o o o . ]
- // T H
+ // H L
// A [o o o o o o o . . . . . . . . . ]
- // H T
- // [o o . o o o o o ]
- // T H
+ // L H
+ // [o o o o o o o o ]
+ // H L
// B [. . . o o o o o o o . . . . . . ]
- // H T
- // [o o o o o . o o ]
- // H T
+ // L H
+ // [o o o o o o o o ]
+ // L H
// C [o o o o o . . . . . . . . . o o ]
- if self.tail <= self.head {
+ // can't use is_contiguous() because the capacity is already updated.
+ if self.head <= old_capacity - self.len {
// A
// Nop
- } else if self.head < old_capacity - self.tail {
- // B
- unsafe {
- self.copy_nonoverlapping(old_capacity, 0, self.head);
- }
- self.head += old_capacity;
- debug_assert!(self.head > self.tail);
} else {
- // C
- let new_tail = new_capacity - (old_capacity - self.tail);
- unsafe {
- self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+ let head_len = old_capacity - self.head;
+ let tail_len = self.len - head_len;
+ if head_len > tail_len && new_capacity - old_capacity >= tail_len {
+ // B
+ unsafe {
+ self.copy_nonoverlapping(0, old_capacity, tail_len);
+ }
+ } else {
+ // C
+ let new_head = new_capacity - head_len;
+ unsafe {
+ // can't use copy_nonoverlapping here, because if e.g. head_len = 2
+ // and new_capacity = old_capacity + 1, then the heads overlap.
+ self.copy(self.head, new_head, head_len);
+ }
+ self.head = new_head;
}
- self.tail = new_tail;
- debug_assert!(self.head < self.tail);
}
- debug_assert!(self.head < self.cap());
- debug_assert!(self.tail < self.cap());
- debug_assert!(self.cap().count_ones() == 1);
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
}
}
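The observable effect of this strategy, in safe code: growing a wrapped deque relocates one physical chunk but never disturbs the logical order:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = VecDeque::with_capacity(4);
        dq.push_back(1);
        dq.push_back(2);
        dq.push_front(0); // head wraps to the end of the buffer

        dq.reserve(100); // reallocates, triggering the fixup above
        assert_eq!(dq.iter().copied().collect::<Vec<_>>(), vec![0, 1, 2]);
    }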
@@ -533,6 +531,7 @@ impl<T> VecDeque<T> {
///
/// let deque: VecDeque<u32> = VecDeque::new();
/// ```
+ // FIXME: This should probably be const
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
@@ -569,8 +568,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
- pub fn new_in(alloc: A) -> VecDeque<T, A> {
- VecDeque::with_capacity_in(INITIAL_CAPACITY, alloc)
+ pub const fn new_in(alloc: A) -> VecDeque<T, A> {
+ VecDeque { head: 0, len: 0, buf: RawVec::new_in(alloc) }
}
/// Creates an empty deque with space for at least `capacity` elements.
@@ -584,11 +583,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
- assert!(capacity < 1_usize << usize::BITS - 1, "capacity overflow");
- // +1 since the ringbuffer always leaves one space empty
- let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
-
- VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) }
+ VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
}
/// Provides a reference to the element at the given index.
@@ -608,8 +603,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
- if index < self.len() {
- let idx = self.wrap_add(self.tail, index);
+ if index < self.len {
+ let idx = self.to_physical_idx(index);
unsafe { Some(&*self.ptr().add(idx)) }
} else {
None
@@ -637,8 +632,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
- if index < self.len() {
- let idx = self.wrap_add(self.tail, index);
+ if index < self.len {
+ let idx = self.to_physical_idx(index);
unsafe { Some(&mut *self.ptr().add(idx)) }
} else {
None
@@ -672,8 +667,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
pub fn swap(&mut self, i: usize, j: usize) {
assert!(i < self.len());
assert!(j < self.len());
- let ri = self.wrap_add(self.tail, i);
- let rj = self.wrap_add(self.tail, j);
+ let ri = self.to_physical_idx(i);
+ let rj = self.to_physical_idx(j);
unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
}
@@ -691,7 +686,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
- self.cap() - 1
+ if T::IS_ZST { usize::MAX } else { self.buf.capacity() }
}
/// Reserves the minimum capacity for at least `additional` more elements to be inserted in the
@@ -718,7 +713,15 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// [`reserve`]: VecDeque::reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
- self.reserve(additional);
+ let new_cap = self.len.checked_add(additional).expect("capacity overflow");
+ let old_cap = self.capacity();
+
+ if new_cap > old_cap {
+ self.buf.reserve_exact(self.len, additional);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
@@ -739,15 +742,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
- let old_cap = self.cap();
- let used_cap = self.len() + 1;
- let new_cap = used_cap
- .checked_add(additional)
- .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
- .expect("capacity overflow");
+ let new_cap = self.len.checked_add(additional).expect("capacity overflow");
+ let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.reserve_exact(used_cap, new_cap - used_cap);
+ // we don't need to reserve_exact(), as the size doesn't have
+ // to be a power of 2.
+ self.buf.reserve(self.len, additional);
unsafe {
self.handle_capacity_increase(old_cap);
}
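With the power-of-two requirement gone, `reserve` can defer to `RawVec`'s growth policy and `reserve_exact` can request exactly the needed amount (the allocator may still round up). Either way only a lower bound on `capacity()` is guaranteed:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<u8> = VecDeque::new();
        dq.reserve_exact(10);
        assert!(dq.capacity() >= 10); // exact request, but only a lower bound

        dq.reserve(100);
        assert!(dq.capacity() >= 100); // may over-allocate for amortization
    }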
@@ -793,7 +794,17 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.try_reserve(additional)
+ let new_cap =
+ self.len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let old_cap = self.capacity();
+
+ if new_cap > old_cap {
+ self.buf.try_reserve_exact(self.len, additional)?;
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ Ok(())
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -831,15 +842,12 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- let old_cap = self.cap();
- let used_cap = self.len() + 1;
- let new_cap = used_cap
- .checked_add(additional)
- .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
- .ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let new_cap =
+ self.len.checked_add(additional).ok_or(TryReserveErrorKind::CapacityOverflow)?;
+ let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
+ self.buf.try_reserve(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -890,13 +898,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "shrink_to", since = "1.56.0")]
pub fn shrink_to(&mut self, min_capacity: usize) {
- let min_capacity = cmp::min(min_capacity, self.capacity());
- // We don't have to worry about an overflow as neither `self.len()` nor `self.capacity()`
- // can ever be `usize::MAX`. +1 as the ringbuffer always leaves one space empty.
- let target_cap = cmp::max(cmp::max(min_capacity, self.len()) + 1, MINIMUM_CAPACITY + 1)
- .next_power_of_two();
+ let target_cap = min_capacity.max(self.len);
- if target_cap < self.cap() {
+ // never shrink ZSTs
+ if T::IS_ZST || self.capacity() <= target_cap {
+ return;
+ }
+
+ if target_cap < self.capacity() {
// There are three cases of interest:
// All elements are out of desired bounds
// Elements are contiguous, and head is out of desired bounds
@@ -905,49 +914,55 @@ impl<T, A: Allocator> VecDeque<T, A> {
// At all other times, element positions are unaffected.
//
// Indicates that elements at the head should be moved.
- let head_outside = self.head == 0 || self.head >= target_cap;
+
+ let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
// Move elements from out of desired bounds (positions after target_cap)
- if self.tail >= target_cap && head_outside {
- // T H
+ if self.len == 0 {
+ self.head = 0;
+ } else if self.head >= target_cap && tail_outside {
+ // H := head
+ // L := last element
+ // H L
// [. . . . . . . . o o o o o o o . ]
- // T H
+ // H L
// [o o o o o o o . ]
unsafe {
- self.copy_nonoverlapping(0, self.tail, self.len());
+ // nonoverlapping because self.head >= target_cap >= self.len
+ self.copy_nonoverlapping(self.head, 0, self.len);
}
- self.head = self.len();
- self.tail = 0;
- } else if self.tail != 0 && self.tail < target_cap && head_outside {
- // T H
+ self.head = 0;
+ } else if self.head < target_cap && tail_outside {
+ // H := head
+ // L := last element
+ // H L
// [. . . o o o o o o o . . . . . . ]
- // H T
+ // L H
// [o o . o o o o o ]
- let len = self.wrap_sub(self.head, target_cap);
+ let len = self.head + self.len - target_cap;
unsafe {
- self.copy_nonoverlapping(0, target_cap, len);
+ self.copy_nonoverlapping(target_cap, 0, len);
}
- self.head = len;
- debug_assert!(self.head < self.tail);
- } else if self.tail >= target_cap {
- // H T
+ } else if self.head >= target_cap {
+ // H := head
+ // L := last element
+ // L H
// [o o o o o . . . . . . . . . o o ]
- // H T
+ // L H
// [o o o o o . o o ]
- debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
- let len = self.cap() - self.tail;
- let new_tail = target_cap - len;
+ let len = self.capacity() - self.head;
+ let new_head = target_cap - len;
unsafe {
- self.copy_nonoverlapping(new_tail, self.tail, len);
+ // can't use copy_nonoverlapping here for the same reason
+ // as in `handle_capacity_increase()`
+ self.copy(self.head, new_head, len);
}
- self.tail = new_tail;
- debug_assert!(self.head < self.tail);
+ self.head = new_head;
}
self.buf.shrink_to_fit(target_cap);
- debug_assert!(self.head < self.cap());
- debug_assert!(self.tail < self.cap());
- debug_assert!(self.cap().count_ones() == 1);
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
+ debug_assert!(self.len <= self.capacity());
}
}
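From the caller's side, `shrink_to` clamps the request to the current length and treats it as a lower bound:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = VecDeque::with_capacity(16);
        dq.extend(0..4);
        dq.shrink_to(8);
        assert!(dq.capacity() >= 8);
        dq.shrink_to(0); // cannot shrink below the current length
        assert!(dq.capacity() >= dq.len());
        assert_eq!(dq.iter().copied().collect::<Vec<_>>(), vec![0, 1, 2, 3]);
    }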
@@ -992,20 +1007,20 @@ impl<T, A: Allocator> VecDeque<T, A> {
// * The head of the VecDeque is moved before calling `drop_in_place`,
// so no value is dropped twice if `drop_in_place` panics
unsafe {
- if len > self.len() {
+ if len >= self.len {
return;
}
- let num_dropped = self.len() - len;
+
let (front, back) = self.as_mut_slices();
if len > front.len() {
let begin = len - front.len();
let drop_back = back.get_unchecked_mut(begin..) as *mut _;
- self.head = self.wrap_sub(self.head, num_dropped);
+ self.len = len;
ptr::drop_in_place(drop_back);
} else {
let drop_back = back as *mut _;
let drop_front = front.get_unchecked_mut(len..) as *mut _;
- self.head = self.wrap_sub(self.head, num_dropped);
+ self.len = len;
// Make sure the second half is dropped even when a destructor
// in the first one panics.
@@ -1039,7 +1054,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, T> {
- Iter::new(unsafe { self.buffer_as_slice() }, self.tail, self.head)
+ let (a, b) = self.as_slices();
+ Iter::new(a.iter(), b.iter())
}
/// Returns a front-to-back iterator that returns mutable references.
@@ -1061,11 +1077,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
- // SAFETY: The internal `IterMut` safety invariant is established because the
- // `ring` we create is a dereferenceable slice for lifetime '_.
- let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
-
- unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) }
+ let (a, b) = self.as_mut_slices();
+ IterMut::new(a.iter_mut(), b.iter_mut())
}
/// Returns a pair of slices which contain, in order, the contents of the
@@ -1097,14 +1110,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- let buf = self.buffer_as_slice();
- let (front, back) = RingSlices::ring_slices(buf, self.head, self.tail);
- (MaybeUninit::slice_assume_init_ref(front), MaybeUninit::slice_assume_init_ref(back))
- }
+ let (a_range, b_range) = self.slice_ranges(..);
+ // SAFETY: `slice_ranges` always returns valid ranges into
+ // the physical buffer.
+ unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
}
/// Returns a pair of slices which contain, in order, the contents of the
@@ -1135,16 +1144,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- let head = self.head;
- let tail = self.tail;
- let buf = self.buffer_as_mut_slice();
- let (front, back) = RingSlices::ring_slices(buf, head, tail);
- (MaybeUninit::slice_assume_init_mut(front), MaybeUninit::slice_assume_init_mut(back))
- }
+ let (a_range, b_range) = self.slice_ranges(..);
+ // SAFETY: `slice_ranges` always returns valid ranges into
+ // the physical buffer.
+ unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
}
/// Returns the number of elements in the deque.
@@ -1161,7 +1164,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
- count(self.tail, self.head, self.cap())
+ self.len
}
/// Returns `true` if the deque is empty.
@@ -1178,17 +1181,41 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
- self.tail == self.head
+ self.len == 0
}
- fn range_tail_head<R>(&self, range: R) -> (usize, usize)
+ /// Given a range into the logical buffer of the deque, this function
+ /// returns two ranges into the physical buffer that correspond to
+ /// the given range.
+ fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
where
R: RangeBounds<usize>,
{
- let Range { start, end } = slice::range(range, ..self.len());
- let tail = self.wrap_add(self.tail, start);
- let head = self.wrap_add(self.tail, end);
- (tail, head)
+ let Range { start, end } = slice::range(range, ..self.len);
+ let len = end - start;
+
+ if len == 0 {
+ (0..0, 0..0)
+ } else {
+ // `slice::range` guarantees that `start <= end <= self.len`.
+ // because `len != 0`, we know that `start < end`, so `start < self.len`
+ // and the indexing is valid.
+ let wrapped_start = self.to_physical_idx(start);
+
+ // this subtraction can never overflow because `wrapped_start` is
+ // at most `self.capacity()` (and if `self.capacity() != 0`, then `wrapped_start`
+ // is strictly less than `self.capacity()`).
+ let head_len = self.capacity() - wrapped_start;
+
+ if head_len >= len {
+ // we know that `len + wrapped_start <= self.capacity() <= usize::MAX`, so this addition can't overflow
+ (wrapped_start..wrapped_start + len, 0..0)
+ } else {
+ // can't overflow because of the if condition
+ let tail_len = len - head_len;
+ (wrapped_start..self.capacity(), 0..tail_len)
+ }
+ }
}
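A hypothetical stand-alone version of the same computation, for a deque of capacity 8 whose head sits at physical index 6:

    use std::ops::Range;

    fn slice_ranges(head: usize, cap: usize, start: usize, end: usize)
        -> (Range<usize>, Range<usize>)
    {
        let len = end - start;
        if len == 0 {
            return (0..0, 0..0);
        }
        let wrapped_start = (head + start) % cap;
        let head_len = cap - wrapped_start;
        if head_len >= len {
            (wrapped_start..wrapped_start + len, 0..0)
        } else {
            (wrapped_start..cap, 0..len - head_len)
        }
    }

    fn main() {
        // logical range 0..5 wraps around the end of the buffer
        assert_eq!(slice_ranges(6, 8, 0, 5), (6..8, 0..3));
        // a sub-range that ends before the wrap point stays in one piece
        assert_eq!(slice_ranges(6, 8, 0, 2), (6..8, 0..0));
    }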
/// Creates an iterator that covers the specified range in the deque.
@@ -1217,9 +1244,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (tail, head) = self.range_tail_head(range);
- // The shared reference we have in &self is maintained in the '_ of Iter.
- Iter::new(unsafe { self.buffer_as_slice() }, tail, head)
+ let (a_range, b_range) = self.slice_ranges(range);
+ // SAFETY: The ranges returned by `slice_ranges`
+ // are valid ranges into the physical buffer, so
+ // it's ok to pass them to `buffer_range` and
+ // dereference the result.
+ let a = unsafe { &*self.buffer_range(a_range) };
+ let b = unsafe { &*self.buffer_range(b_range) };
+ Iter::new(a.iter(), b.iter())
}
/// Creates an iterator that covers the specified mutable range in the deque.
@@ -1252,13 +1284,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (tail, head) = self.range_tail_head(range);
-
- // SAFETY: The internal `IterMut` safety invariant is established because the
- // `ring` we create is a dereferenceable slice for lifetime '_.
- let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
-
- unsafe { IterMut::new(ring, tail, head, PhantomData) }
+ let (a_range, b_range) = self.slice_ranges(range);
+ // SAFETY: The ranges returned by `slice_ranges`
+ // are valid ranges into the physical buffer, so
+ // it's ok to pass them to `buffer_range` and
+ // dereference the result.
+ let a = unsafe { &mut *self.buffer_range(a_range) };
+ let b = unsafe { &mut *self.buffer_range(b_range) };
+ IterMut::new(a.iter_mut(), b.iter_mut())
}
/// Removes the specified range from the deque in bulk, returning all
@@ -1310,39 +1343,30 @@ impl<T, A: Allocator> VecDeque<T, A> {
// When finished, the remaining data will be copied back to cover the hole,
// and the head/tail values will be restored correctly.
//
- let (drain_tail, drain_head) = self.range_tail_head(range);
+ let Range { start, end } = slice::range(range, ..self.len);
+ let drain_start = start;
+ let drain_len = end - start;
// The deque's elements are parted into three segments:
- // * self.tail -> drain_tail
- // * drain_tail -> drain_head
- // * drain_head -> self.head
+ // * 0 -> drain_start
+ // * drain_start -> drain_start+drain_len
+ // * drain_start+drain_len -> self.len
//
- // T = self.tail; H = self.head; t = drain_tail; h = drain_head
+ // H = self.head; T = self.head+self.len; h = self.head+drain_start; t = h+drain_len
//
- // We store drain_tail as self.head, and drain_head and self.head as
- // after_tail and after_head respectively on the Drain. This also
+ // We temporarily store drain_start in self.len, and save drain_len and the
+ // original self.len on the Drain as drain_len and orig_len respectively. This also
// truncates the effective array such that if the Drain is leaked, we
// have forgotten about the potentially moved values after the start of
// the drain.
//
- // T t h H
+ // H h t T
// [. . . o o x x o o . . .]
//
- let head = self.head;
-
// "forget" about the values after the start of the drain until after
// the drain is complete and the Drain destructor is run.
- self.head = drain_tail;
- let deque = NonNull::from(&mut *self);
- unsafe {
- // Crucially, we only create shared references from `self` here and read from
- // it. We do not write to `self` nor reborrow to a mutable reference.
- // Hence the raw pointer we created above, for `deque`, remains valid.
- let ring = self.buffer_as_slice();
-
- Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
- }
+ unsafe { Drain::new(self, drain_start, drain_len) }
}
/// Clears the deque, removing all values.
@@ -1361,6 +1385,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
pub fn clear(&mut self) {
self.truncate(0);
+ // Not strictly necessary, but leaves things in a more consistent/predictable state.
+ self.head = 0;
}
/// Returns `true` if the deque contains an element equal to the
@@ -1455,7 +1481,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
- self.get(self.len().wrapping_sub(1))
+ self.get(self.len.wrapping_sub(1))
}
/// Provides a mutable reference to the back element, or `None` if the
@@ -1479,7 +1505,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
- self.get_mut(self.len().wrapping_sub(1))
+ self.get_mut(self.len.wrapping_sub(1))
}
/// Removes the first element and returns it, or `None` if the deque is
@@ -1503,9 +1529,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
if self.is_empty() {
None
} else {
- let tail = self.tail;
- self.tail = self.wrap_add(self.tail, 1);
- unsafe { Some(self.buffer_read(tail)) }
+ let old_head = self.head;
+ self.head = self.to_physical_idx(1);
+ self.len -= 1;
+ Some(unsafe { self.buffer_read(old_head) })
}
}
@@ -1528,9 +1555,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
if self.is_empty() {
None
} else {
- self.head = self.wrap_sub(self.head, 1);
- let head = self.head;
- unsafe { Some(self.buffer_read(head)) }
+ self.len -= 1;
+ Some(unsafe { self.buffer_read(self.to_physical_idx(self.len)) })
}
}
@@ -1552,10 +1578,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- self.tail = self.wrap_sub(self.tail, 1);
- let tail = self.tail;
+ self.head = self.wrap_sub(self.head, 1);
+ self.len += 1;
+
unsafe {
- self.buffer_write(tail, value);
+ self.buffer_write(self.head, value);
}
}
@@ -1577,16 +1604,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- let head = self.head;
- self.head = self.wrap_add(self.head, 1);
- unsafe { self.buffer_write(head, value) }
+ unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
+ self.len += 1;
}
#[inline]
fn is_contiguous(&self) -> bool {
- // FIXME: Should we consider `head == 0` to mean
- // that `self` is contiguous?
- self.tail <= self.head
+ // Do the calculation like this to avoid overflowing if len + head > usize::MAX
+ self.head <= self.capacity() - self.len
}
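The rearranged comparison matters in one extreme case: for ZSTs, `capacity()` is `usize::MAX`, so `head + len` can overflow even though `capacity() - len` never underflows (because `len <= capacity()` is an invariant). A sketch of the overflow-safe form:

    fn is_contiguous_safe(head: usize, len: usize, cap: usize) -> bool {
        // `len <= cap` is an invariant, so this subtraction can't underflow,
        // whereas `head + len <= cap` could overflow `usize` when
        // `cap == usize::MAX` (the ZST case).
        head <= cap - len
    }

    fn main() {
        assert!(is_contiguous_safe(0, usize::MAX, usize::MAX));
        assert!(!is_contiguous_safe(5, usize::MAX - 2, usize::MAX));
    }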
/// Removes an element from anywhere in the deque and returns it,
@@ -1615,8 +1640,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
- let length = self.len();
- if length > 0 && index < length && index != 0 {
+ let length = self.len;
+ if index < length && index != 0 {
self.swap(index, 0);
} else if index >= length {
return None;
@@ -1650,7 +1675,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
- let length = self.len();
+ let length = self.len;
if length > 0 && index < length - 1 {
self.swap(index, length - 1);
} else if index >= length {
@@ -1689,198 +1714,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
self.grow();
}
- // Move the least number of elements in the ring buffer and insert
- // the given object
- //
- // At most len/2 - 1 elements will be moved. O(min(n, n-i))
- //
- // There are three main cases:
- // Elements are contiguous
- // - special case when tail is 0
- // Elements are discontiguous and the insert is in the tail section
- // Elements are discontiguous and the insert is in the head section
- //
- // For each of those there are two more cases:
- // Insert is closer to tail
- // Insert is closer to head
- //
- // Key: H - self.head
- // T - self.tail
- // o - Valid element
- // I - Insertion element
- // A - The element that should be after the insertion point
- // M - Indicates element was moved
-
- let idx = self.wrap_add(self.tail, index);
-
- let distance_to_tail = index;
- let distance_to_head = self.len() - index;
-
- let contiguous = self.is_contiguous();
-
- match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
- (true, true, _) if index == 0 => {
- // push_front
- //
- // T
- // I H
- // [A o o o o o o . . . . . . . . .]
- //
- // H T
- // [A o o o o o o o . . . . . I]
- //
-
- self.tail = self.wrap_sub(self.tail, 1);
- }
- (true, true, _) => {
- unsafe {
- // contiguous, insert closer to tail:
- //
- // T I H
- // [. . . o o A o o o o . . . . . .]
- //
- // T H
- // [. . o o I A o o o o . . . . . .]
- // M M
- //
- // contiguous, insert closer to tail and tail is 0:
- //
- //
- // T I H
- // [o o A o o o o . . . . . . . . .]
- //
- // H T
- // [o I A o o o o o . . . . . . . o]
- // M M
-
- let new_tail = self.wrap_sub(self.tail, 1);
-
- self.copy(new_tail, self.tail, 1);
- // Already moved the tail, so we only copy `index - 1` elements.
- self.copy(self.tail, self.tail + 1, index - 1);
-
- self.tail = new_tail;
- }
- }
- (true, false, _) => {
- unsafe {
- // contiguous, insert closer to head:
- //
- // T I H
- // [. . . o o o o A o o . . . . . .]
- //
- // T H
- // [. . . o o o o I A o o . . . . .]
- // M M M
-
- self.copy(idx + 1, idx, self.head - idx);
- self.head = self.wrap_add(self.head, 1);
- }
- }
- (false, true, true) => {
- unsafe {
- // discontiguous, insert closer to tail, tail section:
- //
- // H T I
- // [o o o o o o . . . . . o o A o o]
- //
- // H T
- // [o o o o o o . . . . o o I A o o]
- // M M
-
- self.copy(self.tail - 1, self.tail, index);
- self.tail -= 1;
- }
- }
- (false, false, true) => {
- unsafe {
- // discontiguous, insert closer to head, tail section:
- //
- // H T I
- // [o o . . . . . . . o o o o o A o]
- //
- // H T
- // [o o o . . . . . . o o o o o I A]
- // M M M M
-
- // copy elements up to new head
- self.copy(1, 0, self.head);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(0, self.cap() - 1, 1);
-
- // move elements from idx to end forward not including ^ element
- self.copy(idx + 1, idx, self.cap() - 1 - idx);
-
- self.head += 1;
- }
- }
- (false, true, false) if idx == 0 => {
- unsafe {
- // discontiguous, insert is closer to tail, head section,
- // and is at index zero in the internal buffer:
- //
- // I H T
- // [A o o o o o o o o o . . . o o o]
- //
- // H T
- // [A o o o o o o o o o . . o o o I]
- // M M M
-
- // copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(self.cap() - 1, 0, 1);
-
- self.tail -= 1;
- }
- }
- (false, true, false) => {
- unsafe {
- // discontiguous, insert closer to tail, head section:
- //
- // I H T
- // [o o o A o o o o o o . . . o o o]
- //
- // H T
- // [o o I A o o o o o o . . o o o o]
- // M M M M M M
-
- // copy elements up to new tail
- self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
-
- // copy last element into empty spot at bottom of buffer
- self.copy(self.cap() - 1, 0, 1);
-
- // move elements from idx-1 to end forward not including ^ element
- self.copy(0, 1, idx - 1);
-
- self.tail -= 1;
- }
+ let k = self.len - index;
+ if k < index {
+ // `index + 1` can't overflow, because if index was usize::MAX, then either the
+ // assert would've failed, or the deque would've tried to grow past usize::MAX
+ // and panicked.
+ unsafe {
+ // see `remove()` for explanation why this wrap_copy() call is safe.
+ self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
}
- (false, false, false) => {
- unsafe {
- // discontiguous, insert closer to head, head section:
- //
- // I H T
- // [o o o o A o o . . . . . . o o o]
- //
- // H T
- // [o o o o I A o o . . . . . o o o]
- // M M M
-
- self.copy(idx + 1, idx, self.head - idx);
- self.head += 1;
- }
+ } else {
+ let old_head = self.head;
+ self.head = self.wrap_sub(self.head, 1);
+ unsafe {
+ self.wrap_copy(old_head, self.head, index);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
}
}
-
- // tail might've been changed so we need to recalculate
- let new_idx = self.wrap_add(self.tail, index);
- unsafe {
- self.buffer_write(new_idx, value);
- }
}
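The rewrite keeps `insert`'s documented O(min(i, n - i)) cost by shifting whichever side of the insertion point is shorter:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<char> = ('a'..='e').collect();
        dq.insert(1, 'X'); // closer to the front: the front is shifted
        assert_eq!(dq.iter().collect::<String>(), "aXbcde");
        dq.insert(5, 'Y'); // closer to the back: the back is shifted
        assert_eq!(dq.iter().collect::<String>(), "aXbcdYe");
    }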
/// Removes and returns the element at `index` from the deque.
@@ -1906,156 +1759,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
- if self.is_empty() || self.len() <= index {
+ if self.len <= index {
return None;
}
- // There are three main cases:
- // Elements are contiguous
- // Elements are discontiguous and the removal is in the tail section
- // Elements are discontiguous and the removal is in the head section
- // - special case when elements are technically contiguous,
- // but self.head = 0
- //
- // For each of those there are two more cases:
- // Insert is closer to tail
- // Insert is closer to head
- //
- // Key: H - self.head
- // T - self.tail
- // o - Valid element
- // x - Element marked for removal
- // R - Indicates element that is being removed
- // M - Indicates element was moved
-
- let idx = self.wrap_add(self.tail, index);
-
- let elem = unsafe { Some(self.buffer_read(idx)) };
+ let wrapped_idx = self.to_physical_idx(index);
- let distance_to_tail = index;
- let distance_to_head = self.len() - index;
+ let elem = unsafe { Some(self.buffer_read(wrapped_idx)) };
- let contiguous = self.is_contiguous();
-
- match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
- (true, true, _) => {
- unsafe {
- // contiguous, remove closer to tail:
- //
- // T R H
- // [. . . o o x o o o o . . . . . .]
- //
- // T H
- // [. . . . o o o o o o . . . . . .]
- // M M
-
- self.copy(self.tail + 1, self.tail, index);
- self.tail += 1;
- }
- }
- (true, false, _) => {
- unsafe {
- // contiguous, remove closer to head:
- //
- // T R H
- // [. . . o o o o x o o . . . . . .]
- //
- // T H
- // [. . . o o o o o o . . . . . . .]
- // M M
-
- self.copy(idx, idx + 1, self.head - idx - 1);
- self.head -= 1;
- }
- }
- (false, true, true) => {
- unsafe {
- // discontiguous, remove closer to tail, tail section:
- //
- // H T R
- // [o o o o o o . . . . . o o x o o]
- //
- // H T
- // [o o o o o o . . . . . . o o o o]
- // M M
-
- self.copy(self.tail + 1, self.tail, index);
- self.tail = self.wrap_add(self.tail, 1);
- }
- }
- (false, false, false) => {
- unsafe {
- // discontiguous, remove closer to head, head section:
- //
- // R H T
- // [o o o o x o o . . . . . . o o o]
- //
- // H T
- // [o o o o o o . . . . . . . o o o]
- // M M
-
- self.copy(idx, idx + 1, self.head - idx - 1);
- self.head -= 1;
- }
- }
- (false, false, true) => {
- unsafe {
- // discontiguous, remove closer to head, tail section:
- //
- // H T R
- // [o o o . . . . . . o o o o o x o]
- //
- // H T
- // [o o . . . . . . . o o o o o o o]
- // M M M M
- //
- // or quasi-discontiguous, remove next to head, tail section:
- //
- // H T R
- // [. . . . . . . . . o o o o o x o]
- //
- // T H
- // [. . . . . . . . . o o o o o o .]
- // M
-
- // draw in elements in the tail section
- self.copy(idx, idx + 1, self.cap() - idx - 1);
-
- // Prevents underflow.
- if self.head != 0 {
- // copy first element into empty spot
- self.copy(self.cap() - 1, 0, 1);
-
- // move elements in the head section backwards
- self.copy(0, 1, self.head - 1);
- }
-
- self.head = self.wrap_sub(self.head, 1);
- }
- }
- (false, true, false) => {
- unsafe {
- // discontiguous, remove closer to tail, head section:
- //
- // R H T
- // [o o x o o o o o o o . . . o o o]
- //
- // H T
- // [o o o o o o o o o o . . . . o o]
- // M M M M M
-
- // draw in elements up to idx
- self.copy(1, 0, idx);
-
- // copy last element into empty spot
- self.copy(0, self.cap() - 1, 1);
-
- // move elements from tail to end forward, excluding the last one
- self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
-
- self.tail = self.wrap_add(self.tail, 1);
- }
- }
+ let k = self.len - index - 1;
+ // safety: due to the nature of the if-condition, whichever wrap_copy gets called,
+ // its length argument will be at most `self.len / 2`, so there can't be more than
+ // one overlapping area.
+ if k < index {
+ unsafe { self.wrap_copy(self.wrap_add(wrapped_idx, 1), wrapped_idx, k) };
+ self.len -= 1;
+ } else {
+ let old_head = self.head;
+ self.head = self.to_physical_idx(1);
+ unsafe { self.wrap_copy(old_head, self.head, index) };
+ self.len -= 1;
}
elem
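`remove` mirrors that choice, closing the gap from whichever end is nearer:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = (0..6).collect();
        assert_eq!(dq.remove(1), Some(1)); // near the front: front shifts in
        assert_eq!(dq.remove(3), Some(4)); // near the back: back shifts in
        assert_eq!(dq.iter().copied().collect::<Vec<_>>(), vec![0, 2, 3, 5]);
        assert_eq!(dq.remove(10), None); // out of bounds
    }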
@@ -2091,7 +1814,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
A: Clone,
{
- let len = self.len();
+ let len = self.len;
assert!(at <= len, "`at` out of bounds");
let other_len = len - at;
@@ -2128,8 +1851,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
// Cleanup where the ends of the buffers are
- self.head = self.wrap_sub(self.head, other_len);
- other.head = other.wrap_index(other_len);
+ self.len = at;
+ other.len = other_len;
other
}
@@ -2154,17 +1877,26 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
- self.reserve(other.len());
+ if T::IS_ZST {
+ self.len += other.len;
+ other.len = 0;
+ other.head = 0;
+ return;
+ }
+
+ self.reserve(other.len);
unsafe {
let (left, right) = other.as_slices();
- self.copy_slice(self.head, left);
- self.copy_slice(self.wrap_add(self.head, left.len()), right);
+ self.copy_slice(self.to_physical_idx(self.len), left);
+ // no overflow, because self.capacity() >= old_cap + left.len() >= self.len + left.len()
+ self.copy_slice(self.to_physical_idx(self.len + left.len()), right);
}
// SAFETY: Update pointers after copying to avoid leaving doppelganger
// in case of panics.
- self.head = self.wrap_add(self.head, other.len());
- // Silently drop values in `other`.
- other.tail = other.head;
+ self.len += other.len;
+ // Now that we own its values, forget everything in `other`.
+ other.len = 0;
+ other.head = 0;
}
/// Retains only the elements specified by the predicate.
@@ -2232,7 +1964,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
F: FnMut(&mut T) -> bool,
{
- let len = self.len();
+ let len = self.len;
let mut idx = 0;
let mut cur = 0;
@@ -2270,9 +2002,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// Extend or possibly remove this assertion when valid use-cases for growing the
// buffer without it being full emerge
debug_assert!(self.is_full());
- let old_cap = self.cap();
- self.buf.reserve_exact(old_cap, old_cap);
- assert!(self.cap() == old_cap * 2);
+ let old_cap = self.capacity();
+ self.buf.reserve_for_push(old_cap);
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -2306,7 +2037,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "vec_resize_with", since = "1.33.0")]
pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
- let len = self.len();
+ let len = self.len;
if new_len > len {
self.extend(repeat_with(generator).take(new_len - len))
@@ -2372,110 +2103,129 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_make_contiguous", since = "1.48.0")]
pub fn make_contiguous(&mut self) -> &mut [T] {
+ if T::IS_ZST {
+ self.head = 0;
+ }
+
if self.is_contiguous() {
- let tail = self.tail;
- let head = self.head;
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- return unsafe {
- MaybeUninit::slice_assume_init_mut(
- RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
- )
- };
+ unsafe { return slice::from_raw_parts_mut(self.ptr().add(self.head), self.len) }
}
- let buf = self.buf.ptr();
- let cap = self.cap();
- let len = self.len();
+ let &mut Self { head, len, .. } = self;
+ let ptr = self.ptr();
+ let cap = self.capacity();
- let free = self.tail - self.head;
- let tail_len = cap - self.tail;
+ let free = cap - len;
+ let head_len = cap - head;
+ let tail = len - head_len;
+ let tail_len = tail;
- if free >= tail_len {
- // there is enough free space to copy the tail in one go,
- // this means that we first shift the head backwards, and then
- // copy the tail to the correct position.
+ if free >= head_len {
+ // there is enough free space to copy the head in one go,
+ // this means that we first shift the tail backwards, and then
+ // copy the head to the correct position.
//
// from: DEFGH....ABC
// to: ABCDEFGH....
unsafe {
- ptr::copy(buf, buf.add(tail_len), self.head);
+ self.copy(0, head_len, tail_len);
// ...DEFGH.ABC
- ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+ self.copy_nonoverlapping(head, 0, head_len);
// ABCDEFGH....
-
- self.tail = 0;
- self.head = len;
}
- } else if free > self.head {
- // FIXME: We currently do not consider ....ABCDEFGH
- // to be contiguous because `head` would be `0` in this
- // case. While we probably want to change this it
- // isn't trivial as a few places expect `is_contiguous`
- // to mean that we can just slice using `buf[tail..head]`.
- // there is enough free space to copy the head in one go,
- // this means that we first shift the tail forwards, and then
- // copy the head to the correct position.
+ self.head = 0;
+ } else if free >= tail_len {
+ // there is enough free space to copy the tail in one go,
+ // this means that we first shift the head forwards, and then
+ // copy the tail to the correct position.
//
// from: FGH....ABCDE
// to: ...ABCDEFGH.
unsafe {
- ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+ self.copy(head, tail, head_len);
// FGHABCDE....
- ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+ self.copy_nonoverlapping(0, tail + head_len, tail_len);
// ...ABCDEFGH.
-
- self.tail = self.head;
- self.head = self.wrap_add(self.tail, len);
}
+
+ self.head = tail;
} else {
- // free is smaller than both head and tail,
- // this means we have to slowly "swap" the tail and the head.
+ // `free` is smaller than both `head_len` and `tail_len`.
+ // the general algorithm for this first moves the slices
+ // right next to each other and then uses `slice::rotate`
+ // to rotate them into place:
//
- // from: EFGHI...ABCD or HIJK.ABCDEFG
- // to: ABCDEFGHI... or ABCDEFGHIJK.
- let mut left_edge: usize = 0;
- let mut right_edge: usize = self.tail;
- unsafe {
- // The general problem looks like this
- // GHIJKLM...ABCDEF - before any swaps
- // ABCDEFM...GHIJKL - after 1 pass of swaps
- // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
- // - then restart the algorithm with a new (smaller) store
- // Sometimes the temp store is reached when the right edge is at the end
- // of the buffer - this means we've hit the right order with fewer swaps!
- // E.g
- // EF..ABCD
- // ABCDEF.. - after four only swaps we've finished
- while left_edge < len && right_edge != cap {
- let mut right_offset = 0;
- for i in left_edge..right_edge {
- right_offset = (i - left_edge) % (cap - right_edge);
- let src = right_edge + right_offset;
- ptr::swap(buf.add(i), buf.add(src));
+ // initially: HIJK..ABCDEFG
+ // step 1: ..HIJKABCDEFG
+ // step 2: ..ABCDEFGHIJK
+ //
+ // or:
+ //
+ // initially: FGHIJK..ABCDE
+ // step 1: FGHIJKABCDE..
+ // step 2: ABCDEFGHIJK..
+
+ // pick the shorter of the 2 slices to reduce the amount
+ // of memory that needs to be moved around.
+ if head_len > tail_len {
+ // tail is shorter, so:
+ // 1. copy tail forwards
+ // 2. rotate used part of the buffer
+ // 3. update head to point to the new beginning (which is just `free`)
+
+ unsafe {
+ // if there is no free space in the buffer, then the slices are already
+ // right next to each other and we don't need to move any memory.
+ if free != 0 {
+ // because we only move the tail forward as much as there's free space
+ // behind it, we don't overwrite any elements of the head slice, and
+ // the slices end up right next to each other.
+ self.copy(0, free, tail_len);
}
- let n_ops = right_edge - left_edge;
- left_edge += n_ops;
- right_edge += right_offset + 1;
+
+ // We just copied the tail right next to the head slice,
+ // so all of the elements in the range are initialized
+ let slice = &mut *self.buffer_range(free..self.capacity());
+
+ // because the deque wasn't contiguous, we know that `tail_len < self.len == slice.len()`,
+ // so this will never panic.
+ slice.rotate_left(tail_len);
+
+ // the used part of the buffer now is `free..self.capacity()`, so set
+ // `head` to the beginning of that range.
+ self.head = free;
}
+ } else {
+ // head is shorter so:
+ // 1. copy head backwards
+ // 2. rotate used part of the buffer
+ // 3. update head to point to the new beginning (which is the beginning of the buffer)
+
+ unsafe {
+ // if there is no free space in the buffer, then the slices are already
+ // right next to each other and we don't need to move any memory.
+ if free != 0 {
+ // copy the head slice to lie right behind the tail slice.
+ self.copy(self.head, tail_len, head_len);
+ }
- self.tail = 0;
- self.head = len;
+ // because we copied the head slice so that both slices lie right
+ // next to each other, all the elements in the range are initialized.
+ let slice = &mut *self.buffer_range(0..self.len);
+
+ // because the deque wasn't contiguous, we know that `head_len < self.len == slice.len()`
+ // so this will never panic.
+ slice.rotate_right(head_len);
+
+ // the used part of the buffer now is `0..self.len`, so set
+ // `head` to the beginning of that range.
+ self.head = 0;
+ }
}
}
- let tail = self.tail;
- let head = self.head;
- // Safety:
- // - `self.head` and `self.tail` in a ring buffer are always valid indices.
- // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
- unsafe {
- MaybeUninit::slice_assume_init_mut(
- RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
- )
- }
+ unsafe { slice::from_raw_parts_mut(ptr.add(self.head), self.len) }
}
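End to end, the rotation-based approach is observable through `as_slices`:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<i32> = VecDeque::with_capacity(4);
        dq.push_back(2);
        dq.push_back(3);
        dq.push_front(1); // wraps: the contents span two slices
        assert!(!dq.as_slices().1.is_empty());

        assert_eq!(dq.make_contiguous(), &[1, 2, 3]);
        // afterwards everything lives in the first slice
        assert_eq!(dq.as_slices(), (&[1, 2, 3][..], &[][..]));
    }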
/// Rotates the double-ended queue `mid` places to the left.
@@ -2513,7 +2263,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
- let k = self.len() - mid;
+ let k = self.len - mid;
if mid <= k {
unsafe { self.rotate_left_inner(mid) }
} else {
@@ -2556,7 +2306,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "vecdeque_rotate", since = "1.36.0")]
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
- let mid = self.len() - k;
+ let mid = self.len - k;
if k <= mid {
unsafe { self.rotate_right_inner(k) }
} else {
@@ -2567,26 +2317,24 @@ impl<T, A: Allocator> VecDeque<T, A> {
// SAFETY: the following two methods require that the rotation amount
// be less than half the length of the deque.
//
- // `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
- // but than `min` is never more than half the capacity, regardless of x,
+ // `wrap_copy` requires that `min(x, capacity() - x) + copy_len <= capacity()`,
+ // but then `min` is never more than half the capacity, regardless of x,
// so it's sound to call here because we're calling with something
// less than half the length, which is never above half the capacity.
unsafe fn rotate_left_inner(&mut self, mid: usize) {
debug_assert!(mid * 2 <= self.len());
unsafe {
- self.wrap_copy(self.head, self.tail, mid);
+ self.wrap_copy(self.head, self.to_physical_idx(self.len), mid);
}
- self.head = self.wrap_add(self.head, mid);
- self.tail = self.wrap_add(self.tail, mid);
+ self.head = self.to_physical_idx(mid);
}
unsafe fn rotate_right_inner(&mut self, k: usize) {
debug_assert!(k * 2 <= self.len());
self.head = self.wrap_sub(self.head, k);
- self.tail = self.wrap_sub(self.tail, k);
unsafe {
- self.wrap_copy(self.tail, self.head, k);
+ self.wrap_copy(self.to_physical_idx(self.len), self.head, k);
}
}
@@ -2833,29 +2581,30 @@ impl<T: Clone, A: Allocator> VecDeque<T, A> {
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn resize(&mut self, new_len: usize, value: T) {
- self.resize_with(new_len, || value.clone());
+ if new_len > self.len() {
+ let extra = new_len - self.len();
+ self.extend(repeat_n(value, extra))
+ } else {
+ self.truncate(new_len);
+ }
}
}
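Routing growth through `iter::repeat_n` (rather than `resize_with(|| value.clone())`) lets the final slot take the original value instead of paying for one extra clone. The public behavior is unchanged:

    use std::collections::VecDeque;

    fn main() {
        let mut dq: VecDeque<String> = VecDeque::new();
        dq.resize(3, String::from("x")); // grows: clones all but the last slot
        assert_eq!(dq.len(), 3);
        dq.resize(1, String::from("ignored")); // shrinks: just truncates
        assert_eq!(dq.len(), 1);
    }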
/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
-fn wrap_index(index: usize, size: usize) -> usize {
- // size is always a power of 2
- debug_assert!(size.is_power_of_two());
- index & (size - 1)
-}
-
-/// Calculate the number of elements left to be read in the buffer
-#[inline]
-fn count(tail: usize, head: usize, size: usize) -> usize {
- // size is always a power of 2
- (head.wrapping_sub(tail)) & (size - 1)
+fn wrap_index(logical_index: usize, capacity: usize) -> usize {
+ debug_assert!(
+ (logical_index == 0 && capacity == 0)
+ || logical_index < capacity
+ || (logical_index - capacity) < capacity
+ );
+ if logical_index >= capacity { logical_index - capacity } else { logical_index }
}
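With capacities no longer forced to powers of two, the old bitmask trick gives way to a single conditional subtraction; the debug assertion pins down the contract that `logical_index < 2 * capacity`. Extracted for illustration:

    fn wrap_index(logical_index: usize, capacity: usize) -> usize {
        if logical_index >= capacity { logical_index - capacity } else { logical_index }
    }

    fn main() {
        // head = 5, capacity = 7: logical index 4 lands at physical slot 2
        assert_eq!(wrap_index(5 + 4, 7), 2);
        // no wrap needed while the sum stays below the capacity
        assert_eq!(wrap_index(3, 7), 3);
    }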
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialEq, A: Allocator> PartialEq for VecDeque<T, A> {
fn eq(&self, other: &Self) -> bool {
- if self.len() != other.len() {
+ if self.len != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
@@ -2919,7 +2668,7 @@ impl<T: Ord, A: Allocator> Ord for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash, A: Allocator> Hash for VecDeque<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
- state.write_length_prefix(self.len());
+ state.write_length_prefix(self.len);
// It's not possible to use Hash::hash_slice on slices
// returned by as_slices method as their length can vary
// in otherwise identical deques.
@@ -2950,12 +2699,18 @@ impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for VecDeque<T> {
+ #[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
- let mut deq = VecDeque::with_capacity(lower);
- deq.extend(iterator);
- deq
+ // Since converting is O(1) now, might as well re-use that logic
+ // (including things like the `vec::IntoIter`→`Vec` specialization)
+ // especially as that could save us some monomorphization work
+ // if one uses the same iterators (like slice ones) with both.
+ return from_iter_via_vec(iter.into_iter());
+
+ #[inline]
+ fn from_iter_via_vec<U>(iter: impl Iterator<Item = U>) -> VecDeque<U> {
+ Vec::from_iter(iter).into()
+ }
}
}
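Since `Vec<T>` to `VecDeque<T>` is now O(1) (same buffer, `head` at 0), collecting through `Vec` costs nothing extra and inherits `Vec`'s `FromIterator` specializations:

    use std::collections::VecDeque;

    fn main() {
        // both of these now route through Vec's collection machinery
        let a: VecDeque<i32> = (0..4).collect();
        let b = VecDeque::from(vec![0, 1, 2, 3]);
        assert_eq!(a, b);
    }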
@@ -3028,7 +2783,7 @@ impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for VecDeque<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_list().entries(self).finish()
+ f.debug_list().entries(self.iter()).finish()
}
}
@@ -3039,31 +2794,13 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
- /// This avoids reallocating where possible, but the conditions for that are
- /// strict, and subject to change, and so shouldn't be relied upon unless the
- /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
- fn from(mut other: Vec<T, A>) -> Self {
- let len = other.len();
- if T::IS_ZST {
- // There's no actual allocation for ZSTs to worry about capacity,
- // but `VecDeque` can't handle as much length as `Vec`.
- assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
- } else {
- // We need to resize if the capacity is not a power of two, too small or
- // doesn't have at least one free space. We do this while it's still in
- // the `Vec` so the items will drop on panic.
- let min_cap = cmp::max(MINIMUM_CAPACITY, len) + 1;
- let cap = cmp::max(min_cap, other.capacity()).next_power_of_two();
- if other.capacity() != cap {
- other.reserve_exact(cap - len);
- }
- }
-
- unsafe {
- let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc();
- let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc);
- VecDeque { tail: 0, head: len, buf }
- }
+ /// In its current implementation, this is a very cheap
+ /// conversion. This isn't yet a guarantee though, and
+ /// shouldn't be relied on.
+ #[inline]
+ fn from(other: Vec<T, A>) -> Self {
+ let (ptr, len, cap, alloc) = other.into_raw_parts_with_alloc();
+ Self { head: 0, len, buf: unsafe { RawVec::from_raw_parts_in(ptr, cap, alloc) } }
}
}
@@ -3105,11 +2842,11 @@ impl<T, A: Allocator> From<VecDeque<T, A>> for Vec<T, A> {
let other = ManuallyDrop::new(other);
let buf = other.buf.ptr();
let len = other.len();
- let cap = other.cap();
+ let cap = other.capacity();
let alloc = ptr::read(other.allocator());
- if other.tail != 0 {
- ptr::copy(buf.add(other.tail), buf, len);
+ if other.head != 0 {
+ ptr::copy(buf.add(other.head), buf, len);
}
Vec::from_raw_parts_in(buf, len, cap, alloc)
}
@@ -3136,8 +2873,8 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
}
}
- deq.tail = 0;
- deq.head = N;
+ deq.head = 0;
+ deq.len = N;
deq
}
}
diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs
deleted file mode 100644
index 6735424a3..000000000
--- a/library/alloc/src/collections/vec_deque/pair_slices.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use core::cmp::{self};
-use core::mem::replace;
-
-use crate::alloc::Allocator;
-
-use super::VecDeque;
-
-/// PairSlices pairs up equal length slice parts of two deques
-///
-/// For example, given deques "A" and "B" with the following division into slices:
-///
-/// A: [0 1 2] [3 4 5]
-/// B: [a b] [c d e]
-///
-/// It produces the following sequence of matching slices:
-///
-/// ([0 1], [a b])
-/// (\[2\], \[c\])
-/// ([3 4], [d e])
-///
-/// and the uneven remainder of either A or B is skipped.
-pub struct PairSlices<'a, 'b, T> {
- a0: &'a mut [T],
- a1: &'a mut [T],
- b0: &'b [T],
- b1: &'b [T],
-}
-
-impl<'a, 'b, T> PairSlices<'a, 'b, T> {
- pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
- let (a0, a1) = to.as_mut_slices();
- let (b0, b1) = from.as_slices();
- PairSlices { a0, a1, b0, b1 }
- }
-
- pub fn has_remainder(&self) -> bool {
- !self.b0.is_empty()
- }
-
- pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
- IntoIterator::into_iter([self.b0, self.b1])
- }
-}
-
-impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
- type Item = (&'a mut [T], &'b [T]);
- fn next(&mut self) -> Option<Self::Item> {
- // Get next part length
- let part = cmp::min(self.a0.len(), self.b0.len());
- if part == 0 {
- return None;
- }
- let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
- let (q0, q1) = self.b0.split_at(part);
-
- // Move a1 into a0, if it's empty (and b1, b0 the same way).
- self.a0 = p1;
- self.b0 = q1;
- if self.a0.is_empty() {
- self.a0 = replace(&mut self.a1, &mut []);
- }
- if self.b0.is_empty() {
- self.b0 = replace(&mut self.b1, &[]);
- }
- Some((p0, q0))
- }
-}
diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs
deleted file mode 100644
index dd0fa7d60..000000000
--- a/library/alloc/src/collections/vec_deque/ring_slices.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use core::ptr::{self};
-
-/// Returns the two slices that cover the `VecDeque`'s valid range
-pub trait RingSlices: Sized {
- fn slice(self, from: usize, to: usize) -> Self;
- fn split_at(self, i: usize) -> (Self, Self);
-
- fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
- let contiguous = tail <= head;
- if contiguous {
- let (empty, buf) = buf.split_at(0);
- (buf.slice(tail, head), empty)
- } else {
- let (mid, right) = buf.split_at(tail);
- let (left, _) = mid.split_at(head);
- (right, left)
- }
- }
-}
-
-impl<T> RingSlices for &[T] {
- fn slice(self, from: usize, to: usize) -> Self {
- &self[from..to]
- }
- fn split_at(self, i: usize) -> (Self, Self) {
- (*self).split_at(i)
- }
-}
-
-impl<T> RingSlices for &mut [T] {
- fn slice(self, from: usize, to: usize) -> Self {
- &mut self[from..to]
- }
- fn split_at(self, i: usize) -> (Self, Self) {
- (*self).split_at_mut(i)
- }
-}
-
-impl<T> RingSlices for *mut [T] {
- fn slice(self, from: usize, to: usize) -> Self {
- assert!(from <= to && to < self.len());
- // Not using `get_unchecked_mut` to keep this a safe operation.
- let len = to - from;
- ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
- }
-
- fn split_at(self, mid: usize) -> (Self, Self) {
- let len = self.len();
- let ptr = self.as_mut_ptr();
- assert!(mid <= len);
- (
- ptr::slice_from_raw_parts_mut(ptr, mid),
- ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
- )
- }
-}
diff --git a/library/alloc/src/collections/vec_deque/spec_extend.rs b/library/alloc/src/collections/vec_deque/spec_extend.rs
index 97ff8b765..dccf40ccb 100644
--- a/library/alloc/src/collections/vec_deque/spec_extend.rs
+++ b/library/alloc/src/collections/vec_deque/spec_extend.rs
@@ -1,6 +1,6 @@
use crate::alloc::Allocator;
use crate::vec;
-use core::iter::{ByRefSized, TrustedLen};
+use core::iter::TrustedLen;
use core::slice;
use super::VecDeque;
@@ -17,19 +17,33 @@ where
default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of:
//
- // for item in iter {
- // self.push_back(item);
- // }
- while let Some(element) = iter.next() {
- if self.len() == self.capacity() {
- let (lower, _) = iter.size_hint();
- self.reserve(lower.saturating_add(1));
- }
+ // for item in iter {
+ // self.push_back(item);
+ // }
+
+ // May only be called if `deque.len() < deque.capacity()`
+ unsafe fn push_unchecked<T, A: Allocator>(deque: &mut VecDeque<T, A>, element: T) {
+ // SAFETY: Because of the precondition, it's guaranteed that there is space
+ // in the logical array after the last element.
+ unsafe { deque.buffer_write(deque.to_physical_idx(deque.len), element) };
+ // This can't overflow because `deque.len() < deque.capacity() <= usize::MAX`.
+ deque.len += 1;
+ }
- let head = self.head;
- self.head = self.wrap_add(self.head, 1);
- unsafe {
- self.buffer_write(head, element);
+ while let Some(element) = iter.next() {
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+
+ // SAFETY: We just reserved space for at least one element.
+ unsafe { push_unchecked(self, element) };
+
+ // Inner loop to avoid repeatedly calling `reserve`.
+ while self.len < self.capacity() {
+ let Some(element) = iter.next() else {
+ return;
+ };
+ // SAFETY: The loop condition guarantees that `self.len() < self.capacity()`.
+ unsafe { push_unchecked(self, element) };
}
}
}
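
The new default `spec_extend` amortizes capacity checks: the outer loop reserves based on the iterator's size hint, while the inner loop fills the spare room without calling `reserve` again. A minimal safe sketch of the same two-loop shape over a plain `Vec` (illustrative only, not the deque code above):

fn extend_amortized<T>(buf: &mut Vec<T>, mut iter: impl Iterator<Item = T>) {
    while let Some(element) = iter.next() {
        // Reserve once per refill, based on the iterator's own estimate.
        let (lower, _) = iter.size_hint();
        buf.reserve(lower.saturating_add(1));
        buf.push(element);
        // Inner loop: no further `reserve` calls until the spare room is gone.
        while buf.len() < buf.capacity() {
            match iter.next() {
                Some(element) => buf.push(element),
                None => return,
            }
        }
    }
}

fn main() {
    let mut v = Vec::new();
    extend_amortized(&mut v, 0..100);
    assert_eq!(v.len(), 100);
}
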
@@ -39,7 +53,7 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: TrustedLen<Item = T>,
{
- default fn spec_extend(&mut self, mut iter: I) {
+ default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint();
if let Some(additional) = high {
@@ -51,35 +65,12 @@ where
);
self.reserve(additional);
- struct WrapAddOnDrop<'a, T, A: Allocator> {
- vec_deque: &'a mut VecDeque<T, A>,
- written: usize,
- }
-
- impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
- fn drop(&mut self) {
- self.vec_deque.head =
- self.vec_deque.wrap_add(self.vec_deque.head, self.written);
- }
- }
-
- let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
-
- let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
- unsafe {
- wrapper.vec_deque.write_iter(
- wrapper.vec_deque.head,
- ByRefSized(&mut iter).take(head_room),
- &mut wrapper.written,
- );
-
- if additional > head_room {
- wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
- }
- }
+ let written = unsafe {
+ self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
+ };
debug_assert_eq!(
- additional, wrapper.written,
+ additional, written,
"The number of items written to VecDeque doesn't match the TrustedLen size hint"
);
} else {
@@ -99,8 +90,8 @@ impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
self.reserve(slice.len());
unsafe {
- self.copy_slice(self.head, slice);
- self.head = self.wrap_add(self.head, slice.len());
+ self.copy_slice(self.to_physical_idx(self.len), slice);
+ self.len += slice.len();
}
iterator.forget_remaining_elements();
}
@@ -125,8 +116,8 @@ where
self.reserve(slice.len());
unsafe {
- self.copy_slice(self.head, slice);
- self.head = self.wrap_add(self.head, slice.len());
+ self.copy_slice(self.to_physical_idx(self.len), slice);
+ self.len += slice.len();
}
}
}
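
Throughout these hunks, the old `head`/`wrap_add` bookkeeping is replaced by `to_physical_idx(self.len)` plus a plain `len += ...`, reflecting the new head-plus-length representation. A simplified sketch of that logical-to-physical mapping (a hypothetical free function; the real method lives on `VecDeque`):

fn to_physical_idx(head: usize, logical: usize, capacity: usize) -> usize {
    // Simplified: the real method uses a wrapping add so that deques of
    // zero-sized elements (capacity usize::MAX) are covered too.
    let idx = head + logical;
    if idx >= capacity { idx - capacity } else { idx }
}

fn main() {
    // Capacity 8 with the first element at physical slot 6:
    // logical 0, 1, 2, 3 live at physical 6, 7, 0, 1.
    assert_eq!(to_physical_idx(6, 0, 8), 6);
    assert_eq!(to_physical_idx(6, 1, 8), 7);
    assert_eq!(to_physical_idx(6, 2, 8), 0);
    assert_eq!(to_physical_idx(6, 3, 8), 1);
}
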
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 1f2daef21..220ad71be 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -3,7 +3,6 @@ use core::iter::TrustedLen;
use super::*;
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -11,12 +10,11 @@ fn bench_push_back_100(b: &mut test::Bencher) {
deq.push_back(i);
}
deq.head = 0;
- deq.tail = 0;
+ deq.len = 0;
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@@ -24,18 +22,21 @@ fn bench_push_front_100(b: &mut test::Bencher) {
deq.push_front(i);
}
deq.head = 0;
- deq.tail = 0;
+ deq.len = 0;
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_back_100(b: &mut test::Bencher) {
- let mut deq = VecDeque::<i32>::with_capacity(101);
+ let size = 100;
+ let mut deq = VecDeque::<i32>::with_capacity(size + 1);
+ // We'll mess with private state to pretend that `deq` is filled.
+ // Make sure the buffer is initialized so that we don't read uninit memory.
+ unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
- deq.head = 100;
- deq.tail = 0;
+ deq.head = 0;
+ deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
@@ -43,9 +44,9 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_whole_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@@ -54,9 +55,9 @@ fn bench_retain_whole_10000(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_odd_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@@ -65,24 +66,27 @@ fn bench_retain_odd_10000(b: &mut test::Bencher) {
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_half_10000(b: &mut test::Bencher) {
- let v = (1..100000).collect::<VecDeque<u32>>();
+ let size = if cfg!(miri) { 1000 } else { 100000 };
+ let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
- v.retain(|x| *x > 50000)
+ v.retain(|x| *x > size / 2)
})
}
#[bench]
-#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_front_100(b: &mut test::Bencher) {
- let mut deq = VecDeque::<i32>::with_capacity(101);
+ let size = 100;
+ let mut deq = VecDeque::<i32>::with_capacity(size + 1);
+ // We'll mess with private state to pretend that `deq` is filled.
+ // Make sure the buffer is initialized so that we don't read uninit memory.
+ unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
- deq.head = 100;
- deq.tail = 0;
+ deq.head = 0;
+ deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
@@ -101,9 +105,9 @@ fn test_swap_front_back_remove() {
for len in 0..final_len {
let expected: VecDeque<_> =
if back { (0..len).collect() } else { (0..len).rev().collect() };
- for tail_pos in 0..usable_cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..usable_cap {
+ tester.head = head_pos;
+ tester.len = 0;
if back {
for i in 0..len * 2 {
tester.push_front(i);
@@ -120,8 +124,8 @@ fn test_swap_front_back_remove() {
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -146,18 +150,18 @@ fn test_insert() {
for len in minlen..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
+ for head_pos in 0..cap {
for to_insert in 0..len {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -253,13 +257,14 @@ fn test_swap_panic() {
#[test]
fn test_reserve_exact() {
let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
- assert!(tester.capacity() == 1);
+ assert_eq!(tester.capacity(), 1);
tester.reserve_exact(50);
- assert!(tester.capacity() >= 51);
+ assert_eq!(tester.capacity(), 50);
tester.reserve_exact(40);
- assert!(tester.capacity() >= 51);
+ // reserving won't shrink the buffer
+ assert_eq!(tester.capacity(), 50);
tester.reserve_exact(200);
- assert!(tester.capacity() >= 200);
+ assert_eq!(tester.capacity(), 200);
}
#[test]
@@ -319,6 +324,7 @@ fn test_contains() {
#[test]
fn test_rotate_left_right() {
let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.reserve(1);
assert_eq!(tester.len(), 10);
@@ -459,7 +465,7 @@ fn test_binary_search_key() {
}
#[test]
-fn make_contiguous_big_tail() {
+fn make_contiguous_big_head() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..3 {
@@ -474,14 +480,14 @@ fn make_contiguous_big_tail() {
assert_eq!(tester.capacity(), 15);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
- let expected_start = tester.head;
+ let expected_start = tester.as_slices().1.len();
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
-fn make_contiguous_big_head() {
+fn make_contiguous_big_tail() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..8 {
@@ -495,44 +501,46 @@ fn make_contiguous_big_head() {
// 01234567......98
let expected_start = 0;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_small_free() {
- let mut tester = VecDeque::with_capacity(15);
+ let mut tester = VecDeque::with_capacity(16);
- for i in 'A' as u8..'I' as u8 {
+ for i in b'A'..b'I' {
tester.push_back(i as char);
}
- for i in 'I' as u8..'N' as u8 {
+ for i in b'I'..b'N' {
tester.push_front(i as char);
}
+ assert_eq!(tester, ['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']);
+
// ABCDEFGH...MLKJI
let expected_start = 0;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!(
(&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
tester.as_slices()
);
tester.clear();
- for i in 'I' as u8..'N' as u8 {
+ for i in b'I'..b'N' {
tester.push_back(i as char);
}
- for i in 'A' as u8..'I' as u8 {
+ for i in b'A'..b'I' {
tester.push_front(i as char);
}
// IJKLM...HGFEDCBA
- let expected_start = 0;
+ let expected_start = 3;
tester.make_contiguous();
- assert_eq!(tester.tail, expected_start);
+ assert_eq!(tester.head, expected_start);
assert_eq!(
(&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
tester.as_slices()
@@ -541,16 +549,55 @@ fn make_contiguous_small_free() {
#[test]
fn make_contiguous_head_to_end() {
- let mut dq = VecDeque::with_capacity(3);
- dq.push_front('B');
- dq.push_front('A');
- dq.push_back('C');
- dq.make_contiguous();
- let expected_tail = 0;
- let expected_head = 3;
- assert_eq!(expected_tail, dq.tail);
- assert_eq!(expected_head, dq.head);
- assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
+ let mut tester = VecDeque::with_capacity(16);
+
+ for i in b'A'..b'L' {
+ tester.push_back(i as char);
+ }
+
+ for i in b'L'..b'Q' {
+ tester.push_front(i as char);
+ }
+
+ assert_eq!(
+ tester,
+ ['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
+ );
+
+ // ABCDEFGHIJKPONML
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.head, expected_start);
+ assert_eq!(
+ (
+ &['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
+ as &[_],
+ &[] as &[_]
+ ),
+ tester.as_slices()
+ );
+
+ tester.clear();
+ for i in b'L'..b'Q' {
+ tester.push_back(i as char);
+ }
+
+ for i in b'A'..b'L' {
+ tester.push_front(i as char);
+ }
+
+ // LMNOPKJIHGFEDCBA
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.head, expected_start);
+ assert_eq!(
+ (
+ &['K', 'J', 'I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'L', 'M', 'N', 'O', 'P']
+ as &[_],
+ &[] as &[_]
+ ),
+ tester.as_slices()
+ );
}
#[test]
@@ -584,10 +631,10 @@ fn test_remove() {
for len in minlen..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
+ for head_pos in 0..cap {
for to_remove in 0..=len {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
@@ -598,8 +645,8 @@ fn test_remove() {
tester.push_back(1234);
}
tester.remove(to_remove);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -613,11 +660,11 @@ fn test_range() {
let cap = tester.capacity();
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..=cap {
- for tail in 0..=cap {
+ for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@@ -638,17 +685,17 @@ fn test_range_mut() {
let cap = tester.capacity();
for len in 0..=cap {
- for tail in 0..=cap {
+ for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let head_was = tester.head;
- let tail_was = tester.tail;
+ let len_was = tester.len;
// Check that we iterate over the correct values
let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
@@ -658,8 +705,8 @@ fn test_range_mut() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
- assert_eq!(tester.tail, tail_was);
assert_eq!(tester.head, head_was);
+ assert_eq!(tester.len, len_was);
}
}
}
@@ -672,11 +719,11 @@ fn test_drain() {
let cap = tester.capacity();
for len in 0..=cap {
- for tail in 0..=cap {
+ for head in 0..cap {
for drain_start in 0..=len {
for drain_end in drain_start..=len {
- tester.tail = tail;
- tester.head = tail;
+ tester.head = head;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@@ -689,8 +736,8 @@ fn test_drain() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
@@ -717,17 +764,18 @@ fn test_shrink_to_fit() {
for len in 0..=cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
- for tail_pos in 0..=max_cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..=max_cap {
+ tester.reserve(head_pos);
+ tester.head = head_pos;
+ tester.len = 0;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@@ -754,17 +802,17 @@ fn test_split_off() {
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
- for tail_pos in 0..cap {
- tester.tail = tail_pos;
- tester.head = tail_pos;
+ for head_pos in 0..cap {
+ tester.head = head_pos;
+ tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
- assert!(tester.tail < tester.cap());
- assert!(tester.head < tester.cap());
- assert!(result.tail < result.cap());
- assert!(result.head < result.cap());
+ assert!(tester.head <= tester.capacity());
+ assert!(tester.len <= tester.capacity());
+ assert!(result.head <= result.capacity());
+ assert!(result.len <= result.capacity());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
@@ -781,16 +829,10 @@ fn test_from_vec() {
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
- assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
-
- let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
- let vd = VecDeque::from(vec.clone());
- assert!(vd.cap().is_power_of_two());
- assert_eq!(vd.len(), vec.len());
}
#[test]
@@ -842,10 +884,6 @@ fn test_extend_impl(trusted_len: bool) {
}
assert_eq!(self.test, self.expected);
- let (a1, b1) = self.test.as_slices();
- let (a2, b2) = self.expected.as_slices();
- assert_eq!(a1, a2);
- assert_eq!(b1, b2);
}
fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
@@ -868,7 +906,7 @@ fn test_extend_impl(trusted_len: bool) {
let mut tester = VecDequeTester::new(trusted_len);
// Initial capacity
- tester.test_extend(0..tester.remaining_capacity() - 1);
+ tester.test_extend(0..tester.remaining_capacity());
// Grow
tester.test_extend(1024..2048);
@@ -876,7 +914,7 @@ fn test_extend_impl(trusted_len: bool) {
// Wrap around
tester.drain(..128);
- tester.test_extend(0..tester.remaining_capacity() - 1);
+ tester.test_extend(0..tester.remaining_capacity());
// Continue
tester.drain(256..);
@@ -889,16 +927,6 @@ fn test_extend_impl(trusted_len: bool) {
}
#[test]
-#[should_panic = "capacity overflow"]
-fn test_from_vec_zst_overflow() {
- use crate::vec::Vec;
- let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
- let vd = VecDeque::from(vec.clone()); // no room for +1
- assert!(vd.cap().is_power_of_two());
- assert_eq!(vd.len(), vec.len());
-}
-
-#[test]
fn test_from_array() {
fn test<const N: usize>() {
let mut array: [usize; N] = [0; N];
@@ -913,7 +941,6 @@ fn test_from_array() {
assert_eq!(deq[i], i);
}
- assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), N);
}
test::<0>();
@@ -921,11 +948,6 @@ fn test_from_array() {
test::<2>();
test::<32>();
test::<35>();
-
- let array = [(); MAXIMUM_ZST_CAPACITY - 1];
- let deq = VecDeque::from(array);
- assert!(deq.cap().is_power_of_two());
- assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
}
#[test]
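
The test changes above all follow the same pattern: the removed `tail` field is gone, positions are expressed as `head` plus `len`, the buffer may now be completely full, and the invariant assertions loosen accordingly. A tiny illustrative mirror of those loosened assertions (a hypothetical struct, not the real fields):

struct RawDeque { head: usize, len: usize, cap: usize }

impl RawDeque {
    fn is_valid(&self) -> bool {
        // Unlike the old tail/head encoding, which kept one slot empty,
        // `len` may now equal the capacity.
        self.head <= self.cap && self.len <= self.cap
    }
}

fn main() {
    assert!(RawDeque { head: 6, len: 8, cap: 8 }.is_valid());
    assert!(!RawDeque { head: 0, len: 9, cap: 8 }.is_valid());
}
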
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index ce36b116f..96960d43f 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -82,6 +82,7 @@
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
@@ -124,6 +125,7 @@
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
+#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
@@ -150,6 +152,7 @@
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
+#![cfg_attr(not(bootstrap), feature(tuple_trait))]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 006d813e5..38e31b180 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -293,6 +293,15 @@ struct RcBox<T: ?Sized> {
value: T,
}
+/// Calculate layout for `RcBox<T>` using the inner value's layout
+fn rcbox_layout_for_value_layout(layout: Layout) -> Layout {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ Layout::new::<RcBox<()>>().extend(layout).unwrap().0.pad_to_align()
+}
+
/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
/// Counted'.
///
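
`rcbox_layout_for_value_layout` factors out a computation that previously appeared twice below. Its shape, a header layout extended by the value layout and padded back to the overall alignment, can be exercised with the stable `Layout` API (the two-counter `Header` here is a hypothetical stand-in for the private `RcBox<()>`):

use std::alloc::Layout;

#[allow(dead_code)]
#[repr(C)]
struct Header {
    strong: usize,
    weak: usize,
}

fn box_layout_for(value_layout: Layout) -> Layout {
    Layout::new::<Header>().extend(value_layout).unwrap().0.pad_to_align()
}

fn main() {
    // On a typical 64-bit target: a 16-byte header extended by a 1-byte
    // value gives size 17, padded back to 8-byte alignment, so 24 bytes.
    let l = box_layout_for(Layout::new::<u8>());
    assert_eq!((l.size(), l.align()), (24, 8));
}
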
@@ -1082,10 +1091,11 @@ impl<T: ?Sized> Rc<T> {
///
/// # Safety
///
- /// Any other `Rc` or [`Weak`] pointers to the same allocation must not be dereferenced
- /// for the duration of the returned borrow.
- /// This is trivially the case if no such pointers exist,
- /// for example immediately after `Rc::new`.
+ /// If any other `Rc` or [`Weak`] pointers to the same allocation exist, then
+ /// they must not be dereferenced or have active borrows for the duration
+ /// of the returned borrow, and their inner type must be exactly the same as the
+ /// inner type of this Rc (including lifetimes). This is trivially the case if no
+ /// such pointers exist, for example immediately after `Rc::new`.
///
/// # Examples
///
@@ -1100,6 +1110,38 @@ impl<T: ?Sized> Rc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
+ /// Other `Rc` pointers to the same allocation must be to the same type.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<str> = Rc::from("Hello, world!");
+ /// let mut y: Rc<[u8]> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type is str, not [u8]
+ /// Rc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
+ /// }
+ /// println!("{}", &*x); // Invalid UTF-8 in a str
+ /// ```
+ /// Other `Rc` pointers to the same allocation must be to the exact same type, including lifetimes.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<&str> = Rc::new("Hello, world!");
+ /// {
+ /// let s = String::from("Oh, no!");
+ /// let mut y: Rc<&str> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type
+ /// // is &'long str, not &'short str
+ /// *Rc::get_mut_unchecked(&mut y) = &s;
+ /// }
+ /// }
+ /// println!("{}", &*x); // Use-after-free
+ /// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@@ -1334,11 +1376,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> *mut RcBox<T> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = rcbox_layout_for_value_layout(value_layout);
unsafe {
Rc::try_allocate_for_layout(value_layout, allocate, mem_to_rcbox)
.unwrap_or_else(|_| handle_alloc_error(layout))
@@ -1357,11 +1395,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> Result<*mut RcBox<T>, AllocError> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = rcbox_layout_for_value_layout(value_layout);
// Allocate for the layout.
let ptr = allocate(layout)?;
@@ -1428,7 +1462,7 @@ impl<T> Rc<[T]> {
}
}
- /// Copy elements from slice into newly allocated Rc<\[T\]>
+ /// Copy elements from slice into newly allocated `Rc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`
#[cfg(not(no_global_oom_handling))]
@@ -1968,10 +2002,8 @@ impl<T> From<Vec<T>> for Rc<[T]> {
fn from(mut v: Vec<T>) -> Rc<[T]> {
unsafe {
let rc = Rc::copy_from_slice(&v);
-
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
-
rc
}
}
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index a5e7bf2a1..1b61ede34 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -458,7 +458,7 @@ impl<T> [T] {
hack::into_vec(self)
}
- /// Creates a vector by repeating a slice `n` times.
+ /// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index c436adf70..7a8e6f088 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -362,8 +362,8 @@ use crate::vec::Vec;
/// [`Deref`]: core::ops::Deref "ops::Deref"
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
-#[cfg_attr(not(test), rustc_diagnostic_item = "String")]
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(all(not(bootstrap), not(test)), lang = "String")]
pub struct String {
vec: Vec<u8>,
}
@@ -949,7 +949,7 @@ impl String {
/// assert_eq!(string, "abcdecdeabecde");
/// ```
#[cfg(not(no_global_oom_handling))]
- #[unstable(feature = "string_extend_from_within", issue = "none")]
+ #[unstable(feature = "string_extend_from_within", issue = "103806")]
pub fn extend_from_within<R>(&mut self, src: R)
where
R: RangeBounds<usize>,
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 81cd77074..f7dc4d109 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -333,6 +333,15 @@ struct ArcInner<T: ?Sized> {
data: T,
}
+/// Calculate layout for `ArcInner<T>` using the inner value's layout
+fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
+}
+
unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
@@ -1154,11 +1163,7 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = arcinner_layout_for_value_layout(value_layout);
unsafe {
Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
.unwrap_or_else(|_| handle_alloc_error(layout))
@@ -1176,11 +1181,7 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> Result<*mut ArcInner<T>, AllocError> {
- // Calculate layout using the given value layout.
- // Previously, layout was calculated on the expression
- // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
- // reference (see #54908).
- let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ let layout = arcinner_layout_for_value_layout(value_layout);
let ptr = allocate(layout)?;
@@ -1246,7 +1247,7 @@ impl<T> Arc<[T]> {
}
}
- /// Copy elements from slice into newly allocated Arc<\[T\]>
+ /// Copy elements from slice into newly allocated `Arc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`.
#[cfg(not(no_global_oom_handling))]
@@ -1586,10 +1587,11 @@ impl<T: ?Sized> Arc<T> {
///
/// # Safety
///
- /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
- /// for the duration of the returned borrow.
- /// This is trivially the case if no such pointers exist,
- /// for example immediately after `Arc::new`.
+ /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
+ /// they must not be dereferenced or have active borrows for the duration
+ /// of the returned borrow, and their inner type must be exactly the same as the
+ /// inner type of this Arc (including lifetimes). This is trivially the case if no
+ /// such pointers exist, for example immediately after `Arc::new`.
///
/// # Examples
///
@@ -1604,6 +1606,38 @@ impl<T: ?Sized> Arc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
+ /// Other `Arc` pointers to the same allocation must be to the same type.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<str> = Arc::from("Hello, world!");
+ /// let mut y: Arc<[u8]> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type is str, not [u8]
+ /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
+ /// }
+ /// println!("{}", &*x); // Invalid UTF-8 in a str
+ /// ```
+ /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
+ /// ```no_run
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<&str> = Arc::new("Hello, world!");
+ /// {
+ /// let s = String::from("Oh, no!");
+ /// let mut y: Arc<&str> = x.clone().into();
+ /// unsafe {
+ /// // this is Undefined Behavior, because x's inner type
+ /// // is &'long str, not &'short str
+ /// *Arc::get_mut_unchecked(&mut y) = &s;
+ /// }
+ /// }
+ /// println!("{}", &*x); // Use-after-free
+ /// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@@ -2573,12 +2607,10 @@ impl<T> From<Vec<T>> for Arc<[T]> {
#[inline]
fn from(mut v: Vec<T>) -> Arc<[T]> {
unsafe {
- let arc = Arc::copy_from_slice(&v);
-
+ let rc = Arc::copy_from_slice(&v);
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
-
- arc
+ rc
}
}
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index bbbdc3aa2..ba34ab680 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -1070,7 +1070,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// Converts the vector into [`Box<[T]>`][owned slice].
///
- /// Note that this will drop any excess capacity.
+ /// If the vector has excess capacity, its items will be moved into a
+ /// newly allocated buffer with exactly the right capacity.
///
/// [owned slice]: Box
///
@@ -2163,7 +2164,7 @@ impl<T, A: Allocator> Vec<T, A> {
{
let len = self.len();
if new_len > len {
- self.extend_with(new_len - len, ExtendFunc(f));
+ self.extend_trusted(iter::repeat_with(f).take(new_len - len));
} else {
self.truncate(new_len);
}
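
`resize_with` now feeds `repeat_with(f).take(n)`, an iterator with an exact size hint, into the new `extend_trusted` instead of the removed closure-based `ExtendFunc` path. A small usage sketch of that iterator shape:

use std::iter;

fn main() {
    // `repeat_with(f).take(n)` yields exactly n items and reports an exact
    // size hint, which is what lets the trusted-len extend path reserve once.
    let mut v = vec![1u32];
    let mut next = 1u32;
    v.extend(iter::repeat_with(|| { next *= 2; next }).take(3));
    assert_eq!(v, [1, 2, 4, 8]);
}
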
@@ -2491,16 +2492,6 @@ impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
}
}
-struct ExtendFunc<F>(F);
-impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
- fn next(&mut self) -> T {
- (self.0)()
- }
- fn last(mut self) -> T {
- (self.0)()
- }
-}
-
impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` values, using the given generator.
@@ -2588,7 +2579,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
// SAFETY:
- // - caller guaratees that src is a valid index
+ // - caller guarantees that src is a valid index
let to_clone = unsafe { this.get_unchecked(src) };
iter::zip(to_clone, spare)
@@ -2607,7 +2598,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (init, spare) = self.split_at_spare_mut();
// SAFETY:
- // - caller guaratees that `src` is a valid index
+ // - caller guarantees that `src` is a valid index
let source = unsafe { init.get_unchecked(src) };
// SAFETY:
@@ -2780,7 +2771,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
/// assert_eq!(v_iter.next(), None);
/// ```
#[inline]
- fn into_iter(self) -> IntoIter<T, A> {
+ fn into_iter(self) -> Self::IntoIter {
unsafe {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
@@ -2808,7 +2799,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
- fn into_iter(self) -> slice::Iter<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
@@ -2818,7 +2809,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
type Item = &'a mut T;
type IntoIter = slice::IterMut<'a, T>;
- fn into_iter(self) -> slice::IterMut<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
@@ -2870,6 +2861,40 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
+ // Specific extend for `TrustedLen` iterators, called both by the specializations
+ // and by internal places where resolving specialization makes compilation slower.
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let ptr = self.as_mut_ptr();
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr.add(local_len.current_len()), element);
+ // Since the loop executes user code, which can panic, we have to update
+ // the length at every step to correctly drop what we've written.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+
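
`extend_trusted` reserves once up front and then commits the length after every single write, so a panicking iterator can never leave the vector claiming uninitialized elements. A rough sketch of the same write-then-commit idea on stable Rust, using `spare_capacity_mut` (this variant leaks the written elements on panic instead of tracking them with a drop guard, which is safe but not what the real code does; `exact_len` is assumed to match the iterator's true length):

use std::mem::MaybeUninit;

fn extend_exact<T>(vec: &mut Vec<T>, exact_len: usize, iter: impl Iterator<Item = T>) {
    vec.reserve(exact_len);
    let spare: &mut [MaybeUninit<T>] = vec.spare_capacity_mut();
    let mut written = 0;
    for (slot, element) in spare.iter_mut().zip(iter.take(exact_len)) {
        slot.write(element);
        written += 1;
    }
    // SAFETY: exactly `written` spare slots were initialized above. If the
    // iterator panicked mid-way, we never get here and the written elements
    // leak; that is safe, merely wasteful.
    unsafe { vec.set_len(vec.len() + written) };
}

fn main() {
    let mut v = vec![0u8];
    extend_exact(&mut v, 3, 1..=3);
    assert_eq!(v, [0, 1, 2, 3]);
}
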
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
@@ -3199,6 +3224,14 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
/// ```
/// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
/// ```
+ ///
+ /// Any excess capacity is removed:
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
+ /// ```
fn from(v: Vec<T, A>) -> Self {
v.into_boxed_slice()
}
diff --git a/library/alloc/src/vec/set_len_on_drop.rs b/library/alloc/src/vec/set_len_on_drop.rs
index 8b66bc812..6ce5a3a9f 100644
--- a/library/alloc/src/vec/set_len_on_drop.rs
+++ b/library/alloc/src/vec/set_len_on_drop.rs
@@ -18,6 +18,11 @@ impl<'a> SetLenOnDrop<'a> {
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
+
+ #[inline]
+ pub(super) fn current_len(&self) -> usize {
+ self.local_len
+ }
}
impl Drop for SetLenOnDrop<'_> {
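
The new `current_len` accessor lets `extend_trusted` compute each write offset straight from the guard. The guard's whole job is to publish the local length back on every exit path, including unwinding. A minimal standalone sketch of the pattern (not the real type):

struct SetLenGuard<'a> {
    len: &'a mut usize,
    local_len: usize,
}

impl<'a> SetLenGuard<'a> {
    fn new(len: &'a mut usize) -> Self {
        let local_len = *len;
        Self { len, local_len }
    }
    fn increment_len(&mut self, n: usize) { self.local_len += n; }
    fn current_len(&self) -> usize { self.local_len }
}

impl Drop for SetLenGuard<'_> {
    fn drop(&mut self) {
        // Runs on both normal exit and unwinding, so the container's length
        // always reflects exactly the elements that were actually written.
        *self.len = self.local_len;
    }
}

fn main() {
    let mut len = 0usize;
    {
        let mut guard = SetLenGuard::new(&mut len);
        guard.increment_len(2);
        assert_eq!(guard.current_len(), 2);
    }
    assert_eq!(len, 2);
}
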
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 1ea9c827a..56065ce56 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -1,9 +1,8 @@
use crate::alloc::Allocator;
use core::iter::TrustedLen;
-use core::ptr::{self};
use core::slice::{self};
-use super::{IntoIter, SetLenOnDrop, Vec};
+use super::{IntoIter, Vec};
// Specialization trait used for Vec::extend
pub(super) trait SpecExtend<T, I> {
@@ -24,36 +23,7 @@ where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, iterator: I) {
- // This is the case for a TrustedLen iterator.
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.reserve(additional);
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr, element);
- ptr = ptr.add(1);
- // Since the loop executes user code which can panic we have to bump the pointer
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- } else {
- // Per TrustedLen contract a `None` upper bound means that the iterator length
- // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
- // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
- // This avoids additional codegen for a fallback code path which would eventually
- // panic anyway.
- panic!("capacity overflow");
- }
+ self.extend_trusted(iterator)
}
}
diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs
index 9e5123be9..af49826ff 100644
--- a/library/alloc/tests/boxed.rs
+++ b/library/alloc/tests/boxed.rs
@@ -102,8 +102,18 @@ unsafe impl const Allocator for ConstAllocator {
let new_ptr = self.allocate(new_layout)?;
if new_layout.size() > 0 {
- new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
- self.deallocate(ptr, old_layout);
+ // Safety: `new_ptr` is valid for writes and `ptr` for reads of
+ // `old_layout.size()`, because `new_layout.size() >=
+ // old_layout.size()` (which is an invariant that must be upheld by
+ // callers).
+ unsafe {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
+ }
+ // Safety: that `ptr` is never used again is also an invariant that must
+ // be upheld by callers.
+ unsafe {
+ self.deallocate(ptr, old_layout);
+ }
}
Ok(new_ptr)
}
@@ -114,12 +124,21 @@ unsafe impl const Allocator for ConstAllocator {
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
- let new_ptr = self.grow(ptr, old_layout, new_layout)?;
+ // Safety: Invariants of `grow_zeroed` and `grow` are the same, and must
+ // be enforced by callers.
+ let new_ptr = unsafe { self.grow(ptr, old_layout, new_layout)? };
if new_layout.size() > 0 {
let old_size = old_layout.size();
let new_size = new_layout.size();
let raw_ptr = new_ptr.as_mut_ptr();
- raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ // Safety:
+ // - `grow` returned Ok, so the returned pointer must be valid for
+ // `new_size` bytes
+ // - `new_size` must be at least as large as `old_size`, which is an
+ // invariant that must be upheld by callers.
+ unsafe {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
}
Ok(new_ptr)
}
@@ -137,8 +156,18 @@ unsafe impl const Allocator for ConstAllocator {
let new_ptr = self.allocate(new_layout)?;
if new_layout.size() > 0 {
- new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), new_layout.size());
- self.deallocate(ptr, old_layout);
+ // Safety: `new_ptr` and `ptr` are valid for reads/writes of
+ // `new_layout.size()` because of the invariants of shrink, which
+ // include `new_layout.size()` being smaller than (or equal to)
+ // `old_layout.size()`.
+ unsafe {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), new_layout.size());
+ }
+ // Safety: that `ptr` is never used again is also an invariant that must
+ // be upheld by callers.
+ unsafe {
+ self.deallocate(ptr, old_layout);
+ }
}
Ok(new_ptr)
}
diff --git a/library/alloc/tests/fmt.rs b/library/alloc/tests/fmt.rs
index 5ee6db43f..04da95bbb 100644
--- a/library/alloc/tests/fmt.rs
+++ b/library/alloc/tests/fmt.rs
@@ -2,6 +2,7 @@
use std::cell::RefCell;
use std::fmt::{self, Write};
+use std::ptr;
#[test]
fn test_format() {
@@ -76,14 +77,14 @@ fn test_format_macro_interface() {
t!(format!("{}", "foo"), "foo");
t!(format!("{}", "foo".to_string()), "foo");
if cfg!(target_pointer_width = "32") {
- t!(format!("{:#p}", 0x1234 as *const isize), "0x00001234");
- t!(format!("{:#p}", 0x1234 as *mut isize), "0x00001234");
+ t!(format!("{:#p}", ptr::invalid::<isize>(0x1234)), "0x00001234");
+ t!(format!("{:#p}", ptr::invalid_mut::<isize>(0x1234)), "0x00001234");
} else {
- t!(format!("{:#p}", 0x1234 as *const isize), "0x0000000000001234");
- t!(format!("{:#p}", 0x1234 as *mut isize), "0x0000000000001234");
+ t!(format!("{:#p}", ptr::invalid::<isize>(0x1234)), "0x0000000000001234");
+ t!(format!("{:#p}", ptr::invalid_mut::<isize>(0x1234)), "0x0000000000001234");
}
- t!(format!("{:p}", 0x1234 as *const isize), "0x1234");
- t!(format!("{:p}", 0x1234 as *mut isize), "0x1234");
+ t!(format!("{:p}", ptr::invalid::<isize>(0x1234)), "0x1234");
+ t!(format!("{:p}", ptr::invalid_mut::<isize>(0x1234)), "0x1234");
t!(format!("{A:x}"), "aloha");
t!(format!("{B:X}"), "adios");
t!(format!("foo {} ☃☃☃☃☃☃", "bar"), "foo bar ☃☃☃☃☃☃");
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index ffc5ca7a5..d6d2b055b 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -47,6 +47,8 @@
#![feature(strict_provenance)]
#![feature(once_cell)]
#![feature(drain_keep_rest)]
+#![deny(fuzzy_provenance_casts)]
+#![deny(unsafe_op_in_unsafe_fn)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index e30329aa1..4d182be02 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1590,11 +1590,27 @@ fn test_bool_from_str() {
assert_eq!("not even a boolean".parse::<bool>().ok(), None);
}
-fn check_contains_all_substrings(s: &str) {
- assert!(s.contains(""));
- for i in 0..s.len() {
- for j in i + 1..=s.len() {
- assert!(s.contains(&s[i..j]));
+fn check_contains_all_substrings(haystack: &str) {
+ let mut modified_needle = String::new();
+
+ for i in 0..haystack.len() {
+ // check different haystack lengths since we special-case short haystacks.
+ let haystack = &haystack[0..i];
+ assert!(haystack.contains(""));
+ for j in 0..haystack.len() {
+ for k in j + 1..=haystack.len() {
+ let needle = &haystack[j..k];
+ assert!(haystack.contains(needle));
+ modified_needle.clear();
+ modified_needle.push_str(needle);
+ modified_needle.replace_range(0..1, "\0");
+ assert!(!haystack.contains(&modified_needle));
+
+ modified_needle.clear();
+ modified_needle.push_str(needle);
+ modified_needle.replace_range(needle.len() - 1..needle.len(), "\0");
+ assert!(!haystack.contains(&modified_needle));
+ }
}
}
}
@@ -1616,6 +1632,18 @@ fn strslice_issue_16878() {
}
#[test]
+fn strslice_issue_104726() {
+ // Edge-case in the simd_contains impl.
+ // The first and last byte are the same so it backtracks by one byte
+ // which aligns with the end of the string. Previously incorrect offset calculations
+ // lead to out-of-bounds slicing.
+ #[rustfmt::skip]
+ let needle = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaba";
+ let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab";
+ assert!(!haystack.contains(needle));
+}
+
+#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_strslice_contains() {
let x = "There are moments, Jeeves, when one asks oneself, 'Do trousers matter?'";
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index e02711870..7ebed0d5c 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -1089,7 +1089,8 @@ fn test_into_iter_drop_allocator() {
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
- System.deallocate(ptr, layout)
+ // Safety: the invariants are upheld by our caller and forwarded to `System`.
+ unsafe { System.deallocate(ptr, layout) }
}
}
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 019d73c0b..d04de5a07 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -465,7 +465,6 @@ fn test_drain() {
for i in 6..9 {
d.push_front(i);
}
-
assert_eq!(d.drain(..).collect::<Vec<_>>(), [8, 7, 6, 0, 1, 2, 3, 4]);
assert!(d.is_empty());
}
@@ -1142,7 +1141,7 @@ fn test_reserve_exact_2() {
v.push_back(16);
v.reserve_exact(16);
- assert!(v.capacity() >= 48)
+ assert!(v.capacity() >= 33)
}
#[test]
@@ -1157,7 +1156,7 @@ fn test_try_reserve() {
// * overflow may trigger when adding `len` to `cap` (in number of elements)
// * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
- const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
{
@@ -1248,7 +1247,7 @@ fn test_try_reserve_exact() {
// This is exactly the same as test_try_reserve with the method changed.
// See that test for comments.
- const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
{
@@ -1391,7 +1390,8 @@ fn test_rotate_nop() {
#[test]
fn test_rotate_left_parts() {
- let mut v: VecDeque<_> = (1..=7).collect();
+ let mut v: VecDeque<_> = VecDeque::with_capacity(8);
+ v.extend(1..=7);
v.rotate_left(2);
assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..]));
v.rotate_left(2);
@@ -1410,7 +1410,8 @@ fn test_rotate_left_parts() {
#[test]
fn test_rotate_right_parts() {
- let mut v: VecDeque<_> = (1..=7).collect();
+ let mut v: VecDeque<_> = VecDeque::with_capacity(8);
+ v.extend(1..=7);
v.rotate_right(2);
assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..]));
v.rotate_right(2);
@@ -1727,3 +1728,11 @@ fn test_from_zero_sized_vec() {
let queue = VecDeque::from(v);
assert_eq!(queue.len(), 100);
}
+
+#[test]
+fn test_resize_keeps_reserved_space_from_item() {
+ let v = Vec::<i32>::with_capacity(1234);
+ let mut d = VecDeque::new();
+ d.resize(1, v);
+ assert_eq!(d[0].capacity(), 1234);
+}
diff --git a/library/core/benches/iter.rs b/library/core/benches/iter.rs
index 38887f29a..9193c79be 100644
--- a/library/core/benches/iter.rs
+++ b/library/core/benches/iter.rs
@@ -1,3 +1,4 @@
+use core::borrow::Borrow;
use core::iter::*;
use core::mem;
use core::num::Wrapping;
@@ -403,13 +404,31 @@ fn bench_trusted_random_access_adapters(b: &mut Bencher) {
/// Exercises the iter::Copied specialization for slice::Iter
#[bench]
-fn bench_copied_array_chunks(b: &mut Bencher) {
+fn bench_copied_chunks(b: &mut Bencher) {
+ let v = vec![1u8; 1024];
+
+ b.iter(|| {
+ let mut iter = black_box(&v).iter().copied();
+ let mut acc = Wrapping(0);
+ // This uses a while-let loop to side-step the TRA specialization in ArrayChunks
+ while let Ok(chunk) = iter.next_chunk::<{ mem::size_of::<u64>() }>() {
+ let d = u64::from_ne_bytes(chunk);
+ acc += Wrapping(d.rotate_left(7).wrapping_add(1));
+ }
+ acc
+ })
+}
+
+/// Exercises the TrustedRandomAccess specialization in ArrayChunks
+#[bench]
+fn bench_trusted_random_access_chunks(b: &mut Bencher) {
let v = vec![1u8; 1024];
b.iter(|| {
black_box(&v)
.iter()
- .copied()
+ // this shows that we're not relying on the slice::Iter specialization in Copied
+ .map(|b| *b.borrow())
.array_chunks::<{ mem::size_of::<u64>() }>()
.map(|ary| {
let d = u64::from_ne_bytes(ary);
diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs
index 1e462e3fc..f1244d932 100644
--- a/library/core/benches/lib.rs
+++ b/library/core/benches/lib.rs
@@ -1,10 +1,10 @@
// wasm32 does not support benches (no time).
#![cfg(not(target_arch = "wasm32"))]
#![feature(flt2dec)]
-#![feature(int_log)]
#![feature(test)]
#![feature(trusted_random_access)]
#![feature(iter_array_chunks)]
+#![feature(iter_next_chunk)]
extern crate test;
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
index 6756eecd0..1d80b8bf9 100644
--- a/library/core/src/alloc/global.rs
+++ b/library/core/src/alloc/global.rs
@@ -208,9 +208,11 @@ pub unsafe trait GlobalAlloc {
///
/// If this returns a non-null pointer, then ownership of the memory block
/// referenced by `ptr` has been transferred to this allocator.
- /// The memory may or may not have been deallocated, and should be
- /// considered unusable. The new memory block is allocated with `layout`,
- /// but with the `size` updated to `new_size`. This new layout should be
+ /// Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation remained in place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
+ /// The new memory block is allocated with `layout`,
+ /// but with the `size` updated to `new_size`. This new layout must be
/// used when deallocating the new memory block with `dealloc`. The range
/// `0..min(layout.size(), new_size)` of the new memory block is
/// guaranteed to have the same values as the original block.
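
The reworded `realloc` contract makes the old pointer unconditionally dead after success, even when the allocator resized in place. A sketch of correct usage with the stable global-allocator functions:

use std::alloc::{self, Layout};

fn main() {
    let layout = Layout::array::<u8>(16).unwrap();
    unsafe {
        let old = alloc::alloc(layout);
        assert!(!old.is_null());
        let new = alloc::realloc(old, layout, 32);
        assert!(!new.is_null());
        // `old` must not be touched from here on, even if `new == old`;
        // the block is freed through `new` with the updated size.
        let new_layout = Layout::from_size_align(32, layout.align()).unwrap();
        alloc::dealloc(new, new_layout);
    }
}
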
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index 920e559cc..ac3d84718 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -7,8 +7,8 @@
use crate::cmp;
use crate::error::Error;
use crate::fmt;
-use crate::mem::{self, ValidAlign};
-use crate::ptr::NonNull;
+use crate::mem;
+use crate::ptr::{Alignment, NonNull};
// While this function is used in one place and its implementation
// could be inlined, the previous attempts to do so made rustc
@@ -46,7 +46,7 @@ pub struct Layout {
//
// (However, we do not analogously require `align >= sizeof(void*)`,
// even though that is *also* a requirement of `posix_memalign`.)
- align: ValidAlign,
+ align: Alignment,
}
impl Layout {
@@ -71,11 +71,11 @@ impl Layout {
}
// SAFETY: just checked that align is a power of two.
- Layout::from_size_valid_align(size, unsafe { ValidAlign::new_unchecked(align) })
+ Layout::from_size_alignment(size, unsafe { Alignment::new_unchecked(align) })
}
#[inline(always)]
- const fn max_size_for_align(align: ValidAlign) -> usize {
+ const fn max_size_for_align(align: Alignment) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
@@ -95,7 +95,7 @@ impl Layout {
/// Internal helper constructor to skip revalidating alignment validity.
#[inline]
- const fn from_size_valid_align(size: usize, align: ValidAlign) -> Result<Self, LayoutError> {
+ const fn from_size_alignment(size: usize, align: Alignment) -> Result<Self, LayoutError> {
if size > Self::max_size_for_align(align) {
return Err(LayoutError);
}
@@ -117,7 +117,7 @@ impl Layout {
#[rustc_allow_const_fn_unstable(ptr_alignment_type)]
pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
// SAFETY: the caller is required to uphold the preconditions.
- unsafe { Layout { size, align: ValidAlign::new_unchecked(align) } }
+ unsafe { Layout { size, align: Alignment::new_unchecked(align) } }
}
/// The minimum size in bytes for a memory block of this layout.
@@ -157,9 +157,10 @@ impl Layout {
/// allocate backing structure for `T` (which could be a trait
/// or other unsized type like a slice).
#[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use]
#[inline]
- pub fn for_value<T: ?Sized>(t: &T) -> Self {
+ pub const fn for_value<T: ?Sized>(t: &T) -> Self {
let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -191,8 +192,9 @@ impl Layout {
/// [trait object]: ../../book/ch17-02-trait-objects.html
/// [extern type]: ../../unstable-book/language-features/extern-types.html
#[unstable(feature = "layout_for_ptr", issue = "69835")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use]
- pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
+ pub const unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
// SAFETY: we pass along the prerequisites of these functions to the caller
let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
// SAFETY: see rationale in `new` for why this is using the unsafe variant
@@ -229,8 +231,9 @@ impl Layout {
/// Returns an error if the combination of `self.size()` and the given
/// `align` violates the conditions listed in [`Layout::from_size_align`].
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
+ pub const fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
Layout::from_size_align(self.size(), cmp::max(self.align(), align))
}
@@ -287,10 +290,11 @@ impl Layout {
/// This is equivalent to adding the result of `padding_needed_for`
/// to the layout's current size.
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[must_use = "this returns a new `Layout`, \
without modifying the original"]
#[inline]
- pub fn pad_to_align(&self) -> Layout {
+ pub const fn pad_to_align(&self) -> Layout {
let pad = self.padding_needed_for(self.align());
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
@@ -311,8 +315,9 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
+ pub const fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
// This cannot overflow. Quoting from the invariant of Layout:
// > `size`, when rounded up to the nearest multiple of `align`,
// > must not overflow isize (i.e., the rounded value must be
@@ -321,7 +326,8 @@ impl Layout {
let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(alloc_size, self.align).map(|layout| (layout, padded_size))
+ let layout = Layout::from_size_alignment(alloc_size, self.align)?;
+ Ok((layout, padded_size))
}
/// Creates a layout describing the record for `self` followed by
@@ -370,8 +376,9 @@ impl Layout {
/// # assert_eq!(repr_c(&[u64, u32, u16, u32]), Ok((s, vec![0, 8, 12, 16])));
/// ```
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
+ pub const fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
let new_align = cmp::max(self.align, next.align);
let pad = self.padding_needed_for(next.align());
@@ -379,7 +386,7 @@ impl Layout {
let new_size = offset.checked_add(next.size()).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- let layout = Layout::from_size_valid_align(new_size, new_align)?;
+ let layout = Layout::from_size_alignment(new_size, new_align)?;
Ok((layout, offset))
}
@@ -396,11 +403,12 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
+ pub const fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
let size = self.size().checked_mul(n).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(size, self.align)
+ Layout::from_size_alignment(size, self.align)
}
/// Creates a layout describing the record for `self` followed by
@@ -410,11 +418,12 @@ impl Layout {
///
/// On arithmetic overflow, returns `LayoutError`.
#[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
+ pub const fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
let new_size = self.size().checked_add(next.size()).ok_or(LayoutError)?;
// The safe constructor is called here to enforce the isize size limit.
- Layout::from_size_valid_align(new_size, self.align)
+ Layout::from_size_alignment(new_size, self.align)
}
/// Creates a layout describing the record for a `[T; n]`.
@@ -422,13 +431,18 @@ impl Layout {
/// On arithmetic overflow or when the total size would exceed
/// `isize::MAX`, returns `LayoutError`.
#[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[inline]
- pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
+ pub const fn array<T>(n: usize) -> Result<Self, LayoutError> {
// Reduce the amount of code we need to monomorphize per `T`.
- return inner(mem::size_of::<T>(), ValidAlign::of::<T>(), n);
+ return inner(mem::size_of::<T>(), Alignment::of::<T>(), n);
#[inline]
- fn inner(element_size: usize, align: ValidAlign, n: usize) -> Result<Layout, LayoutError> {
+ const fn inner(
+ element_size: usize,
+ align: Alignment,
+ n: usize,
+ ) -> Result<Layout, LayoutError> {
// We need to check two things about the size:
// - That the total size won't overflow a `usize`, and
// - That the total size still fits in an `isize`.
@@ -443,7 +457,7 @@ impl Layout {
// SAFETY: We just checked above that the `array_size` will not
// exceed `isize::MAX` even when rounded up to the alignment.
- // And `ValidAlign` guarantees it's a power of two.
+ // And `Alignment` guarantees it's a power of two.
unsafe { Ok(Layout::from_size_align_unchecked(array_size, align.as_usize())) }
}
}
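
All of the now-`const` constructors funnel through the renamed `from_size_alignment`, which enforces the rounded-size `isize::MAX` limit. `Layout::array` shows that check on the stable surface:

use std::alloc::Layout;

fn main() {
    let l = Layout::array::<u32>(10).unwrap();
    assert_eq!((l.size(), l.align()), (40, 4));
    // The multiplication overflows the size limit, so the same checked path
    // that `from_size_alignment` guards returns an error.
    assert!(Layout::array::<u64>(usize::MAX).is_err());
}
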
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index a4bf6a853..a6082455f 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -169,8 +169,9 @@ pub unsafe trait Allocator {
/// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
- /// transferred to this allocator. The memory may or may not have been freed, and should be
- /// considered unusable.
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was grown in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
@@ -295,8 +296,9 @@ pub unsafe trait Allocator {
/// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
///
/// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
- /// transferred to this allocator. The memory may or may not have been freed, and should be
- /// considered unusable.
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
///
/// If this method returns `Err`, then ownership of the memory block has not been transferred to
/// this allocator, and the contents of the memory block are unaltered.
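
A hedged sketch of the contract the reworded docs pin down, using the nightly-only `allocator_api` feature: once `grow` succeeds, the old pointer is dead even if the block never moved.

```rust
#![feature(allocator_api)] // nightly-only; `Allocator` is unstable

use std::alloc::{Allocator, Global, Layout};

fn main() {
    let old = Layout::array::<u8>(16).unwrap();
    let new = Layout::array::<u8>(32).unwrap();
    unsafe {
        let ptr = Global.allocate(old).expect("allocation failed");
        // Per the docs above: after a successful `grow`, any access through
        // `ptr` is UB even if the allocation was grown in place. Only the
        // returned pointer may be used from here on.
        let grown = Global.grow(ptr.cast(), old, new).expect("grow failed");
        Global.deallocate(grown.cast(), new);
    }
}
```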
diff --git a/library/core/src/arch.rs b/library/core/src/arch.rs
new file mode 100644
index 000000000..fc2a5b89c
--- /dev/null
+++ b/library/core/src/arch.rs
@@ -0,0 +1,30 @@
+#![doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
+
+#[stable(feature = "simd_arch", since = "1.27.0")]
+pub use crate::core_arch::arch::*;
+
+/// Inline assembly.
+///
+/// Refer to [rust by example] for a usage guide and the [reference] for
+/// detailed information about the syntax and available options.
+///
+/// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+/// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
+#[stable(feature = "asm", since = "1.59.0")]
+#[rustc_builtin_macro]
+pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+}
+
+/// Module-level inline assembly.
+///
+/// Refer to [rust by example] for a usage guide and the [reference] for
+/// detailed information about the syntax and available options.
+///
+/// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+/// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
+#[stable(feature = "global_asm", since = "1.59.0")]
+#[rustc_builtin_macro]
+pub macro global_asm("assembly template", $(operands,)* $(options($(option),*))?) {
+ /* compiler built-in */
+}
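
A minimal use of the `asm!` macro documented here; the template below assumes an x86_64 target, since assembly templates are architecture-specific:

```rust
use std::arch::asm;

// x86_64-only sketch: increment a value with one inline instruction.
fn add_one(mut x: u64) -> u64 {
    unsafe {
        asm!("add {0}, 1", inout(reg) x);
    }
    x
}

fn main() {
    assert_eq!(add_one(41), 42);
}
```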
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index eae0e1c76..94a1a1d32 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -23,7 +23,8 @@ mod iter;
#[stable(feature = "array_value_iter", since = "1.51.0")]
pub use iter::IntoIter;
-/// Creates an array `[T; N]` where each array element `T` is returned by the `cb` call.
+/// Creates an array of type `[T; N]` where each element `T` is the value
+/// returned by `cb`, called with that element's index.
///
/// # Arguments
///
@@ -36,8 +37,18 @@ pub use iter::IntoIter;
/// // elements to produce is the length of array down there: only arrays of
/// // equal lengths can be compared, so the const generic parameter `N` is
/// // inferred to be 5, thus creating an array of 5 elements.
+///
/// let array = core::array::from_fn(|i| i);
+/// // indexes are: 0 1 2 3 4
/// assert_eq!(array, [0, 1, 2, 3, 4]);
+///
+/// let array2: [usize; 8] = core::array::from_fn(|i| i * 2);
+/// // indexes are: 0 1 2 3 4 5 6 7
+/// assert_eq!(array2, [0, 2, 4, 6, 8, 10, 12, 14]);
+///
+/// let bool_arr = core::array::from_fn::<_, 5, _>(|i| i % 2 == 0);
+/// // indexes are: 0 1 2 3 4
+/// assert_eq!(bool_arr, [true, false, true, false, true]);
/// ```
#[inline]
#[stable(feature = "array_from_fn", since = "1.63.0")]
@@ -865,24 +876,6 @@ where
return Ok(Try::from_output(unsafe { mem::zeroed() }));
}
- struct Guard<'a, T, const N: usize> {
- array_mut: &'a mut [MaybeUninit<T>; N],
- initialized: usize,
- }
-
- impl<T, const N: usize> Drop for Guard<'_, T, N> {
- fn drop(&mut self) {
- debug_assert!(self.initialized <= N);
-
- // SAFETY: this slice will contain only initialized objects.
- unsafe {
- crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
- &mut self.array_mut.get_unchecked_mut(..self.initialized),
- ));
- }
- }
- }
-
let mut array = MaybeUninit::uninit_array::<N>();
let mut guard = Guard { array_mut: &mut array, initialized: 0 };
@@ -896,13 +889,11 @@ where
ControlFlow::Continue(elem) => elem,
};
- // SAFETY: `guard.initialized` starts at 0, is increased by one in the
- // loop and the loop is aborted once it reaches N (which is
- // `array.len()`).
+ // SAFETY: `guard.initialized` starts at 0, which means push can be called
+ // at most N times, which this loop does.
unsafe {
- guard.array_mut.get_unchecked_mut(guard.initialized).write(item);
+ guard.push_unchecked(item);
}
- guard.initialized += 1;
}
None => {
let alive = 0..guard.initialized;
@@ -920,6 +911,55 @@ where
Ok(Try::from_output(output))
}
+/// Panic guard for incremental initialization of arrays.
+///
+/// Disarm the guard with `mem::forget` once the array has been initialized.
+///
+/// # Safety
+///
+/// All write accesses to this structure are unsafe and must maintain a correct
+/// count of `initialized` elements.
+///
+/// To minimize indirection, fields are still pub, but callers should at least use
+/// `push_unchecked` to signal that something unsafe is going on.
+pub(crate) struct Guard<'a, T, const N: usize> {
+ /// The array to be initialized.
+ pub array_mut: &'a mut [MaybeUninit<T>; N],
+ /// The number of items that have been initialized so far.
+ pub initialized: usize,
+}
+
+impl<T, const N: usize> Guard<'_, T, N> {
+ /// Adds an item to the array and updates the initialized item counter.
+ ///
+ /// # Safety
+ ///
+ /// No more than N elements may be initialized.
+ #[inline]
+ pub unsafe fn push_unchecked(&mut self, item: T) {
+ // SAFETY: If `initialized` was correct before and the caller does not
+ // invoke this method more than N times then writes will be in-bounds
+ // and slots will not be initialized more than once.
+ unsafe {
+ self.array_mut.get_unchecked_mut(self.initialized).write(item);
+ self.initialized = self.initialized.unchecked_add(1);
+ }
+ }
+}
+
+impl<T, const N: usize> Drop for Guard<'_, T, N> {
+ fn drop(&mut self) {
+ debug_assert!(self.initialized <= N);
+
+ // SAFETY: this slice will contain only initialized objects.
+ unsafe {
+ crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(
+ &mut self.array_mut.get_unchecked_mut(..self.initialized),
+ ));
+ }
+ }
+}
+
/// Returns the next chunk of `N` items from the iterator or errors with an
/// iterator over the remainder. Used for `Iterator::next_chunk`.
#[inline]
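
The extracted `Guard` exists so that a panic in the element initializer cannot leak (or double-drop) already-initialized elements. A standalone sketch of the same pattern on stable Rust; `init_array` and its internals are illustrative, not the library's actual code:

```rust
use std::mem::{forget, MaybeUninit};
use std::ptr;

// If `f` panics partway through, the guard's Drop impl drops exactly the
// elements initialized so far; on success the guard is disarmed instead.
fn init_array<T, const N: usize>(mut f: impl FnMut(usize) -> T) -> [T; N] {
    struct Guard<'a, T, const N: usize> {
        array: &'a mut [MaybeUninit<T>; N],
        initialized: usize,
    }
    impl<T, const N: usize> Drop for Guard<'_, T, N> {
        fn drop(&mut self) {
            // SAFETY: exactly `initialized` leading elements are live.
            unsafe {
                let live = ptr::slice_from_raw_parts_mut(
                    self.array.as_mut_ptr().cast::<T>(),
                    self.initialized,
                );
                ptr::drop_in_place(live);
            }
        }
    }

    let mut array: [MaybeUninit<T>; N] = std::array::from_fn(|_| MaybeUninit::uninit());
    let mut guard = Guard { array: &mut array, initialized: 0 };
    for i in 0..N {
        guard.array[i].write(f(i)); // may panic; the guard cleans up if so
        guard.initialized += 1;
    }
    forget(guard); // fully initialized: disarm the guard
    // SAFETY: all N elements were initialized in the loop above.
    unsafe { ptr::read(array.as_ptr().cast::<[T; N]>()) }
}

fn main() {
    let squares: [usize; 5] = init_array(|i| i * i);
    assert_eq!(squares, [0, 1, 4, 9, 16]);
}
```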
diff --git a/library/core/src/async_iter/async_iter.rs b/library/core/src/async_iter/async_iter.rs
index 016a3685e..12a47f9fc 100644
--- a/library/core/src/async_iter/async_iter.rs
+++ b/library/core/src/async_iter/async_iter.rs
@@ -2,7 +2,7 @@ use crate::ops::DerefMut;
use crate::pin::Pin;
use crate::task::{Context, Poll};
-/// An interface for dealing with asynchronous iterators.
+/// A trait for dealing with asynchronous iterators.
///
/// This is the main async iterator trait. For more about the concept of async iterators
/// generally, please see the [module-level documentation]. In particular, you
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 7bf32cb0d..47cce2aa3 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -1025,7 +1025,7 @@ impl<T: ?Sized> RefCell<T> {
///
/// Since this method borrows `RefCell` mutably, it is statically guaranteed
/// that no borrows to the underlying data exist. The dynamic checks inherent
- /// in [`borrow_mut`] and most other methods of `RefCell` are therefor
+ /// in [`borrow_mut`] and most other methods of `RefCell` are therefore
/// unnecessary.
///
/// This method can only be called if `RefCell` can be mutably borrowed,
@@ -1856,7 +1856,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// }
/// ```
///
-/// Coverting in the other direction from a `&mut T`
+/// Converting in the other direction from a `&mut T`
/// to an `&UnsafeCell<T>` is allowed:
///
/// ```rust
@@ -1936,7 +1936,7 @@ impl<T> UnsafeCell<T> {
/// Constructs a new instance of `UnsafeCell` which will wrap the specified
/// value.
///
- /// All access to the inner value through methods is `unsafe`.
+ /// All access to the inner value through `&UnsafeCell<T>` requires `unsafe` code.
///
/// # Examples
///
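
The `get_mut` behavior the corrected sentence describes is easy to demonstrate: holding `&mut RefCell<T>` statically rules out any other borrow, so no runtime borrow flag is consulted.

```rust
use std::cell::RefCell;

fn main() {
    let mut cell = RefCell::new(5);
    // No dynamic check here: the &mut receiver proves exclusive access.
    *cell.get_mut() += 1;
    assert_eq!(*cell.borrow(), 6);
}
```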
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
index 7844be5f7..b355d94ce 100644
--- a/library/core/src/cell/lazy.rs
+++ b/library/core/src/cell/lazy.rs
@@ -4,6 +4,10 @@ use crate::ops::Deref;
/// A value which is initialized on the first access.
///
+/// For a thread-safe version of this struct, see [`std::sync::LazyLock`].
+///
+/// [`std::sync::LazyLock`]: ../../std/sync/struct.LazyLock.html
+///
/// # Examples
///
/// ```
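
A short usage sketch for the type this doc belongs to; at the time of this release `LazyCell` was nightly-only behind the `once_cell` feature:

```rust
#![feature(once_cell)] // nightly-only in this release

use std::cell::LazyCell;

fn main() {
    let lazy = LazyCell::new(|| {
        println!("initializing"); // runs once, on first access
        92
    });
    assert_eq!(*lazy, 92); // first deref runs the closure
    assert_eq!(*lazy, 92); // later derefs reuse the stored value
}
```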
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 3c39394dd..8c01643c7 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -4,8 +4,14 @@ use crate::mem;
/// A cell which can be written to only once.
///
-/// Unlike `RefCell`, a `OnceCell` only provides shared `&T` references to its value.
-/// Unlike `Cell`, a `OnceCell` doesn't require copying or replacing the value to access it.
+/// Unlike [`RefCell`], a `OnceCell` only provides shared `&T` references to its value.
+/// Unlike [`Cell`], a `OnceCell` doesn't require copying or replacing the value to access it.
+///
+/// For a thread-safe version of this struct, see [`std::sync::OnceLock`].
+///
+/// [`RefCell`]: crate::cell::RefCell
+/// [`Cell`]: crate::cell::Cell
+/// [`std::sync::OnceLock`]: ../../std/sync/struct.OnceLock.html
///
/// # Examples
///
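
Likewise for `OnceCell`, nightly-only behind `once_cell` in this release: the first `set` wins and later writes are rejected.

```rust
#![feature(once_cell)] // nightly-only in this release

use std::cell::OnceCell;

fn main() {
    let cell: OnceCell<String> = OnceCell::new();
    assert!(cell.get().is_none());
    cell.set("hello".to_string()).unwrap(); // first write succeeds
    assert!(cell.set("world".to_string()).is_err()); // second is rejected
    assert_eq!(cell.get().map(String::as_str), Some("hello"));
}
```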
diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs
index 7c5f82f5e..f1a51a550 100644
--- a/library/core/src/char/convert.rs
+++ b/library/core/src/char/convert.rs
@@ -18,7 +18,6 @@ pub(super) const fn from_u32(i: u32) -> Option<char> {
}
/// Converts a `u32` to a `char`, ignoring validity. See [`char::from_u32_unchecked`].
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
#[inline]
#[must_use]
pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char {
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index bb8359936..3e7383b4c 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -140,7 +140,7 @@ impl char {
/// assert_eq!(None, c);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_u32(i: u32) -> Option<char> {
@@ -183,7 +183,7 @@ impl char {
/// assert_eq!('❤', c);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_unstable(feature = "const_char_from_u32_unchecked", issue = "89259")]
#[must_use]
#[inline]
pub const unsafe fn from_u32_unchecked(i: u32) -> char {
@@ -241,7 +241,7 @@ impl char {
/// let _c = char::from_digit(1, 37);
/// ```
#[stable(feature = "assoc_char_funcs", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
@@ -338,7 +338,7 @@ impl char {
/// let _ = '1'.to_digit(37);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+ #[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs
index b34a71216..af98059cf 100644
--- a/library/core/src/char/mod.rs
+++ b/library/core/src/char/mod.rs
@@ -110,7 +110,7 @@ pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::Into
/// Converts a `u32` to a `char`. Use [`char::from_u32`] instead.
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_u32(i: u32) -> Option<char> {
@@ -120,7 +120,7 @@ pub const fn from_u32(i: u32) -> Option<char> {
/// Converts a `u32` to a `char`, ignoring validity. Use [`char::from_u32_unchecked`]
/// instead.
#[stable(feature = "char_from_unchecked", since = "1.5.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_unstable(feature = "const_char_from_u32_unchecked", issue = "89259")]
#[must_use]
#[inline]
pub const unsafe fn from_u32_unchecked(i: u32) -> char {
@@ -130,7 +130,7 @@ pub const unsafe fn from_u32_unchecked(i: u32) -> char {
/// Converts a digit in the given radix to a `char`. Use [`char::from_digit`] instead.
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_char_convert", issue = "89259")]
+#[rustc_const_stable(feature = "const_char_convert", since = "1.67.0")]
#[must_use]
#[inline]
pub const fn from_digit(num: u32, radix: u32) -> Option<char> {
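
With the `rustc_const_stable` markers above, these conversions become callable in `const` contexts on stable 1.67:

```rust
// All three were runtime-only on stable before this release.
const HEART: Option<char> = char::from_u32(0x2764);
const FOUR: Option<char> = char::from_digit(4, 10);
const ONE: Option<u32> = '1'.to_digit(10);

fn main() {
    assert_eq!(HEART, Some('❤'));
    assert_eq!(FOUR, Some('4'));
    assert_eq!(ONE, Some(1));
}
```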
diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs
index 06dca7e59..398437d9a 100644
--- a/library/core/src/clone.rs
+++ b/library/core/src/clone.rs
@@ -176,7 +176,6 @@ pub struct AssertParamIsCopy<T: Copy + ?Sized> {
/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
/// in `rustc_trait_selection`.
mod impls {
-
use super::Clone;
macro_rules! impl_clone {
@@ -185,7 +184,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl const Clone for $t {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -213,7 +212,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for *const T {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -222,7 +221,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for *mut T {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
@@ -232,7 +231,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for &T {
- #[inline]
+ #[inline(always)]
#[rustc_diagnostic_item = "noop_method_clone"]
fn clone(&self) -> Self {
*self
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index f0fa2e1d2..949896e57 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -24,12 +24,12 @@
use crate::const_closure::ConstFnMutClosure;
use crate::marker::Destruct;
+#[cfg(bootstrap)]
use crate::marker::StructuralPartialEq;
use self::Ordering::*;
-/// Trait for equality comparisons which are [partial equivalence
-/// relations](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
+/// Trait for equality comparisons.
///
/// `x.eq(y)` can also be written `x == y`, and `x.ne(y)` can be written `x != y`.
/// We use the easier-to-read infix notation in the remainder of this documentation.
@@ -37,6 +37,8 @@ use self::Ordering::*;
/// This trait allows for partial equality, for types that do not have a full
/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
/// so floating point types implement `PartialEq` but not [`trait@Eq`].
+/// Formally speaking, when `Rhs == Self`, this trait corresponds to a [partial equivalence
+/// relation](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
///
/// Implementations must ensure that `eq` and `ne` are consistent with each other:
///
@@ -331,6 +333,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> {
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, Eq, Debug, Hash)]
+#[cfg_attr(not(bootstrap), derive_const(PartialOrd, Ord, PartialEq))]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
@@ -877,10 +880,12 @@ pub macro Ord($item:item) {
}
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg(bootstrap)]
impl StructuralPartialEq for Ordering {}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const PartialEq for Ordering {
#[inline]
fn eq(&self, other: &Self) -> bool {
@@ -890,6 +895,7 @@ impl const PartialEq for Ordering {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const Ord for Ordering {
#[inline]
fn cmp(&self, other: &Ordering) -> Ordering {
@@ -899,6 +905,7 @@ impl const Ord for Ordering {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+#[cfg(bootstrap)]
impl const PartialOrd for Ordering {
#[inline]
fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
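
The relocated "partial equivalence relation" wording is easiest to see with floats, where NaN breaks reflexivity:

```rust
fn main() {
    let nan = f64::NAN;
    // f64 implements `PartialEq` but not `Eq`: equality is only a
    // partial equivalence relation because NaN is not equal to itself.
    assert!(nan != nan);
    assert!(!(nan == nan));
}
```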
diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs
index 9e9c02093..151c8e6d8 100644
--- a/library/core/src/const_closure.rs
+++ b/library/core/src/const_closure.rs
@@ -1,4 +1,6 @@
use crate::marker::Destruct;
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
/// Struct representing a closure with mutably borrowed data.
///
@@ -44,6 +46,7 @@ impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData,
macro_rules! impl_fn_mut_tuple {
($($var:ident)*) => {
+ #[cfg(bootstrap)]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -56,6 +59,7 @@ macro_rules! impl_fn_mut_tuple {
self.call_mut(args)
}
}
+ #[cfg(bootstrap)]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -68,6 +72,32 @@ macro_rules! impl_fn_mut_tuple {
(self.func)(($($var),*), args)
}
}
+ #[cfg(not(bootstrap))]
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
+ FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct,
+ {
+ type Output = ClosureReturnValue;
+
+ extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output {
+ self.call_mut(args)
+ }
+ }
+ #[cfg(not(bootstrap))]
+ #[allow(unused_parens)]
+ impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
+ FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
+ where
+ Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output {
+ #[allow(non_snake_case)]
+ let ($($var),*) = &mut self.data;
+ (self.func)(($($var),*), args)
+ }
+ }
};
}
impl_fn_mut_tuple!(A);
diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs
index 33493964b..f95b880df 100644
--- a/library/core/src/convert/mod.rs
+++ b/library/core/src/convert/mod.rs
@@ -99,7 +99,7 @@ pub use num::FloatToInt;
/// ```
#[stable(feature = "convert_id", since = "1.33.0")]
#[rustc_const_stable(feature = "const_identity", since = "1.33.0")]
-#[inline]
+#[inline(always)]
pub const fn identity<T>(x: T) -> T {
x
}
@@ -789,6 +789,7 @@ where
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsRef<[T]> for [T] {
+ #[inline(always)]
fn as_ref(&self) -> &[T] {
self
}
@@ -796,6 +797,7 @@ impl<T> AsRef<[T]> for [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsMut<[T]> for [T] {
+ #[inline(always)]
fn as_mut(&mut self) -> &mut [T] {
self
}
@@ -803,7 +805,7 @@ impl<T> AsMut<[T]> for [T] {
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for str {
- #[inline]
+ #[inline(always)]
fn as_ref(&self) -> &str {
self
}
@@ -811,7 +813,7 @@ impl AsRef<str> for str {
#[stable(feature = "as_mut_str_for_str", since = "1.51.0")]
impl AsMut<str> for str {
- #[inline]
+ #[inline(always)]
fn as_mut(&mut self) -> &mut str {
self
}
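
`identity` being `#[inline(always)]` matters because it is routinely passed as a function argument in hot iterator pipelines, for example:

```rust
use std::convert::identity;

fn main() {
    // A common idiom: drop the `None`s out of an iterator of Options.
    let values: Vec<i32> = vec![Some(1), None, Some(3)]
        .into_iter()
        .filter_map(identity)
        .collect();
    assert_eq!(values, vec![1, 3]);
}
```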
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 4fa5d129b..9c0d7e9a1 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -49,7 +49,7 @@ macro_rules! impl_from {
// Rustdocs on the impl block show a "[+] show undocumented items" toggle.
// Rustdocs on functions do not.
#[doc = $doc]
- #[inline]
+ #[inline(always)]
fn from(small: $Small) -> Self {
small as Self
}
diff --git a/library/core/src/default.rs b/library/core/src/default.rs
index a5b4e9655..d96b53de0 100644
--- a/library/core/src/default.rs
+++ b/library/core/src/default.rs
@@ -99,7 +99,7 @@
/// ```
#[cfg_attr(not(test), rustc_diagnostic_item = "Default")]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Default: Sized {
/// Returns the "default value" for a type.
///
diff --git a/library/core/src/error.md b/library/core/src/error.md
index 891abebbf..78808d489 100644
--- a/library/core/src/error.md
+++ b/library/core/src/error.md
@@ -46,7 +46,7 @@ These functions are equivalent, they either return the inner value if the
`Result` is `Ok` or panic if the `Result` is `Err` printing the inner error
as the source. The only difference between them is that with `expect` you
provide a panic error message to be printed alongside the source, whereas
-`unwrap` has a default message indicating only that you unwraped an `Err`.
+`unwrap` has a default message indicating only that you unwrapped an `Err`.
Of the two, `expect` is generally preferred since its `msg` field allows you
to convey your intent and assumptions which makes tracking down the source
diff --git a/library/core/src/error.rs b/library/core/src/error.rs
index 2738b4994..7152300ab 100644
--- a/library/core/src/error.rs
+++ b/library/core/src/error.rs
@@ -1,5 +1,5 @@
#![doc = include_str!("error.md")]
-#![unstable(feature = "error_in_core", issue = "none")]
+#![unstable(feature = "error_in_core", issue = "103765")]
#[cfg(test)]
mod tests;
@@ -506,3 +506,6 @@ impl Error for crate::ffi::FromBytesWithNulError {
#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
impl Error for crate::ffi::FromBytesUntilNulError {}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> Error for crate::slice::GetManyMutError<N> {}
diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs
index 8923f548a..15dd9ea7e 100644
--- a/library/core/src/ffi/c_str.rs
+++ b/library/core/src/ffi/c_str.rs
@@ -13,9 +13,9 @@ use crate::str;
/// array of bytes. It can be constructed safely from a <code>&[[u8]]</code>
/// slice, or unsafely from a raw `*const c_char`. It can then be
/// converted to a Rust <code>&[str]</code> by performing UTF-8 validation, or
-/// into an owned `CString`.
+/// into an owned [`CString`].
///
-/// `&CStr` is to `CString` as <code>&[str]</code> is to `String`: the former
+/// `&CStr` is to [`CString`] as <code>&[str]</code> is to [`String`]: the former
/// in each pair are borrowed references; the latter are owned
/// strings.
///
@@ -24,6 +24,9 @@ use crate::str;
/// functions may leverage the unsafe [`CStr::from_ptr`] constructor to provide
/// a safe interface to other consumers.
///
+/// [`CString`]: ../../std/ffi/struct.CString.html
+/// [`String`]: ../../std/string/struct.String.html
+///
/// # Examples
///
/// Inspecting a foreign C string:
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index c8d285505..48b617743 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -510,7 +510,7 @@ impl<'a> Arguments<'a> {
/// assert_eq!(format_args!("{}", 1).as_str(), None);
/// ```
#[stable(feature = "fmt_as_str", since = "1.52.0")]
- #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "none")]
+ #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "103900")]
#[must_use]
#[inline]
pub const fn as_str(&self) -> Option<&'static str> {
@@ -1054,7 +1054,6 @@ pub trait UpperHex {
pub trait Pointer {
/// Formats the value using the given formatter.
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_diagnostic_item = "pointer_trait_fmt"]
fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index 6487aa088..f2b961d62 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -9,12 +9,8 @@
//! [`await`]: ../../std/keyword.await.html
//! [async book]: https://rust-lang.github.io/async-book/
-use crate::{
- ops::{Generator, GeneratorState},
- pin::Pin,
- ptr::NonNull,
- task::{Context, Poll},
-};
+use crate::ptr::NonNull;
+use crate::task::Context;
mod future;
mod into_future;
@@ -48,6 +44,7 @@ pub use poll_fn::{poll_fn, PollFn};
/// non-Send/Sync as well, and we don't want that.
///
/// It also simplifies the HIR lowering of `.await`.
+#[cfg_attr(not(bootstrap), lang = "ResumeTy")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[derive(Debug, Copy, Clone)]
@@ -64,15 +61,21 @@ unsafe impl Sync for ResumeTy {}
/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
// This is `const` to avoid extra errors after we recover from `const async fn`
-#[lang = "from_generator"]
+#[cfg_attr(bootstrap, lang = "from_generator")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
#[inline]
pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return>
where
- T: Generator<ResumeTy, Yield = ()>,
+ T: crate::ops::Generator<ResumeTy, Yield = ()>,
{
+ use crate::{
+ ops::{Generator, GeneratorState},
+ pin::Pin,
+ task::Poll,
+ };
+
#[rustc_diagnostic_item = "gen_future"]
struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T);
@@ -82,6 +85,7 @@ where
impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> {
type Output = T::Return;
+ #[track_caller]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection.
let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) };
@@ -108,3 +112,11 @@ pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
// that fulfills all the requirements for a mutable reference.
unsafe { &mut *cx.0.as_ptr().cast() }
}
+
+#[cfg_attr(not(bootstrap), lang = "identity_future")]
+#[doc(hidden)]
+#[unstable(feature = "gen_future", issue = "50547")]
+#[inline]
+pub const fn identity_future<O, Fut: Future<Output = O>>(f: Fut) -> Fut {
+ f
+}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index aa13435e6..c755afa39 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -86,7 +86,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::fmt;
-use crate::marker;
+use crate::intrinsics::const_eval_select;
+use crate::marker::{self, Destruct};
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(deprecated)]
@@ -183,6 +184,7 @@ mod sip;
/// [impl]: ../../std/primitive.str.html#impl-Hash-for-str
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Hash"]
+#[const_trait]
pub trait Hash {
/// Feeds this value into the given [`Hasher`].
///
@@ -234,13 +236,25 @@ pub trait Hash {
/// [`hash`]: Hash::hash
/// [`hash_slice`]: Hash::hash_slice
#[stable(feature = "hash_slice", since = "1.3.0")]
- fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
+ fn hash_slice<H: ~const Hasher>(data: &[Self], state: &mut H)
where
Self: Sized,
{
- for piece in data {
- piece.hash(state);
+ //FIXME(const_trait_impl): revert to only a for loop
+ fn rt<T: Hash, H: Hasher>(data: &[T], state: &mut H) {
+ for piece in data {
+ piece.hash(state)
+ }
+ }
+ const fn ct<T: ~const Hash, H: ~const Hasher>(data: &[T], state: &mut H) {
+ let mut i = 0;
+ while i < data.len() {
+ data[i].hash(state);
+ i += 1;
+ }
}
+ // SAFETY: same behavior, CT just uses while instead of for
+ unsafe { const_eval_select((data, state), ct, rt) };
}
}
@@ -313,6 +327,7 @@ pub use macros::Hash;
/// [`write_u8`]: Hasher::write_u8
/// [`write_u32`]: Hasher::write_u32
#[stable(feature = "rust1", since = "1.0.0")]
+#[const_trait]
pub trait Hasher {
/// Returns the hash value for the values written so far.
///
@@ -558,7 +573,8 @@ pub trait Hasher {
}
#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
-impl<H: Hasher + ?Sized> Hasher for &mut H {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl<H: ~const Hasher + ?Sized> const Hasher for &mut H {
fn finish(&self) -> u64 {
(**self).finish()
}
@@ -638,6 +654,7 @@ impl<H: Hasher + ?Sized> Hasher for &mut H {
/// [`build_hasher`]: BuildHasher::build_hasher
/// [`HashMap`]: ../../std/collections/struct.HashMap.html
#[stable(since = "1.7.0", feature = "build_hasher")]
+#[const_trait]
pub trait BuildHasher {
/// Type of the hasher that will be created.
#[stable(since = "1.7.0", feature = "build_hasher")]
@@ -698,9 +715,10 @@ pub trait BuildHasher {
/// );
/// ```
#[unstable(feature = "build_hasher_simple_hash_one", issue = "86161")]
- fn hash_one<T: Hash>(&self, x: T) -> u64
+ fn hash_one<T: ~const Hash + ~const Destruct>(&self, x: T) -> u64
where
Self: Sized,
+ Self::Hasher: ~const Hasher + ~const Destruct,
{
let mut hasher = self.build_hasher();
x.hash(&mut hasher);
@@ -764,7 +782,8 @@ impl<H> fmt::Debug for BuildHasherDefault<H> {
}
#[stable(since = "1.7.0", feature = "build_hasher")]
-impl<H: Default + Hasher> BuildHasher for BuildHasherDefault<H> {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl<H: ~const Default + Hasher> const BuildHasher for BuildHasherDefault<H> {
type Hasher = H;
fn build_hasher(&self) -> H {
@@ -806,14 +825,15 @@ mod impls {
macro_rules! impl_write {
($(($ty:ident, $meth:ident),)*) => {$(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for $ty {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for $ty {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
#[inline]
- fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
+ fn hash_slice<H: ~const Hasher>(data: &[$ty], state: &mut H) {
let newlen = data.len() * mem::size_of::<$ty>();
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
@@ -842,33 +862,37 @@ mod impls {
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for bool {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for bool {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u8(*self as u8)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for char {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for char {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u32(*self as u32)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for str {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for str {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_str(self);
}
}
#[stable(feature = "never_hash", since = "1.29.0")]
- impl Hash for ! {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for ! {
#[inline]
- fn hash<H: Hasher>(&self, _: &mut H) {
+ fn hash<H: ~const Hasher>(&self, _: &mut H) {
*self
}
}
@@ -876,9 +900,10 @@ mod impls {
macro_rules! impl_hash_tuple {
() => (
#[stable(feature = "rust1", since = "1.0.0")]
- impl Hash for () {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl const Hash for () {
#[inline]
- fn hash<H: Hasher>(&self, _state: &mut H) {}
+ fn hash<H: ~const Hasher>(&self, _state: &mut H) {}
}
);
@@ -886,10 +911,11 @@ mod impls {
maybe_tuple_doc! {
$($name)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($name: Hash),+> Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<$($name: ~const Hash),+> const Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
#[allow(non_snake_case)]
#[inline]
- fn hash<S: Hasher>(&self, state: &mut S) {
+ fn hash<S: ~const Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
@@ -932,24 +958,27 @@ mod impls {
impl_hash_tuple! { T B C D E F G H I J K L }
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: Hash> Hash for [T] {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ~const Hash> const Hash for [T] {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_length_prefix(self.len());
Hash::hash_slice(self, state)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized + Hash> Hash for &T {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ?Sized + ~const Hash> const Hash for &T {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
#[stable(feature = "rust1", since = "1.0.0")]
- impl<T: ?Sized + Hash> Hash for &mut T {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ impl<T: ?Sized + ~const Hash> const Hash for &mut T {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
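
None of the `~const` plumbing changes the runtime API; ordinary hashing on stable still looks like this:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn main() {
    let mut hasher = DefaultHasher::new();
    7u32.hash(&mut hasher);    // dispatches to Hasher::write_u32
    "seven".hash(&mut hasher); // str hashing goes through Hasher::write_str
    let digest: u64 = hasher.finish();
    println!("{digest:x}");
}
```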
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 81bf1dfdf..7f8287bf5 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -118,7 +118,7 @@ macro_rules! load_int_le {
/// Safety: this performs unchecked indexing of `buf` at `start..start+len`, so
/// that must be in-bounds.
#[inline]
-unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+const unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
debug_assert!(len < 8);
let mut i = 0; // current byte index (from LSB) in the output u64
let mut out = 0;
@@ -138,7 +138,8 @@ unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
i += 1;
}
- debug_assert_eq!(i, len);
+ //FIXME(fee1-dead): use debug_assert_eq
+ debug_assert!(i == len);
out
}
@@ -150,8 +151,9 @@ impl SipHasher {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
- pub fn new() -> SipHasher {
+ pub const fn new() -> SipHasher {
SipHasher::new_with_keys(0, 0)
}
@@ -162,8 +164,9 @@ impl SipHasher {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
- pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+ pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
SipHasher(SipHasher24 { hasher: Hasher::new_with_keys(key0, key1) })
}
}
@@ -176,7 +179,8 @@ impl SipHasher13 {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
- pub fn new() -> SipHasher13 {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ pub const fn new() -> SipHasher13 {
SipHasher13::new_with_keys(0, 0)
}
@@ -187,14 +191,15 @@ impl SipHasher13 {
since = "1.13.0",
note = "use `std::collections::hash_map::DefaultHasher` instead"
)]
- pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+ pub const fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
}
}
impl<S: Sip> Hasher<S> {
#[inline]
- fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+ const fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
let mut state = Hasher {
k0: key0,
k1: key1,
@@ -209,7 +214,7 @@ impl<S: Sip> Hasher<S> {
}
#[inline]
- fn reset(&mut self) {
+ const fn reset(&mut self) {
self.length = 0;
self.state.v0 = self.k0 ^ 0x736f6d6570736575;
self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
@@ -220,7 +225,8 @@ impl<S: Sip> Hasher<S> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl super::Hasher for SipHasher {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const super::Hasher for SipHasher {
#[inline]
fn write(&mut self, msg: &[u8]) {
self.0.hasher.write(msg)
@@ -238,7 +244,8 @@ impl super::Hasher for SipHasher {
}
#[unstable(feature = "hashmap_internals", issue = "none")]
-impl super::Hasher for SipHasher13 {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const super::Hasher for SipHasher13 {
#[inline]
fn write(&mut self, msg: &[u8]) {
self.hasher.write(msg)
@@ -255,7 +262,7 @@ impl super::Hasher for SipHasher13 {
}
}
-impl<S: Sip> super::Hasher for Hasher<S> {
+impl<S: ~const Sip> const super::Hasher for Hasher<S> {
// Note: no integer hashing methods (`write_u*`, `write_i*`) are defined
// for this type. We could add them, copy the `short_write` implementation
// in librustc_data_structures/sip128.rs, and add `write_u*`/`write_i*`
@@ -335,7 +342,7 @@ impl<S: Sip> super::Hasher for Hasher<S> {
}
}
-impl<S: Sip> Clone for Hasher<S> {
+impl<S: Sip> const Clone for Hasher<S> {
#[inline]
fn clone(&self) -> Hasher<S> {
Hasher {
@@ -359,6 +366,7 @@ impl<S: Sip> Default for Hasher<S> {
}
#[doc(hidden)]
+#[const_trait]
trait Sip {
fn c_rounds(_: &mut State);
fn d_rounds(_: &mut State);
@@ -367,7 +375,7 @@ trait Sip {
#[derive(Debug, Clone, Default)]
struct Sip13Rounds;
-impl Sip for Sip13Rounds {
+impl const Sip for Sip13Rounds {
#[inline]
fn c_rounds(state: &mut State) {
compress!(state);
@@ -384,7 +392,7 @@ impl Sip for Sip13Rounds {
#[derive(Debug, Clone, Default)]
struct Sip24Rounds;
-impl Sip for Sip24Rounds {
+impl const Sip for Sip24Rounds {
#[inline]
fn c_rounds(state: &mut State) {
compress!(state);
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index c53175ba4..e8d724ab1 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -160,7 +160,7 @@ pub const unsafe fn unreachable_unchecked() -> ! {
/// ```
///
/// [`thread::yield_now`]: ../../std/thread/fn.yield_now.html
-#[inline]
+#[inline(always)]
#[stable(feature = "renamed_spin_loop", since = "1.49.0")]
pub fn spin_loop() {
#[cfg(target_arch = "x86")]
@@ -345,6 +345,7 @@ pub const fn black_box<T>(dummy: T) -> T {
#[unstable(feature = "hint_must_use", issue = "94745")]
#[rustc_const_unstable(feature = "hint_must_use", issue = "94745")]
#[must_use] // <-- :)
+#[inline(always)]
pub const fn must_use<T>(value: T) -> T {
value
}
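
A typical caller of `spin_loop`, which lowers to PAUSE/YIELD-style hints on targets that have them (per the cfg'd bodies in this function):

```rust
use std::hint;
use std::sync::atomic::{AtomicBool, Ordering};

// Busy-wait until another thread sets `flag`, hinting the CPU each spin.
fn wait_until_set(flag: &AtomicBool) {
    while !flag.load(Ordering::Acquire) {
        hint::spin_loop();
    }
}

fn main() {
    let flag = AtomicBool::new(true);
    wait_until_set(&flag); // already set, returns immediately
}
```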
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 1dc79afe8..7ed7d767f 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -55,8 +55,13 @@
#![allow(missing_docs)]
use crate::marker::DiscriminantKind;
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
use crate::mem;
+#[cfg(not(bootstrap))]
+pub mod mir;
+
// These imports are used for simplifying intra-doc links
#[allow(unused_imports)]
#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
@@ -788,7 +793,7 @@ extern "rust-intrinsic" {
/// uninitialized at that point in the control flow.
///
/// This intrinsic should not be used outside of the compiler.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rustc_peek<T>(_: T) -> T;
/// Aborts the execution of the process.
@@ -806,7 +811,7 @@ extern "rust-intrinsic" {
/// On Unix, the
/// process will probably terminate with a signal like `SIGABRT`, `SIGILL`, `SIGTRAP`, `SIGSEGV` or
/// `SIGBUS`. The precise behaviour is not guaranteed and not stable.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn abort() -> !;
/// Informs the optimizer that this point in the code is not reachable,
@@ -845,7 +850,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn likely(b: bool) -> bool;
/// Hints to the compiler that branch condition is likely to be false.
@@ -860,7 +865,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_likely", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn unlikely(b: bool) -> bool;
/// Executes a breakpoint trap, for inspection by a debugger.
@@ -880,7 +885,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn size_of<T>() -> usize;
/// The minimum alignment of a type.
@@ -892,7 +897,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
///
@@ -921,7 +926,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn type_name<T: ?Sized>() -> &'static str;
/// Gets an identifier which is globally unique to the specified type. This
@@ -935,7 +940,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
@@ -943,7 +948,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_stable(feature = "const_assert_type", since = "1.59.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_inhabited<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
@@ -951,7 +956,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_zero_valid<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` has invalid
@@ -959,7 +964,7 @@ extern "rust-intrinsic" {
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn assert_uninit_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
@@ -971,7 +976,7 @@ extern "rust-intrinsic" {
///
/// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn caller_location() -> &'static crate::panic::Location<'static>;
/// Moves a value out of scope without running drop glue.
@@ -984,7 +989,7 @@ extern "rust-intrinsic" {
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_intrinsic_forget", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn forget<T: ?Sized>(_: T);
/// Reinterprets the bits of a value of one type as another type.
@@ -1264,7 +1269,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
#[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn needs_drop<T: ?Sized>() -> bool;
/// Calculates the offset from a pointer.
@@ -1309,7 +1314,7 @@ extern "rust-intrinsic" {
/// any safety invariants.
///
/// Consider using [`pointer::mask`] instead.
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ptr_mask<T>(ptr: *const T, mask: usize) -> *const T;
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
@@ -1501,7 +1506,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::min`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn minnumf32(x: f32, y: f32) -> f32;
/// Returns the minimum of two `f64` values.
///
@@ -1512,7 +1517,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::min`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn minnumf64(x: f64, y: f64) -> f64;
/// Returns the maximum of two `f32` values.
///
@@ -1523,7 +1528,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f32::max`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn maxnumf32(x: f32, y: f32) -> f32;
/// Returns the maximum of two `f64` values.
///
@@ -1534,7 +1539,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is
/// [`f64::max`]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn maxnumf64(x: f64, y: f64) -> f64;
/// Copies the sign from `y` to `x` for `f32` values.
@@ -1655,7 +1660,7 @@ extern "rust-intrinsic" {
/// primitives via the `count_ones` method. For example,
/// [`u32::count_ones`]
#[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ctpop<T: Copy>(x: T) -> T;
/// Returns the number of leading unset bits (zeroes) in an integer type `T`.
@@ -1693,7 +1698,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_leading, 16);
/// ```
#[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ctlz<T: Copy>(x: T) -> T;
/// Like `ctlz`, but extra-unsafe as it returns `undef` when
@@ -1750,7 +1755,7 @@ extern "rust-intrinsic" {
/// assert_eq!(num_trailing, 16);
/// ```
#[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn cttz<T: Copy>(x: T) -> T;
/// Like `cttz`, but extra-unsafe as it returns `undef` when
@@ -1783,7 +1788,7 @@ extern "rust-intrinsic" {
/// primitives via the `swap_bytes` method. For example,
/// [`u32::swap_bytes`]
#[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn bswap<T: Copy>(x: T) -> T;
/// Reverses the bits in an integer type `T`.
@@ -1797,7 +1802,7 @@ extern "rust-intrinsic" {
/// primitives via the `reverse_bits` method. For example,
/// [`u32::reverse_bits`]
#[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn bitreverse<T: Copy>(x: T) -> T;
/// Performs checked integer addition.
@@ -1811,7 +1816,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_add` method. For example,
/// [`u32::overflowing_add`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer subtraction
@@ -1825,7 +1830,7 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_sub` method. For example,
/// [`u32::overflowing_sub`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs checked integer multiplication
@@ -1839,13 +1844,14 @@ extern "rust-intrinsic" {
/// primitives via the `overflowing_mul` method. For example,
/// [`u32::overflowing_mul`]
#[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
/// Performs an exact division, resulting in undefined behavior where
/// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_exact_div", issue = "none")]
pub fn exact_div<T: Copy>(x: T, y: T) -> T;
/// Performs an unchecked division, resulting in undefined behavior
@@ -1914,7 +1920,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_left` method. For example,
/// [`u32::rotate_left`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
/// Performs rotate right.
@@ -1928,7 +1934,7 @@ extern "rust-intrinsic" {
/// primitives via the `rotate_right` method. For example,
/// [`u32::rotate_right`]
#[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
/// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
@@ -1942,7 +1948,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_add` method. For example,
/// [`u32::wrapping_add`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
/// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1955,7 +1961,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_sub` method. For example,
/// [`u32::wrapping_sub`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
/// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
///
@@ -1968,7 +1974,7 @@ extern "rust-intrinsic" {
/// primitives via the `wrapping_mul` method. For example,
/// [`u32::wrapping_mul`]
#[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
/// Computes `a + b`, saturating at numeric bounds.
@@ -1982,7 +1988,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_add` method. For example,
/// [`u32::saturating_add`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
/// Computes `a - b`, saturating at numeric bounds.
///
@@ -1995,7 +2001,7 @@ extern "rust-intrinsic" {
/// primitives via the `saturating_sub` method. For example,
/// [`u32::saturating_sub`]
#[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
/// Returns the value of the discriminant for the variant in 'v';
@@ -2008,7 +2014,7 @@ extern "rust-intrinsic" {
///
/// The stabilized version of this intrinsic is [`core::mem::discriminant`].
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
/// Returns the number of variants of the type `T` cast to a `usize`;
@@ -2021,7 +2027,7 @@ extern "rust-intrinsic" {
///
/// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn variant_count<T>() -> usize;
/// Rust's "try catch" construct which invokes the function pointer `try_fn`
@@ -2055,7 +2061,7 @@ extern "rust-intrinsic" {
/// Therefore, implementations must not require the user to uphold
/// any safety invariants.
#[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;
/// Allocates a block of memory at compile time.
@@ -2106,7 +2112,7 @@ extern "rust-intrinsic" {
///
/// [`std::hint::black_box`]: crate::hint::black_box
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
- #[cfg_attr(not(bootstrap), rustc_safe_intrinsic)]
+ #[rustc_safe_intrinsic]
pub fn black_box<T>(dummy: T) -> T;
/// `ptr` must point to a vtable.
@@ -2169,11 +2175,75 @@ extern "rust-intrinsic" {
/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
/// which violates the principle that a `const fn` must behave the same at
/// compile-time and at run-time. The unsafe code in crate B is fine.
+ #[cfg(bootstrap)]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
where
G: FnOnce<ARG, Output = RET>,
F: FnOnce<ARG, Output = RET>;
+
+ /// Selects which function to call depending on the context.
+ ///
+ /// If this function is evaluated at compile-time, then a call to this
+ /// intrinsic will be replaced with a call to `called_in_const`. It gets
+ /// replaced with a call to `called_at_rt` otherwise.
+ ///
+ /// # Type Requirements
+ ///
+ /// The two functions must be both function items. They cannot be function
+ /// pointers or closures. The first function must be a `const fn`.
+ ///
+ /// `arg` will be the tupled arguments passed to whichever of the two
+ /// functions is selected; therefore, both functions must accept the same
+ /// type of arguments. Both functions must return `RET`.
+ ///
+ /// # Safety
+ ///
+ /// The two functions must be observably equivalent. Safe code in other
+ /// crates may assume that calling a `const fn` at compile-time and at run-time
+ /// produces the same result. A function that produces a different result when
+ /// evaluated at run-time, or has any other observable side-effects, is
+ /// *unsound*.
+ ///
+ /// Here is an example of how this could cause a problem:
+ /// ```no_run
+ /// #![feature(const_eval_select)]
+ /// #![feature(core_intrinsics)]
+ /// use std::hint::unreachable_unchecked;
+ /// use std::intrinsics::const_eval_select;
+ ///
+ /// // Crate A
+ /// pub const fn inconsistent() -> i32 {
+ /// fn runtime() -> i32 { 1 }
+ /// const fn compiletime() -> i32 { 2 }
+ ///
+ /// unsafe {
+ /// // ⚠ This code violates the required equivalence of `compiletime`
+ /// // and `runtime`.
+ /// const_eval_select((), compiletime, runtime)
+ /// }
+ /// }
+ ///
+ /// // Crate B
+ /// const X: i32 = inconsistent();
+ /// let x = inconsistent();
+ /// if x != X { unsafe { unreachable_unchecked(); }}
+ /// ```
+ ///
+ /// This code causes Undefined Behavior when being run, since the
+ /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
+ /// which violates the principle that a `const fn` must behave the same at
+ /// compile-time and at run-time. The unsafe code in crate B is fine.
+ #[cfg(not(bootstrap))]
+ #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
+ pub fn const_eval_select<ARG: Tuple, F, G, RET>(
+ arg: ARG,
+ called_in_const: F,
+ called_at_rt: G,
+ ) -> RET
+ where
+ G: FnOnce<ARG, Output = RET>,
+ F: FnOnce<ARG, Output = RET>;
}
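For contrast with the unsound example in the docs above, here is a minimal sketch of a sound use, where the two branches are observably equivalent for every input (the function names are illustrative, not from the source):

```rust
#![feature(const_eval_select, core_intrinsics)]
use std::intrinsics::const_eval_select;

pub const fn times_four(x: u32) -> u32 {
    const fn compiletime(x: u32) -> u32 { x.wrapping_mul(4) }
    fn runtime(x: u32) -> u32 { x.wrapping_shl(2) }

    // SAFETY: both functions compute x * 4 (mod 2^32) and have no other
    // observable effects, so compile-time and run-time results agree.
    unsafe { const_eval_select((x,), compiletime, runtime) }
}
```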
// Some functions are defined here because they accidentally got made
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
new file mode 100644
index 000000000..8ba1c1228
--- /dev/null
+++ b/library/core/src/intrinsics/mir.rs
@@ -0,0 +1,289 @@
+//! Rustc internal tooling for hand-writing MIR.
+//!
+//! If for some reason you are not writing rustc tests and have found yourself considering using
+//! this feature, turn back. This is *exceptionally* unstable. There is no attempt at all to make
+//! anything work besides those things which the rustc test suite happened to need. If you make a
+//! typo you'll probably ICE. Really, this is not the solution to your problems. Consider instead
+//! supporting the [stable MIR project group](https://github.com/rust-lang/project-stable-mir).
+//!
+//! The documentation for this module describes how to use this feature. If you are interested in
+//! hacking on the implementation, most of that documentation lives at
+//! `rustc_mir_build/src/build/custom/mod.rs`.
+//!
+//! Typical usage will look like this:
+//!
+//! ```rust
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! pub fn simple(x: i32) -> i32 {
+//! mir!(
+//! let temp1: i32;
+//! let temp2: _;
+//!
+//! {
+//! temp1 = x;
+//! Goto(exit)
+//! }
+//!
+//! exit = {
+//! temp2 = Move(temp1);
+//! RET = temp2;
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! Hopefully most of this is fairly self-explanatory. Expanding on some notable details:
+//!
+//! - The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
+//! attribute only works on functions - there is no way to insert custom MIR into the middle of
+//! another function.
+//! - The `dialect` and `phase` parameters indicate which version of MIR you are inserting here.
+//! This will normally be the phase that corresponds to the thing you are trying to test. The
+//! phase can be omitted for dialects that have just one.
+//! - You should define your function signature like you normally would. Externally, this function
+//! can be called like any other function.
+//! - Type inference works - you don't have to spell out the type of all of your locals.
+//!
+//! For now, all statements and terminators are parsed from nested invocations of the special
+//! functions provided in this module. We additionally want to (but do not yet) support more
+//! "normal" Rust syntax in places where it makes sense. Also, most kinds of instructions are not
+//! supported yet.
+//!
+
+#![unstable(
+ feature = "custom_mir",
+ reason = "MIR is an implementation detail and extremely unstable",
+ issue = "none"
+)]
+#![allow(unused_variables, non_snake_case, missing_debug_implementations)]
+
+/// Type representing basic blocks.
+///
+/// All terminators will have this type as a return type. It helps achieve some type safety.
+pub struct BasicBlock;
+
+macro_rules! define {
+ ($name:literal, $($sig:tt)*) => {
+ #[rustc_diagnostic_item = $name]
+ pub $($sig)* { panic!() }
+ }
+}
+
+define!("mir_return", fn Return() -> BasicBlock);
+define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
+define!("mir_retag", fn Retag<T>(place: T));
+define!("mir_retag_raw", fn RetagRaw<T>(place: T));
+define!("mir_move", fn Move<T>(place: T) -> T);
+define!("mir_static", fn Static<T>(s: T) -> &'static T);
+define!("mir_static_mut", fn StaticMut<T>(s: T) -> *mut T);
+
+/// Convenience macro for generating custom MIR.
+///
+/// See the module documentation for syntax details. This macro is not magic - it only transforms
+/// your MIR into something that is easier to parse in the compiler.
+#[rustc_macro_transparency = "transparent"]
+pub macro mir {
+ (
+ $(let $local_decl:ident $(: $local_decl_ty:ty)? ;)*
+
+ {
+ $($entry:tt)*
+ }
+
+ $(
+ $block_name:ident = {
+ $($block:tt)*
+ }
+ )*
+ ) => {{
+ // First, we declare all basic blocks.
+ $(
+ let $block_name: ::core::intrinsics::mir::BasicBlock;
+ )*
+
+ {
+ // Now all locals
+ #[allow(non_snake_case)]
+ let RET;
+ $(
+ let $local_decl $(: $local_decl_ty)? ;
+ )*
+
+ ::core::intrinsics::mir::__internal_extract_let!($($entry)*);
+ $(
+ ::core::intrinsics::mir::__internal_extract_let!($($block)*);
+ )*
+
+ {
+ // Finally, the contents of the basic blocks
+ ::core::intrinsics::mir::__internal_remove_let!({
+ {}
+ { $($entry)* }
+ });
+ $(
+ ::core::intrinsics::mir::__internal_remove_let!({
+ {}
+ { $($block)* }
+ });
+ )*
+
+ RET
+ }
+ }
+ }}
+}
+
+/// Helper macro that extracts the `let` declarations out of a bunch of statements.
+///
+/// This macro is written using the "statement muncher" strategy. Each invocation parses the first
+/// statement out of the input, does the appropriate thing with it, and then recursively calls the
+/// same macro on the remainder of the input.
+#[doc(hidden)]
+pub macro __internal_extract_let {
+ // If it's a `let` like statement, keep the `let`
+ (
+ let $var:ident $(: $ty:ty)? = $expr:expr; $($rest:tt)*
+ ) => {
+ let $var $(: $ty)?;
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ // Due to #86730, we have to handle const blocks separately
+ (
+ let $var:ident $(: $ty:ty)? = const $block:block; $($rest:tt)*
+ ) => {
+ let $var $(: $ty)?;
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ // Otherwise, output nothing
+ (
+ $stmt:stmt; $($rest:tt)*
+ ) => {
+ ::core::intrinsics::mir::__internal_extract_let!($($rest)*);
+ },
+ (
+ $expr:expr
+ ) => {}
+}
+
+/// Helper macro that removes the `let` declarations from a bunch of statements.
+///
+/// Because expression position macros cannot expand to statements + expressions, we need to be
+/// slightly creative here. The general strategy is also statement munching as above, but the output
+/// of the macro is "stored" in the subsequent macro invocation. Easiest understood via example:
+/// ```text
+/// invoke!(
+/// {
+/// {
+/// x = 5;
+/// }
+/// {
+/// let d = e;
+/// Call()
+/// }
+/// }
+/// )
+/// ```
+/// becomes
+/// ```text
+/// invoke!(
+/// {
+/// {
+/// x = 5;
+/// d = e;
+/// }
+/// {
+/// Call()
+/// }
+/// }
+/// )
+/// ```
+#[doc(hidden)]
+pub macro __internal_remove_let {
+ // If it's a `let` like statement, remove the `let`
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ let $var:ident $(: $ty:ty)? = $expr:expr;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $var = $expr;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ // Due to #86730 , we have to handle const blocks separately
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ let $var:ident $(: $ty:ty)? = const $block:block;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $var = const $block;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ // Otherwise, keep going
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ $stmt:stmt;
+ $($rest:tt)*
+ }
+ }
+ ) => { ::core::intrinsics::mir::__internal_remove_let!(
+ {
+ {
+ $($already_parsed)*
+ $stmt;
+ }
+ {
+ $($rest)*
+ }
+ }
+ )},
+ (
+ {
+ {
+ $($already_parsed:tt)*
+ }
+ {
+ $expr:expr
+ }
+ }
+ ) => {
+ {
+ $($already_parsed)*
+ $expr
+ }
+ },
+}
diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs
index d4fb88610..5e4211058 100644
--- a/library/core/src/iter/adapters/array_chunks.rs
+++ b/library/core/src/iter/adapters/array_chunks.rs
@@ -1,6 +1,8 @@
use crate::array;
-use crate::iter::{ByRefSized, FusedIterator, Iterator};
-use crate::ops::{ControlFlow, Try};
+use crate::const_closure::ConstFnMutClosure;
+use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce};
+use crate::mem::{self, MaybeUninit};
+use crate::ops::{ControlFlow, NeverShortCircuit, Try};
/// An iterator over `N` elements of the iterator at a time.
///
@@ -82,7 +84,13 @@ where
}
}
- impl_fold_via_try_fold! { fold -> try_fold }
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ <Self as SpecFold>::fold(self, init, f)
+ }
}
#[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")]
@@ -168,3 +176,64 @@ where
self.iter.len() < N
}
}
+
+trait SpecFold: Iterator {
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B;
+}
+
+impl<I, const N: usize> SpecFold for ArrayChunks<I, N>
+where
+ I: Iterator,
+{
+ #[inline]
+ default fn fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let fold = ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp);
+ self.try_fold(init, fold).0
+ }
+}
+
+impl<I, const N: usize> SpecFold for ArrayChunks<I, N>
+where
+ I: Iterator + TrustedRandomAccessNoCoerce,
+{
+ #[inline]
+ fn fold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ let mut accum = init;
+ let inner_len = self.iter.size();
+ let mut i = 0;
+ // Use a while loop because (0..len).step_by(N) doesn't optimize well.
+ while inner_len - i >= N {
+ let mut chunk = MaybeUninit::uninit_array();
+ let mut guard = array::Guard { array_mut: &mut chunk, initialized: 0 };
+ while guard.initialized < N {
+ // SAFETY: The method consumes the iterator and the loop condition ensures that
+ // all accesses are in bounds and only happen once.
+ unsafe {
+ let idx = i + guard.initialized;
+ guard.push_unchecked(self.iter.__iterator_get_unchecked(idx));
+ }
+ }
+ mem::forget(guard);
+ // SAFETY: The loop above initialized all elements
+ let chunk = unsafe { MaybeUninit::array_assume_init(chunk) };
+ accum = f(accum, chunk);
+ i += N;
+ }
+
+ // Unlike `try_fold`, this method does not need to take care of the
+ // remainder, since `self` will be dropped.
+
+ accum
+ }
+}
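As a usage-level illustration of what both `SpecFold` paths compute (a sketch on nightly, where `iter_array_chunks` is the relevant feature gate): a fold over `ArrayChunks` sees whole `[T; N]` chunks, and any trailing remainder is simply dropped with `self`.

```rust
#![feature(iter_array_chunks)]

fn main() {
    // Chunks are [0,1], [2,3], [4,5]; the fold sums a*b per chunk.
    let total = (0..6).array_chunks::<2>().fold(0, |acc, [a, b]| acc + a * b);
    assert_eq!(total, 0 * 1 + 2 * 3 + 4 * 5); // 26
}
```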
diff --git a/library/core/src/iter/adapters/take.rs b/library/core/src/iter/adapters/take.rs
index 58a0b9d7b..d947c7b0e 100644
--- a/library/core/src/iter/adapters/take.rs
+++ b/library/core/src/iter/adapters/take.rs
@@ -75,7 +75,6 @@ where
#[inline]
fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
- Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
@@ -101,6 +100,26 @@ where
impl_fold_via_try_fold! { fold -> try_fold }
#[inline]
+ fn for_each<F: FnMut(Self::Item)>(mut self, f: F) {
+ // The default implementation would use a unit accumulator, so we can
+ // avoid a stateful closure by folding over the remaining number
+ // of items we wish to return instead.
+ fn check<'a, Item>(
+ mut action: impl FnMut(Item) + 'a,
+ ) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
+ move |more, x| {
+ action(x);
+ more.checked_sub(1)
+ }
+ }
+
+ let remaining = self.n;
+ if remaining > 0 {
+ self.iter.try_fold(remaining - 1, check(f));
+ }
+ }
+
+ #[inline]
#[rustc_inherit_overflow_checks]
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let min = self.n.min(n);
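A small sketch of the behavior the `for_each` override above preserves: the countdown accumulator stops the underlying `try_fold` after exactly `n` items, even on an endless iterator.

```rust
fn main() {
    let mut seen = Vec::new();
    // `(1..)` never ends; `take(3)` bounds the fold to three items.
    (1..).take(3).for_each(|x| seen.push(x));
    assert_eq!(seen, vec![1, 2, 3]);
}
```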
diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs
index ef0f39782..bb35d50b4 100644
--- a/library/core/src/iter/mod.rs
+++ b/library/core/src/iter/mod.rs
@@ -401,6 +401,8 @@ pub use self::sources::{once, Once};
pub use self::sources::{once_with, OnceWith};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::sources::{repeat, Repeat};
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+pub use self::sources::{repeat_n, RepeatN};
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub use self::sources::{repeat_with, RepeatWith};
#[stable(feature = "iter_successors", since = "1.34.0")]
diff --git a/library/core/src/iter/sources.rs b/library/core/src/iter/sources.rs
index d34772cd3..3ec426a3a 100644
--- a/library/core/src/iter/sources.rs
+++ b/library/core/src/iter/sources.rs
@@ -4,6 +4,7 @@ mod from_generator;
mod once;
mod once_with;
mod repeat;
+mod repeat_n;
mod repeat_with;
mod successors;
@@ -16,6 +17,9 @@ pub use self::empty::{empty, Empty};
#[stable(feature = "iter_once", since = "1.2.0")]
pub use self::once::{once, Once};
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+pub use self::repeat_n::{repeat_n, RepeatN};
+
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub use self::repeat_with::{repeat_with, RepeatWith};
diff --git a/library/core/src/iter/sources/repeat_n.rs b/library/core/src/iter/sources/repeat_n.rs
new file mode 100644
index 000000000..fd8d25ce1
--- /dev/null
+++ b/library/core/src/iter/sources/repeat_n.rs
@@ -0,0 +1,195 @@
+use crate::iter::{FusedIterator, TrustedLen};
+use crate::mem::ManuallyDrop;
+
+/// Creates a new iterator that repeats a single element a given number of times.
+///
+/// The `repeat_n()` function repeats a single value exactly `n` times.
+///
+/// This is very similar to using [`repeat()`] with [`Iterator::take()`],
+/// but there are two differences:
+/// - `repeat_n()` can return the original value, rather than always cloning.
+/// - `repeat_n()` produces an [`ExactSizeIterator`].
+///
+/// [`repeat()`]: crate::iter::repeat
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// #![feature(iter_repeat_n)]
+/// use std::iter;
+///
+/// // four of the number four:
+/// let mut four_fours = iter::repeat_n(4, 4);
+///
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+///
+/// // no more fours
+/// assert_eq!(None, four_fours.next());
+/// ```
+///
+/// For non-`Copy` types,
+///
+/// ```
+/// #![feature(iter_repeat_n)]
+/// use std::iter;
+///
+/// let v: Vec<i32> = Vec::with_capacity(123);
+/// let mut it = iter::repeat_n(v, 5);
+///
+/// for i in 0..4 {
+/// // It starts by cloning things
+/// let cloned = it.next().unwrap();
+/// assert_eq!(cloned.len(), 0);
+/// assert_eq!(cloned.capacity(), 0);
+/// }
+///
+/// // ... but the last item is the original one
+/// let last = it.next().unwrap();
+/// assert_eq!(last.len(), 0);
+/// assert_eq!(last.capacity(), 123);
+///
+/// // ... and now we're done
+/// assert_eq!(None, it.next());
+/// ```
+#[inline]
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+#[doc(hidden)] // waiting on ACP#120 to decide whether to expose publicly
+pub fn repeat_n<T: Clone>(element: T, count: usize) -> RepeatN<T> {
+ let mut element = ManuallyDrop::new(element);
+
+ if count == 0 {
+ // SAFETY: we definitely haven't dropped it yet, since we only just got
+ // passed it in, and because the count is zero the instance we're about
+ // to create won't drop it, so to avoid leaking we need to drop it now.
+ unsafe { ManuallyDrop::drop(&mut element) };
+ }
+
+ RepeatN { element, count }
+}
+
+/// An iterator that repeats an element an exact number of times.
+///
+/// This `struct` is created by the [`repeat_n()`] function.
+/// See its documentation for more.
+#[derive(Clone, Debug)]
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+#[doc(hidden)] // waiting on ACP#120 to decide whether to expose publicly
+pub struct RepeatN<A> {
+ count: usize,
+ // Invariant: has been dropped iff count == 0.
+ element: ManuallyDrop<A>,
+}
+
+impl<A> RepeatN<A> {
+ /// If we haven't already dropped the element, return it in an option.
+ ///
+ /// Clears the count so it won't be dropped again later.
+ #[inline]
+ fn take_element(&mut self) -> Option<A> {
+ if self.count > 0 {
+ self.count = 0;
+ // SAFETY: We just set count to zero so it won't be dropped again,
+ // and it used to be non-zero so it hasn't already been dropped.
+ unsafe { Some(ManuallyDrop::take(&mut self.element)) }
+ } else {
+ None
+ }
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A> Drop for RepeatN<A> {
+ fn drop(&mut self) {
+ self.take_element();
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> Iterator for RepeatN<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if self.count == 0 {
+ return None;
+ }
+
+ self.count -= 1;
+ Some(if self.count == 0 {
+ // SAFETY: the check above ensured that the count used to be non-zero,
+ // so element hasn't been dropped yet, and we just lowered the count to
+ // zero so it won't be dropped later, and thus it's okay to take it here.
+ unsafe { ManuallyDrop::take(&mut self.element) }
+ } else {
+ A::clone(&mut self.element)
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn advance_by(&mut self, skip: usize) -> Result<(), usize> {
+ let len = self.count;
+
+ if skip >= len {
+ self.take_element();
+ }
+
+ if skip > len {
+ Err(len)
+ } else {
+ self.count = len - skip;
+ Ok(())
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.take_element()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> ExactSizeIterator for RepeatN<A> {
+ fn len(&self) -> usize {
+ self.count
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> DoubleEndedIterator for RepeatN<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.next()
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ self.advance_by(n)
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ self.nth(n)
+ }
+}
+
+#[unstable(feature = "iter_repeat_n", issue = "104434")]
+impl<A: Clone> FusedIterator for RepeatN<A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Clone> TrustedLen for RepeatN<A> {}
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
index 6f62662d8..ab2d0472b 100644
--- a/library/core/src/iter/sources/repeat_with.rs
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -1,4 +1,5 @@
use crate::iter::{FusedIterator, TrustedLen};
+use crate::ops::Try;
/// Creates a new iterator that repeats elements of type `A` endlessly by
/// applying the provided closure, the repeater, `F: FnMut() -> A`.
@@ -89,6 +90,22 @@ impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::MAX, None)
}
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+ where
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Output = Acc>,
+ {
+ // This override isn't strictly needed, but it avoids relying on the
+ // optimizer to remove the `next`-always-returns-`Some` check, and it
+ // emphasizes that the `?` is the only way to exit the loop.
+
+ loop {
+ let item = (self.repeater)();
+ init = fold(init, item)?;
+ }
+ }
}
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 789a87968..83c7e8977 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -14,7 +14,7 @@ use super::super::{
fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
-/// An interface for dealing with iterators.
+/// A trait for dealing with iterators.
///
/// This is the main iterator trait. For more about the concept of iterators
/// generally, please see the [module-level documentation]. In particular, you
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 659409557..1823fd300 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -89,6 +89,7 @@
// Lints:
#![deny(rust_2021_incompatible_or_patterns)]
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
@@ -98,20 +99,24 @@
// Library features:
#![feature(const_align_offset)]
#![feature(const_align_of_val)]
+#![feature(const_align_of_val_raw)]
+#![feature(const_alloc_layout)]
#![feature(const_arguments_as_str)]
#![feature(const_array_into_iter_constructors)]
#![feature(const_bigint_helper_methods)]
#![feature(const_black_box)]
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
-#![feature(const_char_convert)]
+#![feature(const_char_from_u32_unchecked)]
#![feature(const_clone)]
#![feature(const_cmp)]
#![feature(const_discriminant)]
#![feature(const_eval_select)]
+#![feature(const_exact_div)]
#![feature(const_float_bits_conv)]
#![feature(const_float_classify)]
#![feature(const_fmt_arguments_new)]
+#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_convert)]
#![feature(const_index_range_slice_index)]
@@ -128,14 +133,17 @@
#![feature(const_option)]
#![feature(const_option_ext)]
#![feature(const_pin)]
+#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_sub_ptr)]
#![feature(const_replace)]
+#![feature(const_result_drop)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_is_null)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
#![feature(const_raw_ptr_comparison)]
#![feature(const_size_of_val)]
+#![feature(const_size_of_val_raw)]
#![feature(const_slice_from_raw_parts_mut)]
#![feature(const_slice_ptr_len)]
#![feature(const_slice_split_at_mut)]
@@ -154,9 +162,11 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
#![feature(ptr_metadata)]
+#![feature(set_ptr_value)]
#![feature(slice_ptr_get)]
#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
+#![feature(strict_provenance)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
#![feature(variant_count)]
@@ -184,6 +194,7 @@
#![feature(const_refs_to_cell)]
#![feature(decl_macro)]
#![feature(deprecated_suggestion)]
+#![cfg_attr(not(bootstrap), feature(derive_const))]
#![feature(doc_cfg)]
#![feature(doc_notable_trait)]
#![feature(rustdoc_internals)]
@@ -386,38 +397,8 @@ pub mod primitive;
#[unstable(feature = "stdsimd", issue = "48556")]
mod core_arch;
-#[doc = include_str!("../../stdarch/crates/core_arch/src/core_arch_docs.md")]
#[stable(feature = "simd_arch", since = "1.27.0")]
-pub mod arch {
- #[stable(feature = "simd_arch", since = "1.27.0")]
- pub use crate::core_arch::arch::*;
-
- /// Inline assembly.
- ///
- /// Refer to [rust by example] for a usage guide and the [reference] for
- /// detailed information about the syntax and available options.
- ///
- /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
- /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
- #[stable(feature = "asm", since = "1.59.0")]
- #[rustc_builtin_macro]
- pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
- /* compiler built-in */
- }
-
- /// Module-level inline assembly.
- ///
- /// Refer to [rust by example] for a usage guide and the [reference] for
- /// detailed information about the syntax and available options.
- ///
- /// [rust by example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
- /// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
- #[stable(feature = "global_asm", since = "1.59.0")]
- #[rustc_builtin_macro]
- pub macro global_asm("assembly template", $(operands,)* $(options($(option),*))?) {
- /* compiler built-in */
- }
-}
+pub mod arch;
// Pull in the `core_simd` crate directly into libcore. The contents of
// `core_simd` are in a different repository: rust-lang/portable-simd.
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index fd96e1ff7..f29cd357d 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -820,7 +820,6 @@ pub(crate) mod builtin {
#[stable(feature = "compile_error_macro", since = "1.20.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "compile_error_macro")]
macro_rules! compile_error {
($msg:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -944,7 +943,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "env_macro")]
macro_rules! env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
($name:expr, $error_msg:expr $(,)?) => {{ /* compiler built-in */ }};
@@ -973,7 +971,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "option_env_macro")]
macro_rules! option_env {
($name:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1058,7 +1055,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "concat_macro")]
macro_rules! concat {
($($e:expr),* $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1084,7 +1080,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "line_macro")]
macro_rules! line {
() => {
/* compiler built-in */
@@ -1124,7 +1119,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "column_macro")]
macro_rules! column {
() => {
/* compiler built-in */
@@ -1150,7 +1144,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "file_macro")]
macro_rules! file {
() => {
/* compiler built-in */
@@ -1175,7 +1168,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "stringify_macro")]
macro_rules! stringify {
($($t:tt)*) => {
/* compiler built-in */
@@ -1282,7 +1274,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "module_path_macro")]
macro_rules! module_path {
() => {
/* compiler built-in */
@@ -1316,7 +1307,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "cfg_macro")]
macro_rules! cfg {
($($cfg:tt)*) => {
/* compiler built-in */
@@ -1367,7 +1357,6 @@ pub(crate) mod builtin {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_builtin_macro]
#[macro_export]
- #[cfg_attr(not(test), rustc_diagnostic_item = "include_macro")]
macro_rules! include {
($file:expr $(,)?) => {{ /* compiler built-in */ }};
}
@@ -1464,6 +1453,19 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Attribute macro used to apply derive macros for implementing traits
+ /// in a const context.
+ ///
+ /// See [the reference] for more info.
+ ///
+ /// [the reference]: ../../../reference/attributes/derive.html
+ #[unstable(feature = "derive_const", issue = "none")]
+ #[rustc_builtin_macro]
+ #[cfg(not(bootstrap))]
+ pub macro derive_const($item:item) {
+ /* compiler built-in */
+ }
+
/// Attribute macro applied to a function to turn it into a unit test.
///
/// See [the reference] for more info.
@@ -1511,6 +1513,17 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Attribute macro applied to a function to register it as a handler for allocation failure.
+ ///
+ /// See also [`std::alloc::handle_alloc_error`](../../../std/alloc/fn.handle_alloc_error.html).
+ #[cfg(not(bootstrap))]
+ #[unstable(feature = "alloc_error_handler", issue = "51540")]
+ #[allow_internal_unstable(rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro alloc_error_handler($item:item) {
+ /* compiler built-in */
+ }
+
/// Keeps the item it's applied to if the passed path is accessible, and removes it otherwise.
#[unstable(
feature = "cfg_accessible",
@@ -1533,6 +1546,18 @@ pub(crate) mod builtin {
/* compiler built-in */
}
+ /// Unstable placeholder for type ascription.
+ #[rustc_builtin_macro]
+ #[unstable(
+ feature = "type_ascription",
+ issue = "23416",
+ reason = "placeholder syntax for type ascription"
+ )]
+ #[cfg(not(bootstrap))]
+ pub macro type_ascribe($expr:expr, $ty:ty) {
+ /* compiler built-in */
+ }
+
/// Unstable implementation detail of the `rustc` compiler, do not use.
#[rustc_builtin_macro]
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index ae4ebf444..42c342801 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -96,6 +96,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Sized {
// Empty.
}
@@ -127,6 +128,7 @@ pub trait Sized {
/// [nomicon-coerce]: ../../nomicon/coercions.html
#[unstable(feature = "unsize", issue = "27732")]
#[lang = "unsize"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -693,6 +695,7 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -793,6 +796,7 @@ impl<T: ?Sized> Unpin for *mut T {}
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
#[const_trait]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Destruct {}
/// A marker for tuple types.
@@ -802,8 +806,18 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Tuple {}
+/// A marker for pointer-sized types.
+#[unstable(feature = "pointer_sized_trait", issue = "none")]
+#[cfg_attr(not(bootstrap), lang = "pointer_sized")]
+#[rustc_on_unimplemented(
+ message = "`{Self}` needs to be a pointer-sized type",
+ label = "`{Self}` needs to be a pointer-sized type"
+)]
+pub trait PointerSized {}
+
/// Implementations of `Copy` for primitive types.
///
/// Implementations that cannot be described in Rust
diff --git a/library/core/src/mem/maybe_uninit.rs b/library/core/src/mem/maybe_uninit.rs
index 7757c95de..3f4918365 100644
--- a/library/core/src/mem/maybe_uninit.rs
+++ b/library/core/src/mem/maybe_uninit.rs
@@ -1172,7 +1172,7 @@ impl<T> MaybeUninit<T> {
/// #![feature(maybe_uninit_as_bytes, maybe_uninit_slice)]
/// use std::mem::MaybeUninit;
///
- /// let val = 0x12345678i32;
+ /// let val = 0x12345678_i32;
/// let uninit = MaybeUninit::new(val);
/// let uninit_bytes = uninit.as_bytes();
/// let bytes = unsafe { MaybeUninit::slice_assume_init_ref(uninit_bytes) };
@@ -1198,7 +1198,7 @@ impl<T> MaybeUninit<T> {
/// #![feature(maybe_uninit_as_bytes)]
/// use std::mem::MaybeUninit;
///
- /// let val = 0x12345678i32;
+ /// let val = 0x12345678_i32;
/// let mut uninit = MaybeUninit::new(val);
/// let uninit_bytes = uninit.as_bytes_mut();
/// if cfg!(target_endian = "little") {
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 9195da5a4..383bdc7b6 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -21,11 +21,6 @@ mod maybe_uninit;
#[stable(feature = "maybe_uninit", since = "1.36.0")]
pub use maybe_uninit::MaybeUninit;
-// FIXME: This is left here for now to avoid complications around pending reverts.
-// Once <https://github.com/rust-lang/rust/issues/101899> is fully resolved,
-// this should be removed and the references in `alloc::Layout` updated.
-pub(crate) use ptr::Alignment as ValidAlign;
-
mod transmutability;
#[unstable(feature = "transmutability", issue = "99571")]
pub use transmutability::{Assume, BikeshedIntrinsicFrom};
@@ -730,10 +725,7 @@ pub const fn swap<T>(x: &mut T, y: &mut T) {
// understanding `mem::replace`, `Option::take`, etc. - a better overall
// solution might be to make `ptr::swap_nonoverlapping` into an intrinsic, which
// a backend can choose to implement using the block optimization, or not.
- // NOTE(scottmcm) MIRI is disabled here as reading in smaller units is a
- // pessimization for it. Also, if the type contains any unaligned pointers,
- // copying those over multiple reads is difficult to support.
- #[cfg(not(any(target_arch = "spirv", miri)))]
+ #[cfg(not(any(target_arch = "spirv")))]
{
// For types that are larger multiples of their alignment, the simple way
// tends to copy the whole thing to stack rather than doing it one part
@@ -1004,7 +996,7 @@ pub fn drop<T>(_x: T) {}
/// ```
#[inline]
#[unstable(feature = "mem_copy_fn", issue = "98262")]
-pub fn copy<T: Copy>(x: &T) -> T {
+pub const fn copy<T: Copy>(x: &T) -> T {
*x
}
@@ -1121,7 +1113,10 @@ impl<T> fmt::Debug for Discriminant<T> {
/// # Stability
///
/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
-/// of some variant will not change between compilations with the same compiler.
+/// of some variant will not change between compilations with the same compiler. See the [Reference]
+/// for more information.
+///
+/// [Reference]: ../../reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
///
/// # Examples
///
@@ -1137,6 +1132,62 @@ impl<T> fmt::Debug for Discriminant<T> {
/// assert_eq!(mem::discriminant(&Foo::B(1)), mem::discriminant(&Foo::B(2)));
/// assert_ne!(mem::discriminant(&Foo::B(3)), mem::discriminant(&Foo::C(3)));
/// ```
+///
+/// ## Accessing the numeric value of the discriminant
+///
+/// Note that it is *undefined behavior* to [`transmute`] from [`Discriminant`] to a primitive!
+///
+/// If an enum has only unit variants, then the numeric value of the discriminant can be accessed
+/// with an [`as`] cast:
+///
+/// ```
+/// enum Enum {
+/// Foo,
+/// Bar,
+/// Baz,
+/// }
+///
+/// assert_eq!(0, Enum::Foo as isize);
+/// assert_eq!(1, Enum::Bar as isize);
+/// assert_eq!(2, Enum::Baz as isize);
+/// ```
+///
+/// If an enum has opted-in to having a [primitive representation] for its discriminant,
+/// then it's possible to use pointers to read the memory location storing the discriminant.
+/// That **cannot** be done for enums using the [default representation], however, as it's
+/// undefined what layout the discriminant has and where it's stored — it might not even be
+/// stored at all!
+///
+/// [`as`]: ../../std/keyword.as.html
+/// [primitive representation]: ../../reference/type-layout.html#primitive-representations
+/// [default representation]: ../../reference/type-layout.html#the-default-representation
+/// ```
+/// #[repr(u8)]
+/// enum Enum {
+/// Unit,
+/// Tuple(bool),
+/// Struct { a: bool },
+/// }
+///
+/// impl Enum {
+/// fn discriminant(&self) -> u8 {
+/// // SAFETY: Because `Self` is marked `repr(u8)`, its layout is a `repr(C)` `union`
+/// // between `repr(C)` structs, each of which has the `u8` discriminant as its first
+/// // field, so we can read the discriminant without offsetting the pointer.
+/// unsafe { *<*const _>::from(self).cast::<u8>() }
+/// }
+/// }
+///
+/// let unit_like = Enum::Unit;
+/// let tuple_like = Enum::Tuple(true);
+/// let struct_like = Enum::Struct { a: false };
+/// assert_eq!(0, unit_like.discriminant());
+/// assert_eq!(1, tuple_like.discriminant());
+/// assert_eq!(2, struct_like.discriminant());
+///
+/// // ⚠️ This is undefined behavior. Don't do this. ⚠️
+/// // assert_eq!(0, unsafe { std::mem::transmute::<_, u8>(std::mem::discriminant(&unit_like)) });
+/// ```
#[stable(feature = "discriminant_value", since = "1.21.0")]
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
#[cfg_attr(not(test), rustc_diagnostic_item = "mem_discriminant")]
diff --git a/library/core/src/num/flt2dec/strategy/dragon.rs b/library/core/src/num/flt2dec/strategy/dragon.rs
index 8ced5971e..71b14d0ae 100644
--- a/library/core/src/num/flt2dec/strategy/dragon.rs
+++ b/library/core/src/num/flt2dec/strategy/dragon.rs
@@ -366,7 +366,7 @@ pub fn format_exact<'a>(
if order == Ordering::Greater
|| (order == Ordering::Equal
// SAFETY: `buf[len-1]` is initialized.
- && (len == 0 || unsafe { buf[len - 1].assume_init() } & 1 == 1))
+ && len > 0 && unsafe { buf[len - 1].assume_init() } & 1 == 1)
{
// if rounding up changes the length, the exponent should also change.
// but we've been requested a fixed number of digits, so do not alter the buffer...
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 914dca61b..57096f439 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -107,6 +107,9 @@ macro_rules! int_impl {
/// Returns the number of leading zeros in the binary representation of `self`.
///
+ /// Depending on what you're doing with the value, you might also be interested in the
+ /// [`ilog2`] function, which returns a consistent number even if the type widens.
+ ///
/// # Examples
///
/// Basic usage:
@@ -116,6 +119,7 @@ macro_rules! int_impl {
///
/// assert_eq!(n.leading_zeros(), 0);
/// ```
+ #[doc = concat!("[`ilog2`]: ", stringify!($SelfT), "::ilog2")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
#[must_use = "this returns the result of the operation, \
@@ -757,10 +761,11 @@ macro_rules! int_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
- unsafe { intrinsics::unchecked_shl(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
@@ -804,10 +809,11 @@ macro_rules! int_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
- unsafe { intrinsics::unchecked_shr(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked absolute value. Computes `self.abs()`, returning `None` if
@@ -1354,11 +1360,12 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shl(rhs & ($BITS - 1))
}
}
@@ -1383,11 +1390,12 @@ macro_rules! int_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shr(rhs & ($BITS - 1))
}
}
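The masking means the shift amount itself wraps modulo the bit width, which is the contract the rewritten bodies preserve; a quick sketch:

```rust
fn main() {
    // For i32, `$BITS - 1` is 31, so the shift amount is taken mod 32.
    assert_eq!(1i32.wrapping_shl(33), 1 << 1);       // 33 & 31 == 1
    assert_eq!((-16i32).wrapping_shr(34), -16 >> 2); // 34 & 31 == 2
}
```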
@@ -2068,11 +2076,15 @@ macro_rules! int_impl {
pub const fn rem_euclid(self, rhs: Self) -> Self {
let r = self % rhs;
if r < 0 {
- if rhs < 0 {
- r - rhs
- } else {
- r + rhs
- }
+ // Semantically equivalent to `if rhs < 0 { r - rhs } else { r + rhs }`.
+ // If `rhs` is not `Self::MIN`, then `r + abs(rhs)` will not overflow
+ // and is clearly equivalent, because `r` is negative.
+ // Otherwise, `rhs` is `Self::MIN`, then we have
+ // `r.wrapping_add(Self::MIN.wrapping_abs())`, which evaluates
+ // to `r.wrapping_add(Self::MIN)`, which is equivalent to
+ // `r - Self::MIN`, which is what we wanted (and will not overflow
+ // for negative `r`).
+ r.wrapping_add(rhs.wrapping_abs())
} else {
r
}
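A sketch of why the wrapping dance matters: the only case where `r + rhs` or `r - rhs` could overflow is `rhs == Self::MIN`, and the rewritten form handles it.

```rust
fn main() {
    assert_eq!((-7i32).rem_euclid(3), 2); // ordinary case: r + |rhs|
    // Edge case: rhs == i32::MIN. The result must lie in [0, 2^31), and
    // r.wrapping_add(rhs.wrapping_abs()) lands on it exactly.
    assert_eq!((-1i32).rem_euclid(i32::MIN), i32::MAX);
}
```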
@@ -2271,15 +2283,16 @@ macro_rules! int_impl {
/// # Panics
///
/// This function will panic if `self` is less than or equal to zero,
- /// or if `base` is less then 2.
+ /// or if `base` is less than 2.
///
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2298,10 +2311,11 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2319,10 +2333,11 @@ macro_rules! int_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2343,10 +2358,10 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2379,10 +2394,10 @@ macro_rules! int_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -2403,10 +2418,10 @@ macro_rules! int_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 311c5fa5b..ac7f579eb 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -3,12 +3,15 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::ascii;
-use crate::error::Error;
+use crate::convert::TryInto;
use crate::intrinsics;
use crate::mem;
use crate::ops::{Add, Mul, Sub};
use crate::str::FromStr;
+#[cfg(not(no_fp_fmt_parse))]
+use crate::error::Error;
+
// Used because the `?` operator is not allowed in a const context.
macro_rules! try_opt {
($e:expr) => {
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 6b6f3417f..fbda8f82b 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -321,7 +321,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -356,7 +355,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -391,8 +389,8 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
/// #![feature(nonzero_ops)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let one = ", stringify!($Ty), "::new(1)?;")]
@@ -420,7 +418,6 @@ macro_rules! nonzero_unsigned_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -460,14 +457,13 @@ macro_rules! nonzero_unsigned_operations {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(7).unwrap().ilog2(), 2);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(8).unwrap().ilog2(), 3);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(9).unwrap().ilog2(), 3);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -485,14 +481,13 @@ macro_rules! nonzero_unsigned_operations {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(99).unwrap().ilog10(), 1);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(100).unwrap().ilog10(), 2);")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::new(101).unwrap().ilog10(), 2);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -526,7 +521,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -556,7 +550,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -591,7 +584,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -626,7 +618,6 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
@@ -662,14 +653,13 @@ macro_rules! nonzero_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let pos = ", stringify!($Ty), "::new(1)?;")]
#[doc = concat!("let neg = ", stringify!($Ty), "::new(-1)?;")]
#[doc = concat!("let min = ", stringify!($Ty), "::new(",
stringify!($Int), "::MIN)?;")]
- #[doc = concat!("let max = ", stringify!($Ty), "::new(",
+ #[doc = concat!("# let max = ", stringify!($Ty), "::new(",
stringify!($Int), "::MAX)?;")]
///
/// assert_eq!(pos, pos.wrapping_abs());
@@ -905,7 +895,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -941,7 +930,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -986,8 +974,8 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
/// #![feature(nonzero_ops)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let two = ", stringify!($Ty), "::new(2)?;")]
@@ -1014,7 +1002,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
@@ -1058,7 +1045,6 @@ macro_rules! nonzero_unsigned_signed_operations {
///
/// ```
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
- ///
/// # fn main() { test().unwrap(); }
/// # fn test() -> Option<()> {
#[doc = concat!("let three = ", stringify!($Ty), "::new(3)?;")]
@@ -1162,8 +1148,8 @@ macro_rules! nonzero_min_max_unsigned {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), 1", stringify!($Int), ");")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1177,8 +1163,8 @@ macro_rules! nonzero_min_max_unsigned {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1204,8 +1190,8 @@ macro_rules! nonzero_min_max_signed {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MIN.get(), ", stringify!($Int), "::MIN);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1223,8 +1209,8 @@ macro_rules! nonzero_min_max_signed {
///
/// ```
/// #![feature(nonzero_min_max)]
- #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
#[doc = concat!("assert_eq!(", stringify!($Ty), "::MAX.get(), ", stringify!($Int), "::MAX);")]
/// ```
#[unstable(feature = "nonzero_min_max", issue = "89065")]
@@ -1263,12 +1249,11 @@ macro_rules! nonzero_bits {
/// # Examples
///
/// ```
- /// #![feature(nonzero_bits)]
#[doc = concat!("# use std::num::", stringify!($Ty), ";")]
///
#[doc = concat!("assert_eq!(", stringify!($Ty), "::BITS, ", stringify!($Int), "::BITS);")]
/// ```
- #[unstable(feature = "nonzero_bits", issue = "94881")]
+ #[stable(feature = "nonzero_bits", since = "1.67.0")]
pub const BITS: u32 = <$Int>::BITS;
}
)+
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 335cc5124..1c97c4686 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -109,6 +109,9 @@ macro_rules! uint_impl {
/// Returns the number of leading zeros in the binary representation of `self`.
///
+ /// Depending on what you're doing with the value, you might also be interested in the
+ /// [`ilog2`] function, which returns a consistent number even if the type widens.
+ ///
/// # Examples
///
/// Basic usage:
@@ -118,6 +121,7 @@ macro_rules! uint_impl {
///
/// assert_eq!(n.leading_zeros(), 2);
/// ```
+ #[doc = concat!("[`ilog2`]: ", stringify!($SelfT), "::ilog2")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_math", since = "1.32.0")]
#[must_use = "this returns the result of the operation, \
@@ -692,15 +696,16 @@ macro_rules! uint_impl {
///
/// # Panics
///
- /// This function will panic if `self` is zero, or if `base` is less then 2.
+ /// This function will panic if `self` is zero, or if `base` is less than 2.
///
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".ilog(5), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -719,10 +724,11 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".ilog2(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -740,10 +746,11 @@ macro_rules! uint_impl {
/// # Example
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".ilog10(), 1);")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_allow_const_fn_unstable(const_option)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -764,10 +771,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(5", stringify!($SelfT), ".checked_ilog(5), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -800,10 +807,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(2", stringify!($SelfT), ".checked_ilog2(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -822,10 +829,10 @@ macro_rules! uint_impl {
/// # Examples
///
/// ```
- /// #![feature(int_log)]
#[doc = concat!("assert_eq!(10", stringify!($SelfT), ".checked_ilog10(), Some(1));")]
/// ```
- #[unstable(feature = "int_log", issue = "70887")]
+ #[stable(feature = "int_log", since = "1.67.0")]
+ #[rustc_const_stable(feature = "int_log", since = "1.67.0")]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline]
@@ -901,10 +908,11 @@ macro_rules! uint_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shl`.
- unsafe { intrinsics::unchecked_shl(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked shift right. Computes `self >> rhs`, returning `None`
@@ -948,10 +956,11 @@ macro_rules! uint_impl {
#[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_shr`.
- unsafe { intrinsics::unchecked_shr(self, rhs) }
+ // Any legal shift amount is losslessly representable in the self type.
+ unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
}
/// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@@ -1367,11 +1376,12 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shl(rhs & ($BITS - 1))
}
}
@@ -1399,11 +1409,12 @@ macro_rules! uint_impl {
#[must_use = "this returns the result of the operation, \
without modifying the original"]
#[inline(always)]
+ #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
- intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ self.unchecked_shr(rhs & ($BITS - 1))
}
}
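With `unchecked_shl`/`unchecked_shr` now taking `rhs: u32` (narrowed internally via `try_into`), the wrapping shifts can simply mask and forward. A safe sketch of the masking behavior these methods guarantee:

```rust
fn main() {
    let x: u8 = 0b1000_0001;

    // `wrapping_shl` masks the shift amount by the bit width (8 here),
    // so a shift by 9 behaves exactly like a shift by 1.
    assert_eq!(x.wrapping_shl(9), x.wrapping_shl(1));
    assert_eq!(x.wrapping_shl(9), 0b0000_0010);

    // The same masking applies to the right shift.
    assert_eq!(x.wrapping_shr(9), x.wrapping_shr(1));
    assert_eq!(x.wrapping_shr(9), 0b0100_0000);
}
```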
diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs
index 72ebe653c..cd183540c 100644
--- a/library/core/src/ops/control_flow.rs
+++ b/library/core/src/ops/control_flow.rs
@@ -79,7 +79,9 @@ use crate::{convert, ops};
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
#[stable(feature = "control_flow_enum_type", since = "1.55.0")]
-#[derive(Debug, Clone, Copy, PartialEq)]
+// ControlFlow should not implement PartialOrd or Ord, per RFC 3058:
+// https://rust-lang.github.io/rfcs/3058-try-trait-v2.html#traits-for-controlflow
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ControlFlow<B, C = ()> {
/// Move on to the next phase of the operation as normal.
#[stable(feature = "control_flow_enum_type", since = "1.55.0")]
diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs
index 4f4c99c4a..c67867f44 100644
--- a/library/core/src/ops/deref.rs
+++ b/library/core/src/ops/deref.rs
@@ -61,7 +61,7 @@
#[doc(alias = "&*")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "Deref"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Deref {
/// The resulting type after dereferencing.
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 2e0a752c8..127b047db 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -1,3 +1,6 @@
+#[cfg(not(bootstrap))]
+use crate::marker::Tuple;
+
/// The version of the call operator that takes an immutable receiver.
///
/// Instances of `Fn` can be called repeatedly without mutating state.
@@ -51,9 +54,9 @@
/// let double = |x| x * 2;
/// assert_eq!(call_with_one(double), 2);
/// ```
+#[cfg(bootstrap)]
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "Fn"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -71,13 +74,93 @@
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Fn<Args>: FnMut<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call(&self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes an immutable receiver.
+///
+/// Instances of `Fn` can be called repeatedly without mutating state.
+///
+/// *This trait (`Fn`) is not to be confused with [function pointers]
+/// (`fn`).*
+///
+/// `Fn` is implemented automatically by closures which only take immutable
+/// references to captured variables or don't capture anything at all, as well
+/// as (safe) [function pointers] (with some caveats, see their documentation
+/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
+/// implements `Fn`, too.
+///
+/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
+/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
+/// is expected.
+///
+/// Use `Fn` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly and without mutating state (e.g., when
+/// calling it concurrently). If you do not need such strict requirements, use
+/// [`FnMut`] or [`FnOnce`] as bounds.
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a closure
+///
+/// ```
+/// let square = |x| x * x;
+/// assert_eq!(square(5), 25);
+/// ```
+///
+/// ## Using a `Fn` parameter
+///
+/// ```
+/// fn call_with_one<F>(func: F) -> usize
+/// where F: Fn(usize) -> usize {
+/// func(1)
+/// }
+///
+/// let double = |x| x * 2;
+/// assert_eq!(call_with_one(double), 2);
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
+ label = "expected an `Fn<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait Fn<Args: Tuple>: FnMut<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output;
+}
+
/// The version of the call operator that takes a mutable receiver.
///
/// Instances of `FnMut` can be called repeatedly and may mutate state.
@@ -139,9 +222,9 @@ pub trait Fn<Args>: FnMut<Args> {
///
/// assert_eq!(x, 5);
/// ```
+#[cfg(bootstrap)]
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "FnMut"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -159,13 +242,101 @@ pub trait Fn<Args>: FnMut<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait FnMut<Args>: FnOnce<Args> {
/// Performs the call operation.
#[unstable(feature = "fn_traits", issue = "29625")]
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes a mutable receiver.
+///
+/// Instances of `FnMut` can be called repeatedly and may mutate state.
+///
+/// `FnMut` is implemented automatically by closures which take mutable
+/// references to captured variables, as well as all types that implement
+/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
+/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
+/// implements `FnMut`, too.
+///
+/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
+/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
+/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
+///
+/// Use `FnMut` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly, while allowing it to mutate state.
+/// If you don't want the parameter to mutate state, use [`Fn`] as a
+/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a mutably capturing closure
+///
+/// ```
+/// let mut x = 5;
+/// {
+/// let mut square_x = || x *= x;
+/// square_x();
+/// }
+/// assert_eq!(x, 25);
+/// ```
+///
+/// ## Using a `FnMut` parameter
+///
+/// ```
+/// fn do_twice<F>(mut func: F)
+/// where F: FnMut()
+/// {
+/// func();
+/// func();
+/// }
+///
+/// let mut x: usize = 1;
+/// {
+/// let add_two_to_x = || x += 2;
+/// do_twice(add_two_to_x);
+/// }
+///
+/// assert_eq!(x, 5);
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn_mut"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait FnMut<Args: Tuple>: FnOnce<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
/// The version of the call operator that takes a by-value receiver.
///
/// Instances of `FnOnce` can be called, but might not be callable multiple
@@ -219,9 +390,9 @@ pub trait FnMut<Args>: FnOnce<Args> {
///
/// // `consume_and_return_x` can no longer be invoked at this point
/// ```
+#[cfg(bootstrap)]
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_diagnostic_item = "FnOnce"]
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
on(
@@ -239,7 +410,7 @@ pub trait FnMut<Args>: FnOnce<Args> {
)]
#[fundamental] // so that regex can rely that `&str: !FnMut`
#[must_use = "closures are lazy and do nothing unless called"]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait FnOnce<Args> {
/// The returned type after the call operator is used.
#[lang = "fn_once_output"]
@@ -251,6 +422,92 @@ pub trait FnOnce<Args> {
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
+/// The version of the call operator that takes a by-value receiver.
+///
+/// Instances of `FnOnce` can be called, but might not be callable multiple
+/// times. Because of this, if the only thing known about a type is that it
+/// implements `FnOnce`, it can only be called once.
+///
+/// `FnOnce` is implemented automatically by closures that might consume captured
+/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
+/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
+///
+/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
+/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
+///
+/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
+/// type and only need to call it once. If you need to call the parameter
+/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
+/// state, use [`Fn`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: fn
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Using a `FnOnce` parameter
+///
+/// ```
+/// fn consume_with_relish<F>(func: F)
+/// where F: FnOnce() -> String
+/// {
+/// // `func` consumes its captured variables, so it cannot be run more
+/// // than once.
+/// println!("Consumed: {}", func());
+///
+/// println!("Delicious!");
+///
+/// // Attempting to invoke `func()` again will throw a `use of moved
+/// // value` error for `func`.
+/// }
+///
+/// let x = String::from("x");
+/// let consume_and_return_x = move || x;
+/// consume_with_relish(consume_and_return_x);
+///
+/// // `consume_and_return_x` can no longer be invoked at this point
+/// ```
+#[cfg(not(bootstrap))]
+#[lang = "fn_once"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ on(
+ _Self = "unsafe fn",
+ note = "unsafe function cannot be called generically without an unsafe block",
+ // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
+ label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+#[const_trait]
+pub trait FnOnce<Args: Tuple> {
+ /// The returned type after the call operator is used.
+ #[lang = "fn_once_output"]
+ #[stable(feature = "fn_once_output", since = "1.12.0")]
+ type Output;
+
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[cfg(bootstrap)]
mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
@@ -310,3 +567,66 @@ mod impls {
}
}
}
+
+#[cfg(not(bootstrap))]
+mod impls {
+ use crate::marker::Tuple;
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const Fn<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ extern "rust-call" fn call(&self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnMut<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (**self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnOnce<A> for &F
+ where
+ F: ~const Fn<A>,
+ {
+ type Output = F::Output;
+
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnMut<A> for &mut F
+ where
+ F: ~const FnMut<A>,
+ {
+ extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
+ impl<A: Tuple, F: ?Sized> const FnOnce<A> for &mut F
+ where
+ F: ~const FnMut<A>,
+ {
+ type Output = F::Output;
+ extern "rust-call" fn call_once(self, args: A) -> F::Output {
+ (*self).call_mut(args)
+ }
+ }
+}
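The duplicated `impls` module keeps the long-standing reference-forwarding impls intact under the new `Tuple` bound: `&F` is callable wherever `F: Fn`, and `&mut F` wherever `F: FnMut`. A sketch of what those impls permit in ordinary code:

```rust
fn call_twice<F: Fn() -> i32>(f: F) -> i32 {
    f() + f()
}

fn main() {
    let one = || 1;
    // `&F: Fn` forwarding: a shared reference to a closure is itself callable.
    assert_eq!(call_twice(&one), 2);

    let mut count = 0;
    let mut bump = || count += 1;
    // `&mut F: FnMut` forwarding: a mutable reference can be called too.
    (&mut bump)();
    bump();
    assert_eq!(count, 2);
}
```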
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index dd4e3ac1c..5e3dc48b6 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -55,7 +55,7 @@
#[doc(alias = "]")]
#[doc(alias = "[")]
#[doc(alias = "[]")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait Index<Idx: ?Sized> {
/// The returned type after indexing.
#[stable(feature = "rust1", since = "1.0.0")]
@@ -164,7 +164,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
-#[cfg_attr(not(bootstrap), const_trait)]
+#[const_trait]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
/// Performs the mutable indexing (`container[index]`) operation.
///
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index f284b4359..505d964e5 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -512,7 +512,7 @@ use crate::{
};
/// The `Option` type. See [the module level documentation](self) for more.
-#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[derive(Copy, PartialOrd, Eq, Ord, Debug, Hash)]
#[rustc_diagnostic_item = "Option"]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
@@ -2035,6 +2035,72 @@ impl<'a, T> const From<&'a mut Option<T>> for Option<&'a mut T> {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> crate::marker::StructuralPartialEq for Option<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq> PartialEq for Option<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ SpecOptionPartialEq::eq(self, other)
+ }
+}
+
+#[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")]
+#[doc(hidden)]
+pub trait SpecOptionPartialEq: Sized {
+ fn eq(l: &Option<Self>, other: &Option<Self>) -> bool;
+}
+
+#[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")]
+impl<T: PartialEq> SpecOptionPartialEq for T {
+ #[inline]
+ default fn eq(l: &Option<T>, r: &Option<T>) -> bool {
+ match (l, r) {
+ (Some(l), Some(r)) => *l == *r,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+}
+
+macro_rules! non_zero_option {
+ ( $( #[$stability: meta] $NZ:ty; )+ ) => {
+ $(
+ #[$stability]
+ impl SpecOptionPartialEq for $NZ {
+ #[inline]
+ fn eq(l: &Option<Self>, r: &Option<Self>) -> bool {
+ l.map(Self::get).unwrap_or(0) == r.map(Self::get).unwrap_or(0)
+ }
+ }
+ )+
+ };
+}
+
+non_zero_option! {
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU8;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU16;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU32;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU64;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroU128;
+ #[stable(feature = "nonzero", since = "1.28.0")] crate::num::NonZeroUsize;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI8;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI16;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI32;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI64;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroI128;
+ #[stable(feature = "signed_nonzero", since = "1.34.0")] crate::num::NonZeroIsize;
+}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T> SpecOptionPartialEq for crate::ptr::NonNull<T> {
+ #[inline]
+ fn eq(l: &Option<Self>, r: &Option<Self>) -> bool {
+ l.map(Self::as_ptr).unwrap_or_else(|| crate::ptr::null_mut())
+ == r.map(Self::as_ptr).unwrap_or_else(|| crate::ptr::null_mut())
+ }
+}
+
/////////////////////////////////////////////////////////////////////////////
// The Option Iterators
/////////////////////////////////////////////////////////////////////////////
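The specialization above lets equality on `Option<NonZero*>` and `Option<NonNull<T>>` compile down to a single integer comparison by mapping `None` onto the niche value (0). Observable behavior is unchanged; a quick sketch of the cases it must preserve:

```rust
use std::num::NonZeroU8;

fn main() {
    let a = NonZeroU8::new(5); // Some(5)
    let b = NonZeroU8::new(5); // Some(5)
    let none: Option<NonZeroU8> = None;

    // Same answers as the generic `PartialEq`, just cheaper codegen:
    assert_eq!(a, b);
    assert_ne!(a, none);
    assert_eq!(none, NonZeroU8::new(0)); // `new(0)` yields `None`
}
```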
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 00b63dfbd..461b70c32 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -80,7 +80,6 @@ pub macro unreachable_2015 {
#[doc(hidden)]
#[unstable(feature = "edition_panic", issue = "none", reason = "use unreachable!() instead")]
#[allow_internal_unstable(core_panic)]
-#[rustc_diagnostic_item = "unreachable_2021_macro"]
#[rustc_macro_transparency = "semitransparent"]
pub macro unreachable_2021 {
() => (
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index a9de7c94e..a704a00fa 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -38,10 +38,9 @@ use crate::panic::{Location, PanicInfo};
/// site as much as possible (so that `panic!()` has as low an impact
/// on (e.g.) the inlining of other functions as possible), by moving
/// the actual formatting into this shared place.
-#[cold]
// If panic_immediate_abort, inline the abort call,
// otherwise avoid inlining because it is a cold path.
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_fmt"] // needed for const-evaluated panics
@@ -67,11 +66,9 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
/// Like panic_fmt, but without unwinding and track_caller to reduce the impact on codesize.
/// Also just works on `str`, as a `fmt::Arguments` needs more space to be passed.
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cfg_attr(not(bootstrap), rustc_nounwind)]
-#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+#[rustc_nounwind]
pub fn panic_str_nounwind(msg: &'static str) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
@@ -97,10 +94,9 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
// above.
/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
-#[cold]
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[rustc_const_unstable(feature = "core_panic", issue = "none")]
@@ -139,8 +135,8 @@ pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
}
-#[cold]
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access
fn panic_bounds_check(index: usize, len: usize) -> ! {
@@ -155,11 +151,10 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
///
/// This function is called directly by the codegen backend, and must not have
/// any extra arguments (including those synthesized by track_caller).
-#[cold]
-#[inline(never)]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
-#[cfg_attr(not(bootstrap), rustc_nounwind)]
-#[cfg_attr(bootstrap, rustc_allocator_nounwind)]
+#[rustc_nounwind]
fn panic_no_unwind() -> ! {
panic_str_nounwind("panic in a function that cannot unwind")
}
@@ -187,7 +182,8 @@ pub enum AssertKind {
}
/// Internal function for `assert_eq!` and `assert_ne!` macros
-#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[doc(hidden)]
pub fn assert_failed<T, U>(
@@ -204,7 +200,8 @@ where
}
/// Internal function for `assert_match!`
-#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[doc(hidden)]
pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
@@ -223,6 +220,8 @@ pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
}
/// Non-generic version of the above functions, to avoid code bloat.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
fn assert_failed_inner(
kind: AssertKind,
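These hunks apply one pattern throughout `panicking.rs`: by default the panic helpers are outlined (`#[inline(never)]`, `#[cold]`) so call sites stay small, while under `panic_immediate_abort` they are inlined so the abort folds into the caller. A hedged sketch of the same attribute pattern on a hypothetical helper (the `abort_fast` feature name is made up for illustration):

```rust
// Hypothetical crate feature standing in for `panic_immediate_abort`.
#[cfg_attr(not(feature = "abort_fast"), inline(never), cold)]
#[cfg_attr(feature = "abort_fast", inline)]
fn fail(msg: &str) -> ! {
    // Outlined cold path in the default configuration.
    panic!("{msg}");
}

fn checked_div(a: u32, b: u32) -> u32 {
    if b == 0 {
        fail("division by zero"); // the call site stays one small call
    }
    a / b
}

fn main() {
    assert_eq!(checked_div(10, 2), 5);
}
```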
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index ccef35b45..4524fa4c4 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -543,7 +543,7 @@ impl<P: Deref> Pin<P> {
/// let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
/// // This should mean the pointee `a` can never move again.
/// }
- /// mem::swap(&mut a, &mut b);
+ /// mem::swap(&mut a, &mut b); // Potential UB down the road ⚠️
/// // The address of `a` changed to `b`'s stack slot, so `a` got moved even
/// // though we have previously pinned it! We have violated the pinning API contract.
/// }
@@ -563,13 +563,66 @@ impl<P: Deref> Pin<P> {
/// // This should mean the pointee can never move again.
/// }
/// drop(pinned);
- /// let content = Rc::get_mut(&mut x).unwrap();
+ /// let content = Rc::get_mut(&mut x).unwrap(); // Potential UB down the road ⚠️
/// // Now, if `x` was the only reference, we have a mutable reference to
/// // data that we pinned above, which we could use to move it as we have
/// // seen in the previous example. We have violated the pinning API contract.
/// }
/// ```
///
+ /// ## Pinning of closure captures
+ ///
+ /// Particular care is required when using `Pin::new_unchecked` in a closure:
+ /// `Pin::new_unchecked(&mut var)` where `var` is a by-value (moved) closure capture
+ /// implicitly makes the promise that the closure itself is pinned, and that *all* uses
+ /// of this closure capture respect that pinning.
+ /// ```
+ /// use std::pin::Pin;
+ /// use std::task::Context;
+ /// use std::future::Future;
+ ///
+ /// fn move_pinned_closure(mut x: impl Future, cx: &mut Context<'_>) {
+ /// // Create a closure that moves `x`, and then internally uses it in a pinned way.
+ /// let mut closure = move || unsafe {
+ /// let _ignore = Pin::new_unchecked(&mut x).poll(cx);
+ /// };
+ /// // Call the closure, so the future can assume it has been pinned.
+ /// closure();
+ /// // Move the closure somewhere else. This also moves `x`!
+ /// let mut moved = closure;
+ /// // Calling it again means we polled the future from two different locations,
+ /// // violating the pinning API contract.
+ /// moved(); // Potential UB ⚠️
+ /// }
+ /// ```
+/// When passing a closure to another API, that API might move the closure at any time, so
+ /// `Pin::new_unchecked` on closure captures may only be used if the API explicitly documents
+ /// that the closure is pinned.
+ ///
+ /// The better alternative is to avoid all that trouble and do the pinning in the outer function
+ /// instead (here using the unstable `pin` macro):
+ /// ```
+ /// #![feature(pin_macro)]
+ /// use std::pin::pin;
+ /// use std::task::Context;
+ /// use std::future::Future;
+ ///
+ /// fn move_pinned_closure(mut x: impl Future, cx: &mut Context<'_>) {
+ /// let mut x = pin!(x);
+ /// // Create a closure that captures `x: Pin<&mut _>`, which is safe to move.
+ /// let mut closure = move || {
+ /// let _ignore = x.as_mut().poll(cx);
+ /// };
+ /// // Call the closure, so the future can assume it has been pinned.
+ /// closure();
+ /// // Move the closure somewhere else.
+ /// let mut moved = closure;
+ /// // Calling it again here is fine (except that we might be polling a future that already
+ /// // returned `Poll::Ready`, but that is a separate problem).
+ /// moved();
+ /// }
+ /// ```
+ ///
/// [`mem::swap`]: crate::mem::swap
#[lang = "new_unchecked"]
#[inline(always)]
@@ -1059,7 +1112,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// 8 | let x: Pin<&mut Foo> = {
/// | - borrow later stored here
/// 9 | let x: Pin<&mut Foo> = pin!(Foo { /* … */ });
-/// | ^^^^^^^^^^^^^^^^^^^^^ creates a temporary which is freed while still in use
+/// | ^^^^^^^^^^^^^^^^^^^^^ creates a temporary value which is freed while still in use
/// 10 | x
/// 11 | }; // <- Foo is dropped
/// | - temporary value is freed at the end of this statement
diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs
index b566e211c..2d67d742c 100644
--- a/library/core/src/prelude/v1.rs
+++ b/library/core/src/prelude/v1.rs
@@ -75,9 +75,16 @@ pub use crate::macros::builtin::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
+#[cfg(not(bootstrap))]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+pub use crate::macros::builtin::alloc_error_handler;
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
pub use crate::macros::builtin::{bench, derive, global_allocator, test, test_case};
+#[unstable(feature = "derive_const", issue = "none")]
+#[cfg(not(bootstrap))]
+pub use crate::macros::builtin::derive_const;
+
#[unstable(
feature = "cfg_accessible",
issue = "64797",
@@ -91,3 +98,11 @@ pub use crate::macros::builtin::cfg_accessible;
reason = "`cfg_eval` is a recently implemented feature"
)]
pub use crate::macros::builtin::cfg_eval;
+
+#[unstable(
+ feature = "type_ascription",
+ issue = "23416",
+ reason = "placeholder syntax for type ascription"
+)]
+#[cfg(not(bootstrap))]
+pub use crate::macros::builtin::type_ascribe;
diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs
index 331714a99..d6e9da187 100644
--- a/library/core/src/primitive_docs.rs
+++ b/library/core/src/primitive_docs.rs
@@ -1493,11 +1493,13 @@ mod prim_ref {}
/// However, a direct cast back is not possible. You need to use `transmute`:
///
/// ```rust
+/// # #[cfg(not(miri))] { // FIXME: use strict provenance APIs once they are stable, then remove this `cfg`
/// # let fnptr: fn(i32) -> i32 = |x| x+2;
/// # let fnptr_addr = fnptr as usize;
/// let fnptr = fnptr_addr as *const ();
/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
/// assert_eq!(fnptr(40), 42);
+/// # }
/// ```
///
/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 1390e09dd..64a5290c3 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -9,7 +9,9 @@ use crate::{cmp, fmt, hash, mem, num};
/// Note that particularly large alignments, while representable in this type,
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -167,16 +169,18 @@ impl From<Alignment> for usize {
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::Ord for Alignment {
+impl const cmp::Ord for Alignment {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
- self.as_nonzero().cmp(&other.as_nonzero())
+ self.as_nonzero().get().cmp(&other.as_nonzero().get())
}
}
+#[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
-impl cmp::PartialOrd for Alignment {
+impl const cmp::PartialOrd for Alignment {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
@@ -198,7 +202,9 @@ type AlignmentEnum = AlignmentEnum32;
#[cfg(target_pointer_width = "64")]
type AlignmentEnum = AlignmentEnum64;
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -219,7 +225,9 @@ enum AlignmentEnum16 {
_Align1Shl15 = 1 << 15,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -256,7 +264,9 @@ enum AlignmentEnum32 {
_Align1Shl31 = 1 << 31,
}
-#[derive(Copy, Clone, Eq, PartialEq)]
+#[derive(Copy, Clone, Eq)]
+#[cfg_attr(bootstrap, derive(PartialEq))]
+#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 5a083227b..d34813599 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -45,7 +45,7 @@ impl<T: ?Sized> *const T {
/// Casts to a pointer of another type.
#[stable(feature = "ptr_cast", since = "1.38.0")]
#[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
- #[inline]
+ #[inline(always)]
pub const fn cast<U>(self) -> *const U {
self as _
}
@@ -79,19 +79,14 @@ impl<T: ?Sized> *const T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, mut val: *const U) -> *const U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
where
U: ?Sized,
{
- let target = &mut val as *mut *const U as *mut *const u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *const u8 };
- val
+ from_raw_parts::<U>(self as *const (), metadata(meta))
}
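The rewritten `with_metadata_of` reconstructs the pointer through `from_raw_parts`/`metadata` instead of overwriting the data-pointer field in place, which is what allows it to become `const`. A nightly-only sketch of what it does for a fat pointer (both features involved are unstable):

```rust
#![feature(set_ptr_value, ptr_metadata)]

fn main() {
    let array = [1u8, 2, 3, 4];
    let slice: *const [u8] = &array[..]; // fat pointer: data pointer + length 4
    let first: *const u8 = &array[0];

    // Keep `first`'s address, reuse `slice`'s metadata (the length).
    let rebuilt: *const [u8] = first.with_metadata_of(slice);
    assert_eq!(rebuilt as *const u8, first);
    assert_eq!(std::ptr::metadata(rebuilt), 4);
}
```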
/// Changes constness without changing the type.
@@ -100,6 +95,7 @@ impl<T: ?Sized> *const T {
/// refactored.
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_mut(self) -> *mut T {
self as _
}
@@ -117,13 +113,21 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let array = [13, 42];
/// let p0: *const i32 = &array[0];
/// assert_eq!(<*const _>::from_bits(p0.to_bits()), p0);
/// let p1: *const i32 = &array[1];
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -140,11 +144,20 @@ impl<T: ?Sized> *const T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *const u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*const u8>::from_bits(1), dangling);
+ /// # }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr` function, or update \
+ your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
@@ -176,7 +189,7 @@ impl<T: ?Sized> *const T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -213,7 +226,7 @@ impl<T: ?Sized> *const T {
///
/// [`from_exposed_addr`]: from_exposed_addr
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -478,8 +491,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -559,7 +571,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_offset(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -568,12 +580,36 @@ impl<T: ?Sized> *const T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let v = 17_u32;
+ /// let ptr: *const u32 = &v;
+ ///
+    /// // `u32` is 4-byte aligned,
+    /// // which means that the lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
+    /// // Note that `tagged_ptr` is unaligned; it's UB to read from it.
+    /// // To get the original pointer back, `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *const T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask);
- from_raw_parts::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).with_metadata_of(self)
}
/// Calculates the distance between two pointers. The returned value is in
@@ -684,7 +720,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
@@ -914,8 +950,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1001,8 +1036,7 @@ impl<T: ?Sized> *const T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1082,7 +1116,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1162,7 +1196,7 @@ impl<T: ?Sized> *const T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1304,6 +1338,8 @@ impl<T: ?Sized> *const T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1314,32 +1350,149 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *const T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *const T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
+ usize::MAX
+ }
- const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
- usize::MAX
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
+            // algorithm correctness cannot depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
+ }
}
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1350,16 +1503,121 @@ impl<T: ?Sized> *const T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+    /// cannot be aligned more strictly than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let data = AlignedI32(42);
+ /// let ptr = &data as *const AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42);
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *const u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
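The `const` rewrite of `is_aligned_to` checks `self.cast::<()>().align_offset(align) == 0`: with a zero-sized pointee there is no element stride to account for, so a zero offset means the address itself is aligned. A runtime sketch of the equivalence (note `align_offset` is formally permitted to return `usize::MAX`, but the stable runtime implementation computes the real offset):

```rust
fn main() {
    let v = 0u64;
    let p = &v as *const u64 as *const u8;
    let addr = p as usize;

    for align in [1usize, 2, 4, 8] {
        // What `is_aligned_to` means, expressed via the raw address.
        let by_addr = addr & (align - 1) == 0;
        // What the new implementation computes: `align_offset` on a
        // zero-sized pointee, where offset 0 means already aligned.
        let by_offset = p.cast::<()>().align_offset(align) == 0;
        assert_eq!(by_addr, by_offset);
    }
}
```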
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index caa10f181..a8604843e 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,6 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
+#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 565c38d22..48b2e88da 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -35,7 +35,8 @@
//! be used for inter-thread synchronization.
//! * The result of casting a reference to a pointer is valid for as long as the
//! underlying object is live and no reference (just raw pointers) is used to
-//! access the same memory.
+//! access the same memory. That is, reference and pointer accesses cannot be
+//! interleaved.
//!
//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
@@ -64,7 +65,6 @@
//! separate allocated object), heap allocations (each allocation created by the global allocator is
//! a separate allocated object), and `static` variables.
//!
-//!
//! # Strict Provenance
//!
//! **The following text is non-normative, insufficiently formal, and is an extremely strict
@@ -613,9 +613,10 @@ pub const fn invalid_mut<T>(addr: usize) -> *mut T {
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr<T>(addr: usize) -> *const T
where
T: Sized,
@@ -650,9 +651,10 @@ where
/// This API and its claimed semantics are part of the Strict Provenance experiment, see the
/// [module documentation][crate::ptr] for details.
#[must_use]
-#[inline]
+#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[allow(fuzzy_provenance_casts)] // this *is* the strict provenance API one should use instead
pub fn from_exposed_addr_mut<T>(addr: usize) -> *mut T
where
T: Sized,
@@ -908,21 +910,15 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
);
}
- // NOTE(scottmcm) Miri is disabled here as reading in smaller units is a
- // pessimization for it. Also, if the type contains any unaligned pointers,
- // copying those over multiple reads is difficult to support.
- #[cfg(not(miri))]
+ // Split up the slice into small power-of-two-sized chunks that LLVM is able
+ // to vectorize (unless it's a special type with more-than-pointer alignment,
+ // because we don't want to pessimize things like slices of SIMD vectors.)
+ if mem::align_of::<T>() <= mem::size_of::<usize>()
+ && (!mem::size_of::<T>().is_power_of_two()
+ || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
{
- // Split up the slice into small power-of-two-sized chunks that LLVM is able
- // to vectorize (unless it's a special type with more-than-pointer alignment,
- // because we don't want to pessimize things like slices of SIMD vectors.)
- if mem::align_of::<T>() <= mem::size_of::<usize>()
- && (!mem::size_of::<T>().is_power_of_two()
- || mem::size_of::<T>() > mem::size_of::<usize>() * 2)
- {
- attempt_swap_as_chunks!(usize);
- attempt_swap_as_chunks!(u8);
- }
+ attempt_swap_as_chunks!(usize);
+ attempt_swap_as_chunks!(u8);
}
// SAFETY: Same preconditions as this function
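The chunked path changes only how the bytes are moved, not the observable result (usage sketch, not part of the patch):

```
use core::ptr;

fn main() {
    let mut a = [1u8, 2, 3, 4];
    let mut b = [5u8, 6, 7, 8];
    // SAFETY: `a` and `b` are distinct locals, so the regions cannot overlap.
    unsafe { ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 4) };
    assert_eq!(a, [5, 6, 7, 8]);
    assert_eq!(b, [1, 2, 3, 4]);
}
```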
@@ -1580,10 +1576,14 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
/// Align pointer `p`.
///
-/// Calculate offset (in terms of elements of `stride` stride) that has to be applied
+/// Calculate the offset (in terms of elements of stride `size_of::<T>()`) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
-/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
+/// # Safety
+/// `a` must be a power of two.
+///
+/// # Notes
+/// This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
@@ -1593,12 +1593,12 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
///
/// Any questions go to @nagisa.
#[lang = "align_offset"]
-pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
- cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub,
- wrapping_add, wrapping_mul, wrapping_sub,
+ cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl, unchecked_shr,
+ unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
@@ -1610,7 +1610,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
///
/// Implementation of this function shall not panic. Ever.
#[inline]
- unsafe fn mod_inv(x: usize, m: usize) -> usize {
+ const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note, that this table does not contain values where inverse does not exist (i.e., for
@@ -1618,40 +1618,48 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
- /// INV_TABLE_MOD²
- const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
- let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
- if m <= INV_TABLE_MOD {
- table_inverse & m_minus_one
- } else {
- // We iterate "up" using the following formula:
- //
- // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+ let mut mod_gate = INV_TABLE_MOD;
+ // We iterate "up" using the following formula:
+ //
+ // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+ //
+ // This transformation needs to be applied at least until `2²ⁿ ≥ m`, at which point we can
+ // finally reduce the computation to our desired `m` by taking `inverse mod m`.
+ //
+ // This computation is `O(log log m)`, which is to say that on 64-bit machines this loop
+ // will always finish in at most 4 iterations.
+ loop {
+ // y = y * (2 - xy) mod n
//
- // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
- let mut inverse = table_inverse;
- let mut going_mod = INV_TABLE_MOD_SQUARED;
- loop {
- // y = y * (2 - xy) mod n
- //
- // Note, that we use wrapping operations here intentionally – the original formula
- // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
- // usize::MAX` instead, because we take the result `mod n` at the end
- // anyway.
- inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
- if going_mod >= m {
- return inverse & m_minus_one;
- }
- going_mod = wrapping_mul(going_mod, going_mod);
+ // Note that we use wrapping operations here intentionally: the original formula
+ // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
+ // usize::MAX` instead, because we take the result `mod n` at the end
+ // anyway.
+ if mod_gate >= m {
+ break;
}
+ inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+ let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
+ if overflow {
+ break;
+ }
+ mod_gate = new_gate;
}
+ inverse & m_minus_one
}
- let addr = p.addr();
let stride = mem::size_of::<T>();
+
+ // SAFETY: This is just an inlined `p.addr()` (which is not
+ // a `const fn` so we cannot call it).
+ // During const eval, we hook this function to ensure that the pointer never
+ // has provenance, making this sound.
+ let addr: usize = unsafe { mem::transmute(p) };
+
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
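A worked example of the lifting step, outside the diff: the table gives 7 as its own inverse mod 16 (7 · 7 = 49 ≡ 1 mod 16), and one Newton iteration lifts it to mod 256. A simplified standalone sketch (the real loop additionally guards `mod_gate` against overflow with `mul_with_overflow`):

```
fn main() {
    let (x, mut inverse) = (7usize, 7usize); // seed: 7 * 7 = 49 ≡ 1 (mod 16)
    let mut mod_gate = 16usize;
    let m = 256usize; // target modulus, a power of two
    while mod_gate < m {
        // y' = y * (2 - x * y); wrapping is fine since we reduce mod m at the end.
        inverse = inverse.wrapping_mul(2usize.wrapping_sub(x.wrapping_mul(inverse)));
        mod_gate *= mod_gate; // the valid width squares: 16 -> 256 -> 65536 -> ...
    }
    let inverse = inverse & (m - 1);
    assert_eq!(inverse, 183);
    assert_eq!((x * inverse) % m, 1); // 7 * 183 = 1281 ≡ 1 (mod 256)
}
```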
@@ -1761,7 +1769,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// (which is what the `PartialEq for &T` implementation does).
///
/// When comparing wide pointers, both the address and the metadata are tested for equality.
-/// However, note that comparing trait object pointers (`*const dyn Trait`) is unrealiable: pointers
+/// However, note that comparing trait object pointers (`*const dyn Trait`) is unreliable: pointers
/// to values of the same underlying type can compare inequal (because vtables are duplicated in
/// multiple codegen units), and pointers to values of *different* underlying type can compare equal
/// (since identical vtables can be deduplicated within a codegen unit).
@@ -1793,7 +1801,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
-#[inline]
+#[inline(always)]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
}
@@ -1862,7 +1870,6 @@ macro_rules! fnptr_impls_safety_abi {
fnptr_impls_safety_abi! { #[stable(feature = "fnptr_impls", since = "1.4.0")] $FnTy, $($Arg),* }
};
(@c_unwind $FnTy: ty, $($Arg: ident),*) => {
- #[cfg(not(bootstrap))]
fnptr_impls_safety_abi! { #[unstable(feature = "c_unwind", issue = "74990")] $FnTy, $($Arg),* }
};
(#[$meta:meta] $FnTy: ty, $($Arg: ident),*) => {
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index 6764002bc..c924a90b1 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -78,23 +78,14 @@ impl<T: ?Sized> *mut T {
/// }
/// ```
#[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[rustc_const_unstable(feature = "set_ptr_value", issue = "75091")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline]
- pub fn with_metadata_of<U>(self, val: *const U) -> *mut U
+ pub const fn with_metadata_of<U>(self, meta: *const U) -> *mut U
where
U: ?Sized,
{
- // Prepare in the type system that we will replace the pointer value with a mutable
- // pointer, taking the mutable provenance from the `self` pointer.
- let mut val = val as *mut U;
- // Pointer to the pointer value within the value.
- let target = &mut val as *mut *mut U as *mut *mut u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *target = self as *mut u8 };
- val
+ from_raw_parts_mut::<U>(self as *mut (), metadata(meta))
}
/// Changes constness without changing the type.
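The new body states the contract directly: take the address (and provenance) from `self`, the metadata from `meta`. A usage sketch (nightly `set_ptr_value` feature; the names are illustrative):

```
#![feature(set_ptr_value)]

fn main() {
    let mut arr = [1i32, 2, 3];
    let meta_src: *mut [i32] = &mut arr[..]; // fat pointer: address + length metadata
    let thin: *mut i32 = arr.as_mut_ptr();   // thin pointer with fresh provenance
    // Keep `thin`'s address and provenance, take the slice length from `meta_src`.
    let rebuilt: *mut [i32] = thin.with_metadata_of(meta_src);
    // SAFETY: `rebuilt` points at all three elements of `arr`.
    assert_eq!(unsafe { (*rebuilt).len() }, 3);
}
```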
@@ -109,6 +100,7 @@ impl<T: ?Sized> *mut T {
/// [`cast_mut`]: #method.cast_mut
#[stable(feature = "ptr_const_cast", since = "1.65.0")]
#[rustc_const_stable(feature = "ptr_const_cast", since = "1.65.0")]
+ #[inline(always)]
pub const fn cast_const(self) -> *const T {
self as _
}
@@ -126,14 +118,22 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// let mut array = [13, 42];
/// let mut it = array.iter_mut();
/// let p0: *mut i32 = it.next().unwrap();
/// assert_eq!(<*mut _>::from_bits(p0.to_bits()), p0);
/// let p1: *mut i32 = it.next().unwrap();
/// assert_eq!(p1.to_bits() - p0.to_bits(), 4);
+ /// }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `exposed_addr` method, or update your code \
+ to follow the strict provenance rules using its APIs"
+ )]
+ #[inline(always)]
pub fn to_bits(self) -> usize
where
T: Sized,
@@ -150,11 +150,20 @@ impl<T: ?Sized> *mut T {
///
/// ```
/// #![feature(ptr_to_from_bits)]
+ /// # #[cfg(not(miri))] { // doctest does not work with strict provenance
/// use std::ptr::NonNull;
/// let dangling: *mut u8 = NonNull::dangling().as_ptr();
/// assert_eq!(<*mut u8>::from_bits(1), dangling);
+ /// }
/// ```
#[unstable(feature = "ptr_to_from_bits", issue = "91126")]
+ #[deprecated(
+ since = "1.67",
+ note = "replaced by the `ptr::from_exposed_addr_mut` function, or \
+ update your code to follow the strict provenance rules using its APIs"
+ )]
+ #[allow(fuzzy_provenance_casts)] // this is an unstable and semi-deprecated cast function
+ #[inline(always)]
pub fn from_bits(bits: usize) -> Self
where
T: Sized,
@@ -186,7 +195,7 @@ impl<T: ?Sized> *mut T {
/// might change in the future (including possibly weakening this so it becomes wholly
/// equivalent to `self as usize`). See the [module documentation][crate::ptr] for details.
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn addr(self) -> usize
where
@@ -223,7 +232,7 @@ impl<T: ?Sized> *mut T {
///
/// [`from_exposed_addr_mut`]: from_exposed_addr_mut
#[must_use]
- #[inline]
+ #[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
pub fn expose_addr(self) -> usize
where
@@ -496,8 +505,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_offset(self, count: isize) -> Self {
// SAFETY: the caller must uphold the safety contract for `offset`.
- let this = unsafe { self.cast::<u8>().offset(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().offset(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -576,10 +584,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_offset(self, count: isize) -> Self {
- from_raw_parts_mut::<T>(
- self.cast::<u8>().wrapping_offset(count).cast::<()>(),
- metadata(self),
- )
+ self.cast::<u8>().wrapping_offset(count).with_metadata_of(self)
}
/// Masks out bits of the pointer according to a mask.
@@ -588,12 +593,39 @@ impl<T: ?Sized> *mut T {
///
/// For non-`Sized` pointees this operation changes only the data pointer,
/// leaving the metadata untouched.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(ptr_mask, strict_provenance)]
+ /// let mut v = 17_u32;
+ /// let ptr: *mut u32 = &mut v;
+ ///
+ /// // `u32` is 4 bytes aligned,
+ /// // which means that lower 2 bits are always 0.
+ /// let tag_mask = 0b11;
+ /// let ptr_mask = !tag_mask;
+ ///
+ /// // We can store something in these lower bits
+ /// let tagged_ptr = ptr.map_addr(|a| a | 0b10);
+ ///
+ /// // Get the "tag" back
+ /// let tag = tagged_ptr.addr() & tag_mask;
+ /// assert_eq!(tag, 0b10);
+ ///
+ /// // Note that `tagged_ptr` is unaligned, it's UB to read from/write to it.
+ /// // To get original pointer `mask` can be used:
+ /// let masked_ptr = tagged_ptr.mask(ptr_mask);
+ /// assert_eq!(unsafe { *masked_ptr }, 17);
+ ///
+ /// unsafe { *masked_ptr = 0 };
+ /// assert_eq!(v, 0);
+ /// ```
#[unstable(feature = "ptr_mask", issue = "98290")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[inline(always)]
pub fn mask(self, mask: usize) -> *mut T {
- let this = intrinsics::ptr_mask(self.cast::<()>(), mask) as *mut ();
- from_raw_parts_mut::<T>(this, metadata(self))
+ intrinsics::ptr_mask(self.cast::<()>(), mask).cast_mut().with_metadata_of(self)
}
/// Returns `None` if the pointer is null, or else returns a unique reference to
@@ -861,7 +893,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub const unsafe fn byte_offset_from(self, origin: *const T) -> isize {
+ pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: *const U) -> isize {
// SAFETY: the caller must uphold the safety contract for `offset_from`.
unsafe { self.cast::<u8>().offset_from(origin.cast::<u8>()) }
}
@@ -1020,8 +1052,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
- let this = unsafe { self.cast::<u8>().add(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer (convenience for
@@ -1107,8 +1138,7 @@ impl<T: ?Sized> *mut T {
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn byte_sub(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `sub`.
- let this = unsafe { self.cast::<u8>().sub(count).cast::<()>() };
- from_raw_parts_mut::<T>(this, metadata(self))
+ unsafe { self.cast::<u8>().sub(count).with_metadata_of(self) }
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1188,7 +1218,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_add(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_add(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_add(count).with_metadata_of(self)
}
/// Calculates the offset from a pointer using wrapping arithmetic.
@@ -1268,7 +1298,7 @@ impl<T: ?Sized> *mut T {
#[unstable(feature = "pointer_byte_offsets", issue = "96283")]
#[rustc_const_unstable(feature = "const_pointer_byte_offsets", issue = "96283")]
pub const fn wrapping_byte_sub(self, count: usize) -> Self {
- from_raw_parts_mut::<T>(self.cast::<u8>().wrapping_sub(count).cast::<()>(), metadata(self))
+ self.cast::<u8>().wrapping_sub(count).with_metadata_of(self)
}
/// Reads the value from `self` without moving it. This leaves the
@@ -1576,6 +1606,8 @@ impl<T: ?Sized> *mut T {
/// }
/// # }
/// ```
+ #[must_use]
+ #[inline]
#[stable(feature = "align_offset", since = "1.36.0")]
#[rustc_const_unstable(feature = "const_align_offset", issue = "90962")]
pub const fn align_offset(self, align: usize) -> usize
@@ -1586,32 +1618,151 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- fn rt_impl<T>(p: *mut T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
+ #[cfg(bootstrap)]
+ {
+ fn rt_impl<T>(p: *mut T, align: usize) -> usize {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(p, align) }
+ }
+
+ const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
+ usize::MAX
+ }
+
+ // SAFETY:
+ // It is permissible for `align_offset` to always return `usize::MAX`,
+ // algorithm correctness can not depend on `align_offset` returning non-max values.
+ //
+ // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
+ unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
- const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
- usize::MAX
+ #[cfg(not(bootstrap))]
+ {
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
}
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
}
/// Returns whether the pointer is properly aligned for `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned());
+ /// assert!(!ptr.wrapping_byte_add(1).is_aligned());
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// is never aligned if cast to a type with a stricter alignment than the reference's
+ /// underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(!ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.cast::<AlignedI64>().is_aligned());
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::<AlignedI64>().is_aligned());
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.cast::<AlignedI64>().is_aligned(),
+ /// runtime_ptr.wrapping_add(1).cast::<AlignedI64>().is_aligned(),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of primitives is less than their size.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ /// #[repr(align(8))]
+ /// struct AlignedI64(i64);
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut AlignedI32;
+ /// assert!(ptr.is_aligned());
+ ///
+ /// // For pointers with a known address, runtime and compiletime behavior are identical.
+ /// let ptr1 = ptr.cast::<AlignedI64>();
+ /// let ptr2 = ptr.wrapping_add(1).cast::<AlignedI64>();
+ /// assert!(ptr1.is_aligned());
+ /// assert!(!ptr2.is_aligned());
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned(self) -> bool
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned(self) -> bool
where
T: Sized,
{
- self.is_aligned_to(core::mem::align_of::<T>())
+ self.is_aligned_to(mem::align_of::<T>())
}
/// Returns whether the pointer is aligned to `align`.
@@ -1622,16 +1773,123 @@ impl<T: ?Sized> *mut T {
/// # Panics
///
/// The function panics if `align` is not a power-of-two (this includes 0).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ /// ```
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(pointer_byte_offsets)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+ ///
+ /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+ /// ```
+ ///
+ /// # At compiletime
+ /// **Note: Alignment at compiletime is experimental and subject to change. See the
+ /// [tracking issue] for details.**
+ ///
+ /// At compiletime, the compiler may not know where a value will end up in memory.
+ /// Calling this function on a pointer created from a reference at compiletime will only
+ /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
+ /// cannot be more strictly aligned than the reference's underlying allocation.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ /// #![feature(const_mut_refs)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// const _: () = {
+ /// let mut data = AlignedI32(42);
+ /// let ptr = &mut data as *mut AlignedI32;
+ ///
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ ///
+ /// // At compiletime, we know for sure that the pointer isn't aligned to 8.
+ /// assert!(!ptr.is_aligned_to(8));
+ /// assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ /// };
+ /// ```
+ ///
+ /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
+ /// pointer is aligned, even if the compiletime pointer wasn't aligned.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// // On some platforms, the alignment of i32 is less than 4.
+ /// #[repr(align(4))]
+ /// struct AlignedI32(i32);
+ ///
+ /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned.
+ /// // Also, note that mutable references are not allowed in the final value of constants.
+ /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut();
+ /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8));
+ /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8));
+ ///
+ /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned.
+ /// let runtime_ptr = COMPTIME_PTR;
+ /// assert_ne!(
+ /// runtime_ptr.is_aligned_to(8),
+ /// runtime_ptr.wrapping_add(1).is_aligned_to(8),
+ /// );
+ /// ```
+ ///
+ /// If a pointer is created from a fixed address, this function behaves the same during
+ /// runtime and compiletime.
+ ///
+ #[cfg_attr(bootstrap, doc = "```ignore")]
+ #[cfg_attr(not(bootstrap), doc = "```")]
+ /// #![feature(pointer_is_aligned)]
+ /// #![feature(const_pointer_is_aligned)]
+ ///
+ /// const _: () = {
+ /// let ptr = 40 as *mut u8;
+ /// assert!(ptr.is_aligned_to(1));
+ /// assert!(ptr.is_aligned_to(2));
+ /// assert!(ptr.is_aligned_to(4));
+ /// assert!(ptr.is_aligned_to(8));
+ /// assert!(!ptr.is_aligned_to(16));
+ /// };
+ /// ```
+ ///
+ /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203
#[must_use]
#[inline]
#[unstable(feature = "pointer_is_aligned", issue = "96284")]
- pub fn is_aligned_to(self, align: usize) -> bool {
+ #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")]
+ pub const fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
- // Cast is needed for `T: !Sized`
- self.cast::<u8>().addr() & align - 1 == 0
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ self.cast::<()>().align_offset(align) == 0
}
}
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index c18264d13..c4348169c 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -330,7 +330,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const fn as_ptr(self) -> *mut T {
self.pointer as *mut T
}
@@ -378,7 +378,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
@@ -429,7 +429,7 @@ impl<T: ?Sized> NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
@@ -703,7 +703,7 @@ impl<T> NonNull<[T]> {
#[stable(feature = "nonnull", since = "1.25.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
impl<T: ?Sized> const Clone for NonNull<T> {
- #[inline]
+ #[inline(always)]
fn clone(&self) -> Self {
*self
}
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index 6d2f7330d..c295a0e06 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -31,9 +31,8 @@ where
}
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
@@ -48,19 +47,20 @@ const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_start_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range start index {index} out of range for slice of length {len}");
}
+#[inline]
#[track_caller]
const fn slice_start_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice start index is out of range for slice");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
@@ -71,19 +71,20 @@ const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_end_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range end index {index} out of range for slice of length {len}");
}
+#[inline]
#[track_caller]
const fn slice_end_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice end index is out of range for slice");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
const fn slice_index_order_fail(index: usize, end: usize) -> ! {
@@ -92,27 +93,27 @@ const fn slice_index_order_fail(index: usize, end: usize) -> ! {
}
// FIXME const-hack
+#[inline]
#[track_caller]
fn slice_index_order_fail_rt(index: usize, end: usize) -> ! {
panic!("slice index starts at {index} but ends at {end}");
}
+#[inline]
#[track_caller]
const fn slice_index_order_fail_ct(_: usize, _: usize) -> ! {
panic!("slice index start is larger than end");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
const fn slice_start_index_overflow_fail() -> ! {
panic!("attempted to index slice from after maximum usize");
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[cold]
#[track_caller]
const fn slice_end_index_overflow_fail() -> ! {
panic!("attempted to index slice up to maximum usize");
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 8a8962828..062289767 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -1834,6 +1834,20 @@ impl<'a, T> ChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks_exact(2);
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), Some(&['l', 'o'][..]));
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), Some(&['r', 'e'][..]));
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.remainder(), &['m'][..]);
+ /// ```
#[must_use]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
@@ -2869,7 +2883,7 @@ unsafe impl<T> Sync for RChunksMut<'_, T> where T: Sync {}
/// ```
///
/// [`rchunks_exact`]: slice::rchunks_exact
-/// [`remainder`]: ChunksExact::remainder
+/// [`remainder`]: RChunksExact::remainder
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
@@ -2892,6 +2906,20 @@ impl<'a, T> RChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks_exact(2);
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), Some(&['e', 'm'][..]));
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), Some(&['o', 'r'][..]));
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.remainder(), &['l'][..]);
+ /// ```
#[must_use]
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
@@ -3031,7 +3059,7 @@ unsafe impl<'a, T> TrustedRandomAccessNoCoerce for RChunksExact<'a, T> {
/// ```
///
/// [`rchunks_exact_mut`]: slice::rchunks_exact_mut
-/// [`into_remainder`]: ChunksExactMut::into_remainder
+/// [`into_remainder`]: RChunksExactMut::into_remainder
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 4f1bb1734..d9281a925 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -7,6 +7,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp::Ordering::{self, Greater, Less};
+use crate::fmt;
use crate::intrinsics::{assert_unsafe_precondition, exact_div};
use crate::marker::Copy;
use crate::mem::{self, SizedTypeProperties};
@@ -464,7 +465,7 @@ impl<T> [T] {
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
- #[inline]
+ #[inline(always)]
#[must_use]
pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
@@ -494,7 +495,7 @@ impl<T> [T] {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[rustc_allow_const_fn_unstable(const_mut_refs)]
- #[inline]
+ #[inline(always)]
#[must_use]
pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
@@ -3467,10 +3468,11 @@ impl<T> [T] {
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
- /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
- /// length possible for a given type and input slice, but only your algorithm's performance
- /// should depend on that, not its correctness. It is permissible for all of the input data to
- /// be returned as the prefix or suffix slice.
+ /// slice of a new type, and the suffix slice. How exactly the slice is split up is not
+ /// specified; the middle part may be smaller than necessary. However, if this fails to return a
+ /// maximal middle part, that is because code is running in a context where performance does not
+ /// matter, such as a sanitizer attempting to find alignment bugs. Regular code running
+ /// in a default (debug or release) execution *will* return a maximal middle part.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
@@ -3524,14 +3526,15 @@ impl<T> [T] {
}
}
- /// Transmute the slice to a slice of another type, ensuring alignment of the types is
- /// maintained.
+ /// Transmute the mutable slice to a mutable slice of another type, ensuring alignment of the
+ /// types is maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
- /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
- /// length possible for a given type and input slice, but only your algorithm's performance
- /// should depend on that, not its correctness. It is permissible for all of the input data to
- /// be returned as the prefix or suffix slice.
+ /// slice of a new type, and the suffix slice. How exactly the slice is split up is not
+ /// specified; the middle part may be smaller than necessary. However, if this fails to return a
+ /// maximal middle part, that is because code is running in a context where performance does not
+ /// matter, such as a sanitizer attempting to find alignment bugs. Regular code running
+ /// in a default (debug or release) execution *will* return a maximal middle part.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
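The relaxed wording still guarantees that the three parts cover the input exactly; only the split point is left unspecified (usage sketch, not part of the patch):

```
fn main() {
    let bytes = [0u8; 13];
    // SAFETY: any byte pattern is a valid u16.
    let (prefix, middle, suffix) = unsafe { bytes.align_to::<u16>() };
    // The split is not fully specified, but no elements are lost:
    assert_eq!(prefix.len() + middle.len() * 2 + suffix.len(), 13);
}
```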
@@ -3667,7 +3670,8 @@ impl<T> [T] {
unsafe { self.align_to() }
}
- /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+ /// Split a mutable slice into a mutable prefix, a middle of aligned SIMD types,
+ /// and a mutable suffix.
///
/// This is a safe wrapper around [`slice::align_to_mut`], so has the same weak
/// postconditions as that method. You're only assured that
@@ -3751,9 +3755,9 @@ impl<T> [T] {
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
#[must_use]
- pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
+ pub fn is_sorted_by<'a, F>(&'a self, mut compare: F) -> bool
where
- F: FnMut(&T, &T) -> Option<Ordering>,
+ F: FnMut(&'a T, &'a T) -> Option<Ordering>,
{
self.iter().is_sorted_by(|a, b| compare(*a, *b))
}
@@ -3777,9 +3781,9 @@ impl<T> [T] {
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
#[must_use]
- pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
+ pub fn is_sorted_by_key<'a, F, K>(&'a self, f: F) -> bool
where
- F: FnMut(&T) -> K,
+ F: FnMut(&'a T) -> K,
K: PartialOrd,
{
self.iter().is_sorted_by_key(f)
@@ -4081,6 +4085,88 @@ impl<T> [T] {
*self = rem;
Some(last)
}
+
+ /// Returns mutable references to many indices at once, without doing any checks.
+ ///
+ /// For a safe alternative see [`get_many_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping or out-of-bounds indices is *[undefined behavior]*
+ /// even if the resulting references are not used.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_many_mut)]
+ ///
+ /// let x = &mut [1, 2, 4];
+ ///
+ /// unsafe {
+ /// let [a, b] = x.get_many_unchecked_mut([0, 2]);
+ /// *a *= 10;
+ /// *b *= 100;
+ /// }
+ /// assert_eq!(x, &[10, 2, 400]);
+ /// ```
+ ///
+ /// [`get_many_mut`]: slice::get_many_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[unstable(feature = "get_many_mut", issue = "104642")]
+ #[inline]
+ pub unsafe fn get_many_unchecked_mut<const N: usize>(
+ &mut self,
+ indices: [usize; N],
+ ) -> [&mut T; N] {
+ // NB: This implementation is written as it is because any variation of
+ // `indices.map(|i| self.get_unchecked_mut(i))` would make miri unhappy,
+ // or generate worse code otherwise. This is also why we need to go
+ // through a raw pointer here.
+ let slice: *mut [T] = self;
+ let mut arr: mem::MaybeUninit<[&mut T; N]> = mem::MaybeUninit::uninit();
+ let arr_ptr = arr.as_mut_ptr();
+
+ // SAFETY: We expect `indices` to contain distinct values that are
+ // in bounds of `self`.
+ unsafe {
+ for i in 0..N {
+ let idx = *indices.get_unchecked(i);
+ *(*arr_ptr).get_unchecked_mut(i) = &mut *slice.get_unchecked_mut(idx);
+ }
+ arr.assume_init()
+ }
+ }
+
+ /// Returns mutable references to many indices at once.
+ ///
+ /// Returns an error if any index is out-of-bounds, or if the same index was
+ /// passed more than once.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_many_mut)]
+ ///
+ /// let v = &mut [1, 2, 3];
+ /// if let Ok([a, b]) = v.get_many_mut([0, 2]) {
+ /// *a = 413;
+ /// *b = 612;
+ /// }
+ /// assert_eq!(v, &[413, 2, 612]);
+ /// ```
+ #[unstable(feature = "get_many_mut", issue = "104642")]
+ #[inline]
+ pub fn get_many_mut<const N: usize>(
+ &mut self,
+ indices: [usize; N],
+ ) -> Result<[&mut T; N], GetManyMutError<N>> {
+ if !get_many_check_valid(&indices, self.len()) {
+ return Err(GetManyMutError { _private: () });
+ }
+ // SAFETY: The `get_many_check_valid()` call checked that all indices
+ // are distinct and in bounds.
+ unsafe { Ok(self.get_many_unchecked_mut(indices)) }
+ }
}
impl<T, const N: usize> [[T; N]] {
@@ -4303,3 +4389,56 @@ impl<T, const N: usize> SlicePattern for [T; N] {
self
}
}
+
+/// This checks every index against each other, and against `len`.
+///
+/// This will do `binomial(N + 1, 2) = N * (N + 1) / 2 = 0, 1, 3, 6, 10, ..`
+/// comparison operations.
+fn get_many_check_valid<const N: usize>(indices: &[usize; N], len: usize) -> bool {
+ // NB: The optimizer should inline the loops into a sequence
+ // of instructions without additional branching.
+ let mut valid = true;
+ for (i, &idx) in indices.iter().enumerate() {
+ valid &= idx < len;
+ for &idx2 in &indices[..i] {
+ valid &= idx != idx2;
+ }
+ }
+ valid
+}
+
+/// The error type returned by [`get_many_mut<N>`][`slice::get_many_mut`].
+///
+/// It indicates one of two possible errors:
+/// - An index is out-of-bounds.
+/// - The same index appeared multiple times in the array.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(get_many_mut)]
+///
+/// let v = &mut [1, 2, 3];
+/// assert!(v.get_many_mut([0, 999]).is_err());
+/// assert!(v.get_many_mut([1, 1]).is_err());
+/// ```
+#[unstable(feature = "get_many_mut", issue = "104642")]
+// NB: The N here is there to be forward-compatible with adding more details
+// to the error type at a later point
+pub struct GetManyMutError<const N: usize> {
+ _private: (),
+}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> fmt::Debug for GetManyMutError<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("GetManyMutError").finish_non_exhaustive()
+ }
+}
+
+#[unstable(feature = "get_many_mut", issue = "104642")]
+impl<const N: usize> fmt::Display for GetManyMutError<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("an index is out of bounds or appeared multiple times in the array", f)
+ }
+}
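The validity check above (`get_many_check_valid`) does N bounds tests plus N·(N−1)/2 pairwise tests, N·(N+1)/2 in total; through the public API it surfaces as (usage sketch on nightly):

```
#![feature(get_many_mut)]

fn main() {
    let v = &mut [10, 20, 30];
    assert!(v.get_many_mut([0, 1, 2]).is_ok());  // distinct and in bounds: 6 checks for N = 3
    assert!(v.get_many_mut([0, 0, 2]).is_err()); // duplicate index
    assert!(v.get_many_mut([0, 1, 3]).is_err()); // index 3 out of bounds for len 3
}
```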
diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs
index b0c55ca4f..5f8748206 100644
--- a/library/core/src/str/converts.rs
+++ b/library/core/src/str/converts.rs
@@ -77,7 +77,7 @@ use super::Utf8Error;
/// let sparkle_heart = [240, 159, 146, 150];
///
/// // We know these bytes are valid, so just use `unwrap()`.
-/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+/// let sparkle_heart: &str = str::from_utf8(&sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index fbc0fc397..45fd2caae 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -396,7 +396,7 @@ impl str {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub const fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
@@ -411,7 +411,7 @@ impl str {
/// modified in a way that it remains valid UTF-8.
#[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
#[must_use]
- #[inline]
+ #[inline(always)]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self as *mut str as *mut u8
}
@@ -902,6 +902,12 @@ impl str {
///
/// assert_eq!(None, iter.next());
/// ```
+ ///
+ /// If the string is empty or all whitespace, the iterator yields no string slices:
+ /// ```
+ /// assert_eq!("".split_whitespace().next(), None);
+ /// assert_eq!(" ".split_whitespace().next(), None);
+ /// ```
#[must_use = "this returns the split string as an iterator, \
without modifying the original"]
#[stable(feature = "split_whitespace", since = "1.1.0")]
@@ -946,6 +952,12 @@ impl str {
///
/// assert_eq!(None, iter.next());
/// ```
+ ///
+ /// If the string is empty or all ASCII whitespace, the iterator yields no string slices:
+ /// ```
+ /// assert_eq!("".split_ascii_whitespace().next(), None);
+ /// assert_eq!(" ".split_ascii_whitespace().next(), None);
+ /// ```
#[must_use = "this returns the split string as an iterator, \
without modifying the original"]
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs
index ec2cb429e..19da6d2fb 100644
--- a/library/core/src/str/pattern.rs
+++ b/library/core/src/str/pattern.rs
@@ -39,6 +39,7 @@
)]
use crate::cmp;
+use crate::cmp::Ordering;
use crate::fmt;
use crate::slice::memchr;
@@ -946,6 +947,32 @@ impl<'a, 'b> Pattern<'a> for &'b str {
haystack.as_bytes().starts_with(self.as_bytes())
}
+ /// Checks whether the pattern matches anywhere in the haystack
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ if self.len() == 0 {
+ return true;
+ }
+
+ match self.len().cmp(&haystack.len()) {
+ Ordering::Less => {
+ if self.len() == 1 {
+ return haystack.as_bytes().contains(&self.as_bytes()[0]);
+ }
+
+ #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+ if self.len() <= 32 {
+ if let Some(result) = simd_contains(self, haystack) {
+ return result;
+ }
+ }
+
+ self.into_searcher(haystack).next_match().is_some()
+ }
+ _ => self == haystack,
+ }
+ }
+
/// Removes the pattern from the front of haystack, if it matches.
#[inline]
fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
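This is the dispatch behind `str::contains`; observable behavior is unchanged, only which path runs differs (usage sketch; the chosen branch is an internal detail):

```
fn main() {
    let haystack = "lorem ipsum dolor";
    assert!(haystack.contains(""));      // empty needle: trivially true
    assert!(haystack.contains("m"));     // one byte: plain byte search
    assert!(haystack.contains("ipsum")); // 2..=32 bytes on x86-64 with SSE2: simd_contains
    assert!(!haystack.contains("lorem ipsum dolor sit")); // longer than haystack: equality check fails
}
```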
@@ -1684,3 +1711,210 @@ impl TwoWayStrategy for RejectAndMatch {
SearchStep::Match(a, b)
}
}
+
+/// SIMD search for short needles based on
+/// Wojciech Muła's "SIMD-friendly algorithms for substring searching"[0]
+///
+/// It skips ahead by the vector width on each iteration (rather than the needle length as two-way
+/// does) by probing the first and last byte of the needle for the whole vector width
+/// and only doing full needle comparisons when the vectorized probe indicated potential matches.
+///
+/// Since the x86_64 baseline only offers SSE2 we only use u8x16 here.
+/// If we ever ship std for x86-64-v3 or adapt this for other platforms, then wider vectors
+/// should be evaluated.
+///
+/// For haystacks smaller than vector-size + needle length, it falls back to
+/// a naive O(n*m) search, so this implementation should not be called with larger needles.
+///
+/// [0]: http://0x80.pl/articles/simd-strfind.html#sse-avx2
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
+#[inline]
+fn simd_contains(needle: &str, haystack: &str) -> Option<bool> {
+ let needle = needle.as_bytes();
+ let haystack = haystack.as_bytes();
+
+ debug_assert!(needle.len() > 1);
+
+ use crate::ops::BitAnd;
+ use crate::simd::mask8x16 as Mask;
+ use crate::simd::u8x16 as Block;
+ use crate::simd::{SimdPartialEq, ToBitMask};
+
+ let first_probe = needle[0];
+ let last_byte_offset = needle.len() - 1;
+
+ // the offset used for the 2nd vector
+ let second_probe_offset = if needle.len() == 2 {
+ // never bail out on len=2 needles because the probes will fully cover them and have
+ // no degenerate cases.
+ 1
+ } else {
+ // try a few bytes in case first and last byte of the needle are the same
+ let Some(second_probe_offset) = (needle.len().saturating_sub(4)..needle.len()).rfind(|&idx| needle[idx] != first_probe) else {
+ // fall back to other search methods if we can't find any different bytes
+ // since we could otherwise hit some degenerate cases
+ return None;
+ };
+ second_probe_offset
+ };
+
+ // do a naive search if the haystack is too small to fit
+ if haystack.len() < Block::LANES + last_byte_offset {
+ return Some(haystack.windows(needle.len()).any(|c| c == needle));
+ }
+
+ let first_probe: Block = Block::splat(first_probe);
+ let second_probe: Block = Block::splat(needle[second_probe_offset]);
+ // The first byte is already checked by the outer loop. To verify a match, only the
+ // remainder has to be compared.
+ let trimmed_needle = &needle[1..];
+
+ // this #[cold] is load-bearing, benchmark before removing it...
+ let check_mask = #[cold]
+ |idx, mask: u16, skip: bool| -> bool {
+ if skip {
+ return false;
+ }
+
+ // and so is this. optimizations are weird.
+ let mut mask = mask;
+
+ while mask != 0 {
+ let trailing = mask.trailing_zeros();
+ let offset = idx + trailing as usize + 1;
+ // SAFETY: `mask` has between 0 and 15 trailing zeroes; we skip one additional byte that was already compared
+ // and then take trimmed_needle.len() bytes. This is within the bounds defined by the outer loop
+ unsafe {
+ let sub = haystack.get_unchecked(offset..).get_unchecked(..trimmed_needle.len());
+ if small_slice_eq(sub, trimmed_needle) {
+ return true;
+ }
+ }
+ mask &= !(1 << trailing);
+ }
+ return false;
+ };
+
+ let test_chunk = |idx| -> u16 {
+ // SAFETY: this requires at least LANES bytes being readable at idx
+ // that is ensured by the loop ranges (see comments below)
+ let a: Block = unsafe { haystack.as_ptr().add(idx).cast::<Block>().read_unaligned() };
+ // SAFETY: this requires LANES + second_probe_offset bytes being readable at idx
+ let b: Block = unsafe {
+ haystack.as_ptr().add(idx).add(second_probe_offset).cast::<Block>().read_unaligned()
+ };
+ let eq_first: Mask = a.simd_eq(first_probe);
+ let eq_last: Mask = b.simd_eq(second_probe);
+ let both = eq_first.bitand(eq_last);
+ let mask = both.to_bitmask();
+
+ return mask;
+ };
+
+ let mut i = 0;
+ let mut result = false;
+ // The loop conditions must ensure that there's enough headroom to read LANES bytes,
+ // and not only at the current index but also at the index shifted by second_probe_offset
+ const UNROLL: usize = 4;
+ while i + last_byte_offset + UNROLL * Block::LANES < haystack.len() && !result {
+ let mut masks = [0u16; UNROLL];
+ for j in 0..UNROLL {
+ masks[j] = test_chunk(i + j * Block::LANES);
+ }
+ for j in 0..UNROLL {
+ let mask = masks[j];
+ if mask != 0 {
+ result |= check_mask(i + j * Block::LANES, mask, result);
+ }
+ }
+ i += UNROLL * Block::LANES;
+ }
+ while i + last_byte_offset + Block::LANES < haystack.len() && !result {
+ let mask = test_chunk(i);
+ if mask != 0 {
+ result |= check_mask(i, mask, result);
+ }
+ i += Block::LANES;
+ }
+
+ // Process the tail that didn't fit into LANES-sized steps.
+ // This simply repeats the same procedure but as a right-aligned chunk instead
+ // of a left-aligned one. The last byte must be exactly flush with the string end so
+ // we don't miss a single byte or read out of bounds.
+ let i = haystack.len() - last_byte_offset - Block::LANES;
+ let mask = test_chunk(i);
+ if mask != 0 {
+ result |= check_mask(i, mask, result);
+ }
+
+ Some(result)
+}
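A scalar model of what the vector code does, one window at a time (illustration only, not the SSE2 code; the real implementation picks a second probe byte that differs from the first and tests 16 windows per vector step):

```
fn scalar_model_contains(needle: &[u8], haystack: &[u8]) -> bool {
    assert!(needle.len() > 1);
    let second = needle.len() - 1; // simplified second-probe offset
    haystack.windows(needle.len()).any(|w| {
        // Cheap double probe first, full comparison only on a double hit.
        w[0] == needle[0] && w[second] == needle[second] && w[1..] == needle[1..]
    })
}

fn main() {
    assert!(scalar_model_contains(b"ipsum", b"lorem ipsum dolor"));
    assert!(!scalar_model_contains(b"ipsun", b"lorem ipsum dolor"));
}
```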
+
+/// Compares short slices for equality.
+///
+/// It avoids a call to libc's memcmp which is faster on long slices
+/// due to SIMD optimizations but it incurs a function call overhead.
+///
+/// # Safety
+///
+/// Both slices must have the same length.
+#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))] // only called on x86
+#[inline]
+unsafe fn small_slice_eq(x: &[u8], y: &[u8]) -> bool {
+ debug_assert_eq!(x.len(), y.len());
+ // This function is adapted from
+ // https://github.com/BurntSushi/memchr/blob/8037d11b4357b0f07be2bb66dc2659d9cf28ad32/src/memmem/util.rs#L32
+
+ // If we don't have enough bytes to do 4-byte at a time loads, then
+ // fall back to the naive slow version.
+ //
+ // Potential alternative: We could do a copy_nonoverlapping combined with a mask instead
+ // of a loop. Benchmark it.
+ if x.len() < 4 {
+ for (&b1, &b2) in x.iter().zip(y) {
+ if b1 != b2 {
+ return false;
+ }
+ }
+ return true;
+ }
+ // When we have 4 or more bytes to compare, then proceed in chunks of 4 at
+ // a time using unaligned loads.
+ //
+ // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is
+ // that this particular version of memcmp is likely to be called with tiny
+ // needles. That means that if we do 8 byte loads, then a higher proportion
+ // of memcmp calls will use the slower variant above. With that said, this
+ // is a hypothesis and is only loosely supported by benchmarks. There's
+ // likely some improvement that could be made here. The main thing here
+ // though is to optimize for latency, not throughput.
+
+ // SAFETY: Per the safety contract, `x` and `y`
+ // have the same length, so `px < pxend` implies that `py < pyend`.
+ // Thus, dereferencing both `px` and `py` in the loop below is safe.
+ //
+ // Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
+ // end of `px` and `py`. Thus, the final dereference outside of the
+ // loop is guaranteed to be valid. (The final comparison will overlap with
+ // the last comparison done in the loop for lengths that aren't multiples
+ // of four.)
+ //
+ // Finally, we needn't worry about alignment here, since we do unaligned
+ // loads.
+ unsafe {
+ let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
+ let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
+ while px < pxend {
+ let vx = (px as *const u32).read_unaligned();
+ let vy = (py as *const u32).read_unaligned();
+ if vx != vy {
+ return false;
+ }
+ px = px.add(4);
+ py = py.add(4);
+ }
+ let vx = (pxend as *const u32).read_unaligned();
+ let vy = (pyend as *const u32).read_unaligned();
+ vx == vy
+ }
+}
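The flush-with-the-end tail load is what makes the 4-byte loop exhaustive; a quick standalone check of the byte coverage for a non-multiple-of-four length (hypothetical test, not part of the patch):

```
fn main() {
    let len = 7usize;
    // In-loop load starts: 0, 4, ... while strictly before `len - 4`.
    let loop_starts: Vec<usize> = (0..len - 4).step_by(4).collect();
    let tail_start = len - 4; // the final load is flush with the end
    let mut covered = vec![false; len];
    for start in loop_starts.into_iter().chain([tail_start]) {
        for b in start..start + 4 {
            covered[b] = true; // overlapping bytes are simply compared twice
        }
    }
    assert!(covered.into_iter().all(|c| c));
}
```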
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index 41f0a25db..f1dc4f7b5 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -9,6 +9,7 @@ use crate::task::Ready;
/// scheduled to receive a wakeup instead.
#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[cfg_attr(not(bootstrap), lang = "Poll")]
#[stable(feature = "futures_api", since = "1.36.0")]
pub enum Poll<T> {
/// Represents that a value is immediately ready.
diff --git a/library/core/src/tuple.rs b/library/core/src/tuple.rs
index fc91fe468..28275798f 100644
--- a/library/core/src/tuple.rs
+++ b/library/core/src/tuple.rs
@@ -22,7 +22,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:PartialEq),+> PartialEq for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const PartialEq),+> const PartialEq for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
@@ -40,7 +41,7 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:Eq),+> Eq for ($($T,)+)
+ impl<$($T: Eq),+> Eq for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{}
@@ -49,7 +50,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const PartialOrd + ~const PartialEq),+> const PartialOrd for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
@@ -79,7 +81,8 @@ macro_rules! tuple_impls {
maybe_tuple_doc! {
$($T)+ @
#[stable(feature = "rust1", since = "1.0.0")]
- impl<$($T:Ord),+> Ord for ($($T,)+)
+ #[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
+ impl<$($T: ~const Ord),+> const Ord for ($($T,)+)
where
last_type!($($T,)+): ?Sized
{
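With the `~const` bounds, tuple comparisons become usable in const contexts once the element impls are const as well. A sketch on nightly (assuming the `const_cmp` gate is sufficient here; tracked in issue 92391):

```
#![feature(const_cmp)]

const _: () = {
    assert!((1, 2) == (1, 2));
    assert!((1, 2) < (1, 3));
};

fn main() {}
```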
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index 9538b8139..e98dac8d1 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -131,6 +131,24 @@ fn distinct_type_names() {
assert_ne!(type_name_of_val(Velocity), type_name_of_val(Velocity(0.0, -9.8)),);
}
+#[cfg(not(bootstrap))]
+#[test]
+fn dyn_type_name() {
+ trait Foo {
+ type Bar;
+ }
+
+ assert_eq!(
+ "dyn core::ops::function::Fn(i32, i32) -> i32",
+ std::any::type_name::<dyn Fn(i32, i32) -> i32>()
+ );
+ assert_eq!(
+ "dyn coretests::any::dyn_type_name::Foo<Bar = i32> \
+ + core::marker::Send + core::marker::Sync",
+ std::any::type_name::<dyn Foo<Bar = i32> + Send + Sync>()
+ );
+}
+
// Test the `Provider` API.
struct SomeConcreteType {
diff --git a/library/core/tests/fmt/float.rs b/library/core/tests/fmt/float.rs
index 47a7400f7..003782f34 100644
--- a/library/core/tests/fmt/float.rs
+++ b/library/core/tests/fmt/float.rs
@@ -5,7 +5,7 @@ fn test_format_f64() {
assert_eq!("10", format!("{:.0}", 9.9f64));
assert_eq!("9.8", format!("{:.1}", 9.849f64));
assert_eq!("9.9", format!("{:.1}", 9.851f64));
- assert_eq!("1", format!("{:.0}", 0.5f64));
+ assert_eq!("0", format!("{:.0}", 0.5f64));
assert_eq!("1.23456789e6", format!("{:e}", 1234567.89f64));
assert_eq!("1.23456789e3", format!("{:e}", 1234.56789f64));
assert_eq!("1.23456789E6", format!("{:E}", 1234567.89f64));
@@ -25,13 +25,73 @@ fn test_format_f64() {
}
#[test]
+fn test_format_f64_rounds_ties_to_even() {
+ assert_eq!("0", format!("{:.0}", 0.5f64));
+ assert_eq!("2", format!("{:.0}", 1.5f64));
+ assert_eq!("2", format!("{:.0}", 2.5f64));
+ assert_eq!("4", format!("{:.0}", 3.5f64));
+ assert_eq!("4", format!("{:.0}", 4.5f64));
+ assert_eq!("6", format!("{:.0}", 5.5f64));
+ assert_eq!("128", format!("{:.0}", 127.5f64));
+ assert_eq!("128", format!("{:.0}", 128.5f64));
+ assert_eq!("0.2", format!("{:.1}", 0.25f64));
+ assert_eq!("0.8", format!("{:.1}", 0.75f64));
+ assert_eq!("0.12", format!("{:.2}", 0.125f64));
+ assert_eq!("0.88", format!("{:.2}", 0.875f64));
+ assert_eq!("0.062", format!("{:.3}", 0.062f64));
+ assert_eq!("-0", format!("{:.0}", -0.5f64));
+ assert_eq!("-2", format!("{:.0}", -1.5f64));
+ assert_eq!("-2", format!("{:.0}", -2.5f64));
+ assert_eq!("-4", format!("{:.0}", -3.5f64));
+ assert_eq!("-4", format!("{:.0}", -4.5f64));
+ assert_eq!("-6", format!("{:.0}", -5.5f64));
+ assert_eq!("-128", format!("{:.0}", -127.5f64));
+ assert_eq!("-128", format!("{:.0}", -128.5f64));
+ assert_eq!("-0.2", format!("{:.1}", -0.25f64));
+ assert_eq!("-0.8", format!("{:.1}", -0.75f64));
+ assert_eq!("-0.12", format!("{:.2}", -0.125f64));
+ assert_eq!("-0.88", format!("{:.2}", -0.875f64));
+ assert_eq!("-0.062", format!("{:.3}", -0.062f64));
+
+ assert_eq!("2e0", format!("{:.0e}", 1.5f64));
+ assert_eq!("2e0", format!("{:.0e}", 2.5f64));
+ assert_eq!("4e0", format!("{:.0e}", 3.5f64));
+ assert_eq!("4e0", format!("{:.0e}", 4.5f64));
+ assert_eq!("6e0", format!("{:.0e}", 5.5f64));
+ assert_eq!("1.28e2", format!("{:.2e}", 127.5f64));
+ assert_eq!("1.28e2", format!("{:.2e}", 128.5f64));
+ assert_eq!("-2e0", format!("{:.0e}", -1.5f64));
+ assert_eq!("-2e0", format!("{:.0e}", -2.5f64));
+ assert_eq!("-4e0", format!("{:.0e}", -3.5f64));
+ assert_eq!("-4e0", format!("{:.0e}", -4.5f64));
+ assert_eq!("-6e0", format!("{:.0e}", -5.5f64));
+ assert_eq!("-1.28e2", format!("{:.2e}", -127.5f64));
+ assert_eq!("-1.28e2", format!("{:.2e}", -128.5f64));
+
+ assert_eq!("2E0", format!("{:.0E}", 1.5f64));
+ assert_eq!("2E0", format!("{:.0E}", 2.5f64));
+ assert_eq!("4E0", format!("{:.0E}", 3.5f64));
+ assert_eq!("4E0", format!("{:.0E}", 4.5f64));
+ assert_eq!("6E0", format!("{:.0E}", 5.5f64));
+ assert_eq!("1.28E2", format!("{:.2E}", 127.5f64));
+ assert_eq!("1.28E2", format!("{:.2E}", 128.5f64));
+ assert_eq!("-2E0", format!("{:.0E}", -1.5f64));
+ assert_eq!("-2E0", format!("{:.0E}", -2.5f64));
+ assert_eq!("-4E0", format!("{:.0E}", -3.5f64));
+ assert_eq!("-4E0", format!("{:.0E}", -4.5f64));
+ assert_eq!("-6E0", format!("{:.0E}", -5.5f64));
+ assert_eq!("-1.28E2", format!("{:.2E}", -127.5f64));
+ assert_eq!("-1.28E2", format!("{:.2E}", -128.5f64));
+}
+
+#[test]
fn test_format_f32() {
assert_eq!("1", format!("{:.0}", 1.0f32));
assert_eq!("9", format!("{:.0}", 9.4f32));
assert_eq!("10", format!("{:.0}", 9.9f32));
assert_eq!("9.8", format!("{:.1}", 9.849f32));
assert_eq!("9.9", format!("{:.1}", 9.851f32));
- assert_eq!("1", format!("{:.0}", 0.5f32));
+ assert_eq!("0", format!("{:.0}", 0.5f32));
assert_eq!("1.2345679e6", format!("{:e}", 1234567.89f32));
assert_eq!("1.2345679e3", format!("{:e}", 1234.56789f32));
assert_eq!("1.2345679E6", format!("{:E}", 1234567.89f32));
@@ -50,6 +110,66 @@ fn test_format_f32() {
assert_eq!("1234.6", format!("{:.1?}", 1234.56789f32));
}
+#[test]
+fn test_format_f32_rounds_ties_to_even() {
+ assert_eq!("0", format!("{:.0}", 0.5f32));
+ assert_eq!("2", format!("{:.0}", 1.5f32));
+ assert_eq!("2", format!("{:.0}", 2.5f32));
+ assert_eq!("4", format!("{:.0}", 3.5f32));
+ assert_eq!("4", format!("{:.0}", 4.5f32));
+ assert_eq!("6", format!("{:.0}", 5.5f32));
+ assert_eq!("128", format!("{:.0}", 127.5f32));
+ assert_eq!("128", format!("{:.0}", 128.5f32));
+ assert_eq!("0.2", format!("{:.1}", 0.25f32));
+ assert_eq!("0.8", format!("{:.1}", 0.75f32));
+ assert_eq!("0.12", format!("{:.2}", 0.125f32));
+ assert_eq!("0.88", format!("{:.2}", 0.875f32));
+ assert_eq!("0.062", format!("{:.3}", 0.062f32));
+ assert_eq!("-0", format!("{:.0}", -0.5f32));
+ assert_eq!("-2", format!("{:.0}", -1.5f32));
+ assert_eq!("-2", format!("{:.0}", -2.5f32));
+ assert_eq!("-4", format!("{:.0}", -3.5f32));
+ assert_eq!("-4", format!("{:.0}", -4.5f32));
+ assert_eq!("-6", format!("{:.0}", -5.5f32));
+ assert_eq!("-128", format!("{:.0}", -127.5f32));
+ assert_eq!("-128", format!("{:.0}", -128.5f32));
+ assert_eq!("-0.2", format!("{:.1}", -0.25f32));
+ assert_eq!("-0.8", format!("{:.1}", -0.75f32));
+ assert_eq!("-0.12", format!("{:.2}", -0.125f32));
+ assert_eq!("-0.88", format!("{:.2}", -0.875f32));
+ assert_eq!("-0.062", format!("{:.3}", -0.062f32));
+
+ assert_eq!("2e0", format!("{:.0e}", 1.5f32));
+ assert_eq!("2e0", format!("{:.0e}", 2.5f32));
+ assert_eq!("4e0", format!("{:.0e}", 3.5f32));
+ assert_eq!("4e0", format!("{:.0e}", 4.5f32));
+ assert_eq!("6e0", format!("{:.0e}", 5.5f32));
+ assert_eq!("1.28e2", format!("{:.2e}", 127.5f32));
+ assert_eq!("1.28e2", format!("{:.2e}", 128.5f32));
+ assert_eq!("-2e0", format!("{:.0e}", -1.5f32));
+ assert_eq!("-2e0", format!("{:.0e}", -2.5f32));
+ assert_eq!("-4e0", format!("{:.0e}", -3.5f32));
+ assert_eq!("-4e0", format!("{:.0e}", -4.5f32));
+ assert_eq!("-6e0", format!("{:.0e}", -5.5f32));
+ assert_eq!("-1.28e2", format!("{:.2e}", -127.5f32));
+ assert_eq!("-1.28e2", format!("{:.2e}", -128.5f32));
+
+ assert_eq!("2E0", format!("{:.0E}", 1.5f32));
+ assert_eq!("2E0", format!("{:.0E}", 2.5f32));
+ assert_eq!("4E0", format!("{:.0E}", 3.5f32));
+ assert_eq!("4E0", format!("{:.0E}", 4.5f32));
+ assert_eq!("6E0", format!("{:.0E}", 5.5f32));
+ assert_eq!("1.28E2", format!("{:.2E}", 127.5f32));
+ assert_eq!("1.28E2", format!("{:.2E}", 128.5f32));
+ assert_eq!("-2E0", format!("{:.0E}", -1.5f32));
+ assert_eq!("-2E0", format!("{:.0E}", -2.5f32));
+ assert_eq!("-4E0", format!("{:.0E}", -3.5f32));
+ assert_eq!("-4E0", format!("{:.0E}", -4.5f32));
+ assert_eq!("-6E0", format!("{:.0E}", -5.5f32));
+ assert_eq!("-1.28E2", format!("{:.2E}", -127.5f32));
+ assert_eq!("-1.28E2", format!("{:.2E}", -128.5f32));
+}
+
fn is_exponential(s: &str) -> bool {
s.contains("e") || s.contains("E")
}
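
Annotation (not part of the patch): the flipped expectations encode that float *formatting* now rounds ties to even (banker's rounding), while `f64::round` keeps rounding halfway cases away from zero. On a toolchain that includes this change:

    fn main() {
        // Formatting: ties go to the even neighbor.
        assert_eq!(format!("{:.0}", 0.5f64), "0");
        assert_eq!(format!("{:.0}", 1.5f64), "2");
        // `round` still rounds half away from zero.
        assert_eq!(0.5f64.round(), 1.0);
    }
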
diff --git a/library/core/tests/hash/mod.rs b/library/core/tests/hash/mod.rs
index f7934d062..267245f05 100644
--- a/library/core/tests/hash/mod.rs
+++ b/library/core/tests/hash/mod.rs
@@ -9,16 +9,19 @@ struct MyHasher {
hash: u64,
}
-impl Default for MyHasher {
+impl const Default for MyHasher {
fn default() -> MyHasher {
MyHasher { hash: 0 }
}
}
-impl Hasher for MyHasher {
+impl const Hasher for MyHasher {
fn write(&mut self, buf: &[u8]) {
- for byte in buf {
- self.hash += *byte as u64;
+ // FIXME(const_trait_impl): change to for loop
+ let mut i = 0;
+ while i < buf.len() {
+ self.hash += buf[i] as u64;
+ i += 1;
}
}
fn write_str(&mut self, s: &str) {
@@ -32,12 +35,25 @@ impl Hasher for MyHasher {
#[test]
fn test_writer_hasher() {
- fn hash<T: Hash>(t: &T) -> u64 {
+ const fn hash<T: ~const Hash>(t: &T) -> u64 {
let mut s = MyHasher { hash: 0 };
t.hash(&mut s);
s.finish()
}
+ const {
+ // FIXME(fee1-dead): assert_eq
+ assert!(hash(&()) == 0);
+ assert!(hash(&5_u8) == 5);
+ assert!(hash(&5_u16) == 5);
+ assert!(hash(&5_u32) == 5);
+
+ assert!(hash(&'a') == 97);
+
+ let s: &str = "a";
+ assert!(hash(&s) == 97 + 0xFF);
+ };
+
assert_eq!(hash(&()), 0);
assert_eq!(hash(&5_u8), 5);
@@ -97,7 +113,7 @@ struct CustomHasher {
output: u64,
}
-impl Hasher for CustomHasher {
+impl const Hasher for CustomHasher {
fn finish(&self) -> u64 {
self.output
}
@@ -109,27 +125,29 @@ impl Hasher for CustomHasher {
}
}
-impl Default for CustomHasher {
+impl const Default for CustomHasher {
fn default() -> CustomHasher {
CustomHasher { output: 0 }
}
}
-impl Hash for Custom {
- fn hash<H: Hasher>(&self, state: &mut H) {
+impl const Hash for Custom {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
state.write_u64(self.hash);
}
}
#[test]
fn test_custom_state() {
- fn hash<T: Hash>(t: &T) -> u64 {
+ const fn hash<T: ~const Hash>(t: &T) -> u64 {
let mut c = CustomHasher { output: 0 };
t.hash(&mut c);
c.finish()
}
assert_eq!(hash(&Custom { hash: 5 }), 5);
+
+ const { assert!(hash(&Custom { hash: 6 }) == 6) };
}
// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten.
diff --git a/library/core/tests/hash/sip.rs b/library/core/tests/hash/sip.rs
index 877d08418..3abf6efcf 100644
--- a/library/core/tests/hash/sip.rs
+++ b/library/core/tests/hash/sip.rs
@@ -8,7 +8,6 @@ use core::{mem, slice};
struct Bytes<'a>(&'a [u8]);
impl<'a> Hash for Bytes<'a> {
- #[allow(unused_must_use)]
fn hash<H: Hasher>(&self, state: &mut H) {
let Bytes(v) = *self;
state.write(v);
@@ -25,6 +24,20 @@ fn hash<T: Hash>(x: &T) -> u64 {
}
#[test]
+const fn test_const_sip() {
+ let val1 = 0x45;
+ let val2 = 0xfeed;
+
+ const fn const_hash<T: ~const Hash>(x: &T) -> u64 {
+ let mut st = SipHasher::new();
+ x.hash(&mut st);
+ st.finish()
+ }
+
+ assert!(const_hash(&(val1)) != const_hash(&(val2)));
+}
+
+#[test]
#[allow(unused_must_use)]
fn test_siphash_1_3() {
let vecs: [[u8; 8]; 64] = [
diff --git a/library/core/tests/iter/adapters/array_chunks.rs b/library/core/tests/iter/adapters/array_chunks.rs
index 4e9d89e1e..ef4a7e53b 100644
--- a/library/core/tests/iter/adapters/array_chunks.rs
+++ b/library/core/tests/iter/adapters/array_chunks.rs
@@ -139,7 +139,8 @@ fn test_iterator_array_chunks_fold() {
let result =
(0..10).map(|_| CountDrop::new(&count)).array_chunks::<3>().fold(0, |acc, _item| acc + 1);
assert_eq!(result, 3);
- assert_eq!(count.get(), 10);
+ // fold impls may or may not process the remainder
+ assert!(count.get() <= 10 && count.get() >= 9);
}
#[test]
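
Annotation (not part of the patch): the assertion is loosened because `fold` implementations are not obliged to touch the remainder; the remainder is exposed separately. A nightly-only sketch (unstable `iter_array_chunks` feature):

    #![feature(iter_array_chunks)]

    fn main() {
        let mut it = (0..10).array_chunks::<3>();
        assert_eq!(it.next(), Some([0, 1, 2]));
        assert_eq!(it.next(), Some([3, 4, 5]));
        assert_eq!(it.next(), Some([6, 7, 8]));
        assert_eq!(it.next(), None);
        // The element that did not fill a chunk is the remainder.
        assert_eq!(it.into_remainder().unwrap().as_slice(), &[9]);
    }
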
diff --git a/library/core/tests/iter/adapters/take.rs b/library/core/tests/iter/adapters/take.rs
index bfb659f0a..3e26b43a2 100644
--- a/library/core/tests/iter/adapters/take.rs
+++ b/library/core/tests/iter/adapters/take.rs
@@ -146,3 +146,23 @@ fn test_take_try_folds() {
assert_eq!(iter.try_for_each(Err), Err(2));
assert_eq!(iter.try_for_each(Err), Ok(()));
}
+
+#[test]
+fn test_byref_take_consumed_items() {
+ let mut inner = 10..90;
+
+ let mut count = 0;
+ inner.by_ref().take(0).for_each(|_| count += 1);
+ assert_eq!(count, 0);
+ assert_eq!(inner, 10..90);
+
+ let mut count = 0;
+ inner.by_ref().take(10).for_each(|_| count += 1);
+ assert_eq!(count, 10);
+ assert_eq!(inner, 20..90);
+
+ let mut count = 0;
+ inner.by_ref().take(100).for_each(|_| count += 1);
+ assert_eq!(count, 70);
+ assert_eq!(inner, 90..90);
+}
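
Annotation (not part of the patch): the new test pins down that `take` over `by_ref` advances the underlying iterator by exactly the items it yields. In miniature, on stable Rust:

    fn main() {
        let mut it = 0..5;
        // `by_ref` borrows `it`, so the adapter's consumption is visible
        // in `it` afterwards.
        let first_two: Vec<_> = it.by_ref().take(2).collect();
        assert_eq!(first_two, [0, 1]);
        assert_eq!(it.next(), Some(2));
    }
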
diff --git a/library/core/tests/iter/sources.rs b/library/core/tests/iter/sources.rs
index d0114ade6..a15f3a514 100644
--- a/library/core/tests/iter/sources.rs
+++ b/library/core/tests/iter/sources.rs
@@ -106,3 +106,52 @@ fn test_empty() {
let mut it = empty::<i32>();
assert_eq!(it.next(), None);
}
+
+#[test]
+fn test_repeat_n_drop() {
+ #[derive(Clone, Debug)]
+ struct DropCounter<'a>(&'a Cell<usize>);
+ impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ self.0.set(self.0.get() + 1);
+ }
+ }
+
+ // `repeat_n(x, 0)` drops `x` immediately
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let mut it = repeat_n(item, 0);
+ assert_eq!(count.get(), 1);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 1);
+ drop(it);
+ assert_eq!(count.get(), 1);
+
+ // Dropping the iterator needs to drop the item if it's non-empty
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let it = repeat_n(item, 3);
+ assert_eq!(count.get(), 0);
+ drop(it);
+ assert_eq!(count.get(), 1);
+
+ // Dropping the iterator doesn't drop the item if it was exhausted
+ let count = Cell::new(0);
+ let item = DropCounter(&count);
+ let mut it = repeat_n(item, 3);
+ assert_eq!(count.get(), 0);
+ let x0 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ let x1 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ let x2 = it.next().unwrap();
+ assert_eq!(count.get(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(count.get(), 0);
+ drop(it);
+ assert_eq!(count.get(), 0);
+ drop((x0, x1, x2));
+ assert_eq!(count.get(), 3);
+}
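
Annotation (not part of the patch): the drop counts above follow from `repeat_n(value, n)` cloning for all but the final item, which hands back the original value. Basic nightly-only usage (unstable `iter_repeat_n` feature):

    #![feature(iter_repeat_n)]
    use std::iter;

    fn main() {
        // Like `iter::repeat(x).take(n)`, but the final item moves out the
        // original value instead of producing another clone.
        let v: Vec<String> = iter::repeat_n("ha".to_string(), 3).collect();
        assert_eq!(v, ["ha", "ha", "ha"]);
    }
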
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 51f858ade..99d4a40c4 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -4,18 +4,22 @@
#![feature(array_windows)]
#![feature(bigint_helper_methods)]
#![feature(cell_update)]
+#![feature(const_align_offset)]
#![feature(const_assume)]
+#![feature(const_align_of_val_raw)]
#![feature(const_black_box)]
#![feature(const_bool_to_option)]
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_convert)]
+#![feature(const_hash)]
#![feature(const_heap)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_assume_init_read)]
#![feature(const_nonnull_new)]
#![feature(const_num_from_num)]
#![feature(const_pointer_byte_offsets)]
+#![feature(const_pointer_is_aligned)]
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
@@ -42,6 +46,7 @@
#![feature(try_find)]
#![feature(inline_const)]
#![feature(is_sorted)]
+#![feature(layout_for_ptr)]
#![feature(pattern)]
#![feature(pin_macro)]
#![feature(sort_internals)]
@@ -62,7 +67,6 @@
#![feature(try_trait_v2)]
#![feature(slice_internals)]
#![feature(slice_partition_dedup)]
-#![feature(int_log)]
#![feature(iter_advance_by)]
#![feature(iter_array_chunks)]
#![feature(iter_collect_into)]
@@ -71,6 +75,7 @@
#![feature(iter_is_partitioned)]
#![feature(iter_next_chunk)]
#![feature(iter_order_by)]
+#![feature(iter_repeat_n)]
#![feature(iterator_try_collect)]
#![feature(iterator_try_reduce)]
#![feature(const_mut_refs)]
@@ -79,6 +84,7 @@
#![feature(never_type)]
#![feature(unwrap_infallible)]
#![feature(pointer_byte_offsets)]
+#![feature(pointer_is_aligned)]
#![feature(portable_simd)]
#![feature(ptr_metadata)]
#![feature(once_cell)]
@@ -102,7 +108,9 @@
#![feature(provide_any)]
#![feature(utf8_chunks)]
#![feature(is_ascii_octdigit)]
+#![feature(get_many_mut)]
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
extern crate test;
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 0362e1c8a..1cfb4fd9f 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -1,4 +1,5 @@
use core::mem::*;
+use core::ptr;
#[cfg(panic = "unwind")]
use std::rc::Rc;
@@ -76,6 +77,25 @@ fn align_of_val_basic() {
}
#[test]
+#[cfg(not(bootstrap))] // stage 0 doesn't have the fix yet, so the test fails
+fn align_of_val_raw_packed() {
+ #[repr(C, packed)]
+ struct B {
+ f: [u32],
+ }
+ let storage = [0u8; 4];
+ let b: *const B = ptr::from_raw_parts(storage.as_ptr().cast(), 1);
+ assert_eq!(unsafe { align_of_val_raw(b) }, 1);
+
+ const ALIGN_OF_VAL_RAW: usize = {
+ let storage = [0u8; 4];
+ let b: *const B = ptr::from_raw_parts(storage.as_ptr().cast(), 1);
+ unsafe { align_of_val_raw(b) }
+ };
+ assert_eq!(ALIGN_OF_VAL_RAW, 1);
+}
+
+#[test]
fn test_swap() {
let mut x = 31337;
let mut y = 42;
diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
index 798473bbd..30843cc3d 100644
--- a/library/core/tests/num/flt2dec/mod.rs
+++ b/library/core/tests/num/flt2dec/mod.rs
@@ -138,7 +138,7 @@ where
// check exact rounding for zero- and negative-width cases
let start;
- if expected[0] >= b'5' {
+ if expected[0] > b'5' {
try_fixed!(f(&decoded) => &mut buf, expectedk, b"1", expectedk + 1;
"zero-width rounding-up mismatch for v={v}: \
actual {actual:?}, expected {expected:?}",
@@ -1007,7 +1007,7 @@ where
assert_eq!(to_string(f, 999.5, Minus, 3), "999.500");
assert_eq!(to_string(f, 999.5, Minus, 30), "999.500000000000000000000000000000");
- assert_eq!(to_string(f, 0.5, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.5, Minus, 0), "0");
assert_eq!(to_string(f, 0.5, Minus, 1), "0.5");
assert_eq!(to_string(f, 0.5, Minus, 2), "0.50");
assert_eq!(to_string(f, 0.5, Minus, 3), "0.500");
diff --git a/library/core/tests/option.rs b/library/core/tests/option.rs
index f36f7c268..dca6321cf 100644
--- a/library/core/tests/option.rs
+++ b/library/core/tests/option.rs
@@ -57,7 +57,7 @@ fn test_get_resource() {
}
#[test]
-#[cfg_attr(not(bootstrap), allow(for_loops_over_fallibles))]
+#[allow(for_loops_over_fallibles)]
fn test_option_dance() {
let x = Some(());
let mut y = Some(5);
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 97a369810..90bc83510 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -359,6 +359,23 @@ fn align_offset_zst() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_zst_const() {
+ const {
+        // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
+        // all, because no number of elements will align the pointer.
+ let mut p = 1;
+ while p < 1024 {
+ assert!(ptr::invalid::<()>(p).align_offset(p) == 0);
+ if p != 1 {
+ assert!(ptr::invalid::<()>(p + 1).align_offset(p) == !0);
+ }
+ p = (p + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
fn align_offset_stride_one() {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// number of bytes.
@@ -380,6 +397,26 @@ fn align_offset_stride_one() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_stride_one_const() {
+ const {
+        // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
+        // the number of bytes.
+ let mut align = 1;
+ while align < 1024 {
+ let mut ptr = 1;
+ while ptr < 2 * align {
+ let expected = ptr % align;
+ let offset = if expected == 0 { 0 } else { align - expected };
+ assert!(ptr::invalid::<u8>(ptr).align_offset(align) == offset);
+ ptr += 1;
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
fn align_offset_various_strides() {
unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
let numptr = ptr as usize;
@@ -456,6 +493,260 @@ fn align_offset_various_strides() {
}
#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_various_strides_const() {
+ const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
+ let mut expected = usize::MAX;
+ // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
+ let mut el = 0;
+ while el < align {
+ if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+ expected = el;
+ break;
+ }
+ el += 1;
+ }
+ let got = ptr.align_offset(align);
+ assert!(got == expected);
+ }
+
+ const {
+        // For pointers of stride != 1, we verify the algorithm against the most naive possible
+        // implementation.
+ let mut align = 1;
+ let limit = 32;
+ while align < limit {
+ let mut ptr = 1;
+ while ptr < 4 * align {
+ unsafe {
+ #[repr(packed)]
+ struct A3(u16, u8);
+ test_stride::<A3>(ptr::invalid::<A3>(ptr), ptr, align);
+
+ struct A4(u32);
+ test_stride::<A4>(ptr::invalid::<A4>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A5(u32, u8);
+ test_stride::<A5>(ptr::invalid::<A5>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A6(u32, u16);
+ test_stride::<A6>(ptr::invalid::<A6>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A7(u32, u16, u8);
+ test_stride::<A7>(ptr::invalid::<A7>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A8(u32, u32);
+ test_stride::<A8>(ptr::invalid::<A8>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A9(u32, u32, u8);
+ test_stride::<A9>(ptr::invalid::<A9>(ptr), ptr, align);
+
+ #[repr(packed)]
+ struct A10(u32, u32, u16);
+ test_stride::<A10>(ptr::invalid::<A10>(ptr), ptr, align);
+
+ test_stride::<u32>(ptr::invalid::<u32>(ptr), ptr, align);
+ test_stride::<u128>(ptr::invalid::<u128>(ptr), ptr, align);
+ }
+ ptr += 1;
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ }
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_with_provenance_const() {
+ const {
+ // On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
+ #[repr(align(4))]
+ struct AlignedI32(i32);
+
+ let data = AlignedI32(42);
+
+ // `stride % align == 0` (usual case)
+
+ let ptr: *const i32 = &data.0;
+ assert!(ptr.align_offset(1) == 0);
+ assert!(ptr.align_offset(2) == 0);
+ assert!(ptr.align_offset(4) == 0);
+ assert!(ptr.align_offset(8) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX);
+ assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX);
+
+ assert!(ptr.wrapping_add(42).align_offset(4) == 0);
+ assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX);
+
+ let ptr1: *const i8 = ptr.cast();
+ assert!(ptr1.align_offset(1) == 0);
+ assert!(ptr1.align_offset(2) == 0);
+ assert!(ptr1.align_offset(4) == 0);
+ assert!(ptr1.align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3);
+ assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1);
+ assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+
+ let ptr2: *const i16 = ptr.cast();
+ assert!(ptr2.align_offset(1) == 0);
+ assert!(ptr2.align_offset(2) == 0);
+ assert!(ptr2.align_offset(4) == 0);
+ assert!(ptr2.align_offset(8) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1);
+ assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX);
+
+ let ptr3: *const i64 = ptr.cast();
+ assert!(ptr3.align_offset(1) == 0);
+ assert!(ptr3.align_offset(2) == 0);
+ assert!(ptr3.align_offset(4) == 0);
+ assert!(ptr3.align_offset(8) == usize::MAX);
+ assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX);
+
+ // `stride % align != 0` (edge case)
+
+ let ptr4: *const [u8; 3] = ptr.cast();
+ assert!(ptr4.align_offset(1) == 0);
+ assert!(ptr4.align_offset(2) == 0);
+ assert!(ptr4.align_offset(4) == 0);
+ assert!(ptr4.align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(4) == 1);
+ assert!(ptr4.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr4.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(4) == 3);
+ assert!(ptr4.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+
+ let ptr5: *const [u8; 5] = ptr.cast();
+ assert!(ptr5.align_offset(1) == 0);
+ assert!(ptr5.align_offset(2) == 0);
+ assert!(ptr5.align_offset(4) == 0);
+ assert!(ptr5.align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(2) == 1);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(4) == 3);
+ assert!(ptr5.wrapping_byte_add(1).align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(2) == 0);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(4) == 2);
+ assert!(ptr5.wrapping_byte_add(2).align_offset(8) == usize::MAX);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(1) == 0);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(2) == 1);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(4) == 1);
+ assert!(ptr5.wrapping_byte_add(3).align_offset(8) == usize::MAX);
+ }
+}
+
+#[test]
+fn align_offset_issue_103361() {
+ #[cfg(target_pointer_width = "64")]
+ const SIZE: usize = 1 << 47;
+ #[cfg(target_pointer_width = "32")]
+ const SIZE: usize = 1 << 30;
+ #[cfg(target_pointer_width = "16")]
+ const SIZE: usize = 1 << 13;
+ struct HugeSize([u8; SIZE - 1]);
+ let _ = ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE);
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_issue_103361_const() {
+ #[cfg(target_pointer_width = "64")]
+ const SIZE: usize = 1 << 47;
+ #[cfg(target_pointer_width = "32")]
+ const SIZE: usize = 1 << 30;
+ #[cfg(target_pointer_width = "16")]
+ const SIZE: usize = 1 << 13;
+ struct HugeSize([u8; SIZE - 1]);
+
+ const {
+ assert!(ptr::invalid::<HugeSize>(SIZE - 1).align_offset(SIZE) == SIZE - 1);
+ assert!(ptr::invalid::<HugeSize>(SIZE).align_offset(SIZE) == 0);
+ assert!(ptr::invalid::<HugeSize>(SIZE + 1).align_offset(SIZE) == 1);
+ }
+}
+
+#[test]
+fn is_aligned() {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ assert!(ptr.is_aligned());
+ assert!(ptr.is_aligned_to(1));
+ assert!(ptr.is_aligned_to(2));
+ assert!(ptr.is_aligned_to(4));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+
+ // At runtime either `ptr` or `ptr+1` is aligned to 8.
+ assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8));
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn is_aligned_const() {
+ const {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ assert!(ptr.is_aligned());
+ assert!(ptr.is_aligned_to(1));
+ assert!(ptr.is_aligned_to(2));
+ assert!(ptr.is_aligned_to(4));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(1));
+ assert!(ptr.wrapping_byte_add(2).is_aligned_to(2));
+ assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4));
+
+        // At compile time neither `ptr` nor `ptr+1` is aligned to 8.
+ assert!(!ptr.is_aligned_to(8));
+ assert!(!ptr.wrapping_add(1).is_aligned_to(8));
+ }
+}
+
+#[test]
+#[cfg(bootstrap)]
+fn is_aligned_const() {
+ const {
+ let data = 42;
+ let ptr: *const i32 = &data;
+ // The bootstrap compiler always returns false for is_aligned.
+ assert!(!ptr.is_aligned());
+ assert!(!ptr.is_aligned_to(1));
+ }
+}
+
+#[test]
fn offset_from() {
let mut a = [0; 5];
let ptr1: *mut i32 = &mut a[1];
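
Annotation (not part of the patch): the contract all of these tests exercise is that `align_offset(align)` returns the number of *elements* to advance to reach alignment, or `usize::MAX` when no advance can achieve it. A small runtime illustration on stable Rust:

    fn main() {
        let buf = [0u8; 16];
        let ptr = buf.as_ptr();
        let off = ptr.align_offset(4);
        // For a stride-1 pointer an aligning offset always exists here.
        assert!(off < 4);
        let aligned = unsafe { ptr.add(off) };
        assert_eq!(aligned as usize % 4, 0);
    }
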
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 9e1fbea79..4e06e0f43 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -2595,3 +2595,63 @@ fn test_flatten_mut_size_overflow() {
let x = &mut [[(); usize::MAX]; 2][..];
let _ = x.flatten_mut();
}
+
+#[test]
+fn test_get_many_mut_normal_2() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a, b] = v.get_many_mut([3, 0]).unwrap();
+ *a += 10;
+ *b += 100;
+ assert_eq!(v, vec![101, 2, 3, 14, 5]);
+}
+
+#[test]
+fn test_get_many_mut_normal_3() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a, b, c] = v.get_many_mut([0, 4, 2]).unwrap();
+ *a += 10;
+ *b += 100;
+ *c += 1000;
+ assert_eq!(v, vec![11, 2, 1003, 4, 105]);
+}
+
+#[test]
+fn test_get_many_mut_empty() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [] = v.get_many_mut([]).unwrap();
+ assert_eq!(v, vec![1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_get_many_mut_single_first() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a] = v.get_many_mut([0]).unwrap();
+ *a += 10;
+ assert_eq!(v, vec![11, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_get_many_mut_single_last() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let [a] = v.get_many_mut([4]).unwrap();
+ *a += 10;
+ assert_eq!(v, vec![1, 2, 3, 4, 15]);
+}
+
+#[test]
+fn test_get_many_mut_oob_nonempty() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ assert!(v.get_many_mut([5]).is_err());
+}
+
+#[test]
+fn test_get_many_mut_oob_empty() {
+ let mut v: Vec<i32> = vec![];
+ assert!(v.get_many_mut([0]).is_err());
+}
+
+#[test]
+fn test_get_many_mut_duplicate() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ assert!(v.get_many_mut([1, 3, 3, 4]).is_err());
+}
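
Annotation (not part of the patch): `get_many_mut` hands out several simultaneous `&mut` borrows into one slice, returning `Err` for out-of-bounds or duplicate indices, as the tests above check. A nightly-only sketch (unstable `get_many_mut` feature):

    #![feature(get_many_mut)]

    fn main() {
        let mut v = [10, 20, 30];
        // Indices must be in bounds and pairwise distinct.
        if let Ok([a, b]) = v.get_many_mut([0, 2]) {
            std::mem::swap(a, b);
        }
        assert_eq!(v, [30, 20, 10]);
    }
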
diff --git a/library/panic_abort/Cargo.toml b/library/panic_abort/Cargo.toml
index 46183d1ad..e6ea2b184 100644
--- a/library/panic_abort/Cargo.toml
+++ b/library/panic_abort/Cargo.toml
@@ -13,7 +13,7 @@ doc = false
[dependencies]
alloc = { path = "../alloc" }
-cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] }
+cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
core = { path = "../core" }
libc = { version = "0.2", default-features = false }
compiler_builtins = "0.1.0"
diff --git a/library/panic_unwind/Cargo.toml b/library/panic_unwind/Cargo.toml
index d720cc7bc..85386976d 100644
--- a/library/panic_unwind/Cargo.toml
+++ b/library/panic_unwind/Cargo.toml
@@ -17,4 +17,4 @@ core = { path = "../core" }
libc = { version = "0.2", default-features = false }
unwind = { path = "../unwind" }
compiler_builtins = "0.1.0"
-cfg-if = "0.1.8"
+cfg-if = "1.0"
diff --git a/library/portable-simd/crates/core_simd/src/intrinsics.rs b/library/portable-simd/crates/core_simd/src/intrinsics.rs
index 962c83a78..704e6ed01 100644
--- a/library/portable-simd/crates/core_simd/src/intrinsics.rs
+++ b/library/portable-simd/crates/core_simd/src/intrinsics.rs
@@ -103,7 +103,7 @@ extern "platform-intrinsic" {
/// val: vector of values to select if a lane is masked
/// ptr: vector of pointers to read from
/// mask: a "wide" mask of integers, selects as if simd_select(mask, read(ptr), val)
- /// note, the LLVM intrinsic accepts a mask vector of <N x i1>
+ /// note, the LLVM intrinsic accepts a mask vector of `<N x i1>`
/// FIXME: review this if/when we fix up our mask story in general?
pub(crate) fn simd_gather<T, U, V>(val: T, ptr: U, mask: V) -> T;
/// llvm.masked.scatter
diff --git a/library/portable-simd/crates/core_simd/src/ops.rs b/library/portable-simd/crates/core_simd/src/ops.rs
index 5a077a469..fc1e0bc42 100644
--- a/library/portable-simd/crates/core_simd/src/ops.rs
+++ b/library/portable-simd/crates/core_simd/src/ops.rs
@@ -40,7 +40,7 @@ macro_rules! unsafe_base {
/// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic.
/// It handles performing a bitand in addition to calling the shift operator, so that the result
-/// is well-defined: LLVM can return a poison value if you shl, lshr, or ashr if rhs >= <Int>::BITS
+/// is well-defined: LLVM can return a poison value if you shl, lshr, or ashr if `rhs >= <Int>::BITS`
/// At worst, this will maybe add another instruction and cycle,
/// at best, it may open up more optimization opportunities,
/// or simply be elided entirely, especially for SIMD ISAs which default to this.
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index bc10b12ec..a7aefc26b 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -15,19 +15,19 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core" }
-libc = { version = "0.2.135", default-features = false, features = ['rustc-dep-of-std'] }
-compiler_builtins = { version = "0.1.73" }
+libc = { version = "0.2.138", default-features = false, features = ['rustc-dep-of-std'] }
+compiler_builtins = { version = "0.1.82" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] }
std_detect = { path = "../stdarch/crates/std_detect", default-features = false, features = ['rustc-dep-of-std'] }
# Dependencies of the `backtrace` crate
-addr2line = { version = "0.16.0", optional = true, default-features = false }
+addr2line = { version = "0.17.0", optional = true, default-features = false }
rustc-demangle = { version = "0.1.21", features = ['rustc-dep-of-std'] }
-miniz_oxide = { version = "0.4.0", optional = true, default-features = false }
+miniz_oxide = { version = "0.5.0", optional = true, default-features = false }
[dependencies.object]
-version = "0.26.1"
+version = "0.29.0"
optional = true
default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs
index 708edc5de..df4903588 100644
--- a/library/std/src/collections/hash/map.rs
+++ b/library/std/src/collections/hash/map.rs
@@ -3161,14 +3161,16 @@ impl DefaultHasher {
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
#[inline]
#[allow(deprecated)]
+ #[rustc_const_unstable(feature = "const_hash", issue = "104061")]
#[must_use]
- pub fn new() -> DefaultHasher {
+ pub const fn new() -> DefaultHasher {
DefaultHasher(SipHasher13::new_with_keys(0, 0))
}
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
-impl Default for DefaultHasher {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const Default for DefaultHasher {
/// Creates a new `DefaultHasher` using [`new`].
/// See its documentation for more.
///
@@ -3180,7 +3182,8 @@ impl Default for DefaultHasher {
}
#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
-impl Hasher for DefaultHasher {
+#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
+impl const Hasher for DefaultHasher {
// The underlying `SipHasher13` doesn't override the other
// `write_*` methods, so it's ok not to forward them here.
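
Annotation (not part of the patch): with `new` now a `const fn` and the `Default`/`Hasher` impls `const`, a `DefaultHasher` can be constructed in const contexts on nightly. A minimal sketch under the unstable `const_hash` feature, constructing in a const fn and using it at runtime (`fresh_hasher` is an illustrative name):

    #![feature(const_hash)]
    use std::collections::hash_map::DefaultHasher;
    use std::hash::Hasher;

    // Constructing the hasher is now possible in a const context.
    const fn fresh_hasher() -> DefaultHasher {
        DefaultHasher::new()
    }

    fn main() {
        let mut h = fresh_hasher();
        h.write(b"abc");
        let _ = h.finish();
    }
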
diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs
index 3dd5b1250..4e3007624 100644
--- a/library/std/src/f32.rs
+++ b/library/std/src/f32.rs
@@ -77,9 +77,11 @@ impl f32 {
/// ```
/// let f = 3.3_f32;
/// let g = -3.3_f32;
+ /// let h = -3.7_f32;
///
/// assert_eq!(f.round(), 3.0);
/// assert_eq!(g.round(), -3.0);
+ /// assert_eq!(h.round(), -4.0);
/// ```
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
@@ -275,7 +277,7 @@ impl f32 {
/// This result is not an element of the function's codomain, but it is the
/// closest floating point number in the real numbers and thus fulfills the
/// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
- /// approximatively.
+ /// approximately.
///
/// # Examples
///
@@ -878,7 +880,9 @@ impl f32 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn asinh(self) -> f32 {
- (self.abs() + ((self * self) + 1.0).sqrt()).ln().copysign(self)
+ let ax = self.abs();
+ let ix = 1.0 / ax;
+ (ax + (ax / (Self::hypot(1.0, ix) + ix))).ln_1p().copysign(self)
}
/// Inverse hyperbolic cosine function.
@@ -898,7 +902,11 @@ impl f32 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn acosh(self) -> f32 {
- if self < 1.0 { Self::NAN } else { (self + ((self * self) - 1.0).sqrt()).ln() }
+ if self < 1.0 {
+ Self::NAN
+ } else {
+ (self + ((self - 1.0).sqrt() * (self + 1.0).sqrt())).ln()
+ }
}
/// Inverse hyperbolic tangent function.
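
Annotation (not part of the patch): the rewrite is about numerics. The textbook identity asinh(x) = ln(x + sqrt(x^2 + 1)) overflows at `x * x` long before the true result does, and loses precision for tiny `x`; the `ln_1p`/`hypot` form avoids both failure modes. On a toolchain that includes this change:

    fn main() {
        let x = 1e30f32;
        // Naive formula: x*x overflows f32, so the whole expression is +inf.
        let naive = (x + (x * x + 1.0).sqrt()).ln();
        assert!(naive.is_infinite());
        // The reformulated asinh stays finite (about 69.8).
        assert!(x.asinh().is_finite());
    }
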
diff --git a/library/std/src/f32/tests.rs b/library/std/src/f32/tests.rs
index 4ec16c84a..6ee295de6 100644
--- a/library/std/src/f32/tests.rs
+++ b/library/std/src/f32/tests.rs
@@ -587,6 +587,11 @@ fn test_asinh() {
assert_approx_eq!((-2.0f32).asinh(), -1.443635475178810342493276740273105f32);
// regression test for the catastrophic cancellation fixed in 72486
assert_approx_eq!((-3000.0f32).asinh(), -8.699514775987968673236893537700647f32);
+
+ // test for low accuracy from issue 104548
+ assert_approx_eq!(60.0f32, 60.0f32.sinh().asinh());
+    // multiplication is needed for the approximate comparison to be meaningful
+ assert_approx_eq!(1.0f32, 1e-15f32.sinh().asinh() * 1e15f32);
}
#[test]
@@ -602,6 +607,9 @@ fn test_acosh() {
assert!(nan.acosh().is_nan());
assert_approx_eq!(2.0f32.acosh(), 1.31695789692481670862504634730796844f32);
assert_approx_eq!(3.0f32.acosh(), 1.76274717403908605046521864995958461f32);
+
+ // test for low accuracy from issue 104548
+ assert_approx_eq!(60.0f32, 60.0f32.cosh().acosh());
}
#[test]
diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs
index 31351a879..ec67fdad4 100644
--- a/library/std/src/f64.rs
+++ b/library/std/src/f64.rs
@@ -77,9 +77,11 @@ impl f64 {
/// ```
/// let f = 3.3_f64;
/// let g = -3.3_f64;
+ /// let h = -3.7_f64;
///
/// assert_eq!(f.round(), 3.0);
/// assert_eq!(g.round(), -3.0);
+ /// assert_eq!(h.round(), -4.0);
/// ```
#[rustc_allow_incoherent_impl]
#[must_use = "method returns a new number and does not mutate the original value"]
@@ -275,7 +277,7 @@ impl f64 {
/// This result is not an element of the function's codomain, but it is the
/// closest floating point number in the real numbers and thus fulfills the
/// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
- /// approximatively.
+ /// approximately.
///
/// # Examples
///
@@ -880,7 +882,9 @@ impl f64 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn asinh(self) -> f64 {
- (self.abs() + ((self * self) + 1.0).sqrt()).ln().copysign(self)
+ let ax = self.abs();
+ let ix = 1.0 / ax;
+ (ax + (ax / (Self::hypot(1.0, ix) + ix))).ln_1p().copysign(self)
}
/// Inverse hyperbolic cosine function.
@@ -900,7 +904,11 @@ impl f64 {
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn acosh(self) -> f64 {
- if self < 1.0 { Self::NAN } else { (self + ((self * self) - 1.0).sqrt()).ln() }
+ if self < 1.0 {
+ Self::NAN
+ } else {
+ (self + ((self - 1.0).sqrt() * (self + 1.0).sqrt())).ln()
+ }
}
/// Inverse hyperbolic tangent function.
diff --git a/library/std/src/f64/tests.rs b/library/std/src/f64/tests.rs
index 12baa68f4..5b039d445 100644
--- a/library/std/src/f64/tests.rs
+++ b/library/std/src/f64/tests.rs
@@ -575,6 +575,11 @@ fn test_asinh() {
assert_approx_eq!((-2.0f64).asinh(), -1.443635475178810342493276740273105f64);
// regression test for the catastrophic cancellation fixed in 72486
assert_approx_eq!((-67452098.07139316f64).asinh(), -18.72007542627454439398548429400083);
+
+ // test for low accuracy from issue 104548
+ assert_approx_eq!(60.0f64, 60.0f64.sinh().asinh());
+    // multiplication is needed for the approximate comparison to be meaningful
+ assert_approx_eq!(1.0f64, 1e-15f64.sinh().asinh() * 1e15f64);
}
#[test]
@@ -590,6 +595,9 @@ fn test_acosh() {
assert!(nan.acosh().is_nan());
assert_approx_eq!(2.0f64.acosh(), 1.31695789692481670862504634730796844f64);
assert_approx_eq!(3.0f64.acosh(), 1.76274717403908605046521864995958461f64);
+
+ // test for low accuracy from issue 104548
+ assert_approx_eq!(60.0f64, 60.0f64.cosh().acosh());
}
#[test]
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index 188ff00e1..f357d505f 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -401,7 +401,7 @@ impl File {
/// Ok(())
/// }
/// ```
- #[unstable(feature = "file_create_new", issue = "none")]
+ #[unstable(feature = "file_create_new", issue = "105135")]
pub fn create_new<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new().read(true).write(true).create_new(true).open(path.as_ref())
}
@@ -510,8 +510,9 @@ impl File {
/// # Errors
///
/// This function will return an error if the file is not opened for writing.
- /// Also, std::io::ErrorKind::InvalidInput will be returned if the desired
- /// length would cause an overflow due to the implementation specifics.
+ /// Also, [`std::io::ErrorKind::InvalidInput`](crate::io::ErrorKind::InvalidInput)
+ /// will be returned if the desired length would cause an overflow due to
+ /// the implementation specifics.
///
/// # Examples
///
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 385585dad..65d4c3c89 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -220,6 +220,7 @@
#![allow(explicit_outlives_requirements)]
#![allow(unused_lifetimes)]
#![deny(rustc::existing_doc_keyword)]
+#![deny(fuzzy_provenance_casts)]
// Ensure that std can be linked against panic_abort despite compiled with `-C panic=unwind`
#![deny(ffi_unwind_calls)]
// std may use features in a platform-specific way
@@ -347,11 +348,13 @@
#![feature(stdsimd)]
#![feature(test)]
#![feature(trace_macros)]
+#![feature(get_many_mut)]
//
// Only used in tests/benchmarks:
//
// Only for const-ness:
#![feature(const_collections_with_hasher)]
+#![feature(const_hash)]
#![feature(const_io_structs)]
#![feature(const_ip)]
#![feature(const_ipv4)]
@@ -596,7 +599,7 @@ mod panicking;
mod personality;
#[path = "../../backtrace/src/lib.rs"]
-#[allow(dead_code, unused_attributes)]
+#[allow(dead_code, unused_attributes, fuzzy_provenance_casts)]
mod backtrace_rs;
// Re-export macros defined in libcore.
diff --git a/library/std/src/net/ip_addr.rs b/library/std/src/net/ip_addr.rs
index 4f14fc280..5453853e1 100644
--- a/library/std/src/net/ip_addr.rs
+++ b/library/std/src/net/ip_addr.rs
@@ -73,7 +73,6 @@ pub enum IpAddr {
/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex
/// ```
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-#[cfg_attr(not(test), rustc_diagnostic_item = "Ipv4Addr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv4Addr {
octets: [u8; 4],
@@ -156,7 +155,6 @@ pub struct Ipv4Addr {
/// assert_eq!(localhost.is_loopback(), true);
/// ```
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-#[cfg_attr(not(test), rustc_diagnostic_item = "Ipv6Addr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Ipv6Addr {
octets: [u8; 16],
diff --git a/library/std/src/net/mod.rs b/library/std/src/net/mod.rs
index 01e3db9de..19d90e7ec 100644
--- a/library/std/src/net/mod.rs
+++ b/library/std/src/net/mod.rs
@@ -11,7 +11,7 @@
//! [`Ipv6Addr`] are respectively IPv4 and IPv6 addresses
//! * [`SocketAddr`] represents socket addresses of either IPv4 or IPv6; [`SocketAddrV4`]
//! and [`SocketAddrV6`] are respectively IPv4 and IPv6 socket addresses
-//! * [`ToSocketAddrs`] is a trait that used for generic address resolution when interacting
+//! * [`ToSocketAddrs`] is a trait that is used for generic address resolution when interacting
//! with networking objects like [`TcpListener`], [`TcpStream`] or [`UdpSocket`]
//! * Other types are return or parameter types for various methods in this module
//!
diff --git a/library/std/src/os/android/net.rs b/library/std/src/os/android/net.rs
index ff96125c3..7cecd1bbf 100644
--- a/library/std/src/os/android/net.rs
+++ b/library/std/src/os/android/net.rs
@@ -1,4 +1,9 @@
-//! Linux and Android-specific definitions for socket options.
+//! Android-specific networking functionality.
#![unstable(feature = "tcp_quickack", issue = "96256")]
-pub use crate::os::net::tcp::TcpStreamExt;
+
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+pub use crate::os::net::linux_ext::addr::SocketAddrExt;
+
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+pub use crate::os::net::linux_ext::tcp::TcpStreamExt;
diff --git a/library/std/src/os/linux/net.rs b/library/std/src/os/linux/net.rs
index ff96125c3..94081c8dd 100644
--- a/library/std/src/os/linux/net.rs
+++ b/library/std/src/os/linux/net.rs
@@ -1,4 +1,9 @@
-//! Linux and Android-specific definitions for socket options.
+//! Linux-specific networking functionality.
#![unstable(feature = "tcp_quickack", issue = "96256")]
-pub use crate::os::net::tcp::TcpStreamExt;
+
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+pub use crate::os::net::linux_ext::addr::SocketAddrExt;
+
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+pub use crate::os::net::linux_ext::tcp::TcpStreamExt;
diff --git a/library/std/src/os/net/linux_ext/addr.rs b/library/std/src/os/net/linux_ext/addr.rs
new file mode 100644
index 000000000..df3fc8e6a
--- /dev/null
+++ b/library/std/src/os/net/linux_ext/addr.rs
@@ -0,0 +1,64 @@
+//! Linux and Android-specific extensions to socket addresses.
+
+use crate::os::unix::net::SocketAddr;
+use crate::sealed::Sealed;
+
+/// Platform-specific extensions to [`SocketAddr`].
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+pub trait SocketAddrExt: Sealed {
+ /// Creates a Unix socket address in the abstract namespace.
+ ///
+ /// The abstract namespace is a Linux-specific extension that allows Unix
+ /// sockets to be bound without creating an entry in the filesystem.
+ /// Abstract sockets are unaffected by filesystem layout or permissions,
+ /// and no cleanup is necessary when the socket is closed.
+ ///
+ /// An abstract socket address name may contain any bytes, including zero.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the name is longer than `SUN_LEN - 1`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener, SocketAddr};
+ /// use std::os::linux::net::SocketAddrExt;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let addr = SocketAddr::from_abstract_name(b"hidden")?;
+ /// let listener = match UnixListener::bind_addr(&addr) {
+ /// Ok(sock) => sock,
+ /// Err(err) => {
+ /// println!("Couldn't bind: {err:?}");
+ /// return Err(err);
+ /// }
+ /// };
+ /// Ok(())
+ /// }
+ /// ```
+ fn from_abstract_name<N>(name: &N) -> crate::io::Result<SocketAddr>
+ where
+ N: AsRef<[u8]>;
+
+ /// Returns the contents of this address if it is in the abstract namespace.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// #![feature(unix_socket_abstract)]
+ /// use std::os::unix::net::{UnixListener, SocketAddr};
+ /// use std::os::linux::net::SocketAddrExt;
+ ///
+ /// fn main() -> std::io::Result<()> {
+ /// let name = b"hidden";
+ /// let name_addr = SocketAddr::from_abstract_name(name)?;
+ /// let socket = UnixListener::bind_addr(&name_addr)?;
+ /// let local_addr = socket.local_addr().expect("Couldn't get local address");
+ /// assert_eq!(local_addr.as_abstract_name(), Some(&name[..]));
+ /// Ok(())
+ /// }
+ /// ```
+ fn as_abstract_name(&self) -> Option<&[u8]>;
+}
diff --git a/library/std/src/os/net/linux_ext/mod.rs b/library/std/src/os/net/linux_ext/mod.rs
new file mode 100644
index 000000000..318ebacfd
--- /dev/null
+++ b/library/std/src/os/net/linux_ext/mod.rs
@@ -0,0 +1,12 @@
+//! Linux and Android-specific networking functionality.
+
+#![doc(cfg(any(target_os = "linux", target_os = "android")))]
+
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+pub(crate) mod addr;
+
+#[unstable(feature = "tcp_quickack", issue = "96256")]
+pub(crate) mod tcp;
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/os/net/tcp.rs b/library/std/src/os/net/linux_ext/tcp.rs
index 5e9ee65a4..5e9ee65a4 100644
--- a/library/std/src/os/net/tcp.rs
+++ b/library/std/src/os/net/linux_ext/tcp.rs
diff --git a/library/std/src/os/net/tests.rs b/library/std/src/os/net/linux_ext/tests.rs
index 4704e3156..2db4deed0 100644
--- a/library/std/src/os/net/tests.rs
+++ b/library/std/src/os/net/linux_ext/tests.rs
@@ -1,9 +1,8 @@
-#[cfg(any(target_os = "android", target_os = "linux",))]
#[test]
fn quickack() {
use crate::{
net::{test::next_test_ip4, TcpListener, TcpStream},
- os::net::tcp::TcpStreamExt,
+ os::net::linux_ext::tcp::TcpStreamExt,
};
macro_rules! t {
diff --git a/library/std/src/os/net/mod.rs b/library/std/src/os/net/mod.rs
index d6d84d24e..5ec267c41 100644
--- a/library/std/src/os/net/mod.rs
+++ b/library/std/src/os/net/mod.rs
@@ -1,7 +1,4 @@
-//! Linux and Android-specific definitions for socket options.
+//! OS-specific networking functionality.
-#![unstable(feature = "tcp_quickack", issue = "96256")]
-#![doc(cfg(any(target_os = "linux", target_os = "android",)))]
-pub mod tcp;
-#[cfg(test)]
-mod tests;
+#[cfg(any(target_os = "linux", target_os = "android", doc))]
+pub(super) mod linux_ext;
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 094085e19..81ac829d2 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -1,6 +1,9 @@
use crate::ffi::OsStr;
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+use crate::os::net::linux_ext;
use crate::os::unix::ffi::OsStrExt;
use crate::path::Path;
+use crate::sealed::Sealed;
use crate::sys::cvt;
use crate::{fmt, io, mem, ptr};
@@ -224,31 +227,6 @@ impl SocketAddr {
if let AddressKind::Pathname(path) = self.address() { Some(path) } else { None }
}
- /// Returns the contents of this address if it is an abstract namespace
- /// without the leading null byte.
- ///
- /// # Examples
- ///
- /// ```no_run
- /// #![feature(unix_socket_abstract)]
- /// use std::os::unix::net::{UnixListener, SocketAddr};
- ///
- /// fn main() -> std::io::Result<()> {
- /// let namespace = b"hidden";
- /// let namespace_addr = SocketAddr::from_abstract_namespace(&namespace[..])?;
- /// let socket = UnixListener::bind_addr(&namespace_addr)?;
- /// let local_addr = socket.local_addr().expect("Couldn't get local address");
- /// assert_eq!(local_addr.as_abstract_namespace(), Some(&namespace[..]));
- /// Ok(())
- /// }
- /// ```
- #[doc(cfg(any(target_os = "android", target_os = "linux")))]
- #[cfg(any(doc, target_os = "android", target_os = "linux",))]
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
- pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
- if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
- }
-
fn address(&self) -> AddressKind<'_> {
let len = self.len as usize - sun_path_offset(&self.addr);
let path = unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.addr.sun_path) };
@@ -265,62 +243,41 @@ impl SocketAddr {
AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
}
}
+}
- /// Creates an abstract domain socket address from a namespace
- ///
- /// An abstract address does not create a file unlike traditional path-based
- /// Unix sockets. The advantage of this is that the address will disappear when
- /// the socket bound to it is closed, so no filesystem clean up is required.
- ///
- /// The leading null byte for the abstract namespace is automatically added.
- ///
- /// This is a Linux-specific extension. See more at [`unix(7)`].
- ///
- /// [`unix(7)`]: https://man7.org/linux/man-pages/man7/unix.7.html
- ///
- /// # Errors
- ///
- /// This will return an error if the given namespace is too long
- ///
- /// # Examples
- ///
- /// ```no_run
- /// #![feature(unix_socket_abstract)]
- /// use std::os::unix::net::{UnixListener, SocketAddr};
- ///
- /// fn main() -> std::io::Result<()> {
- /// let addr = SocketAddr::from_abstract_namespace(b"hidden")?;
- /// let listener = match UnixListener::bind_addr(&addr) {
- /// Ok(sock) => sock,
- /// Err(err) => {
- /// println!("Couldn't bind: {err:?}");
- /// return Err(err);
- /// }
- /// };
- /// Ok(())
- /// }
- /// ```
- #[doc(cfg(any(target_os = "android", target_os = "linux")))]
- #[cfg(any(doc, target_os = "android", target_os = "linux",))]
- #[unstable(feature = "unix_socket_abstract", issue = "85410")]
- pub fn from_abstract_namespace(namespace: &[u8]) -> io::Result<SocketAddr> {
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+impl Sealed for SocketAddr {}
+
+#[doc(cfg(any(target_os = "android", target_os = "linux")))]
+#[cfg(any(doc, target_os = "android", target_os = "linux"))]
+#[unstable(feature = "unix_socket_abstract", issue = "85410")]
+impl linux_ext::addr::SocketAddrExt for SocketAddr {
+ fn as_abstract_name(&self) -> Option<&[u8]> {
+ if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
+ }
+
+ fn from_abstract_name<N>(name: &N) -> crate::io::Result<Self>
+ where
+ N: AsRef<[u8]>,
+ {
+ let name = name.as_ref();
unsafe {
let mut addr: libc::sockaddr_un = mem::zeroed();
addr.sun_family = libc::AF_UNIX as libc::sa_family_t;
- if namespace.len() + 1 > addr.sun_path.len() {
+ if name.len() + 1 > addr.sun_path.len() {
return Err(io::const_io_error!(
io::ErrorKind::InvalidInput,
- "namespace must be shorter than SUN_LEN",
+ "abstract socket name must be shorter than SUN_LEN",
));
}
crate::ptr::copy_nonoverlapping(
- namespace.as_ptr(),
+ name.as_ptr(),
addr.sun_path.as_mut_ptr().add(1) as *mut u8,
- namespace.len(),
+ name.len(),
);
- let len = (sun_path_offset(&addr) + 1 + namespace.len()) as libc::socklen_t;
+ let len = (sun_path_offset(&addr) + 1 + name.len()) as libc::socklen_t;
SocketAddr::from_parts(addr, len)
}
}
diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs
index e4499f9b6..37fcfa844 100644
--- a/library/std/src/os/unix/net/tests.rs
+++ b/library/std/src/os/unix/net/tests.rs
@@ -7,6 +7,12 @@ use crate::sys_common::io::test::tmpdir;
use crate::thread;
use crate::time::Duration;
+#[cfg(target_os = "android")]
+use crate::os::android::net::SocketAddrExt;
+
+#[cfg(target_os = "linux")]
+use crate::os::linux::net::SocketAddrExt;
+
macro_rules! or_panic {
($e:expr) => {
match $e {
@@ -404,7 +410,7 @@ fn test_abstract_stream_connect() {
let msg1 = b"hello";
let msg2 = b"world";
- let socket_addr = or_panic!(SocketAddr::from_abstract_namespace(b"namespace"));
+ let socket_addr = or_panic!(SocketAddr::from_abstract_name(b"name"));
let listener = or_panic!(UnixListener::bind_addr(&socket_addr));
let thread = thread::spawn(move || {
@@ -418,7 +424,7 @@ fn test_abstract_stream_connect() {
let mut stream = or_panic!(UnixStream::connect_addr(&socket_addr));
let peer = or_panic!(stream.peer_addr());
- assert_eq!(peer.as_abstract_namespace().unwrap(), b"namespace");
+ assert_eq!(peer.as_abstract_name().unwrap(), b"name");
or_panic!(stream.write_all(msg1));
let mut buf = vec![];
@@ -432,7 +438,7 @@ fn test_abstract_stream_connect() {
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
fn test_abstract_stream_iter() {
- let addr = or_panic!(SocketAddr::from_abstract_namespace(b"hidden"));
+ let addr = or_panic!(SocketAddr::from_abstract_name(b"hidden"));
let listener = or_panic!(UnixListener::bind_addr(&addr));
let thread = thread::spawn(move || {
@@ -454,13 +460,13 @@ fn test_abstract_stream_iter() {
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
fn test_abstract_datagram_bind_send_to_addr() {
- let addr1 = or_panic!(SocketAddr::from_abstract_namespace(b"ns1"));
+ let addr1 = or_panic!(SocketAddr::from_abstract_name(b"ns1"));
let sock1 = or_panic!(UnixDatagram::bind_addr(&addr1));
let local = or_panic!(sock1.local_addr());
- assert_eq!(local.as_abstract_namespace().unwrap(), b"ns1");
+ assert_eq!(local.as_abstract_name().unwrap(), b"ns1");
- let addr2 = or_panic!(SocketAddr::from_abstract_namespace(b"ns2"));
+ let addr2 = or_panic!(SocketAddr::from_abstract_name(b"ns2"));
let sock2 = or_panic!(UnixDatagram::bind_addr(&addr2));
let msg = b"hello world";
@@ -469,13 +475,13 @@ fn test_abstract_datagram_bind_send_to_addr() {
let (len, addr) = or_panic!(sock2.recv_from(&mut buf));
assert_eq!(msg, &buf[..]);
assert_eq!(len, 11);
- assert_eq!(addr.as_abstract_namespace().unwrap(), b"ns1");
+ assert_eq!(addr.as_abstract_name().unwrap(), b"ns1");
}
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
fn test_abstract_datagram_connect_addr() {
- let addr1 = or_panic!(SocketAddr::from_abstract_namespace(b"ns3"));
+ let addr1 = or_panic!(SocketAddr::from_abstract_name(b"ns3"));
let bsock1 = or_panic!(UnixDatagram::bind_addr(&addr1));
let sock = or_panic!(UnixDatagram::unbound());
@@ -489,7 +495,7 @@ fn test_abstract_datagram_connect_addr() {
assert_eq!(addr.is_unnamed(), true);
assert_eq!(msg, &buf[..]);
- let addr2 = or_panic!(SocketAddr::from_abstract_namespace(b"ns4"));
+ let addr2 = or_panic!(SocketAddr::from_abstract_name(b"ns4"));
let bsock2 = or_panic!(UnixDatagram::bind_addr(&addr2));
or_panic!(sock.connect_addr(&addr2));
@@ -499,8 +505,8 @@ fn test_abstract_datagram_connect_addr() {
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
-fn test_abstract_namespace_too_long() {
- match SocketAddr::from_abstract_namespace(
+fn test_abstract_name_too_long() {
+ match SocketAddr::from_abstract_name(
b"abcdefghijklmnopqrstuvwxyzabcdefghijklmn\
opqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi\
jklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
@@ -513,11 +519,11 @@ fn test_abstract_namespace_too_long() {
#[cfg(any(target_os = "android", target_os = "linux"))]
#[test]
-fn test_abstract_namespace_no_pathname_and_not_unnamed() {
- let namespace = b"local";
- let addr = or_panic!(SocketAddr::from_abstract_namespace(&namespace[..]));
+fn test_abstract_no_pathname_and_not_unnamed() {
+ let name = b"local";
+ let addr = or_panic!(SocketAddr::from_abstract_name(name));
assert_eq!(addr.as_pathname(), None);
- assert_eq!(addr.as_abstract_namespace(), Some(&namespace[..]));
+ assert_eq!(addr.as_abstract_name(), Some(&name[..]));
assert_eq!(addr.is_unnamed(), false);
}
diff --git a/library/std/src/os/wasi/io/mod.rs b/library/std/src/os/wasi/io/mod.rs
index 57bd842a5..4e123a1ee 100644
--- a/library/std/src/os/wasi/io/mod.rs
+++ b/library/std/src/os/wasi/io/mod.rs
@@ -1,6 +1,6 @@
//! WASI-specific extensions to general I/O primitives.
-#![stable(feature = "io_safety", since = "1.63.0")]
+#![stable(feature = "io_safety_wasi", since = "1.65.0")]
-#[stable(feature = "io_safety", since = "1.63.0")]
+#[stable(feature = "io_safety_wasi", since = "1.65.0")]
pub use crate::os::fd::*;
diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs
index 72cb3406d..5c1634084 100644
--- a/library/std/src/os/windows/io/socket.rs
+++ b/library/std/src/os/windows/io/socket.rs
@@ -90,6 +90,7 @@ impl OwnedSocket {
}
// FIXME(strict_provenance_magic): we defined RawSocket to be a u64 ;-;
+ #[allow(fuzzy_provenance_casts)]
#[cfg(not(target_vendor = "uwp"))]
pub(crate) fn set_no_inherit(&self) -> io::Result<()> {
cvt(unsafe {
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index d4976a469..1039835bb 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -594,8 +594,8 @@ pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
// lang item for CTFE panic support
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
-#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[rustc_do_not_const_check] // hooked by const-eval
pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 9d6328162..6c957c2fa 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -1463,6 +1463,30 @@ impl PathBuf {
true
}
+ /// Yields a mutable reference to the underlying [`OsString`] instance.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(path_as_mut_os_str)]
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let mut path = PathBuf::from("/foo");
+ ///
+ /// path.push("bar");
+ /// assert_eq!(path, Path::new("/foo/bar"));
+ ///
+ /// // OsString's `push` does not add a separator.
+ /// path.as_mut_os_string().push("baz");
+ /// assert_eq!(path, Path::new("/foo/barbaz"));
+ /// ```
+ #[unstable(feature = "path_as_mut_os_str", issue = "105021")]
+ #[must_use]
+ #[inline]
+ pub fn as_mut_os_string(&mut self) -> &mut OsString {
+ &mut self.inner
+ }
+
/// Consumes the `PathBuf`, yielding its internal [`OsString`] storage.
///
/// # Examples
@@ -1993,6 +2017,28 @@ impl Path {
&self.inner
}
+ /// Yields a mutable reference to the underlying [`OsStr`] slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(path_as_mut_os_str)]
+ /// use std::path::{Path, PathBuf};
+ ///
+ /// let mut path = PathBuf::from("/Foo.TXT").into_boxed_path();
+ ///
+ /// assert_ne!(&*path, Path::new("/foo.txt"));
+ ///
+ /// path.as_mut_os_str().make_ascii_lowercase();
+ /// assert_eq!(&*path, Path::new("/foo.txt"));
+ /// ```
+ #[unstable(feature = "path_as_mut_os_str", issue = "105021")]
+ #[must_use]
+ #[inline]
+ pub fn as_mut_os_str(&mut self) -> &mut OsStr {
+ &mut self.inner
+ }
+
/// Yields a [`&str`] slice if the `Path` is valid unicode.
///
/// This conversion may entail doing a check for UTF-8 validity.
@@ -2142,7 +2188,10 @@ impl Path {
/// Returns the `Path` without its final component, if there is one.
///
- /// Returns [`None`] if the path terminates in a root or prefix.
+ /// This means it returns `Some("")` for relative paths with one component.
+ ///
+ /// Returns [`None`] if the path terminates in a root or prefix, or if it's
+ /// the empty string.
///
/// # Examples
///
@@ -2156,6 +2205,14 @@ impl Path {
/// let grand_parent = parent.parent().unwrap();
/// assert_eq!(grand_parent, Path::new("/"));
/// assert_eq!(grand_parent.parent(), None);
+ ///
+ /// let relative_path = Path::new("foo/bar");
+ /// let parent = relative_path.parent();
+ /// assert_eq!(parent, Some(Path::new("foo")));
+ /// let grand_parent = parent.and_then(Path::parent);
+ /// assert_eq!(grand_parent, Some(Path::new("")));
+ /// let great_grand_parent = grand_parent.and_then(Path::parent);
+ /// assert_eq!(great_grand_parent, None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "dirname")]
diff --git a/library/std/src/personality/dwarf/eh.rs b/library/std/src/personality/dwarf/eh.rs
index 27b50c13b..a783e1870 100644
--- a/library/std/src/personality/dwarf/eh.rs
+++ b/library/std/src/personality/dwarf/eh.rs
@@ -13,6 +13,7 @@
use super::DwarfReader;
use core::mem;
+use core::ptr;
pub const DW_EH_PE_omit: u8 = 0xFF;
pub const DW_EH_PE_absptr: u8 = 0x00;
@@ -151,7 +152,7 @@ unsafe fn read_encoded_pointer(
// DW_EH_PE_aligned implies it's an absolute pointer value
if encoding == DW_EH_PE_aligned {
- reader.ptr = round_up(reader.ptr as usize, mem::size_of::<usize>())? as *const u8;
+ reader.ptr = reader.ptr.with_addr(round_up(reader.ptr.addr(), mem::size_of::<usize>())?);
return Ok(reader.read::<usize>());
}
@@ -171,7 +172,7 @@ unsafe fn read_encoded_pointer(
result += match encoding & 0x70 {
DW_EH_PE_absptr => 0,
// relative to address of the encoded value, despite the name
- DW_EH_PE_pcrel => reader.ptr as usize,
+ DW_EH_PE_pcrel => reader.ptr.expose_addr(),
DW_EH_PE_funcrel => {
if context.func_start == 0 {
return Err(());
@@ -184,7 +185,7 @@ unsafe fn read_encoded_pointer(
};
if encoding & DW_EH_PE_indirect != 0 {
- result = *(result as *const usize);
+ result = *ptr::from_exposed_addr::<usize>(result);
}
Ok(result)
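
The pattern behind these three changes, replacing `as usize` round-trips with provenance-preserving pointer APIs, can be shown in isolation. A minimal sketch, assuming a nightly toolchain with the unstable `strict_provenance` APIs used above:

    #![feature(strict_provenance)]

    /// Rounds `p` up to `align` (a power of two) without discarding the
    /// pointer's provenance, unlike casting through `usize` and back.
    fn align_up(p: *const u8, align: usize) -> *const u8 {
        p.with_addr((p.addr() + align - 1) & !(align - 1))
    }

    fn main() {
        let buf = [0u8; 32];
        let p = unsafe { buf.as_ptr().add(3) };
        let aligned = align_up(p, 8);
        assert_eq!(aligned.addr() % 8, 0);
    }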
diff --git a/library/std/src/personality/gcc.rs b/library/std/src/personality/gcc.rs
index 7f0b0439c..5fc1b91a1 100644
--- a/library/std/src/personality/gcc.rs
+++ b/library/std/src/personality/gcc.rs
@@ -219,7 +219,7 @@ cfg_if::cfg_if! {
}
cfg_if::cfg_if! {
-    if #[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] {
-        // On x86_64 MinGW targets, the unwinding mechanism is SEH however the unwind
-        // handler data (aka LSDA) uses GCC-compatible encoding.
+    if #[cfg(all(windows, any(target_arch = "aarch64", target_arch = "x86_64"), target_env = "gnu"))] {
+        // On x86_64 and AArch64 MinGW targets, the unwinding mechanism is SEH, however
+        // the unwind handler data (aka LSDA) uses GCC-compatible encoding.
#[lang = "eh_personality"]
diff --git a/library/std/src/prelude/v1.rs b/library/std/src/prelude/v1.rs
index 0226c4d7a..a5a798078 100644
--- a/library/std/src/prelude/v1.rs
+++ b/library/std/src/prelude/v1.rs
@@ -59,9 +59,16 @@ pub use core::prelude::v1::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
+#[cfg(not(bootstrap))]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+pub use core::prelude::v1::alloc_error_handler;
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
pub use core::prelude::v1::{bench, derive, global_allocator, test, test_case};
+#[unstable(feature = "derive_const", issue = "none")]
+#[cfg(not(bootstrap))]
+pub use core::prelude::v1::derive_const;
+
// Do not `doc(no_inline)` either.
#[unstable(
feature = "cfg_accessible",
@@ -78,6 +85,15 @@ pub use core::prelude::v1::cfg_accessible;
)]
pub use core::prelude::v1::cfg_eval;
+// Do not `doc(no_inline)` either.
+#[unstable(
+ feature = "type_ascription",
+ issue = "23416",
+ reason = "placeholder syntax for type ascription"
+)]
+#[cfg(not(bootstrap))]
+pub use core::prelude::v1::type_ascribe;
+
// The file so far is equivalent to src/libcore/prelude/v1.rs,
// and below to src/liballoc/prelude.rs.
// Those files are duplicated rather than using glob imports
diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs
index 331714a99..d6e9da187 100644
--- a/library/std/src/primitive_docs.rs
+++ b/library/std/src/primitive_docs.rs
@@ -1493,11 +1493,13 @@ mod prim_ref {}
/// However, a direct cast back is not possible. You need to use `transmute`:
///
/// ```rust
+/// # #[cfg(not(miri))] { // FIXME: use strict provenance APIs once they are stable, then remove this `cfg`
/// # let fnptr: fn(i32) -> i32 = |x| x+2;
/// # let fnptr_addr = fnptr as usize;
/// let fnptr = fnptr_addr as *const ();
/// let fnptr: fn(i32) -> i32 = unsafe { std::mem::transmute(fnptr) };
/// assert_eq!(fnptr(40), 42);
+/// # }
/// ```
///
/// Crucially, we `as`-cast to a raw pointer before `transmute`ing to a function pointer.
diff --git a/library/std/src/sync/condvar.rs b/library/std/src/sync/condvar.rs
index eb1e7135a..76a1b4a2a 100644
--- a/library/std/src/sync/condvar.rs
+++ b/library/std/src/sync/condvar.rs
@@ -3,7 +3,7 @@ mod tests;
use crate::fmt;
use crate::sync::{mutex, poison, LockResult, MutexGuard, PoisonError};
-use crate::sys_common::condvar as sys;
+use crate::sys::locks as sys;
use crate::time::{Duration, Instant};
/// A type indicating whether a timed wait on a condition variable returned
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index 535cc1c42..c8d3289ca 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -6,7 +6,9 @@ use crate::sync::OnceLock;
/// A value which is initialized on the first access.
///
-/// This type is a thread-safe `Lazy`, and can be used in statics.
+/// This type is a thread-safe [`LazyCell`], and can be used in statics.
+///
+/// [`LazyCell`]: crate::cell::LazyCell
///
/// # Examples
///
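
The static usage the amended doc refers to looks like this. A minimal sketch, assuming `LazyLock` is still gated behind the unstable `once_cell` feature at this version:

    #![feature(once_cell)]
    use std::sync::LazyLock;

    // Initialized on first access, from whichever thread gets there first.
    static GREETING: LazyLock<String> = LazyLock::new(|| "hello".to_uppercase());

    fn main() {
        assert_eq!(GREETING.as_str(), "HELLO");
    }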
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index 7b507a169..4fee8d3e9 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -182,6 +182,7 @@ pub mod mpsc;
mod barrier;
mod condvar;
mod lazy_lock;
+mod mpmc;
mod mutex;
mod once;
mod once_lock;
diff --git a/library/std/src/sync/mpmc/array.rs b/library/std/src/sync/mpmc/array.rs
new file mode 100644
index 000000000..c1e3e48b0
--- /dev/null
+++ b/library/std/src/sync/mpmc/array.rs
@@ -0,0 +1,513 @@
+//! Bounded channel based on a preallocated array.
+//!
+//! This flavor has a fixed, positive capacity.
+//!
+//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
+//!
+//! Source:
+//! - <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>
+//! - <https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub>
+
+use super::context::Context;
+use super::error::*;
+use super::select::{Operation, Selected, Token};
+use super::utils::{Backoff, CachePadded};
+use super::waker::SyncWaker;
+
+use crate::cell::UnsafeCell;
+use crate::mem::MaybeUninit;
+use crate::ptr;
+use crate::sync::atomic::{self, AtomicUsize, Ordering};
+use crate::time::Instant;
+
+/// A slot in a channel.
+struct Slot<T> {
+ /// The current stamp.
+ stamp: AtomicUsize,
+
+ /// The message in this slot.
+ msg: UnsafeCell<MaybeUninit<T>>,
+}
+
+/// The token type for the array flavor.
+#[derive(Debug)]
+pub(crate) struct ArrayToken {
+ /// Slot to read from or write to.
+ slot: *const u8,
+
+ /// Stamp to store into the slot after reading or writing.
+ stamp: usize,
+}
+
+impl Default for ArrayToken {
+ #[inline]
+ fn default() -> Self {
+ ArrayToken { slot: ptr::null(), stamp: 0 }
+ }
+}
+
+/// Bounded channel based on a preallocated array.
+pub(crate) struct Channel<T> {
+ /// The head of the channel.
+ ///
+ /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
+ /// packed into a single `usize`. The lower bits represent the index, while the upper bits
+ /// represent the lap. The mark bit in the head is always zero.
+ ///
+ /// Messages are popped from the head of the channel.
+ head: CachePadded<AtomicUsize>,
+
+ /// The tail of the channel.
+ ///
+ /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
+ /// packed into a single `usize`. The lower bits represent the index, while the upper bits
+ /// represent the lap. The mark bit indicates that the channel is disconnected.
+ ///
+ /// Messages are pushed into the tail of the channel.
+ tail: CachePadded<AtomicUsize>,
+
+ /// The buffer holding slots.
+ buffer: Box<[Slot<T>]>,
+
+ /// The channel capacity.
+ cap: usize,
+
+ /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
+ one_lap: usize,
+
+ /// If this bit is set in the tail, that means the channel is disconnected.
+ mark_bit: usize,
+
+ /// Senders waiting while the channel is full.
+ senders: SyncWaker,
+
+ /// Receivers waiting while the channel is empty and not disconnected.
+ receivers: SyncWaker,
+}
+
+impl<T> Channel<T> {
+ /// Creates a bounded channel of capacity `cap`.
+ pub(crate) fn with_capacity(cap: usize) -> Self {
+ assert!(cap > 0, "capacity must be positive");
+
+ // Compute constants `mark_bit` and `one_lap`.
+ let mark_bit = (cap + 1).next_power_of_two();
+ let one_lap = mark_bit * 2;
+
+ // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
+ let head = 0;
+ // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
+ let tail = 0;
+
+ // Allocate a buffer of `cap` slots initialized
+ // with stamps.
+ let buffer: Box<[Slot<T>]> = (0..cap)
+ .map(|i| {
+ // Set the stamp to `{ lap: 0, mark: 0, index: i }`.
+ Slot { stamp: AtomicUsize::new(i), msg: UnsafeCell::new(MaybeUninit::uninit()) }
+ })
+ .collect();
+
+ Channel {
+ buffer,
+ cap,
+ one_lap,
+ mark_bit,
+ head: CachePadded::new(AtomicUsize::new(head)),
+ tail: CachePadded::new(AtomicUsize::new(tail)),
+ senders: SyncWaker::new(),
+ receivers: SyncWaker::new(),
+ }
+ }
+
+ /// Attempts to reserve a slot for sending a message.
+ fn start_send(&self, token: &mut Token) -> bool {
+ let backoff = Backoff::new();
+ let mut tail = self.tail.load(Ordering::Relaxed);
+
+ loop {
+ // Check if the channel is disconnected.
+ if tail & self.mark_bit != 0 {
+ token.array.slot = ptr::null();
+ token.array.stamp = 0;
+ return true;
+ }
+
+ // Deconstruct the tail.
+ let index = tail & (self.mark_bit - 1);
+ let lap = tail & !(self.one_lap - 1);
+
+ // Inspect the corresponding slot.
+ debug_assert!(index < self.buffer.len());
+ let slot = unsafe { self.buffer.get_unchecked(index) };
+ let stamp = slot.stamp.load(Ordering::Acquire);
+
+ // If the tail and the stamp match, we may attempt to push.
+ if tail == stamp {
+ let new_tail = if index + 1 < self.cap {
+ // Same lap, incremented index.
+ // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
+ tail + 1
+ } else {
+ // One lap forward, index wraps around to zero.
+ // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
+ lap.wrapping_add(self.one_lap)
+ };
+
+ // Try moving the tail.
+ match self.tail.compare_exchange_weak(
+ tail,
+ new_tail,
+ Ordering::SeqCst,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => {
+ // Prepare the token for the follow-up call to `write`.
+ token.array.slot = slot as *const Slot<T> as *const u8;
+ token.array.stamp = tail + 1;
+ return true;
+ }
+ Err(_) => {
+ backoff.spin_light();
+ tail = self.tail.load(Ordering::Relaxed);
+ }
+ }
+ } else if stamp.wrapping_add(self.one_lap) == tail + 1 {
+ atomic::fence(Ordering::SeqCst);
+ let head = self.head.load(Ordering::Relaxed);
+
+ // If the head lags one lap behind the tail as well...
+ if head.wrapping_add(self.one_lap) == tail {
+ // ...then the channel is full.
+ return false;
+ }
+
+ backoff.spin_light();
+ tail = self.tail.load(Ordering::Relaxed);
+ } else {
+ // Snooze because we need to wait for the stamp to get updated.
+ backoff.spin_heavy();
+ tail = self.tail.load(Ordering::Relaxed);
+ }
+ }
+ }
+
+ /// Writes a message into the channel.
+ pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
+ // If there is no slot, the channel is disconnected.
+ if token.array.slot.is_null() {
+ return Err(msg);
+ }
+
+ let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
+
+ // Write the message into the slot and update the stamp.
+ slot.msg.get().write(MaybeUninit::new(msg));
+ slot.stamp.store(token.array.stamp, Ordering::Release);
+
+ // Wake a sleeping receiver.
+ self.receivers.notify();
+ Ok(())
+ }
+
+ /// Attempts to reserve a slot for receiving a message.
+ fn start_recv(&self, token: &mut Token) -> bool {
+ let backoff = Backoff::new();
+ let mut head = self.head.load(Ordering::Relaxed);
+
+ loop {
+ // Deconstruct the head.
+ let index = head & (self.mark_bit - 1);
+ let lap = head & !(self.one_lap - 1);
+
+ // Inspect the corresponding slot.
+ debug_assert!(index < self.buffer.len());
+ let slot = unsafe { self.buffer.get_unchecked(index) };
+ let stamp = slot.stamp.load(Ordering::Acquire);
+
+ // If the stamp is ahead of the head by 1, we may attempt to pop.
+ if head + 1 == stamp {
+ let new = if index + 1 < self.cap {
+ // Same lap, incremented index.
+ // Set to `{ lap: lap, mark: 0, index: index + 1 }`.
+ head + 1
+ } else {
+ // One lap forward, index wraps around to zero.
+ // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
+ lap.wrapping_add(self.one_lap)
+ };
+
+ // Try moving the head.
+ match self.head.compare_exchange_weak(
+ head,
+ new,
+ Ordering::SeqCst,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => {
+ // Prepare the token for the follow-up call to `read`.
+ token.array.slot = slot as *const Slot<T> as *const u8;
+ token.array.stamp = head.wrapping_add(self.one_lap);
+ return true;
+ }
+ Err(_) => {
+ backoff.spin_light();
+ head = self.head.load(Ordering::Relaxed);
+ }
+ }
+ } else if stamp == head {
+ atomic::fence(Ordering::SeqCst);
+ let tail = self.tail.load(Ordering::Relaxed);
+
+ // If the tail equals the head, that means the channel is empty.
+ if (tail & !self.mark_bit) == head {
+ // If the channel is disconnected...
+ if tail & self.mark_bit != 0 {
+ // ...then receive an error.
+ token.array.slot = ptr::null();
+ token.array.stamp = 0;
+ return true;
+ } else {
+ // Otherwise, the receive operation is not ready.
+ return false;
+ }
+ }
+
+ backoff.spin_light();
+ head = self.head.load(Ordering::Relaxed);
+ } else {
+ // Snooze because we need to wait for the stamp to get updated.
+ backoff.spin_heavy();
+ head = self.head.load(Ordering::Relaxed);
+ }
+ }
+ }
+
+ /// Reads a message from the channel.
+ pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
+ if token.array.slot.is_null() {
+ // The channel is disconnected.
+ return Err(());
+ }
+
+ let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
+
+ // Read the message from the slot and update the stamp.
+ let msg = slot.msg.get().read().assume_init();
+ slot.stamp.store(token.array.stamp, Ordering::Release);
+
+ // Wake a sleeping sender.
+ self.senders.notify();
+ Ok(msg)
+ }
+
+ /// Attempts to send a message into the channel.
+ pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+ let token = &mut Token::default();
+ if self.start_send(token) {
+ unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) }
+ } else {
+ Err(TrySendError::Full(msg))
+ }
+ }
+
+ /// Sends a message into the channel.
+ pub(crate) fn send(
+ &self,
+ msg: T,
+ deadline: Option<Instant>,
+ ) -> Result<(), SendTimeoutError<T>> {
+ let token = &mut Token::default();
+ loop {
+ // Try sending a message several times.
+ let backoff = Backoff::new();
+ loop {
+ if self.start_send(token) {
+ let res = unsafe { self.write(token, msg) };
+ return res.map_err(SendTimeoutError::Disconnected);
+ }
+
+ if backoff.is_completed() {
+ break;
+ } else {
+ backoff.spin_light();
+ }
+ }
+
+ if let Some(d) = deadline {
+ if Instant::now() >= d {
+ return Err(SendTimeoutError::Timeout(msg));
+ }
+ }
+
+ Context::with(|cx| {
+ // Prepare for blocking until a receiver wakes us up.
+ let oper = Operation::hook(token);
+ self.senders.register(oper, cx);
+
+ // Has the channel become ready just now?
+ if !self.is_full() || self.is_disconnected() {
+ let _ = cx.try_select(Selected::Aborted);
+ }
+
+ // Block the current thread.
+ let sel = cx.wait_until(deadline);
+
+ match sel {
+ Selected::Waiting => unreachable!(),
+ Selected::Aborted | Selected::Disconnected => {
+ self.senders.unregister(oper).unwrap();
+ }
+ Selected::Operation(_) => {}
+ }
+ });
+ }
+ }
+
+ /// Attempts to receive a message without blocking.
+ pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
+ let token = &mut Token::default();
+
+ if self.start_recv(token) {
+ unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
+ } else {
+ Err(TryRecvError::Empty)
+ }
+ }
+
+ /// Receives a message from the channel.
+ pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
+ let token = &mut Token::default();
+ loop {
+ if self.start_recv(token) {
+ let res = unsafe { self.read(token) };
+ return res.map_err(|_| RecvTimeoutError::Disconnected);
+ }
+
+ if let Some(d) = deadline {
+ if Instant::now() >= d {
+ return Err(RecvTimeoutError::Timeout);
+ }
+ }
+
+ Context::with(|cx| {
+ // Prepare for blocking until a sender wakes us up.
+ let oper = Operation::hook(token);
+ self.receivers.register(oper, cx);
+
+ // Has the channel become ready just now?
+ if !self.is_empty() || self.is_disconnected() {
+ let _ = cx.try_select(Selected::Aborted);
+ }
+
+ // Block the current thread.
+ let sel = cx.wait_until(deadline);
+
+ match sel {
+ Selected::Waiting => unreachable!(),
+ Selected::Aborted | Selected::Disconnected => {
+ self.receivers.unregister(oper).unwrap();
+ // If the channel was disconnected, we still have to check for remaining
+ // messages.
+ }
+ Selected::Operation(_) => {}
+ }
+ });
+ }
+ }
+
+ /// Returns the current number of messages inside the channel.
+ pub(crate) fn len(&self) -> usize {
+ loop {
+ // Load the tail, then load the head.
+ let tail = self.tail.load(Ordering::SeqCst);
+ let head = self.head.load(Ordering::SeqCst);
+
+ // If the tail didn't change, we've got consistent values to work with.
+ if self.tail.load(Ordering::SeqCst) == tail {
+ let hix = head & (self.mark_bit - 1);
+ let tix = tail & (self.mark_bit - 1);
+
+ return if hix < tix {
+ tix - hix
+ } else if hix > tix {
+ self.cap - hix + tix
+ } else if (tail & !self.mark_bit) == head {
+ 0
+ } else {
+ self.cap
+ };
+ }
+ }
+ }
+
+ /// Returns the capacity of the channel.
+ #[allow(clippy::unnecessary_wraps)] // This is intentional.
+ pub(crate) fn capacity(&self) -> Option<usize> {
+ Some(self.cap)
+ }
+
+ /// Disconnects the channel and wakes up all blocked senders and receivers.
+ ///
+ /// Returns `true` if this call disconnected the channel.
+ pub(crate) fn disconnect(&self) -> bool {
+ let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
+
+ if tail & self.mark_bit == 0 {
+ self.senders.disconnect();
+ self.receivers.disconnect();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Returns `true` if the channel is disconnected.
+ pub(crate) fn is_disconnected(&self) -> bool {
+ self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
+ }
+
+ /// Returns `true` if the channel is empty.
+ pub(crate) fn is_empty(&self) -> bool {
+ let head = self.head.load(Ordering::SeqCst);
+ let tail = self.tail.load(Ordering::SeqCst);
+
+ // Is the tail equal to the head?
+ //
+ // Note: If the head changes just before we load the tail, that means there was a moment
+ // when the channel was not empty, so it is safe to just return `false`.
+ (tail & !self.mark_bit) == head
+ }
+
+ /// Returns `true` if the channel is full.
+ pub(crate) fn is_full(&self) -> bool {
+ let tail = self.tail.load(Ordering::SeqCst);
+ let head = self.head.load(Ordering::SeqCst);
+
+ // Is the head lagging one lap behind tail?
+ //
+ // Note: If the tail changes just before we load the head, that means there was a moment
+ // when the channel was not full, so it is safe to just return `false`.
+ head.wrapping_add(self.one_lap) == tail & !self.mark_bit
+ }
+}
+
+impl<T> Drop for Channel<T> {
+ fn drop(&mut self) {
+ // Get the index of the head.
+ let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1);
+
+ // Loop over all slots that hold a message and drop them.
+ for i in 0..self.len() {
+ // Compute the index of the next slot holding a message.
+ let index = if hix + i < self.cap { hix + i } else { hix + i - self.cap };
+
+ unsafe {
+ debug_assert!(index < self.buffer.len());
+ let slot = self.buffer.get_unchecked_mut(index);
+ let msg = &mut *slot.msg.get();
+ msg.as_mut_ptr().drop_in_place();
+ }
+ }
+ }
+}
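
The head and tail stamps above pack three fields into one `usize`. A standalone sketch of that encoding, using hypothetical values but mirroring the constants computed in `with_capacity`:

    fn main() {
        let cap = 5usize;
        let mark_bit = (cap + 1).next_power_of_two(); // 8
        let one_lap = mark_bit * 2;                   // 16
        // A stamp packs { lap, mark, index } into a single usize.
        let stamp = 3 * one_lap + 4; // lap 3, mark 0, index 4
        assert_eq!(stamp & (mark_bit - 1), 4);           // index
        assert_eq!(stamp & !(one_lap - 1), 3 * one_lap); // lap
        assert_eq!(stamp & mark_bit, 0);                 // not disconnected
    }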
diff --git a/library/std/src/sync/mpmc/context.rs b/library/std/src/sync/mpmc/context.rs
new file mode 100644
index 000000000..bbfc6ce00
--- /dev/null
+++ b/library/std/src/sync/mpmc/context.rs
@@ -0,0 +1,155 @@
+//! Thread-local channel context.
+
+use super::select::Selected;
+use super::waker::current_thread_id;
+
+use crate::cell::Cell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::sync::Arc;
+use crate::thread::{self, Thread};
+use crate::time::Instant;
+
+/// Thread-local context.
+#[derive(Debug, Clone)]
+pub struct Context {
+ inner: Arc<Inner>,
+}
+
+/// Inner representation of `Context`.
+#[derive(Debug)]
+struct Inner {
+ /// Selected operation.
+ select: AtomicUsize,
+
+ /// A slot into which another thread may store a pointer to its `Packet`.
+ packet: AtomicPtr<()>,
+
+ /// Thread handle.
+ thread: Thread,
+
+ /// Thread id.
+ thread_id: usize,
+}
+
+impl Context {
+ /// Creates a new context for the duration of the closure.
+ #[inline]
+ pub fn with<F, R>(f: F) -> R
+ where
+ F: FnOnce(&Context) -> R,
+ {
+ thread_local! {
+ /// Cached thread-local context.
+ static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
+ }
+
+ let mut f = Some(f);
+ let mut f = |cx: &Context| -> R {
+ let f = f.take().unwrap();
+ f(cx)
+ };
+
+ CONTEXT
+ .try_with(|cell| match cell.take() {
+ None => f(&Context::new()),
+ Some(cx) => {
+ cx.reset();
+ let res = f(&cx);
+ cell.set(Some(cx));
+ res
+ }
+ })
+ .unwrap_or_else(|_| f(&Context::new()))
+ }
+
+ /// Creates a new `Context`.
+ #[cold]
+ fn new() -> Context {
+ Context {
+ inner: Arc::new(Inner {
+ select: AtomicUsize::new(Selected::Waiting.into()),
+ packet: AtomicPtr::new(ptr::null_mut()),
+ thread: thread::current(),
+ thread_id: current_thread_id(),
+ }),
+ }
+ }
+
+ /// Resets `select` and `packet`.
+ #[inline]
+ fn reset(&self) {
+ self.inner.select.store(Selected::Waiting.into(), Ordering::Release);
+ self.inner.packet.store(ptr::null_mut(), Ordering::Release);
+ }
+
+ /// Attempts to select an operation.
+ ///
+ /// On failure, the previously selected operation is returned.
+ #[inline]
+ pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
+ self.inner
+ .select
+ .compare_exchange(
+ Selected::Waiting.into(),
+ select.into(),
+ Ordering::AcqRel,
+ Ordering::Acquire,
+ )
+ .map(|_| ())
+ .map_err(|e| e.into())
+ }
+
+ /// Stores a packet.
+ ///
+ /// This method must be called after `try_select` succeeds and there is a packet to provide.
+ #[inline]
+ pub fn store_packet(&self, packet: *mut ()) {
+ if !packet.is_null() {
+ self.inner.packet.store(packet, Ordering::Release);
+ }
+ }
+
+ /// Waits until an operation is selected and returns it.
+ ///
+ /// If the deadline is reached, `Selected::Aborted` will be selected.
+ #[inline]
+ pub fn wait_until(&self, deadline: Option<Instant>) -> Selected {
+ loop {
+ // Check whether an operation has been selected.
+ let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
+ if sel != Selected::Waiting {
+ return sel;
+ }
+
+ // If there's a deadline, park the current thread until the deadline is reached.
+ if let Some(end) = deadline {
+ let now = Instant::now();
+
+ if now < end {
+ thread::park_timeout(end - now);
+ } else {
+ // The deadline has been reached. Try aborting select.
+ return match self.try_select(Selected::Aborted) {
+ Ok(()) => Selected::Aborted,
+ Err(s) => s,
+ };
+ }
+ } else {
+ thread::park();
+ }
+ }
+ }
+
+ /// Unparks the thread this context belongs to.
+ #[inline]
+ pub fn unpark(&self) {
+ self.inner.thread.unpark();
+ }
+
+ /// Returns the id of the thread this context belongs to.
+ #[inline]
+ pub fn thread_id(&self) -> usize {
+ self.inner.thread_id
+ }
+}
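
`wait_until` is a park-until-deadline loop. The same shape can be written with only stable `std` APIs; a minimal sketch with a hypothetical helper, not part of the patch:

    use std::thread;
    use std::time::Instant;

    /// Parks until `ready()` returns true or `deadline` passes.
    /// Spurious unparks are fine: the loop re-checks the condition.
    fn wait_until(deadline: Instant, ready: impl Fn() -> bool) -> bool {
        loop {
            if ready() {
                return true;
            }
            let now = Instant::now();
            if now >= deadline {
                return false;
            }
            thread::park_timeout(deadline - now);
        }
    }

    fn main() {
        // With nobody calling unpark, this simply times out.
        assert!(!wait_until(Instant::now(), || false));
    }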
diff --git a/library/std/src/sync/mpmc/counter.rs b/library/std/src/sync/mpmc/counter.rs
new file mode 100644
index 000000000..a5a6bdc67
--- /dev/null
+++ b/library/std/src/sync/mpmc/counter.rs
@@ -0,0 +1,137 @@
+use crate::ops;
+use crate::process;
+use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+
+/// Reference counter internals.
+struct Counter<C> {
+ /// The number of senders associated with the channel.
+ senders: AtomicUsize,
+
+ /// The number of receivers associated with the channel.
+ receivers: AtomicUsize,
+
+ /// Set to `true` if the last sender or the last receiver reference deallocates the channel.
+ destroy: AtomicBool,
+
+ /// The internal channel.
+ chan: C,
+}
+
+/// Wraps a channel into the reference counter.
+pub(crate) fn new<C>(chan: C) -> (Sender<C>, Receiver<C>) {
+ let counter = Box::into_raw(Box::new(Counter {
+ senders: AtomicUsize::new(1),
+ receivers: AtomicUsize::new(1),
+ destroy: AtomicBool::new(false),
+ chan,
+ }));
+ let s = Sender { counter };
+ let r = Receiver { counter };
+ (s, r)
+}
+
+/// The sending side.
+pub(crate) struct Sender<C> {
+ counter: *mut Counter<C>,
+}
+
+impl<C> Sender<C> {
+ /// Returns the internal `Counter`.
+ fn counter(&self) -> &Counter<C> {
+ unsafe { &*self.counter }
+ }
+
+ /// Acquires another sender reference.
+ pub(crate) fn acquire(&self) -> Sender<C> {
+ let count = self.counter().senders.fetch_add(1, Ordering::Relaxed);
+
+ // Cloning senders and calling `mem::forget` on the clones could potentially overflow the
+ // counter. It's very difficult to recover sensibly from such degenerate scenarios so we
+ // just abort when the count becomes very large.
+ if count > isize::MAX as usize {
+ process::abort();
+ }
+
+ Sender { counter: self.counter }
+ }
+
+ /// Releases the sender reference.
+ ///
+ /// Function `disconnect` will be called if this is the last sender reference.
+ pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
+ if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 {
+ disconnect(&self.counter().chan);
+
+ if self.counter().destroy.swap(true, Ordering::AcqRel) {
+ drop(Box::from_raw(self.counter));
+ }
+ }
+ }
+}
+
+impl<C> ops::Deref for Sender<C> {
+ type Target = C;
+
+ fn deref(&self) -> &C {
+ &self.counter().chan
+ }
+}
+
+impl<C> PartialEq for Sender<C> {
+ fn eq(&self, other: &Sender<C>) -> bool {
+ self.counter == other.counter
+ }
+}
+
+/// The receiving side.
+pub(crate) struct Receiver<C> {
+ counter: *mut Counter<C>,
+}
+
+impl<C> Receiver<C> {
+ /// Returns the internal `Counter`.
+ fn counter(&self) -> &Counter<C> {
+ unsafe { &*self.counter }
+ }
+
+ /// Acquires another receiver reference.
+ pub(crate) fn acquire(&self) -> Receiver<C> {
+ let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed);
+
+ // Cloning receivers and calling `mem::forget` on the clones could potentially overflow the
+ // counter. It's very difficult to recover sensibly from such degenerate scenarios so we
+ // just abort when the count becomes very large.
+ if count > isize::MAX as usize {
+ process::abort();
+ }
+
+ Receiver { counter: self.counter }
+ }
+
+ /// Releases the receiver reference.
+ ///
+ /// Function `disconnect` will be called if this is the last receiver reference.
+ pub(crate) unsafe fn release<F: FnOnce(&C) -> bool>(&self, disconnect: F) {
+ if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 {
+ disconnect(&self.counter().chan);
+
+ if self.counter().destroy.swap(true, Ordering::AcqRel) {
+ drop(Box::from_raw(self.counter));
+ }
+ }
+ }
+}
+
+impl<C> ops::Deref for Receiver<C> {
+ type Target = C;
+
+ fn deref(&self) -> &C {
+ &self.counter().chan
+ }
+}
+
+impl<C> PartialEq for Receiver<C> {
+ fn eq(&self, other: &Receiver<C>) -> bool {
+ self.counter == other.counter
+ }
+}
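
The teardown protocol above is: the last reference on each side calls `release`, and whichever side is second to flip `destroy` frees the allocation. A reduced sketch of that handoff, hypothetical and with the channel payload elided:

    use std::sync::atomic::{AtomicBool, Ordering};

    // Each side calls this once, when its own refcount hits zero.
    // `swap` returns the previous value, so exactly one caller
    // (the second one) observes `true` and must deallocate.
    fn release_side(destroy: &AtomicBool) -> bool {
        destroy.swap(true, Ordering::AcqRel)
    }

    fn main() {
        let destroy = AtomicBool::new(false);
        assert!(!release_side(&destroy)); // first side: no dealloc yet
        assert!(release_side(&destroy));  // second side: deallocate
    }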
diff --git a/library/std/src/sync/mpmc/error.rs b/library/std/src/sync/mpmc/error.rs
new file mode 100644
index 000000000..1b8a1f387
--- /dev/null
+++ b/library/std/src/sync/mpmc/error.rs
@@ -0,0 +1,46 @@
+use crate::error;
+use crate::fmt;
+
+pub use crate::sync::mpsc::{RecvError, RecvTimeoutError, SendError, TryRecvError, TrySendError};
+
+/// An error returned from the [`send_timeout`] method.
+///
+/// The error contains the message being sent so it can be recovered.
+///
+/// [`send_timeout`]: super::Sender::send_timeout
+#[derive(PartialEq, Eq, Clone, Copy)]
+pub enum SendTimeoutError<T> {
+ /// The message could not be sent because the channel is full and the operation timed out.
+ ///
+ /// If this is a zero-capacity channel, then the error indicates that there was no receiver
+ /// available to receive the message and the operation timed out.
+ Timeout(T),
+
+ /// The message could not be sent because the channel is disconnected.
+ Disconnected(T),
+}
+
+impl<T> fmt::Debug for SendTimeoutError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "SendTimeoutError(..)".fmt(f)
+ }
+}
+
+impl<T> fmt::Display for SendTimeoutError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f),
+ SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
+ }
+ }
+}
+
+impl<T: Send> error::Error for SendTimeoutError<T> {}
+
+impl<T> From<SendError<T>> for SendTimeoutError<T> {
+ fn from(err: SendError<T>) -> SendTimeoutError<T> {
+ match err {
+ SendError(e) => SendTimeoutError::Disconnected(e),
+ }
+ }
+}
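
The `From<SendError<T>>` impl lets plain `send` results flow into timeout-aware code paths. A tiny sketch of the conversion; the enum below is a hypothetical copy for illustration, since the real type is private to std:

    use std::sync::mpsc::SendError;

    #[derive(Debug, PartialEq)]
    enum SendTimeoutError<T> {
        Timeout(T),
        Disconnected(T),
    }

    impl<T> From<SendError<T>> for SendTimeoutError<T> {
        fn from(err: SendError<T>) -> Self {
            // A disconnected channel is the only way a plain send fails.
            SendTimeoutError::Disconnected(err.0)
        }
    }

    fn main() {
        let e: SendTimeoutError<i32> = SendError(7).into();
        assert_eq!(e, SendTimeoutError::Disconnected(7));
    }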
diff --git a/library/std/src/sync/mpmc/list.rs b/library/std/src/sync/mpmc/list.rs
new file mode 100644
index 000000000..ec6c0726a
--- /dev/null
+++ b/library/std/src/sync/mpmc/list.rs
@@ -0,0 +1,638 @@
+//! Unbounded channel implemented as a linked list.
+
+use super::context::Context;
+use super::error::*;
+use super::select::{Operation, Selected, Token};
+use super::utils::{Backoff, CachePadded};
+use super::waker::SyncWaker;
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomData;
+use crate::mem::MaybeUninit;
+use crate::ptr;
+use crate::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::time::Instant;
+
+// Bits indicating the state of a slot:
+// * If a message has been written into the slot, `WRITE` is set.
+// * If a message has been read from the slot, `READ` is set.
+// * If the block is being destroyed, `DESTROY` is set.
+const WRITE: usize = 1;
+const READ: usize = 2;
+const DESTROY: usize = 4;
+
+// Each block covers one "lap" of indices.
+const LAP: usize = 32;
+// The maximum number of messages a block can hold.
+const BLOCK_CAP: usize = LAP - 1;
+// How many lower bits are reserved for metadata.
+const SHIFT: usize = 1;
+// Has two different purposes:
+// * If set in head, indicates that the block is not the last one.
+// * If set in tail, indicates that the channel is disconnected.
+const MARK_BIT: usize = 1;
+
+/// A slot in a block.
+struct Slot<T> {
+ /// The message.
+ msg: UnsafeCell<MaybeUninit<T>>,
+
+ /// The state of the slot.
+ state: AtomicUsize,
+}
+
+impl<T> Slot<T> {
+ /// Waits until a message is written into the slot.
+ fn wait_write(&self) {
+ let backoff = Backoff::new();
+ while self.state.load(Ordering::Acquire) & WRITE == 0 {
+ backoff.spin_heavy();
+ }
+ }
+}
+
+/// A block in a linked list.
+///
+/// Each block in the list can hold up to `BLOCK_CAP` messages.
+struct Block<T> {
+ /// The next block in the linked list.
+ next: AtomicPtr<Block<T>>,
+
+ /// Slots for messages.
+ slots: [Slot<T>; BLOCK_CAP],
+}
+
+impl<T> Block<T> {
+ /// Creates an empty block.
+ fn new() -> Block<T> {
+ // SAFETY: This is safe because:
+ // [1] `Block::next` (AtomicPtr) may be safely zero initialized.
+ // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].
+ // [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it
+ // holds a MaybeUninit.
+ // [4] `Slot::state` (AtomicUsize) may be safely zero initialized.
+ unsafe { MaybeUninit::zeroed().assume_init() }
+ }
+
+ /// Waits until the next pointer is set.
+ fn wait_next(&self) -> *mut Block<T> {
+ let backoff = Backoff::new();
+ loop {
+ let next = self.next.load(Ordering::Acquire);
+ if !next.is_null() {
+ return next;
+ }
+ backoff.spin_heavy();
+ }
+ }
+
+ /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
+ unsafe fn destroy(this: *mut Block<T>, start: usize) {
+ // It is not necessary to set the `DESTROY` bit in the last slot because that slot has
+ // begun destruction of the block.
+ for i in start..BLOCK_CAP - 1 {
+ let slot = (*this).slots.get_unchecked(i);
+
+ // Mark the `DESTROY` bit if a thread is still using the slot.
+ if slot.state.load(Ordering::Acquire) & READ == 0
+ && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
+ {
+ // If a thread is still using the slot, it will continue destruction of the block.
+ return;
+ }
+ }
+
+ // No thread is using the block, now it is safe to destroy it.
+ drop(Box::from_raw(this));
+ }
+}
+
+/// A position in a channel.
+#[derive(Debug)]
+struct Position<T> {
+ /// The index in the channel.
+ index: AtomicUsize,
+
+ /// The block in the linked list.
+ block: AtomicPtr<Block<T>>,
+}
+
+/// The token type for the list flavor.
+#[derive(Debug)]
+pub(crate) struct ListToken {
+ /// The block of slots.
+ block: *const u8,
+
+ /// The offset into the block.
+ offset: usize,
+}
+
+impl Default for ListToken {
+ #[inline]
+ fn default() -> Self {
+ ListToken { block: ptr::null(), offset: 0 }
+ }
+}
+
+/// Unbounded channel implemented as a linked list.
+///
+/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are
+/// represented as numbers of type `usize` and wrap on overflow.
+///
+/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and
+/// improve cache efficiency.
+pub(crate) struct Channel<T> {
+ /// The head of the channel.
+ head: CachePadded<Position<T>>,
+
+ /// The tail of the channel.
+ tail: CachePadded<Position<T>>,
+
+ /// Receivers waiting while the channel is empty and not disconnected.
+ receivers: SyncWaker,
+
+ /// Indicates that dropping a `Channel<T>` may drop messages of type `T`.
+ _marker: PhantomData<T>,
+}
+
+impl<T> Channel<T> {
+ /// Creates a new unbounded channel.
+ pub(crate) fn new() -> Self {
+ Channel {
+ head: CachePadded::new(Position {
+ block: AtomicPtr::new(ptr::null_mut()),
+ index: AtomicUsize::new(0),
+ }),
+ tail: CachePadded::new(Position {
+ block: AtomicPtr::new(ptr::null_mut()),
+ index: AtomicUsize::new(0),
+ }),
+ receivers: SyncWaker::new(),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Attempts to reserve a slot for sending a message.
+ fn start_send(&self, token: &mut Token) -> bool {
+ let backoff = Backoff::new();
+ let mut tail = self.tail.index.load(Ordering::Acquire);
+ let mut block = self.tail.block.load(Ordering::Acquire);
+ let mut next_block = None;
+
+ loop {
+ // Check if the channel is disconnected.
+ if tail & MARK_BIT != 0 {
+ token.list.block = ptr::null();
+ return true;
+ }
+
+ // Calculate the offset of the index into the block.
+ let offset = (tail >> SHIFT) % LAP;
+
+ // If we reached the end of the block, wait until the next one is installed.
+ if offset == BLOCK_CAP {
+ backoff.spin_heavy();
+ tail = self.tail.index.load(Ordering::Acquire);
+ block = self.tail.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ // If we're going to have to install the next block, allocate it in advance in order to
+ // make the wait for other threads as short as possible.
+ if offset + 1 == BLOCK_CAP && next_block.is_none() {
+ next_block = Some(Box::new(Block::<T>::new()));
+ }
+
+ // If this is the first message to be sent into the channel, we need to allocate the
+ // first block and install it.
+ if block.is_null() {
+ let new = Box::into_raw(Box::new(Block::<T>::new()));
+
+ if self
+ .tail
+ .block
+ .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
+ .is_ok()
+ {
+ self.head.block.store(new, Ordering::Release);
+ block = new;
+ } else {
+ next_block = unsafe { Some(Box::from_raw(new)) };
+ tail = self.tail.index.load(Ordering::Acquire);
+ block = self.tail.block.load(Ordering::Acquire);
+ continue;
+ }
+ }
+
+ let new_tail = tail + (1 << SHIFT);
+
+ // Try advancing the tail forward.
+ match self.tail.index.compare_exchange_weak(
+ tail,
+ new_tail,
+ Ordering::SeqCst,
+ Ordering::Acquire,
+ ) {
+ Ok(_) => unsafe {
+ // If we've reached the end of the block, install the next one.
+ if offset + 1 == BLOCK_CAP {
+ let next_block = Box::into_raw(next_block.unwrap());
+ self.tail.block.store(next_block, Ordering::Release);
+ self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
+ (*block).next.store(next_block, Ordering::Release);
+ }
+
+ token.list.block = block as *const u8;
+ token.list.offset = offset;
+ return true;
+ },
+ Err(_) => {
+ backoff.spin_light();
+ tail = self.tail.index.load(Ordering::Acquire);
+ block = self.tail.block.load(Ordering::Acquire);
+ }
+ }
+ }
+ }
+
+ /// Writes a message into the channel.
+ pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
+ // If there is no slot, the channel is disconnected.
+ if token.list.block.is_null() {
+ return Err(msg);
+ }
+
+ // Write the message into the slot.
+ let block = token.list.block as *mut Block<T>;
+ let offset = token.list.offset;
+ let slot = (*block).slots.get_unchecked(offset);
+ slot.msg.get().write(MaybeUninit::new(msg));
+ slot.state.fetch_or(WRITE, Ordering::Release);
+
+ // Wake a sleeping receiver.
+ self.receivers.notify();
+ Ok(())
+ }
+
+ /// Attempts to reserve a slot for receiving a message.
+ fn start_recv(&self, token: &mut Token) -> bool {
+ let backoff = Backoff::new();
+ let mut head = self.head.index.load(Ordering::Acquire);
+ let mut block = self.head.block.load(Ordering::Acquire);
+
+ loop {
+ // Calculate the offset of the index into the block.
+ let offset = (head >> SHIFT) % LAP;
+
+ // If we reached the end of the block, wait until the next one is installed.
+ if offset == BLOCK_CAP {
+ backoff.spin_heavy();
+ head = self.head.index.load(Ordering::Acquire);
+ block = self.head.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ let mut new_head = head + (1 << SHIFT);
+
+ if new_head & MARK_BIT == 0 {
+ atomic::fence(Ordering::SeqCst);
+ let tail = self.tail.index.load(Ordering::Relaxed);
+
+ // If the tail equals the head, that means the channel is empty.
+ if head >> SHIFT == tail >> SHIFT {
+ // If the channel is disconnected...
+ if tail & MARK_BIT != 0 {
+ // ...then receive an error.
+ token.list.block = ptr::null();
+ return true;
+ } else {
+ // Otherwise, the receive operation is not ready.
+ return false;
+ }
+ }
+
+ // If head and tail are not in the same block, set `MARK_BIT` in head.
+ if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
+ new_head |= MARK_BIT;
+ }
+ }
+
+ // The block can be null here only if the first message is being sent into the channel.
+ // In that case, just wait until it gets initialized.
+ if block.is_null() {
+ backoff.spin_heavy();
+ head = self.head.index.load(Ordering::Acquire);
+ block = self.head.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ // Try moving the head index forward.
+ match self.head.index.compare_exchange_weak(
+ head,
+ new_head,
+ Ordering::SeqCst,
+ Ordering::Acquire,
+ ) {
+ Ok(_) => unsafe {
+ // If we've reached the end of the block, move to the next one.
+ if offset + 1 == BLOCK_CAP {
+ let next = (*block).wait_next();
+ let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
+ if !(*next).next.load(Ordering::Relaxed).is_null() {
+ next_index |= MARK_BIT;
+ }
+
+ self.head.block.store(next, Ordering::Release);
+ self.head.index.store(next_index, Ordering::Release);
+ }
+
+ token.list.block = block as *const u8;
+ token.list.offset = offset;
+ return true;
+ },
+ Err(_) => {
+ backoff.spin_light();
+ head = self.head.index.load(Ordering::Acquire);
+ block = self.head.block.load(Ordering::Acquire);
+ }
+ }
+ }
+ }
+
+ /// Reads a message from the channel.
+ pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
+ if token.list.block.is_null() {
+ // The channel is disconnected.
+ return Err(());
+ }
+
+ // Read the message.
+ let block = token.list.block as *mut Block<T>;
+ let offset = token.list.offset;
+ let slot = (*block).slots.get_unchecked(offset);
+ slot.wait_write();
+ let msg = slot.msg.get().read().assume_init();
+
+ // Destroy the block if we've reached the end, or if another thread wanted to destroy but
+ // couldn't because we were busy reading from the slot.
+ if offset + 1 == BLOCK_CAP {
+ Block::destroy(block, 0);
+ } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
+ Block::destroy(block, offset + 1);
+ }
+
+ Ok(msg)
+ }
+
+ /// Attempts to send a message into the channel.
+ pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+ self.send(msg, None).map_err(|err| match err {
+ SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg),
+ SendTimeoutError::Timeout(_) => unreachable!(),
+ })
+ }
+
+ /// Sends a message into the channel.
+ pub(crate) fn send(
+ &self,
+ msg: T,
+ _deadline: Option<Instant>,
+ ) -> Result<(), SendTimeoutError<T>> {
+ let token = &mut Token::default();
+ assert!(self.start_send(token));
+ unsafe { self.write(token, msg).map_err(SendTimeoutError::Disconnected) }
+ }
+
+ /// Attempts to receive a message without blocking.
+ pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
+ let token = &mut Token::default();
+
+ if self.start_recv(token) {
+ unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
+ } else {
+ Err(TryRecvError::Empty)
+ }
+ }
+
+ /// Receives a message from the channel.
+ pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
+ let token = &mut Token::default();
+ loop {
+ if self.start_recv(token) {
+ unsafe {
+ return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
+ }
+ }
+
+ if let Some(d) = deadline {
+ if Instant::now() >= d {
+ return Err(RecvTimeoutError::Timeout);
+ }
+ }
+
+ // Prepare for blocking until a sender wakes us up.
+ Context::with(|cx| {
+ let oper = Operation::hook(token);
+ self.receivers.register(oper, cx);
+
+ // Has the channel become ready just now?
+ if !self.is_empty() || self.is_disconnected() {
+ let _ = cx.try_select(Selected::Aborted);
+ }
+
+ // Block the current thread.
+ let sel = cx.wait_until(deadline);
+
+ match sel {
+ Selected::Waiting => unreachable!(),
+ Selected::Aborted | Selected::Disconnected => {
+ self.receivers.unregister(oper).unwrap();
+ // If the channel was disconnected, we still have to check for remaining
+ // messages.
+ }
+ Selected::Operation(_) => {}
+ }
+ });
+ }
+ }
+
+ /// Returns the current number of messages inside the channel.
+ pub(crate) fn len(&self) -> usize {
+ loop {
+ // Load the tail index, then load the head index.
+ let mut tail = self.tail.index.load(Ordering::SeqCst);
+ let mut head = self.head.index.load(Ordering::SeqCst);
+
+ // If the tail index didn't change, we've got consistent indices to work with.
+ if self.tail.index.load(Ordering::SeqCst) == tail {
+ // Erase the lower bits.
+ tail &= !((1 << SHIFT) - 1);
+ head &= !((1 << SHIFT) - 1);
+
+ // Fix up indices if they fall onto block ends.
+ if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
+ tail = tail.wrapping_add(1 << SHIFT);
+ }
+ if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
+ head = head.wrapping_add(1 << SHIFT);
+ }
+
+ // Rotate indices so that head falls into the first block.
+ let lap = (head >> SHIFT) / LAP;
+ tail = tail.wrapping_sub((lap * LAP) << SHIFT);
+ head = head.wrapping_sub((lap * LAP) << SHIFT);
+
+ // Remove the lower bits.
+ tail >>= SHIFT;
+ head >>= SHIFT;
+
+ // Return the difference minus the number of blocks between tail and head.
+ return tail - head - tail / LAP;
+ }
+ }
+ }
+
+ /// Returns the capacity of the channel.
+ pub(crate) fn capacity(&self) -> Option<usize> {
+ None
+ }
+
+ /// Disconnects senders and wakes up all blocked receivers.
+ ///
+ /// Returns `true` if this call disconnected the channel.
+ pub(crate) fn disconnect_senders(&self) -> bool {
+ let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
+
+ if tail & MARK_BIT == 0 {
+ self.receivers.disconnect();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Disconnects receivers.
+ ///
+ /// Returns `true` if this call disconnected the channel.
+ pub(crate) fn disconnect_receivers(&self) -> bool {
+ let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
+
+ if tail & MARK_BIT == 0 {
+ // If receivers are dropped first, discard all messages to free
+ // memory eagerly.
+ self.discard_all_messages();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Discards all messages.
+ ///
+ /// This method should only be called when all receivers are dropped.
+ fn discard_all_messages(&self) {
+ let backoff = Backoff::new();
+ let mut tail = self.tail.index.load(Ordering::Acquire);
+ loop {
+ let offset = (tail >> SHIFT) % LAP;
+ if offset != BLOCK_CAP {
+ break;
+ }
+
+            // New updates to the tail will be rejected by MARK_BIT and aborted unless
+            // they land on a block boundary. We need to wait for those updates to take
+            // effect, otherwise messages can leak.
+ backoff.spin_heavy();
+ tail = self.tail.index.load(Ordering::Acquire);
+ }
+
+ let mut head = self.head.index.load(Ordering::Acquire);
+ let mut block = self.head.block.load(Ordering::Acquire);
+
+ unsafe {
+ // Drop all messages between head and tail and deallocate the heap-allocated blocks.
+ while head >> SHIFT != tail >> SHIFT {
+ let offset = (head >> SHIFT) % LAP;
+
+ if offset < BLOCK_CAP {
+ // Drop the message in the slot.
+ let slot = (*block).slots.get_unchecked(offset);
+ slot.wait_write();
+ let p = &mut *slot.msg.get();
+ p.as_mut_ptr().drop_in_place();
+ } else {
+ (*block).wait_next();
+ // Deallocate the block and move to the next one.
+ let next = (*block).next.load(Ordering::Acquire);
+ drop(Box::from_raw(block));
+ block = next;
+ }
+
+ head = head.wrapping_add(1 << SHIFT);
+ }
+
+ // Deallocate the last remaining block.
+ if !block.is_null() {
+ drop(Box::from_raw(block));
+ }
+ }
+ head &= !MARK_BIT;
+ self.head.block.store(ptr::null_mut(), Ordering::Release);
+ self.head.index.store(head, Ordering::Release);
+ }
+
+ /// Returns `true` if the channel is disconnected.
+ pub(crate) fn is_disconnected(&self) -> bool {
+ self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
+ }
+
+ /// Returns `true` if the channel is empty.
+ pub(crate) fn is_empty(&self) -> bool {
+ let head = self.head.index.load(Ordering::SeqCst);
+ let tail = self.tail.index.load(Ordering::SeqCst);
+ head >> SHIFT == tail >> SHIFT
+ }
+
+ /// Returns `true` if the channel is full.
+ pub(crate) fn is_full(&self) -> bool {
+ false
+ }
+}
+
+impl<T> Drop for Channel<T> {
+ fn drop(&mut self) {
+ let mut head = self.head.index.load(Ordering::Relaxed);
+ let mut tail = self.tail.index.load(Ordering::Relaxed);
+ let mut block = self.head.block.load(Ordering::Relaxed);
+
+ // Erase the lower bits.
+ head &= !((1 << SHIFT) - 1);
+ tail &= !((1 << SHIFT) - 1);
+
+ unsafe {
+ // Drop all messages between head and tail and deallocate the heap-allocated blocks.
+ while head != tail {
+ let offset = (head >> SHIFT) % LAP;
+
+ if offset < BLOCK_CAP {
+ // Drop the message in the slot.
+ let slot = (*block).slots.get_unchecked(offset);
+ let p = &mut *slot.msg.get();
+ p.as_mut_ptr().drop_in_place();
+ } else {
+ // Deallocate the block and move to the next one.
+ let next = (*block).next.load(Ordering::Relaxed);
+ drop(Box::from_raw(block));
+ block = next;
+ }
+
+ head = head.wrapping_add(1 << SHIFT);
+ }
+
+ // Deallocate the last remaining block.
+ if !block.is_null() {
+ drop(Box::from_raw(block));
+ }
+ }
+ }
+}
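
Indices in this list flavor are shifted left by `SHIFT` to make room for `MARK_BIT`, and each block holds `BLOCK_CAP = LAP - 1` messages. A standalone sketch of the offset arithmetic, with hypothetical values:

    fn main() {
        const LAP: usize = 32;
        const BLOCK_CAP: usize = LAP - 1;
        const SHIFT: usize = 1;

        // The 40th message (0-based index 39) lands in the second
        // block, at offset 39 % 32 = 7.
        let index: usize = 39 << SHIFT;
        let offset = (index >> SHIFT) % LAP;
        assert_eq!(offset, 7);
        assert!(offset < BLOCK_CAP); // offset == BLOCK_CAP marks the block's end
    }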
diff --git a/library/std/src/sync/mpmc/mod.rs b/library/std/src/sync/mpmc/mod.rs
new file mode 100644
index 000000000..7a602cecd
--- /dev/null
+++ b/library/std/src/sync/mpmc/mod.rs
@@ -0,0 +1,430 @@
+//! Multi-producer multi-consumer channels.
+
+// This module is not currently exposed publicly, but is used
+// as the implementation for the channels in `sync::mpsc`. The
+// implementation comes from the crossbeam-channel crate:
+//
+// Copyright (c) 2019 The Crossbeam Project Developers
+//
+// Permission is hereby granted, free of charge, to any
+// person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the
+// Software without restriction, including without
+// limitation the rights to use, copy, modify, merge,
+// publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following
+// conditions:
+//
+// The above copyright notice and this permission notice
+// shall be included in all copies or substantial portions
+// of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+mod array;
+mod context;
+mod counter;
+mod error;
+mod list;
+mod select;
+mod utils;
+mod waker;
+mod zero;
+
+use crate::fmt;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::time::{Duration, Instant};
+pub use error::*;
+
+/// Creates a channel of unbounded capacity.
+///
+/// This channel has a growable buffer that can hold any number of messages at a time.
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let (s, r) = counter::new(list::Channel::new());
+ let s = Sender { flavor: SenderFlavor::List(s) };
+ let r = Receiver { flavor: ReceiverFlavor::List(r) };
+ (s, r)
+}
+
+/// Creates a channel of bounded capacity.
+///
+/// This channel has a buffer that can hold at most `cap` messages at a time.
+///
+/// A special case is the zero-capacity channel, which cannot hold any messages. Instead, send
+/// and receive operations must appear at the same time in order to pair up and pass the message
+/// over.
+pub fn sync_channel<T>(cap: usize) -> (Sender<T>, Receiver<T>) {
+ if cap == 0 {
+ let (s, r) = counter::new(zero::Channel::new());
+ let s = Sender { flavor: SenderFlavor::Zero(s) };
+ let r = Receiver { flavor: ReceiverFlavor::Zero(r) };
+ (s, r)
+ } else {
+ let (s, r) = counter::new(array::Channel::with_capacity(cap));
+ let s = Sender { flavor: SenderFlavor::Array(s) };
+ let r = Receiver { flavor: ReceiverFlavor::Array(r) };
+ (s, r)
+ }
+}
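
Although `mpmc` itself stays private, these constructors back the public `std::sync::mpsc` channels, so the flavor selection can be exercised through that facade. A minimal sketch using only stable APIs:

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        // `channel()` maps to the unbounded list flavor;
        // `sync_channel(n)` maps to the array (n > 0) or zero flavor.
        let (tx, rx) = mpsc::sync_channel::<u32>(2);
        let t = thread::spawn(move || {
            for i in 0..4 {
                tx.send(i).unwrap(); // blocks while the 2-slot buffer is full
            }
        });
        let received: Vec<u32> = rx.iter().collect(); // ends when `tx` is dropped
        assert_eq!(received, vec![0, 1, 2, 3]);
        t.join().unwrap();
    }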
+
+/// The sending side of a channel.
+pub struct Sender<T> {
+ flavor: SenderFlavor<T>,
+}
+
+/// Sender flavors.
+enum SenderFlavor<T> {
+ /// Bounded channel based on a preallocated array.
+ Array(counter::Sender<array::Channel<T>>),
+
+ /// Unbounded channel implemented as a linked list.
+ List(counter::Sender<list::Channel<T>>),
+
+ /// Zero-capacity channel.
+ Zero(counter::Sender<zero::Channel<T>>),
+}
+
+unsafe impl<T: Send> Send for Sender<T> {}
+unsafe impl<T: Send> Sync for Sender<T> {}
+
+impl<T> UnwindSafe for Sender<T> {}
+impl<T> RefUnwindSafe for Sender<T> {}
+
+impl<T> Sender<T> {
+ /// Attempts to send a message into the channel without blocking.
+ ///
+ /// This method will either send a message into the channel immediately or return an error if
+ /// the channel is full or disconnected. The returned error contains the original message.
+ ///
+ /// If called on a zero-capacity channel, this method will send the message only if there
+ /// happens to be a receive operation on the other side of the channel at the same time.
+ pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.try_send(msg),
+ SenderFlavor::List(chan) => chan.try_send(msg),
+ SenderFlavor::Zero(chan) => chan.try_send(msg),
+ }
+ }
+
+ /// Blocks the current thread until a message is sent or the channel is disconnected.
+ ///
+ /// If the channel is full and not disconnected, this call will block until the send operation
+ /// can proceed. If the channel becomes disconnected, this call will wake up and return an
+ /// error. The returned error contains the original message.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a receive operation to
+ /// appear on the other side of the channel.
+ pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.send(msg, None),
+ SenderFlavor::List(chan) => chan.send(msg, None),
+ SenderFlavor::Zero(chan) => chan.send(msg, None),
+ }
+ .map_err(|err| match err {
+ SendTimeoutError::Disconnected(msg) => SendError(msg),
+ SendTimeoutError::Timeout(_) => unreachable!(),
+ })
+ }
+}
+
+// The methods below are not used by `sync::mpsc`, but
+// are useful and we'll likely want to expose them
+// eventually
+#[allow(unused)]
+impl<T> Sender<T> {
+ /// Waits for a message to be sent into the channel, but only for a limited time.
+ ///
+ /// If the channel is full and not disconnected, this call will block until the send operation
+ /// can proceed or the operation times out. If the channel becomes disconnected, this call will
+ /// wake up and return an error. The returned error contains the original message.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a receive operation to
+ /// appear on the other side of the channel.
+ pub fn send_timeout(&self, msg: T, timeout: Duration) -> Result<(), SendTimeoutError<T>> {
+ match Instant::now().checked_add(timeout) {
+ Some(deadline) => self.send_deadline(msg, deadline),
+ // So far in the future that it's practically the same as waiting indefinitely.
+ None => self.send(msg).map_err(SendTimeoutError::from),
+ }
+ }
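+
+ // Illustrative sketch (not part of the patch): bound a send by 50ms and
+ // recover the message on failure:
+ //
+ //     match s.send_timeout(msg, Duration::from_millis(50)) {
+ //         Ok(()) => {}
+ //         Err(SendTimeoutError::Timeout(msg)) => { /* `msg` is handed back */ }
+ //         Err(SendTimeoutError::Disconnected(msg)) => { /* channel closed */ }
+ //     }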
+
+ /// Waits for a message to be sent into the channel, but only until a given deadline.
+ ///
+ /// If the channel is full and not disconnected, this call will block until the send operation
+ /// can proceed or the operation times out. If the channel becomes disconnected, this call will
+ /// wake up and return an error. The returned error contains the original message.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a receive operation to
+ /// appear on the other side of the channel.
+ pub fn send_deadline(&self, msg: T, deadline: Instant) -> Result<(), SendTimeoutError<T>> {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.send(msg, Some(deadline)),
+ SenderFlavor::List(chan) => chan.send(msg, Some(deadline)),
+ SenderFlavor::Zero(chan) => chan.send(msg, Some(deadline)),
+ }
+ }
+
+ /// Returns `true` if the channel is empty.
+ ///
+ /// Note: Zero-capacity channels are always empty.
+ pub fn is_empty(&self) -> bool {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.is_empty(),
+ SenderFlavor::List(chan) => chan.is_empty(),
+ SenderFlavor::Zero(chan) => chan.is_empty(),
+ }
+ }
+
+ /// Returns `true` if the channel is full.
+ ///
+ /// Note: Zero-capacity channels are always full.
+ pub fn is_full(&self) -> bool {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.is_full(),
+ SenderFlavor::List(chan) => chan.is_full(),
+ SenderFlavor::Zero(chan) => chan.is_full(),
+ }
+ }
+
+ /// Returns the number of messages in the channel.
+ pub fn len(&self) -> usize {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.len(),
+ SenderFlavor::List(chan) => chan.len(),
+ SenderFlavor::Zero(chan) => chan.len(),
+ }
+ }
+
+ /// If the channel is bounded, returns its capacity.
+ pub fn capacity(&self) -> Option<usize> {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.capacity(),
+ SenderFlavor::List(chan) => chan.capacity(),
+ SenderFlavor::Zero(chan) => chan.capacity(),
+ }
+ }
+
+ /// Returns `true` if both senders belong to the same channel.
+ pub fn same_channel(&self, other: &Sender<T>) -> bool {
+ match (&self.flavor, &other.flavor) {
+ (SenderFlavor::Array(ref a), SenderFlavor::Array(ref b)) => a == b,
+ (SenderFlavor::List(ref a), SenderFlavor::List(ref b)) => a == b,
+ (SenderFlavor::Zero(ref a), SenderFlavor::Zero(ref b)) => a == b,
+ _ => false,
+ }
+ }
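+
+ // For example (illustrative, not part of the patch): clones share a
+ // channel, while independently created senders do not:
+ //
+ //     let (s1, _r1) = channel::<i32>();
+ //     let s2 = s1.clone();
+ //     let (s3, _r3) = channel::<i32>();
+ //     assert!(s1.same_channel(&s2));
+ //     assert!(!s1.same_channel(&s3));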
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ unsafe {
+ match &self.flavor {
+ SenderFlavor::Array(chan) => chan.release(|c| c.disconnect()),
+ SenderFlavor::List(chan) => chan.release(|c| c.disconnect_senders()),
+ SenderFlavor::Zero(chan) => chan.release(|c| c.disconnect()),
+ }
+ }
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Self {
+ let flavor = match &self.flavor {
+ SenderFlavor::Array(chan) => SenderFlavor::Array(chan.acquire()),
+ SenderFlavor::List(chan) => SenderFlavor::List(chan.acquire()),
+ SenderFlavor::Zero(chan) => SenderFlavor::Zero(chan.acquire()),
+ };
+
+ Sender { flavor }
+ }
+}
+
+impl<T> fmt::Debug for Sender<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Sender { .. }")
+ }
+}
+
+/// The receiving side of a channel.
+pub struct Receiver<T> {
+ flavor: ReceiverFlavor<T>,
+}
+
+/// Receiver flavors.
+enum ReceiverFlavor<T> {
+ /// Bounded channel based on a preallocated array.
+ Array(counter::Receiver<array::Channel<T>>),
+
+ /// Unbounded channel implemented as a linked list.
+ List(counter::Receiver<list::Channel<T>>),
+
+ /// Zero-capacity channel.
+ Zero(counter::Receiver<zero::Channel<T>>),
+}
+
+unsafe impl<T: Send> Send for Receiver<T> {}
+unsafe impl<T: Send> Sync for Receiver<T> {}
+
+impl<T> UnwindSafe for Receiver<T> {}
+impl<T> RefUnwindSafe for Receiver<T> {}
+
+impl<T> Receiver<T> {
+ /// Attempts to receive a message from the channel without blocking.
+ ///
+ /// This method will either receive a message from the channel immediately or return an error
+ /// if the channel is empty.
+ ///
+ /// If called on a zero-capacity channel, this method will receive a message only if there
+ /// happens to be a send operation on the other side of the channel at the same time.
+ pub fn try_recv(&self) -> Result<T, TryRecvError> {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.try_recv(),
+ ReceiverFlavor::List(chan) => chan.try_recv(),
+ ReceiverFlavor::Zero(chan) => chan.try_recv(),
+ }
+ }
+
+ /// Blocks the current thread until a message is received or the channel is empty and
+ /// disconnected.
+ ///
+ /// If the channel is empty and not disconnected, this call will block until the receive
+ /// operation can proceed. If the channel is empty and becomes disconnected, this call will
+ /// wake up and return an error.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a send operation to appear
+ /// on the other side of the channel.
+ pub fn recv(&self) -> Result<T, RecvError> {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.recv(None),
+ ReceiverFlavor::List(chan) => chan.recv(None),
+ ReceiverFlavor::Zero(chan) => chan.recv(None),
+ }
+ .map_err(|_| RecvError)
+ }
+
+ /// Waits for a message to be received from the channel, but only for a limited time.
+ ///
+ /// If the channel is empty and not disconnected, this call will block until the receive
+ /// operation can proceed or the operation times out. If the channel is empty and becomes
+ /// disconnected, this call will wake up and return an error.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a send operation to appear
+ /// on the other side of the channel.
+ pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
+ match Instant::now().checked_add(timeout) {
+ Some(deadline) => self.recv_deadline(deadline),
+ // So far in the future that it's practically the same as waiting indefinitely.
+ None => self.recv().map_err(RecvTimeoutError::from),
+ }
+ }
+
+ /// Waits for a message to be received from the channel, but only until a given deadline.
+ ///
+ /// If the channel is empty and not disconnected, this call will block until the receive
+ /// operation can proceed or the operation times out. If the channel is empty and becomes
+ /// disconnected, this call will wake up and return an error.
+ ///
+ /// If called on a zero-capacity channel, this method will wait for a send operation to appear
+ /// on the other side of the channel.
+ pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.recv(Some(deadline)),
+ ReceiverFlavor::List(chan) => chan.recv(Some(deadline)),
+ ReceiverFlavor::Zero(chan) => chan.recv(Some(deadline)),
+ }
+ }
+}
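+
+// Illustrative sketch (not part of the patch): poll with a timeout and
+// distinguish the two failure modes:
+//
+//     match r.recv_timeout(Duration::from_millis(10)) {
+//         Ok(msg) => { /* handle msg */ }
+//         Err(RecvTimeoutError::Timeout) => { /* no message yet */ }
+//         Err(RecvTimeoutError::Disconnected) => { /* all senders gone */ }
+//     }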
+
+// The methods below are not used by `sync::mpsc`, but they are useful and
+// we'll likely want to expose them eventually.
+#[allow(unused)]
+impl<T> Receiver<T> {
+ /// Returns `true` if the channel is empty.
+ ///
+ /// Note: Zero-capacity channels are always empty.
+ pub fn is_empty(&self) -> bool {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.is_empty(),
+ ReceiverFlavor::List(chan) => chan.is_empty(),
+ ReceiverFlavor::Zero(chan) => chan.is_empty(),
+ }
+ }
+
+ /// Returns `true` if the channel is full.
+ ///
+ /// Note: Zero-capacity channels are always full.
+ pub fn is_full(&self) -> bool {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.is_full(),
+ ReceiverFlavor::List(chan) => chan.is_full(),
+ ReceiverFlavor::Zero(chan) => chan.is_full(),
+ }
+ }
+
+ /// Returns the number of messages in the channel.
+ pub fn len(&self) -> usize {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.len(),
+ ReceiverFlavor::List(chan) => chan.len(),
+ ReceiverFlavor::Zero(chan) => chan.len(),
+ }
+ }
+
+ /// If the channel is bounded, returns its capacity.
+ pub fn capacity(&self) -> Option<usize> {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.capacity(),
+ ReceiverFlavor::List(chan) => chan.capacity(),
+ ReceiverFlavor::Zero(chan) => chan.capacity(),
+ }
+ }
+
+ /// Returns `true` if both receivers belong to the same channel.
+ pub fn same_channel(&self, other: &Receiver<T>) -> bool {
+ match (&self.flavor, &other.flavor) {
+ (ReceiverFlavor::Array(a), ReceiverFlavor::Array(b)) => a == b,
+ (ReceiverFlavor::List(a), ReceiverFlavor::List(b)) => a == b,
+ (ReceiverFlavor::Zero(a), ReceiverFlavor::Zero(b)) => a == b,
+ _ => false,
+ }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ unsafe {
+ match &self.flavor {
+ ReceiverFlavor::Array(chan) => chan.release(|c| c.disconnect()),
+ ReceiverFlavor::List(chan) => chan.release(|c| c.disconnect_receivers()),
+ ReceiverFlavor::Zero(chan) => chan.release(|c| c.disconnect()),
+ }
+ }
+ }
+}
+
+impl<T> Clone for Receiver<T> {
+ fn clone(&self) -> Self {
+ let flavor = match &self.flavor {
+ ReceiverFlavor::Array(chan) => ReceiverFlavor::Array(chan.acquire()),
+ ReceiverFlavor::List(chan) => ReceiverFlavor::List(chan.acquire()),
+ ReceiverFlavor::Zero(chan) => ReceiverFlavor::Zero(chan.acquire()),
+ };
+
+ Receiver { flavor }
+ }
+}
+
+impl<T> fmt::Debug for Receiver<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Receiver { .. }")
+ }
+}
diff --git a/library/std/src/sync/mpmc/select.rs b/library/std/src/sync/mpmc/select.rs
new file mode 100644
index 000000000..56a83fee2
--- /dev/null
+++ b/library/std/src/sync/mpmc/select.rs
@@ -0,0 +1,71 @@
+/// Temporary data that gets initialized during a blocking operation, and is consumed by
+/// `read` or `write`.
+///
+/// Each field contains data associated with a specific channel flavor.
+#[derive(Debug, Default)]
+pub struct Token {
+ pub(crate) array: super::array::ArrayToken,
+ pub(crate) list: super::list::ListToken,
+ #[allow(dead_code)]
+ pub(crate) zero: super::zero::ZeroToken,
+}
+
+/// Identifier associated with an operation by a specific thread on a specific channel.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct Operation(usize);
+
+impl Operation {
+ /// Creates an operation identifier from a mutable reference.
+ ///
+ /// This function essentially just turns the address of the reference into a number. The
+ /// reference should point to a variable that is specific to the thread and the operation,
+ /// and is alive for the entire duration of a blocking operation.
+ #[inline]
+ pub fn hook<T>(r: &mut T) -> Operation {
+ let val = r as *mut T as usize;
+ // Make sure that the pointer address doesn't equal the numerical representation of
+ // `Selected::{Waiting, Aborted, Disconnected}`.
+ assert!(val > 2);
+ Operation(val)
+ }
+}
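+
+// Illustrative property (not part of the patch): distinct stack variables
+// have distinct addresses, so they yield distinct operation identifiers:
+//
+//     let mut a = 0u32;
+//     let mut b = 0u32;
+//     assert_ne!(Operation::hook(&mut a), Operation::hook(&mut b));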
+
+/// Current state of a blocking operation.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Selected {
+ /// Still waiting for an operation.
+ Waiting,
+
+ /// The attempt to block the current thread has been aborted.
+ Aborted,
+
+ /// An operation became ready because a channel is disconnected.
+ Disconnected,
+
+ /// An operation became ready because a message can be sent or received.
+ Operation(Operation),
+}
+
+impl From<usize> for Selected {
+ #[inline]
+ fn from(val: usize) -> Selected {
+ match val {
+ 0 => Selected::Waiting,
+ 1 => Selected::Aborted,
+ 2 => Selected::Disconnected,
+ oper => Selected::Operation(Operation(oper)),
+ }
+ }
+}
+
+impl Into<usize> for Selected {
+ #[inline]
+ fn into(self) -> usize {
+ match self {
+ Selected::Waiting => 0,
+ Selected::Aborted => 1,
+ Selected::Disconnected => 2,
+ Selected::Operation(Operation(val)) => val,
+ }
+ }
+}
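+
+// Illustrative round-trip of the `usize` encoding (not part of the patch):
+// values `0..=2` are reserved for the three unit states, and anything above
+// is interpreted as an operation address:
+//
+//     let raw: usize = Selected::Aborted.into();
+//     assert_eq!(raw, 1);
+//     assert_eq!(Selected::from(raw), Selected::Aborted);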
diff --git a/library/std/src/sync/mpmc/utils.rs b/library/std/src/sync/mpmc/utils.rs
new file mode 100644
index 000000000..cfe42750d
--- /dev/null
+++ b/library/std/src/sync/mpmc/utils.rs
@@ -0,0 +1,143 @@
+use crate::cell::Cell;
+use crate::ops::{Deref, DerefMut};
+
+/// Pads and aligns a value to the length of a cache line.
+#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
+// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of 64-byte cache
+// lines at a time, so we have to align to 128 bytes rather than 64.
+//
+// Sources:
+// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
+//
+// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
+//
+// Sources:
+// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
+//
+// powerpc64 has 128-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
+#[cfg_attr(
+ any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64",),
+ repr(align(128))
+)]
+// arm, mips, mips64, and riscv64 have 32-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_riscv64.go#L7
+#[cfg_attr(
+ any(
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "mips64",
+ target_arch = "riscv64",
+ ),
+ repr(align(32))
+)]
+// s390x has 256-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
+#[cfg_attr(target_arch = "s390x", repr(align(256)))]
+// x86 and wasm have 64-byte cache line size.
+//
+// Sources:
+// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
+// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
+//
+// All others are assumed to have 64-byte cache line size.
+#[cfg_attr(
+ not(any(
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "mips64",
+ target_arch = "riscv64",
+ target_arch = "s390x",
+ )),
+ repr(align(64))
+)]
+pub struct CachePadded<T> {
+ value: T,
+}
+
+impl<T> CachePadded<T> {
+ /// Pads and aligns a value to the length of a cache line.
+ pub fn new(value: T) -> CachePadded<T> {
+ CachePadded::<T> { value }
+ }
+}
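+
+// Illustrative check (not part of the patch): on x86_64, padding bumps both
+// the alignment and the size of the wrapper to the 128-byte prefetch pair:
+//
+//     assert_eq!(crate::mem::align_of::<CachePadded<u8>>(), 128);
+//     assert_eq!(crate::mem::size_of::<CachePadded<u8>>(), 128);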
+
+impl<T> Deref for CachePadded<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.value
+ }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+}
+
+const SPIN_LIMIT: u32 = 6;
+
+/// Performs quadratic backoff in spin loops.
+pub struct Backoff {
+ step: Cell<u32>,
+}
+
+impl Backoff {
+ /// Creates a new `Backoff`.
+ pub fn new() -> Self {
+ Backoff { step: Cell::new(0) }
+ }
+
+ /// Backs off using lightweight spinning.
+ ///
+ /// This method should be used for:
+ /// - Retrying an operation because another thread made progress, e.g. on CAS failure.
+ /// - Waiting for an operation to complete by spinning optimistically for a few iterations
+ /// before falling back to parking the thread (see `Backoff::is_completed`).
+ #[inline]
+ pub fn spin_light(&self) {
+ let step = self.step.get().min(SPIN_LIMIT);
+ for _ in 0..step.pow(2) {
+ crate::hint::spin_loop();
+ }
+
+ self.step.set(self.step.get() + 1);
+ }
+
+ /// Backs off using heavyweight spinning.
+ ///
+ /// This method should be used in blocking loops where parking the thread is not an option.
+ #[inline]
+ pub fn spin_heavy(&self) {
+ if self.step.get() <= SPIN_LIMIT {
+ for _ in 0..self.step.get().pow(2) {
+ crate::hint::spin_loop()
+ }
+ } else {
+ crate::thread::yield_now();
+ }
+
+ self.step.set(self.step.get() + 1);
+ }
+
+ /// Returns `true` if quadratic backoff has completed and parking the thread is advised.
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ self.step.get() > SPIN_LIMIT
+ }
+}
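+
+// Illustrative sketch of the intended spin-then-park pattern (not part of
+// the patch; `ready` and `park` are hypothetical placeholders):
+//
+//     let backoff = Backoff::new();
+//     while !ready() {
+//         if backoff.is_completed() {
+//             park(); // spinning is no longer worthwhile
+//         } else {
+//             backoff.spin_light();
+//         }
+//     }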
diff --git a/library/std/src/sync/mpmc/waker.rs b/library/std/src/sync/mpmc/waker.rs
new file mode 100644
index 000000000..4912ca4f8
--- /dev/null
+++ b/library/std/src/sync/mpmc/waker.rs
@@ -0,0 +1,204 @@
+//! Waking mechanism for threads blocked on channel operations.
+
+use super::context::Context;
+use super::select::{Operation, Selected};
+
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::Mutex;
+
+/// Represents a thread blocked on a specific channel operation.
+pub(crate) struct Entry {
+ /// The operation.
+ pub(crate) oper: Operation,
+
+ /// Optional packet.
+ pub(crate) packet: *mut (),
+
+ /// Context associated with the thread owning this operation.
+ pub(crate) cx: Context,
+}
+
+/// A queue of threads blocked on channel operations.
+///
+/// This data structure is used by threads to register blocking operations and get woken up once
+/// an operation becomes ready.
+pub(crate) struct Waker {
+ /// A list of select operations.
+ selectors: Vec<Entry>,
+
+ /// A list of operations waiting to be ready.
+ observers: Vec<Entry>,
+}
+
+impl Waker {
+ /// Creates a new `Waker`.
+ #[inline]
+ pub(crate) fn new() -> Self {
+ Waker { selectors: Vec::new(), observers: Vec::new() }
+ }
+
+ /// Registers a select operation.
+ #[inline]
+ pub(crate) fn register(&mut self, oper: Operation, cx: &Context) {
+ self.register_with_packet(oper, ptr::null_mut(), cx);
+ }
+
+ /// Registers a select operation and a packet.
+ #[inline]
+ pub(crate) fn register_with_packet(&mut self, oper: Operation, packet: *mut (), cx: &Context) {
+ self.selectors.push(Entry { oper, packet, cx: cx.clone() });
+ }
+
+ /// Unregisters a select operation.
+ #[inline]
+ pub(crate) fn unregister(&mut self, oper: Operation) -> Option<Entry> {
+ if let Some((i, _)) =
+ self.selectors.iter().enumerate().find(|&(_, entry)| entry.oper == oper)
+ {
+ let entry = self.selectors.remove(i);
+ Some(entry)
+ } else {
+ None
+ }
+ }
+
+ /// Attempts to find another thread's entry, select the operation, and wake it up.
+ #[inline]
+ pub(crate) fn try_select(&mut self) -> Option<Entry> {
+ self.selectors
+ .iter()
+ .position(|selector| {
+ // Does the entry belong to a different thread?
+ selector.cx.thread_id() != current_thread_id()
+ && selector // Try selecting this operation.
+ .cx
+ .try_select(Selected::Operation(selector.oper))
+ .is_ok()
+ && {
+ // Provide the packet.
+ selector.cx.store_packet(selector.packet);
+ // Wake the thread up.
+ selector.cx.unpark();
+ true
+ }
+ })
+ // Remove the entry from the queue to keep it clean and improve
+ // performance.
+ .map(|pos| self.selectors.remove(pos))
+ }
+
+ /// Notifies all operations waiting to be ready.
+ #[inline]
+ pub(crate) fn notify(&mut self) {
+ for entry in self.observers.drain(..) {
+ if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() {
+ entry.cx.unpark();
+ }
+ }
+ }
+
+ /// Notifies all registered operations that the channel is disconnected.
+ #[inline]
+ pub(crate) fn disconnect(&mut self) {
+ for entry in self.selectors.iter() {
+ if entry.cx.try_select(Selected::Disconnected).is_ok() {
+ // Wake the thread up.
+ //
+ // Here we don't remove the entry from the queue. Registered threads must
+ // unregister from the waker by themselves. They might also want to recover the
+ // packet value and destroy it, if necessary.
+ entry.cx.unpark();
+ }
+ }
+
+ self.notify();
+ }
+}
+
+impl Drop for Waker {
+ #[inline]
+ fn drop(&mut self) {
+ debug_assert_eq!(self.selectors.len(), 0);
+ debug_assert_eq!(self.observers.len(), 0);
+ }
+}
+
+/// A waker that can be shared among threads without locking.
+///
+/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization.
+pub(crate) struct SyncWaker {
+ /// The inner `Waker`.
+ inner: Mutex<Waker>,
+
+ /// `true` if the waker is empty.
+ is_empty: AtomicBool,
+}
+
+impl SyncWaker {
+ /// Creates a new `SyncWaker`.
+ #[inline]
+ pub(crate) fn new() -> Self {
+ SyncWaker { inner: Mutex::new(Waker::new()), is_empty: AtomicBool::new(true) }
+ }
+
+ /// Registers the current thread with an operation.
+ #[inline]
+ pub(crate) fn register(&self, oper: Operation, cx: &Context) {
+ let mut inner = self.inner.lock().unwrap();
+ inner.register(oper, cx);
+ self.is_empty
+ .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
+ }
+
+ /// Unregisters an operation previously registered by the current thread.
+ #[inline]
+ pub(crate) fn unregister(&self, oper: Operation) -> Option<Entry> {
+ let mut inner = self.inner.lock().unwrap();
+ let entry = inner.unregister(oper);
+ self.is_empty
+ .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
+ entry
+ }
+
+ /// Attempts to find one thread (not the current one), select its operation, and wake it up.
+ #[inline]
+ pub(crate) fn notify(&self) {
+ if !self.is_empty.load(Ordering::SeqCst) {
+ let mut inner = self.inner.lock().unwrap();
+ if !self.is_empty.load(Ordering::SeqCst) {
+ inner.try_select();
+ inner.notify();
+ self.is_empty.store(
+ inner.selectors.is_empty() && inner.observers.is_empty(),
+ Ordering::SeqCst,
+ );
+ }
+ }
+ }
+
+ /// Notifies all threads that the channel is disconnected.
+ #[inline]
+ pub(crate) fn disconnect(&self) {
+ let mut inner = self.inner.lock().unwrap();
+ inner.disconnect();
+ self.is_empty
+ .store(inner.selectors.is_empty() && inner.observers.is_empty(), Ordering::SeqCst);
+ }
+}
+
+impl Drop for SyncWaker {
+ #[inline]
+ fn drop(&mut self) {
+ debug_assert!(self.is_empty.load(Ordering::SeqCst));
+ }
+}
+
+/// Returns a unique id for the current thread.
+#[inline]
+pub fn current_thread_id() -> usize {
+ // `u8` does not implement `Drop`, so this variable will be available during thread
+ // destruction, whereas `thread::current()` would not be.
+ thread_local! { static DUMMY: u8 = 0 }
+ DUMMY.with(|x| (x as *const u8).addr())
+}
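+
+// Illustrative property (not part of the patch): the id is the address of a
+// thread-local, so it is stable within a thread for as long as the thread
+// lives:
+//
+//     let id = current_thread_id();
+//     assert_eq!(id, current_thread_id());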
diff --git a/library/std/src/sync/mpmc/zero.rs b/library/std/src/sync/mpmc/zero.rs
new file mode 100644
index 000000000..33f768dcb
--- /dev/null
+++ b/library/std/src/sync/mpmc/zero.rs
@@ -0,0 +1,318 @@
+//! Zero-capacity channel.
+//!
+//! This kind of channel is also known as a *rendezvous* channel.
+
+use super::context::Context;
+use super::error::*;
+use super::select::{Operation, Selected, Token};
+use super::utils::Backoff;
+use super::waker::Waker;
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomData;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::Mutex;
+use crate::time::Instant;
+use crate::{fmt, ptr};
+
+/// A pointer to a packet.
+pub(crate) struct ZeroToken(*mut ());
+
+impl Default for ZeroToken {
+ fn default() -> Self {
+ Self(ptr::null_mut())
+ }
+}
+
+impl fmt::Debug for ZeroToken {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&(self.0 as usize), f)
+ }
+}
+
+/// A slot for passing one message from a sender to a receiver.
+struct Packet<T> {
+ /// Equals `true` if the packet is allocated on the stack.
+ on_stack: bool,
+
+ /// Equals `true` once the packet is ready for reading or writing.
+ ready: AtomicBool,
+
+ /// The message.
+ msg: UnsafeCell<Option<T>>,
+}
+
+impl<T> Packet<T> {
+ /// Creates an empty packet on the stack.
+ fn empty_on_stack() -> Packet<T> {
+ Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(None) }
+ }
+
+ /// Creates a packet on the stack, containing a message.
+ fn message_on_stack(msg: T) -> Packet<T> {
+ Packet { on_stack: true, ready: AtomicBool::new(false), msg: UnsafeCell::new(Some(msg)) }
+ }
+
+ /// Waits until the packet becomes ready for reading or writing.
+ fn wait_ready(&self) {
+ let backoff = Backoff::new();
+ while !self.ready.load(Ordering::Acquire) {
+ backoff.spin_heavy();
+ }
+ }
+}
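+
+// Illustrative sketch of the `ready` handshake (not part of the patch),
+// writer side followed by reader side:
+//
+//     // writer: publish the message, then flip the flag.
+//     unsafe { packet.msg.get().write(Some(msg)) };
+//     packet.ready.store(true, Ordering::Release);
+//
+//     // reader: spin until the flag is set, then take the message.
+//     packet.wait_ready();
+//     let msg = unsafe { packet.msg.get().replace(None).unwrap() };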
+
+/// Inner representation of a zero-capacity channel.
+struct Inner {
+ /// Senders waiting to pair up with a receive operation.
+ senders: Waker,
+
+ /// Receivers waiting to pair up with a send operation.
+ receivers: Waker,
+
+ /// Equals `true` when the channel is disconnected.
+ is_disconnected: bool,
+}
+
+/// Zero-capacity channel.
+pub(crate) struct Channel<T> {
+ /// Inner representation of the channel.
+ inner: Mutex<Inner>,
+
+ /// Indicates that dropping a `Channel<T>` may drop values of type `T`.
+ _marker: PhantomData<T>,
+}
+
+impl<T> Channel<T> {
+ /// Constructs a new zero-capacity channel.
+ pub(crate) fn new() -> Self {
+ Channel {
+ inner: Mutex::new(Inner {
+ senders: Waker::new(),
+ receivers: Waker::new(),
+ is_disconnected: false,
+ }),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Writes a message into the packet.
+ pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
+ // If there is no packet, the channel is disconnected.
+ if token.zero.0.is_null() {
+ return Err(msg);
+ }
+
+ let packet = &*(token.zero.0 as *const Packet<T>);
+ packet.msg.get().write(Some(msg));
+ packet.ready.store(true, Ordering::Release);
+ Ok(())
+ }
+
+ /// Reads a message from the packet.
+ pub(crate) unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
+ // If there is no packet, the channel is disconnected.
+ if token.zero.0.is_null() {
+ return Err(());
+ }
+
+ let packet = &*(token.zero.0 as *const Packet<T>);
+
+ if packet.on_stack {
+ // The message has been in the packet from the beginning, so there is no need to wait
+ // for it. However, after reading the message, we need to set `ready` to `true` in
+ // order to signal that the packet can be destroyed.
+ let msg = packet.msg.get().replace(None).unwrap();
+ packet.ready.store(true, Ordering::Release);
+ Ok(msg)
+ } else {
+ // Wait until the message becomes available, then read it and destroy the
+ // heap-allocated packet.
+ packet.wait_ready();
+ let msg = packet.msg.get().replace(None).unwrap();
+ drop(Box::from_raw(token.zero.0 as *mut Packet<T>));
+ Ok(msg)
+ }
+ }
+
+ /// Attempts to send a message into the channel.
+ pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+ let token = &mut Token::default();
+ let mut inner = self.inner.lock().unwrap();
+
+ // If there's a waiting receiver, pair up with it.
+ if let Some(operation) = inner.receivers.try_select() {
+ token.zero.0 = operation.packet;
+ drop(inner);
+ unsafe {
+ self.write(token, msg).ok().unwrap();
+ }
+ Ok(())
+ } else if inner.is_disconnected {
+ Err(TrySendError::Disconnected(msg))
+ } else {
+ Err(TrySendError::Full(msg))
+ }
+ }
+
+ /// Sends a message into the channel.
+ pub(crate) fn send(
+ &self,
+ msg: T,
+ deadline: Option<Instant>,
+ ) -> Result<(), SendTimeoutError<T>> {
+ let token = &mut Token::default();
+ let mut inner = self.inner.lock().unwrap();
+
+ // If there's a waiting receiver, pair up with it.
+ if let Some(operation) = inner.receivers.try_select() {
+ token.zero.0 = operation.packet;
+ drop(inner);
+ unsafe {
+ self.write(token, msg).ok().unwrap();
+ }
+ return Ok(());
+ }
+
+ if inner.is_disconnected {
+ return Err(SendTimeoutError::Disconnected(msg));
+ }
+
+ Context::with(|cx| {
+ // Prepare for blocking until a receiver wakes us up.
+ let oper = Operation::hook(token);
+ let mut packet = Packet::<T>::message_on_stack(msg);
+ inner.senders.register_with_packet(oper, &mut packet as *mut Packet<T> as *mut (), cx);
+ inner.receivers.notify();
+ drop(inner);
+
+ // Block the current thread.
+ let sel = cx.wait_until(deadline);
+
+ match sel {
+ Selected::Waiting => unreachable!(),
+ Selected::Aborted => {
+ self.inner.lock().unwrap().senders.unregister(oper).unwrap();
+ let msg = unsafe { packet.msg.get().replace(None).unwrap() };
+ Err(SendTimeoutError::Timeout(msg))
+ }
+ Selected::Disconnected => {
+ self.inner.lock().unwrap().senders.unregister(oper).unwrap();
+ let msg = unsafe { packet.msg.get().replace(None).unwrap() };
+ Err(SendTimeoutError::Disconnected(msg))
+ }
+ Selected::Operation(_) => {
+ // Wait until the message is read, then drop the packet.
+ packet.wait_ready();
+ Ok(())
+ }
+ }
+ })
+ }
+
+ /// Attempts to receive a message without blocking.
+ pub(crate) fn try_recv(&self) -> Result<T, TryRecvError> {
+ let token = &mut Token::default();
+ let mut inner = self.inner.lock().unwrap();
+
+ // If there's a waiting sender, pair up with it.
+ if let Some(operation) = inner.senders.try_select() {
+ token.zero.0 = operation.packet;
+ drop(inner);
+ unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
+ } else if inner.is_disconnected {
+ Err(TryRecvError::Disconnected)
+ } else {
+ Err(TryRecvError::Empty)
+ }
+ }
+
+ /// Receives a message from the channel.
+ pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
+ let token = &mut Token::default();
+ let mut inner = self.inner.lock().unwrap();
+
+ // If there's a waiting sender, pair up with it.
+ if let Some(operation) = inner.senders.try_select() {
+ token.zero.0 = operation.packet;
+ drop(inner);
+ unsafe {
+ return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
+ }
+ }
+
+ if inner.is_disconnected {
+ return Err(RecvTimeoutError::Disconnected);
+ }
+
+ Context::with(|cx| {
+ // Prepare for blocking until a sender wakes us up.
+ let oper = Operation::hook(token);
+ let mut packet = Packet::<T>::empty_on_stack();
+ inner.receivers.register_with_packet(
+ oper,
+ &mut packet as *mut Packet<T> as *mut (),
+ cx,
+ );
+ inner.senders.notify();
+ drop(inner);
+
+ // Block the current thread.
+ let sel = cx.wait_until(deadline);
+
+ match sel {
+ Selected::Waiting => unreachable!(),
+ Selected::Aborted => {
+ self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
+ Err(RecvTimeoutError::Timeout)
+ }
+ Selected::Disconnected => {
+ self.inner.lock().unwrap().receivers.unregister(oper).unwrap();
+ Err(RecvTimeoutError::Disconnected)
+ }
+ Selected::Operation(_) => {
+ // Wait until the message is provided, then read it.
+ packet.wait_ready();
+ unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
+ }
+ }
+ })
+ }
+
+ /// Disconnects the channel and wakes up all blocked senders and receivers.
+ ///
+ /// Returns `true` if this call disconnected the channel.
+ pub(crate) fn disconnect(&self) -> bool {
+ let mut inner = self.inner.lock().unwrap();
+
+ if !inner.is_disconnected {
+ inner.is_disconnected = true;
+ inner.senders.disconnect();
+ inner.receivers.disconnect();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Returns the current number of messages inside the channel.
+ pub(crate) fn len(&self) -> usize {
+ 0
+ }
+
+ /// Returns the capacity of the channel.
+ #[allow(clippy::unnecessary_wraps)] // This is intentional.
+ pub(crate) fn capacity(&self) -> Option<usize> {
+ Some(0)
+ }
+
+ /// Returns `true` if the channel is empty.
+ pub(crate) fn is_empty(&self) -> bool {
+ true
+ }
+
+ /// Returns `true` if the channel is full.
+ pub(crate) fn is_full(&self) -> bool {
+ true
+ }
+}
diff --git a/library/std/src/sync/mpsc/blocking.rs b/library/std/src/sync/mpsc/blocking.rs
deleted file mode 100644
index 021df7b09..000000000
--- a/library/std/src/sync/mpsc/blocking.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-//! Generic support for building blocking abstractions.
-
-use crate::sync::atomic::{AtomicBool, Ordering};
-use crate::sync::Arc;
-use crate::thread::{self, Thread};
-use crate::time::Instant;
-
-struct Inner {
- thread: Thread,
- woken: AtomicBool,
-}
-
-unsafe impl Send for Inner {}
-unsafe impl Sync for Inner {}
-
-#[derive(Clone)]
-pub struct SignalToken {
- inner: Arc<Inner>,
-}
-
-pub struct WaitToken {
- inner: Arc<Inner>,
-}
-
-impl !Send for WaitToken {}
-
-impl !Sync for WaitToken {}
-
-pub fn tokens() -> (WaitToken, SignalToken) {
- let inner = Arc::new(Inner { thread: thread::current(), woken: AtomicBool::new(false) });
- let wait_token = WaitToken { inner: inner.clone() };
- let signal_token = SignalToken { inner };
- (wait_token, signal_token)
-}
-
-impl SignalToken {
- pub fn signal(&self) -> bool {
- let wake = self
- .inner
- .woken
- .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
- .is_ok();
- if wake {
- self.inner.thread.unpark();
- }
- wake
- }
-
- /// Converts to an unsafe raw pointer. Useful for storing in a pipe's state
- /// flag.
- #[inline]
- pub unsafe fn to_raw(self) -> *mut u8 {
- Arc::into_raw(self.inner) as *mut u8
- }
-
- /// Converts from an unsafe raw pointer. Useful for retrieving a pipe's state
- /// flag.
- #[inline]
- pub unsafe fn from_raw(signal_ptr: *mut u8) -> SignalToken {
- SignalToken { inner: Arc::from_raw(signal_ptr as *mut Inner) }
- }
-}
-
-impl WaitToken {
- pub fn wait(self) {
- while !self.inner.woken.load(Ordering::SeqCst) {
- thread::park()
- }
- }
-
- /// Returns `true` if we wake up normally.
- pub fn wait_max_until(self, end: Instant) -> bool {
- while !self.inner.woken.load(Ordering::SeqCst) {
- let now = Instant::now();
- if now >= end {
- return false;
- }
- thread::park_timeout(end - now)
- }
- true
- }
-}
diff --git a/library/std/src/sync/mpsc/cache_aligned.rs b/library/std/src/sync/mpsc/cache_aligned.rs
deleted file mode 100644
index 9197f0d6e..000000000
--- a/library/std/src/sync/mpsc/cache_aligned.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-use crate::ops::{Deref, DerefMut};
-
-#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
-#[cfg_attr(target_arch = "aarch64", repr(align(128)))]
-#[cfg_attr(not(target_arch = "aarch64"), repr(align(64)))]
-pub(super) struct CacheAligned<T>(pub T);
-
-impl<T> Deref for CacheAligned<T> {
- type Target = T;
- fn deref(&self) -> &Self::Target {
- &self.0
- }
-}
-
-impl<T> DerefMut for CacheAligned<T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.0
- }
-}
-
-impl<T> CacheAligned<T> {
- pub(super) fn new(t: T) -> Self {
- CacheAligned(t)
- }
-}
diff --git a/library/std/src/sync/mpsc/mod.rs b/library/std/src/sync/mpsc/mod.rs
index e85a87239..6e3c28f10 100644
--- a/library/std/src/sync/mpsc/mod.rs
+++ b/library/std/src/sync/mpsc/mod.rs
@@ -143,175 +143,16 @@ mod tests;
#[cfg(all(test, not(target_os = "emscripten")))]
mod sync_tests;
-// A description of how Rust's channel implementation works
-//
-// Channels are supposed to be the basic building block for all other
-// concurrent primitives that are used in Rust. As a result, the channel type
-// needs to be highly optimized, flexible, and broad enough for use everywhere.
-//
-// The choice of implementation of all channels is to be built on lock-free data
-// structures. The channels themselves are then consequently also lock-free data
-// structures. As always with lock-free code, this is a very "here be dragons"
-// territory, especially because I'm unaware of any academic papers that have
-// gone into great length about channels of these flavors.
-//
-// ## Flavors of channels
-//
-// From the perspective of a consumer of this library, there is only one flavor
-// of channel. This channel can be used as a stream and cloned to allow multiple
-// senders. Under the hood, however, there are actually three flavors of
-// channels in play.
-//
-// * Flavor::Oneshots - these channels are highly optimized for the one-send use
-// case. They contain as few atomics as possible and
-// involve one and exactly one allocation.
-// * Streams - these channels are optimized for the non-shared use case. They
-// use a different concurrent queue that is more tailored for this
-// use case. The initial allocation of this flavor of channel is not
-// optimized.
-// * Shared - this is the most general form of channel that this module offers,
-// a channel with multiple senders. This type is as optimized as it
-// can be, but the previous two types mentioned are much faster for
-// their use-cases.
-//
-// ## Concurrent queues
-//
-// The basic idea of Rust's Sender/Receiver types is that send() never blocks,
-// but recv() obviously blocks. This means that under the hood there must be
-// some shared and concurrent queue holding all of the actual data.
-//
-// With two flavors of channels, two flavors of queues are also used. We have
-// chosen to use queues from a well-known author that are abbreviated as SPSC
-// and MPSC (single producer, single consumer and multiple producer, single
-// consumer). SPSC queues are used for streams while MPSC queues are used for
-// shared channels.
-//
-// ### SPSC optimizations
-//
-// The SPSC queue found online is essentially a linked list of nodes where one
-// half of the nodes are the "queue of data" and the other half of nodes are a
-// cache of unused nodes. The unused nodes are used such that an allocation is
-// not required on every push() and a free doesn't need to happen on every
-// pop().
-//
-// As found online, however, the cache of nodes is of an infinite size. This
-// means that if a channel at one point in its life had 50k items in the queue,
-// then the queue will always have the capacity for 50k items. I believed that
-// this was an unnecessary limitation of the implementation, so I have altered
-// the queue to optionally have a bound on the cache size.
-//
-// By default, streams will have an unbounded SPSC queue with a small-ish cache
-// size. The hope is that the cache is still large enough to have very fast
-// send() operations while not too large such that millions of channels can
-// coexist at once.
-//
-// ### MPSC optimizations
-//
-// Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses
-// a linked list under the hood to earn its unboundedness, but I have not put
-// forth much effort into having a cache of nodes similar to the SPSC queue.
-//
-// For now, I believe that this is "ok" because shared channels are not the most
-// common type, but soon we may wish to revisit this queue choice and determine
-// another candidate for backend storage of shared channels.
-//
-// ## Overview of the Implementation
-//
-// Now that there's a little background on the concurrent queues used, it's
-// worth going into much more detail about the channels themselves. The basic
-// pseudocode for a send/recv are:
-//
-//
-// send(t) recv()
-// queue.push(t) return if queue.pop()
-// if increment() == -1 deschedule {
-// wakeup() if decrement() > 0
-// cancel_deschedule()
-// }
-// queue.pop()
-//
-// As mentioned before, there are no locks in this implementation, only atomic
-// instructions are used.
-//
-// ### The internal atomic counter
-//
-// Every channel has a shared counter with each half to keep track of the size
-// of the queue. This counter is used to abort descheduling by the receiver and
-// to know when to wake up on the sending side.
-//
-// As seen in the pseudocode, senders will increment this count and receivers
-// will decrement the count. The theory behind this is that if a sender sees a
-// -1 count, it will wake up the receiver, and if the receiver sees a 1+ count,
-// then it doesn't need to block.
-//
-// The recv() method has a beginning call to pop(), and if successful, it needs
-// to decrement the count. It is a crucial implementation detail that this
-// decrement does *not* happen to the shared counter. If this were the case,
-// then it would be possible for the counter to be very negative when there were
-// no receivers waiting, in which case the senders would have to determine when
-// it was actually appropriate to wake up a receiver.
-//
-// Instead, the "steal count" is kept track of separately (not atomically
-// because it's only used by receivers), and then the decrement() call when
-// descheduling will lump in all of the recent steals into one large decrement.
-//
-// The implication of this is that if a sender sees a -1 count, then there's
-// guaranteed to be a waiter waiting!
-//
-// ## Native Implementation
-//
-// A major goal of these channels is to work seamlessly on and off the runtime.
-// All of the previous race conditions have been worded in terms of
-// scheduler-isms (which is obviously not available without the runtime).
-//
-// For now, native usage of channels (off the runtime) will fall back onto
-// mutexes/cond vars for descheduling/atomic decisions. The no-contention path
-// is still entirely lock-free, the "deschedule" blocks above are surrounded by
-// a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a
-// condition variable.
-//
-// ## Select
-//
-// Being able to support selection over channels has greatly influenced this
-// design, and not only does selection need to work inside the runtime, but also
-// outside the runtime.
-//
-// The implementation is fairly straightforward. The goal of select() is not to
-// return some data, but only to return which channel can receive data without
-// blocking. The implementation is essentially the entire blocking procedure
-// followed by an increment as soon as its woken up. The cancellation procedure
-// involves an increment and swapping out of to_wake to acquire ownership of the
-// thread to unblock.
-//
-// Sadly this current implementation requires multiple allocations, so I have
-// seen the throughput of select() be much worse than it should be. I do not
-// believe that there is anything fundamental that needs to change about these
-// channels, however, in order to support a more efficient select().
-//
-// FIXME: Select is now removed, so these factors are ready to be cleaned up!
-//
-// # Conclusion
-//
-// And now that you've seen all the races that I found and attempted to fix,
-// here's the code for you to find some more!
-
-use crate::cell::UnsafeCell;
+// MPSC channels are built as a wrapper around MPMC channels, which
+// were ported from the `crossbeam-channel` crate. MPMC channels are
+// not exposed publicly, but if you are curious about the implementation,
+// that's where everything is.
+
use crate::error;
use crate::fmt;
-use crate::mem;
-use crate::sync::Arc;
+use crate::sync::mpmc;
use crate::time::{Duration, Instant};
-mod blocking;
-mod mpsc_queue;
-mod oneshot;
-mod shared;
-mod spsc_queue;
-mod stream;
-mod sync;
-
-mod cache_aligned;
-
/// The receiving half of Rust's [`channel`] (or [`sync_channel`]) type.
/// This half can only be owned by one thread.
///
@@ -341,7 +182,7 @@ mod cache_aligned;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Receiver")]
pub struct Receiver<T> {
- inner: UnsafeCell<Flavor<T>>,
+ inner: mpmc::Receiver<T>,
}
// The receiver port can be sent from place to place, so long as it
@@ -498,7 +339,7 @@ pub struct IntoIter<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Sender<T> {
- inner: UnsafeCell<Flavor<T>>,
+ inner: mpmc::Sender<T>,
}
// The send port can be sent from place to place, so long as it
@@ -557,7 +398,7 @@ impl<T> !Sync for Sender<T> {}
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SyncSender<T> {
- inner: Arc<sync::Packet<T>>,
+ inner: mpmc::Sender<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -643,34 +484,6 @@ pub enum TrySendError<T> {
Disconnected(#[stable(feature = "rust1", since = "1.0.0")] T),
}
-enum Flavor<T> {
- Oneshot(Arc<oneshot::Packet<T>>),
- Stream(Arc<stream::Packet<T>>),
- Shared(Arc<shared::Packet<T>>),
- Sync(Arc<sync::Packet<T>>),
-}
-
-#[doc(hidden)]
-trait UnsafeFlavor<T> {
- fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>>;
- unsafe fn inner_mut(&self) -> &mut Flavor<T> {
- &mut *self.inner_unsafe().get()
- }
- unsafe fn inner(&self) -> &Flavor<T> {
- &*self.inner_unsafe().get()
- }
-}
-impl<T> UnsafeFlavor<T> for Sender<T> {
- fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
- &self.inner
- }
-}
-impl<T> UnsafeFlavor<T> for Receiver<T> {
- fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
- &self.inner
- }
-}
-
/// Creates a new asynchronous channel, returning the sender/receiver halves.
/// All data sent on the [`Sender`] will become available on the [`Receiver`] in
/// the same order as it was sent, and no [`send`] will block the calling thread
@@ -711,8 +524,8 @@ impl<T> UnsafeFlavor<T> for Receiver<T> {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
- let a = Arc::new(oneshot::Packet::new());
- (Sender::new(Flavor::Oneshot(a.clone())), Receiver::new(Flavor::Oneshot(a)))
+ let (tx, rx) = mpmc::channel();
+ (Sender { inner: tx }, Receiver { inner: rx })
}
/// Creates a new synchronous, bounded channel.
@@ -760,8 +573,8 @@ pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
- let a = Arc::new(sync::Packet::new(bound));
- (SyncSender::new(a.clone()), Receiver::new(Flavor::Sync(a)))
+ let (tx, rx) = mpmc::sync_channel(bound);
+ (SyncSender { inner: tx }, Receiver { inner: rx })
}
////////////////////////////////////////////////////////////////////////////////
@@ -769,10 +582,6 @@ pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
////////////////////////////////////////////////////////////////////////////////
impl<T> Sender<T> {
- fn new(inner: Flavor<T>) -> Sender<T> {
- Sender { inner: UnsafeCell::new(inner) }
- }
-
/// Attempts to send a value on this channel, returning it back if it could
/// not be sent.
///
@@ -802,40 +611,7 @@ impl<T> Sender<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn send(&self, t: T) -> Result<(), SendError<T>> {
- let (new_inner, ret) = match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => {
- if !p.sent() {
- return p.send(t).map_err(SendError);
- } else {
- let a = Arc::new(stream::Packet::new());
- let rx = Receiver::new(Flavor::Stream(a.clone()));
- match p.upgrade(rx) {
- oneshot::UpSuccess => {
- let ret = a.send(t);
- (a, ret)
- }
- oneshot::UpDisconnected => (a, Err(t)),
- oneshot::UpWoke(token) => {
- // This send cannot panic because the thread is
- // asleep (we're looking at it), so the receiver
- // can't go away.
- a.send(t).ok().unwrap();
- token.signal();
- (a, Ok(()))
- }
- }
- }
- }
- Flavor::Stream(ref p) => return p.send(t).map_err(SendError),
- Flavor::Shared(ref p) => return p.send(t).map_err(SendError),
- Flavor::Sync(..) => unreachable!(),
- };
-
- unsafe {
- let tmp = Sender::new(Flavor::Stream(new_inner));
- mem::swap(self.inner_mut(), tmp.inner_mut());
- }
- ret.map_err(SendError)
+ self.inner.send(t)
}
}
@@ -847,58 +623,13 @@ impl<T> Clone for Sender<T> {
/// (including the original) need to be dropped in order for
/// [`Receiver::recv`] to stop blocking.
fn clone(&self) -> Sender<T> {
- let packet = match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => {
- let a = Arc::new(shared::Packet::new());
- {
- let guard = a.postinit_lock();
- let rx = Receiver::new(Flavor::Shared(a.clone()));
- let sleeper = match p.upgrade(rx) {
- oneshot::UpSuccess | oneshot::UpDisconnected => None,
- oneshot::UpWoke(task) => Some(task),
- };
- a.inherit_blocker(sleeper, guard);
- }
- a
- }
- Flavor::Stream(ref p) => {
- let a = Arc::new(shared::Packet::new());
- {
- let guard = a.postinit_lock();
- let rx = Receiver::new(Flavor::Shared(a.clone()));
- let sleeper = match p.upgrade(rx) {
- stream::UpSuccess | stream::UpDisconnected => None,
- stream::UpWoke(task) => Some(task),
- };
- a.inherit_blocker(sleeper, guard);
- }
- a
- }
- Flavor::Shared(ref p) => {
- p.clone_chan();
- return Sender::new(Flavor::Shared(p.clone()));
- }
- Flavor::Sync(..) => unreachable!(),
- };
-
- unsafe {
- let tmp = Sender::new(Flavor::Shared(packet.clone()));
- mem::swap(self.inner_mut(), tmp.inner_mut());
- }
- Sender::new(Flavor::Shared(packet))
+ Sender { inner: self.inner.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Sender<T> {
- fn drop(&mut self) {
- match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => p.drop_chan(),
- Flavor::Stream(ref p) => p.drop_chan(),
- Flavor::Shared(ref p) => p.drop_chan(),
- Flavor::Sync(..) => unreachable!(),
- }
- }
+ fn drop(&mut self) {}
}
#[stable(feature = "mpsc_debug", since = "1.8.0")]
@@ -913,10 +644,6 @@ impl<T> fmt::Debug for Sender<T> {
////////////////////////////////////////////////////////////////////////////////
impl<T> SyncSender<T> {
- fn new(inner: Arc<sync::Packet<T>>) -> SyncSender<T> {
- SyncSender { inner }
- }
-
/// Sends a value on this synchronous channel.
///
/// This function will *block* until space in the internal buffer becomes
@@ -955,7 +682,7 @@ impl<T> SyncSender<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn send(&self, t: T) -> Result<(), SendError<T>> {
- self.inner.send(t).map_err(SendError)
+ self.inner.send(t)
}
/// Attempts to send a value on this channel without blocking.
@@ -1011,21 +738,27 @@ impl<T> SyncSender<T> {
pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
self.inner.try_send(t)
}
+
+ // Attempts to send a value on this channel, returning an error if the
+ // corresponding receiver has hung up, or if the send waits longer than `timeout`.
+ //
+ // This method is currently private and only used for tests.
+ #[allow(unused)]
+ fn send_timeout(&self, t: T, timeout: Duration) -> Result<(), mpmc::SendTimeoutError<T>> {
+ self.inner.send_timeout(t, timeout)
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for SyncSender<T> {
fn clone(&self) -> SyncSender<T> {
- self.inner.clone_chan();
- SyncSender::new(self.inner.clone())
+ SyncSender { inner: self.inner.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for SyncSender<T> {
- fn drop(&mut self) {
- self.inner.drop_chan();
- }
+ fn drop(&mut self) {}
}
#[stable(feature = "mpsc_debug", since = "1.8.0")]
@@ -1040,10 +773,6 @@ impl<T> fmt::Debug for SyncSender<T> {
////////////////////////////////////////////////////////////////////////////////
impl<T> Receiver<T> {
- fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: UnsafeCell::new(inner) }
- }
-
/// Attempts to return a pending value on this receiver without blocking.
///
/// This method will never block the caller in order to wait for data to
@@ -1069,35 +798,7 @@ impl<T> Receiver<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_recv(&self) -> Result<T, TryRecvError> {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => match p.try_recv() {
- Ok(t) => return Ok(t),
- Err(oneshot::Empty) => return Err(TryRecvError::Empty),
- Err(oneshot::Disconnected) => return Err(TryRecvError::Disconnected),
- Err(oneshot::Upgraded(rx)) => rx,
- },
- Flavor::Stream(ref p) => match p.try_recv() {
- Ok(t) => return Ok(t),
- Err(stream::Empty) => return Err(TryRecvError::Empty),
- Err(stream::Disconnected) => return Err(TryRecvError::Disconnected),
- Err(stream::Upgraded(rx)) => rx,
- },
- Flavor::Shared(ref p) => match p.try_recv() {
- Ok(t) => return Ok(t),
- Err(shared::Empty) => return Err(TryRecvError::Empty),
- Err(shared::Disconnected) => return Err(TryRecvError::Disconnected),
- },
- Flavor::Sync(ref p) => match p.try_recv() {
- Ok(t) => return Ok(t),
- Err(sync::Empty) => return Err(TryRecvError::Empty),
- Err(sync::Disconnected) => return Err(TryRecvError::Disconnected),
- },
- };
- unsafe {
- mem::swap(self.inner_mut(), new_port.inner_mut());
- }
- }
+ self.inner.try_recv()
}
/// Attempts to wait for a value on this receiver, returning an error if the
@@ -1156,31 +857,7 @@ impl<T> Receiver<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn recv(&self) -> Result<T, RecvError> {
- loop {
- let new_port = match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => match p.recv(None) {
- Ok(t) => return Ok(t),
- Err(oneshot::Disconnected) => return Err(RecvError),
- Err(oneshot::Upgraded(rx)) => rx,
- Err(oneshot::Empty) => unreachable!(),
- },
- Flavor::Stream(ref p) => match p.recv(None) {
- Ok(t) => return Ok(t),
- Err(stream::Disconnected) => return Err(RecvError),
- Err(stream::Upgraded(rx)) => rx,
- Err(stream::Empty) => unreachable!(),
- },
- Flavor::Shared(ref p) => match p.recv(None) {
- Ok(t) => return Ok(t),
- Err(shared::Disconnected) => return Err(RecvError),
- Err(shared::Empty) => unreachable!(),
- },
- Flavor::Sync(ref p) => return p.recv(None).map_err(|_| RecvError),
- };
- unsafe {
- mem::swap(self.inner_mut(), new_port.inner_mut());
- }
- }
+ self.inner.recv()
}
/// Attempts to wait for a value on this receiver, returning an error if the
@@ -1198,34 +875,6 @@ impl<T> Receiver<T> {
/// However, since channels are buffered, messages sent before the disconnect
/// will still be properly received.
///
- /// # Known Issues
- ///
- /// There is currently a known issue (see [`#39364`]) that causes `recv_timeout`
- /// to panic unexpectedly with the following example:
- ///
- /// ```no_run
- /// use std::sync::mpsc::channel;
- /// use std::thread;
- /// use std::time::Duration;
- ///
- /// let (tx, rx) = channel::<String>();
- ///
- /// thread::spawn(move || {
- /// let d = Duration::from_millis(10);
- /// loop {
- /// println!("recv");
- /// let _r = rx.recv_timeout(d);
- /// }
- /// });
- ///
- /// thread::sleep(Duration::from_millis(100));
- /// let _c1 = tx.clone();
- ///
- /// thread::sleep(Duration::from_secs(1));
- /// ```
- ///
- /// [`#39364`]: https://github.com/rust-lang/rust/issues/39364
- ///
/// # Examples
///
/// Successfully receiving value before encountering timeout:
@@ -1268,17 +917,7 @@ impl<T> Receiver<T> {
/// ```
#[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
- // Do an optimistic try_recv to avoid the performance impact of
- // Instant::now() in the full-channel case.
- match self.try_recv() {
- Ok(result) => Ok(result),
- Err(TryRecvError::Disconnected) => Err(RecvTimeoutError::Disconnected),
- Err(TryRecvError::Empty) => match Instant::now().checked_add(timeout) {
- Some(deadline) => self.recv_deadline(deadline),
- // So far in the future that it's practically the same as waiting indefinitely.
- None => self.recv().map_err(RecvTimeoutError::from),
- },
- }
+ self.inner.recv_timeout(timeout)
}
/// Attempts to wait for a value on this receiver, returning an error if the
@@ -1339,46 +978,7 @@ impl<T> Receiver<T> {
/// ```
#[unstable(feature = "deadline_api", issue = "46316")]
pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> {
- use self::RecvTimeoutError::*;
-
- loop {
- let port_or_empty = match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => match p.recv(Some(deadline)) {
- Ok(t) => return Ok(t),
- Err(oneshot::Disconnected) => return Err(Disconnected),
- Err(oneshot::Upgraded(rx)) => Some(rx),
- Err(oneshot::Empty) => None,
- },
- Flavor::Stream(ref p) => match p.recv(Some(deadline)) {
- Ok(t) => return Ok(t),
- Err(stream::Disconnected) => return Err(Disconnected),
- Err(stream::Upgraded(rx)) => Some(rx),
- Err(stream::Empty) => None,
- },
- Flavor::Shared(ref p) => match p.recv(Some(deadline)) {
- Ok(t) => return Ok(t),
- Err(shared::Disconnected) => return Err(Disconnected),
- Err(shared::Empty) => None,
- },
- Flavor::Sync(ref p) => match p.recv(Some(deadline)) {
- Ok(t) => return Ok(t),
- Err(sync::Disconnected) => return Err(Disconnected),
- Err(sync::Empty) => None,
- },
- };
-
- if let Some(new_port) = port_or_empty {
- unsafe {
- mem::swap(self.inner_mut(), new_port.inner_mut());
- }
- }
-
- // If we're already passed the deadline, and we're here without
- // data, return a timeout, else try again.
- if Instant::now() >= deadline {
- return Err(Timeout);
- }
- }
+ self.inner.recv_deadline(deadline)
}
/// Returns an iterator that will block waiting for messages, but never
@@ -1499,14 +1099,7 @@ impl<T> IntoIterator for Receiver<T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Receiver<T> {
- fn drop(&mut self) {
- match *unsafe { self.inner() } {
- Flavor::Oneshot(ref p) => p.drop_port(),
- Flavor::Stream(ref p) => p.drop_port(),
- Flavor::Shared(ref p) => p.drop_port(),
- Flavor::Sync(ref p) => p.drop_port(),
- }
- }
+ fn drop(&mut self) {}
}
#[stable(feature = "mpsc_debug", since = "1.8.0")]
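
Before the deleted files below, one note on the removed `recv_timeout` body above: it polls with `try_recv` first to avoid paying for `Instant::now()` when data is already queued, and it treats a deadline that overflows `Instant` as "block indefinitely". A minimal sketch of the same pattern against the public mpsc API, with the stable `recv_timeout` standing in for the unstable `recv_deadline` and a hypothetical `recv_with_timeout` helper name (an illustration, not the new internal implementation):

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, TryRecvError};
use std::time::{Duration, Instant};

// Sketch of the removed pattern: poll first (cheap), only then pay for
// `Instant::now()`, and treat deadline overflow as "block indefinitely".
fn recv_with_timeout<T>(rx: &Receiver<T>, timeout: Duration) -> Result<T, RecvTimeoutError> {
    match rx.try_recv() {
        Ok(v) => Ok(v),
        Err(TryRecvError::Disconnected) => Err(RecvTimeoutError::Disconnected),
        Err(TryRecvError::Empty) => match Instant::now().checked_add(timeout) {
            // The real code passed this deadline to the unstable
            // `recv_deadline`; the stable `recv_timeout` stands in here.
            Some(_deadline) => rx.recv_timeout(timeout),
            // So far in the future that it is effectively unbounded.
            None => rx.recv().map_err(RecvTimeoutError::from),
        },
    }
}

fn main() {
    let (tx, rx) = channel();
    tx.send(1).unwrap();
    assert_eq!(recv_with_timeout(&rx, Duration::from_millis(10)), Ok(1));
}
```
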
diff --git a/library/std/src/sync/mpsc/mpsc_queue.rs b/library/std/src/sync/mpsc/mpsc_queue.rs
deleted file mode 100644
index cdd64a5de..000000000
--- a/library/std/src/sync/mpsc/mpsc_queue.rs
+++ /dev/null
@@ -1,117 +0,0 @@
-//! A mostly lock-free multi-producer, single-consumer queue.
-//!
-//! This module contains an implementation of a concurrent MPSC queue. This
-//! queue can be used to share data between threads, and is also used as the
-//! building block of channels in Rust.
-//!
-//! Note that the current implementation of this queue has a caveat with the
-//! `pop` method; see that method's documentation for more information. Due to
-//! this caveat, this queue might not be appropriate for all use cases.
-
-// https://www.1024cores.net/home/lock-free-algorithms
-// /queues/non-intrusive-mpsc-node-based-queue
-
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-pub use self::PopResult::*;
-
-use core::cell::UnsafeCell;
-use core::ptr;
-
-use crate::boxed::Box;
-use crate::sync::atomic::{AtomicPtr, Ordering};
-
-/// A result of the `pop` function.
-pub enum PopResult<T> {
- /// Some data has been popped
- Data(T),
- /// The queue is empty
- Empty,
-    /// The queue is in an inconsistent state. Popping data should succeed, but
-    /// some pushers have yet to make enough progress to allow a pop to
-    /// succeed. It is recommended that a `pop()` occur "in the near future" to
-    /// see whether the sender has made progress or not.
- Inconsistent,
-}
-
-struct Node<T> {
- next: AtomicPtr<Node<T>>,
- value: Option<T>,
-}
-
-/// The multi-producer single-consumer structure. This is not cloneable, but it
-/// may be safely shared so long as it is guaranteed that there is only one
-/// popper at a time (many pushers are allowed).
-pub struct Queue<T> {
- head: AtomicPtr<Node<T>>,
- tail: UnsafeCell<*mut Node<T>>,
-}
-
-unsafe impl<T: Send> Send for Queue<T> {}
-unsafe impl<T: Send> Sync for Queue<T> {}
-
-impl<T> Node<T> {
- unsafe fn new(v: Option<T>) -> *mut Node<T> {
- Box::into_raw(box Node { next: AtomicPtr::new(ptr::null_mut()), value: v })
- }
-}
-
-impl<T> Queue<T> {
- /// Creates a new queue that is safe to share among multiple producers and
- /// one consumer.
- pub fn new() -> Queue<T> {
- let stub = unsafe { Node::new(None) };
- Queue { head: AtomicPtr::new(stub), tail: UnsafeCell::new(stub) }
- }
-
- /// Pushes a new value onto this queue.
- pub fn push(&self, t: T) {
- unsafe {
- let n = Node::new(Some(t));
- let prev = self.head.swap(n, Ordering::AcqRel);
- (*prev).next.store(n, Ordering::Release);
- }
- }
-
- /// Pops some data from this queue.
- ///
- /// Note that the current implementation means that this function cannot
- /// return `Option<T>`. It is possible for this queue to be in an
- /// inconsistent state where many pushes have succeeded and completely
- /// finished, but pops cannot return `Some(t)`. This inconsistent state
- /// happens when a pusher is pre-empted at an inopportune moment.
- ///
- /// This inconsistent state means that this queue does indeed have data, but
-    /// the consumer does not have access to it at this time.
- pub fn pop(&self) -> PopResult<T> {
- unsafe {
- let tail = *self.tail.get();
- let next = (*tail).next.load(Ordering::Acquire);
-
- if !next.is_null() {
- *self.tail.get() = next;
- assert!((*tail).value.is_none());
- assert!((*next).value.is_some());
- let ret = (*next).value.take().unwrap();
- let _: Box<Node<T>> = Box::from_raw(tail);
- return Data(ret);
- }
-
- if self.head.load(Ordering::Acquire) == tail { Empty } else { Inconsistent }
- }
- }
-}
-
-impl<T> Drop for Queue<T> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.tail.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Ordering::Relaxed);
- let _: Box<Node<T>> = Box::from_raw(cur);
- cur = next;
- }
- }
- }
-}
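
The `Inconsistent` variant above is the one subtle part of this queue: `push` performs two separate stores (swap the head, then link the old head), and a consumer that arrives between them sees a non-empty queue it cannot pop from yet. A minimal sketch of the retry discipline that the deleted `shared.rs` applies in that case, with a closure as a hypothetical stand-in for `Queue::pop`:

```rust
use std::thread;

// Mirrors the deleted `PopResult` above.
enum PopResult<T> {
    Data(T),
    Empty,
    Inconsistent,
}

// On `Inconsistent`, a pusher was preempted between its two stores, so
// yield and try again -- a pop is guaranteed to succeed eventually.
fn pop_spinning<T>(mut try_pop: impl FnMut() -> PopResult<T>) -> Option<T> {
    loop {
        match try_pop() {
            PopResult::Data(t) => return Some(t),
            PopResult::Empty => return None,
            PopResult::Inconsistent => thread::yield_now(),
        }
    }
}

fn main() {
    // Simulate a pusher that needs two more scheduler slices to finish.
    let mut polls = 0;
    let got = pop_spinning(|| {
        polls += 1;
        if polls < 3 { PopResult::Inconsistent } else { PopResult::Data(42) }
    });
    assert_eq!(got, Some(42));
}
```
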
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
deleted file mode 100644
index 34b2a9a98..000000000
--- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use super::{Data, Empty, Inconsistent, Queue};
-use crate::sync::mpsc::channel;
-use crate::sync::Arc;
-use crate::thread;
-
-#[test]
-fn test_full() {
- let q: Queue<Box<_>> = Queue::new();
- q.push(Box::new(1));
- q.push(Box::new(2));
-}
-
-#[test]
-fn test() {
- let nthreads = 8;
- let nmsgs = if cfg!(miri) { 100 } else { 1000 };
- let q = Queue::new();
- match q.pop() {
- Empty => {}
- Inconsistent | Data(..) => panic!(),
- }
- let (tx, rx) = channel();
- let q = Arc::new(q);
-
- for _ in 0..nthreads {
- let tx = tx.clone();
- let q = q.clone();
- thread::spawn(move || {
- for i in 0..nmsgs {
- q.push(i);
- }
- tx.send(()).unwrap();
- });
- }
-
- let mut i = 0;
- while i < nthreads * nmsgs {
- match q.pop() {
- Empty | Inconsistent => {}
- Data(_) => i += 1,
- }
- }
- drop(tx);
- for _ in 0..nthreads {
- rx.recv().unwrap();
- }
-}
diff --git a/library/std/src/sync/mpsc/oneshot.rs b/library/std/src/sync/mpsc/oneshot.rs
deleted file mode 100644
index 0e259b8ae..000000000
--- a/library/std/src/sync/mpsc/oneshot.rs
+++ /dev/null
@@ -1,315 +0,0 @@
-/// Oneshot channels/ports
-///
-/// This is the initial flavor of channels/ports used for the comm module. This is
-/// an optimization for the one-use case of a channel. The major optimization of
-/// this type is to have one and exactly one allocation when the chan/port pair
-/// is created.
-///
-/// Another possible optimization would be to not use an Arc box because
-/// in theory we know when the shared packet can be deallocated (no real need
-/// for the atomic reference counting), but I was having trouble figuring out
-/// how to destroy the data early in a drop of a Port.
-///
-/// # Implementation
-///
-/// Oneshots are implemented around one atomic usize variable. This variable
-/// indicates the state of the port/chan and also contains any threads
-/// blocked on the port. All atomic operations happen on this one word.
-///
-/// In order to upgrade a oneshot channel, an upgrade is considered a disconnect
-/// on behalf of the channel side of things (it can be mentally thought of as
-/// consuming the port). This upgrade is then also stored in the shared packet.
-/// The one caveat to consider is that when a port sees a disconnected channel
-/// it must check for data because there is no "data plus upgrade" state.
-pub use self::Failure::*;
-use self::MyUpgrade::*;
-pub use self::UpgradeResult::*;
-
-use crate::cell::UnsafeCell;
-use crate::ptr;
-use crate::sync::atomic::{AtomicPtr, Ordering};
-use crate::sync::mpsc::blocking::{self, SignalToken};
-use crate::sync::mpsc::Receiver;
-use crate::time::Instant;
-
-// Various states you can find a port in.
-const EMPTY: *mut u8 = ptr::invalid_mut::<u8>(0); // initial state: no data, no blocked receiver
-const DATA: *mut u8 = ptr::invalid_mut::<u8>(1); // data ready for receiver to take
-const DISCONNECTED: *mut u8 = ptr::invalid_mut::<u8>(2); // channel is disconnected OR upgraded
-// Any other value represents a pointer to a SignalToken value. The
-// protocol ensures that when the state moves *to* a pointer,
-// ownership of the token is given to the packet, and when the state
-// moves *from* a pointer, ownership of the token is transferred to
-// whoever changed the state.
-
-pub struct Packet<T> {
- // Internal state of the chan/port pair (stores the blocked thread as well)
- state: AtomicPtr<u8>,
- // One-shot data slot location
- data: UnsafeCell<Option<T>>,
- // when used for the second time, a oneshot channel must be upgraded, and
- // this contains the slot for the upgrade
- upgrade: UnsafeCell<MyUpgrade<T>>,
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(SignalToken),
-}
-
-enum MyUpgrade<T> {
- NothingSent,
- SendUsed,
- GoUp(Receiver<T>),
-}
-
-impl<T> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- data: UnsafeCell::new(None),
- upgrade: UnsafeCell::new(NothingSent),
- state: AtomicPtr::new(EMPTY),
- }
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
- unsafe {
- // Sanity check
- match *self.upgrade.get() {
- NothingSent => {}
-                _ => panic!("sending on a oneshot that's already been sent on"),
- }
- assert!((*self.data.get()).is_none());
- ptr::write(self.data.get(), Some(t));
- ptr::write(self.upgrade.get(), SendUsed);
-
- match self.state.swap(DATA, Ordering::SeqCst) {
- // Sent the data, no one was waiting
- EMPTY => Ok(()),
-
- // Couldn't send the data, the port hung up first. Return the data
- // back up the stack.
- DISCONNECTED => {
- self.state.swap(DISCONNECTED, Ordering::SeqCst);
- ptr::write(self.upgrade.get(), NothingSent);
- Err((&mut *self.data.get()).take().unwrap())
- }
-
- // Not possible, these are one-use channels
- DATA => unreachable!(),
-
- // There is a thread waiting on the other end. We leave the 'DATA'
- // state inside so it'll pick it up on the other end.
- ptr => {
- SignalToken::from_raw(ptr).signal();
- Ok(())
- }
- }
- }
- }
-
-    // Just tests whether this channel has been sent on or not; this is only
- // safe to use from the sender.
- pub fn sent(&self) -> bool {
- unsafe { !matches!(*self.upgrade.get(), NothingSent) }
- }
-
- pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure<T>> {
- // Attempt to not block the thread (it's a little expensive). If it looks
- // like we're not empty, then immediately go through to `try_recv`.
- if self.state.load(Ordering::SeqCst) == EMPTY {
- let (wait_token, signal_token) = blocking::tokens();
- let ptr = unsafe { signal_token.to_raw() };
-
- // race with senders to enter the blocking state
- if self.state.compare_exchange(EMPTY, ptr, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
- if let Some(deadline) = deadline {
- let timed_out = !wait_token.wait_max_until(deadline);
- // Try to reset the state
- if timed_out {
- self.abort_selection().map_err(Upgraded)?;
- }
- } else {
- wait_token.wait();
- debug_assert!(self.state.load(Ordering::SeqCst) != EMPTY);
- }
- } else {
- // drop the signal token, since we never blocked
- drop(unsafe { SignalToken::from_raw(ptr) });
- }
- }
-
- self.try_recv()
- }
-
- pub fn try_recv(&self) -> Result<T, Failure<T>> {
- unsafe {
- match self.state.load(Ordering::SeqCst) {
- EMPTY => Err(Empty),
-
- // We saw some data on the channel, but the channel can be used
- // again to send us an upgrade. As a result, we need to re-insert
- // into the channel that there's no data available (otherwise we'll
- // just see DATA next time). This is done as a cmpxchg because if
- // the state changes under our feet we'd rather just see that state
- // change.
- DATA => {
- let _ = self.state.compare_exchange(
- DATA,
- EMPTY,
- Ordering::SeqCst,
- Ordering::SeqCst,
- );
- match (&mut *self.data.get()).take() {
- Some(data) => Ok(data),
- None => unreachable!(),
- }
- }
-
- // There's no guarantee that we receive before an upgrade happens,
- // and an upgrade flags the channel as disconnected, so when we see
- // this we first need to check if there's data available and *then*
- // we go through and process the upgrade.
- DISCONNECTED => match (&mut *self.data.get()).take() {
- Some(data) => Ok(data),
- None => match ptr::replace(self.upgrade.get(), SendUsed) {
- SendUsed | NothingSent => Err(Disconnected),
- GoUp(upgrade) => Err(Upgraded(upgrade)),
- },
- },
-
- // We are the sole receiver; there cannot be a blocking
- // receiver already.
- _ => unreachable!(),
- }
- }
- }
-
- // Returns whether the upgrade was completed. If the upgrade wasn't
- // completed, then the port couldn't get sent to the other half (it will
- // never receive it).
- pub fn upgrade(&self, up: Receiver<T>) -> UpgradeResult {
- unsafe {
- let prev = match *self.upgrade.get() {
- NothingSent => NothingSent,
- SendUsed => SendUsed,
- _ => panic!("upgrading again"),
- };
- ptr::write(self.upgrade.get(), GoUp(up));
-
- match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
- // If the channel is empty or has data on it, then we're good to go.
- // Senders will check the data before the upgrade (in case we
- // plastered over the DATA state).
- DATA | EMPTY => UpSuccess,
-
- // If the other end is already disconnected, then we failed the
- // upgrade. Be sure to trash the port we were given.
- DISCONNECTED => {
- ptr::replace(self.upgrade.get(), prev);
- UpDisconnected
- }
-
- // If someone's waiting, we gotta wake them up
- ptr => UpWoke(SignalToken::from_raw(ptr)),
- }
- }
- }
-
- pub fn drop_chan(&self) {
- match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
- DATA | DISCONNECTED | EMPTY => {}
-
- // If someone's waiting, we gotta wake them up
- ptr => unsafe {
- SignalToken::from_raw(ptr).signal();
- },
- }
- }
-
- pub fn drop_port(&self) {
- match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
- // An empty channel has nothing to do, and a remotely disconnected
- // channel also has nothing to do b/c we're about to run the drop
- // glue
- DISCONNECTED | EMPTY => {}
-
- // There's data on the channel, so make sure we destroy it promptly.
- // This is why not using an arc is a little difficult (need the box
- // to stay valid while we take the data).
- DATA => unsafe {
- (&mut *self.data.get()).take().unwrap();
- },
-
- // We're the only ones that can block on this port
- _ => unreachable!(),
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // Remove a previous selecting thread from this port. This ensures that the
- // blocked thread will no longer be visible to any other threads.
- //
- // The return value indicates whether there's data on this port.
- pub fn abort_selection(&self) -> Result<bool, Receiver<T>> {
- let state = match self.state.load(Ordering::SeqCst) {
- // Each of these states means that no further activity will happen
-            // with regard to aborting the selection
- s @ (EMPTY | DATA | DISCONNECTED) => s,
-
- // If we've got a blocked thread, then use an atomic to gain ownership
- // of it (may fail)
- ptr => self
- .state
- .compare_exchange(ptr, EMPTY, Ordering::SeqCst, Ordering::SeqCst)
- .unwrap_or_else(|x| x),
- };
-
- // Now that we've got ownership of our state, figure out what to do
- // about it.
- match state {
- EMPTY => unreachable!(),
- // our thread used for select was stolen
- DATA => Ok(true),
-
- // If the other end has hung up, then we have complete ownership
- // of the port. First, check if there was data waiting for us. This
- // is possible if the other end sent something and then hung up.
- //
- // We then need to check to see if there was an upgrade requested,
- // and if so, the upgraded port needs to have its selection aborted.
- DISCONNECTED => unsafe {
- if (*self.data.get()).is_some() {
- Ok(true)
- } else {
- match ptr::replace(self.upgrade.get(), SendUsed) {
- GoUp(port) => Err(port),
- _ => Ok(true),
- }
- }
- },
-
- // We woke ourselves up from select.
- ptr => unsafe {
- drop(SignalToken::from_raw(ptr));
- Ok(false)
- },
- }
- }
-}
-
-impl<T> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.state.load(Ordering::SeqCst), DISCONNECTED);
- }
-}
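
The state machine described in the module comment above (EMPTY, DATA, DISCONNECTED, plus a token pointer for a blocked receiver) can be sketched with ordinary locks, trading the single atomic word for a `Mutex` and the `SignalToken` for a `Condvar`. `OneshotSketch` is a hypothetical illustration of the protocol, not the deleted implementation; note how, exactly as in `try_recv` above, a disconnected oneshot must still be checked for data first:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct OneshotSketch<T> {
    slot: Mutex<(Option<T>, bool)>, // (data, disconnected)
    cond: Condvar,
}

impl<T> OneshotSketch<T> {
    fn new() -> Self {
        OneshotSketch { slot: Mutex::new((None, false)), cond: Condvar::new() }
    }

    // EMPTY -> DATA; wake any blocked receiver (SignalToken::signal).
    fn send(&self, t: T) {
        let mut s = self.slot.lock().unwrap();
        assert!(s.0.is_none(), "sending on a oneshot twice");
        s.0 = Some(t);
        self.cond.notify_one();
    }

    // -> DISCONNECTED (sender dropped, or channel upgraded).
    fn disconnect(&self) {
        let mut s = self.slot.lock().unwrap();
        s.1 = true;
        self.cond.notify_one();
    }

    // Like `try_recv` above: even when disconnected, check for data first,
    // because there is no combined "data plus disconnect" state.
    fn recv(&self) -> Option<T> {
        let mut s = self.slot.lock().unwrap();
        loop {
            if let Some(t) = s.0.take() {
                return Some(t);
            }
            if s.1 {
                return None;
            }
            s = self.cond.wait(s).unwrap();
        }
    }
}

fn main() {
    let ch = Arc::new(OneshotSketch::new());
    let tx = Arc::clone(&ch);
    thread::spawn(move || {
        tx.send(7);
        tx.disconnect();
    });
    assert_eq!(ch.recv(), Some(7));
    assert_eq!(ch.recv(), None);
}
```
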
diff --git a/library/std/src/sync/mpsc/shared.rs b/library/std/src/sync/mpsc/shared.rs
deleted file mode 100644
index 51917bd96..000000000
--- a/library/std/src/sync/mpsc/shared.rs
+++ /dev/null
@@ -1,501 +0,0 @@
-/// Shared channels.
-///
-/// This is the flavor of channels which are not necessarily optimized for any
-/// particular use case, but are the most general in how they are used. Shared
-/// channels are cloneable allowing for multiple senders.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module. You'll also note that the implementation of the shared and stream
-/// channels are quite similar, and this is no coincidence!
-pub use self::Failure::*;
-use self::StartResult::*;
-
-use core::cmp;
-use core::intrinsics::abort;
-
-use crate::cell::UnsafeCell;
-use crate::ptr;
-use crate::sync::atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicUsize, Ordering};
-use crate::sync::mpsc::blocking::{self, SignalToken};
-use crate::sync::mpsc::mpsc_queue as mpsc;
-use crate::sync::{Mutex, MutexGuard};
-use crate::thread;
-use crate::time::Instant;
-
-const DISCONNECTED: isize = isize::MIN;
-const FUDGE: isize = 1024;
-const MAX_REFCOUNT: usize = (isize::MAX) as usize;
-#[cfg(test)]
-const MAX_STEALS: isize = 5;
-#[cfg(not(test))]
-const MAX_STEALS: isize = 1 << 20;
-const EMPTY: *mut u8 = ptr::null_mut(); // initial state: no data, no blocked receiver
-
-pub struct Packet<T> {
- queue: mpsc::Queue<T>,
- cnt: AtomicIsize, // How many items are on this channel
- steals: UnsafeCell<isize>, // How many times has a port received without blocking?
- to_wake: AtomicPtr<u8>, // SignalToken for wake up
-
- // The number of channels which are currently using this packet.
- channels: AtomicUsize,
-
- // See the discussion in Port::drop and the channel send methods for what
- // these are used for
- port_dropped: AtomicBool,
- sender_drain: AtomicIsize,
-
- // this lock protects various portions of this implementation during
- // select()
- select_lock: Mutex<()>,
-}
-
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-#[derive(PartialEq, Eq)]
-enum StartResult {
- Installed,
- Abort,
-}
-
-impl<T> Packet<T> {
- // Creation of a packet *must* be followed by a call to postinit_lock
- // and later by inherit_blocker
- pub fn new() -> Packet<T> {
- Packet {
- queue: mpsc::Queue::new(),
- cnt: AtomicIsize::new(0),
- steals: UnsafeCell::new(0),
- to_wake: AtomicPtr::new(EMPTY),
- channels: AtomicUsize::new(2),
- port_dropped: AtomicBool::new(false),
- sender_drain: AtomicIsize::new(0),
- select_lock: Mutex::new(()),
- }
- }
-
-    // This function should be used after a newly created Packet
-    // has been wrapped with an Arc.
-    // Otherwise the mutex data will be duplicated while cloning,
-    // which could cause problems on platforms where a mutex is
-    // represented by an opaque data structure
- pub fn postinit_lock(&self) -> MutexGuard<'_, ()> {
- self.select_lock.lock().unwrap()
- }
-
- // This function is used at the creation of a shared packet to inherit a
- // previously blocked thread. This is done to prevent spurious wakeups of
- // threads in select().
- //
- // This can only be called at channel-creation time
- pub fn inherit_blocker(&self, token: Option<SignalToken>, guard: MutexGuard<'_, ()>) {
- if let Some(token) = token {
- assert_eq!(self.cnt.load(Ordering::SeqCst), 0);
- assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
- self.to_wake.store(unsafe { token.to_raw() }, Ordering::SeqCst);
- self.cnt.store(-1, Ordering::SeqCst);
-
- // This store is a little sketchy. What's happening here is that
- // we're transferring a blocker from a oneshot or stream channel to
- // this shared channel. In doing so, we never spuriously wake them
- // up and rather only wake them up at the appropriate time. This
- // implementation of shared channels assumes that any blocking
- // recv() will undo the increment of steals performed in try_recv()
- // once the recv is complete. This thread that we're inheriting,
- // however, is not in the middle of recv. Hence, the first time we
- // wake them up, they're going to wake up from their old port, move
- // on to the upgraded port, and then call the block recv() function.
- //
- // When calling this function, they'll find there's data immediately
- // available, counting it as a steal. This in fact wasn't a steal
- // because we appropriately blocked them waiting for data.
- //
- // To offset this bad increment, we initially set the steal count to
- // -1. You'll find some special code in abort_selection() as well to
- // ensure that this -1 steal count doesn't escape too far.
- unsafe {
- *self.steals.get() = -1;
- }
- }
-
- // When the shared packet is constructed, we grabbed this lock. The
- // purpose of this lock is to ensure that abort_selection() doesn't
- // interfere with this method. After we unlock this lock, we're
- // signifying that we're done modifying self.cnt and self.to_wake and
- // the port is ready for the world to continue using it.
- drop(guard);
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
- // See Port::drop for what's going on
- if self.port_dropped.load(Ordering::SeqCst) {
- return Err(t);
- }
-
- // Note that the multiple sender case is a little trickier
- // semantically than the single sender case. The logic for
- // incrementing is "add and if disconnected store disconnected".
- // This could end up leading some senders to believe that there
- // wasn't a disconnect if in fact there was a disconnect. This means
- // that while one thread is attempting to re-store the disconnected
- // states, other threads could walk through merrily incrementing
- // this very-negative disconnected count. To prevent senders from
- // spuriously attempting to send when the channels is actually
-        // spuriously attempting to send when the channel is actually
- //
- // This is also done for another reason. Remember that the return
- // value of this function is:
- //
- // `true` == the data *may* be received, this essentially has no
- // meaning
- // `false` == the data will *never* be received, this has a lot of
- // meaning
- //
- // In the SPSC case, we have a check of 'queue.is_empty()' to see
- // whether the data was actually received, but this same condition
- // means nothing in a multi-producer context. As a result, this
- // preflight check serves as the definitive "this will never be
- // received". Once we get beyond this check, we have permanently
- // entered the realm of "this may be received"
- if self.cnt.load(Ordering::SeqCst) < DISCONNECTED + FUDGE {
- return Err(t);
- }
-
- self.queue.push(t);
- match self.cnt.fetch_add(1, Ordering::SeqCst) {
- -1 => {
- self.take_to_wake().signal();
- }
-
- // In this case, we have possibly failed to send our data, and
- // we need to consider re-popping the data in order to fully
- // destroy it. We must arbitrate among the multiple senders,
- // however, because the queues that we're using are
- // single-consumer queues. In order to do this, all exiting
- // pushers will use an atomic count in order to count those
- // flowing through. Pushers who see 0 are required to drain as
- // much as possible, and then can only exit when they are the
- // only pusher (otherwise they must try again).
- n if n < DISCONNECTED + FUDGE => {
- // see the comment in 'try' for a shared channel for why this
- // window of "not disconnected" is ok.
- self.cnt.store(DISCONNECTED, Ordering::SeqCst);
-
- if self.sender_drain.fetch_add(1, Ordering::SeqCst) == 0 {
- loop {
- // drain the queue, for info on the thread yield see the
- // discussion in try_recv
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => {}
- mpsc::Empty => break,
- mpsc::Inconsistent => thread::yield_now(),
- }
- }
- // maybe we're done, if we're not the last ones
- // here, then we need to go try again.
- if self.sender_drain.fetch_sub(1, Ordering::SeqCst) == 1 {
- break;
- }
- }
-
- // At this point, there may still be data on the queue,
- // but only if the count hasn't been incremented and
- // some other sender hasn't finished pushing data just
- // yet. That sender in question will drain its own data.
- }
- }
-
- // Can't make any assumptions about this case like in the SPSC case.
- _ => {}
- }
-
- Ok(())
- }
-
- pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure> {
- // This code is essentially the exact same as that found in the stream
- // case (see stream.rs)
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- let (wait_token, signal_token) = blocking::tokens();
- if self.decrement(signal_token) == Installed {
- if let Some(deadline) = deadline {
- let timed_out = !wait_token.wait_max_until(deadline);
- if timed_out {
- self.abort_selection(false);
- }
- } else {
- wait_token.wait();
- }
- }
-
- match self.try_recv() {
- data @ Ok(..) => unsafe {
- *self.steals.get() -= 1;
- data
- },
- data => data,
- }
- }
-
- // Essentially the exact same thing as the stream decrement function.
- // Returns true if blocking should proceed.
- fn decrement(&self, token: SignalToken) -> StartResult {
- unsafe {
- assert_eq!(
- self.to_wake.load(Ordering::SeqCst),
- EMPTY,
- "This is a known bug in the Rust standard library. See https://github.com/rust-lang/rust/issues/39364"
- );
- let ptr = token.to_raw();
- self.to_wake.store(ptr, Ordering::SeqCst);
-
- let steals = ptr::replace(self.steals.get(), 0);
-
- match self.cnt.fetch_sub(1 + steals, Ordering::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, Ordering::SeqCst);
- }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 {
- return Installed;
- }
- }
- }
-
- self.to_wake.store(EMPTY, Ordering::SeqCst);
- drop(SignalToken::from_raw(ptr));
- Abort
- }
- }
-
- pub fn try_recv(&self) -> Result<T, Failure> {
- let ret = match self.queue.pop() {
- mpsc::Data(t) => Some(t),
- mpsc::Empty => None,
-
- // This is a bit of an interesting case. The channel is reported as
- // having data available, but our pop() has failed due to the queue
- // being in an inconsistent state. This means that there is some
- // pusher somewhere which has yet to complete, but we are guaranteed
- // that a pop will eventually succeed. In this case, we spin in a
- // yield loop because the remote sender should finish their enqueue
- // operation "very quickly".
- //
- // Avoiding this yield loop would require a different queue
- // abstraction which provides the guarantee that after M pushes have
- // succeeded, at least M pops will succeed. The current queues
- // guarantee that if there are N active pushes, you can pop N times
- // once all N have finished.
- mpsc::Inconsistent => {
- let data;
- loop {
- thread::yield_now();
- match self.queue.pop() {
- mpsc::Data(t) => {
- data = t;
- break;
- }
- mpsc::Empty => panic!("inconsistent => empty"),
- mpsc::Inconsistent => {}
- }
- }
- Some(data)
- }
- };
- match ret {
- // See the discussion in the stream implementation for why we
- // might decrement steals.
- Some(data) => unsafe {
- if *self.steals.get() > MAX_STEALS {
- match self.cnt.swap(0, Ordering::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, Ordering::SeqCst);
- }
- n => {
- let m = cmp::min(n, *self.steals.get());
- *self.steals.get() -= m;
- self.bump(n - m);
- }
- }
- assert!(*self.steals.get() >= 0);
- }
- *self.steals.get() += 1;
- Ok(data)
- },
-
- // See the discussion in the stream implementation for why we try
- // again.
- None => {
- match self.cnt.load(Ordering::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
- _ => {
- match self.queue.pop() {
- mpsc::Data(t) => Ok(t),
- mpsc::Empty => Err(Disconnected),
- // with no senders, an inconsistency is impossible.
- mpsc::Inconsistent => unreachable!(),
- }
- }
- }
- }
- }
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&self) {
- let old_count = self.channels.fetch_add(1, Ordering::SeqCst);
-
-        // See comments on Arc::clone() for why we do this (for `mem::forget`).
- if old_count > MAX_REFCOUNT {
- abort();
- }
- }
-
- // Decrement the reference count on a channel. This is called whenever a
- // Chan is dropped and may end up waking up a receiver. It's the receiver's
- // responsibility on the other end to figure out that we've disconnected.
- pub fn drop_chan(&self) {
- match self.channels.fetch_sub(1, Ordering::SeqCst) {
- 1 => {}
- n if n > 1 => return,
- n => panic!("bad number of channels left {n}"),
- }
-
- match self.cnt.swap(DISCONNECTED, Ordering::SeqCst) {
- -1 => {
- self.take_to_wake().signal();
- }
- DISCONNECTED => {}
- n => {
- assert!(n >= 0);
- }
- }
- }
-
- // See the long discussion inside of stream.rs for why the queue is drained,
- // and why it is done in this fashion.
- pub fn drop_port(&self) {
- self.port_dropped.store(true, Ordering::SeqCst);
- let mut steals = unsafe { *self.steals.get() };
- while {
- match self.cnt.compare_exchange(
- steals,
- DISCONNECTED,
- Ordering::SeqCst,
- Ordering::SeqCst,
- ) {
- Ok(_) => false,
- Err(old) => old != DISCONNECTED,
- }
- } {
- // See the discussion in 'try_recv' for why we yield
- // control of this thread.
- loop {
- match self.queue.pop() {
- mpsc::Data(..) => {
- steals += 1;
- }
- mpsc::Empty | mpsc::Inconsistent => break,
- }
- }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&self) -> SignalToken {
- let ptr = self.to_wake.load(Ordering::SeqCst);
- self.to_wake.store(EMPTY, Ordering::SeqCst);
- assert!(ptr != EMPTY);
- unsafe { SignalToken::from_raw(ptr) }
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // increment the count on the channel (used for selection)
- fn bump(&self, amt: isize) -> isize {
- match self.cnt.fetch_add(amt, Ordering::SeqCst) {
- DISCONNECTED => {
- self.cnt.store(DISCONNECTED, Ordering::SeqCst);
- DISCONNECTED
- }
- n => n,
- }
- }
-
- // Cancels a previous thread waiting on this port, returning whether there's
- // data on the port.
- //
- // This is similar to the stream implementation (hence fewer comments), but
- // uses a different value for the "steals" variable.
- pub fn abort_selection(&self, _was_upgrade: bool) -> bool {
- // Before we do anything else, we bounce on this lock. The reason for
- // doing this is to ensure that any upgrade-in-progress is gone and
- // done with. Without this bounce, we can race with inherit_blocker
- // about looking at and dealing with to_wake. Once we have acquired the
- // lock, we are guaranteed that inherit_blocker is done.
- {
- let _guard = self.select_lock.lock().unwrap();
- }
-
- // Like the stream implementation, we want to make sure that the count
- // on the channel goes non-negative. We don't know how negative the
- // stream currently is, so instead of using a steal value of 1, we load
- // the channel count and figure out what we should do to make it
- // positive.
- let steals = {
- let cnt = self.cnt.load(Ordering::SeqCst);
- if cnt < 0 && cnt != DISCONNECTED { -cnt } else { 0 }
- };
- let prev = self.bump(steals + 1);
-
- if prev == DISCONNECTED {
- assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
- true
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
- if prev < 0 {
- drop(self.take_to_wake());
- } else {
- while self.to_wake.load(Ordering::SeqCst) != EMPTY {
- thread::yield_now();
- }
- }
- unsafe {
- // if the number of steals is -1, it was the pre-emptive -1 steal
- // count from when we inherited a blocker. This is fine because
- // we're just going to overwrite it with a real value.
- let old = self.steals.get();
- assert!(*old == 0 || *old == -1);
- *old = steals;
- prev >= 0
- }
- }
- }
-}
-
-impl<T> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed with also removing
- // the `to_wake` assert.
- assert_eq!(self.cnt.load(Ordering::SeqCst), DISCONNECTED);
- assert_eq!(self.to_wake.load(Ordering::SeqCst), EMPTY);
- assert_eq!(self.channels.load(Ordering::SeqCst), 0);
- }
-}
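
The steal/count bookkeeping above is the trickiest arithmetic in this file: `try_recv` lets `steals` grow until `MAX_STEALS`, then folds as much of it as possible back into `cnt`, so that neither counter drifts toward overflow while the observable message count `cnt - steals` stays unchanged. A sketch of just that rebalancing step, with plain integers standing in for the atomics and the DISCONNECTED sentinel (which the real code special-cases) ignored:

```rust
fn rebalance(cnt: &mut isize, steals: &mut isize) {
    let n = std::mem::replace(cnt, 0); // cnt.swap(0, SeqCst)
    let m = n.min(*steals);            // how much of `steals` cnt can absorb
    *steals -= m;
    *cnt += n - m;                     // self.bump(n - m)
}

fn main() {
    // 3 messages accounted in cnt, 5 unaccounted steals: cnt is fully
    // absorbed, 2 steals remain to be repaid by later receives.
    let (mut cnt, mut steals) = (3isize, 5isize);
    rebalance(&mut cnt, &mut steals);
    assert_eq!((cnt, steals), (0, 2));

    // 7 accounted, 5 steals: steals are cleared, cnt keeps the surplus.
    // In both cases `cnt - steals` is preserved (-2 and 2 respectively).
    let (mut cnt, mut steals) = (7isize, 5isize);
    rebalance(&mut cnt, &mut steals);
    assert_eq!((cnt, steals), (2, 0));
}
```
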
diff --git a/library/std/src/sync/mpsc/spsc_queue.rs b/library/std/src/sync/mpsc/spsc_queue.rs
deleted file mode 100644
index 7e745eb31..000000000
--- a/library/std/src/sync/mpsc/spsc_queue.rs
+++ /dev/null
@@ -1,236 +0,0 @@
-//! A single-producer single-consumer concurrent queue
-//!
-//! This module contains the implementation of an SPSC queue which can be used
-//! concurrently between two threads. This data structure is safe to use and
-//! enforces the semantics that there is one pusher and one popper.
-
-// https://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
-
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-use core::cell::UnsafeCell;
-use core::ptr;
-
-use crate::boxed::Box;
-use crate::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
-
-use super::cache_aligned::CacheAligned;
-
-// Node within the linked list queue of messages to send
-struct Node<T> {
- // FIXME: this could be an uninitialized T if we're careful enough, and
- // that would reduce memory usage (and be a bit faster).
- // is it worth it?
- value: Option<T>, // nullable for re-use of nodes
- cached: bool, // This node goes into the node cache
- next: AtomicPtr<Node<T>>, // next node in the queue
-}
-
-/// The single-producer single-consumer queue. This structure is not cloneable,
-/// but it can be safely shared in an Arc if it is guaranteed that there
-/// is only one popper and one pusher touching the queue at any one point in
-/// time.
-pub struct Queue<T, ProducerAddition = (), ConsumerAddition = ()> {
- // consumer fields
- consumer: CacheAligned<Consumer<T, ConsumerAddition>>,
-
- // producer fields
- producer: CacheAligned<Producer<T, ProducerAddition>>,
-}
-
-struct Consumer<T, Addition> {
- tail: UnsafeCell<*mut Node<T>>, // where to pop from
- tail_prev: AtomicPtr<Node<T>>, // where to pop from
- cache_bound: usize, // maximum cache size
- cached_nodes: AtomicUsize, // number of nodes marked as cacheable
- addition: Addition,
-}
-
-struct Producer<T, Addition> {
- head: UnsafeCell<*mut Node<T>>, // where to push to
- first: UnsafeCell<*mut Node<T>>, // where to get new nodes from
- tail_copy: UnsafeCell<*mut Node<T>>, // between first/tail
- addition: Addition,
-}
-
-unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Send for Queue<T, P, C> {}
-
-unsafe impl<T: Send, P: Send + Sync, C: Send + Sync> Sync for Queue<T, P, C> {}
-
-impl<T> Node<T> {
- fn new() -> *mut Node<T> {
- Box::into_raw(box Node {
- value: None,
- cached: false,
- next: AtomicPtr::new(ptr::null_mut::<Node<T>>()),
- })
- }
-}
-
-impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, ConsumerAddition> {
-    /// Creates a new queue with the given additional elements in the producer
-    /// and consumer portions of the queue.
- ///
-    /// Due to the performance implications of cache contention,
-    /// we wish to keep fields used mainly by the producer on a separate cache
-    /// line from those used by the consumer.
- /// Since cache lines are usually 64 bytes, it is unreasonably expensive to
- /// allocate one for small fields, so we allow users to insert additional
- /// fields into the cache lines already allocated by this for the producer
- /// and consumer.
- ///
- /// This is unsafe as the type system doesn't enforce a single
- /// consumer-producer relationship. It also allows the consumer to `pop`
- /// items while there is a `peek` active due to all methods having a
- /// non-mutable receiver.
- ///
- /// # Arguments
- ///
- /// * `bound` - This queue implementation is implemented with a linked
- /// list, and this means that a push is always a malloc. In
- /// order to amortize this cost, an internal cache of nodes is
- /// maintained to prevent a malloc from always being
- /// necessary. This bound is the limit on the size of the
- /// cache (if desired). If the value is 0, then the cache has
- /// no bound. Otherwise, the cache will never grow larger than
-    ///             `bound` (although the queue itself could be much larger).
- pub unsafe fn with_additions(
- bound: usize,
- producer_addition: ProducerAddition,
- consumer_addition: ConsumerAddition,
- ) -> Self {
- let n1 = Node::new();
- let n2 = Node::new();
- (*n1).next.store(n2, Ordering::Relaxed);
- Queue {
- consumer: CacheAligned::new(Consumer {
- tail: UnsafeCell::new(n2),
- tail_prev: AtomicPtr::new(n1),
- cache_bound: bound,
- cached_nodes: AtomicUsize::new(0),
- addition: consumer_addition,
- }),
- producer: CacheAligned::new(Producer {
- head: UnsafeCell::new(n2),
- first: UnsafeCell::new(n1),
- tail_copy: UnsafeCell::new(n1),
- addition: producer_addition,
- }),
- }
- }
-
- /// Pushes a new value onto this queue. Note that to use this function
- /// safely, it must be externally guaranteed that there is only one pusher.
- pub fn push(&self, t: T) {
- unsafe {
- // Acquire a node (which either uses a cached one or allocates a new
- // one), and then append this to the 'head' node.
- let n = self.alloc();
- assert!((*n).value.is_none());
- (*n).value = Some(t);
- (*n).next.store(ptr::null_mut(), Ordering::Relaxed);
- (**self.producer.head.get()).next.store(n, Ordering::Release);
- *(&self.producer.head).get() = n;
- }
- }
-
- unsafe fn alloc(&self) -> *mut Node<T> {
- // First try to see if we can consume the 'first' node for our uses.
- if *self.producer.first.get() != *self.producer.tail_copy.get() {
- let ret = *self.producer.first.get();
- *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
- return ret;
- }
- // If the above fails, then update our copy of the tail and try
- // again.
- *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire);
- if *self.producer.first.get() != *self.producer.tail_copy.get() {
- let ret = *self.producer.first.get();
- *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
- return ret;
- }
- // If all of that fails, then we have to allocate a new node
- // (there's nothing in the node cache).
- Node::new()
- }
-
- /// Attempts to pop a value from this queue. Remember that to use this type
- /// safely you must ensure that there is only one popper at a time.
- pub fn pop(&self) -> Option<T> {
- unsafe {
- // The `tail` node is not actually a used node, but rather a
- // sentinel from where we should start popping from. Hence, look at
- // tail's next field and see if we can use it. If we do a pop, then
- // the current tail node is a candidate for going into the cache.
- let tail = *self.consumer.tail.get();
- let next = (*tail).next.load(Ordering::Acquire);
- if next.is_null() {
- return None;
- }
- assert!((*next).value.is_some());
- let ret = (*next).value.take();
-
- *self.consumer.0.tail.get() = next;
- if self.consumer.cache_bound == 0 {
- self.consumer.tail_prev.store(tail, Ordering::Release);
- } else {
- let cached_nodes = self.consumer.cached_nodes.load(Ordering::Relaxed);
- if cached_nodes < self.consumer.cache_bound && !(*tail).cached {
- self.consumer.cached_nodes.store(cached_nodes, Ordering::Relaxed);
- (*tail).cached = true;
- }
-
- if (*tail).cached {
- self.consumer.tail_prev.store(tail, Ordering::Release);
- } else {
- (*self.consumer.tail_prev.load(Ordering::Relaxed))
- .next
- .store(next, Ordering::Relaxed);
- // We have successfully erased all references to 'tail', so
- // now we can safely drop it.
- let _: Box<Node<T>> = Box::from_raw(tail);
- }
- }
- ret
- }
- }
-
- /// Attempts to peek at the head of the queue, returning `None` if the queue
- /// has no data currently
- ///
- /// # Warning
- /// The reference returned is invalid if it is not used before the consumer
- /// pops the value off the queue. If the producer then pushes another value
- /// onto the queue, it will overwrite the value pointed to by the reference.
- pub fn peek(&self) -> Option<&mut T> {
- // This is essentially the same as above with all the popping bits
- // stripped out.
- unsafe {
- let tail = *self.consumer.tail.get();
- let next = (*tail).next.load(Ordering::Acquire);
- if next.is_null() { None } else { (*next).value.as_mut() }
- }
- }
-
- pub fn producer_addition(&self) -> &ProducerAddition {
- &self.producer.addition
- }
-
- pub fn consumer_addition(&self) -> &ConsumerAddition {
- &self.consumer.addition
- }
-}
-
-impl<T, ProducerAddition, ConsumerAddition> Drop for Queue<T, ProducerAddition, ConsumerAddition> {
- fn drop(&mut self) {
- unsafe {
- let mut cur = *self.producer.first.get();
- while !cur.is_null() {
- let next = (*cur).next.load(Ordering::Relaxed);
- let _n: Box<Node<T>> = Box::from_raw(cur);
- cur = next;
- }
- }
- }
-}
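
`with_additions` above packs user data into the two cache lines the queue already pays for. The layout idea, independent of the queue itself, looks like the following sketch; `CacheLine` is a hypothetical stand-in for the deleted `CacheAligned` wrapper, and the 64-byte line size is an assumption (real line sizes vary by platform):

```rust
use std::mem::{align_of, size_of};

// Force each half onto its own 64-byte line.
#[repr(align(64))]
struct CacheLine<T>(T);

// Shape of the deleted Queue: consumer-hot fields on one line,
// producer-hot fields (plus any user "additions") on another.
struct SpscLayout {
    consumer_hot: CacheLine<u64>,
    producer_hot: CacheLine<u64>,
}

fn main() {
    assert_eq!(align_of::<CacheLine<u64>>(), 64);
    // Two aligned halves occupy at least two distinct cache lines, so a
    // push and a pop never contend on the same line.
    assert!(size_of::<SpscLayout>() >= 128);
    let q = SpscLayout { consumer_hot: CacheLine(0), producer_hot: CacheLine(0) };
    assert_eq!(q.consumer_hot.0 + q.producer_hot.0, 0);
}
```
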
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
deleted file mode 100644
index eb6d5c2cf..000000000
--- a/library/std/src/sync/mpsc/spsc_queue/tests.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use super::Queue;
-use crate::sync::mpsc::channel;
-use crate::sync::Arc;
-use crate::thread;
-
-#[test]
-fn smoke() {
- unsafe {
- let queue = Queue::with_additions(0, (), ());
- queue.push(1);
- queue.push(2);
- assert_eq!(queue.pop(), Some(1));
- assert_eq!(queue.pop(), Some(2));
- assert_eq!(queue.pop(), None);
- queue.push(3);
- queue.push(4);
- assert_eq!(queue.pop(), Some(3));
- assert_eq!(queue.pop(), Some(4));
- assert_eq!(queue.pop(), None);
- }
-}
-
-#[test]
-fn peek() {
- unsafe {
- let queue = Queue::with_additions(0, (), ());
- queue.push(vec![1]);
-
- // Ensure the borrowchecker works
- match queue.peek() {
- Some(vec) => {
- assert_eq!(&*vec, &[1]);
- }
- None => unreachable!(),
- }
-
- match queue.pop() {
- Some(vec) => {
- assert_eq!(&*vec, &[1]);
- }
- None => unreachable!(),
- }
- }
-}
-
-#[test]
-fn drop_full() {
- unsafe {
- let q: Queue<Box<_>> = Queue::with_additions(0, (), ());
- q.push(Box::new(1));
- q.push(Box::new(2));
- }
-}
-
-#[test]
-fn smoke_bound() {
- unsafe {
- let q = Queue::with_additions(0, (), ());
- q.push(1);
- q.push(2);
- assert_eq!(q.pop(), Some(1));
- assert_eq!(q.pop(), Some(2));
- assert_eq!(q.pop(), None);
- q.push(3);
- q.push(4);
- assert_eq!(q.pop(), Some(3));
- assert_eq!(q.pop(), Some(4));
- assert_eq!(q.pop(), None);
- }
-}
-
-#[test]
-fn stress() {
- unsafe {
- stress_bound(0);
- stress_bound(1);
- }
-
- unsafe fn stress_bound(bound: usize) {
- let count = if cfg!(miri) { 1000 } else { 100000 };
- let q = Arc::new(Queue::with_additions(bound, (), ()));
-
- let (tx, rx) = channel();
- let q2 = q.clone();
- let _t = thread::spawn(move || {
- for _ in 0..count {
- loop {
- match q2.pop() {
- Some(1) => break,
- Some(_) => panic!(),
- None => {}
- }
- }
- }
- tx.send(()).unwrap();
- });
- for _ in 0..count {
- q.push(1);
- }
- rx.recv().unwrap();
- }
-}
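
The `peek` test above passes only because the peeked reference is dropped before `pop` runs, which is precisely the Warning on `Queue::peek`. A safe-Rust sketch of that discipline, with `VecDeque` standing in for the unsafe queue:

```rust
use std::collections::VecDeque;

fn main() {
    // The borrow checker enforces here what the deleted queue can only
    // document, because VecDeque's pop takes `&mut self`.
    let mut q: VecDeque<Vec<i32>> = VecDeque::new();
    q.push_back(vec![1]);

    // Correct: the peeked reference is finished with before the pop.
    if let Some(v) = q.front() {
        assert_eq!(v.as_slice(), &[1]);
    }
    assert_eq!(q.pop_front(), Some(vec![1]));

    // With the deleted queue, holding the `peek` reference across a pop
    // (and a push that reuses the node) is exactly the overwrite the
    // Warning describes; `pop(&self)` hides it from the borrow checker.
}
```
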
diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs
deleted file mode 100644
index 4592e9141..000000000
--- a/library/std/src/sync/mpsc/stream.rs
+++ /dev/null
@@ -1,457 +0,0 @@
-/// Stream channels
-///
-/// This is the flavor of channels which are optimized for one sender and one
-/// receiver. The sender will be upgraded to a shared channel if the channel is
-/// cloned.
-///
-/// High level implementation details can be found in the comment of the parent
-/// module.
-pub use self::Failure::*;
-use self::Message::*;
-pub use self::UpgradeResult::*;
-
-use core::cmp;
-
-use crate::cell::UnsafeCell;
-use crate::ptr;
-use crate::thread;
-use crate::time::Instant;
-
-use crate::sync::atomic::{AtomicBool, AtomicIsize, AtomicPtr, Ordering};
-use crate::sync::mpsc::blocking::{self, SignalToken};
-use crate::sync::mpsc::spsc_queue as spsc;
-use crate::sync::mpsc::Receiver;
-
-const DISCONNECTED: isize = isize::MIN;
-#[cfg(test)]
-const MAX_STEALS: isize = 5;
-#[cfg(not(test))]
-const MAX_STEALS: isize = 1 << 20;
-const EMPTY: *mut u8 = ptr::null_mut(); // initial state: no data, no blocked receiver
-
-pub struct Packet<T> {
- // internal queue for all messages
- queue: spsc::Queue<Message<T>, ProducerAddition, ConsumerAddition>,
-}
-
-struct ProducerAddition {
- cnt: AtomicIsize, // How many items are on this channel
- to_wake: AtomicPtr<u8>, // SignalToken for the blocked thread to wake up
-
- port_dropped: AtomicBool, // flag if the channel has been destroyed.
-}
-
-struct ConsumerAddition {
- steals: UnsafeCell<isize>, // How many times has a port received without blocking?
-}
-
-pub enum Failure<T> {
- Empty,
- Disconnected,
- Upgraded(Receiver<T>),
-}
-
-pub enum UpgradeResult {
- UpSuccess,
- UpDisconnected,
- UpWoke(SignalToken),
-}
-
-// Any message could contain an "upgrade request" to a new shared port, so the
-// internal queue is not a queue of T, but rather of Message<T>
-enum Message<T> {
- Data(T),
- GoUp(Receiver<T>),
-}
-
-impl<T> Packet<T> {
- pub fn new() -> Packet<T> {
- Packet {
- queue: unsafe {
- spsc::Queue::with_additions(
- 128,
- ProducerAddition {
- cnt: AtomicIsize::new(0),
- to_wake: AtomicPtr::new(EMPTY),
-
- port_dropped: AtomicBool::new(false),
- },
- ConsumerAddition { steals: UnsafeCell::new(0) },
- )
- },
- }
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
-        // If the other port has deterministically gone away, then we definitely
- // must return the data back up the stack. Otherwise, the data is
- // considered as being sent.
- if self.queue.producer_addition().port_dropped.load(Ordering::SeqCst) {
- return Err(t);
- }
-
- match self.do_send(Data(t)) {
- UpSuccess | UpDisconnected => {}
- UpWoke(token) => {
- token.signal();
- }
- }
- Ok(())
- }
-
- pub fn upgrade(&self, up: Receiver<T>) -> UpgradeResult {
- // If the port has gone away, then there's no need to proceed any
- // further.
- if self.queue.producer_addition().port_dropped.load(Ordering::SeqCst) {
- return UpDisconnected;
- }
-
- self.do_send(GoUp(up))
- }
-
- fn do_send(&self, t: Message<T>) -> UpgradeResult {
- self.queue.push(t);
- match self.queue.producer_addition().cnt.fetch_add(1, Ordering::SeqCst) {
- // As described in the mod's doc comment, -1 == wakeup
- -1 => UpWoke(self.take_to_wake()),
- // As described before, SPSC queues must be >= -2
- -2 => UpSuccess,
-
- // Be sure to preserve the disconnected state, and the return value
- // in this case is going to be whether our data was received or not.
-            // This manifests itself in whether we have an empty queue or not.
- //
-            // Primarily, we are required to drain the queue here because the port
- // will never remove this data. We can only have at most one item to
- // drain (the port drains the rest).
- DISCONNECTED => {
- self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
- let first = self.queue.pop();
- let second = self.queue.pop();
- assert!(second.is_none());
-
- match first {
- Some(..) => UpSuccess, // we failed to send the data
- None => UpDisconnected, // we successfully sent data
- }
- }
-
- // Otherwise we just sent some data on a non-waiting queue, so just
- // make sure the world is sane and carry on!
- n => {
- assert!(n >= 0);
- UpSuccess
- }
- }
- }
-
- // Consumes ownership of the 'to_wake' field.
- fn take_to_wake(&self) -> SignalToken {
- let ptr = self.queue.producer_addition().to_wake.load(Ordering::SeqCst);
- self.queue.producer_addition().to_wake.store(EMPTY, Ordering::SeqCst);
- assert!(ptr != EMPTY);
- unsafe { SignalToken::from_raw(ptr) }
- }
-
- // Decrements the count on the channel for a sleeper, returning the sleeper
- // back if it shouldn't sleep. Note that this is the location where we take
- // steals into account.
- fn decrement(&self, token: SignalToken) -> Result<(), SignalToken> {
- assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
- let ptr = unsafe { token.to_raw() };
- self.queue.producer_addition().to_wake.store(ptr, Ordering::SeqCst);
-
- let steals = unsafe { ptr::replace(self.queue.consumer_addition().steals.get(), 0) };
-
- match self.queue.producer_addition().cnt.fetch_sub(1 + steals, Ordering::SeqCst) {
- DISCONNECTED => {
- self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
- }
- // If we factor in our steals and notice that the channel has no
- // data, we successfully sleep
- n => {
- assert!(n >= 0);
- if n - steals <= 0 {
- return Ok(());
- }
- }
- }
-
- self.queue.producer_addition().to_wake.store(EMPTY, Ordering::SeqCst);
- Err(unsafe { SignalToken::from_raw(ptr) })
- }
-
- pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure<T>> {
- // Optimistic preflight check (scheduling is expensive).
- match self.try_recv() {
- Err(Empty) => {}
- data => return data,
- }
-
- // Welp, our channel has no data. Deschedule the current thread and
- // initiate the blocking protocol.
- let (wait_token, signal_token) = blocking::tokens();
- if self.decrement(signal_token).is_ok() {
- if let Some(deadline) = deadline {
- let timed_out = !wait_token.wait_max_until(deadline);
- if timed_out {
- self.abort_selection(/* was_upgrade = */ false).map_err(Upgraded)?;
- }
- } else {
- wait_token.wait();
- }
- }
-
- match self.try_recv() {
- // Messages which actually popped from the queue shouldn't count as
- // a steal, so offset the decrement here (we already have our
- // "steal" factored into the channel count above).
- data @ (Ok(..) | Err(Upgraded(..))) => unsafe {
- *self.queue.consumer_addition().steals.get() -= 1;
- data
- },
-
- data => data,
- }
- }
-
- pub fn try_recv(&self) -> Result<T, Failure<T>> {
- match self.queue.pop() {
- // If we stole some data, record to that effect (this will be
- // factored into cnt later on).
- //
- // Note that we don't allow steals to grow without bound in order to
- // prevent eventual overflow of either steals or cnt as an overflow
- // would have catastrophic results. Sometimes, steals > cnt, but
- // other times cnt > steals, so we don't know the relation between
- // steals and cnt. This code path is executed only rarely, so we do
- // a pretty slow operation, of swapping 0 into cnt, taking steals
- // down as much as possible (without going negative), and then
- // adding back in whatever we couldn't factor into steals.
- Some(data) => unsafe {
- if *self.queue.consumer_addition().steals.get() > MAX_STEALS {
- match self.queue.producer_addition().cnt.swap(0, Ordering::SeqCst) {
- DISCONNECTED => {
- self.queue
- .producer_addition()
- .cnt
- .store(DISCONNECTED, Ordering::SeqCst);
- }
- n => {
- let m = cmp::min(n, *self.queue.consumer_addition().steals.get());
- *self.queue.consumer_addition().steals.get() -= m;
- self.bump(n - m);
- }
- }
- assert!(*self.queue.consumer_addition().steals.get() >= 0);
- }
- *self.queue.consumer_addition().steals.get() += 1;
- match data {
- Data(t) => Ok(t),
- GoUp(up) => Err(Upgraded(up)),
- }
- },
-
- None => {
- match self.queue.producer_addition().cnt.load(Ordering::SeqCst) {
- n if n != DISCONNECTED => Err(Empty),
-
- // This is a little bit of a tricky case. We failed to pop
- // data above, and then we have viewed that the channel is
- // disconnected. In this window more data could have been
- // sent on the channel. It doesn't really make sense to
- // return that the channel is disconnected when there's
- // actually data on it, so be extra sure there's no data by
- // popping one more time.
- //
- // We can ignore steals because the other end is
- // disconnected and we'll never need to really factor in our
- // steals again.
- _ => match self.queue.pop() {
- Some(Data(t)) => Ok(t),
- Some(GoUp(up)) => Err(Upgraded(up)),
- None => Err(Disconnected),
- },
- }
- }
- }
- }
-
- pub fn drop_chan(&self) {
- // Dropping a channel is pretty simple, we just flag it as disconnected
- // and then wakeup a blocker if there is one.
- match self.queue.producer_addition().cnt.swap(DISCONNECTED, Ordering::SeqCst) {
- -1 => {
- self.take_to_wake().signal();
- }
- DISCONNECTED => {}
- n => {
- assert!(n >= 0);
- }
- }
- }
-
- pub fn drop_port(&self) {
- // Dropping a port seems like a fairly trivial thing. In theory all we
- // need to do is flag that we're disconnected and then everything else
- // can take over (we don't have anyone to wake up).
- //
- // The catch for Ports is that we want to drop the entire contents of
- // the queue. There are multiple reasons for having this property, the
- // largest of which is that if another chan is waiting in this channel
- // (but not received yet), then waiting on that port will cause a
- // deadlock.
- //
- // So if we accept that we must now destroy the entire contents of the
- // queue, this code may make a bit more sense. The tricky part is that
- // we can't let any in-flight sends go un-dropped, we have to make sure
- // *everything* is dropped and nothing new will come onto the channel.
-
- // The first thing we do is set a flag saying that we're done for. All
- // sends are gated on this flag, so we're immediately guaranteed that
- // there are a bounded number of active sends that we'll have to deal
- // with.
- self.queue.producer_addition().port_dropped.store(true, Ordering::SeqCst);
-
- // Now that we're guaranteed to deal with a bounded number of senders,
- // we need to drain the queue. This draining process happens atomically
- // with respect to the "count" of the channel. If the count is nonzero
- // (with steals taken into account), then there must be data on the
- // channel. In this case we drain everything and then try again. We will
- // continue to fail while active senders send data while we're dropping
- // data, but eventually we're guaranteed to break out of this loop
- // (because there is a bounded number of senders).
- let mut steals = unsafe { *self.queue.consumer_addition().steals.get() };
- while {
- match self.queue.producer_addition().cnt.compare_exchange(
- steals,
- DISCONNECTED,
- Ordering::SeqCst,
- Ordering::SeqCst,
- ) {
- Ok(_) => false,
- Err(old) => old != DISCONNECTED,
- }
- } {
- while self.queue.pop().is_some() {
- steals += 1;
- }
- }
-
- // At this point in time, we have gated all future senders from sending,
- // and we have flagged the channel as being disconnected. The senders
- // still have some responsibility, however, because some sends might not
- // complete until after we flag the disconnection. There are more
- // details in the sending methods that see DISCONNECTED
- }
-
- ////////////////////////////////////////////////////////////////////////////
- // select implementation
- ////////////////////////////////////////////////////////////////////////////
-
- // increment the count on the channel (used for selection)
- fn bump(&self, amt: isize) -> isize {
- match self.queue.producer_addition().cnt.fetch_add(amt, Ordering::SeqCst) {
- DISCONNECTED => {
- self.queue.producer_addition().cnt.store(DISCONNECTED, Ordering::SeqCst);
- DISCONNECTED
- }
- n => n,
- }
- }
-
- // Removes a previous thread from being blocked in this port
- pub fn abort_selection(&self, was_upgrade: bool) -> Result<bool, Receiver<T>> {
- // If we're aborting selection after upgrading from a oneshot, then
-        // we're guaranteed that no one is waiting. The only way that we could
- // have seen the upgrade is if data was actually sent on the channel
- // half again. For us, this means that there is guaranteed to be data on
- // this channel. Furthermore, we're guaranteed that there was no
- // start_selection previously, so there's no need to modify `self.cnt`
- // at all.
- //
- // Hence, because of these invariants, we immediately return `Ok(true)`.
- // Note that the data might not actually be sent on the channel just yet.
- // The other end could have flagged the upgrade but not sent data to
-        // this end. This is fine because we know it's a small bounded window
- // of time until the data is actually sent.
- if was_upgrade {
- assert_eq!(unsafe { *self.queue.consumer_addition().steals.get() }, 0);
- assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
- return Ok(true);
- }
-
- // We want to make sure that the count on the channel goes non-negative,
- // and in the stream case we can have at most one steal, so just assume
- // that we had one steal.
- let steals = 1;
- let prev = self.bump(steals + 1);
-
- // If we were previously disconnected, then we know for sure that there
- // is no thread in to_wake, so just keep going
- let has_data = if prev == DISCONNECTED {
- assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
- true // there is data, that data is that we're disconnected
- } else {
- let cur = prev + steals + 1;
- assert!(cur >= 0);
-
- // If the previous count was negative, then we just made things go
- // positive, hence we passed the -1 boundary and we're responsible
- // for removing the to_wake() field and trashing it.
- //
- // If the previous count was positive then we're in a tougher
- // situation. A possible race is that a sender just incremented
- // through -1 (meaning it's going to try to wake a thread up), but it
- // hasn't yet read the to_wake. In order to prevent a future recv()
- // from waking up too early (this sender picking up the plastered
- // over to_wake), we spin loop here waiting for to_wake to be 0.
- // Note that this entire select() implementation needs an overhaul,
- // and this is *not* the worst part of it, so this is not done as a
- // final solution but rather out of necessity for now to get
- // something working.
- if prev < 0 {
- drop(self.take_to_wake());
- } else {
- while self.queue.producer_addition().to_wake.load(Ordering::SeqCst) != EMPTY {
- thread::yield_now();
- }
- }
- unsafe {
- assert_eq!(*self.queue.consumer_addition().steals.get(), 0);
- *self.queue.consumer_addition().steals.get() = steals;
- }
-
- // if we were previously positive, then there's surely data to
- // receive
- prev >= 0
- };
-
- // Now that we've determined that this queue "has data", we peek at the
- // queue to see if the data is an upgrade or not. If it's an upgrade,
- // then we need to destroy this port and abort selection on the
- // upgraded port.
- if has_data {
- match self.queue.peek() {
- Some(&mut GoUp(..)) => match self.queue.pop() {
- Some(GoUp(port)) => Err(port),
- _ => unreachable!(),
- },
- _ => Ok(true),
- }
- } else {
- Ok(false)
- }
- }
-}
-
-impl<T> Drop for Packet<T> {
- fn drop(&mut self) {
- // Note that this load is not only an assert for correctness about
- // disconnection, but also a proper fence before the read of
- // `to_wake`, so this assert cannot be removed without also removing
- // the `to_wake` assert.
- assert_eq!(self.queue.producer_addition().cnt.load(Ordering::SeqCst), DISCONNECTED);
- assert_eq!(self.queue.producer_addition().to_wake.load(Ordering::SeqCst), EMPTY);
- }
-}
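
The deleted `bump` helper above shows a reusable pattern: a counter that saturates at a `DISCONNECTED` sentinel, which is re-stored immediately after every `fetch_add` so that arithmetic can never resurrect a dead channel. A minimal standalone sketch of that pattern (the sentinel value and function shape mirror the deleted code, but this is an illustration, not the std implementation):

```rust
use std::sync::atomic::{AtomicIsize, Ordering};

// Sentinel chosen far away from any reachable count, as in the deleted code.
const DISCONNECTED: isize = isize::MIN;

fn bump(cnt: &AtomicIsize, amt: isize) -> isize {
    match cnt.fetch_add(amt, Ordering::SeqCst) {
        // The add briefly perturbed the sentinel; put it back so no other
        // observer can mistake the channel for a live one.
        DISCONNECTED => {
            cnt.store(DISCONNECTED, Ordering::SeqCst);
            DISCONNECTED
        }
        n => n,
    }
}

fn main() {
    let cnt = AtomicIsize::new(0);
    assert_eq!(bump(&cnt, 2), 0); // live channel: returns the previous count
    cnt.store(DISCONNECTED, Ordering::SeqCst);
    assert_eq!(bump(&cnt, 2), DISCONNECTED); // sentinel survives the add
    assert_eq!(cnt.load(Ordering::SeqCst), DISCONNECTED);
}
```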
diff --git a/library/std/src/sync/mpsc/sync.rs b/library/std/src/sync/mpsc/sync.rs
deleted file mode 100644
index 733761671..000000000
--- a/library/std/src/sync/mpsc/sync.rs
+++ /dev/null
@@ -1,495 +0,0 @@
-use self::Blocker::*;
-/// Synchronous channels/ports
-///
-/// This channel implementation differs significantly from the asynchronous
-/// implementations found next to it (oneshot/stream/share). This is an
-/// implementation of a synchronous, bounded buffer channel.
-///
-/// Each channel is created with some amount of backing buffer, and sends will
-/// *block* until buffer space becomes available. A buffer size of 0 is valid,
-/// which means that every successful send is paired with a successful recv.
-///
-/// This flavor of channel defines a new `send_opt` method, by which a message
-/// can be sent without the thread panicking if it cannot be delivered.
-///
-/// Another major difference is that send() will *always* return the data if
-/// it couldn't be sent. This is because it is deterministically known when
-/// the data is received and when it is not received.
-///
-/// Implementation-wise, it can all be summed up with "use a mutex plus some
-/// logic". The mutex used here is an OS native mutex, meaning that no user code
-/// is run inside of the mutex (to prevent context switching). This
-/// implementation shares almost all code for the buffered and unbuffered cases
-/// of a synchronous channel. There are a few branches for the unbuffered case,
-/// but they're mostly just relevant to blocking senders.
-pub use self::Failure::*;
-
-use core::intrinsics::abort;
-use core::mem;
-use core::ptr;
-
-use crate::sync::atomic::{AtomicUsize, Ordering};
-use crate::sync::mpsc::blocking::{self, SignalToken, WaitToken};
-use crate::sync::{Mutex, MutexGuard};
-use crate::time::Instant;
-
-const MAX_REFCOUNT: usize = (isize::MAX) as usize;
-
-pub struct Packet<T> {
- /// Only field outside of the mutex. Just done for kicks, but mainly because
- /// the other shared channel already had the code implemented
- channels: AtomicUsize,
-
- lock: Mutex<State<T>>,
-}
-
-unsafe impl<T: Send> Send for Packet<T> {}
-
-unsafe impl<T: Send> Sync for Packet<T> {}
-
-struct State<T> {
- disconnected: bool, // Is the channel disconnected yet?
- queue: Queue, // queue of senders waiting to send data
- blocker: Blocker, // currently blocked thread on this channel
- buf: Buffer<T>, // storage for buffered messages
- cap: usize, // capacity of this channel
-
- /// A curious flag used to indicate whether a sender failed or succeeded in
- /// blocking. This is used to tell the blocked sender that it must dequeue
- /// its message from the buffer because it was not received. This is only
- /// relevant in the 0-buffer case. The `&'static mut bool` obviously cannot
- /// be safely constructed, but it's guaranteed to always have a valid
- /// pointer value.
- canceled: Option<&'static mut bool>,
-}
-
-unsafe impl<T: Send> Send for State<T> {}
-
-/// Possible flavors of threads who can be blocked on this channel.
-enum Blocker {
- BlockedSender(SignalToken),
- BlockedReceiver(SignalToken),
- NoneBlocked,
-}
-
-/// Simple queue for threading threads together. Nodes are stack-allocated, so
-/// this structure is not safe at all
-struct Queue {
- head: *mut Node,
- tail: *mut Node,
-}
-
-struct Node {
- token: Option<SignalToken>,
- next: *mut Node,
-}
-
-unsafe impl Send for Node {}
-
-/// A simple ring-buffer
-struct Buffer<T> {
- buf: Vec<Option<T>>,
- start: usize,
- size: usize,
-}
-
-#[derive(Debug)]
-pub enum Failure {
- Empty,
- Disconnected,
-}
-
-/// Atomically blocks the current thread, placing it into the channel's
-/// `blocker` slot and unlocking `lock` in the meantime. This re-locks the
-/// mutex upon returning.
-fn wait<'a, 'b, T>(
- lock: &'a Mutex<State<T>>,
- mut guard: MutexGuard<'b, State<T>>,
- f: fn(SignalToken) -> Blocker,
-) -> MutexGuard<'a, State<T>> {
- let (wait_token, signal_token) = blocking::tokens();
- match mem::replace(&mut guard.blocker, f(signal_token)) {
- NoneBlocked => {}
- _ => unreachable!(),
- }
- drop(guard); // unlock
- wait_token.wait(); // block
- lock.lock().unwrap() // relock
-}
-
-/// Same as wait, but waiting at most until `deadline`.
-fn wait_timeout_receiver<'a, 'b, T>(
- lock: &'a Mutex<State<T>>,
- deadline: Instant,
- mut guard: MutexGuard<'b, State<T>>,
- success: &mut bool,
-) -> MutexGuard<'a, State<T>> {
- let (wait_token, signal_token) = blocking::tokens();
- match mem::replace(&mut guard.blocker, BlockedReceiver(signal_token)) {
- NoneBlocked => {}
- _ => unreachable!(),
- }
- drop(guard); // unlock
- *success = wait_token.wait_max_until(deadline); // block
- let mut new_guard = lock.lock().unwrap(); // relock
- if !*success {
- abort_selection(&mut new_guard);
- }
- new_guard
-}
-
-fn abort_selection<T>(guard: &mut MutexGuard<'_, State<T>>) -> bool {
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- NoneBlocked => true,
- BlockedSender(token) => {
- guard.blocker = BlockedSender(token);
- true
- }
- BlockedReceiver(token) => {
- drop(token);
- false
- }
- }
-}
-
-/// Wakes up a thread, dropping the lock at the correct time
-fn wakeup<T>(token: SignalToken, guard: MutexGuard<'_, State<T>>) {
- // We need to be careful to wake up the waiting thread *outside* of the mutex
- // in case it incurs a context switch.
- drop(guard);
- token.signal();
-}
-
-impl<T> Packet<T> {
- pub fn new(capacity: usize) -> Packet<T> {
- Packet {
- channels: AtomicUsize::new(1),
- lock: Mutex::new(State {
- disconnected: false,
- blocker: NoneBlocked,
- cap: capacity,
- canceled: None,
- queue: Queue { head: ptr::null_mut(), tail: ptr::null_mut() },
- buf: Buffer {
- buf: (0..capacity + if capacity == 0 { 1 } else { 0 }).map(|_| None).collect(),
- start: 0,
- size: 0,
- },
- }),
- }
- }
-
- // wait until a send slot is available, returning locked access to
- // the channel state.
- fn acquire_send_slot(&self) -> MutexGuard<'_, State<T>> {
- let mut node = Node { token: None, next: ptr::null_mut() };
- loop {
- let mut guard = self.lock.lock().unwrap();
- // are we ready to go?
- if guard.disconnected || guard.buf.size() < guard.buf.capacity() {
- return guard;
- }
- // no room; actually block
- let wait_token = guard.queue.enqueue(&mut node);
- drop(guard);
- wait_token.wait();
- }
- }
-
- pub fn send(&self, t: T) -> Result<(), T> {
- let mut guard = self.acquire_send_slot();
- if guard.disconnected {
- return Err(t);
- }
- guard.buf.enqueue(t);
-
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- // if our capacity is 0, then we need to wait for a receiver to be
- // available to take our data. After waiting, we check again to make
- // sure the port didn't go away in the meantime. If it did, we need
- // to hand back our data.
- NoneBlocked if guard.cap == 0 => {
- let mut canceled = false;
- assert!(guard.canceled.is_none());
- guard.canceled = Some(unsafe { mem::transmute(&mut canceled) });
- let mut guard = wait(&self.lock, guard, BlockedSender);
- if canceled { Err(guard.buf.dequeue()) } else { Ok(()) }
- }
-
- // success, we buffered some data
- NoneBlocked => Ok(()),
-
- // success, someone's about to receive our buffered data.
- BlockedReceiver(token) => {
- wakeup(token, guard);
- Ok(())
- }
-
- BlockedSender(..) => panic!("lolwut"),
- }
- }
-
- pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
- let mut guard = self.lock.lock().unwrap();
- if guard.disconnected {
- Err(super::TrySendError::Disconnected(t))
- } else if guard.buf.size() == guard.buf.capacity() {
- Err(super::TrySendError::Full(t))
- } else if guard.cap == 0 {
- // With capacity 0, even though we have buffer space we can't
- // transfer the data unless there's a receiver waiting.
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- NoneBlocked => Err(super::TrySendError::Full(t)),
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(token) => {
- guard.buf.enqueue(t);
- wakeup(token, guard);
- Ok(())
- }
- }
- } else {
- // If the buffer has some space and the capacity isn't 0, then we
- // just enqueue the data for later retrieval, ensuring to wake up
- // any blocked receiver if there is one.
- assert!(guard.buf.size() < guard.buf.capacity());
- guard.buf.enqueue(t);
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- BlockedReceiver(token) => wakeup(token, guard),
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- }
- Ok(())
- }
- }
-
- // Receives a message from this channel
- //
- // When reading this, remember that there can only ever be one receiver at
- // a time.
- pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure> {
- let mut guard = self.lock.lock().unwrap();
-
- let mut woke_up_after_waiting = false;
- // Wait for the buffer to have something in it. No need for a
- // while loop because we're the only receiver.
- if !guard.disconnected && guard.buf.size() == 0 {
- if let Some(deadline) = deadline {
- guard =
- wait_timeout_receiver(&self.lock, deadline, guard, &mut woke_up_after_waiting);
- } else {
- guard = wait(&self.lock, guard, BlockedReceiver);
- woke_up_after_waiting = true;
- }
- }
-
- // N.B., channel could be disconnected while waiting, so the order of
- // these conditionals is important.
- if guard.disconnected && guard.buf.size() == 0 {
- return Err(Disconnected);
- }
-
- // Pick up the data, wake up our neighbors, and carry on
- assert!(guard.buf.size() > 0 || (deadline.is_some() && !woke_up_after_waiting));
-
- if guard.buf.size() == 0 {
- return Err(Empty);
- }
-
- let ret = guard.buf.dequeue();
- self.wakeup_senders(woke_up_after_waiting, guard);
- Ok(ret)
- }
-
- pub fn try_recv(&self) -> Result<T, Failure> {
- let mut guard = self.lock.lock().unwrap();
-
- // Easy cases first
- if guard.disconnected && guard.buf.size() == 0 {
- return Err(Disconnected);
- }
- if guard.buf.size() == 0 {
- return Err(Empty);
- }
-
- // Be sure to wake up neighbors
- let ret = Ok(guard.buf.dequeue());
- self.wakeup_senders(false, guard);
- ret
- }
-
- // Wake up pending senders after some data has been received
- //
- // * `waited` - whether the receiver blocked to receive some data, or just
- // picked up some data on the way out
- // * `guard` - the lock guard that is held over this channel's lock
- fn wakeup_senders(&self, waited: bool, mut guard: MutexGuard<'_, State<T>>) {
- let pending_sender1: Option<SignalToken> = guard.queue.dequeue();
-
- // If this is a no-buffer channel (cap == 0), then if we didn't wait we
- // need to ACK the sender. If we waited, then the sender waking us up
- // was already the ACK.
- let pending_sender2 = if guard.cap == 0 && !waited {
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedReceiver(..) => unreachable!(),
- BlockedSender(token) => {
- guard.canceled.take();
- Some(token)
- }
- }
- } else {
- None
- };
- mem::drop(guard);
-
- // only outside of the lock do we wake up the pending threads
- if let Some(token) = pending_sender1 {
- token.signal();
- }
- if let Some(token) = pending_sender2 {
- token.signal();
- }
- }
-
- // Prepares this shared packet for a channel clone, essentially just bumping
- // a refcount.
- pub fn clone_chan(&self) {
- let old_count = self.channels.fetch_add(1, Ordering::SeqCst);
-
- // See comments on Arc::clone() on why we do this (for `mem::forget`).
- if old_count > MAX_REFCOUNT {
- abort();
- }
- }
-
- pub fn drop_chan(&self) {
- // Only flag the channel as disconnected if we're the last channel
- match self.channels.fetch_sub(1, Ordering::SeqCst) {
- 1 => {}
- _ => return,
- }
-
- // Not much to do other than wake up a receiver if one's there
- let mut guard = self.lock.lock().unwrap();
- if guard.disconnected {
- return;
- }
- guard.disconnected = true;
- match mem::replace(&mut guard.blocker, NoneBlocked) {
- NoneBlocked => {}
- BlockedSender(..) => unreachable!(),
- BlockedReceiver(token) => wakeup(token, guard),
- }
- }
-
- pub fn drop_port(&self) {
- let mut guard = self.lock.lock().unwrap();
-
- if guard.disconnected {
- return;
- }
- guard.disconnected = true;
-
- // If the capacity is 0, then the sender may want its data back after
- // we're disconnected. Otherwise it's now our responsibility to destroy
- // the buffered data. As with many other portions of this code, this
- // needs to be careful to destroy the data *outside* of the lock to
- // prevent deadlock.
- let _data = if guard.cap != 0 { mem::take(&mut guard.buf.buf) } else { Vec::new() };
- let mut queue =
- mem::replace(&mut guard.queue, Queue { head: ptr::null_mut(), tail: ptr::null_mut() });
-
- let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) {
- NoneBlocked => None,
- BlockedSender(token) => {
- *guard.canceled.take().unwrap() = true;
- Some(token)
- }
- BlockedReceiver(..) => unreachable!(),
- };
- mem::drop(guard);
-
- while let Some(token) = queue.dequeue() {
- token.signal();
- }
- if let Some(token) = waiter {
- token.signal();
- }
- }
-}
-
-impl<T> Drop for Packet<T> {
- fn drop(&mut self) {
- assert_eq!(self.channels.load(Ordering::SeqCst), 0);
- let mut guard = self.lock.lock().unwrap();
- assert!(guard.queue.dequeue().is_none());
- assert!(guard.canceled.is_none());
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Buffer, a simple ring buffer backed by Vec<T>
-////////////////////////////////////////////////////////////////////////////////
-
-impl<T> Buffer<T> {
- fn enqueue(&mut self, t: T) {
- let pos = (self.start + self.size) % self.buf.len();
- self.size += 1;
- let prev = mem::replace(&mut self.buf[pos], Some(t));
- assert!(prev.is_none());
- }
-
- fn dequeue(&mut self) -> T {
- let start = self.start;
- self.size -= 1;
- self.start = (self.start + 1) % self.buf.len();
- let result = &mut self.buf[start];
- result.take().unwrap()
- }
-
- fn size(&self) -> usize {
- self.size
- }
- fn capacity(&self) -> usize {
- self.buf.len()
- }
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Queue, a simple queue to enqueue threads with (stack-allocated nodes)
-////////////////////////////////////////////////////////////////////////////////
-
-impl Queue {
- fn enqueue(&mut self, node: &mut Node) -> WaitToken {
- let (wait_token, signal_token) = blocking::tokens();
- node.token = Some(signal_token);
- node.next = ptr::null_mut();
-
- if self.tail.is_null() {
- self.head = node as *mut Node;
- self.tail = node as *mut Node;
- } else {
- unsafe {
- (*self.tail).next = node as *mut Node;
- self.tail = node as *mut Node;
- }
- }
-
- wait_token
- }
-
- fn dequeue(&mut self) -> Option<SignalToken> {
- if self.head.is_null() {
- return None;
- }
- let node = self.head;
- self.head = unsafe { (*node).next };
- if self.head.is_null() {
- self.tail = ptr::null_mut();
- }
- unsafe {
- (*node).next = ptr::null_mut();
- Some((*node).token.take().unwrap())
- }
- }
-}
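
The rendezvous semantics described in the deleted module doc survive in the replacement implementation and are observable through the stable `sync_channel` API: with capacity 0 every send blocks until a matching receive, while a nonzero capacity buffers that many messages. A short usage example:

```rust
use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // Capacity 0: every successful send is paired with a successful recv.
    let (tx, rx) = sync_channel::<i32>(0);
    let t = thread::spawn(move || {
        tx.send(42).unwrap(); // blocks until the main thread receives
    });
    assert_eq!(rx.recv().unwrap(), 42);
    t.join().unwrap();

    // Capacity 1: one message can be buffered without a waiting receiver.
    let (tx, rx) = sync_channel::<i32>(1);
    tx.send(1).unwrap(); // returns immediately
    assert!(tx.try_send(2).is_err()); // buffer is full
    assert_eq!(rx.recv().unwrap(), 1);
}
```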
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index 63c794369..9d2f92ffc 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -1,5 +1,6 @@
use super::*;
use crate::env;
+use crate::sync::mpmc::SendTimeoutError;
use crate::thread;
use crate::time::Duration;
@@ -42,6 +43,13 @@ fn recv_timeout() {
}
#[test]
+fn send_timeout() {
+ let (tx, _rx) = sync_channel::<i32>(1);
+ assert_eq!(tx.send_timeout(1, Duration::from_millis(1)), Ok(()));
+ assert_eq!(tx.send_timeout(1, Duration::from_millis(1)), Err(SendTimeoutError::Timeout(1)));
+}
+
+#[test]
fn smoke_threads() {
let (tx, rx) = sync_channel::<i32>(0);
let _t = thread::spawn(move || {
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index f6d0796f6..1e52a4a70 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -706,3 +706,18 @@ fn issue_32114() {
let _ = tx.send(123);
assert_eq!(tx.send(123), Err(SendError(123)));
}
+
+#[test]
+fn issue_39364() {
+ let (tx, rx) = channel::<()>();
+ let t = thread::spawn(move || {
+ thread::sleep(Duration::from_millis(300));
+ let _ = tx.clone();
+ // Don't drop; hand back to caller.
+ tx
+ });
+
+ let _ = rx.recv_timeout(Duration::from_millis(500));
+ let _tx = t.join().unwrap(); // delay dropping until end of test
+ let _ = rx.recv_timeout(Duration::from_millis(500));
+}
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index de851c8fb..065045f44 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -5,7 +5,7 @@ use crate::cell::UnsafeCell;
use crate::fmt;
use crate::ops::{Deref, DerefMut};
use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::mutex as sys;
+use crate::sys::locks as sys;
/// A mutual exclusion primitive useful for protecting shared data
///
@@ -163,7 +163,7 @@ use crate::sys_common::mutex as sys;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Mutex")]
pub struct Mutex<T: ?Sized> {
- inner: sys::MovableMutex,
+ inner: sys::Mutex,
poison: poison::Flag,
data: UnsafeCell<T>,
}
@@ -217,11 +217,7 @@ impl<T> Mutex<T> {
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
#[inline]
pub const fn new(t: T) -> Mutex<T> {
- Mutex {
- inner: sys::MovableMutex::new(),
- poison: poison::Flag::new(),
- data: UnsafeCell::new(t),
- }
+ Mutex { inner: sys::Mutex::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
}
}
@@ -264,7 +260,7 @@ impl<T: ?Sized> Mutex<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
unsafe {
- self.inner.raw_lock();
+ self.inner.lock();
MutexGuard::new(self)
}
}
@@ -526,7 +522,7 @@ impl<T: ?Sized> Drop for MutexGuard<'_, T> {
fn drop(&mut self) {
unsafe {
self.lock.poison.done(&self.poison);
- self.lock.inner.raw_unlock();
+ self.lock.inner.unlock();
}
}
}
@@ -545,7 +541,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
}
}
-pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::MovableMutex {
+pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
&guard.lock.inner
}
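
With the `MovableMutex` indirection gone, `Mutex::new` allocates nothing and remains `const`, so a lock can be placed directly in a `static` with no lazy-initialization wrapper. For example:

```rust
use std::sync::Mutex;

// `Mutex::new` is a `const fn` (stable since 1.63), so the lock lives in
// static storage and needs no heap allocation or runtime initialization.
static COUNTER: Mutex<u64> = Mutex::new(0);

fn main() {
    *COUNTER.lock().unwrap() += 1;
    assert_eq!(*COUNTER.lock().unwrap(), 1);
}
```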
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 37413ec62..16d1fd2a5 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -7,7 +7,9 @@ use crate::sync::Once;
/// A synchronization primitive which can be written to only once.
///
-/// This type is a thread-safe `OnceCell`.
+/// This type is a thread-safe [`OnceCell`], and can be used in statics.
+///
+/// [`OnceCell`]: crate::cell::OnceCell
///
/// # Examples
///
@@ -33,7 +35,7 @@ use crate::sync::Once;
#[unstable(feature = "once_cell", issue = "74465")]
pub struct OnceLock<T> {
once: Once,
- // Whether or not the value is initialized is tracked by `state_and_queue`.
+ // Whether or not the value is initialized is tracked by `once.is_completed()`.
value: UnsafeCell<MaybeUninit<T>>,
/// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
///
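
The updated doc comment's point, that `OnceLock` works in statics, looks like this in practice (the type was still behind the `once_cell` feature when this change landed; it was stabilized later):

```rust
use std::sync::OnceLock;

// A thread-safe once-initialized value in static storage.
static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // The closure runs at most once, even under concurrent first access.
    CONFIG.get_or_init(|| "default".to_string())
}

fn main() {
    assert_eq!(config(), "default");
}
```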
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 8b3877607..7c409cb3e 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -6,7 +6,7 @@ use crate::fmt;
use crate::ops::{Deref, DerefMut};
use crate::ptr::NonNull;
use crate::sync::{poison, LockResult, TryLockError, TryLockResult};
-use crate::sys_common::rwlock as sys;
+use crate::sys::locks as sys;
/// A reader-writer lock
///
@@ -78,7 +78,7 @@ use crate::sys_common::rwlock as sys;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
pub struct RwLock<T: ?Sized> {
- inner: sys::MovableRwLock,
+ inner: sys::RwLock,
poison: poison::Flag,
data: UnsafeCell<T>,
}
@@ -109,7 +109,7 @@ pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
// is preferable over `const* T` to allow for niche optimization.
data: NonNull<T>,
- inner_lock: &'a sys::MovableRwLock,
+ inner_lock: &'a sys::RwLock,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -158,11 +158,7 @@ impl<T> RwLock<T> {
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
#[inline]
pub const fn new(t: T) -> RwLock<T> {
- RwLock {
- inner: sys::MovableRwLock::new(),
- poison: poison::Flag::new(),
- data: UnsafeCell::new(t),
- }
+ RwLock { inner: sys::RwLock::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t) }
}
}
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
index e8e7c51cb..3edbe7280 100644
--- a/library/std/src/sys/common/alloc.rs
+++ b/library/std/src/sys/common/alloc.rs
@@ -4,7 +4,7 @@ use crate::ptr;
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
-#[cfg(all(any(
+#[cfg(any(
target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
@@ -16,9 +16,9 @@ use crate::ptr;
target_arch = "hexagon",
all(target_arch = "riscv32", not(target_os = "espidf")),
all(target_arch = "xtensa", not(target_os = "espidf")),
-)))]
+))]
pub const MIN_ALIGN: usize = 8;
-#[cfg(all(any(
+#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "mips64",
@@ -26,13 +26,13 @@ pub const MIN_ALIGN: usize = 8;
target_arch = "sparc64",
target_arch = "riscv64",
target_arch = "wasm64",
-)))]
+))]
pub const MIN_ALIGN: usize = 16;
// The allocator on the esp-idf platform guarantees 4 byte alignment.
-#[cfg(all(any(
+#[cfg(any(
all(target_arch = "riscv32", target_os = "espidf"),
all(target_arch = "xtensa", target_os = "espidf"),
-)))]
+))]
pub const MIN_ALIGN: usize = 4;
pub unsafe fn realloc_fallback(
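
The point of `MIN_ALIGN` is the fast path it enables: a request whose alignment is already guaranteed by the platform allocator can go straight to plain `malloc`, and only larger alignments need `posix_memalign` and friends. A sketch of the dispatch decision (constants and names illustrative; the `align <= size` condition mirrors the one in std's Unix allocator):

```rust
// Illustrative value for an x86_64-style platform, per the table above.
const MIN_ALIGN: usize = 16;

// Returns true when a plain `malloc` cannot be trusted to satisfy the
// request and an aligned allocation API must be used instead.
fn needs_aligned_alloc(size: usize, align: usize) -> bool {
    // `malloc` only guarantees MIN_ALIGN for suitably large requests,
    // hence the extra `align <= size` condition.
    !(align <= MIN_ALIGN && align <= size)
}

fn main() {
    assert!(!needs_aligned_alloc(64, 8)); // fast path: plain malloc
    assert!(needs_aligned_alloc(64, 32)); // slow path: aligned API
}
```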
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index af297ff1e..6fb92c037 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -1,10 +1,8 @@
-use crate::convert::TryFrom;
-use crate::ffi::{CStr, CString, OsString};
+use crate::ffi::{CStr, OsString};
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::io::{self, Error, ErrorKind};
use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
-use crate::os::unix::ffi::OsStrExt;
use crate::path::{Path, PathBuf};
use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::cvt;
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index e6534df89..6811fadb0 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -51,9 +51,9 @@ pub mod locks {
mod futex_condvar;
mod futex_mutex;
mod futex_rwlock;
- pub(crate) use futex_condvar::MovableCondvar;
- pub(crate) use futex_mutex::{MovableMutex, Mutex};
- pub(crate) use futex_rwlock::{MovableRwLock, RwLock};
+ pub(crate) use futex_condvar::Condvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
}
use crate::io::ErrorKind;
diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs
index e53a1fea6..8f65544a9 100644
--- a/library/std/src/sys/hermit/thread.rs
+++ b/library/std/src/sys/hermit/thread.rs
@@ -5,6 +5,7 @@ use crate::ffi::CStr;
use crate::io;
use crate::mem;
use crate::num::NonZeroUsize;
+use crate::ptr;
use crate::sys::hermit::abi;
use crate::sys::hermit::thread_local_dtor::run_dtors;
use crate::time::Duration;
@@ -47,7 +48,7 @@ impl Thread {
extern "C" fn thread_start(main: usize) {
unsafe {
// Finally, let's run some code.
- Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ Box::from_raw(ptr::from_exposed_addr::<Box<dyn FnOnce()>>(main).cast_mut())();
// run all destructors
run_dtors();
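
The hermit change above replaces a bare `as` cast with the strict-provenance round-trip: the spawning side exposes the pointer's provenance when flattening it to an integer, and the entry point reconstructs a pointer that carries that provenance. The APIs used in the diff (`expose_addr`/`from_exposed_addr`) were unstable at the time; the sketch below uses their stabilized successors (`expose_provenance`/`with_exposed_provenance_mut`, stable since Rust 1.84):

```rust
use std::ptr;

fn main() {
    let boxed: Box<u32> = Box::new(7);

    // Spawning side: flatten the pointer to an integer (e.g. to pass it
    // through a C `usize` thread-entry argument), exposing its provenance.
    let addr: usize = Box::into_raw(boxed).expose_provenance();

    // Entry point: rebuild a pointer carrying the exposed provenance rather
    // than conjuring one out of thin air with a plain `as` cast.
    let p: *mut u32 = ptr::with_exposed_provenance_mut(addr);
    let boxed = unsafe { Box::from_raw(p) };
    assert_eq!(*boxed, 7);
}
```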
diff --git a/library/std/src/sys/itron/condvar.rs b/library/std/src/sys/itron/condvar.rs
index 008cd8fb1..7a47cc669 100644
--- a/library/std/src/sys/itron/condvar.rs
+++ b/library/std/src/sys/itron/condvar.rs
@@ -12,18 +12,13 @@ pub struct Condvar {
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
-pub type MovableCondvar = Condvar;
-
impl Condvar {
#[inline]
pub const fn new() -> Condvar {
Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
}
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- pub unsafe fn notify_one(&self) {
+ pub fn notify_one(&self) {
self.waiters.with_locked(|waiters| {
if let Some(task) = waiters.pop_front() {
// Unpark the task
@@ -39,7 +34,7 @@ impl Condvar {
});
}
- pub unsafe fn notify_all(&self) {
+ pub fn notify_all(&self) {
self.waiters.with_locked(|waiters| {
while let Some(task) = waiters.pop_front() {
// Unpark the task
@@ -76,7 +71,7 @@ impl Condvar {
}
}
- unsafe { mutex.lock() };
+ mutex.lock();
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
@@ -114,7 +109,7 @@ impl Condvar {
// we woke up because of `notify_*`.
let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) });
- unsafe { mutex.lock() };
+ mutex.lock();
success
}
}
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
index 085662e6d..1f6cc4194 100644
--- a/library/std/src/sys/itron/mutex.rs
+++ b/library/std/src/sys/itron/mutex.rs
@@ -11,8 +11,6 @@ pub struct Mutex {
mtx: SpinIdOnceCell<()>,
}
-pub type MovableMutex = Mutex;
-
/// Create a mutex object. This function never panics.
fn new_mtx() -> Result<abi::ID, ItronError> {
ItronError::err_if_negative(unsafe {
@@ -39,7 +37,7 @@ impl Mutex {
}
}
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
let mtx = self.raw();
expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
}
@@ -49,7 +47,7 @@ impl Mutex {
expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
}
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
let mtx = self.raw();
match unsafe { abi::ploc_mtx(mtx) } {
abi::E_TMOUT => false,
@@ -74,7 +72,7 @@ pub(super) struct MutexGuard<'a>(&'a Mutex);
impl<'a> MutexGuard<'a> {
#[inline]
pub(super) fn lock(x: &'a Mutex) -> Self {
- unsafe { x.lock() };
+ x.lock();
Self(x)
}
}
diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs
index d28f57f33..c2b366808 100644
--- a/library/std/src/sys/itron/thread.rs
+++ b/library/std/src/sys/itron/thread.rs
@@ -11,18 +11,25 @@ use crate::{
ffi::CStr,
hint, io,
mem::ManuallyDrop,
+ ptr::NonNull,
sync::atomic::{AtomicUsize, Ordering},
sys::thread_local_dtor::run_dtors,
time::Duration,
};
pub struct Thread {
- inner: ManuallyDrop<Box<ThreadInner>>,
+ p_inner: NonNull<ThreadInner>,
/// The ID of the underlying task.
task: abi::ID,
}
+// Safety: There's nothing in `Thread` that ties it to the original creator. It
+// can be dropped by any thread.
+unsafe impl Send for Thread {}
+// Safety: `Thread` provides no methods that take `&self`.
+unsafe impl Sync for Thread {}
+
/// State data shared between a parent thread and child thread. It's dropped on
/// a transition to one of the final states.
struct ThreadInner {
@@ -90,8 +97,9 @@ impl Thread {
});
unsafe extern "C" fn trampoline(exinf: isize) {
+ let p_inner: *mut ThreadInner = crate::ptr::from_exposed_addr_mut(exinf as usize);
// Safety: `ThreadInner` is alive at this point
- let inner = unsafe { &*(exinf as *const ThreadInner) };
+ let inner = unsafe { &*p_inner };
// Safety: Since `trampoline` is called only once for each
// `ThreadInner` and only `trampoline` touches `start`,
@@ -119,13 +127,13 @@ impl Thread {
// No one will ever join, so we'll ask the collector task to
// delete the task.
- // In this case, `inner`'s ownership has been moved to us,
- // And we are responsible for dropping it. The acquire
+ // In this case, `*p_inner`'s ownership has been moved to
+ // us, and we are responsible for dropping it. The acquire
// ordering is not necessary because the parent thread made
// no memory access needing synchronization since the call
// to `acre_tsk`.
// Safety: See above.
- let _ = unsafe { Box::from_raw(inner as *const _ as *mut ThreadInner) };
+ let _ = unsafe { Box::from_raw(p_inner) };
// Safety: There are no pinned references to the stack
unsafe { terminate_and_delete_current_task() };
@@ -162,13 +170,14 @@ impl Thread {
}
}
- let inner_ptr = (&*inner) as *const ThreadInner;
+ // Safety: `Box::into_raw` returns a non-null pointer
+ let p_inner = unsafe { NonNull::new_unchecked(Box::into_raw(inner)) };
let new_task = ItronError::err_if_negative(unsafe {
abi::acre_tsk(&abi::T_CTSK {
// Activate this task immediately
tskatr: abi::TA_ACT,
- exinf: inner_ptr as abi::EXINF,
+ exinf: p_inner.as_ptr().expose_addr() as abi::EXINF,
// The entry point
task: Some(trampoline),
// Inherit the calling task's base priority
@@ -180,7 +189,7 @@ impl Thread {
})
.map_err(|e| e.as_io_error())?;
- Ok(Self { inner: ManuallyDrop::new(inner), task: new_task })
+ Ok(Self { p_inner, task: new_task })
}
pub fn yield_now() {
@@ -197,8 +206,9 @@ impl Thread {
}
}
- pub fn join(mut self) {
- let inner = &*self.inner;
+ pub fn join(self) {
+ // Safety: `ThreadInner` is alive at this point
+ let inner = unsafe { self.p_inner.as_ref() };
// Get the current task ID. Panicking here would cause a resource leak,
// so just abort on failure.
let current_task = task::current_task_id_aborting();
@@ -243,8 +253,8 @@ impl Thread {
unsafe { terminate_and_delete_task(self.task) };
// In either case, we are responsible for dropping `inner`.
- // Safety: The contents of `self.inner` will not be accessed hereafter
- let _inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+ // Safety: The contents of `*p_inner` will not be accessed hereafter
+ let _inner = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
// Skip the destructor (because it would attempt to detach the thread)
crate::mem::forget(self);
@@ -253,13 +263,16 @@ impl Thread {
impl Drop for Thread {
fn drop(&mut self) {
+ // Safety: `ThreadInner` is alive at this point
+ let inner = unsafe { self.p_inner.as_ref() };
+
// Detach the thread.
- match self.inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+ match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
LIFECYCLE_INIT => {
// [INIT → DETACHED]
// When the time comes, the child will figure out that no
// one will ever join it.
- // The ownership of `self.inner` is moved to the child thread.
+ // The ownership of `*p_inner` is moved to the child thread.
// However, the release ordering is not necessary because we
// made no memory access needing synchronization since the call
// to `acre_tsk`.
@@ -278,10 +291,9 @@ impl Drop for Thread {
// delete by entering the `FINISHED` state.
unsafe { terminate_and_delete_task(self.task) };
- // Wwe are responsible for dropping `inner`.
- // Safety: The contents of `self.inner` will not be accessed
- // hereafter
- unsafe { ManuallyDrop::drop(&mut self.inner) };
+ // We are responsible for dropping `*p_inner`.
+ // Safety: The contents of `*p_inner` will not be accessed hereafter
+ let _ = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
}
_ => unsafe { hint::unreachable_unchecked() },
}
diff --git a/library/std/src/sys/sgx/condvar.rs b/library/std/src/sys/sgx/condvar.rs
index 36534e0ef..aa1174664 100644
--- a/library/std/src/sys/sgx/condvar.rs
+++ b/library/std/src/sys/sgx/condvar.rs
@@ -4,42 +4,43 @@ use crate::time::Duration;
use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedCondvar(SpinMutex<WaitVariable<()>>);
+
pub struct Condvar {
- inner: SpinMutex<WaitVariable<()>>,
+ inner: LazyBox<AllocatedCondvar>,
}
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-impl LazyInit for Condvar {
+impl LazyInit for AllocatedCondvar {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedCondvar(SpinMutex::new(WaitVariable::new(()))))
}
}
impl Condvar {
pub const fn new() -> Condvar {
- Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
+ Condvar { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn notify_one(&self) {
- let _ = WaitQueue::notify_one(self.inner.lock());
+ pub fn notify_one(&self) {
+ let _ = WaitQueue::notify_one(self.inner.0.lock());
}
#[inline]
- pub unsafe fn notify_all(&self) {
- let _ = WaitQueue::notify_all(self.inner.lock());
+ pub fn notify_all(&self) {
+ let _ = WaitQueue::notify_all(self.inner.0.lock());
}
pub unsafe fn wait(&self, mutex: &Mutex) {
- let guard = self.inner.lock();
+ let guard = self.inner.0.lock();
WaitQueue::wait(guard, || unsafe { mutex.unlock() });
- unsafe { mutex.lock() }
+ mutex.lock()
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let success = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
- unsafe { mutex.lock() };
+ let success = WaitQueue::wait_timeout(&self.inner.0, dur, || unsafe { mutex.unlock() });
+ mutex.lock();
success
}
}
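
`LazyBox` is used here because the SGX wait-queue state (`UnsafeList`) must never move once in use: the outer handle stays movable and `const`-constructible, while the real state is heap-allocated on first use and therefore has a stable address. A minimal sketch of that idea (a hypothetical type, much simpler than std's `LazyBox`, which also handles init cancellation):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct LazyBox<T> {
    ptr: AtomicPtr<T>,
}

impl<T> LazyBox<T> {
    const fn new() -> Self {
        LazyBox { ptr: AtomicPtr::new(ptr::null_mut()) }
    }
}

impl<T: Default> LazyBox<T> {
    fn get(&self) -> &T {
        let mut p = self.ptr.load(Ordering::Acquire);
        if p.is_null() {
            let new = Box::into_raw(Box::new(T::default()));
            match self.ptr.compare_exchange(
                ptr::null_mut(), new, Ordering::AcqRel, Ordering::Acquire,
            ) {
                Ok(_) => p = new,
                Err(winner) => {
                    // Lost the race: free our allocation, use the winner's.
                    drop(unsafe { Box::from_raw(new) });
                    p = winner;
                }
            }
        }
        // The allocation is never moved or freed while `self` is alive, so
        // the inner value has a stable address.
        unsafe { &*p }
    }
}

impl<T> Drop for LazyBox<T> {
    fn drop(&mut self) {
        let p = *self.ptr.get_mut();
        if !p.is_null() {
            drop(unsafe { Box::from_raw(p) });
        }
    }
}

fn main() {
    let b: LazyBox<u64> = LazyBox::new();
    assert_eq!(*b.get(), 0); // allocated on first access
}
```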
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index b1d32929e..01e4ffe3d 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -3,6 +3,7 @@
//! This module contains the facade (aka platform-specific) implementations of
//! OS level functionality for Fortanix SGX.
#![deny(unsafe_op_in_unsafe_fn)]
+#![allow(fuzzy_provenance_casts)] // FIXME: this entire module systematically confuses pointers and integers
use crate::io::ErrorKind;
use crate::sync::atomic::{AtomicBool, Ordering};
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index aa747d56b..0dbf020eb 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -1,28 +1,28 @@
use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedMutex(SpinMutex<WaitVariable<bool>>);
+
pub struct Mutex {
- inner: SpinMutex<WaitVariable<bool>>,
+ inner: LazyBox<AllocatedMutex>,
}
-// not movable: see UnsafeList implementation
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedMutex(SpinMutex::new(WaitVariable::new(false))))
}
}
// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
impl Mutex {
pub const fn new() -> Mutex {
- Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
+ Mutex { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn lock(&self) {
- let mut guard = self.inner.lock();
+ pub fn lock(&self) {
+ let mut guard = self.inner.0.lock();
if *guard.lock_var() {
// Another thread has the lock, wait
WaitQueue::wait(guard, || {})
@@ -35,7 +35,7 @@ impl Mutex {
#[inline]
pub unsafe fn unlock(&self) {
- let guard = self.inner.lock();
+ let guard = self.inner.0.lock();
if let Err(mut guard) = WaitQueue::notify_one(guard) {
// No other waiters, unlock
*guard.lock_var_mut() = false;
@@ -45,8 +45,8 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let mut guard = try_lock_or_false!(self.inner);
+ pub fn try_lock(&self) -> bool {
+ let mut guard = try_lock_or_false!(self.inner.0);
if *guard.lock_var() {
// Another thread has the lock
false
diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs
index a97fb9ab0..d89de18ca 100644
--- a/library/std/src/sys/sgx/rwlock.rs
+++ b/library/std/src/sys/sgx/rwlock.rs
@@ -7,42 +7,45 @@ use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use super::waitqueue::{
try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
};
-use crate::mem;
+use crate::alloc::Layout;
-pub struct RwLock {
+struct AllocatedRwLock {
readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
writer: SpinMutex<WaitVariable<bool>>,
}
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+pub struct RwLock {
+ inner: LazyBox<AllocatedRwLock>,
+}
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedRwLock {
+ readers: SpinMutex::new(WaitVariable::new(None)),
+ writer: SpinMutex::new(WaitVariable::new(false)),
+ })
}
}
-// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
-//
-// # Safety
-// Never called, as it is a compile time check.
-#[allow(dead_code)]
-unsafe fn rw_lock_size_assert(r: RwLock) {
- unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
-}
+// Check at compile time that RwLock's size and alignment matches the C definition
+// in libunwind (see also `test_c_rwlock_initializer` in `tests`).
+const _: () = {
+ let rust = Layout::new::<RwLock>();
+ let c = Layout::new::<*mut ()>();
+ assert!(rust.size() == c.size());
+ assert!(rust.align() == c.align());
+};
impl RwLock {
pub const fn new() -> RwLock {
- RwLock {
- readers: SpinMutex::new(WaitVariable::new(None)),
- writer: SpinMutex::new(WaitVariable::new(false)),
- }
+ RwLock { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn read(&self) {
- let mut rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ pub fn read(&self) {
+ let lock = &*self.inner;
+ let mut rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
if *wguard.lock_var() || !wguard.queue_empty() {
// Another thread has or is waiting for the write lock, wait
drop(wguard);
@@ -57,8 +60,9 @@ impl RwLock {
#[inline]
pub unsafe fn try_read(&self) -> bool {
- let mut rguard = try_lock_or_false!(self.readers);
- let wguard = try_lock_or_false!(self.writer);
+ let lock = &*self.inner;
+ let mut rguard = try_lock_or_false!(lock.readers);
+ let wguard = try_lock_or_false!(lock.writer);
if *wguard.lock_var() || !wguard.queue_empty() {
// Another thread has or is waiting for the write lock
false
@@ -71,9 +75,10 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
- let rguard = self.readers.lock();
- let mut wguard = self.writer.lock();
+ pub fn write(&self) {
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let mut wguard = lock.writer.lock();
if *wguard.lock_var() || rguard.lock_var().is_some() {
// Another thread has the lock, wait
drop(rguard);
@@ -86,9 +91,10 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
- let rguard = try_lock_or_false!(self.readers);
- let mut wguard = try_lock_or_false!(self.writer);
+ pub fn try_write(&self) -> bool {
+ let lock = &*self.inner;
+ let rguard = try_lock_or_false!(lock.readers);
+ let mut wguard = try_lock_or_false!(lock.writer);
if *wguard.lock_var() || rguard.lock_var().is_some() {
// Another thread has the lock
false
@@ -122,8 +128,9 @@ impl RwLock {
#[inline]
pub unsafe fn read_unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
unsafe { self.__read_unlock(rguard, wguard) };
}
@@ -158,8 +165,9 @@ impl RwLock {
#[inline]
pub unsafe fn write_unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
unsafe { self.__write_unlock(rguard, wguard) };
}
@@ -167,8 +175,9 @@ impl RwLock {
#[inline]
#[cfg_attr(test, allow(dead_code))]
unsafe fn unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
if *wguard.lock_var() == true {
unsafe { self.__write_unlock(rguard, wguard) };
} else {
@@ -201,6 +210,7 @@ pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
unsafe { (*p).write() };
return 0;
}
+
#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
diff --git a/library/std/src/sys/sgx/rwlock/tests.rs b/library/std/src/sys/sgx/rwlock/tests.rs
index 479996115..5fd6670af 100644
--- a/library/std/src/sys/sgx/rwlock/tests.rs
+++ b/library/std/src/sys/sgx/rwlock/tests.rs
@@ -1,22 +1,12 @@
use super::*;
+use crate::ptr;
// Verify that the byte pattern libunwind uses to initialize an RwLock is
// equivalent to the value of RwLock::new(). If the value changes,
// `src/UnwindRustSgx.h` in libunwind needs to be changed too.
#[test]
fn test_c_rwlock_initializer() {
- #[rustfmt::skip]
- const C_RWLOCK_INIT: &[u8] = &[
- /* 0x00 */ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x10 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x20 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x30 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x40 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x50 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x60 */ 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x70 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- ];
+ const C_RWLOCK_INIT: *mut () = ptr::null_mut();
// For the test to work, we need the padding/unused bytes in RwLock to be
// initialized as 0. In practice, this is the case with statics.
@@ -26,6 +16,6 @@ fn test_c_rwlock_initializer() {
// If the assertion fails, that's not necessarily an issue with the value
// of C_RWLOCK_INIT. It might just be an issue with the way padding
// bytes are initialized in the test code.
- assert_eq!(&crate::mem::transmute_copy::<_, [u8; 144]>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
+ assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
};
}
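
Both the `const` block in `rwlock.rs` and the rewritten test rely on the same trick: a `const _: () = { ... }` item runs its assertions at compile time, so a layout mismatch with the C definition becomes a build failure instead of a runtime surprise. The pattern in isolation (hypothetical struct):

```rust
use std::alloc::Layout;

#[repr(C)]
struct Header {
    tag: u32,
    len: u32,
}

// Evaluated at compile time: if `Header` ever stops matching the expected
// C layout, compilation fails at this item.
const _: () = {
    let layout = Layout::new::<Header>();
    assert!(layout.size() == 8);
    assert!(layout.align() == 4);
};

fn main() {}
```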
diff --git a/library/std/src/sys/solid/io.rs b/library/std/src/sys/solid/io.rs
index 9eb17a10d..a862bb787 100644
--- a/library/std/src/sys/solid/io.rs
+++ b/library/std/src/sys/solid/io.rs
@@ -75,3 +75,7 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
}
}
+
+pub fn is_terminal<T>(_: &T) -> bool {
+ false
+}
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index 4906c6268..6135921f0 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -1,7 +1,6 @@
use super::unsupported;
-use crate::convert::TryFrom;
use crate::error::Error as StdError;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::os::{
diff --git a/library/std/src/sys/solid/rwlock.rs b/library/std/src/sys/solid/rwlock.rs
index 0a770cf03..ecb4eb83b 100644
--- a/library/std/src/sys/solid/rwlock.rs
+++ b/library/std/src/sys/solid/rwlock.rs
@@ -12,8 +12,6 @@ pub struct RwLock {
rwl: SpinIdOnceCell<()>,
}
-pub type MovableRwLock = RwLock;
-
// Safety: `num_readers` is protected by `mtx_num_readers`
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
@@ -37,13 +35,13 @@ impl RwLock {
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let rwl = self.raw();
expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl");
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
let rwl = self.raw();
match unsafe { abi::rwl_ploc_rdl(rwl) } {
abi::E_TMOUT => false,
@@ -55,13 +53,13 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
let rwl = self.raw();
expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl");
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
let rwl = self.raw();
match unsafe { abi::rwl_ploc_wrl(rwl) } {
abi::E_TMOUT => false,
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index 117611ce4..5d89e5a13 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -53,8 +53,6 @@ const CONTESTED_BIT: u32 = 1;
// This can never be a valid `zx_handle_t`.
const UNLOCKED: u32 = 0;
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
futex: AtomicU32,
}
@@ -86,23 +84,27 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let thread_self = zx_thread_self();
+ pub fn try_lock(&self) -> bool {
+ let thread_self = unsafe { zx_thread_self() };
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
- let thread_self = zx_thread_self();
+ pub fn lock(&self) {
+ let thread_self = unsafe { zx_thread_self() };
if let Err(state) =
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
{
- self.lock_contested(state, thread_self);
+ unsafe {
+ self.lock_contested(state, thread_self);
+ }
}
}
+ /// # Safety
+ /// `thread_self` must be the handle for the current thread.
#[cold]
- fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+ unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
let owned_state = mark_contested(to_state(thread_self));
loop {
// Mark the mutex as contested if it is not already.
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
index c0576c178..4bd65dd25 100644
--- a/library/std/src/sys/unix/locks/futex_condvar.rs
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -3,8 +3,6 @@ use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
use crate::time::Duration;
-pub type MovableCondvar = Condvar;
-
pub struct Condvar {
// The value of this atomic is simply incremented on every notification.
// This is used by `.wait()` to not miss any notifications after
@@ -21,12 +19,12 @@ impl Condvar {
// All the memory orderings here are `Relaxed`,
// because synchronization is done by unlocking and locking the mutex.
- pub unsafe fn notify_one(&self) {
+ pub fn notify_one(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake(&self.futex);
}
- pub unsafe fn notify_all(&self) {
+ pub fn notify_all(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake_all(&self.futex);
}
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 33b13dad4..c01229586 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake};
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
/// 0: unlocked
/// 1: locked, no other threads waiting
@@ -20,12 +18,12 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
self.lock_contended();
}
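
The state encoding documented above (0: unlocked, 1: locked with no waiters, and a third "locked with waiters" state) is what lets `unlock` skip the wake-up syscall in the uncontended case. A runnable toy version of the same state machine; the real code parks on `futex_wait`/`futex_wake`, which this sketch replaces with `yield_now` so it compiles anywhere:

```rust
use std::sync::atomic::{
    AtomicU32,
    Ordering::{Acquire, Relaxed, Release},
};

struct ToyMutex {
    // 0: unlocked, 1: locked (no waiters), 2: locked (possible waiters).
    state: AtomicU32,
}

impl ToyMutex {
    const fn new() -> Self {
        ToyMutex { state: AtomicU32::new(0) }
    }

    fn lock(&self) {
        // Fast path: uncontended 0 -> 1 transition.
        if self.state.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
            self.lock_contended();
        }
    }

    #[cold]
    fn lock_contended(&self) {
        // Hold the state at 2 so the eventual unlocker knows to wake us.
        while self.state.swap(2, Acquire) != 0 {
            std::thread::yield_now(); // real code: futex_wait(&state, 2)
        }
    }

    fn unlock(&self) {
        if self.state.swap(0, Release) == 2 {
            // Someone may be parked; the real code calls futex_wake here.
        }
    }
}

fn main() {
    let m = ToyMutex::new();
    m.lock();
    assert!(m.state.compare_exchange(0, 1, Acquire, Relaxed).is_err());
    m.unlock();
}
```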
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index 0cc92244e..aa0de9002 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
-pub type MovableRwLock = RwLock;
-
pub struct RwLock {
// The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
// Bits 0..30:
@@ -70,14 +68,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let state = self.state.load(Relaxed);
if !is_read_lockable(state)
|| self
@@ -144,14 +142,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
self.write_contended();
}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index 9bb314b70..b2e0e49ad 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -10,22 +10,22 @@ cfg_if::cfg_if! {
mod futex_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else if #[cfg(target_os = "fuchsia")] {
mod fuchsia_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use fuchsia_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else {
mod pthread_mutex;
mod pthread_rwlock;
mod pthread_condvar;
- pub(crate) use pthread_mutex::{Mutex, MovableMutex};
- pub(crate) use pthread_rwlock::MovableRwLock;
- pub(crate) use pthread_condvar::MovableCondvar;
+ pub(crate) use pthread_mutex::Mutex;
+ pub(crate) use pthread_rwlock::RwLock;
+ pub(crate) use pthread_condvar::Condvar;
}
}
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 4741c0c67..1ddb09905 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -1,17 +1,17 @@
use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
use crate::sys::locks::{pthread_mutex, Mutex};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::time::Duration;
+struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
+
pub struct Condvar {
- inner: UnsafeCell<libc::pthread_cond_t>,
+ inner: LazyBox<AllocatedCondvar>,
+ mutex: AtomicPtr<libc::pthread_mutex_t>,
}
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
const TIMESPEC_MAX: libc::timespec =
libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
@@ -19,81 +19,104 @@ fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
}
-impl LazyInit for Condvar {
+#[inline]
+fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
+ c.inner.0.get()
+}
+
+unsafe impl Send for AllocatedCondvar {}
+unsafe impl Sync for AllocatedCondvar {}
+
+impl LazyInit for AllocatedCondvar {
fn init() -> Box<Self> {
- let mut condvar = Box::new(Self::new());
- unsafe { condvar.init() };
+ let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ // `pthread_condattr_setclock` is unfortunately not supported on these platforms.
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
+ // So on that platform, init() should always be called
+ // Moreover, that platform does not have pthread_condattr_setclock support,
+ // hence that initialization should be skipped as well
+ //
+ // Similar story for the 3DS (horizon).
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ }
+ }
+
condvar
}
}
-impl Condvar {
- pub const fn new() -> Condvar {
- // Might be moved and address is changing it is better to avoid
- // initialization of potentially opaque OS data before it landed
- Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+impl Drop for AllocatedCondvar {
+ #[inline]
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+ // a condvar that was just initialized with
+ // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+ // pthread_cond_init() is called, this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))]
- unsafe fn init(&mut self) {}
-
- // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
- // So on that platform, init() should always be called
- // Moreover, that platform does not have pthread_condattr_setclock support,
- // hence that initialization should be skipped as well
- //
- // Similar story for the 3DS (horizon).
- #[cfg(any(target_os = "espidf", target_os = "horizon"))]
- unsafe fn init(&mut self) {
- let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
- assert_eq!(r, 0);
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
}
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox",
- target_os = "espidf",
- target_os = "horizon"
- )))]
- unsafe fn init(&mut self) {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
+ #[inline]
+ fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
+ // Relaxed is okay here because we never read through `self.mutex`, and only use it
+ // to compare addresses.
+ match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
+ Ok(_) => {} // Stored the address
+ Err(n) if n == mutex => {} // Lost a race to store the same address
+ _ => panic!("attempted to use a condition variable with two mutexes"),
+ }
}
#[inline]
- pub unsafe fn notify_one(&self) {
- let r = libc::pthread_cond_signal(self.inner.get());
+ pub fn notify_one(&self) {
+ let r = unsafe { libc::pthread_cond_signal(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
- pub unsafe fn notify_all(&self) {
- let r = libc::pthread_cond_broadcast(self.inner.get());
+ pub fn notify_all(&self) {
+ let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+ let r = libc::pthread_cond_wait(raw(self), mutex);
debug_assert_eq!(r, 0);
}
@@ -112,6 +135,9 @@ impl Condvar {
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
use crate::mem;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
let mut now: libc::timespec = mem::zeroed();
let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
assert_eq!(r, 0);
@@ -127,7 +153,7 @@ impl Condvar {
let timeout =
sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
assert!(r == libc::ETIMEDOUT || r == 0);
r == 0
}
@@ -144,9 +170,11 @@ impl Condvar {
target_os = "horizon"
))]
pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
- use crate::ptr;
use crate::time::Instant;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
// 1000 years
let max_dur = Duration::from_secs(1000 * 365 * 86400);
@@ -187,36 +215,11 @@ impl Condvar {
.unwrap_or(TIMESPEC_MAX);
// And wait!
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
debug_assert!(r == libc::ETIMEDOUT || r == 0);
// ETIMEDOUT is not a totally reliable method of determining timeout due
// to clock shifts, so do the check ourselves
stable_now.elapsed() < dur
}
-
- #[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- // On DragonFly pthread_cond_destroy() returns EINVAL if called on
- // a condvar that was just initialized with
- // libc::PTHREAD_COND_INITIALIZER. Once it is used or
- // pthread_cond_init() is called, this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
- }
-}
-
-impl Drop for Condvar {
- #[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
- }
}
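
The `verify` method above inlines what used to live in `sys_common/condvar/check.rs`: remember the first mutex this condvar is paired with, and panic if a different one ever shows up. A standalone sketch of the same pattern, with hypothetical names and a bare `*mut ()` standing in for the pthread mutex pointer:

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    // Records the first mutex address it sees; panics on any other address.
    struct SameMutexCheck {
        addr: AtomicPtr<()>,
    }

    impl SameMutexCheck {
        const fn new() -> Self {
            Self { addr: AtomicPtr::new(ptr::null_mut()) }
        }

        fn verify(&self, mutex: *mut ()) {
            // Relaxed suffices: the pointer is only compared, never dereferenced.
            match self.addr.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
                Ok(_) => {}                // stored the address
                Err(p) if p == mutex => {} // lost a race to store the same address
                _ => panic!("condition variable used with two mutexes"),
            }
        }
    }

    fn main() {
        let check = SameMutexCheck::new();
        let mut a = ();
        check.verify(&mut a as *mut ());
        check.verify(&mut a as *mut ()); // same mutex again: fine
    }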
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 5964935dd..8a78bc1fd 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -3,56 +3,24 @@ use crate::mem::{forget, MaybeUninit};
use crate::sys::cvt_nz;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+struct AllocatedMutex(UnsafeCell<libc::pthread_mutex_t>);
+
pub struct Mutex {
- inner: UnsafeCell<libc::pthread_mutex_t>,
+ inner: LazyBox<AllocatedMutex>,
}
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
#[inline]
pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
- m.inner.get()
+ m.inner.0.get()
}
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
+unsafe impl Send for AllocatedMutex {}
+unsafe impl Sync for AllocatedMutex {}
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
fn init() -> Box<Self> {
- let mut mutex = Box::new(Self::new());
- unsafe { mutex.init() };
- mutex
- }
-
- fn destroy(mutex: Box<Self>) {
- // We're not allowed to pthread_mutex_destroy a locked mutex,
- // so check first if it's unlocked.
- if unsafe { mutex.try_lock() } {
- unsafe { mutex.unlock() };
- drop(mutex);
- } else {
- // The mutex is locked. This happens if a MutexGuard is leaked.
- // In this case, we just leak the Mutex too.
- forget(mutex);
- }
- }
-
- fn cancel_init(_: Box<Self>) {
- // In this case, we can just drop it without any checks,
- // since it cannot have been locked yet.
- }
-}
+ let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
-impl Mutex {
- pub const fn new() -> Mutex {
- // Might be moved to a different address, so it is better to avoid
- // initialization of potentially opaque OS data before it landed.
- // Be very careful using this newly constructed `Mutex`, reentrant
- // locking is undefined behavior until `init` is called!
- Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
- }
- #[inline]
- unsafe fn init(&mut self) {
// Issue #33770
//
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
@@ -77,49 +45,77 @@ impl Mutex {
// references, we instead create the mutex with type
// PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
// re-lock it from the same thread, thus avoiding undefined behavior.
- let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
- cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
- let attr = PthreadMutexAttr(&mut attr);
- cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+ unsafe {
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(
+ attr.0.as_mut_ptr(),
+ libc::PTHREAD_MUTEX_NORMAL,
+ ))
.unwrap();
- cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap();
+ }
+
+ mutex
}
- #[inline]
- pub unsafe fn lock(&self) {
- let r = libc::pthread_mutex_lock(self.inner.get());
- debug_assert_eq!(r, 0);
+
+ fn destroy(mutex: Box<Self>) {
+ // We're not allowed to pthread_mutex_destroy a locked mutex,
+ // so check first if it's unlocked.
+ if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } {
+ unsafe { libc::pthread_mutex_unlock(mutex.0.get()) };
+ drop(mutex);
+ } else {
+ // The mutex is locked. This happens if a MutexGuard is leaked.
+ // In this case, we just leak the Mutex too.
+ forget(mutex);
+ }
}
+
+ fn cancel_init(_: Box<Self>) {
+ // In this case, we can just drop it without any checks,
+ // since it cannot have been locked yet.
+ }
+}
+
+impl Drop for AllocatedMutex {
#[inline]
- pub unsafe fn unlock(&self) {
- let r = libc::pthread_mutex_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
+
+impl Mutex {
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- libc::pthread_mutex_trylock(self.inner.get()) == 0
+ pub const fn new() -> Mutex {
+ Mutex { inner: LazyBox::new() }
}
+
#[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
+ pub unsafe fn lock(&self) {
+ let r = libc::pthread_mutex_lock(raw(self));
debug_assert_eq!(r, 0);
}
+
#[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
- // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
- // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
- // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
- // this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
+ pub unsafe fn unlock(&self) {
+ let r = libc::pthread_mutex_unlock(raw(self));
+ debug_assert_eq!(r, 0);
}
-}
-impl Drop for Mutex {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(raw(self)) == 0
}
}
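
Both the condvar and the mutex now keep their OS object behind `LazyBox`, which heap-allocates on first use so the `pthread_*_t` never moves once initialized. A minimal sketch of the lazy-allocation idea — not std's actual `LazyBox`, which additionally routes teardown through `LazyInit::destroy` and `cancel_init`:

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Acquire}};

    struct LazyBox<T> {
        ptr: AtomicPtr<T>,
    }

    impl<T> LazyBox<T> {
        const fn new() -> Self {
            Self { ptr: AtomicPtr::new(ptr::null_mut()) }
        }

        // First caller allocates; racing losers free their box and share the winner's.
        fn get(&self, init: impl FnOnce() -> Box<T>) -> &T {
            let mut p = self.ptr.load(Acquire);
            if p.is_null() {
                let new = Box::into_raw(init());
                match self.ptr.compare_exchange(ptr::null_mut(), new, AcqRel, Acquire) {
                    Ok(_) => p = new,
                    Err(winner) => {
                        drop(unsafe { Box::from_raw(new) });
                        p = winner;
                    }
                }
            }
            unsafe { &*p }
        }
    }

    impl<T> Drop for LazyBox<T> {
        fn drop(&mut self) {
            let p = *self.ptr.get_mut();
            if !p.is_null() {
                drop(unsafe { Box::from_raw(p) });
            }
        }
    }

    fn main() {
        let lazy: LazyBox<String> = LazyBox::new();
        let s = lazy.get(|| Box::new(String::from("initialized on first use")));
        assert_eq!(s.as_str(), "initialized on first use");
    }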
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index adfe2a883..04662be9d 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -3,20 +3,26 @@ use crate::mem::forget;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-pub struct RwLock {
+struct AllocatedRwLock {
inner: UnsafeCell<libc::pthread_rwlock_t>,
write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
num_readers: AtomicUsize,
}
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+unsafe impl Send for AllocatedRwLock {}
+unsafe impl Sync for AllocatedRwLock {}
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
+pub struct RwLock {
+ inner: LazyBox<AllocatedRwLock>,
+}
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedRwLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ })
}
fn destroy(mut rwlock: Box<Self>) {
@@ -35,17 +41,39 @@ impl LazyInit for RwLock {
}
}
+impl AllocatedRwLock {
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl Drop for AllocatedRwLock {
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) };
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
+ }
+}
+
impl RwLock {
+ #[inline]
pub const fn new() -> RwLock {
- RwLock {
- inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
- write_locked: UnsafeCell::new(false),
- num_readers: AtomicUsize::new(0),
- }
+ RwLock { inner: LazyBox::new() }
}
+
#[inline]
- pub unsafe fn read(&self) {
- let r = libc::pthread_rwlock_rdlock(self.inner.get());
+ pub fn read(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) };
// According to POSIX, when a thread tries to acquire this read lock
// while it already holds the write lock
@@ -62,51 +90,61 @@ impl RwLock {
// got the write lock more than once, or got a read and a write lock.
if r == libc::EAGAIN {
panic!("rwlock maximum reader count exceeded");
- } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+ } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_rdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock read lock would result in deadlock");
} else {
// POSIX does not make guarantees about all the errors that may be returned.
// See issue #94705 for more details.
assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
}
}
+
#[inline]
- pub unsafe fn try_read(&self) -> bool {
- let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ pub fn try_read(&self) -> bool {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) };
if r == 0 {
- if *self.write_locked.get() {
+ if unsafe { *lock.write_locked.get() } {
// `pthread_rwlock_tryrdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
false
} else {
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
true
}
} else {
false
}
}
+
#[inline]
- pub unsafe fn write(&self) {
- let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ pub fn write(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) };
// See comments above for why we check for EDEADLK and write_locked. For the same reason,
// we also need to check that there are no readers (tracked in `num_readers`).
if r == libc::EDEADLK
- || (r == 0 && *self.write_locked.get())
- || self.num_readers.load(Ordering::Relaxed) != 0
+ || (r == 0 && unsafe { *lock.write_locked.get() })
+ || lock.num_readers.load(Ordering::Relaxed) != 0
{
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_wrlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock write lock would result in deadlock");
} else {
@@ -114,60 +152,44 @@ impl RwLock {
// return EDEADLK or 0. We rely on that.
debug_assert_eq!(r, 0);
}
- *self.write_locked.get() = true;
+
+ unsafe {
+ *lock.write_locked.get() = true;
+ }
}
+
#[inline]
pub unsafe fn try_write(&self) -> bool {
- let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ let lock = &*self.inner;
+ let r = libc::pthread_rwlock_trywrlock(lock.inner.get());
if r == 0 {
- if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
// `pthread_rwlock_trywrlock` succeeded when it should not have.
- self.raw_unlock();
+ lock.raw_unlock();
false
} else {
- *self.write_locked.get() = true;
+ *lock.write_locked.get() = true;
true
}
} else {
false
}
}
- #[inline]
- unsafe fn raw_unlock(&self) {
- let r = libc::pthread_rwlock_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
+
#[inline]
pub unsafe fn read_unlock(&self) {
- debug_assert!(!*self.write_locked.get());
- self.num_readers.fetch_sub(1, Ordering::Relaxed);
- self.raw_unlock();
- }
- #[inline]
- pub unsafe fn write_unlock(&self) {
- debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
- debug_assert!(*self.write_locked.get());
- *self.write_locked.get() = false;
- self.raw_unlock();
+ let lock = &*self.inner;
+ debug_assert!(!*lock.write_locked.get());
+ lock.num_readers.fetch_sub(1, Ordering::Relaxed);
+ lock.raw_unlock();
}
- #[inline]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_rwlock_destroy(self.inner.get());
- // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
- // rwlock that was just initialized with
- // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
- // or pthread_rwlock_init() is called, this behaviour no longer occurs.
- if cfg!(target_os = "dragonfly") {
- debug_assert!(r == 0 || r == libc::EINVAL);
- } else {
- debug_assert_eq!(r, 0);
- }
- }
-}
-impl Drop for RwLock {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn write_unlock(&self) {
+ let lock = &*self.inner;
+ debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*lock.write_locked.get());
+ *lock.write_locked.get() = false;
+ lock.raw_unlock();
}
}
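
The `write_locked` flag and `num_readers` counter exist because POSIX does not pin down what a second same-thread acquisition returns. A small Unix-only probe of that looseness — a sketch assuming the `libc` crate, with outcomes that legitimately vary by platform:

    fn main() {
        unsafe {
            let mut lock: libc::pthread_rwlock_t = libc::PTHREAD_RWLOCK_INITIALIZER;
            assert_eq!(libc::pthread_rwlock_wrlock(&mut lock), 0);
            // Non-blocking re-acquisition attempt from the owning thread:
            let r = libc::pthread_rwlock_trywrlock(&mut lock);
            // EBUSY, EDEADLK, or even success are all POSIX-conformant, which
            // is why std keeps its own bookkeeping instead of trusting `r`.
            assert!(r == libc::EBUSY || r == libc::EDEADLK || r == 0);
            if r == 0 {
                libc::pthread_rwlock_unlock(&mut lock); // undo the extra level
            }
            libc::pthread_rwlock_unlock(&mut lock);
            libc::pthread_rwlock_destroy(&mut lock);
        }
    }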
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index cca9c6767..d5abd9b58 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -149,7 +149,11 @@ impl From<libc::timespec> for Timespec {
}
}
-#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+#[cfg(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+))]
mod inner {
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::sys::cvt;
@@ -265,7 +269,11 @@ mod inner {
}
}
-#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
+#[cfg(not(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+)))]
mod inner {
use crate::fmt;
use crate::mem::MaybeUninit;
@@ -281,7 +289,11 @@ mod inner {
impl Instant {
pub fn now() -> Instant {
- Instant { t: Timespec::now(libc::CLOCK_MONOTONIC) }
+ #[cfg(target_os = "macos")]
+ const clock_id: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
+ #[cfg(not(target_os = "macos"))]
+ const clock_id: libc::clockid_t = libc::CLOCK_MONOTONIC;
+ Instant { t: Timespec::now(clock_id) }
}
pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
@@ -312,13 +324,8 @@ mod inner {
}
}
- #[cfg(not(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon")))]
- pub type clock_t = libc::c_int;
- #[cfg(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon"))]
- pub type clock_t = libc::c_ulong;
-
impl Timespec {
- pub fn now(clock: clock_t) -> Timespec {
+ pub fn now(clock: libc::clockid_t) -> Timespec {
// Try to use 64-bit time in preparation for Y2038.
#[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))]
{
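
With the `clock_t` alias gone, `Timespec::now` takes a plain `libc::clockid_t` on every platform. A sketch of the resulting clock selection, assuming the `libc` crate on a Unix target:

    use std::mem;
    use std::time::Duration;

    fn monotonic_now() -> Duration {
        // Mirrors the patch: CLOCK_UPTIME_RAW on macOS, CLOCK_MONOTONIC elsewhere.
        #[cfg(target_os = "macos")]
        const CLOCK_ID: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
        #[cfg(not(target_os = "macos"))]
        const CLOCK_ID: libc::clockid_t = libc::CLOCK_MONOTONIC;

        let mut ts: libc::timespec = unsafe { mem::zeroed() };
        let r = unsafe { libc::clock_gettime(CLOCK_ID, &mut ts) };
        assert_eq!(r, 0);
        Duration::new(ts.tv_sec as u64, ts.tv_nsec as u32)
    }

    fn main() {
        let a = monotonic_now();
        let b = monotonic_now();
        assert!(b >= a); // monotone, and unaffected by wall-clock adjustments
    }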
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
index e4ff21b25..f5a4ce929 100644
--- a/library/std/src/sys/unix/weak.rs
+++ b/library/std/src/sys/unix/weak.rs
@@ -29,7 +29,21 @@ use crate::ptr;
use crate::sync::atomic::{self, AtomicPtr, Ordering};
// We can use true weak linkage on ELF targets.
-#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), not(bootstrap)))]
+pub(crate) macro weak {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static $name: Option<unsafe extern "C" fn($($t),*) -> $ret>;
+ }
+ #[allow(unused_unsafe)]
+ ExternWeak::new(unsafe { $name })
+ };
+ )
+}
+
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), bootstrap))]
pub(crate) macro weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
@@ -47,11 +61,31 @@ pub(crate) macro weak {
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::dlsym as weak;
+#[cfg(not(bootstrap))]
+pub(crate) struct ExternWeak<F: Copy> {
+ weak_ptr: Option<F>,
+}
+
+#[cfg(not(bootstrap))]
+impl<F: Copy> ExternWeak<F> {
+ #[inline]
+ pub(crate) fn new(weak_ptr: Option<F>) -> Self {
+ ExternWeak { weak_ptr }
+ }
+
+ #[inline]
+ pub(crate) fn get(&self) -> Option<F> {
+ self.weak_ptr
+ }
+}
+
+#[cfg(bootstrap)]
pub(crate) struct ExternWeak<F> {
weak_ptr: *const libc::c_void,
_marker: PhantomData<F>,
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self {
@@ -59,6 +93,7 @@ impl<F> ExternWeak<F> {
}
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn get(&self) -> Option<F> {
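
On macOS and iOS, where `#[linkage = "extern_weak"]` is unavailable, `weak!` resolves through `dlsym` instead, and the new `ExternWeak` simply wraps the resulting `Option` of a function pointer. A hypothetical standalone version of that lookup, assuming the `libc` crate (`getentropy` is purely an illustrative symbol):

    use std::ffi::CStr;
    use std::mem;

    type GetEntropyFn = unsafe extern "C" fn(*mut libc::c_void, libc::size_t) -> libc::c_int;

    fn lookup_getentropy() -> Option<GetEntropyFn> {
        let name = CStr::from_bytes_with_nul(b"getentropy\0").unwrap();
        let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) };
        if sym.is_null() {
            None
        } else {
            // A function pointer smuggled through a data pointer; sound on
            // every platform std supports.
            Some(unsafe { mem::transmute::<*mut libc::c_void, GetEntropyFn>(sym) })
        }
    }

    fn main() {
        match lookup_getentropy() {
            Some(_) => println!("getentropy available at runtime"),
            None => println!("falling back to another entropy source"),
        }
    }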
diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs
index 527a26a12..3f0943b50 100644
--- a/library/std/src/sys/unsupported/locks/condvar.rs
+++ b/library/std/src/sys/unsupported/locks/condvar.rs
@@ -3,8 +3,6 @@ use crate::time::Duration;
pub struct Condvar {}
-pub type MovableCondvar = Condvar;
-
impl Condvar {
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
@@ -13,10 +11,10 @@ impl Condvar {
}
#[inline]
- pub unsafe fn notify_one(&self) {}
+ pub fn notify_one(&self) {}
#[inline]
- pub unsafe fn notify_all(&self) {}
+ pub fn notify_all(&self) {}
pub unsafe fn wait(&self, _mutex: &Mutex) {
panic!("condvar wait not supported")
diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs
index 602a2d623..0e0f9eccb 100644
--- a/library/std/src/sys/unsupported/locks/mod.rs
+++ b/library/std/src/sys/unsupported/locks/mod.rs
@@ -1,6 +1,6 @@
mod condvar;
mod mutex;
mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs
index 87ea475c6..4a13c55fb 100644
--- a/library/std/src/sys/unsupported/locks/mutex.rs
+++ b/library/std/src/sys/unsupported/locks/mutex.rs
@@ -5,8 +5,6 @@ pub struct Mutex {
locked: Cell<bool>,
}
-pub type MovableMutex = Mutex;
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {} // no threads on this platform
@@ -18,7 +16,7 @@ impl Mutex {
}
#[inline]
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
}
@@ -28,7 +26,7 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.locked.replace(true) == false
}
}
diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs
index 5292691b9..789ef9b29 100644
--- a/library/std/src/sys/unsupported/locks/rwlock.rs
+++ b/library/std/src/sys/unsupported/locks/rwlock.rs
@@ -5,8 +5,6 @@ pub struct RwLock {
mode: Cell<isize>,
}
-pub type MovableRwLock = RwLock;
-
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {} // no threads on this platform
@@ -18,7 +16,7 @@ impl RwLock {
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let m = self.mode.get();
if m >= 0 {
self.mode.set(m + 1);
@@ -28,7 +26,7 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
let m = self.mode.get();
if m >= 0 {
self.mode.set(m + 1);
@@ -39,14 +37,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
if self.mode.replace(-1) != 0 {
rtabort!("rwlock locked for reading")
}
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
if self.mode.get() == 0 {
self.mode.set(-1);
true
diff --git a/library/std/src/sys/wasi/net.rs b/library/std/src/sys/wasi/net.rs
index 590d268c3..cf4ebba1a 100644
--- a/library/std/src/sys/wasi/net.rs
+++ b/library/std/src/sys/wasi/net.rs
@@ -119,8 +119,14 @@ impl TcpStream {
unsupported()
}
- pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
- unsupported()
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let wasi_how = match how {
+ Shutdown::Read => wasi::SDFLAGS_RD,
+ Shutdown::Write => wasi::SDFLAGS_WR,
+ Shutdown::Both => wasi::SDFLAGS_RD | wasi::SDFLAGS_WR,
+ };
+
+ unsafe { wasi::sock_shutdown(self.socket().as_raw_fd() as _, wasi_how).map_err(err2io) }
}
pub fn duplicate(&self) -> io::Result<TcpStream> {
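
From the user's side, the change means `TcpStream::shutdown` starts working on wasm32-wasi instead of failing as unsupported:

    use std::io;
    use std::net::{Shutdown, TcpStream};

    // Half-close the stream: we are done sending, but can still read.
    fn finish_sending(stream: &TcpStream) -> io::Result<()> {
        stream.shutdown(Shutdown::Write)
    }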
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 93838390b..d68c3e5f1 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -55,9 +55,9 @@ cfg_if::cfg_if! {
mod futex_condvar;
mod futex_mutex;
mod futex_rwlock;
- pub(crate) use futex_condvar::{Condvar, MovableCondvar};
- pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
+ pub(crate) use futex_condvar::Condvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
}
#[path = "atomics/futex.rs"]
pub mod futex;
diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs
index 01f262982..6741ae46d 100644
--- a/library/std/src/sys/windows/args.rs
+++ b/library/std/src/sys/windows/args.rs
@@ -9,17 +9,16 @@ mod tests;
use crate::ffi::OsString;
use crate::fmt;
use crate::io;
-use crate::marker::PhantomData;
use crate::num::NonZeroU16;
use crate::os::windows::prelude::*;
use crate::path::PathBuf;
-use crate::ptr::NonNull;
use crate::sys::c;
use crate::sys::process::ensure_no_nuls;
use crate::sys::windows::os::current_exe;
+use crate::sys_common::wstr::WStrUnits;
use crate::vec;
-use core::iter;
+use crate::iter;
/// This is the const equivalent to `NonZeroU16::new(n).unwrap()`
///
@@ -199,55 +198,6 @@ impl ExactSizeIterator for Args {
}
}
-/// A safe iterator over a LPWSTR
-/// (aka a pointer to a series of UTF-16 code units terminated by a NULL).
-struct WStrUnits<'a> {
- // The pointer must never be null...
- lpwstr: NonNull<u16>,
- // ...and the memory it points to must be valid for this lifetime.
- lifetime: PhantomData<&'a [u16]>,
-}
-impl WStrUnits<'_> {
- /// Create the iterator. Returns `None` if `lpwstr` is null.
- ///
- /// SAFETY: `lpwstr` must point to a null-terminated wide string that lives
- /// at least as long as the lifetime of this struct.
- unsafe fn new(lpwstr: *const u16) -> Option<Self> {
- Some(Self { lpwstr: NonNull::new(lpwstr as _)?, lifetime: PhantomData })
- }
- fn peek(&self) -> Option<NonZeroU16> {
- // SAFETY: It's always safe to read the current item because we don't
- // ever move out of the array's bounds.
- unsafe { NonZeroU16::new(*self.lpwstr.as_ptr()) }
- }
- /// Advance the iterator while `predicate` returns true.
- /// Returns the number of items it advanced by.
- fn advance_while<P: FnMut(NonZeroU16) -> bool>(&mut self, mut predicate: P) -> usize {
- let mut counter = 0;
- while let Some(w) = self.peek() {
- if !predicate(w) {
- break;
- }
- counter += 1;
- self.next();
- }
- counter
- }
-}
-impl Iterator for WStrUnits<'_> {
- // This can never return zero as that marks the end of the string.
- type Item = NonZeroU16;
- fn next(&mut self) -> Option<NonZeroU16> {
- // SAFETY: If NULL is reached we immediately return.
- // Therefore it's safe to advance the pointer after that.
- unsafe {
- let next = self.peek()?;
- self.lpwstr = NonNull::new_unchecked(self.lpwstr.as_ptr().add(1));
- Some(next)
- }
- }
-}
-
#[derive(Debug)]
pub(crate) enum Arg {
/// Add quotes (if needed)
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index be6fc2ebb..81461de4f 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -56,6 +56,7 @@ pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION;
pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
pub type LPSTARTUPINFO = *mut STARTUPINFO;
pub type LPVOID = *mut c_void;
+pub type LPCVOID = *const c_void;
pub type LPWCH = *mut WCHAR;
pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
pub type LPWSADATA = *mut WSADATA;
@@ -362,7 +363,7 @@ impl IO_STATUS_BLOCK {
pub type LPOVERLAPPED_COMPLETION_ROUTINE = unsafe extern "system" fn(
dwErrorCode: DWORD,
- dwNumberOfBytesTransfered: DWORD,
+ dwNumberOfBytesTransferred: DWORD,
lpOverlapped: *mut OVERLAPPED,
);
@@ -773,6 +774,16 @@ pub struct timeval {
pub tv_usec: c_long,
}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CONSOLE_READCONSOLE_CONTROL {
+ pub nLength: ULONG,
+ pub nInitialChars: ULONG,
+ pub dwCtrlWakeupMask: ULONG,
+ pub dwControlKeyState: ULONG,
+}
+pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
+
// Desktop specific functions & types
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
@@ -802,17 +813,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
extern "system" fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
#[repr(C)]
- #[derive(Copy, Clone)]
- pub struct CONSOLE_READCONSOLE_CONTROL {
- pub nLength: ULONG,
- pub nInitialChars: ULONG,
- pub dwCtrlWakeupMask: ULONG,
- pub dwControlKeyState: ULONG,
- }
-
- pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
-
- #[repr(C)]
pub struct BY_HANDLE_FILE_INFORMATION {
pub dwFileAttributes: DWORD,
pub ftCreationTime: FILETIME,
@@ -827,7 +827,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
}
pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
- pub type LPCVOID = *const c_void;
pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
@@ -855,24 +854,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
#[link(name = "kernel32")]
extern "system" {
- // Functions forbidden when targeting UWP
- pub fn ReadConsoleW(
- hConsoleInput: HANDLE,
- lpBuffer: LPVOID,
- nNumberOfCharsToRead: DWORD,
- lpNumberOfCharsRead: LPDWORD,
- pInputControl: PCONSOLE_READCONSOLE_CONTROL,
- ) -> BOOL;
-
- pub fn WriteConsoleW(
- hConsoleOutput: HANDLE,
- lpBuffer: LPCVOID,
- nNumberOfCharsToWrite: DWORD,
- lpNumberOfCharsWritten: LPDWORD,
- lpReserved: LPVOID,
- ) -> BOOL;
-
- pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// Allowed but unused by UWP
pub fn GetFileInformationByHandle(
hFile: HANDLE,
@@ -914,6 +895,22 @@ if #[cfg(target_vendor = "uwp")] {
extern "system" {
pub fn GetCurrentProcessId() -> DWORD;
+ pub fn ReadConsoleW(
+ hConsoleInput: HANDLE,
+ lpBuffer: LPVOID,
+ nNumberOfCharsToRead: DWORD,
+ lpNumberOfCharsRead: LPDWORD,
+ pInputControl: PCONSOLE_READCONSOLE_CONTROL,
+ ) -> BOOL;
+ pub fn WriteConsoleW(
+ hConsoleOutput: HANDLE,
+ lpBuffer: LPCVOID,
+ nNumberOfCharsToWrite: DWORD,
+ lpNumberOfCharsWritten: LPDWORD,
+ lpReserved: LPVOID,
+ ) -> BOOL;
+ pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+
pub fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL;
pub fn SetFileAttributesW(lpFileName: LPCWSTR, dwFileAttributes: DWORD) -> BOOL;
diff --git a/library/std/src/sys/windows/locks/condvar.rs b/library/std/src/sys/windows/locks/condvar.rs
index be9a2abbe..66fafa2c0 100644
--- a/library/std/src/sys/windows/locks/condvar.rs
+++ b/library/std/src/sys/windows/locks/condvar.rs
@@ -8,8 +8,6 @@ pub struct Condvar {
inner: UnsafeCell<c::CONDITION_VARIABLE>,
}
-pub type MovableCondvar = Condvar;
-
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
@@ -41,12 +39,12 @@ impl Condvar {
}
#[inline]
- pub unsafe fn notify_one(&self) {
- c::WakeConditionVariable(self.inner.get())
+ pub fn notify_one(&self) {
+ unsafe { c::WakeConditionVariable(self.inner.get()) }
}
#[inline]
- pub unsafe fn notify_all(&self) {
- c::WakeAllConditionVariable(self.inner.get())
+ pub fn notify_all(&self) {
+ unsafe { c::WakeAllConditionVariable(self.inner.get()) }
}
}
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
index 602a2d623..0e0f9eccb 100644
--- a/library/std/src/sys/windows/locks/mod.rs
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -1,6 +1,6 @@
mod condvar;
mod mutex;
mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
index 91207f5f4..ef2f84082 100644
--- a/library/std/src/sys/windows/locks/mutex.rs
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -21,9 +21,6 @@ pub struct Mutex {
srwlock: UnsafeCell<c::SRWLOCK>,
}
-// Windows SRW Locks are movable (while not borrowed).
-pub type MovableMutex = Mutex;
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
@@ -39,13 +36,15 @@ impl Mutex {
}
#[inline]
- pub unsafe fn lock(&self) {
- c::AcquireSRWLockExclusive(raw(self));
+ pub fn lock(&self) {
+ unsafe {
+ c::AcquireSRWLockExclusive(raw(self));
+ }
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- c::TryAcquireSRWLockExclusive(raw(self)) != 0
+ pub fn try_lock(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 }
}
#[inline]
diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs
index fa5ffe574..e69415baa 100644
--- a/library/std/src/sys/windows/locks/rwlock.rs
+++ b/library/std/src/sys/windows/locks/rwlock.rs
@@ -5,8 +5,6 @@ pub struct RwLock {
inner: UnsafeCell<c::SRWLOCK>,
}
-pub type MovableRwLock = RwLock;
-
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
@@ -16,20 +14,20 @@ impl RwLock {
RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
}
#[inline]
- pub unsafe fn read(&self) {
- c::AcquireSRWLockShared(self.inner.get())
+ pub fn read(&self) {
+ unsafe { c::AcquireSRWLockShared(self.inner.get()) }
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
- c::TryAcquireSRWLockShared(self.inner.get()) != 0
+ pub fn try_read(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
}
#[inline]
- pub unsafe fn write(&self) {
- c::AcquireSRWLockExclusive(self.inner.get())
+ pub fn write(&self) {
+ unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
- c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ pub fn try_write(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
}
#[inline]
pub unsafe fn read_unlock(&self) {
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index eab9b9612..e67411e16 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -29,6 +29,7 @@ pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
+pub mod stdio;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
@@ -36,12 +37,9 @@ pub mod thread_parker;
pub mod time;
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
- pub mod stdio;
pub mod stack_overflow;
} else {
- pub mod stdio_uwp;
pub mod stack_overflow_uwp;
- pub use self::stdio_uwp as stdio;
pub use self::stack_overflow_uwp as stack_overflow;
}
}
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index 013c776c4..9f26acc45 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -324,17 +324,18 @@ impl AnonPipe {
let mut async_result: Option<AsyncResult> = None;
struct AsyncResult {
error: u32,
- transfered: u32,
+ transferred: u32,
}
// STEP 3: The callback.
unsafe extern "system" fn callback(
dwErrorCode: u32,
- dwNumberOfBytesTransfered: u32,
+ dwNumberOfBytesTransferred: u32,
lpOverlapped: *mut c::OVERLAPPED,
) {
// Set `async_result` using a pointer smuggled through `hEvent`.
- let result = AsyncResult { error: dwErrorCode, transfered: dwNumberOfBytesTransfered };
+ let result =
+ AsyncResult { error: dwErrorCode, transferred: dwNumberOfBytesTransferred };
*(*lpOverlapped).hEvent.cast::<Option<AsyncResult>>() = Some(result);
}
@@ -365,7 +366,7 @@ impl AnonPipe {
// STEP 4: Return the result.
// `async_result` is always `Some` at this point
match result.error {
- c::ERROR_SUCCESS => Ok(result.transfered as usize),
+ c::ERROR_SUCCESS => Ok(result.transferred as usize),
error => Err(io::Error::from_raw_os_error(error as _)),
}
}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 9cbb4ef19..31e9b34fb 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -252,10 +252,6 @@ impl Command {
) -> io::Result<(Process, StdioPipes)> {
let maybe_env = self.env.capture_if_changed();
- let mut si = zeroed_startupinfo();
- si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD;
- si.dwFlags = c::STARTF_USESTDHANDLES;
-
let child_paths = if let Some(env) = maybe_env.as_ref() {
env.get(&EnvKey::new("PATH")).map(|s| s.as_os_str())
} else {
@@ -314,9 +310,21 @@ impl Command {
let stdin = stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin)?;
let stdout = stdout.to_handle(c::STD_OUTPUT_HANDLE, &mut pipes.stdout)?;
let stderr = stderr.to_handle(c::STD_ERROR_HANDLE, &mut pipes.stderr)?;
- si.hStdInput = stdin.as_raw_handle();
- si.hStdOutput = stdout.as_raw_handle();
- si.hStdError = stderr.as_raw_handle();
+
+ let mut si = zeroed_startupinfo();
+ si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD;
+
+        // If at least one of stdin, stdout, or stderr is set (i.e. is non-null),
+ // then set the `hStd` fields in `STARTUPINFO`.
+ // Otherwise skip this and allow the OS to apply its default behaviour.
+ // This provides more consistent behaviour between Win7 and Win8+.
+ let is_set = |stdio: &Handle| !stdio.as_raw_handle().is_null();
+ if is_set(&stderr) || is_set(&stdout) || is_set(&stdin) {
+ si.dwFlags |= c::STARTF_USESTDHANDLES;
+ si.hStdInput = stdin.as_raw_handle();
+ si.hStdOutput = stdout.as_raw_handle();
+ si.hStdError = stderr.as_raw_handle();
+ }
unsafe {
cvt(c::CreateProcessW(
@@ -513,9 +521,6 @@ fn program_exists(path: &Path) -> Option<Vec<u16>> {
impl Stdio {
fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Result<Handle> {
match *self {
- // If no stdio handle is available, then inherit means that it
- // should still be unavailable so propagate the
- // INVALID_HANDLE_VALUE.
Stdio::Inherit => match stdio::get_handle(stdio_id) {
Ok(io) => unsafe {
let io = Handle::from_raw_handle(io);
@@ -523,7 +528,8 @@ impl Stdio {
io.into_raw_handle();
ret
},
- Err(..) => unsafe { Ok(Handle::from_raw_handle(c::INVALID_HANDLE_VALUE)) },
+ // If no stdio handle is available, then propagate the null value.
+ Err(..) => unsafe { Ok(Handle::from_raw_handle(ptr::null_mut())) },
},
Stdio::MakePipe => {
@@ -730,9 +736,9 @@ fn zeroed_startupinfo() -> c::STARTUPINFO {
wShowWindow: 0,
cbReserved2: 0,
lpReserved2: ptr::null_mut(),
- hStdInput: c::INVALID_HANDLE_VALUE,
- hStdOutput: c::INVALID_HANDLE_VALUE,
- hStdError: c::INVALID_HANDLE_VALUE,
+ hStdInput: ptr::null_mut(),
+ hStdOutput: ptr::null_mut(),
+ hStdError: ptr::null_mut(),
}
}
diff --git a/library/std/src/sys/windows/stdio_uwp.rs b/library/std/src/sys/windows/stdio_uwp.rs
deleted file mode 100644
index 32550f796..000000000
--- a/library/std/src/sys/windows/stdio_uwp.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-#![unstable(issue = "none", feature = "windows_stdio")]
-
-use crate::io;
-use crate::mem::ManuallyDrop;
-use crate::os::windows::io::FromRawHandle;
-use crate::sys::c;
-use crate::sys::handle::Handle;
-
-pub struct Stdin {}
-pub struct Stdout;
-pub struct Stderr;
-
-const MAX_BUFFER_SIZE: usize = 8192;
-pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3;
-
-pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> {
- let handle = unsafe { c::GetStdHandle(handle_id) };
- if handle == c::INVALID_HANDLE_VALUE {
- Err(io::Error::last_os_error())
- } else if handle.is_null() {
- Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32))
- } else {
- Ok(handle)
- }
-}
-
-fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> {
- let handle = get_handle(handle_id)?;
- // SAFETY: The handle returned from `get_handle` must be valid and non-null.
- let handle = unsafe { Handle::from_raw_handle(handle) };
- ManuallyDrop::new(handle).write(data)
-}
-
-impl Stdin {
- pub const fn new() -> Stdin {
- Stdin {}
- }
-}
-
-impl io::Read for Stdin {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- let handle = get_handle(c::STD_INPUT_HANDLE)?;
- // SAFETY: The handle returned from `get_handle` must be valid and non-null.
- let handle = unsafe { Handle::from_raw_handle(handle) };
- ManuallyDrop::new(handle).read(buf)
- }
-}
-
-impl Stdout {
- pub const fn new() -> Stdout {
- Stdout
- }
-}
-
-impl io::Write for Stdout {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- write(c::STD_OUTPUT_HANDLE, buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- Ok(())
- }
-}
-
-impl Stderr {
- pub const fn new() -> Stderr {
- Stderr
- }
-}
-
-impl io::Write for Stderr {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- write(c::STD_ERROR_HANDLE, buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- Ok(())
- }
-}
-
-pub fn is_ebadf(err: &io::Error) -> bool {
- err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32)
-}
-
-pub fn panic_output() -> Option<impl io::Write> {
- Some(Stderr::new())
-}
diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs
deleted file mode 100644
index 8bc5b2411..000000000
--- a/library/std/src/sys_common/condvar.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-use crate::sys::locks as imp;
-use crate::sys_common::mutex::MovableMutex;
-use crate::time::Duration;
-
-mod check;
-
-type CondvarCheck = <imp::MovableMutex as check::CondvarCheck>::Check;
-
-/// An OS-based condition variable.
-pub struct Condvar {
- inner: imp::MovableCondvar,
- check: CondvarCheck,
-}
-
-impl Condvar {
- /// Creates a new condition variable for use.
- #[inline]
- #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
- pub const fn new() -> Self {
- Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() }
- }
-
- /// Signals one waiter on this condition variable to wake up.
- #[inline]
- pub fn notify_one(&self) {
- unsafe { self.inner.notify_one() };
- }
-
- /// Awakens all current waiters on this condition variable.
- #[inline]
- pub fn notify_all(&self) {
- unsafe { self.inner.notify_all() };
- }
-
- /// Waits for a signal on the specified mutex.
- ///
- /// Behavior is undefined if the mutex is not locked by the current thread.
- ///
- /// May panic if used with more than one mutex.
- #[inline]
- pub unsafe fn wait(&self, mutex: &MovableMutex) {
- self.check.verify(mutex);
- self.inner.wait(mutex.raw())
- }
-
- /// Waits for a signal on the specified mutex with a timeout duration
- /// specified by `dur` (a relative time into the future).
- ///
- /// Behavior is undefined if the mutex is not locked by the current thread.
- ///
- /// May panic if used with more than one mutex.
- #[inline]
- pub unsafe fn wait_timeout(&self, mutex: &MovableMutex, dur: Duration) -> bool {
- self.check.verify(mutex);
- self.inner.wait_timeout(mutex.raw(), dur)
- }
-}
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
deleted file mode 100644
index 4ac9e62bf..000000000
--- a/library/std/src/sys_common/condvar/check.rs
+++ /dev/null
@@ -1,58 +0,0 @@
-use crate::ptr;
-use crate::sync::atomic::{AtomicPtr, Ordering};
-use crate::sys::locks as imp;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-use crate::sys_common::mutex::MovableMutex;
-
-pub trait CondvarCheck {
- type Check;
-}
-
-/// For boxed mutexes, a `Condvar` will check it's only ever used with the same
-/// mutex, based on its (stable) address.
-impl<T: LazyInit> CondvarCheck for LazyBox<T> {
- type Check = SameMutexCheck;
-}
-
-pub struct SameMutexCheck {
- addr: AtomicPtr<()>,
-}
-
-#[allow(dead_code)]
-impl SameMutexCheck {
- pub const fn new() -> Self {
- Self { addr: AtomicPtr::new(ptr::null_mut()) }
- }
- pub fn verify(&self, mutex: &MovableMutex) {
- let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
- // Relaxed is okay here because we never read through `self.addr`, and only use it to
- // compare addresses.
- match self.addr.compare_exchange(
- ptr::null_mut(),
- addr,
- Ordering::Relaxed,
- Ordering::Relaxed,
- ) {
- Ok(_) => {} // Stored the address
- Err(n) if n == addr => {} // Lost a race to store the same address
- _ => panic!("attempted to use a condition variable with two mutexes"),
- }
- }
-}
-
-/// Unboxed mutexes may move, so `Condvar` can not require its address to stay
-/// constant.
-impl CondvarCheck for imp::Mutex {
- type Check = NoCheck;
-}
-
-pub struct NoCheck;
-
-#[allow(dead_code)]
-impl NoCheck {
- #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
- pub const fn new() -> Self {
- Self
- }
- pub fn verify(&self, _: &MovableMutex) {}
-}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index 8c19f9332..b1987aa0f 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -21,20 +21,18 @@
mod tests;
pub mod backtrace;
-pub mod condvar;
pub mod fs;
pub mod io;
pub mod lazy_box;
pub mod memchr;
-pub mod mutex;
pub mod once;
pub mod process;
pub mod remutex;
-pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
pub mod thread_parker;
+pub mod wstr;
pub mod wtf8;
cfg_if::cfg_if! {
diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs
deleted file mode 100644
index 98046f20f..000000000
--- a/library/std/src/sys_common/mutex.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-use crate::sys::locks as imp;
-
-/// An OS-based mutual exclusion lock.
-///
-/// This mutex cleans up its resources in its `Drop` implementation, may safely
-/// be moved (when not borrowed), and does not cause UB when used reentrantly.
-///
-/// This mutex does not implement poisoning.
-///
-/// This is either a wrapper around `LazyBox<imp::Mutex>` or `imp::Mutex`,
-/// depending on the platform. It is boxed on platforms where `imp::Mutex` may
-/// not be moved.
-pub struct MovableMutex(imp::MovableMutex);
-
-unsafe impl Sync for MovableMutex {}
-
-impl MovableMutex {
- /// Creates a new mutex.
- #[inline]
- #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
- pub const fn new() -> Self {
- Self(imp::MovableMutex::new())
- }
-
- pub(super) fn raw(&self) -> &imp::Mutex {
- &self.0
- }
-
- /// Locks the mutex blocking the current thread until it is available.
- #[inline]
- pub fn raw_lock(&self) {
- unsafe { self.0.lock() }
- }
-
- /// Attempts to lock the mutex without blocking, returning whether it was
- /// successfully acquired or not.
- #[inline]
- pub fn try_lock(&self) -> bool {
- unsafe { self.0.try_lock() }
- }
-
- /// Unlocks the mutex.
- ///
- /// Behavior is undefined if the current thread does not actually hold the
- /// mutex.
- #[inline]
- pub unsafe fn raw_unlock(&self) {
- self.0.unlock()
- }
-}
diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/generic.rs
index acf5f2471..d953a6745 100644
--- a/library/std/src/sys_common/once/generic.rs
+++ b/library/std/src/sys_common/once/generic.rs
@@ -107,6 +107,7 @@ struct WaiterQueue<'a> {
impl Once {
#[inline]
+ #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
pub const fn new() -> Once {
Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) }
}
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
index b448ae3a9..4c054da64 100644
--- a/library/std/src/sys_common/remutex.rs
+++ b/library/std/src/sys_common/remutex.rs
@@ -1,11 +1,11 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
-use super::mutex as sys;
use crate::cell::UnsafeCell;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use crate::sys::locks as sys;
/// A re-entrant mutual exclusion
///
@@ -39,7 +39,7 @@ use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
- mutex: sys::MovableMutex,
+ mutex: sys::Mutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
data: T,
@@ -74,7 +74,7 @@ impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
pub const fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
- mutex: sys::MovableMutex::new(),
+ mutex: sys::Mutex::new(),
owner: AtomicUsize::new(0),
lock_count: UnsafeCell::new(0),
data: t,
@@ -100,7 +100,7 @@ impl<T> ReentrantMutex<T> {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
- self.mutex.raw_lock();
+ self.mutex.lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
@@ -162,7 +162,7 @@ impl<T> Drop for ReentrantMutexGuard<'_, T> {
*self.lock.lock_count.get() -= 1;
if *self.lock.lock_count.get() == 0 {
self.lock.owner.store(0, Relaxed);
- self.lock.mutex.raw_unlock();
+ self.lock.mutex.unlock();
}
}
}
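
`ReentrantMutex` keeps the same owner/lock-count scheme, now layered directly on `sys::Mutex`. A self-contained sketch of that bookkeeping, with a spinlock standing in for the system mutex and an explicit caller-supplied id standing in for the TLS-address-derived thread id:

    use std::cell::UnsafeCell;
    use std::sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{Acquire, Relaxed, Release},
    };

    struct Reentrant {
        locked: AtomicBool,     // the non-reentrant lock (std: sys::Mutex)
        owner: AtomicUsize,     // 0 = unowned; written only by the holder
        count: UnsafeCell<u32>, // recursion depth; guarded by `locked`
    }

    unsafe impl Sync for Reentrant {}

    impl Reentrant {
        const fn new() -> Self {
            Self {
                locked: AtomicBool::new(false),
                owner: AtomicUsize::new(0),
                count: UnsafeCell::new(0),
            }
        }

        fn lock(&self, this_thread: usize) {
            assert_ne!(this_thread, 0);
            if self.owner.load(Relaxed) == this_thread {
                // Re-entry: the underlying lock is already ours, just count.
                unsafe { *self.count.get() += 1 };
            } else {
                while self.locked.swap(true, Acquire) {} // spin until acquired
                self.owner.store(this_thread, Relaxed);
                unsafe { *self.count.get() = 1 };
            }
        }

        fn unlock(&self, this_thread: usize) {
            assert_eq!(self.owner.load(Relaxed), this_thread);
            unsafe {
                *self.count.get() -= 1;
                if *self.count.get() == 0 {
                    self.owner.store(0, Relaxed);
                    self.locked.store(false, Release);
                }
            }
        }
    }

    fn main() {
        static M: Reentrant = Reentrant::new();
        let id = 1; // stand-in for the per-thread id
        M.lock(id);
        M.lock(id); // re-entrant acquisition does not deadlock
        M.unlock(id);
        M.unlock(id);
    }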
diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs
deleted file mode 100644
index 042981dac..000000000
--- a/library/std/src/sys_common/rwlock.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-use crate::sys::locks as imp;
-
-/// An OS-based reader-writer lock.
-///
-/// This rwlock cleans up its resources in its `Drop` implementation and may
-/// safely be moved (when not borrowed).
-///
-/// This rwlock does not implement poisoning.
-///
-/// This is either a wrapper around `LazyBox<imp::RwLock>` or `imp::RwLock`,
-/// depending on the platform. It is boxed on platforms where `imp::RwLock` may
-/// not be moved.
-pub struct MovableRwLock(imp::MovableRwLock);
-
-impl MovableRwLock {
- /// Creates a new reader-writer lock for use.
- #[inline]
- #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
- pub const fn new() -> Self {
- Self(imp::MovableRwLock::new())
- }
-
- /// Acquires shared access to the underlying lock, blocking the current
- /// thread to do so.
- #[inline]
- pub fn read(&self) {
- unsafe { self.0.read() }
- }
-
- /// Attempts to acquire shared access to this lock, returning whether it
- /// succeeded or not.
- ///
- /// This function does not block the current thread.
- #[inline]
- pub fn try_read(&self) -> bool {
- unsafe { self.0.try_read() }
- }
-
- /// Acquires write access to the underlying lock, blocking the current thread
- /// to do so.
- #[inline]
- pub fn write(&self) {
- unsafe { self.0.write() }
- }
-
- /// Attempts to acquire exclusive access to this lock, returning whether it
- /// succeeded or not.
- ///
- /// This function does not block the current thread.
- #[inline]
- pub fn try_write(&self) -> bool {
- unsafe { self.0.try_write() }
- }
-
- /// Unlocks previously acquired shared access to this lock.
- ///
- /// Behavior is undefined if the current thread does not have shared access.
- #[inline]
- pub unsafe fn read_unlock(&self) {
- self.0.read_unlock()
- }
-
- /// Unlocks previously acquired exclusive access to this lock.
- ///
- /// Behavior is undefined if the current thread does not currently have
- /// exclusive access.
- #[inline]
- pub unsafe fn write_unlock(&self) {
- self.0.write_unlock()
- }
-}
diff --git a/library/std/src/sys_common/wstr.rs b/library/std/src/sys_common/wstr.rs
new file mode 100644
index 000000000..b230fd1a8
--- /dev/null
+++ b/library/std/src/sys_common/wstr.rs
@@ -0,0 +1,59 @@
+//! This module contains constructs to work with 16-bit characters (UCS-2 or UTF-16)
+#![allow(dead_code)]
+
+use crate::marker::PhantomData;
+use crate::num::NonZeroU16;
+use crate::ptr::NonNull;
+
+/// A safe iterator over a LPWSTR
+/// (aka a pointer to a series of UTF-16 code units terminated by a NULL).
+pub struct WStrUnits<'a> {
+ // The pointer must never be null...
+ lpwstr: NonNull<u16>,
+ // ...and the memory it points to must be valid for this lifetime.
+ lifetime: PhantomData<&'a [u16]>,
+}
+
+impl WStrUnits<'_> {
+ /// Create the iterator. Returns `None` if `lpwstr` is null.
+ ///
+ /// SAFETY: `lpwstr` must point to a null-terminated wide string that lives
+ /// at least as long as the lifetime of this struct.
+ pub unsafe fn new(lpwstr: *const u16) -> Option<Self> {
+ Some(Self { lpwstr: NonNull::new(lpwstr as _)?, lifetime: PhantomData })
+ }
+
+ pub fn peek(&self) -> Option<NonZeroU16> {
+ // SAFETY: It's always safe to read the current item because we don't
+ // ever move out of the array's bounds.
+ unsafe { NonZeroU16::new(*self.lpwstr.as_ptr()) }
+ }
+
+ /// Advance the iterator while `predicate` returns true.
+ /// Returns the number of items it advanced by.
+ pub fn advance_while<P: FnMut(NonZeroU16) -> bool>(&mut self, mut predicate: P) -> usize {
+ let mut counter = 0;
+ while let Some(w) = self.peek() {
+ if !predicate(w) {
+ break;
+ }
+ counter += 1;
+ self.next();
+ }
+ counter
+ }
+}
+
+impl Iterator for WStrUnits<'_> {
+ // This can never return zero as that marks the end of the string.
+ type Item = NonZeroU16;
+ fn next(&mut self) -> Option<NonZeroU16> {
+ // SAFETY: If NULL is reached we immediately return.
+ // Therefore it's safe to advance the pointer after that.
+ unsafe {
+ let next = self.peek()?;
+ self.lpwstr = NonNull::new_unchecked(self.lpwstr.as_ptr().add(1));
+ Some(next)
+ }
+ }
+}
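
`WStrUnits` moves verbatim out of the Windows `args` module so other code can share it. The core trick is iterating `NonZeroU16` code units until the NUL terminator; a standalone analogue:

    use std::num::NonZeroU16;

    // Count UTF-16 code units up to (not including) the NUL terminator,
    // treating every unit as a `NonZeroU16` exactly like `WStrUnits` does.
    fn wide_len(mut p: *const u16) -> usize {
        let mut n = 0;
        unsafe {
            while NonZeroU16::new(*p).is_some() {
                n += 1;
                p = p.add(1);
            }
        }
        n
    }

    fn main() {
        let wide: Vec<u16> = "hello".encode_utf16().chain([0]).collect();
        assert_eq!(wide_len(wide.as_ptr()), 5);
    }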
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
index 1df1ca758..80dc4c038 100644
--- a/library/std/src/thread/local/tests.rs
+++ b/library/std/src/thread/local/tests.rs
@@ -1,15 +1,34 @@
use crate::cell::{Cell, UnsafeCell};
use crate::sync::atomic::{AtomicU8, Ordering};
-use crate::sync::mpsc::{channel, Sender};
+use crate::sync::{Arc, Condvar, Mutex};
use crate::thread::{self, LocalKey};
use crate::thread_local;
-struct Foo(Sender<()>);
+#[derive(Clone, Default)]
+struct Signal(Arc<(Mutex<bool>, Condvar)>);
+
+impl Signal {
+ fn notify(&self) {
+ let (set, cvar) = &*self.0;
+ *set.lock().unwrap() = true;
+ cvar.notify_one();
+ }
+
+ fn wait(&self) {
+ let (set, cvar) = &*self.0;
+ let mut set = set.lock().unwrap();
+ while !*set {
+ set = cvar.wait(set).unwrap();
+ }
+ }
+}
+
+struct Foo(Signal);
impl Drop for Foo {
fn drop(&mut self) {
- let Foo(ref s) = *self;
- s.send(()).unwrap();
+ let Foo(ref f) = *self;
+ f.notify();
}
}
@@ -69,14 +88,15 @@ fn smoke_dtor() {
run(&FOO2);
fn run(key: &'static LocalKey<UnsafeCell<Option<Foo>>>) {
- let (tx, rx) = channel();
+ let signal = Signal::default();
+ let signal2 = signal.clone();
let t = thread::spawn(move || unsafe {
- let mut tx = Some(tx);
+ let mut signal = Some(signal2);
key.with(|f| {
- *f.get() = Some(Foo(tx.take().unwrap()));
+ *f.get() = Some(Foo(signal.take().unwrap()));
});
});
- rx.recv().unwrap();
+ signal.wait();
t.join().unwrap();
}
}
@@ -165,48 +185,50 @@ fn self_referential() {
// requires the destructor to be run to pass the test).
#[test]
fn dtors_in_dtors_in_dtors() {
- struct S1(Sender<()>);
+ struct S1(Signal);
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
- let S1(ref tx) = *self;
+ let S1(ref signal) = *self;
unsafe {
- let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
+ let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
}
}
}
- let (tx, rx) = channel();
+ let signal = Signal::default();
+ let signal2 = signal.clone();
let _t = thread::spawn(move || unsafe {
- let mut tx = Some(tx);
- K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
+ let mut signal = Some(signal2);
+ K1.with(|s| *s.get() = Some(S1(signal.take().unwrap())));
});
- rx.recv().unwrap();
+ signal.wait();
}
#[test]
fn dtors_in_dtors_in_dtors_const_init() {
- struct S1(Sender<()>);
+ struct S1(Signal);
thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
thread_local!(static K2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
impl Drop for S1 {
fn drop(&mut self) {
- let S1(ref tx) = *self;
+ let S1(ref signal) = *self;
unsafe {
- let _ = K2.try_with(|s| *s.get() = Some(Foo(tx.clone())));
+ let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
}
}
}
- let (tx, rx) = channel();
+ let signal = Signal::default();
+ let signal2 = signal.clone();
let _t = thread::spawn(move || unsafe {
- let mut tx = Some(tx);
- K1.with(|s| *s.get() = Some(S1(tx.take().unwrap())));
+ let mut signal = Some(signal2);
+ K1.with(|s| *s.get() = Some(S1(signal.take().unwrap())));
});
- rx.recv().unwrap();
+ signal.wait();
}
// This test tests that TLS destructors have run before the thread joins. The
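
The tests swap mpsc channels for this hand-rolled `Signal` because channel internals themselves touch thread-local storage, which is exactly what these TLS-destructor tests exercise. The same Mutex-plus-Condvar pattern in ordinary user code:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    fn main() {
        let pair = Arc::new((Mutex::new(false), Condvar::new()));
        let pair2 = Arc::clone(&pair);
        let t = thread::spawn(move || {
            let (set, cvar) = &*pair2;
            *set.lock().unwrap() = true;
            cvar.notify_one();
        });
        // Wait until the flag flips, re-checking after every wakeup.
        let (set, cvar) = &*pair;
        let mut set = set.lock().unwrap();
        while !*set {
            set = cvar.wait(set).unwrap();
        }
        drop(set);
        t.join().unwrap();
    }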
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 05023df1b..34bdb8bd4 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -124,9 +124,8 @@
//!
//! ## Stack size
//!
-//! The default stack size for spawned threads is 2 MiB, though this particular stack size is
-//! subject to change in the future. There are two ways to manually specify the stack size for
-//! spawned threads:
+//! The default stack size is platform-dependent and subject to change. Currently, it is 2 MiB on all
+//! Tier-1 platforms. There are two ways to manually specify the stack size for spawned threads:
//!
//! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`].
//! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack
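
The `Builder` route mentioned in the first option above, for concreteness (4 MiB chosen arbitrarily):

    use std::thread;

    fn main() {
        let handle = thread::Builder::new()
            .stack_size(4 * 1024 * 1024) // 4 MiB instead of the platform default
            .spawn(|| {
                // thread body
            })
            .unwrap();
        handle.join().unwrap();
    }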
diff --git a/library/std/src/thread/scoped.rs b/library/std/src/thread/scoped.rs
index e6dbf35bd..ada69aa82 100644
--- a/library/std/src/thread/scoped.rs
+++ b/library/std/src/thread/scoped.rs
@@ -46,7 +46,7 @@ impl ScopeData {
// We check for 'overflow' with usize::MAX / 2, to make sure there's no
// chance it overflows to 0, which would result in unsoundness.
if self.num_running_threads.fetch_add(1, Ordering::Relaxed) > usize::MAX / 2 {
- // This can only reasonably happen by mem::forget()'ing many many ScopedJoinHandles.
+ // This can only reasonably happen by mem::forget()'ing a lot of ScopedJoinHandles.
self.decrement_num_running_threads(false);
panic!("too many running threads in thread scope");
}
diff --git a/library/std/src/time/tests.rs b/library/std/src/time/tests.rs
index 6229556c8..2e64ae59a 100644
--- a/library/std/src/time/tests.rs
+++ b/library/std/src/time/tests.rs
@@ -88,6 +88,14 @@ fn instant_math_is_associative() {
// Changing the order of instant math shouldn't change the results,
// especially when the expression reduces to X + identity.
assert_eq!((now + offset) - now, (now - now) + offset);
+
+ // On any platform, `Instant` should have the same resolution as `Duration` (e.g. 1 nanosecond)
+ // or better. Otherwise, math will be non-associative (see #91417).
+ let now = Instant::now();
+ let provided_offset = Duration::from_nanos(1);
+ let later = now + provided_offset;
+ let measured_offset = later - now;
+ assert_eq!(measured_offset, provided_offset);
}
#[test]
diff --git a/library/test/Cargo.toml b/library/test/Cargo.toml
index 2da41484c..61b6f33bc 100644
--- a/library/test/Cargo.toml
+++ b/library/test/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
crate-type = ["dylib", "rlib"]
[dependencies]
-cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] }
+cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
getopts = { version = "0.2.21", features = ['rustc-dep-of-std'] }
std = { path = "../std" }
core = { path = "../core" }
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
index 8be32183f..524658bce 100644
--- a/library/test/src/cli.rs
+++ b/library/test/src/cli.rs
@@ -26,6 +26,10 @@ pub struct TestOpts {
pub test_threads: Option<usize>,
pub skip: Vec<String>,
pub time_options: Option<TestTimeOptions>,
+ /// Stop at the first failing test.
+ /// Due to threading, a few more tests may run, but the
+ /// run will abort as soon as possible.
+ pub fail_fast: bool,
pub options: Options,
}
@@ -296,6 +300,7 @@ fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
skip,
time_options,
options,
+ fail_fast: false,
};
Ok(test_opts)
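parse_opts always initializes the new field to false, so fail-fast is reachable only for callers that construct TestOpts programmatically. A minimal standalone sketch of the drain-and-abort pattern the field enables later in this patch (Outcome and the worker closure are illustrative, not libtest types):

    use std::sync::mpsc;
    use std::thread;

    #[derive(Debug)]
    enum Outcome { Ok, Failed }

    fn main() {
        let (tx, rx) = mpsc::channel();
        for i in 0..5 {
            let tx = tx.clone();
            thread::spawn(move || {
                let outcome = if i == 2 { Outcome::Failed } else { Outcome::Ok };
                // Ignore send errors: the receiver may be gone after an early abort.
                let _ = tx.send((i, outcome));
            });
        }
        drop(tx);
        let fail_fast = true;
        while let Ok((id, outcome)) = rx.recv() {
            println!("test {id}: {outcome:?}");
            if fail_fast && matches!(outcome, Outcome::Failed) {
                break; // abort as soon as the first failure is reported
            }
        }
    }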
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index b1270c272..a3c39f71f 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -228,9 +228,9 @@ fn on_test_event(
out: &mut dyn OutputFormatter,
) -> io::Result<()> {
match (*event).clone() {
- TestEvent::TeFiltered(ref filtered_tests, shuffle_seed) => {
- st.total = filtered_tests.len();
- out.write_run_start(filtered_tests.len(), shuffle_seed)?;
+ TestEvent::TeFiltered(filtered_tests, shuffle_seed) => {
+ st.total = filtered_tests;
+ out.write_run_start(filtered_tests, shuffle_seed)?;
}
TestEvent::TeFilteredOut(filtered_out) => {
st.filtered_out = filtered_out;
@@ -293,7 +293,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));
- assert!(st.current_test_count() == st.total);
+ assert!(opts.fail_fast || st.current_test_count() == st.total);
out.write_run_finish(&st)
}
diff --git a/library/test/src/event.rs b/library/test/src/event.rs
index 6ff1a615e..80281ebd2 100644
--- a/library/test/src/event.rs
+++ b/library/test/src/event.rs
@@ -28,7 +28,7 @@ impl CompletedTest {
#[derive(Debug, Clone)]
pub enum TestEvent {
- TeFiltered(Vec<TestDesc>, Option<u64>),
+ TeFiltered(usize, Option<u64>),
TeWait(TestDesc),
TeResult(CompletedTest),
TeTimeout(TestDesc),
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 141f16d17..256c9e8d1 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -40,7 +40,7 @@ pub mod test {
cli::{parse_opts, TestOpts},
filter_tests,
helpers::metrics::{Metric, MetricMap},
- options::{Concurrent, Options, RunIgnored, RunStrategy, ShouldPanic},
+ options::{Options, RunIgnored, RunStrategy, ShouldPanic},
run_test, test_main, test_main_static,
test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
time::{TestExecTime, TestTimeOptions},
@@ -85,7 +85,7 @@ use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
-use options::{Concurrent, RunStrategy};
+use options::RunStrategy;
use test_result::*;
use time::TestExecTime;
@@ -219,6 +219,38 @@ pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
}
}
+struct FilteredTests {
+ tests: Vec<(TestId, TestDescAndFn)>,
+ benchs: Vec<(TestId, TestDescAndFn)>,
+ next_id: usize,
+}
+
+impl FilteredTests {
+ fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
+ let test = TestDescAndFn { desc, testfn };
+ self.benchs.push((TestId(self.next_id), test));
+ self.next_id += 1;
+ }
+ fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
+ let test = TestDescAndFn { desc, testfn };
+ self.tests.push((TestId(self.next_id), test));
+ self.next_id += 1;
+ }
+ fn add_bench_as_test(
+ &mut self,
+ desc: TestDesc,
+ benchfn: impl Fn(&mut Bencher) -> Result<(), String> + Send + 'static,
+ ) {
+ let testfn = DynTestFn(Box::new(move || {
+ bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+ }));
+ self.add_test(desc, testfn);
+ }
+ fn total_len(&self) -> usize {
+ self.tests.len() + self.benchs.len()
+ }
+}
+
pub fn run_tests<F>(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
@@ -235,6 +267,19 @@ where
join_handle: Option<thread::JoinHandle<()>>,
}
+ impl RunningTest {
+ fn join(self, completed_test: &mut CompletedTest) {
+ if let Some(join_handle) = self.join_handle {
+ if let Err(_) = join_handle.join() {
+ if let TrOk = completed_test.result {
+ completed_test.result =
+ TrFailedMsg("panicked after reporting success".to_string());
+ }
+ }
+ }
+ }
+ }
+
// Use a deterministic hasher
type TestMap =
HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
@@ -247,45 +292,51 @@ where
let tests_len = tests.len();
- let mut filtered_tests = filter_tests(opts, tests);
- if !opts.bench_benchmarks {
- filtered_tests = convert_benchmarks_to_tests(filtered_tests);
- }
+ let mut filtered = FilteredTests { tests: Vec::new(), benchs: Vec::new(), next_id: 0 };
- let filtered_tests = {
- let mut filtered_tests = filtered_tests;
- for test in filtered_tests.iter_mut() {
- test.desc.name = test.desc.name.with_padding(test.testfn.padding());
- }
+ for test in filter_tests(opts, tests) {
+ let mut desc = test.desc;
+ desc.name = desc.name.with_padding(test.testfn.padding());
- filtered_tests
- };
+ match test.testfn {
+ DynBenchFn(benchfn) => {
+ if opts.bench_benchmarks {
+ filtered.add_bench(desc, DynBenchFn(benchfn));
+ } else {
+ filtered.add_bench_as_test(desc, benchfn);
+ }
+ }
+ StaticBenchFn(benchfn) => {
+ if opts.bench_benchmarks {
+ filtered.add_bench(desc, StaticBenchFn(benchfn));
+ } else {
+ filtered.add_bench_as_test(desc, benchfn);
+ }
+ }
+ testfn => {
+ filtered.add_test(desc, testfn);
+ }
+ };
+ }
- let filtered_out = tests_len - filtered_tests.len();
+ let filtered_out = tests_len - filtered.total_len();
let event = TestEvent::TeFilteredOut(filtered_out);
notify_about_test_event(event)?;
- let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
-
let shuffle_seed = get_shuffle_seed(opts);
- let event = TestEvent::TeFiltered(filtered_descs, shuffle_seed);
+ let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
notify_about_test_event(event)?;
- let (mut filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
- .into_iter()
- .enumerate()
- .map(|(i, e)| (TestId(i), e))
- .partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
-
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
+ let mut remaining = filtered.tests;
if let Some(shuffle_seed) = shuffle_seed {
- shuffle_tests(shuffle_seed, &mut filtered_tests);
+ shuffle_tests(shuffle_seed, &mut remaining);
}
// Store the tests in a VecDeque so we can efficiently remove the first element to run the
// tests in the order they were passed (unless shuffled).
- let mut remaining = VecDeque::from(filtered_tests);
+ let mut remaining = VecDeque::from(remaining);
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
@@ -328,13 +379,22 @@ where
let (id, test) = remaining.pop_front().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
- let join_handle =
- run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
- assert!(join_handle.is_none());
- let completed_test = rx.recv().unwrap();
+ let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
+ // Wait for the test to complete.
+ let mut completed_test = rx.recv().unwrap();
+ RunningTest { join_handle }.join(&mut completed_test);
+
+ let fail_fast = match completed_test.result {
+ TrIgnored | TrOk | TrBench(_) => false,
+ TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
+ };
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
+
+ if fail_fast {
+ return Ok(());
+ }
}
} else {
while pending > 0 || !remaining.is_empty() {
@@ -345,15 +405,8 @@ where
let event = TestEvent::TeWait(desc.clone());
notify_about_test_event(event)?; //here no pad
- let join_handle = run_test(
- opts,
- !opts.run_tests,
- id,
- test,
- run_strategy,
- tx.clone(),
- Concurrent::Yes,
- );
+ let join_handle =
+ run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
running_tests.insert(id, RunningTest { join_handle });
timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
pending += 1;
@@ -385,28 +438,34 @@ where
let mut completed_test = res.unwrap();
let running_test = running_tests.remove(&completed_test.id).unwrap();
- if let Some(join_handle) = running_test.join_handle {
- if let Err(_) = join_handle.join() {
- if let TrOk = completed_test.result {
- completed_test.result =
- TrFailedMsg("panicked after reporting success".to_string());
- }
- }
- }
+ running_test.join(&mut completed_test);
+
+ let fail_fast = match completed_test.result {
+ TrIgnored | TrOk | TrBench(_) => false,
+ TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
+ };
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
pending -= 1;
+
+ if fail_fast {
+ // Prevent remaining test threads from panicking
+ std::mem::forget(rx);
+ return Ok(());
+ }
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
- for (id, b) in filtered_benchs {
+ for (id, b) in filtered.benchs {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
- run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
- let completed_test = rx.recv().unwrap();
+ let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
+ // Wait for the test to complete.
+ let mut completed_test = rx.recv().unwrap();
+ RunningTest { join_handle }.join(&mut completed_test);
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
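The mem::forget(rx) on the fail-fast path above leaks the receiver deliberately: dropping it would disconnect the channel, and the still-running test threads would panic when their send into the monitor channel fails. A minimal demonstration of the difference (illustrative, not libtest code):

    use std::{mem, sync::mpsc, thread};

    fn main() {
        let (tx, rx) = mpsc::channel::<u32>();
        let worker = thread::spawn(move || {
            // If the receiver had been dropped, this unwrap would panic
            // inside the worker thread.
            tx.send(42).unwrap();
        });
        // Leak the receiver instead of dropping it: the channel stays open,
        // so late senders never observe a disconnect.
        mem::forget(rx);
        worker.join().unwrap();
    }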
@@ -432,7 +491,9 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
}
// Skip tests that match any of the skip filters
- filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+ if !opts.skip.is_empty() {
+ filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+ }
// Excludes #[should_panic] tests
if opts.exclude_should_panic {
@@ -480,7 +541,6 @@ pub fn run_test(
test: TestDescAndFn,
strategy: RunStrategy,
monitor_ch: Sender<CompletedTest>,
- concurrency: Concurrent,
) -> Option<thread::JoinHandle<()>> {
let TestDescAndFn { desc, testfn } = test;
@@ -498,7 +558,6 @@ pub fn run_test(
struct TestRunOpts {
pub strategy: RunStrategy,
pub nocapture: bool,
- pub concurrency: Concurrent,
pub time: Option<time::TestTimeOptions>,
}
@@ -509,7 +568,6 @@ pub fn run_test(
testfn: Box<dyn FnOnce() -> Result<(), String> + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
- let concurrency = opts.concurrency;
let name = desc.name.clone();
let runtest = move || match opts.strategy {
@@ -536,7 +594,7 @@ pub fn run_test(
// the test synchronously, regardless of the concurrency
// level.
let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_family = "wasm");
- if concurrency == Concurrent::Yes && supports_threads {
+ if supports_threads {
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
let mut runtest = Arc::new(Mutex::new(Some(runtest)));
let runtest2 = runtest.clone();
@@ -557,7 +615,7 @@ pub fn run_test(
}
let test_run_opts =
- TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
+ TestRunOpts { strategy, nocapture: opts.nocapture, time: opts.time_options };
match testfn {
DynBenchFn(benchfn) => {
diff --git a/library/test/src/options.rs b/library/test/src/options.rs
index baf36b5f1..75ec0b616 100644
--- a/library/test/src/options.rs
+++ b/library/test/src/options.rs
@@ -1,12 +1,5 @@
//! Enums denoting options for test execution.
-/// Whether to execute tests concurrently or not
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Concurrent {
- Yes,
- No,
-}
-
/// Number of times to run a benchmarked function
#[derive(Clone, PartialEq, Eq)]
pub enum BenchMode {
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
index b54be64ef..3a0260f86 100644
--- a/library/test/src/tests.rs
+++ b/library/test/src/tests.rs
@@ -51,6 +51,7 @@ impl TestOpts {
skip: vec![],
time_options: None,
options: Options::new(),
+ fail_fast: false,
}
}
}
@@ -102,7 +103,7 @@ pub fn do_not_run_ignored_tests() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_ne!(result, TrOk);
}
@@ -125,7 +126,7 @@ pub fn ignored_tests_result_in_ignored() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(result, TrIgnored);
}
@@ -150,7 +151,7 @@ fn test_should_panic() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(result, TrOk);
}
@@ -175,7 +176,7 @@ fn test_should_panic_good_message() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(result, TrOk);
}
@@ -205,7 +206,7 @@ fn test_should_panic_bad_message() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
}
@@ -239,7 +240,7 @@ fn test_should_panic_non_string_message_type() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(result, TrFailedMsg(failed_msg));
}
@@ -267,15 +268,7 @@ fn test_should_panic_but_succeeds() {
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
- run_test(
- &TestOpts::new(),
- false,
- TestId(0),
- desc,
- RunStrategy::InProcess,
- tx,
- Concurrent::No,
- );
+ run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
assert_eq!(
result,
@@ -306,7 +299,7 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
let test_opts = TestOpts { time_options, ..TestOpts::new() };
let (tx, rx) = channel();
- run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx);
let exec_time = rx.recv().unwrap().exec_time;
exec_time
}
@@ -345,7 +338,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {
let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
let (tx, rx) = channel();
- run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+ run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx);
let result = rx.recv().unwrap().result;
result
diff --git a/library/unwind/Cargo.toml b/library/unwind/Cargo.toml
index 69fce8d77..eab2717c4 100644
--- a/library/unwind/Cargo.toml
+++ b/library/unwind/Cargo.toml
@@ -17,10 +17,10 @@ doc = false
core = { path = "../core" }
libc = { version = "0.2.79", features = ['rustc-dep-of-std'], default-features = false }
compiler_builtins = "0.1.0"
-cfg-if = "0.1.8"
+cfg-if = "1.0"
[build-dependencies]
-cc = "1.0.69"
+cc = "1.0.76"
[features]
diff --git a/library/unwind/build.rs b/library/unwind/build.rs
index 31af39025..5c3c02fb6 100644
--- a/library/unwind/build.rs
+++ b/library/unwind/build.rs
@@ -21,29 +21,5 @@ fn main() {
if has_unwind {
println!("cargo:rustc-cfg=feature=\"system-llvm-libunwind\"");
}
- } else if target.contains("freebsd") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("netbsd") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("openbsd") {
- if target.contains("sparc64") {
- println!("cargo:rustc-link-lib=gcc");
- } else {
- println!("cargo:rustc-link-lib=c++abi");
- }
- } else if target.contains("solaris") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("illumos") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("dragonfly") {
- println!("cargo:rustc-link-lib=gcc_pic");
- } else if target.ends_with("pc-windows-gnu") {
- // This is handled in the target spec with late_link_args_[static|dynamic]
- } else if target.contains("uwp-windows-gnu") {
- println!("cargo:rustc-link-lib=unwind");
- } else if target.contains("haiku") {
- println!("cargo:rustc-link-lib=gcc_s");
- } else if target.contains("redox") {
- // redox is handled in lib.rs
}
}
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs
index 46fe50cb9..3753071d5 100644
--- a/library/unwind/src/lib.rs
+++ b/library/unwind/src/lib.rs
@@ -15,7 +15,6 @@ cfg_if::cfg_if! {
target_os = "espidf",
))] {
// These "unix" family members do not have unwinder.
- // Note this also matches x86_64-unknown-none-linuxkernel.
} else if #[cfg(any(
unix,
windows,
@@ -105,6 +104,26 @@ extern "C" {}
#[link(name = "unwind", kind = "static", modifiers = "-bundle")]
extern "C" {}
-#[cfg(all(target_os = "windows", target_env = "gnu", target_abi = "llvm"))]
-#[link(name = "unwind", kind = "static", modifiers = "-bundle")]
+#[cfg(any(target_os = "freebsd", target_os = "netbsd"))]
+#[link(name = "gcc_s")]
+extern "C" {}
+
+#[cfg(all(target_os = "openbsd", target_arch = "sparc64"))]
+#[link(name = "gcc")]
+extern "C" {}
+
+#[cfg(all(target_os = "openbsd", not(target_arch = "sparc64")))]
+#[link(name = "c++abi")]
+extern "C" {}
+
+#[cfg(any(target_os = "solaris", target_os = "illumos"))]
+#[link(name = "gcc_s")]
+extern "C" {}
+
+#[cfg(target_os = "dragonfly")]
+#[link(name = "gcc_pic")]
+extern "C" {}
+
+#[cfg(target_os = "haiku")]
+#[link(name = "gcc_s")]
extern "C" {}
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index a5b6193b0..0fa11f376 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -27,7 +27,10 @@ pub type _Unwind_Trace_Fn =
#[cfg(target_arch = "x86")]
pub const unwinder_private_data_size: usize = 5;
-#[cfg(target_arch = "x86_64")]
+#[cfg(all(target_arch = "x86_64", not(target_os = "windows")))]
+pub const unwinder_private_data_size: usize = 2;
+
+#[cfg(all(target_arch = "x86_64", target_os = "windows"))]
pub const unwinder_private_data_size: usize = 6;
#[cfg(all(target_arch = "arm", not(any(target_os = "ios", target_os = "watchos"))))]
@@ -36,9 +39,12 @@ pub const unwinder_private_data_size: usize = 20;
#[cfg(all(target_arch = "arm", any(target_os = "ios", target_os = "watchos")))]
pub const unwinder_private_data_size: usize = 5;
-#[cfg(all(target_arch = "aarch64", target_pointer_width = "64"))]
+#[cfg(all(target_arch = "aarch64", target_pointer_width = "64", not(target_os = "windows")))]
pub const unwinder_private_data_size: usize = 2;
+#[cfg(all(target_arch = "aarch64", target_pointer_width = "64", target_os = "windows"))]
+pub const unwinder_private_data_size: usize = 6;
+
#[cfg(all(target_arch = "aarch64", target_pointer_width = "32"))]
pub const unwinder_private_data_size: usize = 5;
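These constants size the opaque private area of the unwinder's exception object, which is why they must track each target's ABI exactly (Windows x86_64 and aarch64 use the larger SEH-compatible layout). Roughly how the constant is consumed further down in this file (sketch abbreviated to the relevant field):

    #[repr(C)]
    pub struct _Unwind_Exception {
        pub exception_class: _Unwind_Exception_Class,
        pub exception_cleanup: _Unwind_Exception_Cleanup_Fn,
        // The per-target constant above fixes the size of this opaque area.
        pub private: [_Unwind_Word; unwinder_private_data_size],
    }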
@@ -90,7 +96,10 @@ pub type _Unwind_Exception_Cleanup_Fn =
// rustc_codegen_ssa::src::back::symbol_export, rustc_middle::middle::exported_symbols
// and RFC 2841
#[cfg_attr(
- all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ any(
+ all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")),
+ all(target_os = "windows", target_env = "gnu", target_abi = "llvm")
+ ),
link(name = "unwind", kind = "static", modifiers = "-bundle")
)]
extern "C-unwind" {
@@ -269,7 +278,7 @@ if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] {
} // cfg_if!
cfg_if::cfg_if! {
-if #[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] {
+if #[cfg(all(windows, any(target_arch = "aarch64", target_arch = "x86_64"), target_env = "gnu"))] {
// We declare these as opaque types. This is fine since you just need to
// pass them to _GCC_specific_handler and forget about them.
pub enum EXCEPTION_RECORD {}