path: root/library
Diffstat (limited to '')
-rw-r--r--  library/alloc/Cargo.toml | 4
-rw-r--r--  library/alloc/benches/slice.rs | 4
-rw-r--r--  library/alloc/src/alloc.rs | 30
-rw-r--r--  library/alloc/src/boxed.rs | 34
-rw-r--r--  library/alloc/src/boxed/thin.rs | 47
-rw-r--r--  library/alloc/src/collections/binary_heap.rs | 1721
-rw-r--r--  library/alloc/src/collections/binary_heap/mod.rs | 1766
-rw-r--r--  library/alloc/src/collections/binary_heap/tests.rs | 121
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs | 6
-rw-r--r--  library/alloc/src/collections/btree/mod.rs | 3
-rw-r--r--  library/alloc/src/collections/btree/node.rs | 28
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs | 4
-rw-r--r--  library/alloc/src/collections/linked_list/tests.rs | 69
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs | 4
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs | 186
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_from_iter.rs | 33
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs | 42
-rw-r--r--  library/alloc/src/fmt.rs | 4
-rw-r--r--  library/alloc/src/lib.rs | 30
-rw-r--r--  library/alloc/src/rc.rs | 8
-rw-r--r--  library/alloc/src/slice.rs | 353
-rw-r--r--  library/alloc/src/slice/tests.rs | 359
-rw-r--r--  library/alloc/src/str.rs | 14
-rw-r--r--  library/alloc/src/string.rs | 13
-rw-r--r--  library/alloc/src/sync.rs | 14
-rw-r--r--  library/alloc/src/testing/crash_test.rs (renamed from library/alloc/src/collections/btree/testing/crash_test.rs) | 0
-rw-r--r--  library/alloc/src/testing/mod.rs (renamed from library/alloc/src/collections/btree/testing/mod.rs) | 0
-rw-r--r--  library/alloc/src/testing/ord_chaos.rs (renamed from library/alloc/src/collections/btree/testing/ord_chaos.rs) | 0
-rw-r--r--  library/alloc/src/testing/rng.rs (renamed from library/alloc/src/collections/btree/testing/rng.rs) | 0
-rw-r--r--  library/alloc/src/vec/drain.rs | 6
-rw-r--r--  library/alloc/src/vec/into_iter.rs | 52
-rw-r--r--  library/alloc/src/vec/is_zero.rs | 22
-rw-r--r--  library/alloc/src/vec/mod.rs | 15
-rw-r--r--  library/alloc/src/vec/splice.rs | 6
-rw-r--r--  library/alloc/tests/autotraits.rs | 2
-rw-r--r--  library/alloc/tests/lib.rs | 1
-rw-r--r--  library/alloc/tests/slice.rs | 349
-rw-r--r--  library/alloc/tests/vec.rs | 31
-rw-r--r--  library/alloc/tests/vec_deque.rs | 50
-rw-r--r--  library/core/Cargo.toml | 4
-rw-r--r--  library/core/benches/num/int_log/mod.rs | 4
-rw-r--r--  library/core/src/any.rs | 5
-rw-r--r--  library/core/src/array/iter.rs | 6
-rw-r--r--  library/core/src/array/mod.rs | 4
-rw-r--r--  library/core/src/borrow.rs | 2
-rw-r--r--  library/core/src/cell.rs | 35
-rw-r--r--  library/core/src/cell/lazy.rs | 8
-rw-r--r--  library/core/src/cell/once.rs | 13
-rw-r--r--  library/core/src/char/decode.rs | 2
-rw-r--r--  library/core/src/cmp.rs | 82
-rw-r--r--  library/core/src/const_closure.rs | 29
-rw-r--r--  library/core/src/convert/num.rs | 20
-rw-r--r--  library/core/src/ffi/mod.rs | 55
-rw-r--r--  library/core/src/fmt/mod.rs | 13
-rw-r--r--  library/core/src/future/future.rs | 1
-rw-r--r--  library/core/src/future/mod.rs | 9
-rw-r--r--  library/core/src/hash/mod.rs | 4
-rw-r--r--  library/core/src/hint.rs | 69
-rw-r--r--  library/core/src/intrinsics.rs | 69
-rw-r--r--  library/core/src/intrinsics/mir.rs | 302
-rw-r--r--  library/core/src/iter/range.rs | 2
-rw-r--r--  library/core/src/iter/sources/empty.rs | 7
-rw-r--r--  library/core/src/iter/sources/from_generator.rs | 23
-rw-r--r--  library/core/src/iter/sources/once_with.rs | 14
-rw-r--r--  library/core/src/iter/sources/repeat_n.rs | 2
-rw-r--r--  library/core/src/iter/sources/repeat_with.rs | 10
-rw-r--r--  library/core/src/iter/traits/accum.rs | 8
-rw-r--r--  library/core/src/iter/traits/iterator.rs | 43
-rw-r--r--  library/core/src/lib.rs | 30
-rw-r--r--  library/core/src/macros/mod.rs | 46
-rw-r--r--  library/core/src/marker.rs | 20
-rw-r--r--  library/core/src/mem/mod.rs | 3
-rw-r--r--  library/core/src/num/dec2flt/fpu.rs | 2
-rw-r--r--  library/core/src/num/f32.rs | 2
-rw-r--r--  library/core/src/num/f64.rs | 2
-rw-r--r--  library/core/src/num/int_macros.rs | 111
-rw-r--r--  library/core/src/ops/function.rs | 319
-rw-r--r--  library/core/src/ops/index.rs | 2
-rw-r--r--  library/core/src/ops/mod.rs | 10
-rw-r--r--  library/core/src/ops/unsize.rs | 20
-rw-r--r--  library/core/src/option.rs | 57
-rw-r--r--  library/core/src/panic.rs | 8
-rw-r--r--  library/core/src/panic/panic_info.rs | 2
-rw-r--r--  library/core/src/panicking.rs | 39
-rw-r--r--  library/core/src/pin.rs | 50
-rw-r--r--  library/core/src/prelude/mod.rs | 8
-rw-r--r--  library/core/src/prelude/v1.rs | 9
-rw-r--r--  library/core/src/ptr/alignment.rs | 12
-rw-r--r--  library/core/src/ptr/const_ptr.rs | 110
-rw-r--r--  library/core/src/ptr/metadata.rs | 2
-rw-r--r--  library/core/src/ptr/mod.rs | 54
-rw-r--r--  library/core/src/ptr/mut_ptr.rs | 110
-rw-r--r--  library/core/src/ptr/non_null.rs | 17
-rw-r--r--  library/core/src/result.rs | 7
-rw-r--r--  library/core/src/slice/iter.rs | 12
-rw-r--r--  library/core/src/slice/iter/macros.rs | 22
-rw-r--r--  library/core/src/slice/mod.rs | 61
-rw-r--r--  library/core/src/slice/sort.rs | 544
-rw-r--r--  library/core/src/str/iter.rs | 185
-rw-r--r--  library/core/src/str/mod.rs | 8
-rw-r--r--  library/core/src/sync/atomic.rs | 40
-rw-r--r--  library/core/src/sync/exclusive.rs | 4
-rw-r--r--  library/core/src/task/poll.rs | 2
-rw-r--r--  library/core/src/task/wake.rs | 10
-rw-r--r--  library/core/src/unicode/mod.rs | 2
-rw-r--r--  library/core/tests/any.rs | 1
-rw-r--r--  library/core/tests/char.rs | 4
-rw-r--r--  library/core/tests/lazy.rs | 6
-rw-r--r--  library/core/tests/lib.rs | 14
-rw-r--r--  library/core/tests/mem.rs | 1
-rw-r--r--  library/core/tests/num/flt2dec/random.rs | 6
-rw-r--r--  library/core/tests/ptr.rs | 20
-rw-r--r--  library/core/tests/slice.rs | 11
-rw-r--r--  library/core/tests/str.rs | 2
-rw-r--r--  library/core/tests/task.rs | 8
-rw-r--r--  library/core/tests/time.rs | 26
-rw-r--r--  library/panic_abort/src/lib.rs | 6
-rw-r--r--  library/panic_unwind/src/lib.rs | 4
-rw-r--r--  library/proc_macro/src/bridge/client.rs | 2
-rw-r--r--  library/proc_macro/src/bridge/fxhash.rs | 6
-rw-r--r--  library/proc_macro/src/bridge/server.rs | 2
-rw-r--r--  library/proc_macro/src/lib.rs | 2
-rw-r--r--  library/rustc-std-workspace-alloc/lib.rs | 2
-rw-r--r--  library/std/Cargo.toml | 5
-rw-r--r--  library/std/src/alloc.rs | 2
-rw-r--r--  library/std/src/backtrace.rs | 8
-rw-r--r--  library/std/src/collections/hash/map/tests.rs | 9
-rw-r--r--  library/std/src/collections/hash/set.rs | 2
-rw-r--r--  library/std/src/env.rs | 9
-rw-r--r--  library/std/src/fs.rs | 12
-rw-r--r--  library/std/src/fs/tests.rs | 50
-rw-r--r--  library/std/src/io/buffered/tests.rs | 4
-rw-r--r--  library/std/src/io/error/repr_bitpacked.rs | 16
-rw-r--r--  library/std/src/io/mod.rs | 10
-rw-r--r--  library/std/src/io/stdio.rs | 3
-rw-r--r--  library/std/src/lib.rs | 37
-rw-r--r--  library/std/src/macros.rs | 1
-rw-r--r--  library/std/src/net/ip_addr.rs | 9
-rw-r--r--  library/std/src/os/fd/mod.rs | 6
-rw-r--r--  library/std/src/os/fd/owned.rs | 2
-rw-r--r--  library/std/src/os/net/linux_ext/addr.rs | 2
-rw-r--r--  library/std/src/os/unix/net/addr.rs | 2
-rw-r--r--  library/std/src/panic.rs | 3
-rw-r--r--  library/std/src/panicking.rs | 12
-rw-r--r--  library/std/src/path.rs | 120
-rw-r--r--  library/std/src/personality/dwarf/eh.rs | 31
-rw-r--r--  library/std/src/prelude/v1.rs | 9
-rw-r--r--  library/std/src/process.rs | 36
-rw-r--r--  library/std/src/process/tests.rs | 94
-rw-r--r--  library/std/src/rt.rs | 6
-rw-r--r--  library/std/src/sync/lazy_lock.rs | 10
-rw-r--r--  library/std/src/sync/lazy_lock/tests.rs | 6
-rw-r--r--  library/std/src/sync/mod.rs | 3
-rw-r--r--  library/std/src/sync/mutex/tests.rs | 2
-rw-r--r--  library/std/src/sync/once_lock.rs | 17
-rw-r--r--  library/std/src/sync/remutex.rs (renamed from library/std/src/sys_common/remutex.rs) | 0
-rw-r--r--  library/std/src/sync/remutex/tests.rs | 60
-rw-r--r--  library/std/src/sync/rwlock/tests.rs | 4
-rw-r--r--  library/std/src/sys/itron/thread.rs | 27
-rw-r--r--  library/std/src/sys/sgx/mod.rs | 1
-rw-r--r--  library/std/src/sys/sgx/thread.rs | 21
-rw-r--r--  library/std/src/sys/sgx/thread_parking.rs | 23
-rw-r--r--  library/std/src/sys/unix/android.rs | 4
-rw-r--r--  library/std/src/sys/unix/fs.rs | 154
-rw-r--r--  library/std/src/sys/unix/kernel_copy.rs | 8
-rw-r--r--  library/std/src/sys/unix/locks/pthread_condvar.rs | 93
-rw-r--r--  library/std/src/sys/unix/mod.rs | 23
-rw-r--r--  library/std/src/sys/unix/net.rs | 2
-rw-r--r--  library/std/src/sys/unix/pipe.rs | 4
-rw-r--r--  library/std/src/sys/unix/process/process_common.rs | 67
-rw-r--r--  library/std/src/sys/unix/process/process_fuchsia.rs | 15
-rw-r--r--  library/std/src/sys/unix/process/process_unix.rs | 20
-rw-r--r--  library/std/src/sys/unix/process/process_unix/tests.rs | 8
-rw-r--r--  library/std/src/sys/unix/process/process_unsupported.rs | 4
-rw-r--r--  library/std/src/sys/unix/process/process_vxworks.rs | 11
-rw-r--r--  library/std/src/sys/unix/stack_overflow.rs | 7
-rw-r--r--  library/std/src/sys/unix/thread.rs | 46
-rw-r--r--  library/std/src/sys/unix/thread_parker/mod.rs | 32
-rw-r--r--  library/std/src/sys/unix/thread_parker/netbsd.rs | 113
-rw-r--r--  library/std/src/sys/unix/thread_parking/darwin.rs (renamed from library/std/src/sys/unix/thread_parker/darwin.rs) | 2
-rw-r--r--  library/std/src/sys/unix/thread_parking/mod.rs | 32
-rw-r--r--  library/std/src/sys/unix/thread_parking/netbsd.rs | 52
-rw-r--r--  library/std/src/sys/unix/thread_parking/pthread.rs (renamed from library/std/src/sys/unix/thread_parker/pthread.rs) | 12
-rw-r--r--  library/std/src/sys/unix/time.rs | 3
-rw-r--r--  library/std/src/sys/unix/weak.rs | 53
-rw-r--r--  library/std/src/sys/unsupported/mod.rs | 1
-rw-r--r--  library/std/src/sys/unsupported/once.rs | 89
-rw-r--r--  library/std/src/sys/unsupported/pipe.rs | 4
-rw-r--r--  library/std/src/sys/unsupported/process.rs | 4
-rw-r--r--  library/std/src/sys/wasi/mod.rs | 2
-rw-r--r--  library/std/src/sys/wasm/mod.rs | 2
-rw-r--r--  library/std/src/sys/windows/c.rs | 13
-rw-r--r--  library/std/src/sys/windows/mod.rs | 2
-rw-r--r--  library/std/src/sys/windows/os.rs | 2
-rw-r--r--  library/std/src/sys/windows/pipe.rs | 6
-rw-r--r--  library/std/src/sys/windows/process.rs | 5
-rw-r--r--  library/std/src/sys/windows/rand.rs | 121
-rw-r--r--  library/std/src/sys/windows/thread.rs | 2
-rw-r--r--  library/std/src/sys/windows/thread_parker.rs | 253
-rw-r--r--  library/std/src/sys/windows/thread_parking.rs | 253
-rw-r--r--  library/std/src/sys_common/backtrace.rs | 6
-rw-r--r--  library/std/src/sys_common/io.rs | 3
-rw-r--r--  library/std/src/sys_common/mod.rs | 3
-rw-r--r--  library/std/src/sys_common/once/mod.rs | 27
-rw-r--r--  library/std/src/sys_common/once/queue.rs (renamed from library/std/src/sys_common/once/generic.rs) | 0
-rw-r--r--  library/std/src/sys_common/process.rs | 42
-rw-r--r--  library/std/src/sys_common/remutex/tests.rs | 60
-rw-r--r--  library/std/src/sys_common/thread_local_key.rs | 25
-rw-r--r--  library/std/src/sys_common/thread_parker/generic.rs | 125
-rw-r--r--  library/std/src/sys_common/thread_parker/mod.rs | 23
-rw-r--r--  library/std/src/sys_common/thread_parking/futex.rs (renamed from library/std/src/sys_common/thread_parker/futex.rs) | 2
-rw-r--r--  library/std/src/sys_common/thread_parking/generic.rs | 125
-rw-r--r--  library/std/src/sys_common/thread_parking/id.rs | 108
-rw-r--r--  library/std/src/sys_common/thread_parking/mod.rs | 29
-rw-r--r--  library/std/src/sys_common/thread_parking/wait_flag.rs (renamed from library/std/src/sys_common/thread_parker/wait_flag.rs) | 2
-rw-r--r--  library/std/src/thread/local.rs | 23
-rw-r--r--  library/std/src/thread/local/tests.rs | 22
-rw-r--r--  library/std/src/thread/mod.rs | 26
-rw-r--r--  library/std/tests/env.rs | 20
-rw-r--r--  library/std/tests/run-time-detect.rs | 3
-rw-r--r--  library/test/src/cli.rs | 12
-rw-r--r--  library/test/src/console.rs | 6
-rw-r--r--  library/test/src/formatters/json.rs | 23
-rw-r--r--  library/test/src/formatters/junit.rs | 16
-rw-r--r--  library/test/src/formatters/mod.rs | 2
-rw-r--r--  library/test/src/formatters/pretty.rs | 18
-rw-r--r--  library/test/src/formatters/terse.rs | 20
-rw-r--r--  library/test/src/lib.rs | 9
-rw-r--r--  library/test/src/term/terminfo/mod.rs | 6
-rw-r--r--  library/test/src/term/terminfo/parm.rs | 8
-rw-r--r--  library/test/src/term/terminfo/searcher.rs | 6
-rw-r--r--  library/test/src/test_result.rs | 9
-rw-r--r--  library/test/src/time.rs | 6
-rw-r--r--  library/test/src/types.rs | 2
-rw-r--r--  library/unwind/src/libunwind.rs | 2
235 files changed, 6390 insertions(+), 4968 deletions(-)
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index 265020209..95c07abf7 100644
--- a/library/alloc/Cargo.toml
+++ b/library/alloc/Cargo.toml
@@ -13,8 +13,8 @@ core = { path = "../core" }
compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
[dev-dependencies]
-rand = "0.7"
-rand_xorshift = "0.2"
+rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
+rand_xorshift = "0.3.0"
[[test]]
name = "collectionstests"
diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs
index bd6f38f2f..b62be9d39 100644
--- a/library/alloc/benches/slice.rs
+++ b/library/alloc/benches/slice.rs
@@ -1,6 +1,6 @@
use std::{mem, ptr};
-use rand::distributions::{Alphanumeric, Standard};
+use rand::distributions::{Alphanumeric, DistString, Standard};
use rand::Rng;
use test::{black_box, Bencher};
@@ -218,7 +218,7 @@ fn gen_strings(len: usize) -> Vec<String> {
let mut v = vec![];
for _ in 0..len {
let n = rng.gen::<usize>() % 20 + 1;
- v.push((&mut rng).sample_iter(&Alphanumeric).take(n).collect());
+ v.push(Alphanumeric.sample_string(&mut rng, n));
}
v
}
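
The hunk above tracks rand's 0.7 → 0.8 API change: the `sample_iter(&Alphanumeric).take(n).collect()` idiom is replaced by the `DistString` trait, which builds the `String` directly. A minimal sketch of the new call shape (the seeded `XorShiftRng` mirrors the `rand_xorshift` dev-dependency above; the constant seed is illustrative):

```
use rand::distributions::{Alphanumeric, DistString};
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;

fn gen_string(len: usize) -> String {
    // Deterministic RNG, since benchmarks prefer reproducible inputs.
    let mut rng = XorShiftRng::seed_from_u64(42);
    // sample_string draws `len` alphanumeric chars and builds the String in one call.
    Alphanumeric.sample_string(&mut rng, len)
}
```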
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index e5fbfc557..3a797bd5e 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -20,10 +20,10 @@ use core::marker::Destruct;
mod tests;
extern "Rust" {
- // These are the magic symbols to call the global allocator. rustc generates
+ // These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
- // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
@@ -353,7 +353,7 @@ pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Dest
#[cfg(not(no_global_oom_handling))]
extern "Rust" {
- // This is the magic symbol to call the global alloc error handler. rustc generates
+ // This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
@@ -402,20 +402,20 @@ pub mod __alloc_error_handler {
// `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
- panic!("memory allocation of {size} bytes failed")
- }
-
- #[cfg(bootstrap)]
- #[rustc_std_internal_symbol]
- pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
- use crate::alloc::Layout;
-
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
- #[lang = "oom"]
- fn oom_impl(layout: Layout) -> !;
+ // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+ // Its value depends on the -Zoom={panic,abort} compiler option.
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+
+ #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {size} bytes failed")
+ } else {
+ core::panicking::panic_nounwind_fmt(format_args!(
+ "memory allocation of {size} bytes failed"
+ ))
}
- unsafe { oom_impl(layout) }
}
}
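
For context on the hunk above: the old bootstrap path routed allocation failure through the `oom` lang item, while the new default handler branches on `__rust_alloc_error_handler_should_panic` to either panic (unwinding) or abort via `panic_nounwind_fmt`. User crates can still install their own handler with the unstable `alloc_error_handler` attribute; a hedged sketch of what that looks like in a `#![no_std]` binary (nightly-only; the `#[global_allocator]` and `#[panic_handler]` items such a binary also needs are omitted for brevity):

```
#![feature(alloc_error_handler)]
#![no_std]

use core::alloc::Layout;

#[alloc_error_handler]
fn oom(layout: Layout) -> ! {
    // rustc wires __rust_alloc_error_handler to this function instead of
    // the default __rdl_oom shown in the diff above.
    panic!("allocation of {} bytes failed", layout.size())
}
```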
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index e5f6b0c0c..a563b2587 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -158,7 +158,6 @@ use core::hash::{Hash, Hasher};
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::iter::{FusedIterator, Iterator};
-#[cfg(not(bootstrap))]
use core::marker::Tuple;
use core::marker::{Destruct, Unpin, Unsize};
use core::mem;
@@ -954,7 +953,7 @@ impl<T: ?Sized> Box<T> {
/// [`Layout`]: crate::Layout
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
- #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
+ #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
pub unsafe fn from_raw(raw: *mut T) -> Self {
unsafe { Self::from_raw_in(raw, Global) }
}
@@ -1981,17 +1980,6 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
- type Output = <F as FnOnce<Args>>::Output;
-
- extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
- <F as FnOnce<Args>>::call_once(*self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
@@ -2001,15 +1989,6 @@ impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F
}
}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
- extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
- <F as FnMut<Args>>::call_mut(self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
@@ -2017,15 +1996,6 @@ impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F,
}
}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
- extern "rust-call" fn call(&self, args: Args) -> Self::Output {
- <F as Fn<Args>>::call(self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
@@ -2033,7 +2003,7 @@ impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index c477c4490..c1a82e452 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -226,24 +226,45 @@ impl<H> WithHeader<H> {
// - Assumes that either `value` can be dereferenced, or is the
// `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
+ struct DropGuard<H> {
+ ptr: NonNull<u8>,
+ value_layout: Layout,
+ _marker: PhantomData<H>,
+ }
+
+ impl<H> Drop for DropGuard<H> {
+ fn drop(&mut self) {
+ unsafe {
+ // SAFETY: Layout must have been computable if we're in drop
+ let (layout, value_offset) =
+ WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();
+
+ // Note: Don't deallocate if the layout size is zero, because the pointer
+ // didn't come from the allocator.
+ if layout.size() != 0 {
+ alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
+ } else {
+ debug_assert!(
+ value_offset == 0
+ && mem::size_of::<H>() == 0
+ && self.value_layout.size() == 0
+ );
+ }
+ }
+ }
+ }
+
unsafe {
- let value_layout = Layout::for_value_raw(value);
- // SAFETY: Layout must have been computable if we're in drop
- let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
+ // `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds.
+ let _guard = DropGuard {
+ ptr: self.0,
+ value_layout: Layout::for_value_raw(value),
+ _marker: PhantomData::<H>,
+ };
// We only drop the value because the Pointee trait requires that the metadata is copy
// aka trivially droppable.
ptr::drop_in_place::<T>(value);
-
- // Note: Don't deallocate if the layout size is zero, because the pointer
- // didn't come from the allocator.
- if layout.size() != 0 {
- alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
- } else {
- debug_assert!(
- value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
- );
- }
}
}
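
The `DropGuard` added above is an instance of a general pattern: put the cleanup in a guard's `Drop` impl so it runs whether `ptr::drop_in_place` returns normally or unwinds. A minimal, self-contained sketch of the same shape (illustrative only, not the `ThinBox` code):

```
struct Guard<'a> {
    log: &'a mut Vec<&'static str>,
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        // Runs on both normal exit and unwinding, like the dealloc above.
        self.log.push("cleaned up");
    }
}

fn risky(log: &mut Vec<&'static str>, fail: bool) {
    let _guard = Guard { log };
    if fail {
        panic!("mid-operation failure"); // _guard still drops here
    }
    // Normal path: _guard drops at end of scope, running the cleanup once.
}
```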
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
deleted file mode 100644
index 4583bc9a1..000000000
--- a/library/alloc/src/collections/binary_heap.rs
+++ /dev/null
@@ -1,1721 +0,0 @@
-//! A priority queue implemented with a binary heap.
-//!
-//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
-//! Checking the largest element is *O*(1). Converting a vector to a binary heap
-//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
-//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
-//! in-place heapsort.
-//!
-//! # Examples
-//!
-//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
-//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
-//! It shows how to use [`BinaryHeap`] with custom types.
-//!
-//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
-//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
-//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
-//!
-//! ```
-//! use std::cmp::Ordering;
-//! use std::collections::BinaryHeap;
-//!
-//! #[derive(Copy, Clone, Eq, PartialEq)]
-//! struct State {
-//! cost: usize,
-//! position: usize,
-//! }
-//!
-//! // The priority queue depends on `Ord`.
-//! // Explicitly implement the trait so the queue becomes a min-heap
-//! // instead of a max-heap.
-//! impl Ord for State {
-//! fn cmp(&self, other: &Self) -> Ordering {
-//! // Notice that we flip the ordering on costs.
-//! // In case of a tie we compare positions - this step is necessary
-//! // to make implementations of `PartialEq` and `Ord` consistent.
-//! other.cost.cmp(&self.cost)
-//! .then_with(|| self.position.cmp(&other.position))
-//! }
-//! }
-//!
-//! // `PartialOrd` needs to be implemented as well.
-//! impl PartialOrd for State {
-//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-//! Some(self.cmp(other))
-//! }
-//! }
-//!
-//! // Each node is represented as a `usize`, for a shorter implementation.
-//! struct Edge {
-//! node: usize,
-//! cost: usize,
-//! }
-//!
-//! // Dijkstra's shortest path algorithm.
-//!
-//! // Start at `start` and use `dist` to track the current shortest distance
-//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
-//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
-//! // for a simpler implementation.
-//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
-//! // dist[node] = current shortest distance from `start` to `node`
-//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
-//!
-//! let mut heap = BinaryHeap::new();
-//!
-//! // We're at `start`, with a zero cost
-//! dist[start] = 0;
-//! heap.push(State { cost: 0, position: start });
-//!
-//! // Examine the frontier with lower cost nodes first (min-heap)
-//! while let Some(State { cost, position }) = heap.pop() {
-//! // Alternatively we could have continued to find all shortest paths
-//! if position == goal { return Some(cost); }
-//!
-//! // Important as we may have already found a better way
-//! if cost > dist[position] { continue; }
-//!
-//! // For each node we can reach, see if we can find a way with
-//! // a lower cost going through this node
-//! for edge in &adj_list[position] {
-//! let next = State { cost: cost + edge.cost, position: edge.node };
-//!
-//! // If so, add it to the frontier and continue
-//! if next.cost < dist[next.position] {
-//! heap.push(next);
-//! // Relaxation, we have now found a better way
-//! dist[next.position] = next.cost;
-//! }
-//! }
-//! }
-//!
-//! // Goal not reachable
-//! None
-//! }
-//!
-//! fn main() {
-//! // This is the directed graph we're going to use.
-//! // The node numbers correspond to the different states,
-//! // and the edge weights symbolize the cost of moving
-//! // from one node to another.
-//! // Note that the edges are one-way.
-//! //
-//! // 7
-//! // +-----------------+
-//! // | |
-//! // v 1 2 | 2
-//! // 0 -----> 1 -----> 3 ---> 4
-//! // | ^ ^ ^
-//! // | | 1 | |
-//! // | | | 3 | 1
-//! // +------> 2 -------+ |
-//! // 10 | |
-//! // +---------------+
-//! //
-//! // The graph is represented as an adjacency list where each index,
-//! // corresponding to a node value, has a list of outgoing edges.
-//! // Chosen for its efficiency.
-//! let graph = vec![
-//! // Node 0
-//! vec![Edge { node: 2, cost: 10 },
-//! Edge { node: 1, cost: 1 }],
-//! // Node 1
-//! vec![Edge { node: 3, cost: 2 }],
-//! // Node 2
-//! vec![Edge { node: 1, cost: 1 },
-//! Edge { node: 3, cost: 3 },
-//! Edge { node: 4, cost: 1 }],
-//! // Node 3
-//! vec![Edge { node: 0, cost: 7 },
-//! Edge { node: 4, cost: 2 }],
-//! // Node 4
-//! vec![]];
-//!
-//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
-//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
-//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
-//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
-//! assert_eq!(shortest_path(&graph, 4, 0), None);
-//! }
-//! ```
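
The module docs above note that heapifying a vector is in-place and O(n), and that a heap converts to a sorted vector in-place in O(n * log(n)). In terms of the public API being moved here, that corresponds to (a small usage sketch, not part of the original file):

```
use std::collections::BinaryHeap;

let v = vec![3, 1, 4, 1, 5, 9, 2, 6];
let heap = BinaryHeap::from(v);      // in-place heapify, O(n)
let sorted = heap.into_sorted_vec(); // in-place heapsort, O(n * log(n))
assert_eq!(sorted, [1, 1, 2, 3, 4, 5, 6, 9]);
```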
-
-#![allow(missing_docs)]
-#![stable(feature = "rust1", since = "1.0.0")]
-
-use core::fmt;
-use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
-use core::mem::{self, swap, ManuallyDrop};
-use core::ops::{Deref, DerefMut};
-use core::ptr;
-
-use crate::collections::TryReserveError;
-use crate::slice;
-use crate::vec::{self, AsVecIntoIter, Vec};
-
-use super::SpecExtend;
-
-#[cfg(test)]
-mod tests;
-
-/// A priority queue implemented with a binary heap.
-///
-/// This will be a max-heap.
-///
-/// It is a logic error for an item to be modified in such a way that the
-/// item's ordering relative to any other item, as determined by the [`Ord`]
-/// trait, changes while it is in the heap. This is normally only possible
-/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
-/// behavior resulting from such a logic error is not specified, but will
-/// be encapsulated to the `BinaryHeap` that observed the logic error and not
-/// result in undefined behavior. This could include panics, incorrect results,
-/// aborts, memory leaks, and non-termination.
-///
-/// # Examples
-///
-/// ```
-/// use std::collections::BinaryHeap;
-///
-/// // Type inference lets us omit an explicit type signature (which
-/// // would be `BinaryHeap<i32>` in this example).
-/// let mut heap = BinaryHeap::new();
-///
-/// // We can use peek to look at the next item in the heap. In this case,
-/// // there's no items in there yet so we get None.
-/// assert_eq!(heap.peek(), None);
-///
-/// // Let's add some scores...
-/// heap.push(1);
-/// heap.push(5);
-/// heap.push(2);
-///
-/// // Now peek shows the most important item in the heap.
-/// assert_eq!(heap.peek(), Some(&5));
-///
-/// // We can check the length of a heap.
-/// assert_eq!(heap.len(), 3);
-///
-/// // We can iterate over the items in the heap, although they are returned in
-/// // a random order.
-/// for x in &heap {
-/// println!("{x}");
-/// }
-///
-/// // If we instead pop these scores, they should come back in order.
-/// assert_eq!(heap.pop(), Some(5));
-/// assert_eq!(heap.pop(), Some(2));
-/// assert_eq!(heap.pop(), Some(1));
-/// assert_eq!(heap.pop(), None);
-///
-/// // We can clear the heap of any remaining items.
-/// heap.clear();
-///
-/// // The heap should now be empty.
-/// assert!(heap.is_empty())
-/// ```
-///
-/// A `BinaryHeap` with a known list of items can be initialized from an array:
-///
-/// ```
-/// use std::collections::BinaryHeap;
-///
-/// let heap = BinaryHeap::from([1, 5, 2]);
-/// ```
-///
-/// ## Min-heap
-///
-/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
-/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
-/// value instead of the greatest one.
-///
-/// ```
-/// use std::collections::BinaryHeap;
-/// use std::cmp::Reverse;
-///
-/// let mut heap = BinaryHeap::new();
-///
-/// // Wrap values in `Reverse`
-/// heap.push(Reverse(1));
-/// heap.push(Reverse(5));
-/// heap.push(Reverse(2));
-///
-/// // If we pop these scores now, they should come back in the reverse order.
-/// assert_eq!(heap.pop(), Some(Reverse(1)));
-/// assert_eq!(heap.pop(), Some(Reverse(2)));
-/// assert_eq!(heap.pop(), Some(Reverse(5)));
-/// assert_eq!(heap.pop(), None);
-/// ```
-///
-/// # Time complexity
-///
-/// | [push] | [pop] | [peek]/[peek\_mut] |
-/// |---------|---------------|--------------------|
-/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
-///
-/// The value for `push` is an expected cost; the method documentation gives a
-/// more detailed analysis.
-///
-/// [`core::cmp::Reverse`]: core::cmp::Reverse
-/// [`Ord`]: core::cmp::Ord
-/// [`Cell`]: core::cell::Cell
-/// [`RefCell`]: core::cell::RefCell
-/// [push]: BinaryHeap::push
-/// [pop]: BinaryHeap::pop
-/// [peek]: BinaryHeap::peek
-/// [peek\_mut]: BinaryHeap::peek_mut
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
-pub struct BinaryHeap<T> {
- data: Vec<T>,
-}
-
-/// Structure wrapping a mutable reference to the greatest item on a
-/// `BinaryHeap`.
-///
-/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
-/// its documentation for more.
-///
-/// [`peek_mut`]: BinaryHeap::peek_mut
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-pub struct PeekMut<'a, T: 'a + Ord> {
- heap: &'a mut BinaryHeap<T>,
- sift: bool,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> Drop for PeekMut<'_, T> {
- fn drop(&mut self) {
- if self.sift {
- // SAFETY: PeekMut is only instantiated for non-empty heaps.
- unsafe { self.heap.sift_down(0) };
- }
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> Deref for PeekMut<'_, T> {
- type Target = T;
- fn deref(&self) -> &T {
- debug_assert!(!self.heap.is_empty());
- // SAFE: PeekMut is only instantiated for non-empty heaps
- unsafe { self.heap.data.get_unchecked(0) }
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> DerefMut for PeekMut<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- debug_assert!(!self.heap.is_empty());
- self.sift = true;
- // SAFE: PeekMut is only instantiated for non-empty heaps
- unsafe { self.heap.data.get_unchecked_mut(0) }
- }
-}
-
-impl<'a, T: Ord> PeekMut<'a, T> {
- /// Removes the peeked value from the heap and returns it.
- #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
- pub fn pop(mut this: PeekMut<'a, T>) -> T {
- let value = this.heap.pop().unwrap();
- this.sift = false;
- value
- }
-}
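
The `sift` flag and `PeekMut::pop` above make "replace the maximum in place" cheap: writing through `DerefMut` schedules exactly one `sift_down` on drop, and `pop` suppresses the redundant sift. A hedged usage sketch, e.g. keeping the k smallest items in a max-heap (`push_bounded` is a hypothetical helper, not std API):

```
use std::collections::BinaryHeap;

/// Keep the k smallest items seen so far in a max-heap of size k.
fn push_bounded(heap: &mut BinaryHeap<i32>, k: usize, item: i32) {
    if heap.len() < k {
        heap.push(item);
    } else if let Some(mut top) = heap.peek_mut() {
        if item < *top {
            // DerefMut sets the sift flag; dropping `top` re-sifts once.
            *top = item;
        }
    }
}
```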
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone> Clone for BinaryHeap<T> {
- fn clone(&self) -> Self {
- BinaryHeap { data: self.data.clone() }
- }
-
- fn clone_from(&mut self, source: &Self) {
- self.data.clone_from(&source.data);
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> Default for BinaryHeap<T> {
- /// Creates an empty `BinaryHeap<T>`.
- #[inline]
- fn default() -> BinaryHeap<T> {
- BinaryHeap::new()
- }
-}
-
-#[stable(feature = "binaryheap_debug", since = "1.4.0")]
-impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_list().entries(self.iter()).finish()
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- /// Creates an empty `BinaryHeap` as a max-heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub fn new() -> BinaryHeap<T> {
- BinaryHeap { data: vec![] }
- }
-
- /// Creates an empty `BinaryHeap` with at least the specified capacity.
- ///
- /// The binary heap will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::with_capacity(10);
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
- BinaryHeap { data: Vec::with_capacity(capacity) }
- }
-
- /// Returns a mutable reference to the greatest item in the binary heap, or
- /// `None` if it is empty.
- ///
- /// Note: If the `PeekMut` value is leaked, the heap may be in an
- /// inconsistent state.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// assert!(heap.peek_mut().is_none());
- ///
- /// heap.push(1);
- /// heap.push(5);
- /// heap.push(2);
- /// {
- /// let mut val = heap.peek_mut().unwrap();
- /// *val = 0;
- /// }
- /// assert_eq!(heap.peek(), Some(&2));
- /// ```
- ///
- /// # Time complexity
- ///
- /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
- /// otherwise it's *O*(1).
- #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
- pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
- if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
- }
-
- /// Removes the greatest item from the binary heap and returns it, or `None` if it
- /// is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert_eq!(heap.pop(), Some(3));
- /// assert_eq!(heap.pop(), Some(1));
- /// assert_eq!(heap.pop(), None);
- /// ```
- ///
- /// # Time complexity
- ///
- /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn pop(&mut self) -> Option<T> {
- self.data.pop().map(|mut item| {
- if !self.is_empty() {
- swap(&mut item, &mut self.data[0]);
- // SAFETY: !self.is_empty() means that self.len() > 0
- unsafe { self.sift_down_to_bottom(0) };
- }
- item
- })
- }
-
- /// Pushes an item onto the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.push(3);
- /// heap.push(5);
- /// heap.push(1);
- ///
- /// assert_eq!(heap.len(), 3);
- /// assert_eq!(heap.peek(), Some(&5));
- /// ```
- ///
- /// # Time complexity
- ///
- /// The expected cost of `push`, averaged over every possible ordering of
- /// the elements being pushed, and over a sufficiently large number of
- /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
- /// elements that are *not* already in any sorted pattern.
- ///
- /// The time complexity degrades if elements are pushed in predominantly
- /// ascending order. In the worst case, elements are pushed in ascending
- /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
- /// containing *n* elements.
- ///
- /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
- /// occurs when capacity is exhausted and needs a resize. The resize cost
- /// has been amortized in the previous figures.
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn push(&mut self, item: T) {
- let old_len = self.len();
- self.data.push(item);
- // SAFETY: Since we pushed a new item it means that
- // old_len = self.len() - 1 < self.len()
- unsafe { self.sift_up(0, old_len) };
- }
-
- /// Consumes the `BinaryHeap` and returns a vector in sorted
- /// (ascending) order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
- /// heap.push(6);
- /// heap.push(3);
- ///
- /// let vec = heap.into_sorted_vec();
- /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
- /// ```
- #[must_use = "`self` will be dropped if the result is not used"]
- #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
- pub fn into_sorted_vec(mut self) -> Vec<T> {
- let mut end = self.len();
- while end > 1 {
- end -= 1;
- // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
- // so it's always a valid index to access.
- // It is safe to access index 0 (i.e. `ptr`), because
- // 1 <= end < self.len(), which means self.len() >= 2.
- unsafe {
- let ptr = self.data.as_mut_ptr();
- ptr::swap(ptr, ptr.add(end));
- }
- // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
- // 0 < 1 <= end <= self.len() - 1 < self.len()
- // Which means 0 < end and end < self.len().
- unsafe { self.sift_down_range(0, end) };
- }
- self.into_vec()
- }
-
- // The implementations of sift_up and sift_down use unsafe blocks in
- // order to move an element out of the vector (leaving behind a
- // hole), shift along the others and move the removed element back into the
- // vector at the final location of the hole.
- // The `Hole` type is used to represent this, and make sure
- // the hole is filled back at the end of its scope, even on panic.
- // Using a hole reduces the constant factor compared to using swaps,
- // which involves twice as many moves.
-
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
- // Take out the value at `pos` and create a hole.
- // SAFETY: The caller guarantees that pos < self.len()
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
-
- while hole.pos() > start {
- let parent = (hole.pos() - 1) / 2;
-
- // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
- // and so hole.pos() - 1 can't underflow.
- // This guarantees that parent < hole.pos() so
- // it's a valid index and also != hole.pos().
- if hole.element() <= unsafe { hole.get(parent) } {
- break;
- }
-
- // SAFETY: Same as above
- unsafe { hole.move_to(parent) };
- }
-
- hole.pos()
- }
-
- /// Take an element at `pos` and move it down the heap,
- /// while its children are larger.
- ///
- /// # Safety
- ///
- /// The caller must guarantee that `pos < end <= self.len()`.
- unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
- // SAFETY: The caller guarantees that pos < end <= self.len().
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
- let mut child = 2 * hole.pos() + 1;
-
- // Loop invariant: child == 2 * hole.pos() + 1.
- while child <= end.saturating_sub(2) {
- // compare with the greater of the two children
- // SAFETY: child < end - 1 < self.len() and
- // child + 1 < end <= self.len(), so they're valid indexes.
- // child == 2 * hole.pos() + 1 != hole.pos() and
- // child + 1 == 2 * hole.pos() + 2 != hole.pos().
- // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
- // if T is a ZST
- child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
-
- // if we are already in order, stop.
- // SAFETY: child is now either the old child or the old child+1
- // We have already proven that both are < self.len() and != hole.pos()
- if hole.element() >= unsafe { hole.get(child) } {
- return;
- }
-
- // SAFETY: same as above.
- unsafe { hole.move_to(child) };
- child = 2 * hole.pos() + 1;
- }
-
- // SAFETY: && short circuit, which means that in the
- // second condition it's already true that child == end - 1 < self.len().
- if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
- // SAFETY: child is already proven to be a valid index and
- // child == 2 * hole.pos() + 1 != hole.pos().
- unsafe { hole.move_to(child) };
- }
- }
-
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_down(&mut self, pos: usize) {
- let len = self.len();
- // SAFETY: pos < len is guaranteed by the caller and
- // obviously len = self.len() <= self.len().
- unsafe { self.sift_down_range(pos, len) };
- }
-
- /// Take an element at `pos` and move it all the way down the heap,
- /// then sift it up to its position.
- ///
- /// Note: This is faster when the element is known to be large / should
- /// be closer to the bottom.
- ///
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
- let end = self.len();
- let start = pos;
-
- // SAFETY: The caller guarantees that pos < self.len().
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
- let mut child = 2 * hole.pos() + 1;
-
- // Loop invariant: child == 2 * hole.pos() + 1.
- while child <= end.saturating_sub(2) {
- // SAFETY: child < end - 1 < self.len() and
- // child + 1 < end <= self.len(), so they're valid indexes.
- // child == 2 * hole.pos() + 1 != hole.pos() and
- // child + 1 == 2 * hole.pos() + 2 != hole.pos().
- // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
- // if T is a ZST
- child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
-
- // SAFETY: Same as above
- unsafe { hole.move_to(child) };
- child = 2 * hole.pos() + 1;
- }
-
- if child == end - 1 {
- // SAFETY: child == end - 1 < self.len(), so it's a valid index
- // and child == 2 * hole.pos() + 1 != hole.pos().
- unsafe { hole.move_to(child) };
- }
- pos = hole.pos();
- drop(hole);
-
- // SAFETY: pos is the position in the hole and was already proven
- // to be a valid index.
- unsafe { self.sift_up(start, pos) };
- }
-
- /// Rebuild assuming data[0..start] is still a proper heap.
- fn rebuild_tail(&mut self, start: usize) {
- if start == self.len() {
- return;
- }
-
- let tail_len = self.len() - start;
-
- #[inline(always)]
- fn log2_fast(x: usize) -> usize {
- (usize::BITS - x.leading_zeros() - 1) as usize
- }
-
- // `rebuild` takes O(self.len()) operations
- // and about 2 * self.len() comparisons in the worst case
- // while repeating `sift_up` takes O(tail_len * log(start)) operations
- // and about 1 * tail_len * log_2(start) comparisons in the worst case,
- // assuming start >= tail_len. For larger heaps, the crossover point
- // no longer follows this reasoning and was determined empirically.
- let better_to_rebuild = if start < tail_len {
- true
- } else if self.len() <= 2048 {
- 2 * self.len() < tail_len * log2_fast(start)
- } else {
- 2 * self.len() < tail_len * 11
- };
-
- if better_to_rebuild {
- self.rebuild();
- } else {
- for i in start..self.len() {
- // SAFETY: The index `i` is always less than self.len().
- unsafe { self.sift_up(0, i) };
- }
- }
- }
-
- fn rebuild(&mut self) {
- let mut n = self.len() / 2;
- while n > 0 {
- n -= 1;
- // SAFETY: n starts from self.len() / 2 and goes down to 0.
- // The only case when !(n < self.len()) is if
- // self.len() == 0, but it's ruled out by the loop condition.
- unsafe { self.sift_down(n) };
- }
- }
-
- /// Moves all the elements of `other` into `self`, leaving `other` empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
- /// let mut b = BinaryHeap::from([-20, 5, 43]);
- ///
- /// a.append(&mut b);
- ///
- /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
- /// assert!(b.is_empty());
- /// ```
- #[stable(feature = "binary_heap_append", since = "1.11.0")]
- pub fn append(&mut self, other: &mut Self) {
- if self.len() < other.len() {
- swap(self, other);
- }
-
- let start = self.data.len();
-
- self.data.append(&mut other.data);
-
- self.rebuild_tail(start);
- }
-
- /// Clears the binary heap, returning an iterator over the removed elements
- /// in heap order. If the iterator is dropped before being fully consumed,
- /// it drops the remaining elements in heap order.
- ///
- /// The returned iterator keeps a mutable borrow on the heap to optimize
- /// its implementation.
- ///
- /// Note:
- /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
- /// You should use the latter for most cases.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_drain_sorted)]
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
- /// assert_eq!(heap.len(), 5);
- ///
- /// drop(heap.drain_sorted()); // removes all elements in heap order
- /// assert_eq!(heap.len(), 0);
- /// ```
- #[inline]
- #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
- pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
- DrainSorted { inner: self }
- }
-
- /// Retains only the elements specified by the predicate.
- ///
- /// In other words, remove all elements `e` for which `f(&e)` returns
- /// `false`. The elements are visited in unsorted (and unspecified) order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_retain)]
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
- ///
- /// heap.retain(|x| x % 2 == 0); // only keep even numbers
- ///
- /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
- /// ```
- #[unstable(feature = "binary_heap_retain", issue = "71503")]
- pub fn retain<F>(&mut self, mut f: F)
- where
- F: FnMut(&T) -> bool,
- {
- let mut first_removed = self.len();
- let mut i = 0;
- self.data.retain(|e| {
- let keep = f(e);
- if !keep && i < first_removed {
- first_removed = i;
- }
- i += 1;
- keep
- });
- // data[0..first_removed] is untouched, so we only need to rebuild the tail:
- self.rebuild_tail(first_removed);
- }
-}
-
-impl<T> BinaryHeap<T> {
- /// Returns an iterator visiting all values in the underlying vector, in
- /// arbitrary order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4]);
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order
- /// for x in heap.iter() {
- /// println!("{x}");
- /// }
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn iter(&self) -> Iter<'_, T> {
- Iter { iter: self.data.iter() }
- }
-
- /// Returns an iterator which retrieves elements in heap order.
- /// This method consumes the original heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_into_iter_sorted)]
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
- ///
- /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
- /// ```
- #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
- pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
- IntoIterSorted { inner: self }
- }
-
- /// Returns the greatest item in the binary heap, or `None` if it is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// assert_eq!(heap.peek(), None);
- ///
- /// heap.push(1);
- /// heap.push(5);
- /// heap.push(2);
- /// assert_eq!(heap.peek(), Some(&5));
- ///
- /// ```
- ///
- /// # Time complexity
- ///
- /// Cost is *O*(1) in the worst case.
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn peek(&self) -> Option<&T> {
- self.data.get(0)
- }
-
- /// Returns the number of elements the binary heap can hold without reallocating.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::with_capacity(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn capacity(&self) -> usize {
- self.data.capacity()
- }
-
- /// Reserves the minimum capacity for at least `additional` elements more than
- /// the current length. Unlike [`reserve`], this will not
- /// deliberately over-allocate to speculatively avoid frequent allocations.
- /// After calling `reserve_exact`, capacity will be greater than or equal to
- /// `self.len() + additional`. Does nothing if the capacity is already
- /// sufficient.
- ///
- /// [`reserve`]: BinaryHeap::reserve
- ///
- /// # Panics
- ///
- /// Panics if the new capacity overflows [`usize`].
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.reserve_exact(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- ///
- /// [`reserve`]: BinaryHeap::reserve
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve_exact(&mut self, additional: usize) {
- self.data.reserve_exact(additional);
- }
-
- /// Reserves capacity for at least `additional` elements more than the
- /// current length. The allocator may reserve more space to speculatively
- /// avoid frequent allocations. After calling `reserve`,
- /// capacity will be greater than or equal to `self.len() + additional`.
- /// Does nothing if capacity is already sufficient.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity overflows [`usize`].
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.reserve(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve(&mut self, additional: usize) {
- self.data.reserve(additional);
- }
-
- /// Tries to reserve the minimum capacity for at least `additional` elements
- /// more than the current length. Unlike [`try_reserve`], this will not
- /// deliberately over-allocate to speculatively avoid frequent allocations.
- /// After calling `try_reserve_exact`, capacity will be greater than or
- /// equal to `self.len() + additional` if it returns `Ok(())`.
- /// Does nothing if the capacity is already sufficient.
- ///
- /// Note that the allocator may give the collection more space than it
- /// requests. Therefore, capacity can not be relied upon to be precisely
- /// minimal. Prefer [`try_reserve`] if future insertions are expected.
- ///
- /// [`try_reserve`]: BinaryHeap::try_reserve
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// use std::collections::TryReserveError;
- ///
- /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
- /// let mut heap = BinaryHeap::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// heap.try_reserve_exact(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// heap.extend(data.iter());
- ///
- /// Ok(heap.pop())
- /// }
- /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve_2", since = "1.63.0")]
- pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.data.try_reserve_exact(additional)
- }
-
- /// Tries to reserve capacity for at least `additional` elements more than the
- /// current length. The allocator may reserve more space to speculatively
- /// avoid frequent allocations. After calling `try_reserve`, capacity will be
- /// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient. This method
- /// preserves the contents even if an error occurs.
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// use std::collections::TryReserveError;
- ///
- /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
- /// let mut heap = BinaryHeap::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// heap.try_reserve(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// heap.extend(data.iter());
- ///
- /// Ok(heap.pop())
- /// }
- /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve_2", since = "1.63.0")]
- pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.data.try_reserve(additional)
- }
-
- /// Discards as much additional capacity as possible.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
- ///
- /// assert!(heap.capacity() >= 100);
- /// heap.shrink_to_fit();
- /// assert!(heap.capacity() == 0);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn shrink_to_fit(&mut self) {
- self.data.shrink_to_fit();
- }
-
- /// Discards capacity with a lower bound.
- ///
- /// The capacity will remain at least as large as both the length
- /// and the supplied value.
- ///
- /// If the current capacity is less than the lower limit, this is a no-op.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
- ///
- /// assert!(heap.capacity() >= 100);
- /// heap.shrink_to(10);
- /// assert!(heap.capacity() >= 10);
- /// ```
- #[inline]
- #[stable(feature = "shrink_to", since = "1.56.0")]
- pub fn shrink_to(&mut self, min_capacity: usize) {
- self.data.shrink_to(min_capacity)
- }
-
- /// Returns a slice of all values in the underlying vector, in arbitrary
- /// order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_as_slice)]
- /// use std::collections::BinaryHeap;
- /// use std::io::{self, Write};
- ///
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
- ///
- /// io::sink().write(heap.as_slice()).unwrap();
- /// ```
- #[must_use]
- #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
- pub fn as_slice(&self) -> &[T] {
- self.data.as_slice()
- }
-
- /// Consumes the `BinaryHeap` and returns the underlying vector
- /// in arbitrary order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
- /// let vec = heap.into_vec();
- ///
- /// // Will print in some order
- /// for x in vec {
- /// println!("{x}");
- /// }
- /// ```
- #[must_use = "`self` will be dropped if the result is not used"]
- #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
- pub fn into_vec(self) -> Vec<T> {
- self.into()
- }
-
- /// Returns the length of the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 3]);
- ///
- /// assert_eq!(heap.len(), 2);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn len(&self) -> usize {
- self.data.len()
- }
-
- /// Checks if the binary heap is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- ///
- /// assert!(heap.is_empty());
- ///
- /// heap.push(3);
- /// heap.push(5);
- /// heap.push(1);
- ///
- /// assert!(!heap.is_empty());
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn is_empty(&self) -> bool {
- self.len() == 0
- }
-
- /// Clears the binary heap, returning an iterator over the removed elements
- /// in arbitrary order. If the iterator is dropped before being fully
- /// consumed, it drops the remaining elements in arbitrary order.
- ///
- /// The returned iterator keeps a mutable borrow on the heap to optimize
- /// its implementation.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert!(!heap.is_empty());
- ///
- /// for x in heap.drain() {
- /// println!("{x}");
- /// }
- ///
- /// assert!(heap.is_empty());
- /// ```
- #[inline]
- #[stable(feature = "drain", since = "1.6.0")]
- pub fn drain(&mut self) -> Drain<'_, T> {
- Drain { iter: self.data.drain(..) }
- }
-
- /// Drops all items from the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert!(!heap.is_empty());
- ///
- /// heap.clear();
- ///
- /// assert!(heap.is_empty());
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn clear(&mut self) {
- self.drain();
- }
-}
-
-/// Hole represents a hole in a slice, i.e., an index without a valid value
-/// (because it was moved from or duplicated).
-/// In drop, `Hole` will restore the slice by filling the hole
-/// position with the value that was originally removed.
-struct Hole<'a, T: 'a> {
- data: &'a mut [T],
- elt: ManuallyDrop<T>,
- pos: usize,
-}
-
-impl<'a, T> Hole<'a, T> {
- /// Create a new `Hole` at index `pos`.
- ///
- /// Unsafe because pos must be within the data slice.
- #[inline]
- unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
- debug_assert!(pos < data.len());
-    // SAFETY: the caller guarantees that pos is within the slice.
- let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
- Hole { data, elt: ManuallyDrop::new(elt), pos }
- }
-
- #[inline]
- fn pos(&self) -> usize {
- self.pos
- }
-
- /// Returns a reference to the element removed.
- #[inline]
- fn element(&self) -> &T {
- &self.elt
- }
-
- /// Returns a reference to the element at `index`.
- ///
- /// Unsafe because index must be within the data slice and not equal to pos.
- #[inline]
- unsafe fn get(&self, index: usize) -> &T {
- debug_assert!(index != self.pos);
- debug_assert!(index < self.data.len());
- unsafe { self.data.get_unchecked(index) }
- }
-
-    /// Moves the hole to a new location.
- ///
- /// Unsafe because index must be within the data slice and not equal to pos.
- #[inline]
- unsafe fn move_to(&mut self, index: usize) {
- debug_assert!(index != self.pos);
- debug_assert!(index < self.data.len());
- unsafe {
- let ptr = self.data.as_mut_ptr();
- let index_ptr: *const _ = ptr.add(index);
- let hole_ptr = ptr.add(self.pos);
- ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
- }
- self.pos = index;
- }
-}
-
-impl<T> Drop for Hole<'_, T> {
- #[inline]
- fn drop(&mut self) {
- // fill the hole again
- unsafe {
- let pos = self.pos;
- ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
- }
- }
-}
-
-/// An iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::iter()`]. See its
-/// documentation for more.
-///
-/// [`iter`]: BinaryHeap::iter
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Iter<'a, T: 'a> {
- iter: slice::Iter<'a, T>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
- }
-}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Clone for Iter<'_, T> {
- fn clone(&self) -> Self {
- Iter { iter: self.iter.clone() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> Iterator for Iter<'a, T> {
- type Item = &'a T;
-
- #[inline]
- fn next(&mut self) -> Option<&'a T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-
- #[inline]
- fn last(self) -> Option<&'a T> {
- self.iter.last()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for Iter<'_, T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for Iter<'_, T> {}
-
-/// An owning iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::into_iter()`]
-/// (provided by the [`IntoIterator`] trait). See its documentation for more.
-///
-/// [`into_iter`]: BinaryHeap::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
-#[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Clone)]
-pub struct IntoIter<T> {
- iter: vec::IntoIter<T>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Iterator for IntoIter<T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> DoubleEndedIterator for IntoIter<T> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IntoIter<T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for IntoIter<T> {}
-
-// In addition to the SAFETY invariants of the following three unsafe traits
-// also refer to the vec::in_place_collect module documentation to get an overview
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<T> SourceIter for IntoIter<T> {
- type Source = IntoIter<T>;
-
- #[inline]
- unsafe fn as_inner(&mut self) -> &mut Self::Source {
- self
- }
-}
-
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<I> InPlaceIterable for IntoIter<I> {}
-
-unsafe impl<I> AsVecIntoIter for IntoIter<I> {
- type Item = I;
-
- fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
- &mut self.iter
- }
-}
-
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-#[derive(Clone, Debug)]
-pub struct IntoIterSorted<T> {
- inner: BinaryHeap<T>,
-}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> Iterator for IntoIterSorted<T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.inner.pop()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = self.inner.len();
- (exact, Some(exact))
- }
-}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
-
-/// A draining iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::drain()`]. See its
-/// documentation for more.
-///
-/// [`drain`]: BinaryHeap::drain
-#[stable(feature = "drain", since = "1.6.0")]
-#[derive(Debug)]
-pub struct Drain<'a, T: 'a> {
- iter: vec::Drain<'a, T>,
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> Iterator for Drain<'_, T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> DoubleEndedIterator for Drain<'_, T> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> ExactSizeIterator for Drain<'_, T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for Drain<'_, T> {}
-
-/// A draining iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
-/// documentation for more.
-///
-/// [`drain_sorted`]: BinaryHeap::drain_sorted
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-#[derive(Debug)]
-pub struct DrainSorted<'a, T: Ord> {
- inner: &'a mut BinaryHeap<T>,
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
- /// Removes heap elements in heap order.
- fn drop(&mut self) {
- struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
-
- impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
- fn drop(&mut self) {
- while self.0.inner.pop().is_some() {}
- }
- }
-
- while let Some(item) = self.inner.pop() {
- let guard = DropGuard(self);
- drop(item);
- mem::forget(guard);
- }
- }
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> Iterator for DrainSorted<'_, T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.inner.pop()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = self.inner.len();
- (exact, Some(exact))
- }
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
-
-#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
-impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
- /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
- ///
- /// This conversion happens in-place, and has *O*(*n*) time complexity.
- fn from(vec: Vec<T>) -> BinaryHeap<T> {
- let mut heap = BinaryHeap { data: vec };
- heap.rebuild();
- heap
- }
-}
-
-#[stable(feature = "std_collections_from_array", since = "1.56.0")]
-impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
- /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
- /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
- /// assert_eq!(a, b);
- /// }
- /// ```
- fn from(arr: [T; N]) -> Self {
- Self::from_iter(arr)
- }
-}
-
-#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
-impl<T> From<BinaryHeap<T>> for Vec<T> {
- /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
- ///
- /// This conversion requires no data movement or allocation, and has
- /// constant time complexity.
- fn from(heap: BinaryHeap<T>) -> Vec<T> {
- heap.data
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
- fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
- BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> IntoIterator for BinaryHeap<T> {
- type Item = T;
- type IntoIter = IntoIter<T>;
-
- /// Creates a consuming iterator, that is, one that moves each value out of
- /// the binary heap in arbitrary order. The binary heap cannot be used
- /// after calling this.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4]);
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order
- /// for x in heap.into_iter() {
- /// // x has type i32, not &i32
- /// println!("{x}");
- /// }
- /// ```
- fn into_iter(self) -> IntoIter<T> {
- IntoIter { iter: self.data.into_iter() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
- type Item = &'a T;
- type IntoIter = Iter<'a, T>;
-
- fn into_iter(self) -> Iter<'a, T> {
- self.iter()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> Extend<T> for BinaryHeap<T> {
- #[inline]
- fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<I>>::spec_extend(self, iter);
- }
-
- #[inline]
- fn extend_one(&mut self, item: T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
-
-impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
- default fn spec_extend(&mut self, iter: I) {
- self.extend_desugared(iter.into_iter());
- }
-}
-
-impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: Vec<T>) {
- let start = self.data.len();
- self.data.append(other);
- self.rebuild_tail(start);
- }
-}
-
-impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
- self.append(other);
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
-
- self.reserve(lower);
-
- iterator.for_each(move |elem| self.push(elem));
- }
-}
-
-#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
- fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- self.extend(iter.into_iter().cloned());
- }
-
- #[inline]
- fn extend_one(&mut self, &item: &'a T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
new file mode 100644
index 000000000..0b73b1af4
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -0,0 +1,1766 @@
+//! A priority queue implemented with a binary heap.
+//!
+//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
+//! Checking the largest element is *O*(1). Converting a vector to a binary heap
+//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
+//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* \* log(*n*))
+//! in-place heapsort.
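+//!
+//! As a minimal sketch of that heapsort (using only the conversions
+//! documented on this page):
+//!
+//! ```
+//! use std::collections::BinaryHeap;
+//!
+//! // Heapify in O(n), then repeatedly move the max to the end: O(n * log(n)).
+//! let sorted = BinaryHeap::from(vec![3, 1, 4, 1, 5]).into_sorted_vec();
+//! assert_eq!(sorted, [1, 1, 3, 4, 5]);
+//! ```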
+//!
+//! # Examples
+//!
+//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
+//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
+//! It shows how to use [`BinaryHeap`] with custom types.
+//!
+//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
+//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
+//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
+//!
+//! ```
+//! use std::cmp::Ordering;
+//! use std::collections::BinaryHeap;
+//!
+//! #[derive(Copy, Clone, Eq, PartialEq)]
+//! struct State {
+//! cost: usize,
+//! position: usize,
+//! }
+//!
+//! // The priority queue depends on `Ord`.
+//! // Explicitly implement the trait so the queue becomes a min-heap
+//! // instead of a max-heap.
+//! impl Ord for State {
+//! fn cmp(&self, other: &Self) -> Ordering {
+//!         // Notice that we flip the ordering on costs.
+//! // In case of a tie we compare positions - this step is necessary
+//! // to make implementations of `PartialEq` and `Ord` consistent.
+//! other.cost.cmp(&self.cost)
+//! .then_with(|| self.position.cmp(&other.position))
+//! }
+//! }
+//!
+//! // `PartialOrd` needs to be implemented as well.
+//! impl PartialOrd for State {
+//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+//! Some(self.cmp(other))
+//! }
+//! }
+//!
+//! // Each node is represented as a `usize`, for a shorter implementation.
+//! struct Edge {
+//! node: usize,
+//! cost: usize,
+//! }
+//!
+//! // Dijkstra's shortest path algorithm.
+//!
+//! // Start at `start` and use `dist` to track the current shortest distance
+//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
+//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
+//! // for a simpler implementation.
+//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
+//! // dist[node] = current shortest distance from `start` to `node`
+//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
+//!
+//! let mut heap = BinaryHeap::new();
+//!
+//! // We're at `start`, with a zero cost
+//! dist[start] = 0;
+//! heap.push(State { cost: 0, position: start });
+//!
+//! // Examine the frontier with lower cost nodes first (min-heap)
+//! while let Some(State { cost, position }) = heap.pop() {
+//! // Alternatively we could have continued to find all shortest paths
+//! if position == goal { return Some(cost); }
+//!
+//! // Important as we may have already found a better way
+//! if cost > dist[position] { continue; }
+//!
+//! // For each node we can reach, see if we can find a way with
+//! // a lower cost going through this node
+//! for edge in &adj_list[position] {
+//! let next = State { cost: cost + edge.cost, position: edge.node };
+//!
+//! // If so, add it to the frontier and continue
+//! if next.cost < dist[next.position] {
+//! heap.push(next);
+//! // Relaxation, we have now found a better way
+//! dist[next.position] = next.cost;
+//! }
+//! }
+//! }
+//!
+//! // Goal not reachable
+//! None
+//! }
+//!
+//! fn main() {
+//! // This is the directed graph we're going to use.
+//! // The node numbers correspond to the different states,
+//! // and the edge weights symbolize the cost of moving
+//! // from one node to another.
+//! // Note that the edges are one-way.
+//! //
+//! // 7
+//! // +-----------------+
+//! // | |
+//! // v 1 2 | 2
+//! // 0 -----> 1 -----> 3 ---> 4
+//! // | ^ ^ ^
+//! // | | 1 | |
+//! // | | | 3 | 1
+//! // +------> 2 -------+ |
+//! // 10 | |
+//! // +---------------+
+//! //
+//! // The graph is represented as an adjacency list where each index,
+//! // corresponding to a node value, has a list of outgoing edges.
+//!     // This representation is chosen for its efficiency.
+//! let graph = vec![
+//! // Node 0
+//! vec![Edge { node: 2, cost: 10 },
+//! Edge { node: 1, cost: 1 }],
+//! // Node 1
+//! vec![Edge { node: 3, cost: 2 }],
+//! // Node 2
+//! vec![Edge { node: 1, cost: 1 },
+//! Edge { node: 3, cost: 3 },
+//! Edge { node: 4, cost: 1 }],
+//! // Node 3
+//! vec![Edge { node: 0, cost: 7 },
+//! Edge { node: 4, cost: 2 }],
+//! // Node 4
+//! vec![]];
+//!
+//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
+//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
+//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
+//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
+//! assert_eq!(shortest_path(&graph, 4, 0), None);
+//! }
+//! ```
+
+#![allow(missing_docs)]
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::fmt;
+use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::mem::{self, swap, ManuallyDrop};
+use core::num::NonZeroUsize;
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+use crate::collections::TryReserveError;
+use crate::slice;
+use crate::vec::{self, AsVecIntoIter, Vec};
+
+use super::SpecExtend;
+
+#[cfg(test)]
+mod tests;
+
+/// A priority queue implemented with a binary heap.
+///
+/// This will be a max-heap.
+///
+/// It is a logic error for an item to be modified in such a way that the
+/// item's ordering relative to any other item, as determined by the [`Ord`]
+/// trait, changes while it is in the heap. This is normally only possible
+/// through interior mutability, global state, I/O, or unsafe code. The
+/// behavior resulting from such a logic error is not specified, but will
+/// be encapsulated within the `BinaryHeap` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
+/// As long as no elements change their relative order while being in the heap
+/// as described above, the API of `BinaryHeap` guarantees that the heap
+/// invariant remains intact, i.e. its methods all behave as documented. For
+/// example, if a method is documented as iterating in sorted order, that's
+/// guaranteed to work as long as elements in the heap have not changed order,
+/// even in the presence of panics unwinding out of closures, iterators getting
+/// leaked, and similar foolishness.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BinaryHeap<i32>` in this example).
+/// let mut heap = BinaryHeap::new();
+///
+/// // We can use peek to look at the next item in the heap. In this case,
+/// // there's no items in there yet so we get None.
+/// assert_eq!(heap.peek(), None);
+///
+/// // Let's add some scores...
+/// heap.push(1);
+/// heap.push(5);
+/// heap.push(2);
+///
+/// // Now peek shows the most important item in the heap.
+/// assert_eq!(heap.peek(), Some(&5));
+///
+/// // We can check the length of a heap.
+/// assert_eq!(heap.len(), 3);
+///
+/// // We can iterate over the items in the heap, although they are returned in
+/// // a random order.
+/// for x in &heap {
+/// println!("{x}");
+/// }
+///
+/// // If we instead pop these scores, they should come back in order.
+/// assert_eq!(heap.pop(), Some(5));
+/// assert_eq!(heap.pop(), Some(2));
+/// assert_eq!(heap.pop(), Some(1));
+/// assert_eq!(heap.pop(), None);
+///
+/// // We can clear the heap of any remaining items.
+/// heap.clear();
+///
+/// // The heap should now be empty.
+/// assert!(heap.is_empty())
+/// ```
+///
+/// A `BinaryHeap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// let heap = BinaryHeap::from([1, 5, 2]);
+/// ```
+///
+/// ## Min-heap
+///
+/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
+/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
+/// value instead of the greatest one.
+///
+/// ```
+/// use std::collections::BinaryHeap;
+/// use std::cmp::Reverse;
+///
+/// let mut heap = BinaryHeap::new();
+///
+/// // Wrap values in `Reverse`
+/// heap.push(Reverse(1));
+/// heap.push(Reverse(5));
+/// heap.push(Reverse(2));
+///
+/// // If we pop these scores now, they should come back in the reverse order.
+/// assert_eq!(heap.pop(), Some(Reverse(1)));
+/// assert_eq!(heap.pop(), Some(Reverse(2)));
+/// assert_eq!(heap.pop(), Some(Reverse(5)));
+/// assert_eq!(heap.pop(), None);
+/// ```
+///
+/// # Time complexity
+///
+/// | [push] | [pop] | [peek]/[peek\_mut] |
+/// |---------|---------------|--------------------|
+/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
+///
+/// The value for `push` is an expected cost; the method documentation gives a
+/// more detailed analysis.
+///
+/// [`core::cmp::Reverse`]: core::cmp::Reverse
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+/// [push]: BinaryHeap::push
+/// [pop]: BinaryHeap::pop
+/// [peek]: BinaryHeap::peek
+/// [peek\_mut]: BinaryHeap::peek_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
+pub struct BinaryHeap<T> {
+ data: Vec<T>,
+}
+
+/// Structure wrapping a mutable reference to the greatest item on a
+/// `BinaryHeap`.
+///
+/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
+/// its documentation for more.
+///
+/// [`peek_mut`]: BinaryHeap::peek_mut
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+pub struct PeekMut<'a, T: 'a + Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ // If a set_len + sift_down are required, this is Some. If a &mut T has not
+ // yet been exposed to peek_mut()'s caller, it's None.
+ original_len: Option<NonZeroUsize>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Drop for PeekMut<'_, T> {
+ fn drop(&mut self) {
+ if let Some(original_len) = self.original_len {
+ // SAFETY: That's how many elements were in the Vec at the time of
+ // the PeekMut::deref_mut call, and therefore also at the time of
+ // the BinaryHeap::peek_mut call. Since the PeekMut did not end up
+ // getting leaked, we are now undoing the leak amplification that
+ // the DerefMut prepared for.
+ unsafe { self.heap.data.set_len(original_len.get()) };
+
+ // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.sift_down(0) };
+ }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Deref for PeekMut<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ debug_assert!(!self.heap.is_empty());
+        // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.data.get_unchecked(0) }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> DerefMut for PeekMut<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ debug_assert!(!self.heap.is_empty());
+
+ let len = self.heap.len();
+ if len > 1 {
+ // Here we preemptively leak all the rest of the underlying vector
+ // after the currently max element. If the caller mutates the &mut T
+ // we're about to give them, and then leaks the PeekMut, all these
+ // elements will remain leaked. If they don't leak the PeekMut, then
+ // either Drop or PeekMut::pop will un-leak the vector elements.
+ //
+            // This technique is described in several other places in
+            // the standard library as "leak amplification".
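+            //
+            // An illustrative scenario (not exercised by this code): if the
+            // caller runs `mem::forget` on the `PeekMut` guard after calling
+            // this method, `Drop` never restores the length, so the elements
+            // past index 0 stay leaked while the one-element heap left
+            // behind remains valid.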
+ unsafe {
+ // SAFETY: len > 1 so len != 0.
+ self.original_len = Some(NonZeroUsize::new_unchecked(len));
+ // SAFETY: len > 1 so all this does for now is leak elements,
+ // which is safe.
+ self.heap.data.set_len(1);
+ }
+ }
+
+        // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.data.get_unchecked_mut(0) }
+ }
+}
+
+impl<'a, T: Ord> PeekMut<'a, T> {
+ /// Removes the peeked value from the heap and returns it.
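+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (the bindings below are ordinary doc-test locals,
+    /// not part of the API):
+    ///
+    /// ```
+    /// use std::collections::binary_heap::PeekMut;
+    /// use std::collections::BinaryHeap;
+    ///
+    /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+    /// let peeked = heap.peek_mut().unwrap();
+    ///
+    /// // `pop` takes the guard by value, ending the mutable borrow.
+    /// assert_eq!(PeekMut::pop(peeked), 5);
+    /// assert_eq!(heap.peek(), Some(&4));
+    /// ```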
+ #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
+ pub fn pop(mut this: PeekMut<'a, T>) -> T {
+ if let Some(original_len) = this.original_len.take() {
+ // SAFETY: This is how many elements were in the Vec at the time of
+ // the BinaryHeap::peek_mut call.
+ unsafe { this.heap.data.set_len(original_len.get()) };
+
+ // Unlike in Drop, here we don't also need to do a sift_down even if
+ // the caller could've mutated the element. It is removed from the
+ // heap on the next line and pop() is not sensitive to its value.
+ }
+ this.heap.pop().unwrap()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for BinaryHeap<T> {
+ fn clone(&self) -> Self {
+ BinaryHeap { data: self.data.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.data.clone_from(&source.data);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Default for BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap<T>`.
+ #[inline]
+ fn default() -> BinaryHeap<T> {
+ BinaryHeap::new()
+ }
+}
+
+#[stable(feature = "binaryheap_debug", since = "1.4.0")]
+impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap` as a max-heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> BinaryHeap<T> {
+ BinaryHeap { data: vec![] }
+ }
+
+ /// Creates an empty `BinaryHeap` with at least the specified capacity.
+ ///
+ /// The binary heap will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(10);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
+ BinaryHeap { data: Vec::with_capacity(capacity) }
+ }
+
+ /// Returns a mutable reference to the greatest item in the binary heap, or
+ /// `None` if it is empty.
+ ///
+ /// Note: If the `PeekMut` value is leaked, some heap elements might get
+ /// leaked along with it, but the remaining elements will remain a valid
+ /// heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert!(heap.peek_mut().is_none());
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// {
+ /// let mut val = heap.peek_mut().unwrap();
+ /// *val = 0;
+ /// }
+ /// assert_eq!(heap.peek(), Some(&2));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
+ /// otherwise it's *O*(1).
+ #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+ pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
+ if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) }
+ }
+
+ /// Removes the greatest item from the binary heap and returns it, or `None` if it
+ /// is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.pop(), Some(3));
+ /// assert_eq!(heap.pop(), Some(1));
+ /// assert_eq!(heap.pop(), None);
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
+ self.data.pop().map(|mut item| {
+ if !self.is_empty() {
+ swap(&mut item, &mut self.data[0]);
+ // SAFETY: !self.is_empty() means that self.len() > 0
+ unsafe { self.sift_down_to_bottom(0) };
+ }
+ item
+ })
+ }
+
+ /// Pushes an item onto the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert_eq!(heap.len(), 3);
+ /// assert_eq!(heap.peek(), Some(&5));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The expected cost of `push`, averaged over every possible ordering of
+ /// the elements being pushed, and over a sufficiently large number of
+ /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
+ /// elements that are *not* already in any sorted pattern.
+ ///
+ /// The time complexity degrades if elements are pushed in predominantly
+ /// ascending order. In the worst case, elements are pushed in ascending
+ /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
+ /// containing *n* elements.
+ ///
+ /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
+ /// occurs when capacity is exhausted and needs a resize. The resize cost
+ /// has been amortized in the previous figures.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, item: T) {
+ let old_len = self.len();
+ self.data.push(item);
+ // SAFETY: Since we pushed a new item it means that
+ // old_len = self.len() - 1 < self.len()
+ unsafe { self.sift_up(0, old_len) };
+ }
+
+ /// Consumes the `BinaryHeap` and returns a vector in sorted
+ /// (ascending) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
+ /// heap.push(6);
+ /// heap.push(3);
+ ///
+ /// let vec = heap.into_sorted_vec();
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_sorted_vec(mut self) -> Vec<T> {
+ let mut end = self.len();
+ while end > 1 {
+ end -= 1;
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
+ // so it's always a valid index to access.
+ // It is safe to access index 0 (i.e. `ptr`), because
+ // 1 <= end < self.len(), which means self.len() >= 2.
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ ptr::swap(ptr, ptr.add(end));
+ }
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
+ // 0 < 1 <= end <= self.len() - 1 < self.len()
+ // Which means 0 < end and end < self.len().
+ unsafe { self.sift_down_range(0, end) };
+ }
+ self.into_vec()
+ }
+
+ // The implementations of sift_up and sift_down use unsafe blocks in
+ // order to move an element out of the vector (leaving behind a
+ // hole), shift along the others and move the removed element back into the
+ // vector at the final location of the hole.
+ // The `Hole` type is used to represent this, and make sure
+ // the hole is filled back at the end of its scope, even on panic.
+ // Using a hole reduces the constant factor compared to using swaps,
+ // which involves twice as many moves.
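+    //
+    // As a rough illustration: moving a value up `k` levels with swaps
+    // writes two elements per level (about `2k` moves), while the hole
+    // reads the value once, moves one parent per level (`k` moves), and
+    // writes the value back once.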
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
+ // Take out the value at `pos` and create a hole.
+ // SAFETY: The caller guarantees that pos < self.len()
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+
+ while hole.pos() > start {
+ let parent = (hole.pos() - 1) / 2;
+
+ // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
+ // and so hole.pos() - 1 can't underflow.
+ // This guarantees that parent < hole.pos() so
+ // it's a valid index and also != hole.pos().
+ if hole.element() <= unsafe { hole.get(parent) } {
+ break;
+ }
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(parent) };
+ }
+
+ hole.pos()
+ }
+
+ /// Take an element at `pos` and move it down the heap,
+ /// while its children are larger.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < end <= self.len()`.
+ unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
+ // SAFETY: The caller guarantees that pos < end <= self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // compare with the greater of the two children
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // if we are already in order, stop.
+ // SAFETY: child is now either the old child or the old child+1
+            // We have already proven that both are < self.len() and != hole.pos().
+ if hole.element() >= unsafe { hole.get(child) } {
+ return;
+ }
+
+ // SAFETY: same as above.
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+        // SAFETY: `&&` short-circuits, so the second condition is only
+        // evaluated when child == end - 1, which is < self.len().
+ if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
+ // SAFETY: child is already proven to be a valid index and
+ // child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ }
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down(&mut self, pos: usize) {
+ let len = self.len();
+ // SAFETY: pos < len is guaranteed by the caller and
+ // obviously len = self.len() <= self.len().
+ unsafe { self.sift_down_range(pos, len) };
+ }
+
+ /// Take an element at `pos` and move it all the way down the heap,
+ /// then sift it up to its position.
+ ///
+ /// Note: This is faster when the element is known to be large / should
+ /// be closer to the bottom.
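+    ///
+    /// (Rationale: descending needs only one comparison per level, to pick
+    /// the larger child, instead of two. An element that belongs near the
+    /// bottom, such as the former leaf that `pop` moves to the root, then
+    /// needs only a short, cheap sift-up.)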
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
+ let end = self.len();
+ let start = pos;
+
+ // SAFETY: The caller guarantees that pos < self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ if child == end - 1 {
+ // SAFETY: child == end - 1 < self.len(), so it's a valid index
+ // and child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ pos = hole.pos();
+ drop(hole);
+
+ // SAFETY: pos is the position in the hole and was already proven
+ // to be a valid index.
+ unsafe { self.sift_up(start, pos) };
+ }
+
+ /// Rebuild assuming data[0..start] is still a proper heap.
+ fn rebuild_tail(&mut self, start: usize) {
+ if start == self.len() {
+ return;
+ }
+
+ let tail_len = self.len() - start;
+
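+        // floor(log2(x)) for x >= 1; the only call site below passes
+        // `start`, which on that branch satisfies start >= tail_len >= 1.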
+ #[inline(always)]
+ fn log2_fast(x: usize) -> usize {
+ (usize::BITS - x.leading_zeros() - 1) as usize
+ }
+
+ // `rebuild` takes O(self.len()) operations
+ // and about 2 * self.len() comparisons in the worst case
+ // while repeating `sift_up` takes O(tail_len * log(start)) operations
+ // and about 1 * tail_len * log_2(start) comparisons in the worst case,
+ // assuming start >= tail_len. For larger heaps, the crossover point
+ // no longer follows this reasoning and was determined empirically.
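+        //
+        // For instance (illustrative numbers only): with self.len() == 1024,
+        // start == 1000 and tail_len == 24, rebuilding costs about
+        // 2 * 1024 = 2048 comparisons, while sifting up the tail costs about
+        // 24 * log2_fast(1000) = 24 * 9 = 216, so the tail is sifted up.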
+ let better_to_rebuild = if start < tail_len {
+ true
+ } else if self.len() <= 2048 {
+ 2 * self.len() < tail_len * log2_fast(start)
+ } else {
+ 2 * self.len() < tail_len * 11
+ };
+
+ if better_to_rebuild {
+ self.rebuild();
+ } else {
+ for i in start..self.len() {
+ // SAFETY: The index `i` is always less than self.len().
+ unsafe { self.sift_up(0, i) };
+ }
+ }
+ }
+
+ fn rebuild(&mut self) {
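+        // Bottom-up heap construction: sift down every element that has at
+        // least one child, starting from the last parent, for a total of
+        // O(self.len()) operations as noted in `rebuild_tail` above.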
+ let mut n = self.len() / 2;
+ while n > 0 {
+ n -= 1;
+            // SAFETY: n starts from self.len() / 2 and goes down to 0.
+            // The only case where n would not be < self.len() is when
+            // self.len() == 0, but that is ruled out by the loop condition.
+ unsafe { self.sift_down(n) };
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
+ /// let mut b = BinaryHeap::from([-20, 5, 43]);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ /// assert!(b.is_empty());
+ /// ```
+ #[stable(feature = "binary_heap_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self) {
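+        // Keep the larger heap in `self`, so that only the shorter run of
+        // appended elements needs to be re-heapified by `rebuild_tail`.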
+ if self.len() < other.len() {
+ swap(self, other);
+ }
+
+ let start = self.data.len();
+
+ self.data.append(&mut other.data);
+
+ self.rebuild_tail(start);
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in heap order. If the iterator is dropped before being fully consumed,
+ /// it drops the remaining elements in heap order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// Note:
+ /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
+ /// You should use the latter for most cases.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_drain_sorted)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ /// assert_eq!(heap.len(), 5);
+ ///
+ /// drop(heap.drain_sorted()); // removes all elements in heap order
+ /// assert_eq!(heap.len(), 0);
+ /// ```
+ #[inline]
+ #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+ pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
+ DrainSorted { inner: self }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns
+ /// `false`. The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_retain)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
+ ///
+ /// heap.retain(|x| x % 2 == 0); // only keep even numbers
+ ///
+ /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
+ /// ```
+ #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let mut first_removed = self.len();
+ let mut i = 0;
+ self.data.retain(|e| {
+ let keep = f(e);
+ if !keep && i < first_removed {
+ first_removed = i;
+ }
+ i += 1;
+ keep
+ });
+ // data[0..first_removed] is untouched, so we only need to rebuild the tail:
+ self.rebuild_tail(first_removed);
+ }
+}
+
+impl<T> BinaryHeap<T> {
+ /// Returns an iterator visiting all values in the underlying vector, in
+ /// arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.iter() {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.data.iter() }
+ }
+
+ /// Returns an iterator which retrieves elements in heap order.
+ /// This method consumes the original heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_into_iter_sorted)]
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
+ /// ```
+ #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+ pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
+ IntoIterSorted { inner: self }
+ }
+
+ /// Returns the greatest item in the binary heap, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert_eq!(heap.peek(), None);
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// assert_eq!(heap.peek(), Some(&5));
+ ///
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// Cost is *O*(1) in the worst case.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peek(&self) -> Option<&T> {
+ self.data.get(0)
+ }
+
+ /// Returns the number of elements the binary heap can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+
+ /// Reserves the minimum capacity for at least `additional` elements more than
+ /// the current length. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// [`reserve`]: BinaryHeap::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve_exact(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.data.reserve_exact(additional);
+ }
+
+ /// Reserves capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.data.reserve(additional);
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` elements
+ /// more than the current length. Unlike [`try_reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `try_reserve_exact`, capacity will be greater than or
+ /// equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+    /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: BinaryHeap::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve(additional)
+ }
+
+ /// Discards as much additional capacity as possible.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to_fit();
+ /// assert!(heap.capacity() == 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.data.shrink_to_fit();
+ }
+
+ /// Discards capacity with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to(10);
+ /// assert!(heap.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.data.shrink_to(min_capacity)
+ }
+
+ /// Returns a slice of all values in the underlying vector, in arbitrary
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_as_slice)]
+ /// use std::collections::BinaryHeap;
+ /// use std::io::{self, Write};
+ ///
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ ///
+ /// io::sink().write(heap.as_slice()).unwrap();
+ /// ```
+ #[must_use]
+ #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
+ pub fn as_slice(&self) -> &[T] {
+ self.data.as_slice()
+ }
+
+ /// Consumes the `BinaryHeap` and returns the underlying vector
+ /// in arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ /// let vec = heap.into_vec();
+ ///
+ /// // Will print in some order
+ /// for x in vec {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_vec(self) -> Vec<T> {
+ self.into()
+ }
+
+ /// Returns the length of the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.len(), 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ /// Checks if the binary heap is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// assert!(heap.is_empty());
+ ///
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert!(!heap.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in arbitrary order. If the iterator is dropped before being fully
+ /// consumed, it drops the remaining elements in arbitrary order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// for x in heap.drain() {
+ /// println!("{x}");
+ /// }
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, T> {
+ Drain { iter: self.data.drain(..) }
+ }
+
+ /// Drops all items from the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// heap.clear();
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.drain();
+ }
+}
+
+/// Hole represents a hole in a slice, i.e., an index without a valid value
+/// (because it was moved from or duplicated).
+/// In drop, `Hole` will restore the slice by filling the hole
+/// position with the value that was originally removed.
+struct Hole<'a, T: 'a> {
+ data: &'a mut [T],
+ elt: ManuallyDrop<T>,
+ pos: usize,
+}
+
+impl<'a, T> Hole<'a, T> {
+ /// Create a new `Hole` at index `pos`.
+ ///
+ /// Unsafe because pos must be within the data slice.
+ #[inline]
+ unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
+ debug_assert!(pos < data.len());
+        // SAFETY: the caller guarantees that pos is within the slice.
+ let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
+ Hole { data, elt: ManuallyDrop::new(elt), pos }
+ }
+
+ #[inline]
+ fn pos(&self) -> usize {
+ self.pos
+ }
+
+ /// Returns a reference to the element removed.
+ #[inline]
+ fn element(&self) -> &T {
+ &self.elt
+ }
+
+ /// Returns a reference to the element at `index`.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn get(&self, index: usize) -> &T {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe { self.data.get_unchecked(index) }
+ }
+
+    /// Moves the hole to a new location.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn move_to(&mut self, index: usize) {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ let index_ptr: *const _ = ptr.add(index);
+ let hole_ptr = ptr.add(self.pos);
+ ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+ }
+ self.pos = index;
+ }
+}
+
+impl<T> Drop for Hole<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // fill the hole again
+ unsafe {
+ let pos = self.pos;
+ ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
+ }
+ }
+}
+
+/// An iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::iter()`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BinaryHeap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: slice::Iter<'a, T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(self) -> Option<&'a T> {
+ self.iter.last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// An owning iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::into_iter()`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BinaryHeap::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+// In addition to the SAFETY invariants of the following three unsafe traits,
+// also refer to the `vec::in_place_collect` module documentation for an overview.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T> SourceIter for IntoIter<T> {
+ type Source = IntoIter<T>;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<I> InPlaceIterable for IntoIter<I> {}
+
+unsafe impl<I> AsVecIntoIter for IntoIter<I> {
+ type Item = I;
+
+ fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
+ &mut self.iter
+ }
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+#[derive(Clone, Debug)]
+pub struct IntoIterSorted<T> {
+ inner: BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> Iterator for IntoIterSorted<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
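Since `next()` is just `pop()`, a max-heap's `IntoIterSorted` yields elements in descending order, and the exact `size_hint` is what justifies the `TrustedLen` impl. A usage sketch, assuming a nightly toolchain with the `binary_heap_into_iter_sorted` feature enabled:

```rust
#![feature(binary_heap_into_iter_sorted)]
use std::collections::BinaryHeap;

fn main() {
    let heap = BinaryHeap::from([1, 4, 2, 3]);
    // Each `next()` pops the current maximum, so the order is descending.
    let sorted: Vec<_> = heap.into_iter_sorted().collect();
    assert_eq!(sorted, [4, 3, 2, 1]);
}
```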
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain()`]. See its
+/// documentation for more.
+///
+/// [`drain`]: BinaryHeap::drain
+#[stable(feature = "drain", since = "1.6.0")]
+#[derive(Debug)]
+pub struct Drain<'a, T: 'a> {
+ iter: vec::Drain<'a, T>,
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Drain<'_, T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
+/// documentation for more.
+///
+/// [`drain_sorted`]: BinaryHeap::drain_sorted
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+#[derive(Debug)]
+pub struct DrainSorted<'a, T: Ord> {
+ inner: &'a mut BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
+ /// Removes heap elements in heap order.
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
+
+ impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
+ fn drop(&mut self) {
+ while self.0.inner.pop().is_some() {}
+ }
+ }
+
+ while let Some(item) = self.inner.pop() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
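The `DropGuard` dance keeps `drain_sorted` from leaking: if dropping a popped element panics, the guard's own `Drop` runs during unwinding and pops (and drops) everything left in the heap; on the normal path the guard is immediately forgotten. The same idiom in isolation, as a minimal sketch:

```rust
use std::collections::BinaryHeap;
use std::mem;

// Drains a heap such that the remaining elements are still removed even if
// one element's destructor panics partway through.
fn drain_all<T: Ord>(heap: &mut BinaryHeap<T>) {
    struct Guard<'a, T: Ord>(&'a mut BinaryHeap<T>);
    impl<T: Ord> Drop for Guard<'_, T> {
        fn drop(&mut self) {
            // Only reached during unwinding: clear out whatever is left.
            while self.0.pop().is_some() {}
        }
    }

    while let Some(item) = heap.pop() {
        let guard = Guard(&mut *heap);
        drop(item); // may panic in the element's destructor
        mem::forget(guard); // normal path: disarm the guard
    }
}

fn main() {
    let mut heap = BinaryHeap::from([3, 1, 2]);
    drain_all(&mut heap);
    assert!(heap.is_empty());
}
```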
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> Iterator for DrainSorted<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
+ /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
+ ///
+ /// This conversion happens in-place, and has *O*(*n*) time complexity.
+ fn from(vec: Vec<T>) -> BinaryHeap<T> {
+ let mut heap = BinaryHeap { data: vec };
+ heap.rebuild();
+ heap
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
+ /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
+ /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
+ /// assert_eq!(a, b);
+ /// }
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T> From<BinaryHeap<T>> for Vec<T> {
+ /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
+ ///
+ /// This conversion requires no data movement or allocation, and has
+ /// constant time complexity.
+ fn from(heap: BinaryHeap<T>) -> Vec<T> {
+ heap.data
+ }
+}
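Taken together, the two `From` impls make round-tripping cheap: heapifying a `Vec` is *O*(*n*), and unwrapping the heap again just returns the backing buffer. For example:

```rust
use std::collections::BinaryHeap;

fn main() {
    // Vec -> BinaryHeap rebuilds the heap in place: O(n), no new allocation.
    let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
    // BinaryHeap -> Vec hands the buffer back: O(1). The elements come out
    // in heap layout, not sorted order, but the root (the maximum) is first.
    let buf: Vec<i32> = heap.into();
    assert_eq!(buf.len(), 5);
    assert_eq!(buf[0], 5);
}
```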
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
+ BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for BinaryHeap<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the binary heap in arbitrary order. The binary heap cannot be used
+ /// after calling this.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.into_iter() {
+ /// // x has type i32, not &i32
+ /// println!("{x}");
+ /// }
+ /// ```
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { iter: self.data.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Extend<T> for BinaryHeap<T> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter.into_iter());
+ }
+}
+
+impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: Vec<T>) {
+ let start = self.data.len();
+ self.data.append(other);
+ self.rebuild_tail(start);
+ }
+}
+
+impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
+ self.append(other);
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+
+ self.reserve(lower);
+
+ iterator.for_each(move |elem| self.push(elem));
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
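`extend` dispatches on the iterator type: arbitrary iterators reserve and push element by element, while a `Vec` is appended wholesale followed by a single `rebuild_tail`, and another `BinaryHeap` goes through `append`. A quick illustration of the `Vec` path:

```rust
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([5, 1]);
    // A `Vec` argument takes the specialized path above: append the whole
    // buffer, then restore the heap property for the new tail.
    heap.extend(vec![9, 3, 7]);
    assert_eq!(heap.len(), 5);
    assert_eq!(heap.pop(), Some(9));
}
```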
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
index 5a05215ae..ffbb6c80a 100644
--- a/library/alloc/src/collections/binary_heap/tests.rs
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -1,8 +1,9 @@
use super::*;
use crate::boxed::Box;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use core::mem;
use std::iter::TrustedLen;
use std::panic::{catch_unwind, AssertUnwindSafe};
-use std::sync::atomic::{AtomicU32, Ordering};
#[test]
fn test_iterator() {
@@ -147,6 +148,24 @@ fn test_peek_mut() {
}
#[test]
+fn test_peek_mut_leak() {
+ let data = vec![4, 2, 7];
+ let mut heap = BinaryHeap::from(data);
+ let mut max = heap.peek_mut().unwrap();
+ *max = -1;
+
+ // The PeekMut object's Drop impl would have been responsible for moving the
+ // -1 out of the max position of the BinaryHeap, but we don't run it.
+ mem::forget(max);
+
+ // Absent some mitigation like leak amplification, the -1 would incorrectly
+ // end up in the last position of the returned Vec, with the rest of the
+ // heap's original contents in front of it in sorted order.
+ let sorted_vec = heap.into_sorted_vec();
+ assert!(sorted_vec.is_sorted(), "{:?}", sorted_vec);
+}
+
+#[test]
fn test_peek_mut_pop() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
let mut heap = BinaryHeap::from(data);
@@ -291,33 +310,83 @@ fn test_drain_sorted() {
#[test]
fn test_drain_sorted_leak() {
- static DROPS: AtomicU32 = AtomicU32::new(0);
-
- #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
- struct D(u32, bool);
-
- impl Drop for D {
- fn drop(&mut self) {
- DROPS.fetch_add(1, Ordering::SeqCst);
-
- if self.1 {
- panic!("panic in `drop`");
- }
- }
- }
-
+ let d0 = CrashTestDummy::new(0);
+ let d1 = CrashTestDummy::new(1);
+ let d2 = CrashTestDummy::new(2);
+ let d3 = CrashTestDummy::new(3);
+ let d4 = CrashTestDummy::new(4);
+ let d5 = CrashTestDummy::new(5);
let mut q = BinaryHeap::from(vec![
- D(0, false),
- D(1, false),
- D(2, false),
- D(3, true),
- D(4, false),
- D(5, false),
+ d0.spawn(Panic::Never),
+ d1.spawn(Panic::Never),
+ d2.spawn(Panic::Never),
+ d3.spawn(Panic::InDrop),
+ d4.spawn(Panic::Never),
+ d5.spawn(Panic::Never),
]);
- catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).unwrap_err();
+
+ assert_eq!(d0.dropped(), 1);
+ assert_eq!(d1.dropped(), 1);
+ assert_eq!(d2.dropped(), 1);
+ assert_eq!(d3.dropped(), 1);
+ assert_eq!(d4.dropped(), 1);
+ assert_eq!(d5.dropped(), 1);
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_forget() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut q =
+ BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut it = q.drain();
+ it.next();
+ mem::forget(it);
+ }))
+ .unwrap();
+ // Behaviour after leaking is explicitly unspecified and order is arbitrary,
+ // so it's fine if these start failing, but probably worth knowing.
+ assert!(q.is_empty());
+ assert_eq!(a.dropped() + b.dropped() + c.dropped(), 1);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+ drop(q);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+}
- assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+#[test]
+fn test_drain_sorted_forget() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut q =
+ BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut it = q.drain_sorted();
+ it.next();
+ mem::forget(it);
+ }))
+ .unwrap();
+ // Behaviour after leaking is explicitly unspecified,
+ // so it's fine if these start failing, but probably worth knowing.
+ assert_eq!(q.len(), 2);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+ drop(q);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
}
#[test]
@@ -415,7 +484,7 @@ fn test_retain() {
#[test]
#[cfg(not(target_os = "emscripten"))]
fn panic_safe() {
- use rand::{seq::SliceRandom, thread_rng};
+ use rand::seq::SliceRandom;
use std::cmp;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -440,7 +509,7 @@ fn panic_safe() {
self.0.partial_cmp(&other.0)
}
}
- let mut rng = thread_rng();
+ let mut rng = crate::test_helpers::test_rng();
const DATASZ: usize = 32;
// Miri is too slow
let ntest = if cfg!(miri) { 1 } else { 10 };
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 4c372b1d6..700b1463b 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -1,12 +1,12 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
-use super::super::testing::rng::DeterministicRng;
use super::Entry::{Occupied, Vacant};
use super::*;
use crate::boxed::Box;
use crate::fmt::Debug;
use crate::rc::Rc;
use crate::string::{String, ToString};
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::convert::TryFrom;
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 9d43ac5c5..7552f2fc0 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -21,6 +21,3 @@ trait Recover<Q: ?Sized> {
fn take(&mut self, key: &Q) -> Option<Self::Key>;
fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}
-
-#[cfg(test)]
-mod testing;
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index da766b67a..691246644 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -318,7 +318,10 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
- let _ = BorrowType::TRAVERSAL_PERMIT;
+ const {
+ assert!(BorrowType::TRAVERSAL_PERMIT);
+ }
+
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1006,10 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
- let _ = BorrowType::TRAVERSAL_PERMIT;
+ const {
+ assert!(BorrowType::TRAVERSAL_PERMIT);
+ }
+
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@@ -1666,17 +1672,17 @@ pub mod marker {
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
- // If node references of this borrow type allow traversing to other
- // nodes in the tree, this constant can be evaluated. Thus reading it
- // serves as a compile-time assertion.
- const TRAVERSAL_PERMIT: () = ();
+ /// If node references of this borrow type allow traversing to other
+ /// nodes in the tree, this constant is set to `true`. It can be used
+ /// for a compile-time assertion.
+ const TRAVERSAL_PERMIT: bool = true;
}
impl BorrowType for Owned {
- // Reject evaluation, because traversal isn't needed. Instead traversal
- // happens using the result of `borrow_mut`.
- // By disabling traversal, and only creating new references to roots,
- // we know that every reference of the `Owned` type is to a root node.
- const TRAVERSAL_PERMIT: () = panic!();
+ /// Reject traversal, because it isn't needed. Instead traversal
+ /// happens using the result of `borrow_mut`.
+ /// By disabling traversal, and only creating new references to roots,
+ /// we know that every reference of the `Owned` type is to a root node.
+ const TRAVERSAL_PERMIT: bool = false;
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
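The change above swaps an evaluation-rejecting `const` for a `bool` checked by an inline `const { assert!(..) }` block. The assertion is evaluated when the generic function is monomorphized, so instantiating `ascend`/`descend` for an `Owned` reference is rejected at compile time instead of ever reaching a run-time panic. A standalone sketch of the pattern (assuming a toolchain where inline `const` blocks are available, which is why the diff adds `#![feature(inline_const)]` to `lib.rs` below):

```rust
trait BorrowKind {
    // Defaults to permitting traversal.
    const TRAVERSAL_PERMIT: bool = true;
}

#[allow(dead_code)]
struct Owned;
impl BorrowKind for Owned {
    const TRAVERSAL_PERMIT: bool = false;
}

#[allow(dead_code)]
struct Immut;
impl BorrowKind for Immut {}

fn traverse<B: BorrowKind>() {
    // Evaluated during monomorphization: `traverse::<Owned>()` fails to
    // compile, while `traverse::<Immut>()` is fine.
    const {
        assert!(B::TRAVERSAL_PERMIT);
    }
}

fn main() {
    traverse::<Immut>();
    // traverse::<Owned>(); // error: assertion fails in a const context
}
```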
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 502d3e1d1..7b8d41a60 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -1,6 +1,6 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::rng::DeterministicRng;
use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
diff --git a/library/alloc/src/collections/linked_list/tests.rs b/library/alloc/src/collections/linked_list/tests.rs
index f8fbfa1bf..04594d55b 100644
--- a/library/alloc/src/collections/linked_list/tests.rs
+++ b/library/alloc/src/collections/linked_list/tests.rs
@@ -1,10 +1,11 @@
use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::vec::Vec;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::thread;
-use rand::{thread_rng, RngCore};
+use rand::RngCore;
#[test]
fn test_basic() {
@@ -480,12 +481,12 @@ fn test_split_off_2() {
}
}
-fn fuzz_test(sz: i32) {
+fn fuzz_test(sz: i32, rng: &mut impl RngCore) {
let mut m: LinkedList<_> = LinkedList::new();
let mut v = vec![];
for i in 0..sz {
check_links(&m);
- let r: u8 = thread_rng().next_u32() as u8;
+ let r: u8 = rng.next_u32() as u8;
match r % 6 {
0 => {
m.pop_back();
@@ -520,11 +521,12 @@ fn fuzz_test(sz: i32) {
#[test]
fn test_fuzz() {
+ let mut rng = crate::test_helpers::test_rng();
for _ in 0..25 {
- fuzz_test(3);
- fuzz_test(16);
+ fuzz_test(3, &mut rng);
+ fuzz_test(16, &mut rng);
#[cfg(not(miri))] // Miri is too slow
- fuzz_test(189);
+ fuzz_test(189, &mut rng);
}
}
@@ -984,35 +986,34 @@ fn drain_filter_complex() {
#[test]
fn drain_filter_drop_panic_leak() {
- static mut DROPS: i32 = 0;
-
- struct D(bool);
-
- impl Drop for D {
- fn drop(&mut self) {
- unsafe {
- DROPS += 1;
- }
-
- if self.0 {
- panic!("panic in `drop`");
- }
- }
- }
-
+ let d0 = CrashTestDummy::new(0);
+ let d1 = CrashTestDummy::new(1);
+ let d2 = CrashTestDummy::new(2);
+ let d3 = CrashTestDummy::new(3);
+ let d4 = CrashTestDummy::new(4);
+ let d5 = CrashTestDummy::new(5);
+ let d6 = CrashTestDummy::new(6);
+ let d7 = CrashTestDummy::new(7);
let mut q = LinkedList::new();
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_front(D(false));
- q.push_front(D(true));
- q.push_front(D(false));
-
- catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
-
- assert_eq!(unsafe { DROPS }, 8);
+ q.push_back(d3.spawn(Panic::Never));
+ q.push_back(d4.spawn(Panic::Never));
+ q.push_back(d5.spawn(Panic::Never));
+ q.push_back(d6.spawn(Panic::Never));
+ q.push_back(d7.spawn(Panic::Never));
+ q.push_front(d2.spawn(Panic::Never));
+ q.push_front(d1.spawn(Panic::InDrop));
+ q.push_front(d0.spawn(Panic::Never));
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).unwrap_err();
+
+ assert_eq!(d0.dropped(), 1);
+ assert_eq!(d1.dropped(), 1);
+ assert_eq!(d2.dropped(), 1);
+ assert_eq!(d3.dropped(), 1);
+ assert_eq!(d4.dropped(), 1);
+ assert_eq!(d5.dropped(), 1);
+ assert_eq!(d6.dropped(), 1);
+ assert_eq!(d7.dropped(), 1);
assert!(q.is_empty());
}
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
index 55f6138cd..e54880e86 100644
--- a/library/alloc/src/collections/vec_deque/into_iter.rs
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -25,6 +25,10 @@ impl<T, A: Allocator> IntoIter<T, A> {
pub(super) fn new(inner: VecDeque<T, A>) -> Self {
IntoIter { inner }
}
+
+ pub(super) fn into_vecdeque(self) -> VecDeque<T, A> {
+ self.inner
+ }
}
#[stable(feature = "collection_debug", since = "1.17.0")]
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4866c53e7..8317ac431 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -55,6 +55,10 @@ use self::spec_extend::SpecExtend;
mod spec_extend;
+use self::spec_from_iter::SpecFromIter;
+
+mod spec_from_iter;
+
#[cfg(test)]
mod tests;
@@ -531,12 +535,13 @@ impl<T> VecDeque<T> {
///
/// let deque: VecDeque<u32> = VecDeque::new();
/// ```
- // FIXME: This should probably be const
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_vec_deque_new", since = "1.68.0")]
#[must_use]
- pub fn new() -> VecDeque<T> {
- VecDeque::new_in(Global)
+ pub const fn new() -> VecDeque<T> {
+ // FIXME: This should just be `VecDeque::new_in(Global)` once that hits stable.
+ VecDeque { head: 0, len: 0, buf: RawVec::NEW }
}
/// Creates an empty deque with space for at least `capacity` elements.
@@ -586,6 +591,38 @@ impl<T, A: Allocator> VecDeque<T, A> {
VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
}
+ /// Creates a `VecDeque` from a raw allocation, when the initialized
+ /// part of that allocation forms a *contiguous* subslice thereof.
+ ///
+ /// For use by `vec::IntoIter::into_vecdeque`
+ ///
+ /// # Safety
+ ///
+ /// All the usual requirements on the allocated memory like in
+ /// `Vec::from_raw_parts_in`, but takes a *range* of elements that are
+ /// initialized rather than only supporting `0..len`. Requires that
+ /// `initialized.start` ≤ `initialized.end` ≤ `capacity`.
+ #[inline]
+ pub(crate) unsafe fn from_contiguous_raw_parts_in(
+ ptr: *mut T,
+ initialized: Range<usize>,
+ capacity: usize,
+ alloc: A,
+ ) -> Self {
+ debug_assert!(initialized.start <= initialized.end);
+ debug_assert!(initialized.end <= capacity);
+
+ // SAFETY: Our safety precondition guarantees the range length won't wrap,
+ // and that the allocation is valid for use in `RawVec`.
+ unsafe {
+ VecDeque {
+ head: initialized.start,
+ len: initialized.end.unchecked_sub(initialized.start),
+ buf: RawVec::from_raw_parts_in(ptr, capacity, alloc),
+ }
+ }
+ }
+
/// Provides a reference to the element at the given index.
///
/// Element at index 0 is the front of the queue.
@@ -599,6 +636,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
+ /// buf.push_back(6);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@@ -624,10 +662,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
+ /// buf.push_back(6);
+ /// assert_eq!(buf[1], 4);
/// if let Some(elem) = buf.get_mut(1) {
/// *elem = 7;
/// }
- ///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@@ -905,65 +944,72 @@ impl<T, A: Allocator> VecDeque<T, A> {
return;
}
- if target_cap < self.capacity() {
- // There are three cases of interest:
- // All elements are out of desired bounds
- // Elements are contiguous, and head is out of desired bounds
- // Elements are discontiguous, and tail is out of desired bounds
+ // There are three cases of interest:
+ // All elements are out of desired bounds
+ // Elements are contiguous, and tail is out of desired bounds
+ // Elements are discontiguous
+ //
+ // At all other times, element positions are unaffected.
+
+ // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
+ // overflow.
+ let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+
+ if self.len == 0 {
+ self.head = 0;
+ } else if self.head >= target_cap && tail_outside {
+ // Head and tail are both out of bounds, so copy all of them to the front.
//
- // At all other times, element positions are unaffected.
+ // H := head
+ // L := last element
+ // H L
+ // [. . . . . . . . o o o o o o o . ]
+ // H L
+ // [o o o o o o o . ]
+ unsafe {
+ // nonoverlapping because `self.head >= target_cap >= self.len`.
+ self.copy_nonoverlapping(self.head, 0, self.len);
+ }
+ self.head = 0;
+ } else if self.head < target_cap && tail_outside {
+ // Head is in bounds, tail is out of bounds.
+ // Copy the overflowing part to the beginning of the
+ // buffer. This won't overlap because `target_cap >= self.len`.
//
- // Indicates that elements at the head should be moved.
-
- let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
- // Move elements from out of desired bounds (positions after target_cap)
- if self.len == 0 {
- self.head = 0;
- } else if self.head >= target_cap && tail_outside {
- // H := head
- // L := last element
- // H L
- // [. . . . . . . . o o o o o o o . ]
- // H L
- // [o o o o o o o . ]
- unsafe {
- // nonoverlapping because self.head >= target_cap >= self.len
- self.copy_nonoverlapping(self.head, 0, self.len);
- }
- self.head = 0;
- } else if self.head < target_cap && tail_outside {
- // H := head
- // L := last element
- // H L
- // [. . . o o o o o o o . . . . . . ]
- // L H
- // [o o . o o o o o ]
- let len = self.head + self.len - target_cap;
- unsafe {
- self.copy_nonoverlapping(target_cap, 0, len);
- }
- } else if self.head >= target_cap {
- // H := head
- // L := last element
- // L H
- // [o o o o o . . . . . . . . . o o ]
- // L H
- // [o o o o o . o o ]
- let len = self.capacity() - self.head;
- let new_head = target_cap - len;
- unsafe {
- // can't use copy_nonoverlapping here for the same reason
- // as in `handle_capacity_increase()`
- self.copy(self.head, new_head, len);
- }
- self.head = new_head;
+ // H := head
+ // L := last element
+ // H L
+ // [. . . o o o o o o o . . . . . . ]
+ // L H
+ // [o o . o o o o o ]
+ let len = self.head + self.len - target_cap;
+ unsafe {
+ self.copy_nonoverlapping(target_cap, 0, len);
}
-
- self.buf.shrink_to_fit(target_cap);
-
- debug_assert!(self.head < self.capacity() || self.capacity() == 0);
- debug_assert!(self.len <= self.capacity());
+ } else if !self.is_contiguous() {
+ // The head slice is at least partially out of bounds, tail is in bounds.
+ // Copy the head backwards so it lines up with the target capacity.
+ // This won't overlap because `target_cap >= self.len`.
+ //
+ // H := head
+ // L := last element
+ // L H
+ // [o o o o o . . . . . . . . . o o ]
+ // L H
+ // [o o o o o . o o ]
+ let head_len = self.capacity() - self.head;
+ let new_head = target_cap - head_len;
+ unsafe {
+ // can't use `copy_nonoverlapping()` here because the new and old
+ // regions for the head might overlap.
+ self.copy(self.head, new_head, head_len);
+ }
+ self.head = new_head;
}
+ self.buf.shrink_to_fit(target_cap);
+
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
+ debug_assert!(self.len <= self.capacity());
}
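The rewritten `shrink_to` flattens the old nesting: the empty, head-and-tail-out-of-bounds, tail-only-out-of-bounds, and discontiguous cases are each handled up front, and the buffer is shrunk once at the end. Whichever case fires, the logical order of the deque is preserved, e.g.:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq = VecDeque::with_capacity(16);
    dq.extend(1..=4);
    // Advance the head so the elements no longer sit at the start of the
    // backing buffer.
    for _ in 0..2 {
        let x = dq.pop_front().unwrap();
        dq.push_back(x);
    }
    dq.shrink_to(4);
    assert!(dq.capacity() >= 4);
    // The elements were repositioned, but the logical order is intact.
    assert_eq!(dq.iter().copied().collect::<Vec<_>>(), [3, 4, 1, 2]);
}
```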
/// Shortens the deque, keeping the first `len` elements and dropping
@@ -1878,7 +1924,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
if T::IS_ZST {
- self.len += other.len;
+ self.len = self.len.checked_add(other.len).expect("capacity overflow");
other.len = 0;
other.head = 0;
return;
@@ -2505,7 +2551,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// The deque is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the deque
/// and all elements for which the predicate returns false are at the end.
- /// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
+ /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
/// (all odd numbers are at the start, all even at the end).
///
/// If the deque is not partitioned, the returned result is unspecified and meaningless,
@@ -2699,18 +2745,8 @@ impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for VecDeque<T> {
- #[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
- // Since converting is O(1) now, might as well re-use that logic
- // (including things like the `vec::IntoIter`→`Vec` specialization)
- // especially as that could save us some monomorphiziation work
- // if one uses the same iterators (like slice ones) with both.
- return from_iter_via_vec(iter.into_iter());
-
- #[inline]
- fn from_iter_via_vec<U>(iter: impl Iterator<Item = U>) -> VecDeque<U> {
- Vec::from_iter(iter).into()
- }
+ SpecFromIter::spec_from_iter(iter.into_iter())
}
}
@@ -2794,9 +2830,9 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
- /// In its current implementation, this is a very cheap
- /// conversion. This isn't yet a guarantee though, and
- /// shouldn't be relied on.
+ /// This conversion is guaranteed to run in *O*(1) time
+ /// and to not re-allocate the `Vec`'s buffer or allocate
+ /// any additional memory.
#[inline]
fn from(other: Vec<T, A>) -> Self {
let (ptr, len, cap, alloc) = other.into_raw_parts_with_alloc();
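Because the conversion is now documented as *O*(1) with no reallocation, the buffer handoff is directly observable. A small demonstration (the pointer comparison and the head-at-zero layout are shown for illustration of the current behavior, not as addresses to depend on):

```rust
use std::collections::VecDeque;

fn main() {
    let v = vec![7u8; 1024];
    let ptr_before = v.as_ptr();
    let dq = VecDeque::from(v); // O(1): the deque takes over the Vec's buffer
    // The front slice is the whole deque and still lives at the same address.
    assert_eq!(dq.as_slices().0.as_ptr(), ptr_before);
    assert_eq!(dq.len(), 1024);
}
```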
diff --git a/library/alloc/src/collections/vec_deque/spec_from_iter.rs b/library/alloc/src/collections/vec_deque/spec_from_iter.rs
new file mode 100644
index 000000000..7650492eb
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/spec_from_iter.rs
@@ -0,0 +1,33 @@
+use super::{IntoIter, VecDeque};
+
+/// Specialization trait used for `VecDeque::from_iter`
+pub(super) trait SpecFromIter<T, I> {
+ fn spec_from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIter<T, I> for VecDeque<T>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_from_iter(iterator: I) -> Self {
+ // Since converting is O(1) now, just re-use the `Vec` logic for
+ // anything where we can't do something extra-special for `VecDeque`,
+ // especially as that could save us some monomorphization work
+ // if one uses the same iterators (like slice ones) with both.
+ crate::vec::Vec::from_iter(iterator).into()
+ }
+}
+
+impl<T> SpecFromIter<T, crate::vec::IntoIter<T>> for VecDeque<T> {
+ #[inline]
+ fn spec_from_iter(iterator: crate::vec::IntoIter<T>) -> Self {
+ iterator.into_vecdeque()
+ }
+}
+
+impl<T> SpecFromIter<T, IntoIter<T>> for VecDeque<T> {
+ #[inline]
+ fn spec_from_iter(iterator: IntoIter<T>) -> Self {
+ iterator.into_vecdeque()
+ }
+}
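The effect of the three impls: collecting from a `vec::IntoIter`, or from the deque's own `IntoIter`, reuses the existing allocation, while every other iterator still funnels through `Vec::from_iter`. For example:

```rust
use std::collections::VecDeque;

fn main() {
    let v = vec![1, 2, 3, 4];
    // Hits the `vec::IntoIter` specialization: the Vec's buffer is reused
    // rather than copied element by element.
    let dq: VecDeque<i32> = v.into_iter().collect();
    assert_eq!(dq, [1, 2, 3, 4]);

    // Round-tripping through the deque's own `IntoIter` is likewise O(1).
    let dq2: VecDeque<i32> = dq.into_iter().collect();
    assert_eq!(dq2, [1, 2, 3, 4]);
}
```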
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 220ad71be..205a8ff3c 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -749,6 +749,48 @@ fn test_drain() {
}
#[test]
+fn issue_108453() {
+ let mut deque = VecDeque::with_capacity(10);
+
+ deque.push_back(1u8);
+ deque.push_back(2);
+ deque.push_back(3);
+
+ deque.push_front(10);
+ deque.push_front(9);
+
+ deque.shrink_to(9);
+
+ assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![9, 10, 1, 2, 3]);
+}
+
+#[test]
+fn test_shrink_to() {
+ // Test deques of capacity 16 with all possible head positions, lengths, and target capacities.
+ let cap = 16;
+
+ for len in 0..cap {
+ for head in 0..cap {
+ let expected = (1..=len).collect::<VecDeque<_>>();
+
+ for target_cap in len..cap {
+ let mut deque = VecDeque::with_capacity(cap);
+ // currently, `with_capacity` always allocates the exact capacity if it's greater than 8.
+ assert_eq!(deque.capacity(), cap);
+
+ // we can let the head point anywhere in the buffer since the deque is empty.
+ deque.head = head;
+ deque.extend(1..=len);
+
+ deque.shrink_to(target_cap);
+
+ assert_eq!(deque, expected);
+ }
+ }
+ }
+}
+
+#[test]
fn test_shrink_to_fit() {
// This test checks that every single combination of head and tail position,
// is tested. Capacity 15 should be large enough to cover every case.
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
index 799ce9d5d..eadb35cb9 100644
--- a/library/alloc/src/fmt.rs
+++ b/library/alloc/src/fmt.rs
@@ -419,7 +419,7 @@
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
-//! let string = format!("{:.*}", decimals, magnitude);
+//! let string = format!("{magnitude:.decimals$}");
//! f.pad_integral(true, "", &string)
//! }
//! }
@@ -518,7 +518,7 @@
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
-//! write!(&mut io::stdout(), "{}", args);
+//! write!(&mut io::stdout(), "{args}");
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 96960d43f..ca75c3895 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -3,7 +3,7 @@
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
-//! This library, like libcore, normally doesn’t need to be used directly
+//! This library, like core, normally doesn’t need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they’d use this crate instead.
@@ -75,7 +75,7 @@
))]
#![no_std]
#![needs_allocator]
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -106,10 +106,12 @@
#![feature(const_size_of_val)]
#![feature(const_align_of_val)]
#![feature(const_ptr_read)]
+#![feature(const_maybe_uninit_zeroed)]
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_refs_to_cell)]
#![feature(core_intrinsics)]
+#![feature(core_panic)]
#![feature(const_eval_select)]
#![feature(const_pin)]
#![feature(const_waker)]
@@ -122,7 +124,9 @@
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
+#![feature(inline_const)]
#![feature(inplace_iteration)]
+#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -152,7 +156,7 @@
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
-#![cfg_attr(not(bootstrap), feature(tuple_trait))]
+#![feature(tuple_trait)]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
@@ -190,6 +194,7 @@
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
#![feature(with_negative_coherence)]
+#![cfg_attr(test, feature(panic_update_hook))]
//
// Rustdoc features:
#![feature(doc_cfg)]
@@ -206,6 +211,8 @@
extern crate std;
#[cfg(test)]
extern crate test;
+#[cfg(test)]
+mod testing;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
@@ -251,3 +258,20 @@ pub mod vec;
pub mod __export {
pub use core::format_args;
}
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations
+pub(crate) mod test_helpers {
+ /// Copied from `std::test_helpers::test_rng`, since these tests rely on the
+ /// seed not being the same for every RNG invocation too.
+ pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use std::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ std::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec =
+ hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+ }
+}
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 38e31b180..c9aa23fc4 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -336,7 +336,7 @@ impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
#[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2179,7 +2179,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<RcBox<T>>,
@@ -2190,7 +2190,7 @@ impl<T: ?Sized> !marker::Send for Weak<T> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> !marker::Sync for Weak<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2561,7 +2561,7 @@ impl<T: ?Sized> Clone for Weak<T> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 1b61ede34..fecacc2bb 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -19,15 +19,20 @@ use core::cmp::Ordering::{self, Less};
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
+#[cfg(not(no_global_oom_handling))]
+use core::slice::sort;
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
-use crate::alloc::Global;
+use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
+#[cfg(test)]
+mod tests;
+
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
@@ -203,7 +208,7 @@ impl<T> [T] {
where
T: Ord,
{
- merge_sort(self, T::lt);
+ stable_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@@ -259,7 +264,7 @@ impl<T> [T] {
where
F: FnMut(&T, &T) -> Ordering,
{
- merge_sort(self, |a, b| compare(a, b) == Less);
+ stable_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
@@ -302,7 +307,7 @@ impl<T> [T] {
F: FnMut(&T) -> K,
K: Ord,
{
- merge_sort(self, |a, b| f(a).lt(&f(b)));
+ stable_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
@@ -653,7 +658,7 @@ impl [u8] {
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica
-/// --> src/liballoc/slice.rs:608:6
+/// --> library/alloc/src/slice.rs:608:6
/// |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
/// | ^ unconstrained type parameter
@@ -809,324 +814,52 @@ impl<T: Clone> ToOwned for [T] {
// Sorting
////////////////////////////////////////////////////////////////////////////////
-/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
-///
-/// This is the integral subroutine of insertion sort.
-#[cfg(not(no_global_oom_handling))]
-fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- if v.len() >= 2 && is_less(&v[1], &v[0]) {
- unsafe {
- // There are three ways to implement insertion here:
- //
- // 1. Swap adjacent elements until the first one gets to its final destination.
- // However, this way we copy data around more than is necessary. If elements are big
- // structures (costly to copy), this method will be slow.
- //
- // 2. Iterate until the right place for the first element is found. Then shift the
- // elements succeeding it to make room for it and finally place it into the
- // remaining hole. This is a good method.
- //
- // 3. Copy the first element into a temporary variable. Iterate until the right place
- // for it is found. As we go along, copy every traversed element into the slot
- // preceding it. Finally, copy data from the temporary variable into the remaining
- // hole. This method is very good. Benchmarks demonstrated slightly better
- // performance than with the 2nd method.
- //
- // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
- let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
-
- // Intermediate state of the insertion process is always tracked by `hole`, which
- // serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` in the end.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and
- // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
- // initially held exactly once.
- let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
- ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
-
- for i in 2..v.len() {
- if !is_less(&v[i], &*tmp) {
- break;
- }
- ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
- hole.dest = &mut v[i];
- }
- // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
- }
- }
-
- // When dropped, copies from `src` into `dest`.
- struct InsertionHole<T> {
- src: *const T,
- dest: *mut T,
- }
-
- impl<T> Drop for InsertionHole<T> {
- fn drop(&mut self) {
- unsafe {
- ptr::copy_nonoverlapping(self.src, self.dest, 1);
- }
- }
- }
-}
-
-/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
-/// stores the result into `v[..]`.
-///
-/// # Safety
-///
-/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
-/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
-#[cfg(not(no_global_oom_handling))]
-unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- let len = v.len();
- let v = v.as_mut_ptr();
- let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
-
- // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
- // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
- // copying the lesser (or greater) one into `v`.
- //
- // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
- // consumed first, then we must copy whatever is left of the shorter run into the remaining
- // hole in `v`.
- //
- // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
- // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
- // object it initially held exactly once.
- let mut hole;
-
- if mid <= len - mid {
- // The left run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v, buf, mid);
- hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
- }
-
- // Initially, these pointers point to the beginnings of their arrays.
- let left = &mut hole.start;
- let mut right = v_mid;
- let out = &mut hole.dest;
-
- while *left < hole.end && right < v_end {
- // Consume the lesser side.
- // If equal, prefer the left run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right, &**left) {
- get_and_increment(&mut right)
- } else {
- get_and_increment(left)
- };
- ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
- }
- }
- } else {
- // The right run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v_mid, buf, len - mid);
- hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
- }
-
- // Initially, these pointers point past the ends of their arrays.
- let left = &mut hole.dest;
- let right = &mut hole.end;
- let mut out = v_end;
-
- while v < *left && buf < *right {
- // Consume the greater side.
- // If equal, prefer the right run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
- decrement_and_get(left)
- } else {
- decrement_and_get(right)
- };
- ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
- }
- }
- }
- // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
- // it will now be copied into the hole in `v`.
-
- unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
- let old = *ptr;
- *ptr = unsafe { ptr.add(1) };
- old
- }
-
- unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.sub(1) };
- *ptr
- }
-
- // When dropped, copies the range `start..end` into `dest..`.
- struct MergeHole<T> {
- start: *mut T,
- end: *mut T,
- dest: *mut T,
- }
-
- impl<T> Drop for MergeHole<T> {
- fn drop(&mut self) {
- // `T` is not a zero-sized type, and these are pointers into a slice's elements.
- unsafe {
- let len = self.end.sub_ptr(self.start);
- ptr::copy_nonoverlapping(self.start, self.dest, len);
- }
- }
- }
-}
-
-/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
-/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
-///
-/// The algorithm identifies strictly descending and non-descending subsequences, which are called
-/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
-/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
-/// satisfied:
-///
-/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
-/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
-///
-/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+#[inline]
#[cfg(not(no_global_oom_handling))]
-fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
- // Slices of up to this length get sorted using insertion sort.
- const MAX_INSERTION: usize = 20;
- // Very short runs are extended using insertion sort to span at least this many elements.
- const MIN_RUN: usize = 10;
-
- // Sorting has no meaningful behavior on zero-sized types.
if T::IS_ZST {
+ // Sorting has no meaningful behavior on zero-sized types. Do nothing.
return;
}
- let len = v.len();
+ let elem_alloc_fn = |len: usize| -> *mut T {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with a len
+ // greater than v.len(). The allocation is only used as a 'shadow region' to store
+ // temporary swap elements.
+ unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
+ };
- // Short arrays get sorted in-place via insertion sort to avoid allocations.
- if len <= MAX_INSERTION {
- if len >= 2 {
- for i in (0..len - 1).rev() {
- insert_head(&mut v[i..], &mut is_less);
- }
- }
- return;
- }
-
- // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
- // shallow copies of the contents of `v` without risking the dtors running on copies if
- // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
- // which will always have length at most `len / 2`.
- let mut buf = Vec::with_capacity(len / 2);
-
- // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
- // strange decision, but consider the fact that merges more often go in the opposite direction
- // (forwards). According to benchmarks, merging forwards is slightly faster than merging
- // backwards. To conclude, identifying runs by traversing backwards improves performance.
- let mut runs = vec![];
- let mut end = len;
- while end > 0 {
- // Find the next natural run, and reverse it if it's strictly descending.
- let mut start = end - 1;
- if start > 0 {
- start -= 1;
- unsafe {
- if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
- while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
- start -= 1;
- }
- v[start..end].reverse();
- } else {
- while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
- {
- start -= 1;
- }
- }
- }
- }
-
- // Insert some more elements into the run if it's too short. Insertion sort is faster than
- // merge sort on short sequences, so this significantly improves performance.
- while start > 0 && end - start < MIN_RUN {
- start -= 1;
- insert_head(&mut v[start..end], &mut is_less);
+ let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
+ // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
+ // len.
+ unsafe {
+ alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
}
+ };
- // Push this run onto the stack.
- runs.push(Run { start, len: end - start });
- end = start;
-
- // Merge some pairs of adjacent runs to satisfy the invariants.
- while let Some(r) = collapse(&runs) {
- let left = runs[r + 1];
- let right = runs[r];
- unsafe {
- merge(
- &mut v[left.start..right.start + right.len],
- left.len,
- buf.as_mut_ptr(),
- &mut is_less,
- );
- }
- runs[r] = Run { start: left.start, len: left.len + right.len };
- runs.remove(r + 1);
+ let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
+ // obscene length or 0.
+ unsafe {
+ alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
+ as *mut sort::TimSortRun
}
- }
-
- // Finally, exactly one run must remain in the stack.
- debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+ };
- // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
- // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
- // algorithm should continue building a new run instead, `None` is returned.
- //
- // TimSort is infamous for its buggy implementations, as described here:
- // http://envisage-project.eu/timsort-specification-and-verification/
- //
- // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
- // Enforcing them on just top three is not sufficient to ensure that the invariants will still
- // hold for *all* runs in the stack.
- //
- // This function correctly checks invariants for the top four runs. Additionally, if the top
- // run starts at index 0, it will always demand a merge operation until the stack is fully
- // collapsed, in order to complete the sort.
- #[inline]
- fn collapse(runs: &[Run]) -> Option<usize> {
- let n = runs.len();
- if n >= 2
- && (runs[n - 1].start == 0
- || runs[n - 2].len <= runs[n - 1].len
- || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
- || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
- {
- if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
- } else {
- None
+ let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
+ // SAFETY: The caller must ensure that `buf_ptr` was created by `run_alloc_fn` with the
+ // same `len`.
+ unsafe {
+ alloc::dealloc(
+ buf_ptr as *mut u8,
+ alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
+ );
}
- }
+ };
- #[derive(Clone, Copy)]
- struct Run {
- start: usize,
- len: usize,
- }
+ sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}
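The driver is deliberately thin: the TimSort implementation now lives in `core::slice::sort`, and since `core` cannot allocate, `alloc` passes allocation and deallocation closures down for both the element scratch buffer and the run stack. A minimal sketch of that inversion of control (the names and the single-merge body are illustrative, not the real `core` API):

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::ptr;

// A "core-style" routine that cannot allocate by itself: the caller supplies
// the scratch-buffer allocator and deallocator. Here it merges two sorted
// halves of `v` (error handling for a failed allocation is elided).
fn merge_halves(
    v: &mut [i32],
    mid: usize,
    elem_alloc: impl Fn(usize) -> *mut i32,
    elem_dealloc: impl Fn(*mut i32, usize),
) {
    let buf = elem_alloc(mid);
    unsafe {
        // Copy the left run out, then merge it back with the right run.
        ptr::copy_nonoverlapping(v.as_ptr(), buf, mid);
        let (mut i, mut j, mut out) = (0, mid, 0);
        while i < mid && j < v.len() {
            if *buf.add(i) <= v[j] {
                v[out] = *buf.add(i);
                i += 1;
            } else {
                v[out] = v[j];
                j += 1;
            }
            out += 1;
        }
        while i < mid {
            v[out] = *buf.add(i);
            i += 1;
            out += 1;
        }
    }
    elem_dealloc(buf, mid);
}

fn main() {
    let mut v = [2, 5, 8, 1, 3, 9];
    merge_halves(
        &mut v,
        3,
        |len| unsafe { alloc(Layout::array::<i32>(len).unwrap()) as *mut i32 },
        |p, len| unsafe { dealloc(p as *mut u8, Layout::array::<i32>(len).unwrap()) },
    );
    assert_eq!(v, [1, 2, 3, 5, 8, 9]);
}
```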
diff --git a/library/alloc/src/slice/tests.rs b/library/alloc/src/slice/tests.rs
new file mode 100644
index 000000000..f674530aa
--- /dev/null
+++ b/library/alloc/src/slice/tests.rs
@@ -0,0 +1,359 @@
+use crate::borrow::ToOwned;
+use crate::rc::Rc;
+use crate::string::ToString;
+use crate::test_helpers::test_rng;
+use crate::vec::Vec;
+
+use core::cell::Cell;
+use core::cmp::Ordering::{self, Equal, Greater, Less};
+use core::convert::identity;
+use core::fmt;
+use core::mem;
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use rand::{distributions::Standard, prelude::*, Rng, RngCore};
+use std::panic;
+
+macro_rules! do_test {
+ ($input:ident, $func:ident) => {
+ let len = $input.len();
+
+ // Work out the total number of comparisons required to sort
+ // this array...
+ let mut count = 0usize;
+ $input.to_owned().$func(|a, b| {
+ count += 1;
+ a.cmp(b)
+ });
+
+ // ... and then panic on each and every single one.
+ for panic_countdown in 0..count {
+ // Refresh the counters.
+ VERSIONS.store(0, Relaxed);
+ for i in 0..len {
+ DROP_COUNTS[i].store(0, Relaxed);
+ }
+
+ let v = $input.to_owned();
+ let _ = std::panic::catch_unwind(move || {
+ let mut v = v;
+ let mut panic_countdown = panic_countdown;
+ v.$func(|a, b| {
+ if panic_countdown == 0 {
+ SILENCE_PANIC.with(|s| s.set(true));
+ panic!();
+ }
+ panic_countdown -= 1;
+ a.cmp(b)
+ })
+ });
+
+ // Check that the number of things dropped is exactly
+ // what we expect (i.e., the contents of `v`).
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+ let count = c.load(Relaxed);
+ assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
+ }
+
+ // Check that the most recent versions of values were dropped.
+ assert_eq!(VERSIONS.load(Relaxed), 0);
+ }
+ };
+}
+
+const MAX_LEN: usize = 80;
+
+static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
+ // FIXME(RFC 1109): AtomicUsize is not Copy.
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+];
+
+static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+ x: u32,
+ id: usize,
+ version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+ fn eq(&self, other: &Self) -> bool {
+ self.partial_cmp(other) == Some(Ordering::Equal)
+ }
+}
+
+impl PartialOrd for DropCounter {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.version.set(self.version.get() + 1);
+ other.version.set(other.version.get() + 1);
+ VERSIONS.fetch_add(2, Relaxed);
+ self.x.partial_cmp(&other.x)
+ }
+}
+
+impl Ord for DropCounter {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.partial_cmp(other).unwrap()
+ }
+}
+
+impl Drop for DropCounter {
+ fn drop(&mut self) {
+ DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+ VERSIONS.fetch_sub(self.version.get(), Relaxed);
+ }
+}
+
+std::thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)] // no threads
+fn panic_safe() {
+ panic::update_hook(move |prev, info| {
+ if !SILENCE_PANIC.with(|s| s.get()) {
+ prev(info);
+ }
+ });
+
+ let mut rng = test_rng();
+
+ // Miri is too slow (but still need to `chain` to make the types match)
+ let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
+ let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
+
+ for len in lens {
+ for &modulus in moduli {
+ for &has_runs in &[false, true] {
+ let mut input = (0..len)
+ .map(|id| DropCounter {
+ x: rng.next_u32() % modulus,
+ id: id,
+ version: Cell::new(0),
+ })
+ .collect::<Vec<_>>();
+
+ if has_runs {
+ for c in &mut input {
+ c.x = c.id as u32;
+ }
+
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ input[a..b].reverse();
+ } else {
+ input.swap(a, b);
+ }
+ }
+ }
+
+ do_test!(input, sort_by);
+ do_test!(input, sort_unstable_by);
+ }
+ }
+ }
+
+ // Set default panic hook again.
+ drop(panic::take_hook());
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_sort() {
+ let mut rng = test_rng();
+
+ for len in (2..25).chain(500..510) {
+ for &modulus in &[5, 10, 100, 1000] {
+ for _ in 0..10 {
+ let orig: Vec<_> = (&mut rng)
+ .sample_iter::<i32, _>(&Standard)
+ .map(|x| x % modulus)
+ .take(len)
+ .collect();
+
+ // Sort in default order.
+ let mut v = orig.clone();
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in ascending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| a.cmp(b));
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in descending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| b.cmp(a));
+ assert!(v.windows(2).all(|w| w[0] >= w[1]));
+
+ // Sort in lexicographic order.
+ let mut v1 = orig.clone();
+ let mut v2 = orig.clone();
+ v1.sort_by_key(|x| x.to_string());
+ v2.sort_by_cached_key(|x| x.to_string());
+ assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
+ assert!(v1 == v2);
+
+ // Sort with many pre-sorted runs.
+ let mut v = orig.clone();
+ v.sort();
+ v.reverse();
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ v[a..b].reverse();
+ } else {
+ v.swap(a, b);
+ }
+ }
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v = [0; 500];
+ for i in 0..v.len() {
+ v[i] = i as i32;
+ }
+ v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+ v.sort();
+ for i in 0..v.len() {
+ assert_eq!(v[i], i as i32);
+ }
+
+ // Should not panic.
+ [0i32; 0].sort();
+ [(); 10].sort();
+ [(); 100].sort();
+
+ let mut v = [0xDEADBEEFu64];
+ v.sort();
+ assert!(v == [0xDEADBEEF]);
+}
+
+#[test]
+fn test_sort_stability() {
+ // Miri is too slow
+ let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+ let rounds = if cfg!(miri) { 1 } else { 10 };
+
+ let mut rng = test_rng();
+ for len in (2..25).chain(large_range) {
+ for _ in 0..rounds {
+ let mut counts = [0; 10];
+
+ // create a vector like [(6, 1), (5, 1), (6, 2), ...],
+ // where the first item of each tuple is random, but
+ // the second item represents which occurrence of that
+ // number this element is, i.e., the second elements
+ // will occur in sorted order.
+ let orig: Vec<_> = (0..len)
+ .map(|_| {
+ let n = rng.gen::<usize>() % 10;
+ counts[n] += 1;
+ (n, counts[n])
+ })
+ .collect();
+
+ let mut v = orig.clone();
+ // Only sort on the first element, so an unstable sort
+ // may mix up the counts.
+ v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+ // This comparison includes the count (the second item
+ // of the tuple), so elements with equal first items
+ // will need to be ordered with increasing
+ // counts... i.e., exactly asserting that this sort is
+ // stable.
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ let mut v = orig.clone();
+ v.sort_by_cached_key(|&(x, _)| x);
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+}
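
A note on the technique in `do_test!`/`panic_safe` above: the harness first runs the sort once purely to count comparisons, then re-runs it once per comparison, injecting a panic at exactly that point, and finally asserts via the `Drop` bookkeeping that every element was dropped exactly once. A minimal standalone sketch of the injection step (hypothetical helper, not part of the patch):

    use std::panic;

    // Sort a copy of `data`, panicking at the k-th comparison. The unwind is
    // caught so a caller can go on to assert on drop bookkeeping.
    fn sort_panicking_at(k: usize, data: &[i32]) {
        let mut v = data.to_vec();
        let mut countdown = k;
        let _ = panic::catch_unwind(move || {
            v.sort_by(|a, b| {
                if countdown == 0 {
                    panic!("injected failure");
                }
                countdown -= 1;
                a.cmp(b)
            });
        });
    }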
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index b28d20cda..afbe5cfaf 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -559,10 +559,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> String {
- let mut bytes = self.as_bytes().to_vec();
- bytes.make_ascii_uppercase();
- // make_ascii_uppercase() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(bytes) }
+ let mut s = self.to_owned();
+ s.make_ascii_uppercase();
+ s
}
/// Returns a copy of this string where each character is mapped to its
@@ -592,10 +591,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> String {
- let mut bytes = self.as_bytes().to_vec();
- bytes.make_ascii_lowercase();
- // make_ascii_lowercase() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(bytes) }
+ let mut s = self.to_owned();
+ s.make_ascii_lowercase();
+ s
}
}
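
The rewrite above drops the `unsafe` block entirely: `make_ascii_uppercase` mutates the owned `String` in place, and since it only rewrites ASCII bytes the UTF-8 invariant is never in question. Observable behavior is unchanged; compare with the full Unicode mapping:

    assert_eq!("grüße".to_ascii_uppercase(), "GRüßE"); // non-ASCII left alone
    assert_eq!("grüße".to_uppercase(), "GRÜSSE");      // full Unicode mapping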
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index 7a8e6f088..ca182c810 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -363,7 +363,7 @@ use crate::vec::Vec;
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(all(not(bootstrap), not(test)), lang = "String")]
+#[cfg_attr(not(test), lang = "String")]
pub struct String {
vec: Vec<u8>,
}
@@ -2549,6 +2549,15 @@ impl ToString for char {
}
#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "bool_to_string_specialization", since = "1.68.0")]
+impl ToString for bool {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(if *self { "true" } else { "false" })
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
impl ToString for u8 {
#[inline]
@@ -2678,7 +2687,7 @@ impl From<&String> for String {
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "string_from_box", since = "1.18.0")]
impl From<Box<str>> for String {
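
The `ToString for bool` impl added above is another manual specialization: the blanket `impl<T: Display> ToString for T` routes through the `core::fmt` machinery, while this one builds the `String` from a static `&str` directly. The result is identical:

    assert_eq!(true.to_string(), "true");
    assert_eq!(false.to_string(), "false");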
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index f7dc4d109..bab7f5f53 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -254,7 +254,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -295,7 +295,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<ArcInner<T>>,
@@ -306,13 +306,13 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
@@ -1656,7 +1656,7 @@ impl<T: ?Sized> Arc<T> {
//
// The acquire label here ensures a happens-before relationship with any
// writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
- // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
+ // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
// weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
// This needs to be an `Acquire` to synchronize with the decrement of the `strong`
@@ -1712,7 +1712,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
}
// This fence is needed to prevent reordering of use of the data and
- // deletion of the data. Because it is marked `Release`, the decreasing
+ // deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
@@ -2172,7 +2172,7 @@ impl<T: ?Sized> Clone for Weak<T> {
} else {
return Weak { ptr: self.ptr };
};
- // See comments in Arc::clone() for why this is relaxed. This can use a
+ // See comments in Arc::clone() for why this is relaxed. This can use a
// fetch_add (ignoring the lock) because the weak count is only locked
 // when there are *no other* weak pointers in existence. (So we can't be
// running this code in that case).
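
Besides the tracking-issue and comment cleanups, the sync.rs section relaxes `Debug` for `Weak<T>` by dropping the `T: fmt::Debug` bound; the output is the fixed string `(Weak)` regardless of `T`, so the bound bought nothing. Code like the following compiles after the change (via the `std::sync` re-export):

    use std::sync::Weak;

    struct NotDebug; // deliberately does not implement Debug

    let w: Weak<NotDebug> = Weak::new();
    assert_eq!(format!("{w:?}"), "(Weak)");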
diff --git a/library/alloc/src/collections/btree/testing/crash_test.rs b/library/alloc/src/testing/crash_test.rs
index bcf5f5f72..bcf5f5f72 100644
--- a/library/alloc/src/collections/btree/testing/crash_test.rs
+++ b/library/alloc/src/testing/crash_test.rs
diff --git a/library/alloc/src/collections/btree/testing/mod.rs b/library/alloc/src/testing/mod.rs
index 7a094f8a5..7a094f8a5 100644
--- a/library/alloc/src/collections/btree/testing/mod.rs
+++ b/library/alloc/src/testing/mod.rs
diff --git a/library/alloc/src/collections/btree/testing/ord_chaos.rs b/library/alloc/src/testing/ord_chaos.rs
index 96ce7c157..96ce7c157 100644
--- a/library/alloc/src/collections/btree/testing/ord_chaos.rs
+++ b/library/alloc/src/testing/ord_chaos.rs
diff --git a/library/alloc/src/collections/btree/testing/rng.rs b/library/alloc/src/testing/rng.rs
index ecf543bee..ecf543bee 100644
--- a/library/alloc/src/collections/btree/testing/rng.rs
+++ b/library/alloc/src/testing/rng.rs
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
index 541f99bcf..2b1a787cc 100644
--- a/library/alloc/src/vec/drain.rs
+++ b/library/alloc/src/vec/drain.rs
@@ -223,9 +223,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
// as_slice() must only be called when iter.len() is > 0 because
- // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
- // the iterator's internal pointers. Creating a reference to deallocated memory
- // is invalid even when it is zero-length
+ // it also gets touched by vec::Splice, which may turn it into a dangling pointer
+ // that would make it and the vec pointer point to different allocations, leading
+ // to invalid pointer arithmetic below.
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 02cc7691a..37966007e 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -1,6 +1,8 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
+#[cfg(not(no_global_oom_handling))]
+use crate::collections::VecDeque;
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
@@ -38,7 +40,9 @@ pub struct IntoIter<
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
- pub(super) end: *const T,
+ pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ // ptr == end is a quick test for the Iterator being empty, that works
+ // for both ZST and non-ZST.
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@@ -130,7 +134,36 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
pub(crate) fn forget_remaining_elements(&mut self) {
- self.ptr = self.end;
+ // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
+ // `ptr` must stay aligned, while `end` may be unaligned.
+ self.end = self.ptr;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
+ // Keep our `Drop` impl from dropping the elements and the allocator
+ let mut this = ManuallyDrop::new(self);
+
+ // SAFETY: This allocation originally came from a `Vec`, so it passes
+ // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
+ // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
+ // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
+ // Taking `alloc` is ok because nothing else is going to look at it,
+ // since our `Drop` impl isn't going to run so there's no more code.
+ unsafe {
+ let buf = this.buf.as_ptr();
+ let initialized = if T::IS_ZST {
+ // All the pointers are the same for ZSTs, so it's fine to
+ // say that they're all at the beginning of the "allocation".
+ 0..this.len()
+ } else {
+ this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
+ };
+ let cap = this.cap;
+ let alloc = ManuallyDrop::take(&mut this.alloc);
+ VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
+ }
}
}
@@ -155,10 +188,9 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
if self.ptr == self.end {
None
} else if T::IS_ZST {
- // purposefully don't use 'ptr.offset' because for
- // vectors with 0-size elements this would return the
- // same pointer.
- self.ptr = self.ptr.wrapping_byte_add(1);
+ // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+ // reducing the `end`.
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
@@ -185,10 +217,8 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
- // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
- // effectively results in unsigned pointers representing positions 0..usize::MAX,
- // which is valid for ZSTs.
- self.ptr = self.ptr.wrapping_byte_add(step_size);
+ // See `next` for why we sub `end` here.
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -221,7 +251,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
- self.ptr = self.ptr.wrapping_byte_add(N);
+ self.end = self.end.wrapping_byte_sub(N);
// Safety: ditto
return Ok(unsafe { raw_ary.transpose().assume_init() });
}
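
The common thread of the into_iter.rs hunks above: for zero-sized `T`, `ptr` now stays fixed (it must remain aligned, which tests/vec.rs exercises below by asserting on drop addresses) and the remaining length lives entirely in `end`, treated as `ptr + len` in bytes, so consuming an element decrements `end` instead of incrementing `ptr`. A rough model of the invariant (illustrative only, not the std code):

    // Model: for ZSTs, `end` is `ptr + remaining_len` as a plain address.
    struct ZstIterModel {
        ptr: usize, // fixed and aligned for the whole iteration
        end: usize, // invariant: end == ptr + remaining_len
    }

    impl ZstIterModel {
        fn len(&self) -> usize {
            self.end - self.ptr
        }
        fn next(&mut self) -> Option<()> {
            if self.ptr == self.end {
                None
            } else {
                self.end -= 1; // shrink from the end; `ptr` never moves
                Some(())
            }
        }
    }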
diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs
index 8e652d676..cb9adf05c 100644
--- a/library/alloc/src/vec/is_zero.rs
+++ b/library/alloc/src/vec/is_zero.rs
@@ -4,7 +4,8 @@ use crate::boxed::Box;
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
- /// Whether this value's representation is all zeros
+ /// Whether this value's representation is all zeros,
+ /// or can be represented with all zeroes.
fn is_zero(&self) -> bool;
}
@@ -57,7 +58,7 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
- // it's worth doing if the array is really long. The threshold here
+ // it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-07-01 LLVM
// fails to const-fold the check in `vec![[1; 32]; n]`
// See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
@@ -147,6 +148,23 @@ impl_is_zero_option_of_nonzero!(
NonZeroIsize,
);
+macro_rules! impl_is_zero_option_of_num {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for Option<$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ const {
+ let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+ assert!(none.is_none());
+ }
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
+
unsafe impl<T: IsZero> IsZero for Wrapping<T> {
#[inline]
fn is_zero(&self) -> bool {
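
The `impl_is_zero_option_of_num!` impls above lean on an undocumented layout fact: `None` of these `Option<integer>` types happens to be the all-zero bit pattern today, even though the payload types have no niche. The inline `const { ... }` block turns that assumption into a compile-time assertion, so a future layout change breaks the build rather than letting `vec![None; n]` take the zero-fill allocation path with a wrong representation. The check, shown here in runtime form for illustration:

    use core::mem::MaybeUninit;

    // The patch runs this inside an inline `const` block, so a violation
    // fails at compile time rather than here.
    let none: Option<u32> = unsafe { MaybeUninit::zeroed().assume_init() };
    assert!(none.is_none());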
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index ba34ab680..36b0b3c9e 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -166,7 +166,7 @@ mod spec_extend;
/// vec[0] = 7;
/// assert_eq!(vec[0], 7);
///
-/// vec.extend([1, 2, 3].iter().copied());
+/// vec.extend([1, 2, 3]);
///
/// for x in &vec {
/// println!("{x}");
@@ -490,6 +490,8 @@ impl<T> Vec<T> {
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
+ /// * `ptr` must have been allocated using the global allocator, such as via
+ /// the [`alloc::alloc`] function.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -526,6 +528,7 @@ impl<T> Vec<T> {
/// function.
///
/// [`String`]: crate::string::String
+ /// [`alloc::alloc`]: crate::alloc::alloc
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
///
/// # Examples
@@ -681,6 +684,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
+ /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -714,6 +718,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
/// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
@@ -2424,7 +2429,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.reserve(range.len());
// SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
+ // - `slice::range` guarantees that the given range is valid for indexing self
unsafe {
self.spec_extend_from_within(range);
}
@@ -2681,7 +2686,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Instead use the
- // `slice::to_vec` function which is only available with cfg(test)
+ // `slice::to_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn clone(&self) -> Self {
@@ -3191,7 +3196,7 @@ where
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "vec_from_box", since = "1.18.0")]
impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@@ -3209,7 +3214,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(no_global_oom_handling))]
#[cfg(not(test))]
#[stable(feature = "box_from_vec", since = "1.20.0")]
diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs
index bad765c7f..1861147fe 100644
--- a/library/alloc/src/vec/splice.rs
+++ b/library/alloc/src/vec/splice.rs
@@ -54,6 +54,12 @@ impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
+ // At this point draining is done and the only remaining tasks are splicing
+ // and moving things into the final place.
+ // This means we can replace the slice::Iter with pointers that won't point to
+ // deallocated memory, so that Drain::drop is still allowed to call iter.len();
+ // otherwise it would break the ptr.sub_ptr contract.
+ self.drain.iter = (&[]).iter();
unsafe {
if self.drain.tail_len == 0 {
diff --git a/library/alloc/tests/autotraits.rs b/library/alloc/tests/autotraits.rs
index 8ff5f0abe..879e32b3f 100644
--- a/library/alloc/tests/autotraits.rs
+++ b/library/alloc/tests/autotraits.rs
@@ -32,7 +32,7 @@ fn test_btree_map() {
// spawn(f());
// }
//
- // where with some unintentionally overconstrained Send impls in liballoc's
+ // where with some unintentionally overconstrained Send impls in alloc's
// internals, the future might incorrectly not be Send even though every
// single type involved in the program is Send and Sync.
require_send_sync(async {
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d6d2b055b..2a93a242d 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -1,7 +1,6 @@
#![feature(allocator_api)]
#![feature(alloc_layout_extra)]
#![feature(assert_matches)]
-#![feature(box_syntax)]
#![feature(btree_drain_filter)]
#![feature(cow_is_borrowed)]
#![feature(const_box)]
diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs
index 21f894343..0693beb48 100644
--- a/library/alloc/tests/slice.rs
+++ b/library/alloc/tests/slice.rs
@@ -1,15 +1,9 @@
-use std::cell::Cell;
-use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::cmp::Ordering::{Equal, Greater, Less};
use std::convert::identity;
use std::fmt;
use std::mem;
use std::panic;
use std::rc::Rc;
-use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-
-use rand::distributions::Standard;
-use rand::seq::SliceRandom;
-use rand::{thread_rng, Rng, RngCore};
fn square(n: usize) -> usize {
n * n
@@ -389,123 +383,6 @@ fn test_reverse() {
}
#[test]
-#[cfg_attr(miri, ignore)] // Miri is too slow
-fn test_sort() {
- let mut rng = thread_rng();
-
- for len in (2..25).chain(500..510) {
- for &modulus in &[5, 10, 100, 1000] {
- for _ in 0..10 {
- let orig: Vec<_> =
- rng.sample_iter::<i32, _>(&Standard).map(|x| x % modulus).take(len).collect();
-
- // Sort in default order.
- let mut v = orig.clone();
- v.sort();
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- // Sort in ascending order.
- let mut v = orig.clone();
- v.sort_by(|a, b| a.cmp(b));
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- // Sort in descending order.
- let mut v = orig.clone();
- v.sort_by(|a, b| b.cmp(a));
- assert!(v.windows(2).all(|w| w[0] >= w[1]));
-
- // Sort in lexicographic order.
- let mut v1 = orig.clone();
- let mut v2 = orig.clone();
- v1.sort_by_key(|x| x.to_string());
- v2.sort_by_cached_key(|x| x.to_string());
- assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
- assert!(v1 == v2);
-
- // Sort with many pre-sorted runs.
- let mut v = orig.clone();
- v.sort();
- v.reverse();
- for _ in 0..5 {
- let a = rng.gen::<usize>() % len;
- let b = rng.gen::<usize>() % len;
- if a < b {
- v[a..b].reverse();
- } else {
- v.swap(a, b);
- }
- }
- v.sort();
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
- }
- }
- }
-
- // Sort using a completely random comparison function.
- // This will reorder the elements *somehow*, but won't panic.
- let mut v = [0; 500];
- for i in 0..v.len() {
- v[i] = i as i32;
- }
- v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
- v.sort();
- for i in 0..v.len() {
- assert_eq!(v[i], i as i32);
- }
-
- // Should not panic.
- [0i32; 0].sort();
- [(); 10].sort();
- [(); 100].sort();
-
- let mut v = [0xDEADBEEFu64];
- v.sort();
- assert!(v == [0xDEADBEEF]);
-}
-
-#[test]
-fn test_sort_stability() {
- // Miri is too slow
- let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
- let rounds = if cfg!(miri) { 1 } else { 10 };
-
- for len in (2..25).chain(large_range) {
- for _ in 0..rounds {
- let mut counts = [0; 10];
-
- // create a vector like [(6, 1), (5, 1), (6, 2), ...],
- // where the first item of each tuple is random, but
- // the second item represents which occurrence of that
- // number this element is, i.e., the second elements
- // will occur in sorted order.
- let orig: Vec<_> = (0..len)
- .map(|_| {
- let n = thread_rng().gen::<usize>() % 10;
- counts[n] += 1;
- (n, counts[n])
- })
- .collect();
-
- let mut v = orig.clone();
- // Only sort on the first element, so an unstable sort
- // may mix up the counts.
- v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
-
- // This comparison includes the count (the second item
- // of the tuple), so elements with equal first items
- // will need to be ordered with increasing
- // counts... i.e., exactly asserting that this sort is
- // stable.
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- let mut v = orig.clone();
- v.sort_by_cached_key(|&(x, _)| x);
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
- }
- }
-}
-
-#[test]
fn test_rotate_left() {
let expected: Vec<_> = (0..13).collect();
let mut v = Vec::new();
@@ -1608,230 +1485,6 @@ fn test_copy_from_slice_dst_shorter() {
dst.copy_from_slice(&src);
}
-const MAX_LEN: usize = 80;
-
-static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
- // FIXME(RFC 1109): AtomicUsize is not Copy.
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
-];
-
-static VERSIONS: AtomicUsize = AtomicUsize::new(0);
-
-#[derive(Clone, Eq)]
-struct DropCounter {
- x: u32,
- id: usize,
- version: Cell<usize>,
-}
-
-impl PartialEq for DropCounter {
- fn eq(&self, other: &Self) -> bool {
- self.partial_cmp(other) == Some(Ordering::Equal)
- }
-}
-
-impl PartialOrd for DropCounter {
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.version.set(self.version.get() + 1);
- other.version.set(other.version.get() + 1);
- VERSIONS.fetch_add(2, Relaxed);
- self.x.partial_cmp(&other.x)
- }
-}
-
-impl Ord for DropCounter {
- fn cmp(&self, other: &Self) -> Ordering {
- self.partial_cmp(other).unwrap()
- }
-}
-
-impl Drop for DropCounter {
- fn drop(&mut self) {
- DROP_COUNTS[self.id].fetch_add(1, Relaxed);
- VERSIONS.fetch_sub(self.version.get(), Relaxed);
- }
-}
-
-macro_rules! test {
- ($input:ident, $func:ident) => {
- let len = $input.len();
-
- // Work out the total number of comparisons required to sort
- // this array...
- let mut count = 0usize;
- $input.to_owned().$func(|a, b| {
- count += 1;
- a.cmp(b)
- });
-
- // ... and then panic on each and every single one.
- for panic_countdown in 0..count {
- // Refresh the counters.
- VERSIONS.store(0, Relaxed);
- for i in 0..len {
- DROP_COUNTS[i].store(0, Relaxed);
- }
-
- let v = $input.to_owned();
- let _ = std::panic::catch_unwind(move || {
- let mut v = v;
- let mut panic_countdown = panic_countdown;
- v.$func(|a, b| {
- if panic_countdown == 0 {
- SILENCE_PANIC.with(|s| s.set(true));
- panic!();
- }
- panic_countdown -= 1;
- a.cmp(b)
- })
- });
-
- // Check that the number of things dropped is exactly
- // what we expect (i.e., the contents of `v`).
- for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
- let count = c.load(Relaxed);
- assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
- }
-
- // Check that the most recent versions of values were dropped.
- assert_eq!(VERSIONS.load(Relaxed), 0);
- }
- };
-}
-
-thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
-
-#[test]
-#[cfg_attr(target_os = "emscripten", ignore)] // no threads
-fn panic_safe() {
- panic::update_hook(move |prev, info| {
- if !SILENCE_PANIC.with(|s| s.get()) {
- prev(info);
- }
- });
-
- let mut rng = thread_rng();
-
- // Miri is too slow (but still need to `chain` to make the types match)
- let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
- let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
-
- for len in lens {
- for &modulus in moduli {
- for &has_runs in &[false, true] {
- let mut input = (0..len)
- .map(|id| DropCounter {
- x: rng.next_u32() % modulus,
- id: id,
- version: Cell::new(0),
- })
- .collect::<Vec<_>>();
-
- if has_runs {
- for c in &mut input {
- c.x = c.id as u32;
- }
-
- for _ in 0..5 {
- let a = rng.gen::<usize>() % len;
- let b = rng.gen::<usize>() % len;
- if a < b {
- input[a..b].reverse();
- } else {
- input.swap(a, b);
- }
- }
- }
-
- test!(input, sort_by);
- test!(input, sort_unstable_by);
- }
- }
- }
-
- // Set default panic hook again.
- drop(panic::take_hook());
-}
-
#[test]
fn repeat_generic_slice() {
assert_eq!([1, 2].repeat(2), vec![1, 2, 1, 2]);
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index 7ebed0d5c..2f07c2911 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -7,7 +7,9 @@ use std::borrow::Cow;
use std::cell::Cell;
use std::collections::TryReserveErrorKind::*;
use std::fmt::Debug;
+use std::hint;
use std::iter::InPlaceIterable;
+use std::mem;
use std::mem::{size_of, swap};
use std::ops::Bound::*;
use std::panic::{catch_unwind, AssertUnwindSafe};
@@ -1107,8 +1109,31 @@ fn test_into_iter_drop_allocator() {
#[test]
fn test_into_iter_zst() {
- for _ in vec![[0u64; 0]].into_iter() {}
- for _ in vec![[0u64; 0]; 5].into_iter().rev() {}
+ #[derive(Debug, Clone)]
+ struct AlignedZstWithDrop([u64; 0]);
+ impl Drop for AlignedZstWithDrop {
+ fn drop(&mut self) {
+ let addr = self as *mut _ as usize;
+ assert!(hint::black_box(addr) % mem::align_of::<u64>() == 0);
+ }
+ }
+
+ const C: AlignedZstWithDrop = AlignedZstWithDrop([0u64; 0]);
+
+ for _ in vec![C].into_iter() {}
+ for _ in vec![C; 5].into_iter().rev() {}
+
+ let mut it = vec![C, C].into_iter();
+ it.advance_by(1).unwrap();
+ drop(it);
+
+ let mut it = vec![C, C].into_iter();
+ it.next_chunk::<1>().unwrap();
+ drop(it);
+
+ let mut it = vec![C, C].into_iter();
+ it.next_chunk::<4>().unwrap_err();
+ drop(it);
}
#[test]
@@ -1824,7 +1849,7 @@ fn test_stable_pointers() {
}
// Test that, if we reserved enough space, adding and removing elements does not
- // invalidate references into the vector (such as `v0`). This test also
+ // invalidate references into the vector (such as `v0`). This test also
// runs in Miri, which would detect such problems.
// Note that this test does *not* constitute a stable guarantee that all these functions do not
// reallocate! Only what is explicitly documented at
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index d04de5a07..5a0b852e8 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -1046,6 +1046,20 @@ fn test_append_double_drop() {
}
#[test]
+#[should_panic]
+fn test_append_zst_capacity_overflow() {
+ let mut v = Vec::with_capacity(usize::MAX);
+ // note: using resize instead of set_len here would
+ // be *extremely* slow in unoptimized builds.
+ // SAFETY: `v` has capacity `usize::MAX`, and no initialization
+ // is needed for empty tuples.
+ unsafe { v.set_len(usize::MAX) };
+ let mut v = VecDeque::from(v);
+ let mut w = vec![()].into();
+ v.append(&mut w);
+}
+
+#[test]
fn test_retain() {
let mut buf = VecDeque::new();
buf.extend(1..5);
@@ -1736,3 +1750,39 @@ fn test_resize_keeps_reserved_space_from_item() {
d.resize(1, v);
assert_eq!(d[0].capacity(), 1234);
}
+
+#[test]
+fn test_collect_from_into_iter_keeps_allocation() {
+ let mut v = Vec::with_capacity(13);
+ v.extend(0..7);
+ check(v.as_ptr(), v.last().unwrap(), v.into_iter());
+
+ let mut v = VecDeque::with_capacity(13);
+ v.extend(0..7);
+ check(&v[0], &v[v.len() - 1], v.into_iter());
+
+ fn check(buf: *const i32, last: *const i32, mut it: impl Iterator<Item = i32>) {
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(1));
+
+ let mut v: VecDeque<i32> = it.collect();
+ assert_eq!(v.capacity(), 13);
+ assert_eq!(v.as_slices().0.as_ptr(), buf.wrapping_add(2));
+ assert_eq!(&v[v.len() - 1] as *const _, last);
+
+ assert_eq!(v.as_slices(), ([2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+ v.push_front(7);
+ assert_eq!(v.as_slices(), ([7, 2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+ v.push_front(8);
+ assert_eq!(v.as_slices(), ([8, 7, 2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+
+ // Now that we've added things in place of the two that we removed from
+ // the front of the iterator, we're back to matching the buffer pointer.
+ assert_eq!(v.as_slices().0.as_ptr(), buf);
+ assert_eq!(&v[v.len() - 1] as *const _, last);
+
+ v.push_front(9);
+ assert_eq!(v.as_slices(), ([9].as_slice(), [8, 7, 2, 3, 4, 5, 6].as_slice()));
+ assert_eq!(v.capacity(), 13);
+ }
+}
diff --git a/library/core/Cargo.toml b/library/core/Cargo.toml
index 2a7df9556..3dc8c84e0 100644
--- a/library/core/Cargo.toml
+++ b/library/core/Cargo.toml
@@ -24,8 +24,8 @@ path = "benches/lib.rs"
test = true
[dev-dependencies]
-rand = "0.7"
-rand_xorshift = "0.2"
+rand = { version = "0.8.5", default-features = false }
+rand_xorshift = { version = "0.3.0", default-features = false }
[features]
# Make panics and failed asserts immediately abort without formatting any message
diff --git a/library/core/benches/num/int_log/mod.rs b/library/core/benches/num/int_log/mod.rs
index 3c01e2998..bb61224b5 100644
--- a/library/core/benches/num/int_log/mod.rs
+++ b/library/core/benches/num/int_log/mod.rs
@@ -21,7 +21,7 @@ macro_rules! int_log_bench {
/* Exponentially distributed random numbers from the whole range of the type. */
let numbers: Vec<$t> = (0..256)
.map(|_| {
- let x = rng.gen::<$t>() >> rng.gen_range(0, <$t>::BITS);
+ let x = rng.gen::<$t>() >> rng.gen_range(0..<$t>::BITS);
if x != 0 { x } else { 1 }
})
.collect();
@@ -38,7 +38,7 @@ macro_rules! int_log_bench {
/* Exponentially distributed random numbers from the range 0..256. */
let numbers: Vec<$t> = (0..256)
.map(|_| {
- let x = (rng.gen::<u8>() >> rng.gen_range(0, u8::BITS)) as $t;
+ let x = (rng.gen::<u8>() >> rng.gen_range(0..u8::BITS)) as $t;
if x != 0 { x } else { 1 }
})
.collect();
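
The two bench hunks above are pure fallout from the rand 0.7 to 0.8 bump in core's dev-dependencies: `Rng::gen_range` now takes a single range instead of two endpoint arguments. In ordinary rand 0.8 usage:

    use rand::Rng;

    let mut rng = rand::thread_rng();
    let x: u32 = rng.gen_range(0..10); // rand 0.8: one half-open range
    // rand 0.7 spelled this as rng.gen_range(0, 10)
    assert!(x < 10);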
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 1a379ecc1..c0fb0d993 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -148,7 +148,7 @@
//! ```
//!
//! In this example, if the concrete type of `obj` in `use_my_trait` is `SomeConcreteType`, then
-//! the `get_context_ref` call will return a reference to `obj.some_string` with type `&String`.
+//! the `get_context_by_ref` call will return a reference to `obj.some_string` with type `&String`.
#![stable(feature = "rust1", since = "1.0.0")]
@@ -662,7 +662,8 @@ impl dyn Any + Send + Sync {
/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
/// noting that the hashes and ordering will vary between Rust releases. Beware
/// of relying on them inside of your code!
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+#[derive(Clone, Copy, Debug, Hash, Eq)]
+#[derive_const(PartialEq, PartialOrd, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TypeId {
t: u64,
diff --git a/library/core/src/array/iter.rs b/library/core/src/array/iter.rs
index b91c63018..8259c087d 100644
--- a/library/core/src/array/iter.rs
+++ b/library/core/src/array/iter.rs
@@ -109,8 +109,8 @@ impl<T, const N: usize> IntoIter<T, N> {
/// use std::array::IntoIter;
/// use std::mem::MaybeUninit;
///
- /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
- /// # // otherwise it could leak. A fully-general version this would need a drop
+ /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
+ /// # // otherwise it could leak. A fully-general version of this would need a drop
/// # // guard to handle panics from the iterator, but this works for an example.
/// fn next_chunk<T: Copy, const N: usize>(
/// it: &mut impl Iterator<Item = T>,
@@ -211,7 +211,7 @@ impl<T, const N: usize> IntoIter<T, N> {
let initialized = 0..0;
// SAFETY: We're telling it that none of the elements are initialized,
- // which is trivially true. And ∀N: usize, 0 <= N.
+ // which is trivially true. And ∀N: usize, 0 <= N.
unsafe { Self::new_unchecked(buffer, initialized) }
}
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index 94a1a1d32..2825e0bbb 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -69,7 +69,7 @@ where
/// if any element creation was unsuccessful.
///
/// The return type of this function depends on the return type of the closure.
-/// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N]; E>`.
+/// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
/// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
///
/// # Arguments
@@ -522,7 +522,7 @@ impl<T, const N: usize> [T; N] {
/// return an array the same size as `self` or the first error encountered.
///
/// The return type of this function depends on the return type of the closure.
- /// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N]; E>`.
+ /// If you return `Result<T, E>` from the closure, you'll get a `Result<[T; N], E>`.
/// If you return `Option<T>` from the closure, you'll get an `Option<[T; N]>`.
///
/// # Examples
diff --git a/library/core/src/borrow.rs b/library/core/src/borrow.rs
index fdd56cb4e..4a8302ee4 100644
--- a/library/core/src/borrow.rs
+++ b/library/core/src/borrow.rs
@@ -26,7 +26,7 @@
/// to be modified, it can additionally implement [`BorrowMut<T>`].
///
/// Further, when providing implementations for additional traits, it needs
-/// to be considered whether they should behave identical to those of the
+/// to be considered whether they should behave identically to those of the
/// underlying type as a consequence of acting as a representation of that
/// underlying type. Generic code typically uses `Borrow<T>` when it relies
/// on the identical behavior of these additional trait implementations.
diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs
index 47cce2aa3..129213fde 100644
--- a/library/core/src/cell.rs
+++ b/library/core/src/cell.rs
@@ -568,7 +568,7 @@ impl<T: Default> Cell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {}
impl<T> Cell<[T]> {
@@ -807,7 +807,8 @@ impl<T> RefCell<T> {
///
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently borrowed, or
+ /// if `self` and `other` point to the same `RefCell`.
///
/// # Examples
///
@@ -1193,7 +1194,7 @@ impl<T: Default> Default for RefCell<T> {
impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn eq(&self, other: &RefCell<T>) -> bool {
*self.borrow() == *other.borrow()
@@ -1207,7 +1208,7 @@ impl<T: ?Sized + Eq> Eq for RefCell<T> {}
impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn partial_cmp(&self, other: &RefCell<T>) -> Option<Ordering> {
self.borrow().partial_cmp(&*other.borrow())
@@ -1215,7 +1216,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn lt(&self, other: &RefCell<T>) -> bool {
*self.borrow() < *other.borrow()
@@ -1223,7 +1224,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn le(&self, other: &RefCell<T>) -> bool {
*self.borrow() <= *other.borrow()
@@ -1231,7 +1232,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn gt(&self, other: &RefCell<T>) -> bool {
*self.borrow() > *other.borrow()
@@ -1239,7 +1240,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn ge(&self, other: &RefCell<T>) -> bool {
*self.borrow() >= *other.borrow()
@@ -1250,7 +1251,7 @@ impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
impl<T: ?Sized + Ord> Ord for RefCell<T> {
/// # Panics
///
- /// Panics if the value in either `RefCell` is currently borrowed.
+ /// Panics if the value in either `RefCell` is currently mutably borrowed.
#[inline]
fn cmp(&self, other: &RefCell<T>) -> Ordering {
self.borrow().cmp(&*other.borrow())
@@ -1266,7 +1267,7 @@ impl<T> const From<T> for RefCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<RefCell<U>> for RefCell<T> {}
struct BorrowRef<'b> {
@@ -1492,7 +1493,7 @@ impl<'b, T: ?Sized> Ref<'b, T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ref<'b, U>> for Ref<'b, T> {}
#[stable(feature = "std_guard_impls", since = "1.20.0")]
@@ -1738,7 +1739,7 @@ impl<T: ?Sized> DerefMut for RefMut<'_, T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<RefMut<'b, U>> for RefMut<'b, T> {}
#[stable(feature = "std_guard_impls", since = "1.20.0")]
@@ -1783,7 +1784,7 @@ impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
/// until the reference expires. As a special exception, given an `&T`, any part of it that is
/// inside an `UnsafeCell<_>` may be deallocated during the lifetime of the reference, after the
/// last time the reference is used (dereferenced or reborrowed). Since you cannot deallocate a part
-/// of what a reference points to, this means the memory an `&T` points to can be deallocted only if
+/// of what a reference points to, this means the memory an `&T` points to can be deallocated only if
/// *every part of it* (including padding) is inside an `UnsafeCell`.
///
/// However, whenever a `&UnsafeCell<T>` is constructed or dereferenced, it must still point to
@@ -1993,7 +1994,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
- // #[repr(transparent)]. This exploits libstd's special status, there is
+ // #[repr(transparent)]. This exploits std's special status, there is
// no guarantee for user code that this will work in future versions of the compiler!
self as *const UnsafeCell<T> as *const T as *mut T
}
@@ -2051,7 +2052,7 @@ impl<T: ?Sized> UnsafeCell<T> {
#[rustc_const_stable(feature = "unsafe_cell_raw_get", since = "1.56.0")]
pub const fn raw_get(this: *const Self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
- // #[repr(transparent)]. This exploits libstd's special status, there is
+ // #[repr(transparent)]. This exploits std's special status, there is
// no guarantee for user code that this will work in future versions of the compiler!
this as *const T as *mut T
}
@@ -2074,7 +2075,7 @@ impl<T> const From<T> for UnsafeCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {}
/// [`UnsafeCell`], but [`Sync`].
@@ -2164,7 +2165,7 @@ impl<T> const From<T> for SyncUnsafeCell<T> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
//#[unstable(feature = "sync_unsafe_cell", issue = "95439")]
impl<T: CoerceUnsized<U>, U> CoerceUnsized<SyncUnsafeCell<U>> for SyncUnsafeCell<T> {}
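
The repeated doc fix in the cell.rs section deserves a concrete reading: the comparison impls on `RefCell` take only shared borrows internally, so they panic only when a value is currently *mutably* borrowed, not merely borrowed:

    use std::cell::RefCell;

    let a = RefCell::new(1);
    let b = RefCell::new(1);

    let _shared = a.borrow();  // an outstanding shared borrow is fine
    assert!(a == b);           // ok: `eq` needs only shared borrows

    let _exclusive = b.borrow_mut();
    // assert!(a == b);        // would panic: `b` is mutably borrowed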
diff --git a/library/core/src/cell/lazy.rs b/library/core/src/cell/lazy.rs
index b355d94ce..65d12c25c 100644
--- a/library/core/src/cell/lazy.rs
+++ b/library/core/src/cell/lazy.rs
@@ -35,7 +35,7 @@ pub struct LazyCell<T, F = fn() -> T> {
init: Cell<Option<F>>,
}
-impl<T, F> LazyCell<T, F> {
+impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// Creates a new lazy value with the given initializing function.
///
/// # Examples
@@ -51,13 +51,12 @@ impl<T, F> LazyCell<T, F> {
///
/// assert_eq!(&*lazy, "HELLO, WORLD!");
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub const fn new(init: F) -> LazyCell<T, F> {
LazyCell { cell: OnceCell::new(), init: Cell::new(Some(init)) }
}
-}
-impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// Forces the evaluation of this lazy value and returns a reference to
/// the result.
///
@@ -75,6 +74,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
/// assert_eq!(LazyCell::force(&lazy), &92);
/// assert_eq!(&*lazy, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn force(this: &LazyCell<T, F>) -> &T {
this.cell.get_or_init(|| match this.init.take() {
@@ -87,6 +87,7 @@ impl<T, F: FnOnce() -> T> LazyCell<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
type Target = T;
+ #[inline]
fn deref(&self) -> &T {
LazyCell::force(self)
}
@@ -95,6 +96,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyCell<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Default> Default for LazyCell<T> {
/// Creates a new lazy value using `Default` as the initializing function.
+ #[inline]
fn default() -> LazyCell<T> {
LazyCell::new(T::default)
}
diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs
index 8c01643c7..7757068a4 100644
--- a/library/core/src/cell/once.rs
+++ b/library/core/src/cell/once.rs
@@ -37,8 +37,9 @@ pub struct OnceCell<T> {
impl<T> OnceCell<T> {
/// Creates a new empty cell.
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[inline]
#[must_use]
+ #[unstable(feature = "once_cell", issue = "74465")]
pub const fn new() -> OnceCell<T> {
OnceCell { inner: UnsafeCell::new(None) }
}
@@ -46,6 +47,7 @@ impl<T> OnceCell<T> {
/// Gets the reference to the underlying value.
///
/// Returns `None` if the cell is empty.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get(&self) -> Option<&T> {
// SAFETY: Safe due to `inner`'s invariant
@@ -55,6 +57,7 @@ impl<T> OnceCell<T> {
/// Gets the mutable reference to the underlying value.
///
/// Returns `None` if the cell is empty.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_mut(&mut self) -> Option<&mut T> {
self.inner.get_mut().as_mut()
@@ -82,6 +85,7 @@ impl<T> OnceCell<T> {
///
/// assert!(cell.get().is_some());
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn set(&self, value: T) -> Result<(), T> {
// SAFETY: Safe because we cannot have overlapping mutable borrows
@@ -123,6 +127,7 @@ impl<T> OnceCell<T> {
/// let value = cell.get_or_init(|| unreachable!());
/// assert_eq!(value, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_or_init<F>(&self, f: F) -> &T
where
@@ -205,6 +210,7 @@ impl<T> OnceCell<T> {
/// cell.set("hello".to_string()).unwrap();
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn into_inner(self) -> Option<T> {
// Because `into_inner` takes `self` by value, the compiler statically verifies
@@ -233,6 +239,7 @@ impl<T> OnceCell<T> {
/// assert_eq!(cell.take(), Some("hello".to_string()));
/// assert_eq!(cell.get(), None);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn take(&mut self) -> Option<T> {
mem::take(self).into_inner()
@@ -241,6 +248,7 @@ impl<T> OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T> Default for OnceCell<T> {
+ #[inline]
fn default() -> Self {
Self::new()
}
@@ -258,6 +266,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Clone> Clone for OnceCell<T> {
+ #[inline]
fn clone(&self) -> OnceCell<T> {
let res = OnceCell::new();
if let Some(value) = self.get() {
@@ -272,6 +281,7 @@ impl<T: Clone> Clone for OnceCell<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: PartialEq> PartialEq for OnceCell<T> {
+ #[inline]
fn eq(&self, other: &Self) -> bool {
self.get() == other.get()
}
@@ -283,6 +293,7 @@ impl<T: Eq> Eq for OnceCell<T> {}
#[unstable(feature = "once_cell", issue = "74465")]
impl<T> const From<T> for OnceCell<T> {
/// Creates a new `OnceCell<T>` which already contains the given `value`.
+ #[inline]
fn from(value: T) -> Self {
OnceCell { inner: UnsafeCell::new(Some(value)) }
}
diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs
index 11f1c30f6..eeb088030 100644
--- a/library/core/src/char/decode.rs
+++ b/library/core/src/char/decode.rs
@@ -67,7 +67,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
}
// all ok, so lets decode it.
- let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
+ let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000;
// SAFETY: we checked that it's a legal unicode value
Some(Ok(unsafe { from_u32_unchecked(c) }))
}
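
The decode.rs change swaps subtraction for masking: high surrogates occupy `0xD800..=0xDBFF` and low surrogates `0xDC00..=0xDFFF`, so the ten payload bits of each are exactly `u & 0x3FF`, and `u - 0xD800` agrees with `u & 0x3FF` (likewise `u2 - 0xDC00` with `u2 & 0x3FF`) while the masked form optimizes better. Worked example for U+1F600, encoded as the surrogate pair `0xD83D 0xDE00`:

    let (u, u2) = (0xD83Du16, 0xDE00u16);
    let c = (((u & 0x3FF) as u32) << 10 | (u2 & 0x3FF) as u32) + 0x1_0000;
    // (0x3D << 10 | 0x200) + 0x10000 == 0xF600 + 0x10000 == 0x1F600
    assert_eq!(char::from_u32(c), Some('😀'));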
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index 949896e57..a7d6fec7d 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -24,8 +24,6 @@
use crate::const_closure::ConstFnMutClosure;
use crate::marker::Destruct;
-#[cfg(bootstrap)]
-use crate::marker::StructuralPartialEq;
use self::Ordering::*;
@@ -333,7 +331,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> {
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, Eq, Debug, Hash)]
-#[cfg_attr(not(bootstrap), derive_const(PartialOrd, Ord, PartialEq))]
+#[derive_const(PartialOrd, Ord, PartialEq)]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
@@ -800,9 +798,12 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- // HACK(fee1-dead): go back to using `self.max_by(other, Ord::cmp)`
- // when trait methods are allowed to be used when a const closure is
- // expected.
+ #[cfg(not(bootstrap))]
+ {
+ max_by(self, other, Ord::cmp)
+ }
+
+ #[cfg(bootstrap)]
match self.cmp(&other) {
Ordering::Less | Ordering::Equal => other,
Ordering::Greater => self,
@@ -827,9 +828,12 @@ pub trait Ord: Eq + PartialOrd<Self> {
Self: Sized,
Self: ~const Destruct,
{
- // HACK(fee1-dead): go back to using `self.min_by(other, Ord::cmp)`
- // when trait methods are allowed to be used when a const closure is
- // expected.
+ #[cfg(not(bootstrap))]
+ {
+ min_by(self, other, Ord::cmp)
+ }
+
+ #[cfg(bootstrap)]
match self.cmp(&other) {
Ordering::Less | Ordering::Equal => self,
Ordering::Greater => other,
@@ -879,40 +883,6 @@ pub macro Ord($item:item) {
/* compiler built-in */
}
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg(bootstrap)]
-impl StructuralPartialEq for Ordering {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const PartialEq for Ordering {
- #[inline]
- fn eq(&self, other: &Self) -> bool {
- (*self as i32).eq(&(*other as i32))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const Ord for Ordering {
- #[inline]
- fn cmp(&self, other: &Ordering) -> Ordering {
- (*self as i32).cmp(&(*other as i32))
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_cmp", issue = "92391")]
-#[cfg(bootstrap)]
-impl const PartialOrd for Ordering {
- #[inline]
- fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
- (*self as i32).partial_cmp(&(*other as i32))
- }
-}
-
/// Trait for types that form a [partial order](https://en.wikipedia.org/wiki/Partial_order).
///
/// The `lt`, `le`, `gt`, and `ge` methods of this trait can be called using
@@ -1264,17 +1234,23 @@ where
F: ~const Destruct,
K: ~const Destruct,
{
- const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
- f: &mut F,
- (v1, v2): (&T, &T),
- ) -> Ordering
- where
- T: ~const Destruct,
- K: ~const Destruct,
- {
- f(v1).cmp(&f(v2))
+ cfg_if! {
+ if #[cfg(bootstrap)] {
+ const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(
+ f: &mut F,
+ (v1, v2): (&T, &T),
+ ) -> Ordering
+ where
+ T: ~const Destruct,
+ K: ~const Destruct,
+ {
+ f(v1).cmp(&f(v2))
+ }
+ min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
+ } else {
+ min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2)))
+ }
}
- min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp))
}
/// Compares and returns the maximum of two values.
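
The cmp.rs hunks above retire the bootstrap-era workaround: once trait methods can be passed where const closures are expected, `Ord::max`/`Ord::min` can simply delegate to `max_by`/`min_by`. Observable behavior is unchanged, including the tie-breaking of the free functions:

    use std::cmp;

    // Compare by absolute value: |-2| > |1|, so max_by picks -2.
    assert_eq!(cmp::max_by(-2, 1, |a: &i32, b: &i32| a.abs().cmp(&b.abs())), -2);
    // On a tie, `max_by` returns its second argument and `min_by` its first.
    assert_eq!(cmp::max_by(1, 2, |_, _| cmp::Ordering::Equal), 2);
    assert_eq!(cmp::min_by(1, 2, |_, _| cmp::Ordering::Equal), 1);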
diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs
index 151c8e6d8..97900a486 100644
--- a/library/core/src/const_closure.rs
+++ b/library/core/src/const_closure.rs
@@ -1,5 +1,4 @@
use crate::marker::Destruct;
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
/// Struct representing a closure with mutably borrowed data.
@@ -46,33 +45,6 @@ impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData,
macro_rules! impl_fn_mut_tuple {
($($var:ident)*) => {
- #[cfg(bootstrap)]
- #[allow(unused_parens)]
- impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
- FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
- where
- Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct,
- {
- type Output = ClosureReturnValue;
-
- extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output {
- self.call_mut(args)
- }
- }
- #[cfg(bootstrap)]
- #[allow(unused_parens)]
- impl<'a, $($var,)* ClosureArguments, Function, ClosureReturnValue> const
- FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
- where
- Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue,
- {
- extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output {
- #[allow(non_snake_case)]
- let ($($var),*) = &mut self.data;
- (self.func)(($($var),*), args)
- }
- }
- #[cfg(not(bootstrap))]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
@@ -85,7 +57,6 @@ macro_rules! impl_fn_mut_tuple {
self.call_mut(args)
}
}
- #[cfg(not(bootstrap))]
#[allow(unused_parens)]
impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const
FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function>
diff --git a/library/core/src/convert/num.rs b/library/core/src/convert/num.rs
index 9c0d7e9a1..4da7c3234 100644
--- a/library/core/src/convert/num.rs
+++ b/library/core/src/convert/num.rs
@@ -168,6 +168,26 @@ impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0"
// Float -> Float
impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+// bool -> Float
+#[stable(feature = "float_from_bool", since = "1.68.0")]
+#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+impl const From<bool> for f32 {
+ /// Converts `bool` to `f32` losslessly.
+ #[inline]
+ fn from(small: bool) -> Self {
+ small as u8 as Self
+ }
+}
+#[stable(feature = "float_from_bool", since = "1.68.0")]
+#[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")]
+impl const From<bool> for f64 {
+ /// Converts `bool` to `f64` losslessly.
+ #[inline]
+ fn from(small: bool) -> Self {
+ small as u8 as Self
+ }
+}
+
// no possible bounds violation
macro_rules! try_from_unbounded {
($source:ty, $($target:ty),*) => {$(
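A short usage sketch of the new impls (available once the `float_from_bool` stabilization above lands):

```rust
assert_eq!(f32::from(true), 1.0_f32);
assert_eq!(f64::from(false), 0.0_f64);

// Equivalent to the lossless path the impls take through `u8`:
assert_eq!(true as u8 as f64, 1.0);
```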
diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs
index ec1eaa99f..76daceecd 100644
--- a/library/core/src/ffi/mod.rs
+++ b/library/core/src/ffi/mod.rs
@@ -227,7 +227,12 @@ impl fmt::Debug for c_void {
/// Basic implementation of a `va_list`.
// The name is WIP, using `VaListImpl` for now.
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -251,7 +256,12 @@ pub struct VaListImpl<'f> {
}
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -319,6 +329,25 @@ pub struct VaListImpl<'f> {
_marker: PhantomData<&'f mut &'f c_void>,
}
+/// s390x ABI implementation of a `va_list`.
+#[cfg(target_arch = "s390x")]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gpr: i64,
+ fpr: i64,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
/// x86_64 ABI implementation of a `va_list`.
#[cfg(all(target_arch = "x86_64", not(target_os = "uefi"), not(windows)))]
#[repr(C)]
@@ -352,6 +381,7 @@ pub struct VaList<'a, 'f: 'a> {
all(
not(target_arch = "aarch64"),
not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
not(target_arch = "x86_64")
),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
@@ -363,7 +393,12 @@ pub struct VaList<'a, 'f: 'a> {
inner: VaListImpl<'f>,
#[cfg(all(
- any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(
+ target_arch = "aarch64",
+ target_arch = "powerpc",
+ target_arch = "s390x",
+ target_arch = "x86_64"
+ ),
any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
@@ -376,7 +411,12 @@ pub struct VaList<'a, 'f: 'a> {
}
#[cfg(any(
- all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "s390x"),
+ not(target_arch = "x86_64")
+ ),
all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
target_family = "wasm",
target_arch = "asmjs",
@@ -398,7 +438,12 @@ impl<'f> VaListImpl<'f> {
}
#[cfg(all(
- any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(
+ target_arch = "aarch64",
+ target_arch = "powerpc",
+ target_arch = "s390x",
+ target_arch = "x86_64"
+ ),
any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
not(target_family = "wasm"),
not(target_arch = "asmjs"),
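For illustration, a minimal nightly-only sketch of the `c_variadic` machinery these `VaListImpl` definitions back. `sum_ints` is a hypothetical function, and the feature is unstable on all targets, including the newly added s390x:

```rust
#![feature(c_variadic)]

// Defining a variadic function pulls in the target's `VaListImpl` layout.
unsafe extern "C" fn sum_ints(count: usize, mut args: ...) -> i64 {
    let mut total = 0;
    for _ in 0..count {
        total += args.arg::<i64>();
    }
    total
}

fn main() {
    // SAFETY: `count` matches the number and type of variadic arguments.
    let total = unsafe { sum_ints(3, 1_i64, 2_i64, 3_i64) };
    assert_eq!(total, 6);
}
```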
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 48b617743..fa5073e33 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -174,6 +174,11 @@ pub trait Write {
/// This method should generally not be invoked manually, but rather through
/// the [`write!`] macro itself.
///
+ /// # Errors
+ ///
+ /// This function will return an instance of [`Error`] on error. Please see
+ /// [write_str](Write::write_str) for details.
+ ///
/// # Examples
///
/// ```
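A small sketch of the error plumbing this new section documents: `write!` forwards to `write_fmt`, whose `fmt::Result` surfaces any `Error` produced by the underlying `write_str`:

```rust
use std::fmt::Write;

let mut out = String::new();
// `String`'s `write_str` never fails, so the returned `fmt::Result` is `Ok`.
write!(out, "{}-{}", 1, 2).expect("writing to a String cannot fail");
assert_eq!(out, "1-2");
```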
@@ -405,7 +410,7 @@ impl<'a> Arguments<'a> {
/// 1. The `pieces` slice must be at least as long as `fmt`.
/// 2. Every [`rt::v1::Argument::position`] value within `fmt` must be a
/// valid index of `args`.
- /// 3. Every [`Count::Param`] within `fmt` must contain a valid index of
+ /// 3. Every [`rt::v1::Count::Param`] within `fmt` must contain a valid index of
/// `args`.
#[doc(hidden)]
#[inline]
@@ -558,7 +563,7 @@ impl Display for Arguments<'_> {
///
/// Derived `Debug` formats are not stable, and so may change with future Rust
/// versions. Additionally, `Debug` implementations of types provided by the
-/// standard library (`libstd`, `libcore`, `liballoc`, etc.) are not stable, and
+/// standard library (`std`, `core`, `alloc`, etc.) are not stable, and
/// may also change with future Rust versions.
///
/// # Examples
@@ -2471,8 +2476,8 @@ impl Display for char {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Pointer for *const T {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- // Cast is needed here because `.addr()` requires `T: Sized`.
- pointer_fmt_inner((*self as *const ()).addr(), f)
+ // Cast is needed here because `.expose_addr()` requires `T: Sized`.
+ pointer_fmt_inner((*self as *const ()).expose_addr(), f)
}
}
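For context, `{:p}` formatting is what routes through this impl; under strict provenance, the switch to `expose_addr()` also marks the printed address as exposed (a behavioral note, not visible in the diff):

```rust
let x = 42_i32;
let ptr: *const i32 = &x;
// `{:p}` calls `Pointer::fmt`, which now obtains the address via
// `expose_addr()` on the pointer cast to `*const ()`.
println!("{ptr:p}");
```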
diff --git a/library/core/src/future/future.rs b/library/core/src/future/future.rs
index f29d3e1e9..8c7111cb3 100644
--- a/library/core/src/future/future.rs
+++ b/library/core/src/future/future.rs
@@ -37,6 +37,7 @@ use crate::task::{Context, Poll};
pub trait Future {
/// The type of value produced on completion.
#[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_diagnostic_item = "FutureOutput"]
type Output;
/// Attempt to resolve the future to a final value, registering
diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs
index f2b961d62..c4fb36209 100644
--- a/library/core/src/future/mod.rs
+++ b/library/core/src/future/mod.rs
@@ -44,7 +44,7 @@ pub use poll_fn::{poll_fn, PollFn};
/// non-Send/Sync as well, and we don't want that.
///
/// It also simplifies the HIR lowering of `.await`.
-#[cfg_attr(not(bootstrap), lang = "ResumeTy")]
+#[lang = "ResumeTy"]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[derive(Debug, Copy, Clone)]
@@ -61,7 +61,6 @@ unsafe impl Sync for ResumeTy {}
/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
// This is `const` to avoid extra errors after we recover from `const async fn`
-#[cfg_attr(bootstrap, lang = "from_generator")]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
@@ -113,10 +112,14 @@ pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
unsafe { &mut *cx.0.as_ptr().cast() }
}
-#[cfg_attr(not(bootstrap), lang = "identity_future")]
+// FIXME(swatinem): This fn is currently needed to work around shortcomings
+// in type and lifetime inference.
+// See the comment at the bottom of `LoweringContext::make_async_expr` and
+// <https://github.com/rust-lang/rust/issues/104826>.
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[inline]
+#[lang = "identity_future"]
pub const fn identity_future<O, Fut: Future<Output = O>>(f: Fut) -> Fut {
f
}
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index c755afa39..71a0d1825 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -199,7 +199,7 @@ pub trait Hash {
/// println!("Hash is {:x}!", hasher.finish());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- fn hash<H: Hasher>(&self, state: &mut H);
+ fn hash<H: ~const Hasher>(&self, state: &mut H);
/// Feeds a slice of this type into the given [`Hasher`].
///
@@ -980,7 +980,7 @@ mod impls {
#[rustc_const_unstable(feature = "const_hash", issue = "104061")]
impl<T: ?Sized + ~const Hash> const Hash for &mut T {
#[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
+ fn hash<H: ~const Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
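The `~const Hasher` bound only tightens the const-trait story; runtime usage is unchanged. A quick stable sketch of the `Hash`/`Hasher` contract:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn digest<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

// Equal values feed identical bytes to the hasher, so digests agree.
assert_eq!(digest(&"hello"), digest(&"hello"));
```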
diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs
index e8d724ab1..5a76e8669 100644
--- a/library/core/src/hint.rs
+++ b/library/core/src/hint.rs
@@ -219,6 +219,75 @@ pub fn spin_loop() {
/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
///
/// [`std::convert::identity`]: crate::convert::identity
+///
+/// # When is this useful?
+///
+/// First and foremost: `black_box` does _not_ guarantee any exact behavior and, in some cases, may
+/// do nothing at all. As such, it **must not be relied upon to control critical program behavior.**
+/// This _immediately_ precludes any direct use of this function for cryptographic or security
+/// purposes.
+///
+/// While not suitable in those mission-critical cases, `black_box`'s functionality can generally be
+/// relied upon for benchmarking, and should be used there. It will try to ensure that the
+/// compiler doesn't optimize away part of the intended test code based on context. For
+/// example:
+///
+/// ```
+/// fn contains(haystack: &[&str], needle: &str) -> bool {
+/// haystack.iter().any(|x| x == &needle)
+/// }
+///
+/// pub fn benchmark() {
+/// let haystack = vec!["abc", "def", "ghi", "jkl", "mno"];
+/// let needle = "ghi";
+/// for _ in 0..10 {
+/// contains(&haystack, needle);
+/// }
+/// }
+/// ```
+///
+/// The compiler could theoretically make optimizations like the following:
+///
+/// - `needle` and `haystack` are always the same, move the call to `contains` outside the loop and
+/// delete the loop
+/// - Inline `contains`
+/// - `needle` and `haystack` have values known at compile time, `contains` is always true. Remove
+/// the call and replace with `true`
+/// - Nothing is done with the result of `contains`: delete this function call entirely
+/// - `benchmark` now has no purpose: delete this function
+///
+/// It is not likely that all of the above happens, but the compiler is definitely able to make some
+/// optimizations that could result in a very inaccurate benchmark. This is where `black_box` comes
+/// in:
+///
+/// ```
+/// use std::hint::black_box;
+///
+/// // Same `contains` function
+/// fn contains(haystack: &[&str], needle: &str) -> bool {
+/// haystack.iter().any(|x| x == &needle)
+/// }
+///
+/// pub fn benchmark() {
+/// let haystack = vec!["abc", "def", "ghi", "jkl", "mno"];
+/// let needle = "ghi";
+/// for _ in 0..10 {
+/// // Adjust our benchmark loop contents
+/// black_box(contains(black_box(&haystack), black_box(needle)));
+/// }
+/// }
+/// ```
+///
+/// This essentially tells the compiler to block optimizations across any calls to `black_box`. So,
+/// it now:
+///
+/// - Treats both arguments to `contains` as unpredictable: the body of `contains` can no longer be
+/// optimized based on argument values
+/// - Treats the call to `contains` and its result as volatile: the body of `benchmark` cannot
+/// optimize this away
+///
+/// This makes our benchmark much closer to how the function would be used in situ, where
+/// arguments are usually not known at compile time and the result is used in some way.
#[inline]
#[stable(feature = "bench_black_box", since = "1.66.0")]
#[rustc_const_unstable(feature = "const_black_box", issue = "none")]
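To make the new guidance concrete, a hedged sketch of a hand-rolled timing harness around the documented pattern (real benchmarks should prefer a framework such as Criterion):

```rust
use std::hint::black_box;
use std::time::Instant;

fn contains(haystack: &[&str], needle: &str) -> bool {
    haystack.iter().any(|x| x == &needle)
}

fn main() {
    let haystack = vec!["abc", "def", "ghi", "jkl", "mno"];
    let start = Instant::now();
    for _ in 0..1_000_000 {
        // `black_box` keeps the inputs opaque and the result observed.
        black_box(contains(black_box(&haystack), black_box("ghi")));
    }
    println!("elapsed: {:?}", start.elapsed());
}
```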
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 7ed7d767f..a315a28fb 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -55,7 +55,6 @@
#![allow(missing_docs)]
use crate::marker::DiscriminantKind;
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
use crate::mem;
@@ -959,13 +958,13 @@ extern "rust-intrinsic" {
#[rustc_safe_intrinsic]
pub fn assert_zero_valid<T>();
- /// A guard for unsafe functions that cannot ever be executed if `T` has invalid
- /// bit patterns: This will statically either panic, or do nothing.
+ /// A guard for `std::mem::uninitialized`. This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_const_unstable(feature = "const_assert_type2", issue = "none")]
#[rustc_safe_intrinsic]
- pub fn assert_uninit_valid<T>();
+ #[cfg(not(bootstrap))]
+ pub fn assert_mem_uninitialized_valid<T>();
/// Gets a reference to a static `Location` indicating where it was called.
///
@@ -2175,66 +2174,6 @@ extern "rust-intrinsic" {
/// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
/// which violates the principle that a `const fn` must behave the same at
/// compile-time and at run-time. The unsafe code in crate B is fine.
- #[cfg(bootstrap)]
- #[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
- pub fn const_eval_select<ARG, F, G, RET>(arg: ARG, called_in_const: F, called_at_rt: G) -> RET
- where
- G: FnOnce<ARG, Output = RET>,
- F: FnOnce<ARG, Output = RET>;
-
- /// Selects which function to call depending on the context.
- ///
- /// If this function is evaluated at compile-time, then a call to this
- /// intrinsic will be replaced with a call to `called_in_const`. It gets
- /// replaced with a call to `called_at_rt` otherwise.
- ///
- /// # Type Requirements
- ///
- /// The two functions must be both function items. They cannot be function
- /// pointers or closures. The first function must be a `const fn`.
- ///
- /// `arg` will be the tupled arguments that will be passed to either one of
- /// the two functions, therefore, both functions must accept the same type of
- /// arguments. Both functions must return RET.
- ///
- /// # Safety
- ///
- /// The two functions must behave observably equivalent. Safe code in other
- /// crates may assume that calling a `const fn` at compile-time and at run-time
- /// produces the same result. A function that produces a different result when
- /// evaluated at run-time, or has any other observable side-effects, is
- /// *unsound*.
- ///
- /// Here is an example of how this could cause a problem:
- /// ```no_run
- /// #![feature(const_eval_select)]
- /// #![feature(core_intrinsics)]
- /// use std::hint::unreachable_unchecked;
- /// use std::intrinsics::const_eval_select;
- ///
- /// // Crate A
- /// pub const fn inconsistent() -> i32 {
- /// fn runtime() -> i32 { 1 }
- /// const fn compiletime() -> i32 { 2 }
- ///
- /// unsafe {
- //  // ⚠ This code violates the required equivalence of `compiletime`
- /// // and `runtime`.
- /// const_eval_select((), compiletime, runtime)
- /// }
- /// }
- ///
- /// // Crate B
- /// const X: i32 = inconsistent();
- /// let x = inconsistent();
- /// if x != X { unsafe { unreachable_unchecked(); }}
- /// ```
- ///
- /// This code causes Undefined Behavior when being run, since the
- /// `unreachable_unchecked` is actually being reached. The bug is in *crate A*,
- /// which violates the principle that a `const fn` must behave the same at
- /// compile-time and at run-time. The unsafe code in crate B is fine.
- #[cfg(not(bootstrap))]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
pub fn const_eval_select<ARG: Tuple, F, G, RET>(
arg: ARG,
@@ -2281,7 +2220,7 @@ macro_rules! assert_unsafe_precondition {
fn runtime$(<$($tt)*>)?($($i:$ty),*) {
if !$e {
// don't unwind to reduce impact on code size
- ::core::panicking::panic_str_nounwind(
+ ::core::panicking::panic_nounwind(
concat!("unsafe precondition(s) violated: ", $name)
);
}
diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs
index 8ba1c1228..e3157b669 100644
--- a/library/core/src/intrinsics/mir.rs
+++ b/library/core/src/intrinsics/mir.rs
@@ -21,15 +21,14 @@
//! #[custom_mir(dialect = "built")]
//! pub fn simple(x: i32) -> i32 {
//! mir!(
-//! let temp1: i32;
-//! let temp2: _;
+//! let temp2: i32;
//!
//! {
-//! temp1 = x;
-//! Goto(exit)
+//! let temp1 = x;
+//! Goto(my_second_block)
//! }
//!
-//! exit = {
+//! my_second_block = {
//! temp2 = Move(temp1);
//! RET = temp2;
//! Return()
@@ -38,22 +37,200 @@
//! }
//! ```
//!
-//! Hopefully most of this is fairly self-explanatory. Expanding on some notable details:
+//! The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
+//! attribute only works on functions - there is no way to insert custom MIR into the middle of
+//! another function. The `dialect` and `phase` parameters indicate which [version of MIR][dialect
+//! docs] you are inserting here. Generally you'll want to use `#![custom_mir(dialect = "built")]`
+//! if you want your MIR to be modified by the full MIR pipeline, or `#![custom_mir(dialect =
+//! "runtime", phase = "optimized")] if you don't.
//!
-//! - The `custom_mir` attribute tells the compiler to treat the function as being custom MIR. This
-//! attribute only works on functions - there is no way to insert custom MIR into the middle of
-//! another function.
-//! - The `dialect` and `phase` parameters indicate which version of MIR you are inserting here.
-//! This will normally be the phase that corresponds to the thing you are trying to test. The
-//! phase can be omitted for dialects that have just one.
-//! - You should define your function signature like you normally would. Externally, this function
-//! can be called like any other function.
-//! - Type inference works - you don't have to spell out the type of all of your locals.
+//! [dialect docs]:
+//! https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/enum.MirPhase.html
//!
-//! For now, all statements and terminators are parsed from nested invocations of the special
-//! functions provided in this module. We additionally want to (but do not yet) support more
-//! "normal" Rust syntax in places where it makes sense. Also, most kinds of instructions are not
-//! supported yet.
+//! The input to the [`mir!`] macro is:
+//!
+//! - A possibly empty list of local declarations. Locals can also be declared inline on
+//! assignments via `let`. Type inference generally works. Shadowing does not.
+//! - A list of basic blocks. The first of these is the start block and is where execution begins.
+//! All blocks other than the start block need to be given a name, so that they can be referred
+//! to later.
+//! - Each block is a list of semicolon-terminated statements, followed by a terminator. The
+//! syntax for the various statements and terminators is designed to be as similar as possible
+//! to the syntax for analogous concepts in native Rust. See below for a list.
+//!
+//! # Examples
+//!
+#![cfg_attr(bootstrap, doc = "```rust,compile_fail")]
+#![cfg_attr(not(bootstrap), doc = "```rust")]
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! pub fn choose_load(a: &i32, b: &i32, c: bool) -> i32 {
+//! mir!(
+//! {
+//! match c {
+//! true => t,
+//! _ => f,
+//! }
+//! }
+//!
+//! t = {
+//! let temp = a;
+//! Goto(load_and_exit)
+//! }
+//!
+//! f = {
+//! temp = b;
+//! Goto(load_and_exit)
+//! }
+//!
+//! load_and_exit = {
+//! RET = *temp;
+//! Return()
+//! }
+//! )
+//! }
+//!
+//! #[custom_mir(dialect = "built")]
+//! fn unwrap_unchecked<T>(opt: Option<T>) -> T {
+//! mir!({
+//! RET = Move(Field(Variant(opt, 1), 0));
+//! Return()
+//! })
+//! }
+//!
+//! #[custom_mir(dialect = "runtime", phase = "optimized")]
+//! fn push_and_pop<T>(v: &mut Vec<T>, value: T) {
+//! mir!(
+//! let unused;
+//! let popped;
+//!
+//! {
+//! Call(unused, pop, Vec::push(v, value))
+//! }
+//!
+//! pop = {
+//! Call(popped, drop, Vec::pop(v))
+//! }
+//!
+//! drop = {
+//! Drop(popped, ret)
+//! }
+//!
+//! ret = {
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! We can also set off compilation failures that happen in sufficiently late stages of the
+//! compiler:
+//!
+//! ```rust,compile_fail
+//! #![feature(core_intrinsics, custom_mir)]
+//!
+//! extern crate core;
+//! use core::intrinsics::mir::*;
+//!
+//! #[custom_mir(dialect = "built")]
+//! fn borrow_error(should_init: bool) -> i32 {
+//! mir!(
+//! let temp: i32;
+//!
+//! {
+//! match should_init {
+//! true => init,
+//! _ => use_temp,
+//! }
+//! }
+//!
+//! init = {
+//! temp = 0;
+//! Goto(use_temp)
+//! }
+//!
+//! use_temp = {
+//! RET = temp;
+//! Return()
+//! }
+//! )
+//! }
+//! ```
+//!
+//! ```text
+//! error[E0381]: used binding is possibly-uninitialized
+//! --> test.rs:24:13
+//! |
+//! 8 | / mir!(
+//! 9 | | let temp: i32;
+//! 10 | |
+//! 11 | | {
+//! ... |
+//! 19 | | temp = 0;
+//! | | -------- binding initialized here in some conditions
+//! ... |
+//! 24 | | RET = temp;
+//! | | ^^^^^^^^^^ value used here but it is possibly-uninitialized
+//! 25 | | Return()
+//! 26 | | }
+//! 27 | | )
+//! | |_____- binding declared here but left uninitialized
+//!
+//! error: aborting due to previous error
+//!
+//! For more information about this error, try `rustc --explain E0381`.
+//! ```
+//!
+//! # Syntax
+//!
+//! The lists below are an exhaustive description of how various MIR constructs can be created.
+//! Anything missing from the list should be assumed to be unsupported; PRs are welcome.
+//!
+//! #### Locals
+//!
+//! - The `_0` return local can always be accessed via `RET`.
+//! - Arguments can be accessed via their regular name.
+//! - All other locals need to be declared with `let` somewhere and then can be accessed by name.
+//!
+//! #### Places
+//! - Locals implicitly convert to places.
+//! - Field accesses, derefs, and indexing work normally.
+//! - Fields in variants can be accessed via the [`Variant`] and [`Field`] associated functions,
+//! see their documentation for details.
+//!
+//! #### Operands
+//! - Places implicitly convert to `Copy` operands.
+//! - `Move` operands can be created via [`Move`].
+//! - Const blocks, literals, named constants, and const params all just work.
+//! - [`Static`] and [`StaticMut`] can be used to create `&T` and `*mut T`s to statics. These are
+//! constants in MIR and the only way to access statics.
+//!
+//! #### Statements
+//! - Assign statements work via normal Rust assignment.
+//! - [`Retag`] statements have an associated function.
+//!
+//! #### Rvalues
+//!
+//! - Operands implicitly convert to `Use` rvalues.
+//! - `&`, `&mut`, `addr_of!`, and `addr_of_mut!` all work to create their associated rvalue.
+//! - [`Discriminant`] has an associated function.
+//!
+//! #### Terminators
+//!
+//! Custom MIR does not currently support cleanup blocks or non-trivial unwind paths. As such, there
+//! are no resume and abort terminators, and terminators that might unwind do not have any way to
+//! indicate the unwind block.
+//!
+//! - [`Goto`], [`Return`], [`Unreachable`], [`Drop`](Drop()), and [`DropAndReplace`] have associated functions.
+//! - `match some_int_operand` becomes a `SwitchInt`. Each arm should be `literal => basic_block`.
+//! - The exception is the last arm, which must be `_ => basic_block` and corresponds to the
+//! otherwise branch.
+//! - [`Call`] has an associated function as well. The third argument of this function is a normal
+//! function call expression, for example `my_other_function(a, 5)`.
//!
#![unstable(
@@ -69,21 +246,93 @@
pub struct BasicBlock;
macro_rules! define {
- ($name:literal, $($sig:tt)*) => {
+ ($name:literal, $( #[ $meta:meta ] )* fn $($sig:tt)*) => {
#[rustc_diagnostic_item = $name]
- pub $($sig)* { panic!() }
+ $( #[ $meta ] )*
+ pub fn $($sig)* { panic!() }
}
}
define!("mir_return", fn Return() -> BasicBlock);
define!("mir_goto", fn Goto(destination: BasicBlock) -> BasicBlock);
+define!("mir_unreachable", fn Unreachable() -> BasicBlock);
+define!("mir_drop", fn Drop<T>(place: T, goto: BasicBlock));
+define!("mir_drop_and_replace", fn DropAndReplace<T>(place: T, value: T, goto: BasicBlock));
+define!("mir_call", fn Call<T>(place: T, goto: BasicBlock, call: T));
+define!("mir_storage_live", fn StorageLive<T>(local: T));
+define!("mir_storage_dead", fn StorageDead<T>(local: T));
define!("mir_retag", fn Retag<T>(place: T));
-define!("mir_retag_raw", fn RetagRaw<T>(place: T));
define!("mir_move", fn Move<T>(place: T) -> T);
define!("mir_static", fn Static<T>(s: T) -> &'static T);
define!("mir_static_mut", fn StaticMut<T>(s: T) -> *mut T);
+define!(
+ "mir_discriminant",
+ /// Gets the discriminant of a place.
+ fn Discriminant<T>(place: T) -> <T as ::core::marker::DiscriminantKind>::Discriminant
+);
+define!("mir_set_discriminant", fn SetDiscriminant<T>(place: T, index: u32));
+define!(
+ "mir_field",
+ /// Access the field with the given index of some place.
+ ///
+ /// This only makes sense to use in conjunction with [`Variant`]. If the type you are looking to
+ /// access the field of does not have variants, you can use normal field projection syntax.
+ ///
+ /// There is no proper way to do a place projection to a variant in Rust, and so these two
+ /// functions are a workaround. You can access a field of a variant via `Field(Variant(place,
+ /// var_idx), field_idx)`, where `var_idx` and `field_idx` are appropriate literals. Some
+ /// caveats:
+ ///
+ /// - The return type of `Variant` is always `()`. Don't worry about that; the correct MIR will
+ /// still be generated.
+ /// - In some situations, the return type of `Field` cannot be inferred. You may need to
+ /// annotate it on the function in these cases.
+ /// - Since `Field` is a function call which is not a place expression, using this on the left
+ /// hand side of an expression is rejected by the compiler. [`place!`] is a macro provided to
+ /// work around that issue. Wrap the left hand side of an assignment in the macro to convince
+ /// the compiler that it's ok.
+ ///
+ /// # Examples
+ ///
+ #[cfg_attr(bootstrap, doc = "```rust,compile_fail")]
+ #[cfg_attr(not(bootstrap), doc = "```rust")]
+ /// #![feature(custom_mir, core_intrinsics)]
+ ///
+ /// extern crate core;
+ /// use core::intrinsics::mir::*;
+ ///
+ /// #[custom_mir(dialect = "built")]
+ /// fn unwrap_deref(opt: Option<&i32>) -> i32 {
+ /// mir!({
+ /// RET = *Field::<&i32>(Variant(opt, 1), 0);
+ /// Return()
+ /// })
+ /// }
+ ///
+ /// #[custom_mir(dialect = "built")]
+ /// fn set(opt: &mut Option<i32>) {
+ /// mir!({
+ /// place!(Field(Variant(*opt, 1), 0)) = 5;
+ /// Return()
+ /// })
+ /// }
+ /// ```
+ fn Field<F>(place: (), field: u32) -> F
+);
+define!(
+ "mir_variant",
+ /// Adds a variant projection with the given index to the place.
+ ///
+ /// See [`Field`] for documentation.
+ fn Variant<T>(place: T, index: u32) -> ()
+);
+define!(
+ "mir_make_place",
+ #[doc(hidden)]
+ fn __internal_make_place<T>(place: T) -> *mut T
+);
-/// Convenience macro for generating custom MIR.
+/// Macro for generating custom MIR.
///
/// See the module documentation for syntax details. This macro is not magic - it only transforms
/// your MIR into something that is easier to parse in the compiler.
@@ -139,6 +388,13 @@ pub macro mir {
}}
}
+/// Helper macro that allows you to treat a value expression like a place expression.
+///
+/// See the documentation on [`Variant`] for why this is necessary and how to use it.
+pub macro place($e:expr) {
+ (*::core::intrinsics::mir::__internal_make_place($e))
+}
+
/// Helper macro that extracts the `let` declarations out of a bunch of statements.
///
/// This macro is written using the "statement muncher" strategy. Each invocation parses the first
diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs
index ac7b389b1..b5739f2f3 100644
--- a/library/core/src/iter/range.rs
+++ b/library/core/src/iter/range.rs
@@ -756,7 +756,7 @@ impl<A: Step> Iterator for ops::Range<A> {
where
Self: TrustedRandomAccessNoCoerce,
{
- // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
+ // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
// Additionally Self: TrustedRandomAccess is only implemented for Copy types
// which means even repeated reads of the same index would be safe.
diff --git a/library/core/src/iter/sources/empty.rs b/library/core/src/iter/sources/empty.rs
index 98734c527..617dfd123 100644
--- a/library/core/src/iter/sources/empty.rs
+++ b/library/core/src/iter/sources/empty.rs
@@ -22,17 +22,12 @@ pub const fn empty<T>() -> Empty<T> {
Empty(marker::PhantomData)
}
-// Newtype for use in `PhantomData` to avoid
-// > error: const-stable function cannot use `#[feature(const_fn_fn_ptr_basics)]`
-// in `const fn empty<T>()` above.
-struct FnReturning<T>(fn() -> T);
-
/// An iterator that yields nothing.
///
/// This `struct` is created by the [`empty()`] function. See its documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iter_empty", since = "1.2.0")]
-pub struct Empty<T>(marker::PhantomData<FnReturning<T>>);
+pub struct Empty<T>(marker::PhantomData<fn() -> T>);
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T> fmt::Debug for Empty<T> {
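The `PhantomData<fn() -> T>` marker keeps `Empty<T>` covariant and unconditionally `Send + Sync` without the old newtype workaround; a small sketch:

```rust
fn assert_send_sync<T: Send + Sync>(_: T) {}

let mut it = std::iter::empty::<i32>();
assert_eq!(it.next(), None);

// `fn() -> T` is `Send + Sync` for any `T`, so `Empty<T>` is too,
// even when `T` itself is not.
assert_send_sync(std::iter::empty::<*const u8>());
```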
diff --git a/library/core/src/iter/sources/from_generator.rs b/library/core/src/iter/sources/from_generator.rs
index 8e7cbd34a..4cbe731b2 100644
--- a/library/core/src/iter/sources/from_generator.rs
+++ b/library/core/src/iter/sources/from_generator.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::ops::{Generator, GeneratorState};
use crate::pin::Pin;
@@ -23,14 +24,21 @@ use crate::pin::Pin;
/// ```
#[inline]
#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
-pub fn from_generator<G: Generator<Return = ()> + Unpin>(
- generator: G,
-) -> impl Iterator<Item = G::Yield> {
+pub fn from_generator<G: Generator<Return = ()> + Unpin>(generator: G) -> FromGenerator<G> {
FromGenerator(generator)
}
-struct FromGenerator<G>(G);
+/// An iterator over the values yielded by an underlying generator.
+///
+/// This `struct` is created by the [`iter::from_generator()`] function. See its documentation for
+/// more.
+///
+/// [`iter::from_generator()`]: from_generator
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+#[derive(Clone)]
+pub struct FromGenerator<G>(G);
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
type Item = G::Yield;
@@ -41,3 +49,10 @@ impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
}
}
}
+
+#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+impl<G> fmt::Debug for FromGenerator<G> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FromGenerator").finish()
+ }
+}
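A nightly-only usage sketch of the now-nameable `FromGenerator` iterator (feature names as of this snapshot):

```rust
#![feature(generators, iter_from_generator)]

fn main() {
    let it = std::iter::from_generator(|| {
        yield 1;
        yield 2;
        yield 3;
    });
    // `FromGenerator` can now be named, cloned, and `Debug`-printed.
    let collected: Vec<_> = it.collect();
    assert_eq!(collected, [1, 2, 3]);
}
```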
diff --git a/library/core/src/iter/sources/once_with.rs b/library/core/src/iter/sources/once_with.rs
index d79f85c25..9309a06c8 100644
--- a/library/core/src/iter/sources/once_with.rs
+++ b/library/core/src/iter/sources/once_with.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::iter::{FusedIterator, TrustedLen};
/// Creates an iterator that lazily generates a value exactly once by invoking
@@ -66,12 +67,23 @@ pub fn once_with<A, F: FnOnce() -> A>(gen: F) -> OnceWith<F> {
///
/// This `struct` is created by the [`once_with()`] function.
/// See its documentation for more.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
#[stable(feature = "iter_once_with", since = "1.43.0")]
pub struct OnceWith<F> {
gen: Option<F>,
}
+#[stable(feature = "iter_once_with_debug", since = "1.68.0")]
+impl<F> fmt::Debug for OnceWith<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.gen.is_some() {
+ f.write_str("OnceWith(Some(_))")
+ } else {
+ f.write_str("OnceWith(None)")
+ }
+ }
+}
+
#[stable(feature = "iter_once_with", since = "1.43.0")]
impl<A, F: FnOnce() -> A> Iterator for OnceWith<F> {
type Item = A;
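The manual `Debug` impl avoids requiring `F: Debug` (closures implement neither), and its two states are easy to observe:

```rust
let mut it = std::iter::once_with(|| 5);
println!("{it:?}"); // OnceWith(Some(_))
assert_eq!(it.next(), Some(5));
println!("{it:?}"); // OnceWith(None)
assert_eq!(it.next(), None);
```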
diff --git a/library/core/src/iter/sources/repeat_n.rs b/library/core/src/iter/sources/repeat_n.rs
index fd8d25ce1..dc61d6065 100644
--- a/library/core/src/iter/sources/repeat_n.rs
+++ b/library/core/src/iter/sources/repeat_n.rs
@@ -126,7 +126,7 @@ impl<A: Clone> Iterator for RepeatN<A> {
// zero so it won't be dropped later, and thus it's okay to take it here.
unsafe { ManuallyDrop::take(&mut self.element) }
} else {
- A::clone(&mut self.element)
+ A::clone(&self.element)
})
}
diff --git a/library/core/src/iter/sources/repeat_with.rs b/library/core/src/iter/sources/repeat_with.rs
index ab2d0472b..3f34105a3 100644
--- a/library/core/src/iter/sources/repeat_with.rs
+++ b/library/core/src/iter/sources/repeat_with.rs
@@ -1,3 +1,4 @@
+use crate::fmt;
use crate::iter::{FusedIterator, TrustedLen};
use crate::ops::Try;
@@ -71,12 +72,19 @@ pub fn repeat_with<A, F: FnMut() -> A>(repeater: F) -> RepeatWith<F> {
///
/// This `struct` is created by the [`repeat_with()`] function.
/// See its documentation for more.
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone)]
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
pub struct RepeatWith<F> {
repeater: F,
}
+#[stable(feature = "iterator_repeat_with_debug", since = "1.68.0")]
+impl<F> fmt::Debug for RepeatWith<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RepeatWith").finish_non_exhaustive()
+ }
+}
+
#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
type Item = A;
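Likewise for `RepeatWith`, whose `Debug` output elides the closure via `finish_non_exhaustive`:

```rust
let mut counter = 0;
let mut it = std::iter::repeat_with(move || {
    counter += 1;
    counter
});
assert_eq!(it.next(), Some(1));
assert_eq!(it.next(), Some(2));
println!("{it:?}"); // RepeatWith { .. }
```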
diff --git a/library/core/src/iter/traits/accum.rs b/library/core/src/iter/traits/accum.rs
index 84d83ee39..e31669b39 100644
--- a/library/core/src/iter/traits/accum.rs
+++ b/library/core/src/iter/traits/accum.rs
@@ -10,6 +10,10 @@ use crate::num::Wrapping;
/// [`sum()`]: Iterator::sum
/// [`FromIterator`]: iter::FromIterator
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+#[rustc_on_unimplemented(
+ message = "a value of type `{Self}` cannot be made by summing an iterator over elements of type `{A}`",
+ label = "value of type `{Self}` cannot be made by summing a `std::iter::Iterator<Item={A}>`"
+)]
pub trait Sum<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// "summing up" the items.
@@ -27,6 +31,10 @@ pub trait Sum<A = Self>: Sized {
/// [`product()`]: Iterator::product
/// [`FromIterator`]: iter::FromIterator
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+#[rustc_on_unimplemented(
+ message = "a value of type `{Self}` cannot be made by multiplying all elements of type `{A}` from an iterator",
+ label = "value of type `{Self}` cannot be made by multiplying all elements from a `std::iter::Iterator<Item={A}>`"
+)]
pub trait Product<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// multiplying the items.
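These attributes only change diagnostics, not behavior; a sketch of the call sites they target:

```rust
let total: i32 = [1, 2, 3].iter().sum();
assert_eq!(total, 6);

// A mismatched accumulator type now gets the tailored message, roughly:
// let s: String = [1, 2, 3].iter().sum();
// error: a value of type `String` cannot be made by summing an iterator
//        over elements of type `&{integer}`
```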
diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs
index 83c7e8977..a4a665d48 100644
--- a/library/core/src/iter/traits/iterator.rs
+++ b/library/core/src/iter/traits/iterator.rs
@@ -58,6 +58,11 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
syntax `start..end` or the inclusive range syntax `start..=end`"
),
+ on(
+ _Self = "{float}",
+ note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
+ syntax `start..end` or the inclusive range syntax `start..=end`"
+ ),
label = "`{Self}` is not an iterator",
message = "`{Self}` is not an iterator"
)]
@@ -66,6 +71,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub trait Iterator {
/// The type of the elements being iterated over.
+ #[rustc_diagnostic_item = "IteratorItem"]
#[stable(feature = "rust1", since = "1.0.0")]
type Item;
@@ -803,7 +809,7 @@ pub trait Iterator {
/// (0..5).map(|x| x * 2 + 1)
/// .for_each(move |x| tx.send(x).unwrap());
///
- /// let v: Vec<_> = rx.iter().collect();
+ /// let v: Vec<_> = rx.iter().collect();
/// assert_eq!(v, vec![1, 3, 5, 7, 9]);
/// ```
///
@@ -1380,8 +1386,8 @@ pub trait Iterator {
Take::new(self, n)
}
- /// An iterator adapter similar to [`fold`] that holds internal state and
- /// produces a new iterator.
+ /// An iterator adapter which, like [`fold`], holds internal state, but
+ /// unlike [`fold`], produces a new iterator.
///
/// [`fold`]: Iterator::fold
///
@@ -1393,20 +1399,25 @@ pub trait Iterator {
///
/// On iteration, the closure will be applied to each element of the
/// iterator and the return value from the closure, an [`Option`], is
- /// yielded by the iterator.
+ /// returned by the `next` method. Thus the closure can return
+ /// `Some(value)` to yield `value`, or `None` to end the iteration.
///
/// # Examples
///
/// Basic usage:
///
/// ```
- /// let a = [1, 2, 3];
+ /// let a = [1, 2, 3, 4];
///
/// let mut iter = a.iter().scan(1, |state, &x| {
- /// // each iteration, we'll multiply the state by the element
+ /// // each iteration, we'll multiply the state by the element ...
/// *state = *state * x;
///
- /// // then, we'll yield the negation of the state
+ /// // ... and terminate if the state exceeds 6
+ /// if *state > 6 {
+ /// return None;
+ /// }
+ /// // ... else yield the negation of the state
/// Some(-*state)
/// });
///
@@ -1508,6 +1519,18 @@ pub trait Iterator {
/// assert_eq!(merged, "alphabetagamma");
/// ```
///
+ /// Flattening works on any `IntoIterator` type, including `Option` and `Result`:
+ ///
+ /// ```
+ /// let options = vec![Some(123), Some(321), None, Some(231)];
+ /// let flattened_options: Vec<_> = options.into_iter().flatten().collect();
+ /// assert_eq!(flattened_options, vec![123, 321, 231]);
+ ///
+ /// let results = vec![Ok(123), Ok(321), Err(456), Ok(231)];
+ /// let flattened_results: Vec<_> = results.into_iter().flatten().collect();
+ /// assert_eq!(flattened_results, vec![123, 321, 231]);
+ /// ```
+ ///
/// Flattening only removes one level of nesting at a time:
///
/// ```
@@ -1829,6 +1852,7 @@ pub trait Iterator {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "iterator_collect_fn")]
fn collect<B: FromIterator<Self::Item>>(self) -> B
where
Self: Sized,
@@ -2652,7 +2676,10 @@ pub trait Iterator {
/// argument is a double reference. You can see this effect in the
/// examples below, with `&&x`.
///
+ /// If you need the index of the element, see [`position()`].
+ ///
/// [`Some(element)`]: Some
+ /// [`position()`]: Iterator::position
///
/// # Examples
///
@@ -2733,7 +2760,7 @@ pub trait Iterator {
/// the first true result or the first error.
///
/// The return type of this method depends on the return type of the closure.
- /// If you return `Result<bool, E>` from the closure, you'll get a `Result<Option<Self::Item>; E>`.
+ /// If you return `Result<bool, E>` from the closure, you'll get a `Result<Option<Self::Item>, E>`.
/// If you return `Option<bool>` from the closure, you'll get an `Option<Option<Self::Item>>`.
///
/// # Examples
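A nightly sketch showing the corrected `try_find` return type in action:

```rust
#![feature(try_find)]

fn main() {
    let strings = ["1", "2", "lol", "5"];
    // The closure returns `Result<bool, ParseIntError>`, so `try_find`
    // returns `Result<Option<&&str>, ParseIntError>`.
    let found = strings
        .iter()
        .try_find(|&&s| s.parse::<i32>().map(|n| n == 2));
    assert_eq!(found, Ok(Some(&"2")));
}
```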
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 1823fd300..8790649ab 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -38,18 +38,18 @@
//! which do not trigger a panic can be assured that this function is never
//! called. The `lang` attribute is called `eh_personality`.
-// Since libcore defines many fundamental lang items, all tests live in a
-// separate crate, libcoretest, to avoid bizarre issues.
+// Since core defines many fundamental lang items, all tests live in a
+// separate crate, libcoretest (library/core/tests), to avoid bizarre issues.
//
// Here we explicitly #[cfg]-out this whole crate when testing. If we don't do
// this, both the generated test artifact and the linked libtest (which
-// transitively includes libcore) will both define the same set of lang items,
+// transitively includes core) will both define the same set of lang items,
// and this will cause the E0152 "found duplicate lang item" error. See
// discussion in #50466 for details.
//
// This cfg won't affect doc tests.
#![cfg(not(test))]
-// To run libcore tests without x.py without ending up with two copies of libcore, Miri needs to be
+// To run core tests without x.py without ending up with two copies of core, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -158,6 +158,7 @@
#![feature(const_unsafecell_get_mut)]
#![feature(const_waker)]
#![feature(core_panic)]
+#![feature(char_indices_offset)]
#![feature(duration_consts_float)]
#![feature(maybe_uninit_uninit_array)]
#![feature(ptr_alignment_type)]
@@ -166,6 +167,8 @@
#![feature(slice_ptr_get)]
#![feature(slice_split_at_unchecked)]
#![feature(str_internals)]
+#![feature(str_split_remainder)]
+#![feature(str_split_inclusive_remainder)]
#![feature(strict_provenance)]
#![feature(utf16_extra)]
#![feature(utf16_extra_const)]
@@ -188,13 +191,14 @@
#![feature(cfg_sanitize)]
#![feature(cfg_target_has_atomic)]
#![feature(cfg_target_has_atomic_equal_alignment)]
+#![cfg_attr(not(bootstrap), feature(const_closures))]
#![feature(const_fn_floating_point_arithmetic)]
#![feature(const_mut_refs)]
#![feature(const_precise_live_drops)]
#![feature(const_refs_to_cell)]
#![feature(decl_macro)]
#![feature(deprecated_suggestion)]
-#![cfg_attr(not(bootstrap), feature(derive_const))]
+#![feature(derive_const)]
#![feature(doc_cfg)]
#![feature(doc_notable_trait)]
#![feature(rustdoc_internals)]
@@ -236,7 +240,6 @@
#![feature(arm_target_feature)]
#![feature(avx512_target_feature)]
#![feature(cmpxchg16b_target_feature)]
-#![feature(f16c_target_feature)]
#![feature(hexagon_target_feature)]
#![feature(mips_target_feature)]
#![feature(powerpc_target_feature)]
@@ -245,6 +248,7 @@
#![feature(sse4a_target_feature)]
#![feature(tbm_target_feature)]
#![feature(wasm_target_feature)]
+#![cfg_attr(bootstrap, feature(f16c_target_feature))]
// allow using `core::` in intra-doc links
#[allow(unused_extern_crates)]
@@ -309,7 +313,7 @@ pub mod f64;
#[macro_use]
pub mod num;
-/* The libcore prelude, not as all-encompassing as the libstd prelude */
+/* The core prelude, not as all-encompassing as the std prelude */
pub mod prelude;
@@ -376,12 +380,12 @@ mod const_closure;
#[stable(feature = "core_primitive", since = "1.43.0")]
pub mod primitive;
-// Pull in the `core_arch` crate directly into libcore. The contents of
+// Pull in the `core_arch` crate directly into core. The contents of
// `core_arch` are in a different repository: rust-lang/stdarch.
//
-// `core_arch` depends on libcore, but the contents of this module are
+// `core_arch` depends on core, but the contents of this module are
// set up in such a way that directly pulling it here works such that the
-// crate uses the this crate as its libcore.
+// crate uses this crate as its core.
#[path = "../../stdarch/crates/core_arch/src/mod.rs"]
#[allow(
missing_docs,
@@ -400,12 +404,12 @@ mod core_arch;
#[stable(feature = "simd_arch", since = "1.27.0")]
pub mod arch;
-// Pull in the `core_simd` crate directly into libcore. The contents of
+// Pull in the `core_simd` crate directly into core. The contents of
// `core_simd` are in a different repository: rust-lang/portable-simd.
//
-// `core_simd` depends on libcore, but the contents of this module are
+// `core_simd` depends on core, but the contents of this module are
// set up in such a way that directly pulling it here works such that the
-// crate uses this crate as its libcore.
+// crate uses this crate as its core.
#[path = "../../portable-simd/crates/core_simd/src/mod.rs"]
#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
#[allow(rustdoc::bare_urls)]
diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs
index f29cd357d..3b026bc0e 100644
--- a/library/core/src/macros/mod.rs
+++ b/library/core/src/macros/mod.rs
@@ -1315,22 +1315,41 @@ pub(crate) mod builtin {
/// Parses a file as an expression or an item according to the context.
///
- /// The file is located relative to the current file (similarly to how
- /// modules are found). The provided path is interpreted in a platform-specific
- /// way at compile time. So, for instance, an invocation with a Windows path
- /// containing backslashes `\` would not compile correctly on Unix.
+ /// **Warning**: For multi-file Rust projects, the `include!` macro is probably not what you
+ /// are looking for. Usually, multi-file Rust projects use
+ /// [modules](https://doc.rust-lang.org/reference/items/modules.html). Multi-file projects and
+ /// modules are explained in the Rust-by-Example book
+ /// [here](https://doc.rust-lang.org/rust-by-example/mod/split.html) and the module system is
+ /// explained in the Rust Book
+ /// [here](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).
+ ///
+ /// The included file is placed in the surrounding code
+ /// [unhygienically](https://doc.rust-lang.org/reference/macros-by-example.html#hygiene). If
+ /// the included file is parsed as an expression and variables or functions share names across
+ /// both files, it could result in variables or functions being different from what the
+ /// included file expected.
+ ///
+ /// The included file is located relative to the current file (similarly to how modules are
+ /// found). The provided path is interpreted in a platform-specific way at compile time. So,
+ /// for instance, an invocation with a Windows path containing backslashes `\` would not
+ /// compile correctly on Unix.
///
- /// Using this macro is often a bad idea, because if the file is
- /// parsed as an expression, it is going to be placed in the
- /// surrounding code unhygienically. This could result in variables
- /// or functions being different from what the file expected if
- /// there are variables or functions that have the same name in
- /// the current file.
+ /// # Uses
+ ///
+ /// The `include!` macro is primarily used for two purposes. It is used to include
+ /// documentation that is written in a separate file, and it is used to include [build artifacts,
+ /// usually produced by a `build.rs`
+ /// script](https://doc.rust-lang.org/cargo/reference/build-scripts.html#outputs-of-the-build-script).
+ ///
+ /// When using the `include!` macro to include stretches of documentation, remember that the
+ /// included file still needs to be valid Rust syntax. It is also possible to
+ /// use the [`include_str`] macro as `#![doc = include_str!("...")]` (at the module level) or
+ /// `#[doc = include_str!("...")]` (at the item level) to include documentation from a plain
+ /// text or markdown file.
///
/// # Examples
///
- /// Assume there are two files in the same directory with the following
- /// contents:
+ /// Assume there are two files in the same directory with the following contents:
///
/// File 'monkeys.in':
///
@@ -1461,7 +1480,6 @@ pub(crate) mod builtin {
/// [the reference]: ../../../reference/attributes/derive.html
#[unstable(feature = "derive_const", issue = "none")]
#[rustc_builtin_macro]
- #[cfg(not(bootstrap))]
pub macro derive_const($item:item) {
/* compiler built-in */
}
@@ -1516,7 +1534,6 @@ pub(crate) mod builtin {
/// Attribute macro applied to a function to register it as a handler for allocation failure.
///
/// See also [`std::alloc::handle_alloc_error`](../../../std/alloc/fn.handle_alloc_error.html).
- #[cfg(not(bootstrap))]
#[unstable(feature = "alloc_error_handler", issue = "51540")]
#[allow_internal_unstable(rustc_attrs)]
#[rustc_builtin_macro]
@@ -1553,7 +1570,6 @@ pub(crate) mod builtin {
issue = "23416",
reason = "placeholder syntax for type ascription"
)]
- #[cfg(not(bootstrap))]
pub macro type_ascribe($expr:expr, $ty:ty) {
/* compiler built-in */
}
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index 42c342801..1326fc9ab 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -96,7 +96,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {}
)]
#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
#[rustc_specialization_trait]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Sized {
// Empty.
}
@@ -126,9 +126,9 @@ pub trait Sized {
/// [`Rc`]: ../../std/rc/struct.Rc.html
/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
/// [nomicon-coerce]: ../../nomicon/coercions.html
-#[unstable(feature = "unsize", issue = "27732")]
+#[unstable(feature = "unsize", issue = "18598")]
#[lang = "unsize"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Unsize<T: ?Sized> {
// Empty.
}
@@ -623,6 +623,12 @@ impl<T: ?Sized> !Sync for *mut T {}
/// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so
/// as not to indicate ownership.
///
+/// ## Layout
+///
+/// For all `T`, the following are guaranteed:
+/// * `size_of::<PhantomData<T>>() == 0`
+/// * `align_of::<PhantomData<T>>() == 1`
+///
/// [drop check]: ../../nomicon/dropck.html
#[lang = "phantom_data"]
#[stable(feature = "rust1", since = "1.0.0")]
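The newly documented layout guarantees are directly checkable:

```rust
use std::marker::PhantomData;
use std::mem::{align_of, size_of};

assert_eq!(size_of::<PhantomData<[u64; 8]>>(), 0);
assert_eq!(align_of::<PhantomData<[u64; 8]>>(), 1);
```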
@@ -695,7 +701,7 @@ impl<T: ?Sized> StructuralEq for PhantomData<T> {}
reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait DiscriminantKind {
/// The type of the discriminant, which must satisfy the trait
/// bounds required by `mem::Discriminant`.
@@ -796,7 +802,7 @@ impl<T: ?Sized> Unpin for *mut T {}
#[lang = "destruct"]
#[rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg)]
#[const_trait]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Destruct {}
/// A marker for tuple types.
@@ -806,12 +812,12 @@ pub trait Destruct {}
#[unstable(feature = "tuple_trait", issue = "none")]
#[lang = "tuple_trait"]
#[rustc_on_unimplemented(message = "`{Self}` is not a tuple")]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Tuple {}
/// A marker for things
#[unstable(feature = "pointer_sized_trait", issue = "none")]
-#[cfg_attr(not(bootstrap), lang = "pointer_sized")]
+#[lang = "pointer_sized"]
#[rustc_on_unimplemented(
message = "`{Self}` needs to be a pointer-sized type",
label = "`{Self}` needs to be a pointer-sized type"
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 383bdc7b6..5e01ccc07 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -682,7 +682,8 @@ pub unsafe fn zeroed<T>() -> T {
pub unsafe fn uninitialized<T>() -> T {
// SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
unsafe {
- intrinsics::assert_uninit_valid::<T>();
+ #[cfg(not(bootstrap))] // If the compiler hits this itself then it deserves the UB.
+ intrinsics::assert_mem_uninitialized_valid::<T>();
let mut val = MaybeUninit::<T>::uninit();
// Fill memory with 0x01, as an imperfect mitigation for old code that uses this function on
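The renamed guard protects the long-deprecated `mem::uninitialized`; new code should use `MaybeUninit` instead, as in this stable sketch:

```rust
use std::mem::MaybeUninit;

let mut slot = MaybeUninit::<u32>::uninit();
slot.write(7);
// SAFETY: `slot` was fully initialized by the `write` above.
let value = unsafe { slot.assume_init() };
assert_eq!(value, 7);
```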
diff --git a/library/core/src/num/dec2flt/fpu.rs b/library/core/src/num/dec2flt/fpu.rs
index ec5fa45fd..3806977f7 100644
--- a/library/core/src/num/dec2flt/fpu.rs
+++ b/library/core/src/num/dec2flt/fpu.rs
@@ -26,7 +26,7 @@ mod fpu_precision {
/// Developer's Manual (Volume 1).
///
/// The only field which is relevant for the following code is PC, Precision Control. This
- /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
/// - 0b00, single precision i.e., 32-bits
/// - 0b10, double precision i.e., 64-bits
/// - 0b11, double extended precision i.e., 80-bits (default state)
diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs
index 2c6a0ba64..1308b0770 100644
--- a/library/core/src/num/f32.rs
+++ b/library/core/src/num/f32.rs
@@ -428,7 +428,7 @@ impl f32 {
self != self
}
- // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // FIXME(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs
index fd3c18ce2..2a22c4302 100644
--- a/library/core/src/num/f64.rs
+++ b/library/core/src/num/f64.rs
@@ -427,7 +427,7 @@ impl f64 {
self != self
}
- // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // FIXME(#50145): `abs` is publicly unavailable in core due to
// concerns about portability, so this implementation is for
// private use internally.
#[inline]
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 57096f439..2cae98b8e 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -1514,37 +1514,50 @@ macro_rules! int_impl {
(a as Self, b)
}
- /// Calculates `self + rhs + carry` without the ability to overflow.
+ /// Calculates `self` + `rhs` + `carry` and checks for overflow.
///
- /// Performs "signed ternary addition" which takes in an extra bit to add, and may return an
- /// additional bit of overflow. This signed function is used only on the highest-ordered data,
- /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ /// Performs "ternary addition" of two integer operands and a carry-in
+ /// bit, and returns a tuple of the sum along with a boolean indicating
+ /// whether an arithmetic overflow would occur. On overflow, the wrapped
+ /// value is returned.
///
- /// # Examples
+ /// This allows chaining together multiple additions to create a wider
+ /// addition, and can be useful for bignum addition. This method should
+ /// only be used for the most significant word; for the less significant
+ /// words the unsigned method
+ #[doc = concat!("[`", stringify!($UnsignedT), "::carrying_add`]")]
+ /// should be used.
///
- /// Basic usage:
+ /// The output boolean returned by this method is *not* a carry flag,
+ /// and should *not* be added to a more significant word.
///
- /// ```
- /// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, false), (7, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".carrying_add(2, true), (8, false));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(0, true), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, true), (", stringify!($SelfT), "::MIN + 1, true));")]
- #[doc = concat!("assert_eq!(",
- stringify!($SelfT), "::MAX.carrying_add(", stringify!($SelfT), "::MAX, true), ",
- "(-1, true));"
- )]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.carrying_add(-1, true), (", stringify!($SelfT), "::MIN, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".carrying_add(", stringify!($SelfT), "::MAX, true), (", stringify!($SelfT), "::MIN, true));")]
- /// ```
+ /// If the input carry is false, this method is equivalent to
+ /// [`overflowing_add`](Self::overflowing_add).
///
- /// If `carry` is false, this method is equivalent to [`overflowing_add`](Self::overflowing_add):
+ /// # Examples
///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5_", stringify!($SelfT), ".carrying_add(2, false), 5_", stringify!($SelfT), ".overflowing_add(2));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.carrying_add(1, false), ", stringify!($SelfT), "::MAX.overflowing_add(1));")]
+ /// // Only the most significant word is signed.
+ /// //
+ #[doc = concat!("// 10 MAX (a = 10 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ #[doc = concat!("// + -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
+ /// // ---------
+ #[doc = concat!("// 6 8 (sum = 6 × 2^", stringify!($BITS), " + 8)")]
+ ///
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (10, ", stringify!($UnsignedT), "::MAX);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (-5, 9);")]
+ /// let carry0 = false;
+ ///
+ #[doc = concat!("// ", stringify!($UnsignedT), "::carrying_add for the less significant words")]
+ /// let (sum0, carry1) = a0.carrying_add(b0, carry0);
+ /// assert_eq!(carry1, true);
+ ///
+ #[doc = concat!("// ", stringify!($SelfT), "::carrying_add for the most significant word")]
+ /// let (sum1, overflow) = a1.carrying_add(b1, carry1);
+ /// assert_eq!(overflow, false);
+ ///
+ /// assert_eq!((sum1, sum0), (6, 8));
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
@@ -1608,25 +1621,51 @@ macro_rules! int_impl {
(a as Self, b)
}
- /// Calculates `self - rhs - borrow` without the ability to overflow.
+ /// Calculates `self` &minus; `rhs` &minus; `borrow` and checks for
+ /// overflow.
///
- /// Performs "signed ternary subtraction" which takes in an extra bit to subtract, and may return an
- /// additional bit of overflow. This signed function is used only on the highest-ordered data,
- /// for which the signed overflow result indicates whether the big integer overflowed or not.
+ /// Performs "ternary subtraction" by subtracting both an integer
+ /// operand and a borrow-in bit from `self`, and returns a tuple of the
+ /// difference along with a boolean indicating whether an arithmetic
+ /// overflow would occur. On overflow, the wrapped value is returned.
///
- /// # Examples
+ /// This allows chaining together multiple subtractions to create a
+ /// wider subtraction, and can be useful for bignum subtraction. This
+ /// method should only be used for the most significant word; for the
+ /// less significant words the unsigned method
+ #[doc = concat!("[`", stringify!($UnsignedT), "::borrowing_sub`]")]
+ /// should be used.
///
- /// Basic usage:
+ /// The output boolean returned by this method is *not* a borrow flag,
+ /// and should *not* be subtracted from a more significant word.
+ ///
+ /// If the input borrow is false, this method is equivalent to
+ /// [`overflowing_sub`](Self::overflowing_sub).
+ ///
+ /// # Examples
///
/// ```
/// #![feature(bigint_helper_methods)]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, false), (3, false));")]
- #[doc = concat!("assert_eq!(5", stringify!($SelfT), ".borrowing_sub(2, true), (2, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, false), (-1, false));")]
- #[doc = concat!("assert_eq!(0", stringify!($SelfT), ".borrowing_sub(1, true), (-2, false));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.borrowing_sub(1, true), (", stringify!($SelfT), "::MAX - 1, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, false), (", stringify!($SelfT), "::MIN, true));")]
- #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.borrowing_sub(-1, true), (", stringify!($SelfT), "::MAX, false));")]
+ /// // Only the most significant word is signed.
+ /// //
+ #[doc = concat!("// 6 8 (a = 6 × 2^", stringify!($BITS), " + 8)")]
+ #[doc = concat!("// - -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
+ /// // ---------
+ #[doc = concat!("// 10 MAX (diff = 10 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
+ ///
+ #[doc = concat!("let (a1, a0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (6, 8);")]
+ #[doc = concat!("let (b1, b0): (", stringify!($SelfT), ", ", stringify!($UnsignedT), ") = (-5, 9);")]
+ /// let borrow0 = false;
+ ///
+ #[doc = concat!("// ", stringify!($UnsignedT), "::borrowing_sub for the less significant words")]
+ /// let (diff0, borrow1) = a0.borrowing_sub(b0, borrow0);
+ /// assert_eq!(borrow1, true);
+ ///
+ #[doc = concat!("// ", stringify!($SelfT), "::borrowing_sub for the most significant word")]
+ /// let (diff1, overflow) = a1.borrowing_sub(b1, borrow1);
+ /// assert_eq!(overflow, false);
+ ///
+ #[doc = concat!("assert_eq!((diff1, diff0), (10, ", stringify!($UnsignedT), "::MAX));")]
/// ```
#[unstable(feature = "bigint_helper_methods", issue = "85532")]
#[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")]
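The rewritten docs describe a two-word bignum in which only the most significant word is signed. A standalone sketch of that layout using `i32`/`u32` (requires a nightly toolchain, since `bigint_helper_methods` is unstable):

```
#![feature(bigint_helper_methods)]

fn main() {
    // a = 10 * 2^32 + (2^32 - 1), b = -5 * 2^32 + 9
    let (a1, a0): (i32, u32) = (10, u32::MAX);
    let (b1, b0): (i32, u32) = (-5, 9);

    // Unsigned carrying_add for the less significant word...
    let (sum0, carry) = a0.carrying_add(b0, false);
    // ...and the signed method only for the most significant word. Its
    // boolean is an overflow flag, not a carry to propagate further.
    let (sum1, overflow) = a1.carrying_add(b1, carry);

    assert!(!overflow);
    assert_eq!((sum1, sum0), (6, 8)); // 6 * 2^32 + 8
}
```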
diff --git a/library/core/src/ops/function.rs b/library/core/src/ops/function.rs
index 127b047db..b7e1aee9d 100644
--- a/library/core/src/ops/function.rs
+++ b/library/core/src/ops/function.rs
@@ -1,4 +1,3 @@
-#[cfg(not(bootstrap))]
use crate::marker::Tuple;
/// The version of the call operator that takes an immutable receiver.
@@ -54,87 +53,6 @@ use crate::marker::Tuple;
/// let double = |x| x * 2;
/// assert_eq!(call_with_one(double), 2);
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
- label = "expected an `Fn<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait Fn<Args>: FnMut<Args> {
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call(&self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes an immutable receiver.
-///
-/// Instances of `Fn` can be called repeatedly without mutating state.
-///
-/// *This trait (`Fn`) is not to be confused with [function pointers]
-/// (`fn`).*
-///
-/// `Fn` is implemented automatically by closures which only take immutable
-/// references to captured variables or don't capture anything at all, as well
-/// as (safe) [function pointers] (with some caveats, see their documentation
-/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
-/// implements `Fn`, too.
-///
-/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
-/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
-/// is expected.
-///
-/// Use `Fn` as a bound when you want to accept a parameter of function-like
-/// type and need to call it repeatedly and without mutating state (e.g., when
-/// calling it concurrently). If you do not need such strict requirements, use
-/// [`FnMut`] or [`FnOnce`] as bounds.
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Calling a closure
-///
-/// ```
-/// let square = |x| x * x;
-/// assert_eq!(square(5), 25);
-/// ```
-///
-/// ## Using a `Fn` parameter
-///
-/// ```
-/// fn call_with_one<F>(func: F) -> usize
-/// where F: Fn(usize) -> usize {
-/// func(1)
-/// }
-///
-/// let double = |x| x * 2;
-/// assert_eq!(call_with_one(double), 2);
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -222,95 +140,6 @@ pub trait Fn<Args: Tuple>: FnMut<Args> {
///
/// assert_eq!(x, 5);
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn_mut"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait FnMut<Args>: FnOnce<Args> {
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes a mutable receiver.
-///
-/// Instances of `FnMut` can be called repeatedly and may mutate state.
-///
-/// `FnMut` is implemented automatically by closures which take mutable
-/// references to captured variables, as well as all types that implement
-/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
-/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
-/// implements `FnMut`, too.
-///
-/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
-/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
-/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
-///
-/// Use `FnMut` as a bound when you want to accept a parameter of function-like
-/// type and need to call it repeatedly, while allowing it to mutate state.
-/// If you don't want the parameter to mutate state, use [`Fn`] as a
-/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Calling a mutably capturing closure
-///
-/// ```
-/// let mut x = 5;
-/// {
-/// let mut square_x = || x *= x;
-/// square_x();
-/// }
-/// assert_eq!(x, 25);
-/// ```
-///
-/// ## Using a `FnMut` parameter
-///
-/// ```
-/// fn do_twice<F>(mut func: F)
-/// where F: FnMut()
-/// {
-/// func();
-/// func();
-/// }
-///
-/// let mut x: usize = 1;
-/// {
-/// let add_two_to_x = || x += 2;
-/// do_twice(add_two_to_x);
-/// }
-///
-/// assert_eq!(x, 5);
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn_mut"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -390,92 +219,6 @@ pub trait FnMut<Args: Tuple>: FnOnce<Args> {
///
/// // `consume_and_return_x` can no longer be invoked at this point
/// ```
-#[cfg(bootstrap)]
-#[lang = "fn_once"]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_paren_sugar]
-#[rustc_on_unimplemented(
- on(
- Args = "()",
- note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
- ),
- on(
- _Self = "unsafe fn",
- note = "unsafe function cannot be called generically without an unsafe block",
- // SAFETY: tidy is not smart enough to tell that the below unsafe block is a string
- label = "call the function in a closure: `|| unsafe {{ /* code */ }}`"
- ),
- message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
- label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
-)]
-#[fundamental] // so that regex can rely that `&str: !FnMut`
-#[must_use = "closures are lazy and do nothing unless called"]
-#[const_trait]
-pub trait FnOnce<Args> {
- /// The returned type after the call operator is used.
- #[lang = "fn_once_output"]
- #[stable(feature = "fn_once_output", since = "1.12.0")]
- type Output;
-
- /// Performs the call operation.
- #[unstable(feature = "fn_traits", issue = "29625")]
- extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
-}
-
-/// The version of the call operator that takes a by-value receiver.
-///
-/// Instances of `FnOnce` can be called, but might not be callable multiple
-/// times. Because of this, if the only thing known about a type is that it
-/// implements `FnOnce`, it can only be called once.
-///
-/// `FnOnce` is implemented automatically by closures that might consume captured
-/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
-/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
-///
-/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
-/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
-///
-/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
-/// type and only need to call it once. If you need to call the parameter
-/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
-/// state, use [`Fn`].
-///
-/// See the [chapter on closures in *The Rust Programming Language*][book] for
-/// some more information on this topic.
-///
-/// Also of note is the special syntax for `Fn` traits (e.g.
-/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
-/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
-///
-/// [book]: ../../book/ch13-01-closures.html
-/// [function pointers]: fn
-/// [nomicon]: ../../nomicon/hrtb.html
-///
-/// # Examples
-///
-/// ## Using a `FnOnce` parameter
-///
-/// ```
-/// fn consume_with_relish<F>(func: F)
-/// where F: FnOnce() -> String
-/// {
-/// // `func` consumes its captured variables, so it cannot be run more
-/// // than once.
-/// println!("Consumed: {}", func());
-///
-/// println!("Delicious!");
-///
-/// // Attempting to invoke `func()` again will throw a `use of moved
-/// // value` error for `func`.
-/// }
-///
-/// let x = String::from("x");
-/// let consume_and_return_x = move || x;
-/// consume_with_relish(consume_and_return_x);
-///
-/// // `consume_and_return_x` can no longer be invoked at this point
-/// ```
-#[cfg(not(bootstrap))]
#[lang = "fn_once"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_paren_sugar]
@@ -507,68 +250,6 @@ pub trait FnOnce<Args: Tuple> {
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
-#[cfg(bootstrap)]
-mod impls {
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const Fn<A> for &F
- where
- F: ~const Fn<A>,
- {
- extern "rust-call" fn call(&self, args: A) -> F::Output {
- (**self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnMut<A> for &F
- where
- F: ~const Fn<A>,
- {
- extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
- (**self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnOnce<A> for &F
- where
- F: ~const Fn<A>,
- {
- type Output = F::Output;
-
- extern "rust-call" fn call_once(self, args: A) -> F::Output {
- (*self).call(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnMut<A> for &mut F
- where
- F: ~const FnMut<A>,
- {
- extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
- (*self).call_mut(args)
- }
- }
-
- #[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_fn_trait_ref_impls", issue = "101803")]
- impl<A, F: ?Sized> const FnOnce<A> for &mut F
- where
- F: ~const FnMut<A>,
- {
- type Output = F::Output;
- extern "rust-call" fn call_once(self, args: A) -> F::Output {
- (*self).call_mut(args)
- }
- }
-}
-
-#[cfg(not(bootstrap))]
mod impls {
use crate::marker::Tuple;
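With the bootstrap copies gone, the `Args: Tuple` bound is unconditional on `Fn`/`FnMut`/`FnOnce`. It only becomes visible when the traits are named with explicit angle-bracket arguments, which is nightly-only; the parenthesized sugar is unaffected. A hedged sketch:

```
#![feature(unboxed_closures, fn_traits)]

// Explicit form: `(i32,)` must be a tuple type, which the `Tuple`
// marker bound on the trait now enforces.
fn call_explicit<F: Fn<(i32,), Output = i32>>(f: F) -> i32 {
    f.call((41,))
}

// The everyday parenthesized form is sugar for the above.
fn call_sugar(f: impl Fn(i32) -> i32) -> i32 {
    f(41)
}

fn main() {
    assert_eq!(call_explicit(|x| x + 1), 42);
    assert_eq!(call_sugar(|x| x + 1), 42);
}
```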
diff --git a/library/core/src/ops/index.rs b/library/core/src/ops/index.rs
index 5e3dc48b6..228efb0bc 100644
--- a/library/core/src/ops/index.rs
+++ b/library/core/src/ops/index.rs
@@ -165,7 +165,7 @@ see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#ind
#[doc(alias = "]")]
#[doc(alias = "[]")]
#[const_trait]
-pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
+pub trait IndexMut<Idx: ?Sized>: ~const Index<Idx> {
/// Performs the mutable indexing (`container[index]`) operation.
///
/// # Panics
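The `~const` supertrait lets a `const` impl of `IndexMut` rely on a `const` `Index`. On stable, the relationship is simply that `IndexMut` requires `Index`; a plain sketch with an invented `Grid` type:

```
use std::ops::{Index, IndexMut};

struct Grid {
    cells: Vec<f64>,
    width: usize,
}

impl Index<(usize, usize)> for Grid {
    type Output = f64;
    fn index(&self, (x, y): (usize, usize)) -> &f64 {
        &self.cells[y * self.width + x]
    }
}

// Only possible because `Index` is implemented; that supertrait edge is
// what the patch marks `~const` for const-trait impls.
impl IndexMut<(usize, usize)> for Grid {
    fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut f64 {
        &mut self.cells[y * self.width + x]
    }
}

fn main() {
    let mut g = Grid { cells: vec![0.0; 4], width: 2 };
    g[(1, 0)] = 3.5;
    assert_eq!(g[(1, 0)], 3.5);
}
```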
diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs
index a5e5b13b3..97d9b750d 100644
--- a/library/core/src/ops/mod.rs
+++ b/library/core/src/ops/mod.rs
@@ -17,10 +17,10 @@
//! should have some resemblance to multiplication (and share expected
//! properties like associativity).
//!
-//! Note that the `&&` and `||` operators short-circuit, i.e., they only
-//! evaluate their second operand if it contributes to the result. Since this
-//! behavior is not enforceable by traits, `&&` and `||` are not supported as
-//! overloadable operators.
+//! Note that the `&&` and `||` operators are currently not supported for
+//! overloading. Due to their short-circuiting nature, they require a different
+//! design from traits for other operators like [`BitAnd`]. Designs for them are
+//! under discussion.
//!
//! Many of the operators take their operands by value. In non-generic
//! contexts involving built-in types, this is usually not a problem.
@@ -201,7 +201,7 @@ pub(crate) use self::try_trait::{ChangeOutputType, NeverShortCircuit};
#[unstable(feature = "generator_trait", issue = "43122")]
pub use self::generator::{Generator, GeneratorState};
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
pub use self::unsize::CoerceUnsized;
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
diff --git a/library/core/src/ops/unsize.rs b/library/core/src/ops/unsize.rs
index a920b9165..b51f12580 100644
--- a/library/core/src/ops/unsize.rs
+++ b/library/core/src/ops/unsize.rs
@@ -31,41 +31,41 @@ use crate::marker::Unsize;
/// [dst-coerce]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
/// [unsize]: crate::marker::Unsize
/// [nomicon-coerce]: ../../nomicon/coercions.html
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
#[lang = "coerce_unsized"]
pub trait CoerceUnsized<T: ?Sized> {
// Empty.
}
// &mut T -> &mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
// &mut T -> &U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {}
// &mut T -> *mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {}
// &mut T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {}
// &T -> &U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
// &T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {}
// *mut T -> *mut U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
// *mut T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {}
// *const T -> *const U
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
/// `DispatchFromDyn` is used in the implementation of object safety checks (specifically allowing
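These impls are what license the compiler's implicit unsizing coercions; user code never names `CoerceUnsized` directly. A stable sketch of the coercions the list above covers:

```
fn main() {
    let array: [i32; 3] = [1, 2, 3];

    let r: &[i32; 3] = &array;
    let slice: &[i32] = r;                    // &T -> &U (array unsizing)
    let raw: *const [i32] = slice;            // &T -> *const U
    let shown: &dyn std::fmt::Debug = &array; // sized -> trait object

    assert_eq!(slice.len(), 3);
    assert!(!raw.is_null());
    assert_eq!(format!("{:?}", shown), "[1, 2, 3]");
}
```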
diff --git a/library/core/src/option.rs b/library/core/src/option.rs
index 505d964e5..7cc00e3f8 100644
--- a/library/core/src/option.rs
+++ b/library/core/src/option.rs
@@ -72,6 +72,50 @@
//! }
//! ```
//!
+//! # The question mark operator, `?`
+//!
+//! Similar to the [`Result`] type, when writing code that calls many functions that return the
+//! [`Option`] type, handling `Some`/`None` can be tedious. The question mark
+//! operator, [`?`], hides some of the boilerplate of propagating values
+//! up the call stack.
+//!
+//! It replaces this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! fn add_last_numbers(stack: &mut Vec<i32>) -> Option<i32> {
+//! let a = stack.pop();
+//! let b = stack.pop();
+//!
+//! match (a, b) {
+//! (Some(x), Some(y)) => Some(x + y),
+//! _ => None,
+//! }
+//! }
+//!
+//! ```
+//!
+//! With this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! fn add_last_numbers(stack: &mut Vec<i32>) -> Option<i32> {
+//! Some(stack.pop()? + stack.pop()?)
+//! }
+//! ```
+//!
+//! *It's much nicer!*
+//!
+//! Ending the expression with [`?`] will result in the [`Some`]'s unwrapped value, unless the
+//! result is [`None`], in which case [`None`] is returned early from the enclosing function.
+//!
+//! [`?`] can be used in functions that return [`Option`] because of the
+//! early return of [`None`] that it provides.
+//!
+//! [`?`]: crate::ops::Try
+//! [`Some`]: Some
+//! [`None`]: None
+//!
//! # Representation
//!
//! Rust guarantees to optimize the following types `T` such that
@@ -608,13 +652,14 @@ impl<T> Option<T> {
///
/// # Examples
///
- /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, preserving
- /// the original. The [`map`] method takes the `self` argument by value, consuming the original,
- /// so this technique uses `as_ref` to first take an `Option` to a reference
- /// to the value inside the original.
+ /// Calculates the length of an <code>Option<[String]></code> as an <code>Option<[usize]></code>
+ /// without moving the [`String`]. The [`map`] method takes the `self` argument by value,
+ /// consuming the original, so this technique uses `as_ref` to first take an `Option` to a
+ /// reference to the value inside the original.
///
/// [`map`]: Option::map
/// [String]: ../../std/string/struct.String.html "String"
+ /// [`String`]: ../../std/string/struct.String.html "String"
///
/// ```
/// let text: Option<String> = Some("Hello, world!".to_string());
@@ -902,8 +947,8 @@ impl<T> Option<T> {
///
/// # Examples
///
- /// Converts an <code>Option<[String]></code> into an <code>Option<[usize]></code>, consuming
- /// the original:
+ /// Calculates the length of an <code>Option<[String]></code> as an
+ /// <code>Option<[usize]></code>, consuming the original:
///
/// [String]: ../../std/string/struct.String.html "String"
/// ```
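The two reworded examples and the new `?` section compose naturally; a short standalone sketch:

```
fn add_last_two(stack: &mut Vec<i32>) -> Option<i32> {
    // `?` unwraps `Some` or returns `None` early from this function.
    Some(stack.pop()? + stack.pop()?)
}

fn main() {
    let text: Option<String> = Some("Hello, world!".to_string());
    // `as_ref` maps the length without moving the `String` out...
    let len = text.as_ref().map(|s| s.len());
    assert_eq!(len, Some(13));
    // ...so `text` is still usable afterwards.
    assert!(text.is_some());

    let mut stack = vec![1, 2];
    assert_eq!(add_last_two(&mut stack), Some(3));
    assert_eq!(add_last_two(&mut stack), None); // empty: early return
}
```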
diff --git a/library/core/src/panic.rs b/library/core/src/panic.rs
index 461b70c32..8338a5d7e 100644
--- a/library/core/src/panic.rs
+++ b/library/core/src/panic.rs
@@ -90,14 +90,14 @@ pub macro unreachable_2021 {
),
}
-/// An internal trait used by libstd to pass data from libstd to `panic_unwind`
-/// and other panic runtimes. Not intended to be stabilized any time soon, do
-/// not use.
+/// An internal trait used by std to pass data from std to `panic_unwind` and
+/// other panic runtimes. Not intended to be stabilized any time soon, do not
+/// use.
#[unstable(feature = "std_internals", issue = "none")]
#[doc(hidden)]
pub unsafe trait BoxMeUp {
/// Take full ownership of the contents.
- /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in libcore.
+ /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in core.
///
/// After this method got called, only some dummy default value is left in `self`.
/// Calling this method twice, or calling `get` after calling this method, is an error.
diff --git a/library/core/src/panic/panic_info.rs b/library/core/src/panic/panic_info.rs
index 1923155eb..0d385c9d1 100644
--- a/library/core/src/panic/panic_info.rs
+++ b/library/core/src/panic/panic_info.rs
@@ -157,7 +157,7 @@ impl fmt::Display for PanicInfo<'_> {
write!(formatter, "'{}', ", payload)?
}
// NOTE: we cannot use downcast_ref::<String>() here
- // since String is not available in libcore!
+ // since String is not available in core!
// The payload is a String when `std::panic!` is called with multiple arguments,
// but in that case the message is also available.
diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs
index a704a00fa..48e90e6d7 100644
--- a/library/core/src/panicking.rs
+++ b/library/core/src/panicking.rs
@@ -1,8 +1,8 @@
-//! Panic support for libcore
+//! Panic support for core
//!
//! The core library cannot define panicking, but it does *declare* panicking. This
-//! means that the functions inside of libcore are allowed to panic, but to be
-//! useful an upstream crate must define panicking for libcore to use. The current
+//! means that the functions inside of core are allowed to panic, but to be
+//! useful an upstream crate must define panicking for core to use. The current
//! interface for panicking is:
//!
//! ```
@@ -13,7 +13,7 @@
//! This definition allows for panicking with any general message, but it does not
//! allow for failing with a `Box<Any>` value. (`PanicInfo` just contains a `&(dyn Any + Send)`,
//! for which we fill in a dummy value in `PanicInfo::internal_constructor`.)
-//! The reason for this is that libcore is not allowed to allocate.
+//! The reason for this is that core is not allowed to allocate.
//!
//! This module contains a few other panicking functions, but these are just the
//! necessary lang items for the compiler. All panics are funneled through this
@@ -64,12 +64,17 @@ pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
unsafe { panic_impl(&pi) }
}
-/// Like panic_fmt, but without unwinding and track_caller to reduce the impact on codesize.
-/// Also just works on `str`, as a `fmt::Arguments` needs more space to be passed.
+/// Like `panic_fmt`, but for non-unwinding panics.
+///
+/// Has to be a separate function so that it can carry the `rustc_nounwind` attribute.
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+// This attribute has the key side-effect that if the panic handler ignores `can_unwind`
+// and unwinds anyway, we will hit the "unwinding out of nounwind function" guard,
+// which causes a "panic in a function that cannot unwind".
#[rustc_nounwind]
-pub fn panic_str_nounwind(msg: &'static str) -> ! {
+pub fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
}
@@ -82,8 +87,6 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
- let pieces = [msg];
- let fmt = fmt::Arguments::new_v1(&pieces, &[]);
let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller(), false);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
@@ -93,7 +96,7 @@ pub fn panic_str_nounwind(msg: &'static str) -> ! {
// Next we define a bunch of higher-level wrappers that all bottom out in the two core functions
// above.
-/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
+/// The underlying implementation of core's `panic!` macro when no formatting is used.
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
@@ -111,6 +114,15 @@ pub const fn panic(expr: &'static str) -> ! {
panic_fmt(fmt::Arguments::new_v1(&[expr], &[]));
}
+/// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize.
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[cfg_attr(not(bootstrap), lang = "panic_nounwind")] // needed by codegen for non-unwinding panics
+#[rustc_nounwind]
+pub fn panic_nounwind(expr: &'static str) -> ! {
+ panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+}
+
#[inline]
#[track_caller]
#[rustc_diagnostic_item = "panic_str"]
@@ -153,10 +165,11 @@ fn panic_bounds_check(index: usize, len: usize) -> ! {
/// any extra arguments (including those synthesized by track_caller).
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[lang = "panic_no_unwind"] // needed by codegen for panic in nounwind function
+#[cfg_attr(bootstrap, lang = "panic_no_unwind")] // needed by codegen for panic in nounwind function
+#[cfg_attr(not(bootstrap), lang = "panic_cannot_unwind")] // needed by codegen for panic in nounwind function
#[rustc_nounwind]
-fn panic_no_unwind() -> ! {
- panic_str_nounwind("panic in a function that cannot unwind")
+fn panic_cannot_unwind() -> ! {
+ panic_nounwind("panic in a function that cannot unwind")
}
/// This function is used instead of panic_fmt in const eval.
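The module docs above describe the contract: core declares panicking, and exactly one upstream crate defines it. In std builds that definition is supplied automatically; in a bare `#[no_std]` program it is the `#[panic_handler]` item. A skeleton, illustrative only:

```
#![no_std]

use core::panic::PanicInfo;

// The single definition that core's panic machinery bottoms out in.
// A real runtime would report `_info` and then abort or unwind.
#[panic_handler]
fn panic(_info: &PanicInfo<'_>) -> ! {
    loop {}
}
```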
diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs
index 4524fa4c4..febe57dc9 100644
--- a/library/core/src/pin.rs
+++ b/library/core/src/pin.rs
@@ -485,6 +485,16 @@ impl<P: Deref<Target: Unpin>> Pin<P> {
///
/// Unlike `Pin::new_unchecked`, this method is safe because the pointer
/// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// // We can pin the value, since it doesn't care about being moved
+ /// let mut pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// ```
#[inline(always)]
#[rustc_const_unstable(feature = "const_pin", issue = "76654")]
#[stable(feature = "pin", since = "1.33.0")]
@@ -496,8 +506,20 @@ impl<P: Deref<Target: Unpin>> Pin<P> {
/// Unwraps this `Pin<P>` returning the underlying pointer.
///
- /// This requires that the data inside this `Pin` is [`Unpin`] so that we
+ /// This requires that the data inside this `Pin` implements [`Unpin`] so that we
/// can ignore the pinning invariants when unwrapping it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// // Unwrap the pin to get a reference to the value
+ /// let r = Pin::into_inner(pinned);
+ /// assert_eq!(*r, 5);
+ /// ```
#[inline(always)]
#[rustc_const_unstable(feature = "const_pin", issue = "76654")]
#[stable(feature = "pin_into_inner", since = "1.39.0")]
@@ -600,9 +622,8 @@ impl<P: Deref> Pin<P> {
/// that the closure is pinned.
///
/// The better alternative is to avoid all that trouble and do the pinning in the outer function
- /// instead (here using the unstable `pin` macro):
+ /// instead (here using the [`pin!`][crate::pin::pin] macro):
/// ```
- /// #![feature(pin_macro)]
/// use std::pin::pin;
/// use std::task::Context;
/// use std::future::Future;
@@ -707,6 +728,18 @@ impl<P: DerefMut> Pin<P> {
///
/// This overwrites pinned data, but that is okay: its destructor gets
/// run before being overwritten, so no pinning guarantee is violated.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let mut val: u8 = 5;
+ /// let mut pinned: Pin<&mut u8> = Pin::new(&mut val);
+ /// println!("{}", pinned); // 5
+ /// pinned.as_mut().set(10);
+ /// println!("{}", pinned); // 10
+ /// ```
#[stable(feature = "pin", since = "1.33.0")]
#[inline(always)]
pub fn set(&mut self, value: P::Target)
@@ -720,7 +753,7 @@ impl<P: DerefMut> Pin<P> {
impl<'a, T: ?Sized> Pin<&'a T> {
/// Constructs a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -823,7 +856,7 @@ impl<'a, T: ?Sized> Pin<&'a mut T> {
/// Construct a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -992,7 +1025,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### Basic usage
///
/// ```rust
-/// #![feature(pin_macro)]
/// # use core::marker::PhantomPinned as Foo;
/// use core::pin::{pin, Pin};
///
@@ -1010,7 +1042,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### Manually polling a `Future` (without `Unpin` bounds)
///
/// ```rust
-/// #![feature(pin_macro)]
/// use std::{
/// future::Future,
/// pin::pin,
@@ -1049,7 +1080,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// ### With `Generator`s
///
/// ```rust
-/// #![feature(generators, generator_trait, pin_macro)]
+/// #![feature(generators, generator_trait)]
/// use core::{
/// ops::{Generator, GeneratorState},
/// pin::pin,
@@ -1092,7 +1123,6 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// The following, for instance, fails to compile:
///
/// ```rust,compile_fail
-/// #![feature(pin_macro)]
/// use core::pin::{pin, Pin};
/// # use core::{marker::PhantomPinned as Foo, mem::drop as stuff};
///
@@ -1134,7 +1164,7 @@ impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
/// constructor.
///
/// [`Box::pin`]: ../../std/boxed/struct.Box.html#method.pin
-#[unstable(feature = "pin_macro", issue = "93178")]
+#[stable(feature = "pin_macro", since = "1.68.0")]
#[rustc_macro_transparency = "semitransparent"]
#[allow_internal_unstable(unsafe_pin_internals)]
pub macro pin($value:expr $(,)?) {
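With `pin!` stable as of 1.68, stack pinning no longer needs `unsafe` or a `Box::pin` allocation. A self-contained sketch that polls an immediately-ready future (the no-op waker is hand-rolled here only to keep the example dependency-free):

```
use std::future::Future;
use std::pin::pin;
use std::ptr;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A do-nothing waker: enough to poll a future that is already ready.
fn noop_waker() -> Waker {
    fn clone(_: *const ()) -> RawWaker {
        RawWaker::new(ptr::null(), &VTABLE)
    }
    fn noop(_: *const ()) {}
    static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, noop, noop, noop);
    // SAFETY: every vtable entry is a no-op on a null data pointer.
    unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &VTABLE)) }
}

fn main() {
    // `pin!` pins the future to this stack frame.
    let mut fut = pin!(async { 41 + 1 });
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Ready(42));
}
```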
diff --git a/library/core/src/prelude/mod.rs b/library/core/src/prelude/mod.rs
index 3cd3a3b78..12f762ef1 100644
--- a/library/core/src/prelude/mod.rs
+++ b/library/core/src/prelude/mod.rs
@@ -1,8 +1,8 @@
-//! The libcore prelude
+//! The core prelude
//!
-//! This module is intended for users of libcore which do not link to libstd as
-//! well. This module is imported by default when `#![no_std]` is used in the
-//! same manner as the standard library's prelude.
+//! This module is intended for users of core which do not link to std as well.
+//! This module is imported by default when `#![no_std]` is used in the same
+//! manner as the standard library's prelude.
#![stable(feature = "core_prelude", since = "1.4.0")]
diff --git a/library/core/src/prelude/v1.rs b/library/core/src/prelude/v1.rs
index 2d67d742c..10525a16f 100644
--- a/library/core/src/prelude/v1.rs
+++ b/library/core/src/prelude/v1.rs
@@ -75,14 +75,12 @@ pub use crate::macros::builtin::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
-#[cfg(not(bootstrap))]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use crate::macros::builtin::alloc_error_handler;
-#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use crate::macros::builtin::{bench, derive, global_allocator, test, test_case};
+pub use crate::macros::builtin::{
+ alloc_error_handler, bench, derive, global_allocator, test, test_case,
+};
#[unstable(feature = "derive_const", issue = "none")]
-#[cfg(not(bootstrap))]
pub use crate::macros::builtin::derive_const;
#[unstable(
@@ -104,5 +102,4 @@ pub use crate::macros::builtin::cfg_eval;
issue = "23416",
reason = "placeholder syntax for type ascription"
)]
-#[cfg(not(bootstrap))]
pub use crate::macros::builtin::type_ascribe;
diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs
index 64a5290c3..2123147c7 100644
--- a/library/core/src/ptr/alignment.rs
+++ b/library/core/src/ptr/alignment.rs
@@ -10,8 +10,7 @@ use crate::{cmp, fmt, hash, mem, num};
/// are likely not to be supported by actual allocators and linkers.
#[unstable(feature = "ptr_alignment_type", issue = "102070")]
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(transparent)]
pub struct Alignment(AlignmentEnum);
@@ -203,8 +202,7 @@ type AlignmentEnum = AlignmentEnum32;
type AlignmentEnum = AlignmentEnum64;
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u16)]
enum AlignmentEnum16 {
_Align1Shl0 = 1 << 0,
@@ -226,8 +224,7 @@ enum AlignmentEnum16 {
}
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u32)]
enum AlignmentEnum32 {
_Align1Shl0 = 1 << 0,
@@ -265,8 +262,7 @@ enum AlignmentEnum32 {
}
#[derive(Copy, Clone, Eq)]
-#[cfg_attr(bootstrap, derive(PartialEq))]
-#[cfg_attr(not(bootstrap), derive_const(PartialEq))]
+#[derive_const(PartialEq)]
#[repr(u64)]
enum AlignmentEnum64 {
_Align1Shl0 = 1 << 0,
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index d34813599..7b1cb5488 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics;
+use crate::intrinsics::{self, const_eval_select};
use crate::mem;
use crate::slice::{self, SliceIndex};
@@ -34,12 +34,23 @@ impl<T: ?Sized> *const T {
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
#[inline]
pub const fn is_null(self) -> bool {
- // Compare via a cast to a thin pointer, so fat pointers are only
- // considering their "data" part for null-ness.
- match (self as *const u8).guaranteed_eq(null()) {
- None => false,
- Some(res) => res,
+ #[inline]
+ fn runtime_impl(ptr: *const u8) -> bool {
+ ptr.addr() == 0
}
+
+ #[inline]
+ const fn const_impl(ptr: *const u8) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers are only
+ // considering their "data" part for null-ness.
+ match (ptr).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self as *const u8,), const_impl, runtime_impl) }
}
/// Casts to a pointer of another type.
@@ -191,14 +202,11 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
- unsafe { mem::transmute(self) }
+ unsafe { mem::transmute(self.cast::<()>()) }
}
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
@@ -228,12 +236,9 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn expose_addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
- self as usize
+ self.cast::<()>() as usize
}
/// Creates a new pointer with the given address.
@@ -251,10 +256,7 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: usize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
@@ -277,10 +279,7 @@ impl<T: ?Sized> *const T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -1008,7 +1007,7 @@ impl<T: ?Sized> *const T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
@@ -1173,7 +1172,7 @@ impl<T: ?Sized> *const T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
pub const fn wrapping_sub(self, count: usize) -> Self
where
T: Sized,
@@ -1350,26 +1349,6 @@ impl<T: ?Sized> *const T {
panic!("align_offset: align is not a power-of-two");
}
- #[cfg(bootstrap)]
- {
- fn rt_impl<T>(p: *const T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
-
- const fn ctfe_impl<T>(_: *const T, _: usize) -> usize {
- usize::MAX
- }
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
- }
-
- #[cfg(not(bootstrap))]
{
// SAFETY: `align` has been checked to be a power of 2 above
unsafe { align_offset(self, align) }
@@ -1406,8 +1385,7 @@ impl<T: ?Sized> *const T {
/// is never aligned if cast to a type with a stricter alignment than the reference's
/// underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1433,8 +1411,7 @@ impl<T: ?Sized> *const T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1460,8 +1437,7 @@ impl<T: ?Sized> *const T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1537,8 +1513,7 @@ impl<T: ?Sized> *const T {
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// cannot be stricter aligned than the reference's underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1563,8 +1538,7 @@ impl<T: ?Sized> *const T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1588,8 +1562,7 @@ impl<T: ?Sized> *const T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1613,11 +1586,22 @@ impl<T: ?Sized> *const T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
- // The cast to `()` is used to
- // 1. deal with fat pointers; and
- // 2. ensure that `align_offset` doesn't actually try to compute an offset.
- self.cast::<()>().align_offset(align) == 0
+ #[inline]
+ fn runtime_impl(ptr: *const (), align: usize) -> bool {
+ ptr.addr() & (align - 1) == 0
+ }
+
+ #[inline]
+ const fn const_impl(ptr: *const (), align: usize) -> bool {
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ ptr.align_offset(align) == 0
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
}
}
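Dropping the `T: Sized` bounds means the strict-provenance accessors now work on fat pointers too, where the address is taken from the data part. A nightly sketch (assumes a toolchain with this patch applied, since both the methods and the bound removal are unstable):

```
#![feature(strict_provenance)]

fn main() {
    let x = [1u8, 2, 3];
    let p: *const [u8] = &x; // fat pointer: data address + length

    let a = p.addr();
    // `with_addr` keeps provenance and metadata, unlike `a as *const _`.
    let q = p.with_addr(a);
    assert_eq!(q.addr(), a);
    // SAFETY: `q` is `p` reconstituted at its own address.
    assert_eq!(unsafe { &*q }, &x[..]);
}
```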
diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs
index a8604843e..2ea032d4a 100644
--- a/library/core/src/ptr/metadata.rs
+++ b/library/core/src/ptr/metadata.rs
@@ -50,7 +50,7 @@ use crate::hash::{Hash, Hasher};
///
/// [`to_raw_parts`]: *const::to_raw_parts
#[lang = "pointee_trait"]
-#[cfg_attr(not(bootstrap), rustc_deny_explicit_impl)]
+#[rustc_deny_explicit_impl]
pub trait Pointee {
/// The type for metadata in pointers and references to `Self`.
#[lang = "metadata_type"]
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 48b2e88da..1ad9af154 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -516,6 +516,27 @@ pub const fn null<T: ?Sized + Thin>() -> *const T {
from_raw_parts(invalid(0), ())
}
+/// Creates a null mutable raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *mut i32 = ptr::null_mut();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
+#[rustc_allow_const_fn_unstable(ptr_metadata)]
+#[rustc_diagnostic_item = "ptr_null_mut"]
+pub const fn null_mut<T: ?Sized + Thin>() -> *mut T {
+ from_raw_parts_mut(invalid_mut(0), ())
+}
+
/// Creates an invalid pointer with the given address.
///
/// This is different from `addr as *const T`, which creates a pointer that picks up a previously
@@ -663,25 +684,26 @@ where
addr as *mut T
}
-/// Creates a null mutable raw pointer.
+/// Converts a reference to a raw pointer.
///
-/// # Examples
-///
-/// ```
-/// use std::ptr;
+/// This is equivalent to `r as *const T`, but is a bit safer since it will never silently change
+/// type or mutability, in particular if the code is refactored.
+#[inline(always)]
+#[must_use]
+#[unstable(feature = "ptr_from_ref", issue = "106116")]
+pub fn from_ref<T: ?Sized>(r: &T) -> *const T {
+ r
+}
+
+/// Converts a mutable reference to a raw pointer.
///
-/// let p: *mut i32 = ptr::null_mut();
-/// assert!(p.is_null());
-/// ```
+/// This is equivalent to `r as *mut T`, but is a bit safer since it will never silently change
+/// type or mutability, in particular if the code is refactored.
#[inline(always)]
#[must_use]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_promotable]
-#[rustc_const_stable(feature = "const_ptr_null", since = "1.24.0")]
-#[rustc_allow_const_fn_unstable(ptr_metadata)]
-#[rustc_diagnostic_item = "ptr_null_mut"]
-pub const fn null_mut<T: ?Sized + Thin>() -> *mut T {
- from_raw_parts_mut(invalid_mut(0), ())
+#[unstable(feature = "ptr_from_ref", issue = "106116")]
+pub fn from_mut<T: ?Sized>(r: &mut T) -> *mut T {
+ r
}
/// Forms a raw slice from a pointer and a length.
@@ -1679,7 +1701,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
- // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+ // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
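A sketch of why `from_ref`/`from_mut` are preferred over `as` casts: the functions fix both the pointee type and the mutability, so a refactor cannot silently change what the cast produces (requires the unstable `ptr_from_ref` feature):

```
#![feature(ptr_from_ref)]

use std::ptr;

fn main() {
    let mut x: u32 = 7;

    // Unlike `&mut x as *mut u32`, this cannot quietly become a
    // different cast if the surrounding types change.
    let q: *mut u32 = ptr::from_mut(&mut x);
    // SAFETY: `q` was just created from a live exclusive reference.
    unsafe { *q += 1 };

    let p: *const u32 = ptr::from_ref(&x);
    // SAFETY: `p` points at `x`, initialized and no longer aliased mutably.
    assert_eq!(unsafe { *p }, 8);
}
```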
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index c924a90b1..ed1e3bd48 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -1,6 +1,6 @@
use super::*;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics;
+use crate::intrinsics::{self, const_eval_select};
use crate::slice::{self, SliceIndex};
impl<T: ?Sized> *mut T {
@@ -33,12 +33,23 @@ impl<T: ?Sized> *mut T {
#[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
#[inline]
pub const fn is_null(self) -> bool {
- // Compare via a cast to a thin pointer, so fat pointers are only
- // considering their "data" part for null-ness.
- match (self as *mut u8).guaranteed_eq(null_mut()) {
- None => false,
- Some(res) => res,
+ #[inline]
+ fn runtime_impl(ptr: *mut u8) -> bool {
+ ptr.addr() == 0
}
+
+ #[inline]
+ const fn const_impl(ptr: *mut u8) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers are only
+ // considering their "data" part for null-ness.
+ match (ptr).guaranteed_eq(null_mut()) {
+ None => false,
+ Some(res) => res,
+ }
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self as *mut u8,), const_impl, runtime_impl) }
}
/// Casts to a pointer of another type.
@@ -197,14 +208,11 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
- unsafe { mem::transmute(self) }
+ unsafe { mem::transmute(self.cast::<()>()) }
}
/// Gets the "address" portion of the pointer, and 'exposes' the "provenance" part for future
@@ -234,12 +242,9 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline(always)]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn expose_addr(self) -> usize
- where
- T: Sized,
- {
+ pub fn expose_addr(self) -> usize {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
- self as usize
+ self.cast::<()>() as usize
}
/// Creates a new pointer with the given address.
@@ -257,10 +262,7 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: usize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: usize) -> Self {
// FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
//
// In the mean-time, this operation is defined to be "as if" it was
@@ -283,10 +285,7 @@ impl<T: ?Sized> *mut T {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(usize) -> usize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -1110,7 +1109,7 @@ impl<T: ?Sized> *mut T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub const unsafe fn sub(self, count: usize) -> Self
where
@@ -1275,7 +1274,7 @@ impl<T: ?Sized> *mut T {
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
#[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
- #[inline]
+ #[inline(always)]
pub const fn wrapping_sub(self, count: usize) -> Self
where
T: Sized,
@@ -1618,26 +1617,6 @@ impl<T: ?Sized> *mut T {
panic!("align_offset: align is not a power-of-two");
}
- #[cfg(bootstrap)]
- {
- fn rt_impl<T>(p: *mut T, align: usize) -> usize {
- // SAFETY: `align` has been checked to be a power of 2 above
- unsafe { align_offset(p, align) }
- }
-
- const fn ctfe_impl<T>(_: *mut T, _: usize) -> usize {
- usize::MAX
- }
-
- // SAFETY:
- // It is permissible for `align_offset` to always return `usize::MAX`,
- // algorithm correctness can not depend on `align_offset` returning non-max values.
- //
- // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can.
- unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) }
- }
-
- #[cfg(not(bootstrap))]
{
// SAFETY: `align` has been checked to be a power of 2 above
unsafe { align_offset(self, align) }
@@ -1674,8 +1653,7 @@ impl<T: ?Sized> *mut T {
/// is never aligned if cast to a type with a stricter alignment than the reference's
/// underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
/// #![feature(const_mut_refs)]
@@ -1702,8 +1680,7 @@ impl<T: ?Sized> *mut T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1730,8 +1707,7 @@ impl<T: ?Sized> *mut T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1807,8 +1783,7 @@ impl<T: ?Sized> *mut T {
/// return `true` if the pointer is guaranteed to be aligned. This means that the pointer
/// cannot be stricter aligned than the reference's underlying allocation.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
/// #![feature(const_mut_refs)]
@@ -1834,8 +1809,7 @@ impl<T: ?Sized> *mut T {
/// Due to this behavior, it is possible that a runtime pointer derived from a compiletime
/// pointer is aligned, even if the compiletime pointer wasn't aligned.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1860,8 +1834,7 @@ impl<T: ?Sized> *mut T {
/// If a pointer is created from a fixed address, this function behaves the same during
/// runtime and compiletime.
///
- #[cfg_attr(bootstrap, doc = "```ignore")]
- #[cfg_attr(not(bootstrap), doc = "```")]
+ /// ```
/// #![feature(pointer_is_aligned)]
/// #![feature(const_pointer_is_aligned)]
///
@@ -1885,11 +1858,22 @@ impl<T: ?Sized> *mut T {
panic!("is_aligned_to: align is not a power-of-two");
}
- // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
- // The cast to `()` is used to
- // 1. deal with fat pointers; and
- // 2. ensure that `align_offset` doesn't actually try to compute an offset.
- self.cast::<()>().align_offset(align) == 0
+ #[inline]
+ fn runtime_impl(ptr: *mut (), align: usize) -> bool {
+ ptr.addr() & (align - 1) == 0
+ }
+
+ #[inline]
+ const fn const_impl(ptr: *mut (), align: usize) -> bool {
+ // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead.
+ // The cast to `()` is used to
+ // 1. deal with fat pointers; and
+ // 2. ensure that `align_offset` doesn't actually try to compute an offset.
+ ptr.align_offset(align) == 0
+ }
+
+ // SAFETY: The two versions are equivalent at runtime.
+ unsafe { const_eval_select((self.cast::<()>(), align), const_impl, runtime_impl) }
}
}
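
The new runtime path boils down to the classic power-of-two mask test. A minimal standalone illustration (not from the patch):

fn is_aligned_to_sketch(addr: usize, align: usize) -> bool {
    // For a power-of-two `align`, a pointer is aligned exactly when the low
    // log2(align) bits of its address are all zero.
    assert!(align.is_power_of_two());
    addr & (align - 1) == 0
}

// is_aligned_to_sketch(0x1000, 16) == true
// is_aligned_to_sketch(0x1001, 2)  == false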
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index c4348169c..8c1a64886 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -268,10 +268,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn addr(self) -> NonZeroUsize
- where
- T: Sized,
- {
+ pub fn addr(self) -> NonZeroUsize {
// SAFETY: The pointer is guaranteed by the type to be non-null,
// meaning that the address will be non-zero.
unsafe { NonZeroUsize::new_unchecked(self.pointer.addr()) }
@@ -286,10 +283,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn with_addr(self, addr: NonZeroUsize) -> Self
- where
- T: Sized,
- {
+ pub fn with_addr(self, addr: NonZeroUsize) -> Self {
// SAFETY: The result of `ptr::with_addr` is non-null because `addr` is guaranteed to be non-zero.
unsafe { NonNull::new_unchecked(self.pointer.with_addr(addr.get()) as *mut _) }
}
@@ -303,10 +297,7 @@ impl<T: ?Sized> NonNull<T> {
#[must_use]
#[inline]
#[unstable(feature = "strict_provenance", issue = "95228")]
- pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self
- where
- T: Sized,
- {
+ pub fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self {
self.with_addr(f(self.addr()))
}
@@ -712,7 +703,7 @@ impl<T: ?Sized> const Clone for NonNull<T> {
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
diff --git a/library/core/src/result.rs b/library/core/src/result.rs
index 3f33c5fd6..f00c40f35 100644
--- a/library/core/src/result.rs
+++ b/library/core/src/result.rs
@@ -209,11 +209,10 @@
//!
//! *It's much nicer!*
//!
-//! Ending the expression with [`?`] will result in the unwrapped
-//! success ([`Ok`]) value, unless the result is [`Err`], in which case
-//! [`Err`] is returned early from the enclosing function.
+//! Ending the expression with [`?`] will result in the [`Ok`]'s unwrapped value, unless the result
+//! is [`Err`], in which case [`Err`] is returned early from the enclosing function.
//!
-//! [`?`] can only be used in functions that return [`Result`] because of the
+//! [`?`] can be used in functions that return [`Result`] because of the
//! early return of [`Err`] that it provides.
//!
//! [`expect`]: Result::expect
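
A minimal standalone illustration of the `?` behavior described above (not part of the patch):

use std::num::ParseIntError;

fn parse_and_double(s: &str) -> Result<i32, ParseIntError> {
    // On `Err`, `?` returns early from `parse_and_double`; on `Ok`, the
    // expression evaluates to the unwrapped value.
    let n = s.parse::<i32>()?;
    Ok(n * 2)
}

// parse_and_double("21") == Ok(42)
// parse_and_double("x") returns the ParseIntError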
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index 062289767..90ab43d12 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -6,7 +6,7 @@ mod macros;
use crate::cmp;
use crate::cmp::Ordering;
use crate::fmt;
-use crate::intrinsics::{assume, exact_div, unchecked_sub};
+use crate::intrinsics::assume;
use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use crate::marker::{PhantomData, Send, Sized, Sync};
use crate::mem::{self, SizedTypeProperties};
@@ -35,12 +35,6 @@ impl<'a, T> IntoIterator for &'a mut [T] {
}
}
-// Macro helper functions
-#[inline(always)]
-fn size_from_ptr<T>(_: *const T) -> usize {
- mem::size_of::<T>()
-}
-
/// Immutable slice iterator
///
/// This struct is created by the [`iter`] method on [slices].
@@ -65,7 +59,7 @@ fn size_from_ptr<T>(_: *const T) -> usize {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
- end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a T>,
@@ -186,7 +180,7 @@ impl<T> AsRef<[T]> for Iter<'_, T> {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
- end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a mut T>,
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index ce51d48e3..0fd57b197 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -9,30 +9,20 @@ macro_rules! is_empty {
};
}
-// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
-// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
macro_rules! len {
($self: ident) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
let start = $self.ptr;
- let size = size_from_ptr(start.as_ptr());
- if size == 0 {
- // This _cannot_ use `unchecked_sub` because we depend on wrapping
+ if T::IS_ZST {
+ // This _cannot_ use `sub_ptr` because we depend on wrapping
// to represent the length of long ZST slice iterators.
$self.end.addr().wrapping_sub(start.as_ptr().addr())
} else {
- // We know that `start <= end`, so can do better than `offset_from`,
- // which needs to deal in signed. By setting appropriate flags here
- // we can tell LLVM this, which helps it remove bounds checks.
- // SAFETY: By the type invariant, `start <= end`
- let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
- // By also telling LLVM that the pointers are apart by an exact
- // multiple of the type size, it can optimize `len() == 0` down to
- // `start == end` instead of `(end - start) < size`.
- // SAFETY: By the type invariant, the pointers are aligned so the
- // distance between them must be a multiple of pointee size
- unsafe { exact_div(diff, size) }
+ // To get rid of some bounds checks (see `position`), we use `sub_ptr` instead of
+ // offset_from (Tested by `codegen/slice-position-bounds-check`.)
+ // SAFETY: by the type invariant pointers are aligned and `start <= end`
+ unsafe { $self.end.sub_ptr(start.as_ptr()) }
}
}};
}
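
To make the ZST branch above concrete: the iterator encodes the length directly in the address gap, so recovering it is a wrapping subtraction rather than pointer arithmetic. A standalone sketch (not from the patch):

fn zst_iter_len(start_addr: usize, end_field: usize) -> usize {
    // For ZSTs, `end` is really `start + len` stored as a raw address, and
    // the subtraction must wrap to support extremely long iterators.
    end_field.wrapping_sub(start_addr)
}

// An iterator over 3 ZST elements whose `ptr` address is 0x10 stores 0x13:
// zst_iter_len(0x10, 0x13) == 3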
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index d9281a925..d93a3a57e 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -29,13 +29,19 @@ use crate::slice;
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
+#[unstable(
+ feature = "slice_internals",
+ issue = "none",
+ reason = "exposed from core to be reused in std;"
+)]
+pub mod sort;
+
mod ascii;
mod cmp;
mod index;
mod iter;
mod raw;
mod rotate;
-mod sort;
mod specialize;
#[stable(feature = "rust1", since = "1.0.0")]
@@ -703,7 +709,7 @@ impl<T> [T] {
// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
- // in-bounds. Then after inlining -- once the actual
+ // in-bounds. Then after inlining -- once the actual
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
@@ -781,6 +787,22 @@ impl<T> [T] {
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
+ ///
+ /// There's no `windows_mut`, as its existence would let safe code violate the
+ /// "only one `&mut` at a time to the same thing" rule. However, you can sometimes
+ /// use [`Cell::as_slice_of_cells`](crate::cell::Cell::as_slice_of_cells) in
+ /// conjunction with `windows` to accomplish something similar:
+ /// ```
+ /// use std::cell::Cell;
+ ///
+ /// let mut array = ['R', 'u', 's', 't', ' ', '2', '0', '1', '5'];
+ /// let slice = &mut array[..];
+ /// let slice_of_cells: &[Cell<char>] = Cell::from_mut(slice).as_slice_of_cells();
+ /// for w in slice_of_cells.windows(3) {
+ /// Cell::swap(&w[0], &w[2]);
+ /// }
+ /// assert_eq!(array, ['s', 't', ' ', '2', '0', '1', '5', 'u', 'R']);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
@@ -893,7 +915,7 @@ impl<T> [T] {
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
- assert_ne!(chunk_size, 0);
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
ChunksExact::new(self, chunk_size)
}
@@ -935,7 +957,7 @@ impl<T> [T] {
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
- assert_ne!(chunk_size, 0);
+ assert_ne!(chunk_size, 0, "chunks cannot have a size of zero");
ChunksExactMut::new(self, chunk_size)
}
@@ -1002,11 +1024,22 @@ impl<T> [T] {
/// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
/// assert_eq!(remainder, &['m']);
/// ```
+ ///
+ /// If you expect the slice to be an exact multiple, you can combine
+ /// `let`-`else` with an empty slice pattern:
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice = ['R', 'u', 's', 't'];
+ /// let (chunks, []) = slice.as_chunks::<2>() else {
+ /// panic!("slice didn't have even length")
+ /// };
+ /// assert_eq!(chunks, &[['R', 'u'], ['s', 't']]);
+ /// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
#[must_use]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (multiple_of_n, remainder) = self.split_at(len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1037,7 +1070,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (remainder, multiple_of_n) = self.split_at(self.len() - len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1076,7 +1109,7 @@ impl<T> [T] {
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
ArrayChunks::new(self)
}
@@ -1155,7 +1188,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (multiple_of_n, remainder) = self.split_at_mut(len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1192,7 +1225,7 @@ impl<T> [T] {
#[inline]
#[must_use]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
let len = self.len() / N;
let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N);
// SAFETY: We already panicked for zero, and ensured by construction
@@ -1233,11 +1266,11 @@ impl<T> [T] {
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "chunks cannot have a size of zero");
ArrayChunksMut::new(self)
}
- /// Returns an iterator over overlapping windows of `N` elements of a slice,
+ /// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
@@ -1265,7 +1298,7 @@ impl<T> [T] {
#[unstable(feature = "array_windows", issue = "75027")]
#[inline]
pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
- assert_ne!(N, 0);
+ assert_ne!(N, 0, "windows cannot have a size of zero");
ArrayWindows::new(self)
}
@@ -2465,7 +2498,7 @@ impl<T> [T] {
let mid = left + size / 2;
// SAFETY: the while condition means `size` is strictly positive, so
- // `size/2 < size`. Thus `left + size/2 < left + size`, which
+ // `size/2 < size`. Thus `left + size/2 < left + size`, which
// coupled with the `left + size <= self.len()` invariant means
// we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
@@ -3795,7 +3828,7 @@ impl<T> [T] {
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
- /// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
+ /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 87f77b7f2..2181f9a81 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -3,8 +3,11 @@
//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
//! published at: <https://github.com/orlp/pdqsort>
//!
-//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
+//! Unstable sorting is compatible with core because it doesn't allocate memory, unlike our
//! stable sorting implementation.
+//!
+//! In addition it also contains the core logic of the stable sort used by `slice::sort` based on
+//! TimSort.
use crate::cmp;
use crate::mem::{self, MaybeUninit, SizedTypeProperties};
@@ -18,9 +21,9 @@ struct CopyOnDrop<T> {
impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
- // SAFETY: This is a helper class.
- // Please refer to its usage for correctness.
- // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`.
+ // SAFETY: This is a helper class.
+ // Please refer to its usage for correctness.
+ // Namely, one must be sure that `src` and `dest` do not overlap, as required by `ptr::copy_nonoverlapping`.
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
@@ -831,6 +834,15 @@ fn partition_at_index_loop<'a, T, F>(
) where
F: FnMut(&T, &T) -> bool,
{
+ // Limit the amount of iterations and fall back to heapsort, similarly to `slice::sort_unstable`.
+ // This lowers the worst case running time from O(n^2) to O(n log n).
+ // FIXME: Investigate whether it would be better to use something like Median of Medians
+ // or Fast Deterministic Selection to guarantee O(n) worst case.
+ let mut limit = usize::BITS - v.len().leading_zeros();
+
+ // True if the last partitioning was reasonably balanced.
+ let mut was_balanced = true;
+
loop {
// For slices of up to this length it's probably faster to simply sort them.
const MAX_INSERTION: usize = 10;
@@ -839,6 +851,18 @@ fn partition_at_index_loop<'a, T, F>(
return;
}
+ if limit == 0 {
+ heapsort(v, is_less);
+ return;
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+ // some elements around. Hopefully we'll choose a better pivot this time.
+ if !was_balanced {
+ break_patterns(v);
+ limit -= 1;
+ }
+
// Choose a pivot
let (pivot, _) = choose_pivot(v, is_less);
@@ -863,6 +887,7 @@ fn partition_at_index_loop<'a, T, F>(
}
let (mid, _) = partition(v, pivot, is_less);
+ was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8;
// Split the slice into `left`, `pivot`, and `right`.
let (left, right) = v.split_at_mut(mid);
@@ -883,6 +908,7 @@ fn partition_at_index_loop<'a, T, F>(
}
}
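+
+// A standalone sketch (not from the patch) of the iteration limit introduced
+// above; it is the usual introselect bound of roughly log2 of the length:
+//
+// fn partition_limit(n: usize) -> u32 {
+//     // `usize::BITS - n.leading_zeros()` is the bit length of `n`, i.e.
+//     // floor(log2(n)) + 1 for n > 0, so at most O(log n) imbalanced
+//     // partitionings happen before the heapsort fallback kicks in.
+//     usize::BITS - n.leading_zeros()
+// }
+//
+// partition_limit(1) == 1, partition_limit(1024) == 11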
+/// Reorder the slice such that the element at `index` is at its final sorted position.
pub fn partition_at_index<T, F>(
v: &mut [T],
index: usize,
@@ -927,3 +953,513 @@ where
let pivot = &mut pivot[0];
(left, pivot, right)
}
+
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ // SAFETY: Copy tmp back even if panic, and ensure unique observation.
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *const T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ // SAFETY: The caller must ensure that src and dest are correctly set.
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
+
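A safe-code sketch of "method 3" chosen above, using `rotate_left` in place of the raw copies (illustrative only; the real code needs the raw version for panic safety and to take a comparison closure instead of an `Ord` bound):

fn insert_head_safe<T: Ord>(v: &mut [T]) {
    if v.len() >= 2 && v[1] < v[0] {
        // Find where the head belongs within the already-sorted tail...
        let mut i = 1;
        while i < v.len() && v[i] < v[0] {
            i += 1;
        }
        // ...then shift the traversed elements left by one and drop the head
        // into the hole, which is what the copy loop above does with raw ptrs.
        v[..i].rotate_left(1);
    }
}
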
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+
+ // SAFETY: mid and len must be in-bounds of v.
+ let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+
+ // SAFETY: buf must have enough capacity for `v[..mid]`.
+ unsafe {
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+ }
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+
+ // SAFETY: `left` and `right` must be valid and part of `v`; the same holds for `out`.
+ unsafe {
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ }
+ } else {
+ // The right run is shorter.
+
+ // SAFETY: buf must have enough capacity for `v[mid..]`.
+ unsafe {
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+ }
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+
+ // SAFETY: `left` and `right` must be valid and part of `v`; the same holds for `out`.
+ unsafe {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+
+ // SAFETY: ptr.add(1) must still be a valid pointer and part of `v`.
+ *ptr = unsafe { ptr.add(1) };
+ old
+ }
+
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ // SAFETY: ptr.sub(1) must still be a valid pointer and part of `v`.
+ *ptr = unsafe { ptr.sub(1) };
+ *ptr
+ }
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // SAFETY: `T` is not a zero-sized type, and these are pointers into a slice's elements.
+ unsafe {
+ let len = self.end.sub_ptr(self.start);
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
+
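For contrast with the in-place, panic-safe merge above, here is an allocation-based sketch of the same stable merge (not part of the patch; it requires `Clone + Ord` instead of a comparison closure):

fn merge_safe<T: Clone + Ord>(v: &mut [T], mid: usize) {
    let (a, b) = v.split_at(mid);
    let (mut i, mut j) = (0, 0);
    let mut out = Vec::with_capacity(v.len());
    while i < a.len() && j < b.len() {
        // `<=` prefers the left run on ties, which is what makes the merge stable.
        if a[i] <= b[j] {
            out.push(a[i].clone());
            i += 1;
        } else {
            out.push(b[j].clone());
            j += 1;
        }
    }
    out.extend_from_slice(&a[i..]);
    out.extend_from_slice(&b[j..]);
    v.clone_from_slice(&out);
}
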
+/// This merge sort borrows some (but not all) ideas from TimSort, which used to be described in
+/// detail [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt). However, Python
+/// has since switched to a Powersort-based implementation.
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>(
+ v: &mut [T],
+ is_less: &mut CmpF,
+ elem_alloc_fn: ElemAllocF,
+ elem_dealloc_fn: ElemDeallocF,
+ run_alloc_fn: RunAllocF,
+ run_dealloc_fn: RunDeallocF,
+) where
+ CmpF: FnMut(&T, &T) -> bool,
+ ElemAllocF: Fn(usize) -> *mut T,
+ ElemDeallocF: Fn(*mut T, usize),
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ // The caller should have already checked that.
+ debug_assert!(!T::IS_ZST);
+
+ let len = v.len();
+
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], is_less);
+ }
+ }
+ return;
+ }
+
+ // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
+ // shallow copies of the contents of `v` without risking the dtors running on copies if
+ // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
+ // which will always have length at most `len / 2`.
+ let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn);
+ let buf_ptr = buf.buf_ptr;
+
+ let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+
+ // SAFETY: `v.get_unchecked` must be fed with correct in-bounds indices.
+ unsafe {
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+ v[start..end].reverse();
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
+ {
+ start -= 1;
+ }
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(TimSortRun { start, len: end - start });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(runs.as_slice()) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and
+ // neither side may have length 0.
+ unsafe {
+ merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less);
+ }
+ runs[r] = TimSortRun { start: left.start, len: left.len + right.len };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+ // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+ // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[TimSortRun]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
+ } else {
+ None
+ }
+ }
+
+ // Extremely basic versions of Vec.
+ // Their use is very limited, and keeping the code here allows it to be reused between the
+ // sort implementations.
+ struct BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ buf_ptr: *mut T,
+ capacity: usize,
+ elem_dealloc_fn: ElemDeallocF,
+ }
+
+ impl<T, ElemDeallocF> BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ fn new<ElemAllocF>(
+ len: usize,
+ elem_alloc_fn: ElemAllocF,
+ elem_dealloc_fn: ElemDeallocF,
+ ) -> Self
+ where
+ ElemAllocF: Fn(usize) -> *mut T,
+ {
+ Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn }
+ }
+ }
+
+ impl<T, ElemDeallocF> Drop for BufGuard<T, ElemDeallocF>
+ where
+ ElemDeallocF: Fn(*mut T, usize),
+ {
+ fn drop(&mut self) {
+ (self.elem_dealloc_fn)(self.buf_ptr, self.capacity);
+ }
+ }
+
+ struct RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ buf_ptr: *mut TimSortRun,
+ capacity: usize,
+ len: usize,
+ run_alloc_fn: RunAllocF,
+ run_dealloc_fn: RunDeallocF,
+ }
+
+ impl<RunAllocF, RunDeallocF> RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn new(run_alloc_fn: RunAllocF, run_dealloc_fn: RunDeallocF) -> Self {
+ // Most slices can be sorted with at most 16 runs in-flight.
+ const START_RUN_CAPACITY: usize = 16;
+
+ Self {
+ buf_ptr: run_alloc_fn(START_RUN_CAPACITY),
+ capacity: START_RUN_CAPACITY,
+ len: 0,
+ run_alloc_fn,
+ run_dealloc_fn,
+ }
+ }
+
+ fn push(&mut self, val: TimSortRun) {
+ if self.len == self.capacity {
+ let old_capacity = self.capacity;
+ let old_buf_ptr = self.buf_ptr;
+
+ self.capacity = self.capacity * 2;
+ self.buf_ptr = (self.run_alloc_fn)(self.capacity);
+
+ // SAFETY: Both the new and the old `buf_ptr` were correctly allocated, and
+ // `old_buf_ptr` holds `old_capacity` valid elements.
+ unsafe {
+ ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity);
+ }
+
+ (self.run_dealloc_fn)(old_buf_ptr, old_capacity);
+ }
+
+ // SAFETY: The invariant was just checked.
+ unsafe {
+ self.buf_ptr.add(self.len).write(val);
+ }
+ self.len += 1;
+ }
+
+ fn remove(&mut self, index: usize) {
+ if index >= self.len {
+ panic!("Index out of bounds");
+ }
+
+ // SAFETY: buf_ptr needs to be valid and len invariant upheld.
+ unsafe {
+ // the place we are taking from.
+ let ptr = self.buf_ptr.add(index);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.add(1), ptr, self.len - index - 1);
+ }
+ self.len -= 1;
+ }
+
+ fn as_slice(&self) -> &[TimSortRun] {
+ // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld.
+ unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) }
+ }
+
+ fn len(&self) -> usize {
+ self.len
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> core::ops::Index<usize> for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ type Output = TimSortRun;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ if index < self.len {
+ // SAFETY: buf_ptr and len invariant must be upheld.
+ unsafe {
+ return &*(self.buf_ptr.add(index));
+ }
+ }
+
+ panic!("Index out of bounds");
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> core::ops::IndexMut<usize> for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ if index < self.len {
+ // SAFETY: buf_ptr and len invariant must be upheld.
+ unsafe {
+ return &mut *(self.buf_ptr.add(index));
+ }
+ }
+
+ panic!("Index out of bounds");
+ }
+ }
+
+ impl<RunAllocF, RunDeallocF> Drop for RunVec<RunAllocF, RunDeallocF>
+ where
+ RunAllocF: Fn(usize) -> *mut TimSortRun,
+ RunDeallocF: Fn(*mut TimSortRun, usize),
+ {
+ fn drop(&mut self) {
+ // Because `TimSortRun` is `Copy`, the elements don't need to be dropped individually;
+ // deallocating the whole buffer is enough.
+ (self.run_dealloc_fn)(self.buf_ptr, self.capacity);
+ }
+ }
+}
+
+/// Internal type used by merge_sort.
+#[derive(Clone, Copy, Debug)]
+pub struct TimSortRun {
+ len: usize,
+ start: usize,
+}
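
A standalone check of the two run invariants quoted in the `merge_sort` docs above (illustrative, not from the patch; `run_lens` is just a list of run lengths, outermost first):

fn invariants_hold(run_lens: &[usize]) -> bool {
    // 1. each run is strictly longer than the one pushed after it, and
    // 2. each run is longer than the next two combined; together these keep
    //    the stack logarithmic and the total work at O(n log n).
    run_lens.windows(2).all(|w| w[0] > w[1])
        && run_lens.windows(3).all(|w| w[0] > w[1] + w[2])
}

// invariants_hold(&[64, 32, 16]) == true
// invariants_hold(&[16, 10, 8]) == false  (16 <= 10 + 8)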
diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs
index 24083ee6a..d969475aa 100644
--- a/library/core/src/str/iter.rs
+++ b/library/core/src/str/iter.rs
@@ -585,16 +585,17 @@ where
impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
#[inline]
fn get_end(&mut self) -> Option<&'a str> {
- if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) {
+ if !self.finished {
self.finished = true;
- // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
- unsafe {
- let string = self.matcher.haystack().get_unchecked(self.start..self.end);
- Some(string)
+
+ if self.allow_trailing_empty || self.end - self.start > 0 {
+ // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
+ let string = unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) };
+ return Some(string);
}
- } else {
- None
}
+
+ None
}
#[inline]
@@ -716,14 +717,14 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
}
#[inline]
- fn as_str(&self) -> &'a str {
+ fn remainder(&self) -> Option<&'a str> {
// `Self::get_end` doesn't change `self.start`
if self.finished {
- return "";
+ return None;
}
// SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
- unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) }
+ Some(unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) })
}
}
@@ -746,44 +747,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> Split<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".split(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplit<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".rsplit(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "Mary had a little");
+ /// assert_eq!(split.remainder(), Some("Mary had a little"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -806,44 +811,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> SplitTerminator<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "A..B..".split_terminator('.');
- /// assert_eq!(split.as_str(), "A..B..");
+ /// assert_eq!(split.remainder(), Some("A..B.."));
/// split.next();
- /// assert_eq!(split.as_str(), ".B..");
+ /// assert_eq!(split.remainder(), Some(".B.."));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplitTerminator<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "A..B..".rsplit_terminator('.');
- /// assert_eq!(split.as_str(), "A..B..");
+ /// assert_eq!(split.remainder(), Some("A..B.."));
/// split.next();
- /// assert_eq!(split.as_str(), "A..B");
+ /// assert_eq!(split.remainder(), Some("A..B"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -905,8 +914,8 @@ impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> {
}
#[inline]
- fn as_str(&self) -> &'a str {
- self.iter.as_str()
+ fn remainder(&self) -> Option<&'a str> {
+ self.iter.remainder()
}
}
@@ -929,44 +938,48 @@ generate_pattern_iterators! {
}
impl<'a, P: Pattern<'a>> SplitN<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".splitn(3, ' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
impl<'a, P: Pattern<'a>> RSplitN<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_as_str)]
+ /// #![feature(str_split_remainder)]
/// let mut split = "Mary had a little lamb".rsplitn(3, ' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "Mary had a little");
+ /// assert_eq!(split.remainder(), Some("Mary had a little"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
@@ -1239,22 +1252,22 @@ impl<'a> SplitWhitespace<'a> {
/// # Examples
///
/// ```
- /// #![feature(str_split_whitespace_as_str)]
+ /// #![feature(str_split_whitespace_remainder)]
///
/// let mut split = "Mary had a little lamb".split_whitespace();
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
///
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
///
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.inner.iter.as_str()
+ #[unstable(feature = "str_split_whitespace_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.inner.iter.remainder()
}
}
@@ -1290,32 +1303,34 @@ impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> {
impl FusedIterator for SplitAsciiWhitespace<'_> {}
impl<'a> SplitAsciiWhitespace<'a> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_whitespace_as_str)]
+ /// #![feature(str_split_whitespace_remainder)]
///
/// let mut split = "Mary had a little lamb".split_ascii_whitespace();
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
///
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
///
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
#[must_use]
- #[unstable(feature = "str_split_whitespace_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
+ #[unstable(feature = "str_split_whitespace_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
if self.inner.iter.iter.finished {
- return "";
+ return None;
}
// SAFETY: Slice is created from str.
- unsafe { crate::str::from_utf8_unchecked(&self.inner.iter.iter.v) }
+ Some(unsafe { crate::str::from_utf8_unchecked(&self.inner.iter.iter.v) })
}
}
@@ -1358,23 +1373,25 @@ impl<'a, P: Pattern<'a, Searcher: ReverseSearcher<'a>>> DoubleEndedIterator
impl<'a, P: Pattern<'a>> FusedIterator for SplitInclusive<'a, P> {}
impl<'a, P: Pattern<'a>> SplitInclusive<'a, P> {
- /// Returns remainder of the split string
+ /// Returns remainder of the split string.
+ ///
+ /// If the iterator is empty, returns `None`.
///
/// # Examples
///
/// ```
- /// #![feature(str_split_inclusive_as_str)]
+ /// #![feature(str_split_inclusive_remainder)]
/// let mut split = "Mary had a little lamb".split_inclusive(' ');
- /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// assert_eq!(split.remainder(), Some("Mary had a little lamb"));
/// split.next();
- /// assert_eq!(split.as_str(), "had a little lamb");
+ /// assert_eq!(split.remainder(), Some("had a little lamb"));
/// split.by_ref().for_each(drop);
- /// assert_eq!(split.as_str(), "");
+ /// assert_eq!(split.remainder(), None);
/// ```
#[inline]
- #[unstable(feature = "str_split_inclusive_as_str", issue = "77998")]
- pub fn as_str(&self) -> &'a str {
- self.0.as_str()
+ #[unstable(feature = "str_split_inclusive_remainder", issue = "77998")]
+ pub fn remainder(&self) -> Option<&'a str> {
+ self.0.remainder()
}
}
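
One migration note for the `as_str` to `remainder` rename above: the old method returned `""` once the iterator finished, so existing callers can keep that behavior with `unwrap_or`. A nightly-only sketch, assuming `#![feature(str_split_remainder)]`:

let mut split = "Mary had a little lamb".split(' ');
split.by_ref().for_each(drop);
// Where code previously relied on `as_str()` yielding "", use:
assert_eq!(split.remainder().unwrap_or(""), "");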
diff --git a/library/core/src/str/mod.rs b/library/core/src/str/mod.rs
index 45fd2caae..ab2f8520e 100644
--- a/library/core/src/str/mod.rs
+++ b/library/core/src/str/mod.rs
@@ -368,7 +368,7 @@ impl str {
#[inline(always)]
pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
// SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
- // has the same layout as `&[u8]` (only libstd can make this guarantee).
+ // has the same layout as `&[u8]` (only std can make this guarantee).
// The pointer dereference is safe since it comes from a mutable reference which
// is guaranteed to be valid for writes.
unsafe { &mut *(self as *mut str as *mut [u8]) }
@@ -970,8 +970,10 @@ impl str {
/// An iterator over the lines of a string, as string slices.
///
- /// Lines are ended with either a newline (`\n`) or a carriage return with
- /// a line feed (`\r\n`).
+ /// Lines are split at line endings that are either newlines (`\n`) or
+ /// sequences of a carriage return followed by a line feed (`\r\n`).
+ ///
+ /// Line terminators are not included in the lines returned by the iterator.
///
/// The final line ending is optional. A string that ends with a final line
/// ending will return the same lines as an otherwise identical string
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index edc68d6fa..14367eb09 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -1786,6 +1786,42 @@ impl<T> AtomicPtr<T> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
+
+ /// Returns a mutable pointer to the underlying pointer.
+ ///
+ /// Doing non-atomic reads and writes on the resulting pointer can be a data race.
+ /// This method is mostly useful for FFI, where the function signature may use
+ /// `*mut *mut T` instead of `&AtomicPtr<T>`.
+ ///
+ /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+ /// atomic types work with interior mutability. All modifications of an atomic change the value
+ /// through a shared reference, and can do so safely as long as they use atomic operations. Any
+ /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+ /// restriction: operations on it must be atomic.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// #![feature(atomic_mut_ptr)]
/// use std::sync::atomic::AtomicPtr;
+ ///
+ /// extern "C" {
+ /// fn my_atomic_op(arg: *mut *mut u32);
+ /// }
+ ///
+ /// let mut value = 17;
+ /// let atomic = AtomicPtr::new(&mut value);
+ ///
+ /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
+ /// unsafe {
+ /// my_atomic_op(atomic.as_mut_ptr());
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut *mut T {
+ self.p.get()
+ }
}
#[cfg(target_has_atomic_load_store = "8")]
@@ -2678,9 +2714,9 @@ macro_rules! atomic_int {
#[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
/// }
///
- #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
+ #[doc = concat!("let atomic = ", stringify!($atomic_type), "::new(1);")]
///
- // SAFETY: Safe as long as `my_atomic_op` is atomic.
+ /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
/// unsafe {
/// my_atomic_op(atomic.as_mut_ptr());
/// }
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index c65c27500..301ad41c9 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -138,7 +138,7 @@ impl<T: ?Sized> Exclusive<T> {
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
}
- /// Build a _mutable_ references to an `Exclusive<T>` from
+ /// Build a _mutable_ reference to an `Exclusive<T>` from
/// a _mutable_ reference to a `T`. This allows you to skip
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
@@ -149,7 +149,7 @@ impl<T: ?Sized> Exclusive<T> {
unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
}
- /// Build a _pinned mutable_ references to an `Exclusive<T>` from
+ /// Build a _pinned mutable_ reference to an `Exclusive<T>` from
/// a _pinned mutable_ reference to a `T`. This allows you to skip
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index f1dc4f7b5..25b61c0e6 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -9,7 +9,7 @@ use crate::task::Ready;
/// scheduled to receive a wakeup instead.
#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
-#[cfg_attr(not(bootstrap), lang = "Poll")]
+#[lang = "Poll"]
#[stable(feature = "futures_api", since = "1.36.0")]
pub enum Poll<T> {
/// Represents that a value is immediately ready.
diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs
index 0cff972df..89adfccd9 100644
--- a/library/core/src/task/wake.rs
+++ b/library/core/src/task/wake.rs
@@ -104,7 +104,7 @@ pub struct RawWakerVTable {
/// pointer.
wake_by_ref: unsafe fn(*const ()),
- /// This function gets called when a [`RawWaker`] gets dropped.
+ /// This function gets called when a [`Waker`] gets dropped.
///
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
@@ -151,7 +151,7 @@ impl RawWakerVTable {
///
/// # `drop`
///
- /// This function gets called when a [`RawWaker`] gets dropped.
+ /// This function gets called when a [`Waker`] gets dropped.
///
/// The implementation of this function must make sure to release any
/// resources that are associated with this instance of a [`RawWaker`] and
@@ -174,6 +174,7 @@ impl RawWakerVTable {
/// Currently, `Context` only serves to provide access to a [`&Waker`](Waker)
/// which can be used to wake the current task.
#[stable(feature = "futures_api", since = "1.36.0")]
+#[cfg_attr(not(bootstrap), lang = "Context")]
pub struct Context<'a> {
waker: &'a Waker,
// Ensure we future-proof against variance changes by forcing
@@ -181,6 +182,9 @@ pub struct Context<'a> {
// are contravariant while return-position lifetimes are
// covariant).
_marker: PhantomData<fn(&'a ()) -> &'a ()>,
+ // Ensure `Context` is `!Send` and `!Sync` in order to allow
+ // for future `!Send` and / or `!Sync` fields.
+ _marker2: PhantomData<*mut ()>,
}
impl<'a> Context<'a> {
@@ -190,7 +194,7 @@ impl<'a> Context<'a> {
#[must_use]
#[inline]
pub const fn from_waker(waker: &'a Waker) -> Self {
- Context { waker, _marker: PhantomData }
+ Context { waker, _marker: PhantomData, _marker2: PhantomData }
}
/// Returns a reference to the [`Waker`] for the current task.
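
The `_marker2` field added above uses a standard trick: raw pointers are neither `Send` nor `Sync`, so a zero-sized `PhantomData<*mut ()>` opts the struct out of both auto traits without storing any data. A standalone sketch (not from the patch):

use std::marker::PhantomData;

struct NotThreadSafe {
    _marker: PhantomData<*mut ()>, // zero-sized, but poisons Send and Sync
}

// fn assert_send<T: Send>() {}
// assert_send::<NotThreadSafe>(); // this would fail to compile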
diff --git a/library/core/src/unicode/mod.rs b/library/core/src/unicode/mod.rs
index 72fa059b7..e1faa407d 100644
--- a/library/core/src/unicode/mod.rs
+++ b/library/core/src/unicode/mod.rs
@@ -17,7 +17,7 @@ mod unicode_data;
#[stable(feature = "unicode_version", since = "1.45.0")]
pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION;
-// For use in liballoc, not re-exported in libstd.
+// For use in alloc, not re-exported in std.
pub use unicode_data::{
case_ignorable::lookup as Case_Ignorable, cased::lookup as Cased, conversions,
};
diff --git a/library/core/tests/any.rs b/library/core/tests/any.rs
index e98dac8d1..a8f6b7ebb 100644
--- a/library/core/tests/any.rs
+++ b/library/core/tests/any.rs
@@ -131,7 +131,6 @@ fn distinct_type_names() {
assert_ne!(type_name_of_val(Velocity), type_name_of_val(Velocity(0.0, -9.8)),);
}
-#[cfg(not(bootstrap))]
#[test]
fn dyn_type_name() {
trait Foo {
diff --git a/library/core/tests/char.rs b/library/core/tests/char.rs
index 8542e5c70..ac0b2ca16 100644
--- a/library/core/tests/char.rs
+++ b/library/core/tests/char.rs
@@ -306,6 +306,10 @@ fn test_decode_utf16() {
}
check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
+ check(&[0xD800], &[Err(0xD800)]);
+ check(&[0xD840, 0xDC00], &[Ok('\u{20000}')]);
+ check(&[0xD840, 0xD840, 0xDC00], &[Err(0xD840), Ok('\u{20000}')]);
+ check(&[0xDC00, 0xD840], &[Err(0xDC00), Err(0xD840)]);
}
#[test]
diff --git a/library/core/tests/lazy.rs b/library/core/tests/lazy.rs
index 70fcc6d2d..c7c3c479b 100644
--- a/library/core/tests/lazy.rs
+++ b/library/core/tests/lazy.rs
@@ -106,6 +106,12 @@ fn lazy_new() {
assert_eq!(called.get(), 1);
}
+// Check that we can infer `T` from closure's type.
+#[test]
+fn lazy_type_inference() {
+ let _ = LazyCell::new(|| ());
+}
+
#[test]
fn aliasing_in_get() {
let x = OnceCell::new();
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
index 99d4a40c4..42a26ae16 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
@@ -48,7 +48,6 @@
#![feature(is_sorted)]
#![feature(layout_for_ptr)]
#![feature(pattern)]
-#![feature(pin_macro)]
#![feature(sort_internals)]
#![feature(slice_take)]
#![feature(slice_from_ptr_range)]
@@ -155,3 +154,16 @@ mod time;
mod tuple;
mod unicode;
mod waker;
+
+/// Copied from `std::test_helpers::test_rng`; see that function for the rationale.
+#[track_caller]
+#[allow(dead_code)] // Not used in all configurations.
+pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use core::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ core::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+}
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index 1cfb4fd9f..f7740a114 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -77,7 +77,6 @@ fn align_of_val_basic() {
}
#[test]
-#[cfg(not(bootstrap))] // stage 0 doesn't have the fix yet, so the test fails
fn align_of_val_raw_packed() {
#[repr(C, packed)]
struct B {
diff --git a/library/core/tests/num/flt2dec/random.rs b/library/core/tests/num/flt2dec/random.rs
index d09500393..0084c1c81 100644
--- a/library/core/tests/num/flt2dec/random.rs
+++ b/library/core/tests/num/flt2dec/random.rs
@@ -9,8 +9,6 @@ use core::num::flt2dec::MAX_SIG_DIGITS;
use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
use rand::distributions::{Distribution, Uniform};
-use rand::rngs::StdRng;
-use rand::SeedableRng;
pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
match decode(v).1 {
@@ -92,7 +90,7 @@ where
if cfg!(target_os = "emscripten") {
return; // using rng pulls in i128 support, which doesn't work
}
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
let f32_range = Uniform::new(0x0000_0001u32, 0x7f80_0000);
iterate("f32_random_equivalence_test", k, n, f, g, |_| {
let x = f32::from_bits(f32_range.sample(&mut rng));
@@ -108,7 +106,7 @@ where
if cfg!(target_os = "emscripten") {
return; // using rng pulls in i128 support, which doesn't work
}
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
let f64_range = Uniform::new(0x0000_0000_0000_0001u64, 0x7ff0_0000_0000_0000);
iterate("f64_random_equivalence_test", k, n, f, g, |_| {
let x = f64::from_bits(f64_range.sample(&mut rng));
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 90bc83510..80d30f14c 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -359,7 +359,6 @@ fn align_offset_zst() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_zst_const() {
const {
// For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
@@ -397,7 +396,6 @@ fn align_offset_stride_one() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_stride_one_const() {
const {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
@@ -493,7 +491,6 @@ fn align_offset_various_strides() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_various_strides_const() {
const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
let mut expected = usize::MAX;
@@ -561,7 +558,6 @@ fn align_offset_various_strides_const() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_with_provenance_const() {
const {
// On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
@@ -681,7 +677,6 @@ fn align_offset_issue_103361() {
}
#[test]
-#[cfg(not(bootstrap))]
fn align_offset_issue_103361_const() {
#[cfg(target_pointer_width = "64")]
const SIZE: usize = 1 << 47;
@@ -715,7 +710,6 @@ fn is_aligned() {
}
#[test]
-#[cfg(not(bootstrap))]
fn is_aligned_const() {
const {
let data = 42;
@@ -735,18 +729,6 @@ fn is_aligned_const() {
}
#[test]
-#[cfg(bootstrap)]
-fn is_aligned_const() {
- const {
- let data = 42;
- let ptr: *const i32 = &data;
- // The bootstrap compiler always returns false for is_aligned.
- assert!(!ptr.is_aligned());
- assert!(!ptr.is_aligned_to(1));
- }
-}
-
-#[test]
fn offset_from() {
let mut a = [0; 5];
let ptr1: *mut i32 = &mut a[1];
@@ -825,7 +807,7 @@ fn ptr_metadata_bounds() {
}
// "Synthetic" trait impls generated by the compiler like those of `Pointee`
// are not checked for bounds of associated type.
- // So with a buggy libcore we could have both:
+ // So with a buggy core we could have both:
// * `<dyn Display as Pointee>::Metadata == DynMetadata`
// * `DynMetadata: !PartialEq`
// … and cause an ICE here:
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
index 4e06e0f43..39559cdbb 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
@@ -1488,7 +1488,7 @@ mod slice_index {
// optional:
//
// one or more similar inputs for which data[input] succeeds,
- // and the corresponding output as an array.  This helps validate
+ // and the corresponding output as an array. This helps validate
// "critical points" where an input range straddles the boundary
// between valid and invalid.
// (such as the input `len..len`, which is just barely valid)
@@ -1805,7 +1805,7 @@ fn brute_force_rotate_test_1() {
fn sort_unstable() {
use core::cmp::Ordering::{Equal, Greater, Less};
use core::slice::heapsort;
- use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
+ use rand::{seq::SliceRandom, Rng};
// Miri is too slow (but still need to `chain` to make the types match)
let lens = if cfg!(miri) { (2..20).chain(0..0) } else { (2..25).chain(500..510) };
@@ -1813,7 +1813,7 @@ fn sort_unstable() {
let mut v = [0; 600];
let mut tmp = [0; 600];
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
for len in lens {
let v = &mut v[0..len];
@@ -1879,11 +1879,10 @@ fn sort_unstable() {
#[cfg_attr(miri, ignore)] // Miri is too slow
fn select_nth_unstable() {
use core::cmp::Ordering::{Equal, Greater, Less};
- use rand::rngs::StdRng;
use rand::seq::SliceRandom;
- use rand::{Rng, SeedableRng};
+ use rand::Rng;
- let mut rng = StdRng::from_entropy();
+ let mut rng = crate::test_rng();
for len in (2..21).chain(500..501) {
let mut orig = vec![0; len];
diff --git a/library/core/tests/str.rs b/library/core/tests/str.rs
index ed939ca71..f5066343a 100644
--- a/library/core/tests/str.rs
+++ b/library/core/tests/str.rs
@@ -1 +1 @@
-// All `str` tests live in liballoc/tests
+// All `str` tests live in library/alloc/tests/str.rs
diff --git a/library/core/tests/task.rs b/library/core/tests/task.rs
index 56be30e92..163b34c96 100644
--- a/library/core/tests/task.rs
+++ b/library/core/tests/task.rs
@@ -1,4 +1,4 @@
-use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+use core::task::{Poll, RawWaker, RawWakerVTable, Waker};
#[test]
fn poll_const() {
@@ -21,9 +21,5 @@ fn waker_const() {
static WAKER: Waker = unsafe { Waker::from_raw(VOID_WAKER) };
- static CONTEXT: Context<'static> = Context::from_waker(&WAKER);
-
- static WAKER_REF: &'static Waker = CONTEXT.waker();
-
- WAKER_REF.wake_by_ref();
+ WAKER.wake_by_ref();
}
diff --git a/library/core/tests/time.rs b/library/core/tests/time.rs
index a05128de4..2975c81f8 100644
--- a/library/core/tests/time.rs
+++ b/library/core/tests/time.rs
@@ -174,6 +174,32 @@ fn div() {
}
#[test]
+fn div_duration_f32() {
+ assert_eq!(Duration::ZERO.div_duration_f32(Duration::MAX), 0.0);
+ assert_eq!(Duration::MAX.div_duration_f32(Duration::ZERO), f32::INFINITY);
+ assert_eq!((Duration::SECOND * 2).div_duration_f32(Duration::SECOND), 2.0);
+ assert!(Duration::ZERO.div_duration_f32(Duration::ZERO).is_nan());
+ // These tests demonstrate it doesn't panic with extreme values.
+ // Accuracy of the computed value is not a huge concern, we know floats don't work well
+ // at these extremes.
+ assert!((Duration::MAX).div_duration_f32(Duration::NANOSECOND) > 10.0f32.powf(28.0));
+ assert!((Duration::NANOSECOND).div_duration_f32(Duration::MAX) < 0.1);
+}
+
+#[test]
+fn div_duration_f64() {
+ assert_eq!(Duration::ZERO.div_duration_f64(Duration::MAX), 0.0);
+ assert_eq!(Duration::MAX.div_duration_f64(Duration::ZERO), f64::INFINITY);
+ assert_eq!((Duration::SECOND * 2).div_duration_f64(Duration::SECOND), 2.0);
+ assert!(Duration::ZERO.div_duration_f64(Duration::ZERO).is_nan());
+ // These tests demonstrate it doesn't panic with extreme values.
+ // Accuracy of the computed value is not a huge concern, we know floats don't work well
+ // at these extremes.
+ assert!((Duration::MAX).div_duration_f64(Duration::NANOSECOND) > 10.0f64.powf(28.0));
+ assert!((Duration::NANOSECOND).div_duration_f64(Duration::MAX) < 0.1);
+}
+
+#[test]
fn checked_div() {
assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
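
For context, a sketch of the API these tests exercise; `div_duration_f32`/`div_duration_f64` were still unstable behind `feature(div_duration)` at this point:

```rust
#![feature(div_duration)] // unstable at the time of this change

use std::time::Duration;

fn main() {
    // Dividing one Duration by another yields a dimensionless ratio,
    // e.g. elapsed time as a fraction of a total budget.
    let elapsed = Duration::from_millis(1500);
    let budget = Duration::from_secs(2);
    assert_eq!(elapsed.div_duration_f64(budget), 0.75);
}
```
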
diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs
index cba8ef25d..a3cebf99c 100644
--- a/library/panic_abort/src/lib.rs
+++ b/library/panic_abort/src/lib.rs
@@ -61,7 +61,7 @@ pub unsafe fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
//
// https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail
//
- // Note: this is the same implementation as in libstd's `abort_internal`
+ // Note: this is the same implementation as in std's `abort_internal`
unsafe fn abort() -> ! {
#[allow(unused)]
const FAST_FAIL_FATAL_APP_EXIT: usize = 7;
@@ -89,7 +89,7 @@ pub unsafe fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// This... is a bit of an oddity. The tl;dr; is that this is required to link
// correctly, the longer explanation is below.
//
-// Right now the binaries of libcore/libstd that we ship are all compiled with
+// Right now the binaries of core/std that we ship are all compiled with
// `-C panic=unwind`. This is done to ensure that the binaries are maximally
// compatible with as many situations as possible. The compiler, however,
// requires a "personality function" for all functions compiled with `-C
@@ -109,7 +109,7 @@ pub unsafe fn __rust_start_panic(_payload: *mut &mut dyn BoxMeUp) -> u32 {
// library just defines this symbol so there's at least some personality
// somewhere.
//
-// Essentially this symbol is just defined to get wired up to libcore/libstd
+// Essentially this symbol is just defined to get wired up to core/std
// binaries, but it should never be called as we don't link in an unwinding
// runtime at all.
pub mod personalities {
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index 7e7180a38..ea3c9a7a6 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -82,11 +82,11 @@ cfg_if::cfg_if! {
}
extern "C" {
- /// Handler in libstd called when a panic object is dropped outside of
+ /// Handler in std called when a panic object is dropped outside of
/// `catch_unwind`.
fn __rust_drop_panic() -> !;
- /// Handler in libstd called when a foreign exception is caught.
+ /// Handler in std called when a foreign exception is caught.
fn __rust_foreign_exception() -> !;
}
diff --git a/library/proc_macro/src/bridge/client.rs b/library/proc_macro/src/bridge/client.rs
index 506b2a773..52a08cad9 100644
--- a/library/proc_macro/src/bridge/client.rs
+++ b/library/proc_macro/src/bridge/client.rs
@@ -356,7 +356,7 @@ impl<I, O> Clone for Client<I, O> {
fn maybe_install_panic_hook(force_show_panics: bool) {
// Hide the default panic output within `proc_macro` expansions.
- // NB. the server can't do this because it may use a different libstd.
+ // NB. the server can't do this because it may use a different std.
static HIDE_PANICS_DURING_EXPANSION: Once = Once::new();
HIDE_PANICS_DURING_EXPANSION.call_once(|| {
let prev = panic::take_hook();
diff --git a/library/proc_macro/src/bridge/fxhash.rs b/library/proc_macro/src/bridge/fxhash.rs
index 4b1e412e2..17bd0a1b3 100644
--- a/library/proc_macro/src/bridge/fxhash.rs
+++ b/library/proc_macro/src/bridge/fxhash.rs
@@ -15,9 +15,9 @@ use std::ops::BitXor;
/// Type alias for a hashmap using the `fx` hash algorithm.
pub type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;
-/// A speedy hash algorithm for use within rustc. The hashmap in liballoc
-/// by default uses SipHash which isn't quite as speedy as we want. In the
-/// compiler we're not really worried about DOS attempts, so we use a fast
+/// A speedy hash algorithm for use within rustc. The hashmap in alloc by
+/// default uses SipHash which isn't quite as speedy as we want. In the compiler
+/// we're not really worried about DOS attempts, so we use a fast
/// non-cryptographic hash.
///
/// This is the same as the algorithm used by Firefox -- which is a homespun
diff --git a/library/proc_macro/src/bridge/server.rs b/library/proc_macro/src/bridge/server.rs
index 8202c40d6..2ea87d866 100644
--- a/library/proc_macro/src/bridge/server.rs
+++ b/library/proc_macro/src/bridge/server.rs
@@ -112,7 +112,7 @@ macro_rules! define_dispatcher_impl {
$name::$method(server, $($arg),*)
};
// HACK(eddyb) don't use `panic::catch_unwind` in a panic.
- // If client and server happen to use the same `libstd`,
+ // If client and server happen to use the same `std`,
// `catch_unwind` asserts that the panic counter was 0,
// even when the closure passed to it didn't panic.
let r = if thread::panicking() {
diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs
index 0d3fc2c52..f0e4f5d8a 100644
--- a/library/proc_macro/src/lib.rs
+++ b/library/proc_macro/src/lib.rs
@@ -1493,7 +1493,7 @@ pub mod tracked_env {
use std::ffi::OsStr;
/// Retrieve an environment variable and add it to build dependency info.
- /// Build system executing the compiler will know that the variable was accessed during
+ /// The build system executing the compiler will know that the variable was accessed during
/// compilation, and will be able to rerun the build when the value of that variable changes.
/// Besides the dependency tracking this function should be equivalent to `env::var` from the
/// standard library, except that the argument must be UTF-8.
diff --git a/library/rustc-std-workspace-alloc/lib.rs b/library/rustc-std-workspace-alloc/lib.rs
index c38a8d2f2..87db7af44 100644
--- a/library/rustc-std-workspace-alloc/lib.rs
+++ b/library/rustc-std-workspace-alloc/lib.rs
@@ -3,7 +3,7 @@
// See rustc-std-workspace-core for why this crate is needed.
-// Rename the crate to avoid conflicting with the alloc module in liballoc.
+// Rename the crate to avoid conflicting with the alloc module in alloc.
extern crate alloc as foo;
pub use foo::*;
diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml
index a7aefc26b..adf521d9b 100644
--- a/library/std/Cargo.toml
+++ b/library/std/Cargo.toml
@@ -16,7 +16,7 @@ panic_unwind = { path = "../panic_unwind", optional = true }
panic_abort = { path = "../panic_abort" }
core = { path = "../core" }
libc = { version = "0.2.138", default-features = false, features = ['rustc-dep-of-std'] }
-compiler_builtins = { version = "0.1.82" }
+compiler_builtins = { version = "0.1.85" }
profiler_builtins = { path = "../profiler_builtins", optional = true }
unwind = { path = "../unwind" }
hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] }
@@ -33,7 +33,8 @@ default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned', 'archive']
[dev-dependencies]
-rand = "0.7"
+rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
+rand_xorshift = "0.3.0"
[target.'cfg(any(all(target_family = "wasm", not(target_os = "emscripten")), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies]
dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] }
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index 61c1ff578..c5a5991cc 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -338,7 +338,7 @@ fn default_alloc_error_hook(layout: Layout) {
#[allow(unused_unsafe)]
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
- panic!("memory allocation of {} bytes failed\n", layout.size());
+ panic!("memory allocation of {} bytes failed", layout.size());
} else {
rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
}
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 9cb74f951..7543ffadd 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -23,10 +23,10 @@
//!
//! ## Platform support
//!
-//! Not all platforms that libstd compiles for support capturing backtraces.
-//! Some platforms simply do nothing when capturing a backtrace. To check
-//! whether the platform supports capturing backtraces you can consult the
-//! `BacktraceStatus` enum as a result of `Backtrace::status`.
+//! Not all platforms that std compiles for support capturing backtraces. Some
+//! platforms simply do nothing when capturing a backtrace. To check whether the
+//! platform supports capturing backtraces you can consult the `BacktraceStatus`
+//! enum as a result of `Backtrace::status`.
//!
//! Like above with accuracy, platform support is done on a best-effort basis.
//! Sometimes libraries might not be available at runtime or something may go
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
index 65634f206..6b89518e2 100644
--- a/library/std/src/collections/hash/map/tests.rs
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -3,7 +3,8 @@ use super::HashMap;
use super::RandomState;
use crate::assert_matches::assert_matches;
use crate::cell::RefCell;
-use rand::{thread_rng, Rng};
+use crate::test_helpers::test_rng;
+use rand::Rng;
use realstd::collections::TryReserveErrorKind::*;
// https://github.com/rust-lang/rust/issues/62301
@@ -710,16 +711,16 @@ fn test_entry_take_doesnt_corrupt() {
}
let mut m = HashMap::new();
- let mut rng = thread_rng();
+ let mut rng = test_rng();
// Populate the map with some items.
for _ in 0..50 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
m.insert(x, ());
}
for _ in 0..1000 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
match m.entry(x) {
Vacant(_) => {}
Occupied(e) => {
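
The `gen_range` rewrites track an API change in `rand` 0.8, which takes one range argument where 0.7 took two endpoints. A minimal sketch, assuming a `rand` 0.8 dependency:

```rust
use rand::Rng;

fn main() {
    let mut rng = rand::thread_rng();
    // rand 0.7 spelled this `rng.gen_range(-10, 10)`.
    let x: i32 = rng.gen_range(-10..10);
    assert!((-10..10).contains(&x));
}
```
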
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index cee884145..b59f89d32 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -317,7 +317,7 @@ impl<T, S> HashSet<T, S> {
///
/// let mut set = HashSet::from([1, 2, 3, 4, 5, 6]);
/// set.retain(|&k| k % 2 == 0);
- /// assert_eq!(set.len(), 3);
+ /// assert_eq!(set, HashSet::from([2, 4, 6]));
/// ```
///
/// # Performance
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index 6eb7cbea6..183f9ab3b 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -570,6 +570,13 @@ impl Error for JoinPathsError {
///
/// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-getuserprofiledirectorya
///
+/// # Deprecation
+///
+/// This function is deprecated because the behaviour on Windows is not correct.
+/// The 'HOME' environment variable is not standard on Windows, and may not produce
+/// desired results; for instance, under Cygwin or Mingw it will return `/home/you`
+/// when it should return `C:\Users\you`.
+///
/// # Examples
///
/// ```
@@ -582,7 +589,7 @@ impl Error for JoinPathsError {
/// ```
#[deprecated(
since = "1.29.0",
- note = "This function's behavior is unexpected and probably not what you want. \
+ note = "This function's behavior may be unexpected on Windows. \
Consider using a crate from crates.io instead."
)]
#[must_use]
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index f357d505f..286ad68fd 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -249,8 +249,9 @@ pub struct DirBuilder {
pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
fn inner(path: &Path) -> io::Result<Vec<u8>> {
let mut file = File::open(path)?;
- let mut bytes = Vec::new();
- file.read_to_end(&mut bytes)?;
+ let size = file.metadata().map(|m| m.len()).unwrap_or(0);
+ let mut bytes = Vec::with_capacity(size as usize);
+ io::default_read_to_end(&mut file, &mut bytes)?;
Ok(bytes)
}
inner(path.as_ref())
@@ -288,8 +289,9 @@ pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
pub fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
fn inner(path: &Path) -> io::Result<String> {
let mut file = File::open(path)?;
- let mut string = String::new();
- file.read_to_string(&mut string)?;
+ let size = file.metadata().map(|m| m.len()).unwrap_or(0);
+ let mut string = String::with_capacity(size as usize);
+ io::default_read_to_string(&mut file, &mut string)?;
Ok(string)
}
inner(path.as_ref())
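
`io::default_read_to_end` is a crate-private helper, but the optimization is easy to mirror in user code. A sketch of the same pattern; the metadata size is only a hint, so a racy or wrong value still yields a correct read:

```rust
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;

// Size the buffer from metadata up front so `read_to_end`
// rarely has to reallocate while the file streams in.
fn read_all(path: &Path) -> io::Result<Vec<u8>> {
    let mut file = File::open(path)?;
    let size = file.metadata().map(|m| m.len()).unwrap_or(0);
    let mut bytes = Vec::with_capacity(size as usize);
    file.read_to_end(&mut bytes)?;
    Ok(bytes)
}
```
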
@@ -1510,7 +1512,7 @@ impl FileType {
}
/// Tests whether this file type represents a regular file.
- /// The result is mutually exclusive to the results of
+ /// The result is mutually exclusive to the results of
/// [`is_dir`] and [`is_symlink`]; only zero or one of these
/// tests may pass.
///
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index b8959316d..eb582be01 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -10,7 +10,7 @@ use crate::sys_common::io::test::{tmpdir, TempDir};
use crate::thread;
use crate::time::{Duration, Instant};
-use rand::{rngs::StdRng, RngCore, SeedableRng};
+use rand::RngCore;
#[cfg(unix)]
use crate::os::unix::fs::symlink as symlink_dir;
@@ -1181,7 +1181,7 @@ fn _assert_send_sync() {
#[test]
fn binary_file() {
let mut bytes = [0; 1024];
- StdRng::from_entropy().fill_bytes(&mut bytes);
+ crate::test_helpers::test_rng().fill_bytes(&mut bytes);
let tmpdir = tmpdir();
@@ -1194,7 +1194,7 @@ fn binary_file() {
#[test]
fn write_then_read() {
let mut bytes = [0; 1024];
- StdRng::from_entropy().fill_bytes(&mut bytes);
+ crate::test_helpers::test_rng().fill_bytes(&mut bytes);
let tmpdir = tmpdir();
@@ -1551,3 +1551,47 @@ fn hiberfil_sys() {
fs::metadata(hiberfil).unwrap();
assert_eq!(true, hiberfil.exists());
}
+
+/// Test that two different ways of obtaining the FileType give the same result.
+/// Cf. https://github.com/rust-lang/rust/issues/104900
+#[test]
+fn test_eq_direntry_metadata() {
+ let tmpdir = tmpdir();
+ let file_path = tmpdir.join("file");
+ File::create(file_path).unwrap();
+ for e in fs::read_dir(tmpdir.path()).unwrap() {
+ let e = e.unwrap();
+ let p = e.path();
+ let ft1 = e.file_type().unwrap();
+ let ft2 = p.metadata().unwrap().file_type();
+ assert_eq!(ft1, ft2);
+ }
+}
+
+/// Regression test for https://github.com/rust-lang/rust/issues/50619.
+#[test]
+#[cfg(target_os = "linux")]
+fn test_read_dir_infinite_loop() {
+ use crate::io::ErrorKind;
+ use crate::process::Command;
+
+ // Create a zombie child process
+ let Ok(mut child) = Command::new("echo").spawn() else { return };
+
+ // Make sure the process is (un)dead
+ match child.kill() {
+ // InvalidInput means the child already exited
+ Err(e) if e.kind() != ErrorKind::InvalidInput => return,
+ _ => {}
+ }
+
+ // open() on this path will succeed, but readdir() will fail
+ let id = child.id();
+ let path = format!("/proc/{id}/net");
+
+ // Skip the test if we can't open the directory in the first place
+ let Ok(dir) = fs::read_dir(path) else { return };
+
+ // Check for duplicate errors
+ assert!(dir.filter(|e| e.is_err()).take(2).count() < 2);
+}
diff --git a/library/std/src/io/buffered/tests.rs b/library/std/src/io/buffered/tests.rs
index f4e688eb9..4c1b7d576 100644
--- a/library/std/src/io/buffered/tests.rs
+++ b/library/std/src/io/buffered/tests.rs
@@ -288,8 +288,8 @@ fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));
- // The following seek will require two underlying seeks.  The first will
- // succeed but the second will fail.  This should still invalidate the
+ // The following seek will require two underlying seeks. The first will
+ // succeed but the second will fail. This should still invalidate the
// buffer.
assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
assert_eq!(reader.buffer().len(), 0);
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 781ae03ad..358148405 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -166,7 +166,7 @@ impl Repr {
// `new_unchecked` is safe.
let res = Self(unsafe { NonNull::new_unchecked(tagged) }, PhantomData);
// quickly smoke-check we encoded the right thing (This generally will
- // only run in libstd's tests, unless the user uses -Zbuild-std)
+ // only run in std's tests, unless the user uses -Zbuild-std)
debug_assert!(matches!(res.data(), ErrorData::Custom(_)), "repr(custom) encoding failed");
res
}
@@ -177,7 +177,7 @@ impl Repr {
// Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
// quickly smoke-check we encoded the right thing (This generally will
- // only run in libstd's tests, unless the user uses -Zbuild-std)
+ // only run in std's tests, unless the user uses -Zbuild-std)
debug_assert!(
matches!(res.data(), ErrorData::Os(c) if c == code),
"repr(os) encoding failed for {code}"
@@ -191,7 +191,7 @@ impl Repr {
// Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
// quickly smoke-check we encoded the right thing (This generally will
- // only run in libstd's tests, unless the user uses -Zbuild-std)
+ // only run in std's tests, unless the user uses -Zbuild-std)
debug_assert!(
matches!(res.data(), ErrorData::Simple(k) if k == kind),
"repr(simple) encoding failed {:?}",
@@ -348,7 +348,7 @@ fn kind_from_prim(ek: u32) -> Option<ErrorKind> {
// that our encoding relies on for correctness and soundness. (Some of these are
// a bit overly thorough/cautious, admittedly)
//
-// If any of these are hit on a platform that libstd supports, we should likely
+// If any of these are hit on a platform that std supports, we should likely
// just use `repr_unpacked.rs` there instead (unless the fix is easy).
macro_rules! static_assert {
($condition:expr) => {
@@ -374,10 +374,10 @@ static_assert!((TAG_MASK + 1).is_power_of_two());
static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
-static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE_MESSAGE), TAG_SIMPLE_MESSAGE);
-static_assert!(@usize_eq: (TAG_MASK & TAG_CUSTOM), TAG_CUSTOM);
-static_assert!(@usize_eq: (TAG_MASK & TAG_OS), TAG_OS);
-static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE), TAG_SIMPLE);
+static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE);
+static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM);
+static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS);
+static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE, TAG_SIMPLE);
// This is obviously true (`TAG_CUSTOM` is `0b01`), but in `Repr::new_custom` we
// offset a pointer by this value, and expect it to both be within the same
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 23a13523f..de528e853 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -2137,8 +2137,10 @@ pub trait BufRead: Read {
}
/// Read all bytes until a newline (the `0xA` byte) is reached, and append
- /// them to the provided buffer. You do not need to clear the buffer before
- /// appending.
+ /// them to the provided `String` buffer.
+ ///
+ /// Previous content of the buffer will be preserved. To avoid appending to
+ /// the buffer, you need to [`clear`] it first.
///
/// This function will read bytes from the underlying stream until the
/// newline delimiter (the `0xA` byte) or EOF is found. Once found, all bytes
@@ -2151,9 +2153,11 @@ pub trait BufRead: Read {
///
/// This function is blocking and should be used carefully: it is possible for
/// an attacker to continuously send bytes without ever sending a newline
- /// or EOF.
+ /// or EOF. You can use [`take`] to limit the maximum number of bytes read.
///
/// [`Ok(0)`]: Ok
+ /// [`clear`]: String::clear
+ /// [`take`]: crate::io::Read::take
///
/// # Errors
///
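
For illustration, the `take` mitigation mentioned above caps how much an unterminated stream can make `read_line` buffer:

```rust
use std::io::{BufRead, BufReader, Read};

fn main() -> std::io::Result<()> {
    let data = b"a very long line without a newline...".as_slice();
    // At most 16 bytes are read, even though no newline ever arrives.
    let mut reader = BufReader::new(data).take(16);
    let mut line = String::new();
    let n = reader.read_line(&mut line)?;
    assert_eq!(n, 16);
    Ok(())
}
```
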
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 1141a957d..14bfef4c7 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -10,9 +10,8 @@ use crate::fmt;
use crate::fs::File;
use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
use crate::sync::atomic::{AtomicBool, Ordering};
-use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
+use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard};
use crate::sys::stdio;
-use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
type LocalStream = Arc<Mutex<Vec<u8>>>;
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 65d4c3c89..a7e13f5b8 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -14,7 +14,7 @@
//! # How to read this documentation
//!
//! If you already know the name of what you are looking for, the fastest way to
-//! find it is to use the <a href="#" onclick="focusSearchBar();">search
+//! find it is to use the <a href="#" onclick="window.searchState.focus();">search
//! bar</a> at the top of the page.
//!
//! Otherwise, you may want to jump to one of these useful sections:
@@ -202,7 +202,7 @@
no_global_oom_handling,
not(no_global_oom_handling)
))]
-// To run libstd tests without x.py without ending up with two copies of libstd, Miri needs to be
+// To run std tests without x.py without ending up with two copies of std, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -532,7 +532,7 @@ pub mod process;
pub mod sync;
pub mod time;
-// Pull in `std_float` crate into libstd. The contents of
+// Pull in `std_float` crate into std. The contents of
// `std_float` are in a different repository: rust-lang/portable-simd.
#[path = "../../portable-simd/crates/std_float/src/lib.rs"]
#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
@@ -602,7 +602,7 @@ mod personality;
#[allow(dead_code, unused_attributes, fuzzy_provenance_casts)]
mod backtrace_rs;
-// Re-export macros defined in libcore.
+// Re-export macros defined in core.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(deprecated, deprecated_in_future)]
pub use core::{
@@ -610,7 +610,7 @@ pub use core::{
unimplemented, unreachable, write, writeln,
};
-// Re-export built-in macros defined through libcore.
+// Re-export built-in macros defined through core.
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow(deprecated)]
pub use core::{
@@ -652,3 +652,30 @@ mod sealed {
#[unstable(feature = "sealed", issue = "none")]
pub trait Sealed {}
}
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations.
+pub(crate) mod test_helpers {
+ /// Test-only replacement for `rand::thread_rng()`, which is unusable for
+ /// us, as we want to allow running stdlib tests on tier-3 targets which may
+ /// not have `getrandom` support.
+ ///
+ /// Does a bit of a song and dance to ensure that the seed is different on
+ /// each call (as some tests sadly rely on this), but doesn't try that hard.
+ ///
+ /// This is duplicated in the `core`, `alloc` test suites (as well as
+ /// `std`'s integration tests), but figuring out a mechanism to share these
+ /// seems far more painful than copy-pasting a 7 line function a couple
+ /// times, given that even under a perma-unstable feature, I don't think we
+ /// want to expose types from `rand` from `std`.
+ #[track_caller]
+ pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use core::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = crate::collections::hash_map::RandomState::new().build_hasher();
+ core::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+ }
+}
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index 6e4ba1404..fcc5cfafd 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -3,6 +3,7 @@
//! This module contains a set of macros which are exported from the standard
//! library. Each macro is available for use when linking against the standard
//! library.
+// ignore-tidy-dbg
#[doc = include_str!("../../core/src/macros/panic.md")]
#[macro_export]
diff --git a/library/std/src/net/ip_addr.rs b/library/std/src/net/ip_addr.rs
index 5453853e1..07f08c1b5 100644
--- a/library/std/src/net/ip_addr.rs
+++ b/library/std/src/net/ip_addr.rs
@@ -1195,6 +1195,9 @@ impl Ipv6Addr {
/// An IPv6 address representing localhost: `::1`.
///
+ /// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other
+ /// languages.
+ ///
/// # Examples
///
/// ```
@@ -1203,11 +1206,15 @@ impl Ipv6Addr {
/// let addr = Ipv6Addr::LOCALHOST;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
/// ```
+ #[doc(alias = "IN6ADDR_LOOPBACK_INIT")]
+ #[doc(alias = "in6addr_loopback")]
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
/// An IPv6 address representing the unspecified address: `::`
///
+ /// This corresponds to constant `IN6ADDR_ANY_INIT` or `in6addr_any` in other languages.
+ ///
/// # Examples
///
/// ```
@@ -1216,6 +1223,8 @@ impl Ipv6Addr {
/// let addr = Ipv6Addr::UNSPECIFIED;
/// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
/// ```
+ #[doc(alias = "IN6ADDR_ANY_INIT")]
+ #[doc(alias = "in6addr_any")]
#[stable(feature = "ip_constructors", since = "1.30.0")]
pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
diff --git a/library/std/src/os/fd/mod.rs b/library/std/src/os/fd/mod.rs
index c6aa7c77d..35de4860f 100644
--- a/library/std/src/os/fd/mod.rs
+++ b/library/std/src/os/fd/mod.rs
@@ -3,7 +3,7 @@
//! This module is supported on Unix platforms and WASI, which both use a
//! similar file descriptor system for referencing OS resources.
-#![stable(feature = "io_safety", since = "1.63.0")]
+#![stable(feature = "os_fd", since = "1.66.0")]
#![deny(unsafe_op_in_unsafe_fn)]
// `RawFd`, `AsRawFd`, etc.
@@ -19,7 +19,7 @@ mod net;
mod tests;
// Export the types and traits for the public API.
-#[unstable(feature = "os_fd", issue = "98699")]
+#[stable(feature = "os_fd", since = "1.66.0")]
pub use owned::*;
-#[unstable(feature = "os_fd", issue = "98699")]
+#[stable(feature = "os_fd", since = "1.66.0")]
pub use raw::*;
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index c16518577..c41e093a7 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -100,7 +100,7 @@ impl BorrowedFd<'_> {
// For ESP-IDF, F_DUPFD is used instead, because the CLOEXEC semantics
// will never be supported, as this is a bare metal framework with
- // no capabilities for multi-process execution.  While F_DUPFD is also
+ // no capabilities for multi-process execution. While F_DUPFD is also
// not supported yet, it might be (currently it returns ENOSYS).
#[cfg(target_os = "espidf")]
let cmd = libc::F_DUPFD;
diff --git a/library/std/src/os/net/linux_ext/addr.rs b/library/std/src/os/net/linux_ext/addr.rs
index df3fc8e6a..85065984f 100644
--- a/library/std/src/os/net/linux_ext/addr.rs
+++ b/library/std/src/os/net/linux_ext/addr.rs
@@ -38,7 +38,7 @@ pub trait SocketAddrExt: Sealed {
/// Ok(())
/// }
/// ```
- fn from_abstract_name<N>(name: &N) -> crate::io::Result<SocketAddr>
+ fn from_abstract_name<N>(name: N) -> crate::io::Result<SocketAddr>
where
N: AsRef<[u8]>;
diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs
index 81ac829d2..ece2b33bd 100644
--- a/library/std/src/os/unix/net/addr.rs
+++ b/library/std/src/os/unix/net/addr.rs
@@ -256,7 +256,7 @@ impl linux_ext::addr::SocketAddrExt for SocketAddr {
if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None }
}
- fn from_abstract_name<N>(name: &N) -> crate::io::Result<Self>
+ fn from_abstract_name<N>(name: N) -> crate::io::Result<Self>
where
N: AsRef<[u8]>,
{
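
A sketch of what the relaxed signature buys callers (abstract socket names are Linux-only, and the API was still unstable as `unix_socket_abstract` here): the name is now passed by value as any `AsRef<[u8]>`:

```rust
#![feature(unix_socket_abstract)] // unstable at the time of this change

use std::os::linux::net::SocketAddrExt;
use std::os::unix::net::{SocketAddr, UnixListener};

fn main() -> std::io::Result<()> {
    // `N: AsRef<[u8]>` by value: a byte-string literal, Vec<u8>, or
    // String all work without an extra borrow at the call site.
    let addr = SocketAddr::from_abstract_name(b"rust_example_socket")?;
    let _listener = UnixListener::bind_addr(&addr)?;
    Ok(())
}
```
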
diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
index c4f022de0..9fa8f5702 100644
--- a/library/std/src/panic.rs
+++ b/library/std/src/panic.rs
@@ -114,6 +114,9 @@ where
/// aborting the process as well. This function *only* catches unwinding panics,
/// not those that abort the process.
///
+/// Note that if a custom panic hook has been set, it will be invoked before
+/// the panic is caught, before unwinding.
+///
/// Also note that unwinding into Rust code with a foreign exception (e.g.
/// an exception thrown from C++ code) is undefined behavior.
///
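
For illustration (not part of the patch), the ordering documented here can be observed directly:

```rust
use std::panic;

fn main() {
    // The custom hook runs before unwinding begins ...
    panic::set_hook(Box::new(|info| eprintln!("hook saw: {info}")));
    // ... and only afterwards does `catch_unwind` regain control.
    let result = panic::catch_unwind(|| panic!("boom"));
    assert!(result.is_err());
    let _ = panic::take_hook(); // restore the default hook
}
```
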
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 1039835bb..b0db3112e 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -306,11 +306,11 @@ pub mod panic_count {
// and after increase and decrease, but not necessarily during their execution.
//
// Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG)
- // records whether panic::always_abort() has been called.  This can only be
+ // records whether panic::always_abort() has been called. This can only be
// set, never cleared.
// panic::always_abort() is usually called to prevent memory allocations done by
// the panic handling in the child created by `libc::fork`.
- // Memory allocations performed in a child created with `libc::fork` are undefined
+ // Memory allocations performed in a child created with `libc::fork` are undefined
// behavior in most operating systems.
// Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory
// allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is
@@ -517,7 +517,7 @@ pub fn panicking() -> bool {
!panic_count::count_is_zero()
}
-/// Entry point of panics from the libcore crate (`panic_impl` lang item).
+/// Entry point of panics from the core crate (`panic_impl` lang item).
#[cfg(not(test))]
#[panic_handler]
pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! {
@@ -699,7 +699,11 @@ fn rust_panic_with_hook(
// have limited options. Currently our preference is to
// just abort. In the future we may consider resuming
// unwinding or otherwise exiting the thread cleanly.
- rtprintpanic!("thread panicked while panicking. aborting.\n");
+ if !can_unwind {
+ rtprintpanic!("thread caused non-unwinding panic. aborting.\n");
+ } else {
+ rtprintpanic!("thread panicked while panicking. aborting.\n");
+ }
crate::sys::abort_internal();
}
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index 6c957c2fa..4778114b4 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -271,7 +271,7 @@ pub const MAIN_SEPARATOR: char = crate::sys::path::MAIN_SEP;
/// The primary separator of path components for the current platform.
///
/// For example, `/` on Unix and `\` on Windows.
-#[unstable(feature = "main_separator_str", issue = "94071")]
+#[stable(feature = "main_separator_str", since = "1.68.0")]
pub const MAIN_SEPARATOR_STR: &str = crate::sys::path::MAIN_SEP_STR;
////////////////////////////////////////////////////////////////////////////////
@@ -306,7 +306,7 @@ unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
// These casts are safe as OsStr is internally a wrapper around [u8] on all
// platforms.
//
- // Note that currently this relies on the special knowledge that libstd has;
+ // Note that currently this relies on the special knowledge that std has;
// these types are single-element structs but are not marked
// repr(transparent) or repr(C) which would make these casts not allowable
// outside std.
@@ -607,7 +607,7 @@ pub struct Components<'a> {
// true if path *physically* has a root separator; for most Windows
// prefixes, it may have a "logical" root separator for the purposes of
- // normalization, e.g.,  \\server\share == \\server\share\.
+ // normalization, e.g., \\server\share == \\server\share\.
has_physical_root: bool,
// The iterator is double-ended, and these two states keep track of what has
@@ -1246,6 +1246,9 @@ impl PathBuf {
/// and `path` is not empty, the new path is normalized: all references
/// to `.` and `..` are removed.
///
+ /// Consider using [`Path::join`] if you need a new `PathBuf` instead of
+ /// using this function on a cloned `PathBuf`.
+ ///
/// # Examples
///
/// Pushing a relative path extends the existing path:
@@ -1411,7 +1414,8 @@ impl PathBuf {
self.push(file_name);
}
- /// Updates [`self.extension`] to `extension`.
+ /// Updates [`self.extension`] to `Some(extension)` or to `None` if
+ /// `extension` is empty.
///
/// Returns `false` and does nothing if [`self.file_name`] is [`None`],
/// returns `true` and updates the extension otherwise.
@@ -1419,6 +1423,20 @@ impl PathBuf {
/// If [`self.extension`] is [`None`], the extension is added; otherwise
/// it is replaced.
///
+ /// If `extension` is the empty string, [`self.extension`] will be [`None`]
+ /// afterwards, not `Some("")`.
+ ///
+ /// # Caveats
+ ///
+ /// The new `extension` may contain dots and will be used in its entirety,
+ /// but only the part after the final dot will be reflected in
+ /// [`self.extension`].
+ ///
+ /// If the file stem contains internal dots and `extension` is empty, part
+ /// of the old file stem will be considered the new [`self.extension`].
+ ///
+ /// See the examples below.
+ ///
/// [`self.file_name`]: Path::file_name
/// [`self.extension`]: Path::extension
///
@@ -1432,8 +1450,20 @@ impl PathBuf {
/// p.set_extension("force");
/// assert_eq!(Path::new("/feel/the.force"), p.as_path());
///
- /// p.set_extension("dark_side");
- /// assert_eq!(Path::new("/feel/the.dark_side"), p.as_path());
+ /// p.set_extension("dark.side");
+ /// assert_eq!(Path::new("/feel/the.dark.side"), p.as_path());
+ ///
+ /// p.set_extension("cookie");
+ /// assert_eq!(Path::new("/feel/the.dark.cookie"), p.as_path());
+ ///
+ /// p.set_extension("");
+ /// assert_eq!(Path::new("/feel/the.dark"), p.as_path());
+ ///
+ /// p.set_extension("");
+ /// assert_eq!(Path::new("/feel/the"), p.as_path());
+ ///
+ /// p.set_extension("");
+ /// assert_eq!(Path::new("/feel/the"), p.as_path());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn set_extension<S: AsRef<OsStr>>(&mut self, extension: S) -> bool {
@@ -1748,6 +1778,14 @@ impl ops::Deref for PathBuf {
}
}
+#[stable(feature = "path_buf_deref_mut", since = "1.68.0")]
+impl ops::DerefMut for PathBuf {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Path {
+ Path::from_inner_mut(&mut self.inner)
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Borrow<Path> for PathBuf {
#[inline]
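
A sketch of what the new impl enables: `Path`'s `&mut self` methods, such as the unstable `as_mut_os_str` documented later in this patch, now apply to a `PathBuf` through deref coercion:

```rust
#![feature(path_as_mut_os_str)] // unstable companion API used below

use std::path::{Path, PathBuf};

fn main() {
    let mut p = PathBuf::from("FOO.txt");
    // `DerefMut` coerces `&mut PathBuf` to `&mut Path` here.
    p.as_mut_os_str().make_ascii_lowercase();
    assert_eq!(p, Path::new("foo.txt"));
}
```
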
@@ -2000,6 +2038,12 @@ impl Path {
unsafe { &*(s.as_ref() as *const OsStr as *const Path) }
}
+ fn from_inner_mut(inner: &mut OsStr) -> &mut Path {
+ // SAFETY: Path is just a wrapper around OsStr,
+ // therefore converting &mut OsStr to &mut Path is safe.
+ unsafe { &mut *(inner as *mut OsStr as *mut Path) }
+ }
+
/// Yields the underlying [`OsStr`] slice.
///
/// # Examples
@@ -2025,12 +2069,12 @@ impl Path {
/// #![feature(path_as_mut_os_str)]
/// use std::path::{Path, PathBuf};
///
- /// let mut path = PathBuf::from("/Foo.TXT").into_boxed_path();
+ /// let mut path = PathBuf::from("Foo.TXT");
///
- /// assert_ne!(&*path, Path::new("/foo.txt"));
+ /// assert_ne!(path, Path::new("foo.txt"));
///
/// path.as_mut_os_str().make_ascii_lowercase();
- /// assert_eq!(&*path, Path::new("/foo.txt"));
+ /// assert_eq!(path, Path::new("foo.txt"));
/// ```
#[unstable(feature = "path_as_mut_os_str", issue = "105021")]
#[must_use]
@@ -3133,9 +3177,9 @@ impl<'a> IntoIterator for &'a Path {
}
macro_rules! impl_cmp {
- ($lhs:ty, $rhs: ty) => {
+ (<$($life:lifetime),*> $lhs:ty, $rhs: ty) => {
#[stable(feature = "partialeq_path", since = "1.6.0")]
- impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ impl<$($life),*> PartialEq<$rhs> for $lhs {
#[inline]
fn eq(&self, other: &$rhs) -> bool {
<Path as PartialEq>::eq(self, other)
@@ -3143,7 +3187,7 @@ macro_rules! impl_cmp {
}
#[stable(feature = "partialeq_path", since = "1.6.0")]
- impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ impl<$($life),*> PartialEq<$lhs> for $rhs {
#[inline]
fn eq(&self, other: &$lhs) -> bool {
<Path as PartialEq>::eq(self, other)
@@ -3151,7 +3195,7 @@ macro_rules! impl_cmp {
}
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+ impl<$($life),*> PartialOrd<$rhs> for $lhs {
#[inline]
fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
<Path as PartialOrd>::partial_cmp(self, other)
@@ -3159,7 +3203,7 @@ macro_rules! impl_cmp {
}
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+ impl<$($life),*> PartialOrd<$lhs> for $rhs {
#[inline]
fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
<Path as PartialOrd>::partial_cmp(self, other)
@@ -3168,16 +3212,16 @@ macro_rules! impl_cmp {
};
}
-impl_cmp!(PathBuf, Path);
-impl_cmp!(PathBuf, &'a Path);
-impl_cmp!(Cow<'a, Path>, Path);
-impl_cmp!(Cow<'a, Path>, &'b Path);
-impl_cmp!(Cow<'a, Path>, PathBuf);
+impl_cmp!(<> PathBuf, Path);
+impl_cmp!(<'a> PathBuf, &'a Path);
+impl_cmp!(<'a> Cow<'a, Path>, Path);
+impl_cmp!(<'a, 'b> Cow<'a, Path>, &'b Path);
+impl_cmp!(<'a> Cow<'a, Path>, PathBuf);
macro_rules! impl_cmp_os_str {
- ($lhs:ty, $rhs: ty) => {
+ (<$($life:lifetime),*> $lhs:ty, $rhs: ty) => {
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ impl<$($life),*> PartialEq<$rhs> for $lhs {
#[inline]
fn eq(&self, other: &$rhs) -> bool {
<Path as PartialEq>::eq(self, other.as_ref())
@@ -3185,7 +3229,7 @@ macro_rules! impl_cmp_os_str {
}
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ impl<$($life),*> PartialEq<$lhs> for $rhs {
#[inline]
fn eq(&self, other: &$lhs) -> bool {
<Path as PartialEq>::eq(self.as_ref(), other)
@@ -3193,7 +3237,7 @@ macro_rules! impl_cmp_os_str {
}
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+ impl<$($life),*> PartialOrd<$rhs> for $lhs {
#[inline]
fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
<Path as PartialOrd>::partial_cmp(self, other.as_ref())
@@ -3201,7 +3245,7 @@ macro_rules! impl_cmp_os_str {
}
#[stable(feature = "cmp_path", since = "1.8.0")]
- impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+ impl<$($life),*> PartialOrd<$lhs> for $rhs {
#[inline]
fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
<Path as PartialOrd>::partial_cmp(self.as_ref(), other)
@@ -3210,20 +3254,20 @@ macro_rules! impl_cmp_os_str {
};
}
-impl_cmp_os_str!(PathBuf, OsStr);
-impl_cmp_os_str!(PathBuf, &'a OsStr);
-impl_cmp_os_str!(PathBuf, Cow<'a, OsStr>);
-impl_cmp_os_str!(PathBuf, OsString);
-impl_cmp_os_str!(Path, OsStr);
-impl_cmp_os_str!(Path, &'a OsStr);
-impl_cmp_os_str!(Path, Cow<'a, OsStr>);
-impl_cmp_os_str!(Path, OsString);
-impl_cmp_os_str!(&'a Path, OsStr);
-impl_cmp_os_str!(&'a Path, Cow<'b, OsStr>);
-impl_cmp_os_str!(&'a Path, OsString);
-impl_cmp_os_str!(Cow<'a, Path>, OsStr);
-impl_cmp_os_str!(Cow<'a, Path>, &'b OsStr);
-impl_cmp_os_str!(Cow<'a, Path>, OsString);
+impl_cmp_os_str!(<> PathBuf, OsStr);
+impl_cmp_os_str!(<'a> PathBuf, &'a OsStr);
+impl_cmp_os_str!(<'a> PathBuf, Cow<'a, OsStr>);
+impl_cmp_os_str!(<> PathBuf, OsString);
+impl_cmp_os_str!(<> Path, OsStr);
+impl_cmp_os_str!(<'a> Path, &'a OsStr);
+impl_cmp_os_str!(<'a> Path, Cow<'a, OsStr>);
+impl_cmp_os_str!(<> Path, OsString);
+impl_cmp_os_str!(<'a> &'a Path, OsStr);
+impl_cmp_os_str!(<'a, 'b> &'a Path, Cow<'b, OsStr>);
+impl_cmp_os_str!(<'a> &'a Path, OsString);
+impl_cmp_os_str!(<'a> Cow<'a, Path>, OsStr);
+impl_cmp_os_str!(<'a, 'b> Cow<'a, Path>, &'b OsStr);
+impl_cmp_os_str!(<'a> Cow<'a, Path>, OsString);
#[stable(since = "1.7.0", feature = "strip_prefix")]
impl fmt::Display for StripPrefixError {
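
The point of threading explicit lifetimes through these macros is that the old blanket `impl<'a, 'b>` declared lifetime parameters many expansions never used. A standalone sketch of the shape `impl_cmp!(<'a> PathBuf, &'a Path)` now generates; a local wrapper stands in for `PathBuf`, since coherence forbids re-implementing the real impls here:

```rust
use std::path::{Path, PathBuf};

struct Wrapper(PathBuf);

// Exactly one lifetime parameter, and it is used by the `Rhs` type.
impl<'a> PartialEq<&'a Path> for Wrapper {
    fn eq(&self, other: &&'a Path) -> bool {
        self.0.as_path() == *other
    }
}

fn main() {
    assert!(Wrapper(PathBuf::from("/tmp")) == Path::new("/tmp"));
}
```
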
diff --git a/library/std/src/personality/dwarf/eh.rs b/library/std/src/personality/dwarf/eh.rs
index a783e1870..87585a8fc 100644
--- a/library/std/src/personality/dwarf/eh.rs
+++ b/library/std/src/personality/dwarf/eh.rs
@@ -84,7 +84,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
- let cs_action = reader.read_uleb128();
+ let cs_action_entry = reader.read_uleb128();
// Callsite table is sorted by cs_start, so if we've passed the ip, we
// may stop searching.
if ip < func_start + cs_start {
@@ -95,7 +95,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
return Ok(EHAction::None);
} else {
let lpad = lpad_base + cs_lpad;
- return Ok(interpret_cs_action(cs_action, lpad));
+ return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
}
}
}
@@ -113,26 +113,39 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
let mut idx = ip;
loop {
let cs_lpad = reader.read_uleb128();
- let cs_action = reader.read_uleb128();
+ let cs_action_entry = reader.read_uleb128();
idx -= 1;
if idx == 0 {
// Can never have null landing pad for sjlj -- that would have
// been indicated by a -1 call site index.
let lpad = (cs_lpad + 1) as usize;
- return Ok(interpret_cs_action(cs_action, lpad));
+ return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
}
}
}
}
-fn interpret_cs_action(cs_action: u64, lpad: usize) -> EHAction {
- if cs_action == 0 {
- // If cs_action is 0 then this is a cleanup (Drop::drop). We run these
+unsafe fn interpret_cs_action(
+ action_table: *mut u8,
+ cs_action_entry: u64,
+ lpad: usize,
+) -> EHAction {
+ if cs_action_entry == 0 {
+ // If cs_action_entry is 0 then this is a cleanup (Drop::drop). We run these
// for both Rust panics and foreign exceptions.
EHAction::Cleanup(lpad)
} else {
- // Stop unwinding Rust panics at catch_unwind.
- EHAction::Catch(lpad)
+ // If lpad != 0 and cs_action_entry != 0, we have to check ttype_index.
+ // If ttype_index == 0 under the condition, we take cleanup action.
+ let action_record = (action_table as *mut u8).offset(cs_action_entry as isize - 1);
+ let mut action_reader = DwarfReader::new(action_record);
+ let ttype_index = action_reader.read_sleb128();
+ if ttype_index == 0 {
+ EHAction::Cleanup(lpad)
+ } else {
+ // Stop unwinding Rust panics at catch_unwind.
+ EHAction::Catch(lpad)
+ }
}
}
diff --git a/library/std/src/prelude/v1.rs b/library/std/src/prelude/v1.rs
index a5a798078..2aefd7c51 100644
--- a/library/std/src/prelude/v1.rs
+++ b/library/std/src/prelude/v1.rs
@@ -59,14 +59,12 @@ pub use core::prelude::v1::{RustcDecodable, RustcEncodable};
// Do not `doc(no_inline)` so that they become doc items on their own
// (no public module for them to be re-exported from).
-#[cfg(not(bootstrap))]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use core::prelude::v1::alloc_error_handler;
-#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use core::prelude::v1::{bench, derive, global_allocator, test, test_case};
+pub use core::prelude::v1::{
+ alloc_error_handler, bench, derive, global_allocator, test, test_case,
+};
#[unstable(feature = "derive_const", issue = "none")]
-#[cfg(not(bootstrap))]
pub use core::prelude::v1::derive_const;
// Do not `doc(no_inline)` either.
@@ -91,7 +89,6 @@ pub use core::prelude::v1::cfg_eval;
issue = "23416",
reason = "placeholder syntax for type ascription"
)]
-#[cfg(not(bootstrap))]
pub use core::prelude::v1::type_ascribe;
// The file so far is equivalent to src/libcore/prelude/v1.rs,
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 400d25beb..62ce2cb33 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -362,6 +362,10 @@ impl Read for ChildStdout {
fn is_read_vectored(&self) -> bool {
self.inner.is_read_vectored()
}
+
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.inner.read_to_end(buf)
+ }
}
impl AsInner<AnonPipe> for ChildStdout {
@@ -907,10 +911,8 @@ impl Command {
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn output(&mut self) -> io::Result<Output> {
- self.inner
- .spawn(imp::Stdio::MakePipe, false)
- .map(Child::from_inner)
- .and_then(|p| p.wait_with_output())
+ let (status, stdout, stderr) = self.inner.output()?;
+ Ok(Output { status: ExitStatus(status), stdout, stderr })
}
/// Executes a command as a child process, waiting for it to finish and
@@ -1036,6 +1038,15 @@ impl fmt::Debug for Command {
/// Format the program and arguments of a Command for display. Any
/// non-utf8 data is lossily converted using the utf8 replacement
/// character.
+ ///
+ /// The default format approximates a shell invocation of the program along with its
+ /// arguments. It does not include most of the other command properties. The output is not
+ /// guaranteed to work (e.g. due to lack of shell-escaping or differences in path resolution).
+ /// On some platforms you can use [the alternate syntax] to show more fields.
+ ///
+ /// Note that the debug implementation is platform-specific.
+ ///
+ /// [the alternate syntax]: fmt#sign0
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.fmt(f)
}
@@ -2153,18 +2164,11 @@ pub fn id() -> u32 {
/// to provide similar functionality.
#[cfg_attr(not(test), lang = "termination")]
#[stable(feature = "termination_trait_lib", since = "1.61.0")]
-#[rustc_on_unimplemented(
- on(
- all(not(bootstrap), cause = "MainFunctionType"),
- message = "`main` has invalid return type `{Self}`",
- label = "`main` can only return types that implement `{Termination}`"
- ),
- on(
- bootstrap,
- message = "`main` has invalid return type `{Self}`",
- label = "`main` can only return types that implement `{Termination}`"
- )
-)]
+#[rustc_on_unimplemented(on(
+ cause = "MainFunctionType",
+ message = "`main` has invalid return type `{Self}`",
+ label = "`main` can only return types that implement `{Termination}`"
+))]
pub trait Termination {
/// Is called to get the representation of the value as status code.
/// This status code is returned to the operating system.
diff --git a/library/std/src/process/tests.rs b/library/std/src/process/tests.rs
index 955ad6891..b4f6cc2da 100644
--- a/library/std/src/process/tests.rs
+++ b/library/std/src/process/tests.rs
@@ -417,6 +417,100 @@ fn env_empty() {
assert!(p.is_ok());
}
+#[test]
+#[cfg(not(windows))]
+#[cfg_attr(any(target_os = "emscripten", target_env = "sgx"), ignore)]
+fn debug_print() {
+ const PIDFD: &'static str =
+ if cfg!(target_os = "linux") { " create_pidfd: false,\n" } else { "" };
+
+ let mut command = Command::new("some-boring-name");
+
+ assert_eq!(format!("{command:?}"), format!(r#""some-boring-name""#));
+
+ assert_eq!(
+ format!("{command:#?}"),
+ format!(
+ r#"Command {{
+ program: "some-boring-name",
+ args: [
+ "some-boring-name",
+ ],
+{PIDFD}}}"#
+ )
+ );
+
+ command.args(&["1", "2", "3"]);
+
+ assert_eq!(format!("{command:?}"), format!(r#""some-boring-name" "1" "2" "3""#));
+
+ assert_eq!(
+ format!("{command:#?}"),
+ format!(
+ r#"Command {{
+ program: "some-boring-name",
+ args: [
+ "some-boring-name",
+ "1",
+ "2",
+ "3",
+ ],
+{PIDFD}}}"#
+ )
+ );
+
+ crate::os::unix::process::CommandExt::arg0(&mut command, "exciting-name");
+
+ assert_eq!(
+ format!("{command:?}"),
+ format!(r#"["some-boring-name"] "exciting-name" "1" "2" "3""#)
+ );
+
+ assert_eq!(
+ format!("{command:#?}"),
+ format!(
+ r#"Command {{
+ program: "some-boring-name",
+ args: [
+ "exciting-name",
+ "1",
+ "2",
+ "3",
+ ],
+{PIDFD}}}"#
+ )
+ );
+
+ let mut command_with_env_and_cwd = Command::new("boring-name");
+ command_with_env_and_cwd.current_dir("/some/path").env("FOO", "bar");
+ assert_eq!(
+ format!("{command_with_env_and_cwd:?}"),
+ r#"cd "/some/path" && FOO="bar" "boring-name""#
+ );
+ assert_eq!(
+ format!("{command_with_env_and_cwd:#?}"),
+ format!(
+ r#"Command {{
+ program: "boring-name",
+ args: [
+ "boring-name",
+ ],
+ env: CommandEnv {{
+ clear: false,
+ vars: {{
+ "FOO": Some(
+ "bar",
+ ),
+ }},
+ }},
+ cwd: Some(
+ "/some/path",
+ ),
+{PIDFD}}}"#
+ )
+ );
+}
+
// See issue #91991
#[test]
#[cfg(windows)]
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
index 9c2f0c1dd..f1eeb75be 100644
--- a/library/std/src/rt.rs
+++ b/library/std/src/rt.rs
@@ -139,9 +139,9 @@ fn lang_start_internal(
// mechanism itself.
//
// There are a couple of instances where unwinding can begin. First is inside of the
- // `rt::init`, `rt::cleanup` and similar functions controlled by libstd. In those instances a
- // panic is a libstd implementation bug. A quite likely one too, as there isn't any way to
- // prevent libstd from accidentally introducing a panic to these functions. Another is from
+ // `rt::init`, `rt::cleanup` and similar functions controlled by std. In those instances a
+ // panic is a std implementation bug. A quite likely one too, as there isn't any way to
+ // prevent std from accidentally introducing a panic to these functions. Another is from
// user code from `main` or, more nefariously, as described in e.g. issue #86030.
// SAFETY: Only called once during runtime initialization.
panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) }).map_err(rt_abort)?;
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index c8d3289ca..4a1530530 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -46,17 +46,15 @@ pub struct LazyLock<T, F = fn() -> T> {
cell: OnceLock<T>,
init: Cell<Option<F>>,
}
-
-impl<T, F> LazyLock<T, F> {
+impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// Creates a new lazy value with the given initializing
/// function.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub const fn new(f: F) -> LazyLock<T, F> {
LazyLock { cell: OnceLock::new(), init: Cell::new(Some(f)) }
}
-}
-impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// Forces the evaluation of this lazy value and
/// returns a reference to result. This is equivalent
/// to the `Deref` impl, but is explicit.
@@ -73,6 +71,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
/// assert_eq!(LazyLock::force(&lazy), &92);
/// assert_eq!(&*lazy, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn force(this: &LazyLock<T, F>) -> &T {
this.cell.get_or_init(|| match this.init.take() {
@@ -85,6 +84,8 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
type Target = T;
+
+ #[inline]
fn deref(&self) -> &T {
LazyLock::force(self)
}
@@ -93,6 +94,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Default> Default for LazyLock<T> {
/// Creates a new lazy value using `Default` as the initializing function.
+ #[inline]
fn default() -> LazyLock<T> {
LazyLock::new(T::default)
}
diff --git a/library/std/src/sync/lazy_lock/tests.rs b/library/std/src/sync/lazy_lock/tests.rs
index f11b66bfc..a5d4e25c5 100644
--- a/library/std/src/sync/lazy_lock/tests.rs
+++ b/library/std/src/sync/lazy_lock/tests.rs
@@ -136,6 +136,12 @@ fn sync_lazy_poisoning() {
}
}
+// Check that we can infer `T` from closure's type.
+#[test]
+fn lazy_type_inference() {
+ let _ = LazyCell::new(|| ());
+}
+
#[test]
fn is_sync_send() {
fn assert_traits<T: Send + Sync>() {}
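The impl-block merge above is what the new `lazy_type_inference` test exercises: once `new` carries the `F: FnOnce() -> T` bound, the compiler can deduce `T` from the closure's return type. A reduced sketch of the inference, using a hypothetical `Lazy` stand-in for `LazyCell`/`LazyLock`:

```rust
// Sketch: the bound on `new` is what ties `F`'s return type to `T`.
struct Lazy<T, F = fn() -> T> {
    init: Option<F>,
    value: Option<T>,
}

impl<T, F: FnOnce() -> T> Lazy<T, F> {
    fn new(f: F) -> Self {
        Lazy { init: Some(f), value: None }
    }
}

fn main() {
    // `T = u32` is deduced from the closure. With `new` in an
    // unbounded `impl<T, F>` block, nothing connected F to T and
    // this call needed an explicit type annotation.
    let _lazy = Lazy::new(|| 42u32);
}
```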
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index 4fee8d3e9..ba20bab87 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -177,6 +177,8 @@ pub use self::lazy_lock::LazyLock;
#[unstable(feature = "once_cell", issue = "74465")]
pub use self::once_lock::OnceLock;
+pub(crate) use self::remutex::{ReentrantMutex, ReentrantMutexGuard};
+
pub mod mpsc;
mod barrier;
@@ -187,4 +189,5 @@ mod mutex;
mod once;
mod once_lock;
mod poison;
+mod remutex;
mod rwlock;
diff --git a/library/std/src/sync/mutex/tests.rs b/library/std/src/sync/mutex/tests.rs
index 93900566f..1786a3c09 100644
--- a/library/std/src/sync/mutex/tests.rs
+++ b/library/std/src/sync/mutex/tests.rs
@@ -181,7 +181,7 @@ fn test_mutex_arc_poison() {
let arc2 = arc.clone();
let _ = thread::spawn(move || {
let lock = arc2.lock().unwrap();
- assert_eq!(*lock, 2);
+ assert_eq!(*lock, 2); // deliberate assertion failure to poison the mutex
})
.join();
assert!(arc.lock().is_err());
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 16d1fd2a5..ed339ca5d 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -61,8 +61,9 @@ pub struct OnceLock<T> {
impl<T> OnceLock<T> {
/// Creates a new empty cell.
- #[unstable(feature = "once_cell", issue = "74465")]
+ #[inline]
#[must_use]
+ #[unstable(feature = "once_cell", issue = "74465")]
pub const fn new() -> OnceLock<T> {
OnceLock {
once: Once::new(),
@@ -75,6 +76,7 @@ impl<T> OnceLock<T> {
///
/// Returns `None` if the cell is empty, or being initialized. This
/// method never blocks.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get(&self) -> Option<&T> {
if self.is_initialized() {
@@ -88,6 +90,7 @@ impl<T> OnceLock<T> {
/// Gets the mutable reference to the underlying value.
///
/// Returns `None` if the cell is empty. This method never blocks.
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_mut(&mut self) -> Option<&mut T> {
if self.is_initialized() {
@@ -125,6 +128,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(CELL.get(), Some(&92));
/// }
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn set(&self, value: T) -> Result<(), T> {
let mut value = Some(value);
@@ -164,6 +168,7 @@ impl<T> OnceLock<T> {
/// let value = cell.get_or_init(|| unreachable!());
/// assert_eq!(value, &92);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_or_init<F>(&self, f: F) -> &T
where
@@ -203,6 +208,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(value, Ok(&92));
/// assert_eq!(cell.get(), Some(&92))
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
where
@@ -241,6 +247,7 @@ impl<T> OnceLock<T> {
/// cell.set("hello".to_string()).unwrap();
/// assert_eq!(cell.into_inner(), Some("hello".to_string()));
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn into_inner(mut self) -> Option<T> {
self.take()
@@ -267,6 +274,7 @@ impl<T> OnceLock<T> {
/// assert_eq!(cell.take(), Some("hello".to_string()));
/// assert_eq!(cell.get(), None);
/// ```
+ #[inline]
#[unstable(feature = "once_cell", issue = "74465")]
pub fn take(&mut self) -> Option<T> {
if self.is_initialized() {
@@ -315,6 +323,7 @@ impl<T> OnceLock<T> {
/// # Safety
///
/// The value must be initialized
+ #[inline]
unsafe fn get_unchecked(&self) -> &T {
debug_assert!(self.is_initialized());
(&*self.value.get()).assume_init_ref()
@@ -323,6 +332,7 @@ impl<T> OnceLock<T> {
/// # Safety
///
/// The value must be initialized
+ #[inline]
unsafe fn get_unchecked_mut(&mut self) -> &mut T {
debug_assert!(self.is_initialized());
(&mut *self.value.get()).assume_init_mut()
@@ -360,6 +370,7 @@ impl<T> const Default for OnceLock<T> {
/// assert_eq!(OnceLock::<()>::new(), OnceLock::default());
/// }
/// ```
+ #[inline]
fn default() -> OnceLock<T> {
OnceLock::new()
}
@@ -377,6 +388,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Clone> Clone for OnceLock<T> {
+ #[inline]
fn clone(&self) -> OnceLock<T> {
let cell = Self::new();
if let Some(value) = self.get() {
@@ -408,6 +420,7 @@ impl<T> From<T> for OnceLock<T> {
/// Ok(())
/// # }
/// ```
+ #[inline]
fn from(value: T) -> Self {
let cell = Self::new();
match cell.set(value) {
@@ -419,6 +432,7 @@ impl<T> From<T> for OnceLock<T> {
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: PartialEq> PartialEq for OnceLock<T> {
+ #[inline]
fn eq(&self, other: &OnceLock<T>) -> bool {
self.get() == other.get()
}
@@ -429,6 +443,7 @@ impl<T: Eq> Eq for OnceLock<T> {}
#[unstable(feature = "once_cell", issue = "74465")]
unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
+ #[inline]
fn drop(&mut self) {
if self.is_initialized() {
// SAFETY: The cell is initialized and being dropped, so it can't
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sync/remutex.rs
index 4c054da64..4c054da64 100644
--- a/library/std/src/sys_common/remutex.rs
+++ b/library/std/src/sync/remutex.rs
diff --git a/library/std/src/sync/remutex/tests.rs b/library/std/src/sync/remutex/tests.rs
new file mode 100644
index 000000000..fc553081d
--- /dev/null
+++ b/library/std/src/sync/remutex/tests.rs
@@ -0,0 +1,60 @@
+use super::{ReentrantMutex, ReentrantMutexGuard};
+use crate::cell::RefCell;
+use crate::sync::Arc;
+use crate::thread;
+
+#[test]
+fn smoke() {
+ let m = ReentrantMutex::new(());
+ {
+ let a = m.lock();
+ {
+ let b = m.lock();
+ {
+ let c = m.lock();
+ assert_eq!(*c, ());
+ }
+ assert_eq!(*b, ());
+ }
+ assert_eq!(*a, ());
+ }
+}
+
+#[test]
+fn is_mutex() {
+ let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+ let m2 = m.clone();
+ let lock = m.lock();
+ let child = thread::spawn(move || {
+ let lock = m2.lock();
+ assert_eq!(*lock.borrow(), 4950);
+ });
+ for i in 0..100 {
+ let lock = m.lock();
+ *lock.borrow_mut() += i;
+ }
+ drop(lock);
+ child.join().unwrap();
+}
+
+#[test]
+fn trylock_works() {
+ let m = Arc::new(ReentrantMutex::new(()));
+ let m2 = m.clone();
+ let _lock = m.try_lock();
+ let _lock2 = m.try_lock();
+ thread::spawn(move || {
+ let lock = m2.try_lock();
+ assert!(lock.is_none());
+ })
+ .join()
+ .unwrap();
+ let _lock3 = m.try_lock();
+}
+
+pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
+impl Drop for Answer<'_> {
+ fn drop(&mut self) {
+ *self.0.borrow_mut() = 42;
+ }
+}
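A reentrant mutex can hand several guards to the same thread at once, so `ReentrantMutexGuard` can only ever expose `&T`, never `&mut T`; that is why these tests wrap their counter in a `RefCell`, moving the exclusive-access check to runtime. The shape of the pattern, in safe std code:

```rust
// Sketch: mutation through a shared reference, as a reentrant
// guard would provide, using RefCell's runtime borrow check.
use std::cell::RefCell;

fn bump(data: &RefCell<u32>) {
    *data.borrow_mut() += 1; // panics only if a borrow is already live
}

fn main() {
    let cell = RefCell::new(0);
    bump(&cell);
    bump(&cell);
    assert_eq!(*cell.borrow(), 2);
}
```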
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index b5b3ad989..1a9d3d3f1 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -2,7 +2,7 @@ use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::mpsc::channel;
use crate::sync::{Arc, RwLock, RwLockReadGuard, TryLockError};
use crate::thread;
-use rand::{self, Rng};
+use rand::Rng;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
@@ -28,7 +28,7 @@ fn frob() {
let tx = tx.clone();
let r = r.clone();
thread::spawn(move || {
- let mut rng = rand::thread_rng();
+ let mut rng = crate::test_helpers::test_rng();
for _ in 0..M {
if rng.gen_bool(1.0 / (N as f64)) {
drop(r.write().unwrap());
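Replacing `rand::thread_rng()` with a crate-local `test_rng()` makes the stress test deterministic and its failures reproducible. A sketch of what such a helper might look like, assuming rand's `SeedableRng`; the actual `test_helpers::test_rng` may derive its seed differently:

```rust
// Sketch: deterministic RNG for tests (hypothetical implementation).
use rand::rngs::StdRng;
use rand::SeedableRng;

pub fn test_rng() -> StdRng {
    // A fixed seed reproduces failures; a real helper might derive
    // the seed from the test name or an environment variable.
    StdRng::seed_from_u64(0xDEAD_BEEF)
}
```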
diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs
index c2b366808..19350b83f 100644
--- a/library/std/src/sys/itron/thread.rs
+++ b/library/std/src/sys/itron/thread.rs
@@ -119,7 +119,7 @@ impl Thread {
let old_lifecycle = inner
.lifecycle
- .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::Release);
+ .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::AcqRel);
match old_lifecycle {
LIFECYCLE_DETACHED => {
@@ -129,9 +129,9 @@ impl Thread {
// In this case, `*p_inner`'s ownership has been moved to
// us, and we are responsible for dropping it. The acquire
- // ordering is not necessary because the parent thread made
- // no memory access needing synchronization since the call
- // to `acre_tsk`.
+ // ordering ensures that the swap operation that wrote
+ // `LIFECYCLE_DETACHED` happens-before `Box::from_raw(
+ // p_inner)`.
// Safety: See above.
let _ = unsafe { Box::from_raw(p_inner) };
@@ -151,6 +151,9 @@ impl Thread {
// Since the parent might drop `*inner` and terminate us as
// soon as it sees `JOIN_FINALIZE`, the release ordering
// must be used in the above `swap` call.
+ //
+ // To make the task referred to by `parent_tid` visible, we
+ // must use the acquire ordering in the above `swap` call.
// [JOINING → JOIN_FINALIZE]
// Wake up the parent task.
@@ -218,11 +221,15 @@ impl Thread {
let current_task = current_task as usize;
- match inner.lifecycle.swap(current_task, Ordering::Acquire) {
+ match inner.lifecycle.swap(current_task, Ordering::AcqRel) {
LIFECYCLE_INIT => {
// [INIT → JOINING]
// The child task will transition the state to `JOIN_FINALIZE`
// and wake us up.
+ //
+ // To make the task referred to by `current_task` visible from
+ // the child task's point of view, we must use the release
+ // ordering in the above `swap` call.
loop {
expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk");
// To synchronize with the child task's memory accesses to
@@ -267,15 +274,15 @@ impl Drop for Thread {
let inner = unsafe { self.p_inner.as_ref() };
// Detach the thread.
- match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+ match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::AcqRel) {
LIFECYCLE_INIT => {
// [INIT → DETACHED]
// When the time comes, the child will figure out that no
// one will ever join it.
// The ownership of `*p_inner` is moved to the child thread.
- // However, the release ordering is not necessary because we
- // made no memory access needing synchronization since the call
- // to `acre_tsk`.
+ // The release ordering ensures that the above swap operation on
+ // `lifecycle` happens-before the child thread's
+ // `Box::from_raw(p_inner)`.
}
LIFECYCLE_FINISHED => {
// [FINISHED → JOINED]
@@ -287,7 +294,7 @@ impl Drop for Thread {
// Terminate and delete the task
// Safety: `self.task` still represents a task we own (because
// this method or `join_inner` is called only once for
- // each `Thread`). The task indicated that it's safe to
+ // each `Thread`). The task indicated that it's safe to
// delete by entering the `FINISHED` state.
unsafe { terminate_and_delete_task(self.task) };
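The ordering changes in this file all serve one pattern: ownership of `*p_inner` is handed off through a single atomic `swap`, so whichever side swaps last must acquire the peer's writes before it may `Box::from_raw` the state, and must release its own writes in case the peer turns out to be the one that frees. `AcqRel` covers both roles at once. A reduced sketch of the handoff:

```rust
// Sketch: last-one-out frees shared state via an AcqRel swap.
use std::sync::atomic::{AtomicU8, Ordering};

const DONE: u8 = 1;

unsafe fn finish(state: &AtomicU8, shared: *mut u32) {
    // Release: publish our writes to *shared for the other side.
    // Acquire: observe the other side's writes if we end up freeing.
    if state.swap(DONE, Ordering::AcqRel) == DONE {
        // The peer finished first; `shared` is now exclusively ours.
        drop(Box::from_raw(shared));
    }
}

fn main() {
    let shared = Box::into_raw(Box::new(0u32));
    let state = AtomicU8::new(0);
    unsafe {
        finish(&state, shared); // marks DONE, does not free
        finish(&state, shared); // sees DONE, frees exactly once
    }
}
```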
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index 01e4ffe3d..9865a945b 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -34,6 +34,7 @@ pub mod process;
pub mod stdio;
pub mod thread;
pub mod thread_local_key;
+pub mod thread_parking;
pub mod time;
mod condvar;
diff --git a/library/std/src/sys/sgx/thread.rs b/library/std/src/sys/sgx/thread.rs
index d745a6196..1608b8cb6 100644
--- a/library/std/src/sys/sgx/thread.rs
+++ b/library/std/src/sys/sgx/thread.rs
@@ -65,39 +65,36 @@ mod task_queue {
/// execution. The signal is sent once all TLS destructors have finished at
/// which point no new thread locals should be created.
pub mod wait_notify {
- use super::super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+ use crate::pin::Pin;
use crate::sync::Arc;
+ use crate::sys_common::thread_parking::Parker;
- pub struct Notifier(Arc<SpinMutex<WaitVariable<bool>>>);
+ pub struct Notifier(Arc<Parker>);
impl Notifier {
/// Notify the waiter. The waiter is either notified right away (if
/// currently blocked in `Waiter::wait()`) or later when it calls the
/// `Waiter::wait()` method.
pub fn notify(self) {
- let mut guard = self.0.lock();
- *guard.lock_var_mut() = true;
- let _ = WaitQueue::notify_one(guard);
+ Pin::new(&*self.0).unpark()
}
}
- pub struct Waiter(Arc<SpinMutex<WaitVariable<bool>>>);
+ pub struct Waiter(Arc<Parker>);
impl Waiter {
/// Wait for a notification. If `Notifier::notify()` has already been
/// called, this will return immediately, otherwise the current thread
/// is blocked until notified.
pub fn wait(self) {
- let guard = self.0.lock();
- if *guard.lock_var() {
- return;
- }
- WaitQueue::wait(guard, || {});
+ // SAFETY:
+ // This is only ever called on one thread.
+ unsafe { Pin::new(&*self.0).park() }
}
}
pub fn new() -> (Notifier, Waiter) {
- let inner = Arc::new(SpinMutex::new(WaitVariable::new(false)));
+ let inner = Arc::new(Parker::new());
(Notifier(inner.clone()), Waiter(inner))
}
}
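The rewrite works because a parker's unpark token already encodes the one-shot "notify may happen before wait" protocol this module needs: if `notify` runs first, the token is left set and the later `wait` returns immediately. The same semantics are observable through std's public API:

```rust
// Sketch: unpark-token semantics that wait_notify now relies on.
use std::thread;

fn main() {
    let waiter = thread::spawn(|| {
        // If unpark() already ran, its stored token makes park()
        // return at once instead of blocking.
        thread::park();
    });
    // May execute before or after the spawned thread parks.
    waiter.thread().unpark();
    waiter.join().unwrap();
}
```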
diff --git a/library/std/src/sys/sgx/thread_parking.rs b/library/std/src/sys/sgx/thread_parking.rs
new file mode 100644
index 000000000..0006cd4f1
--- /dev/null
+++ b/library/std/src/sys/sgx/thread_parking.rs
@@ -0,0 +1,23 @@
+use super::abi::usercalls;
+use crate::io::ErrorKind;
+use crate::time::Duration;
+use fortanix_sgx_abi::{EV_UNPARK, WAIT_INDEFINITE};
+
+pub type ThreadId = fortanix_sgx_abi::Tcs;
+
+pub use super::abi::thread::current;
+
+pub fn park(_hint: usize) {
+ usercalls::wait(EV_UNPARK, WAIT_INDEFINITE).unwrap();
+}
+
+pub fn park_timeout(dur: Duration, _hint: usize) {
+ let timeout = u128::min(dur.as_nanos(), WAIT_INDEFINITE as u128 - 1) as u64;
+ if let Err(e) = usercalls::wait(EV_UNPARK, timeout) {
+ assert!(matches!(e.kind(), ErrorKind::TimedOut | ErrorKind::WouldBlock))
+ }
+}
+
+pub fn unpark(tid: ThreadId, _hint: usize) {
+ let _ = usercalls::send(EV_UNPARK, Some(tid));
+}
diff --git a/library/std/src/sys/unix/android.rs b/library/std/src/sys/unix/android.rs
index 73ff10ab8..0f704994f 100644
--- a/library/std/src/sys/unix/android.rs
+++ b/library/std/src/sys/unix/android.rs
@@ -1,7 +1,7 @@
//! Android ABI-compatibility module
//!
-//! The ABI of Android has changed quite a bit over time, and libstd attempts to
-//! be both forwards and backwards compatible as much as possible. We want to
+//! The ABI of Android has changed quite a bit over time, and std attempts to be
+//! both forwards and backwards compatible as much as possible. We want to
//! always work with the most recent version of Android, but we also want to
//! work with older versions of Android for whenever projects need to.
//!
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index 37a49f2d7..8e1f35d6c 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -149,12 +149,13 @@ cfg_has_statx! {{
) -> Option<io::Result<FileAttr>> {
use crate::sync::atomic::{AtomicU8, Ordering};
- // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`
- // We store the availability in global to avoid unnecessary syscalls.
- // 0: Unknown
- // 1: Not available
- // 2: Available
- static STATX_STATE: AtomicU8 = AtomicU8::new(0);
+ // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`.
+ // We check for it on first failure and remember availability to avoid having to
+ // do it again.
+ #[repr(u8)]
+ enum STATX_STATE{ Unknown = 0, Present, Unavailable }
+ static STATX_SAVED_STATE: AtomicU8 = AtomicU8::new(STATX_STATE::Unknown as u8);
+
syscall! {
fn statx(
fd: c_int,
@@ -165,31 +166,44 @@ cfg_has_statx! {{
) -> c_int
}
- match STATX_STATE.load(Ordering::Relaxed) {
- 0 => {
- // It is a trick to call `statx` with null pointers to check if the syscall
- // is available. According to the manual, it is expected to fail with EFAULT.
- // We do this mainly for performance, since it is nearly hundreds times
- // faster than a normal successful call.
- let err = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut()))
- .err()
- .and_then(|e| e.raw_os_error());
- // We don't check `err == Some(libc::ENOSYS)` because the syscall may be limited
- // and returns `EPERM`. Listing all possible errors seems not a good idea.
- // See: https://github.com/rust-lang/rust/issues/65662
- if err != Some(libc::EFAULT) {
- STATX_STATE.store(1, Ordering::Relaxed);
- return None;
- }
- STATX_STATE.store(2, Ordering::Relaxed);
- }
- 1 => return None,
- _ => {}
+ if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Unavailable as u8 {
+ return None;
}
let mut buf: libc::statx = mem::zeroed();
if let Err(err) = cvt(statx(fd, path, flags, mask, &mut buf)) {
- return Some(Err(err));
+ if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Present as u8 {
+ return Some(Err(err));
+ }
+
+ // Availability not checked yet.
+ //
+ // First try the cheap way.
+ if err.raw_os_error() == Some(libc::ENOSYS) {
+ STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed);
+ return None;
+ }
+
+ // Error other than `ENOSYS` is not a good enough indicator -- it is
+ // known that `EPERM` can be returned as a result of using seccomp to
+ // block the syscall.
+ // Availability is checked by performing a call which expects `EFAULT`
+ // if the syscall is usable.
+ // See: https://github.com/rust-lang/rust/issues/65662
+ // FIXME this can probably just do the call if `EPERM` was received, but
+ // previous iteration of the code checked it for all errors and for now
+ // this is retained.
+ // FIXME what about transient conditions like `ENOMEM`?
+ let err2 = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut()))
+ .err()
+ .and_then(|e| e.raw_os_error());
+ if err2 == Some(libc::EFAULT) {
+ STATX_SAVED_STATE.store(STATX_STATE::Present as u8, Ordering::Relaxed);
+ return Some(Err(err));
+ } else {
+ STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed);
+ return None;
+ }
}
// We cannot fill `stat64` exhaustively because of private padding fields.
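The probe is now folded into the error path: the first failing `statx` decides whether the failure means the syscall is unusable (cache `Unavailable`, fall back to `stat64`) or is a real per-call error (cache `Present`, report it), and the null-pointer `EFAULT` probe only runs when `ENOSYS` has not already settled the question. A condensed sketch of that state machine; the errno values are illustrative stand-ins for `libc::ENOSYS`/`libc::EFAULT`:

```rust
// Sketch: availability cached lazily, resolved on the first failure.
use std::sync::atomic::{AtomicU8, Ordering};

const UNKNOWN: u8 = 0;
const PRESENT: u8 = 1;
const UNAVAILABLE: u8 = 2;
const ENOSYS: i32 = 38; // illustrative
const EFAULT: i32 = 14; // illustrative

static STATE: AtomicU8 = AtomicU8::new(UNKNOWN);

// `None` means "fall back to the ordinary stat path".
fn try_call(
    real_call: impl Fn() -> Result<(), i32>,
    null_probe: impl Fn() -> i32,
) -> Option<Result<(), i32>> {
    if STATE.load(Ordering::Relaxed) == UNAVAILABLE {
        return None;
    }
    match real_call() {
        Ok(()) => Some(Ok(())),
        // Known-present: every failure is a real, reportable error.
        Err(e) if STATE.load(Ordering::Relaxed) == PRESENT => Some(Err(e)),
        Err(e) => {
            // First failure decides: ENOSYS, or a probe that does not
            // fail with EFAULT, means the syscall is unusable.
            if e == ENOSYS || null_probe() != EFAULT {
                STATE.store(UNAVAILABLE, Ordering::Relaxed);
                None
            } else {
                STATE.store(PRESENT, Ordering::Relaxed);
                Some(Err(e))
            }
        }
    }
}

fn main() {
    assert!(try_call(|| Err(ENOSYS), || EFAULT).is_none()); // caches Unavailable
    assert!(try_call(|| Ok(()), || EFAULT).is_none()); // short-circuits
}
```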
@@ -243,17 +257,15 @@ struct InnerReadDir {
pub struct ReadDir {
inner: Arc<InnerReadDir>,
- #[cfg(not(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "illumos",
- target_os = "fuchsia",
- target_os = "redox",
- )))]
end_of_stream: bool,
}
+impl ReadDir {
+ fn new(inner: InnerReadDir) -> Self {
+ Self { inner: Arc::new(inner), end_of_stream: false }
+ }
+}
+
struct Dir(*mut libc::DIR);
unsafe impl Send for Dir {}
@@ -332,11 +344,23 @@ pub struct FileTimes {
modified: Option<SystemTime>,
}
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Copy, Clone, Eq, Debug)]
pub struct FileType {
mode: mode_t,
}
+impl PartialEq for FileType {
+ fn eq(&self, other: &Self) -> bool {
+ self.masked() == other.masked()
+ }
+}
+
+impl core::hash::Hash for FileType {
+ fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+ self.masked().hash(state);
+ }
+}
+
#[derive(Debug)]
pub struct DirBuilder {
mode: mode_t,
@@ -548,7 +572,11 @@ impl FileType {
}
pub fn is(&self, mode: mode_t) -> bool {
- self.mode & libc::S_IFMT == mode
+ self.masked() == mode
+ }
+
+ fn masked(&self) -> mode_t {
+ self.mode & libc::S_IFMT
}
}
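Routing `PartialEq` and `Hash` through `masked()` keeps both consistent with `is()`: values that differ only in permission bits compare equal, and equal values hash identically, preserving the `a == b` implies `hash(a) == hash(b)` invariant that hash maps rely on. The masking itself:

```rust
// Sketch: only the S_IFMT file-type bits take part in comparisons.
fn main() {
    let s_ifmt = 0o170000u32; // libc::S_IFMT on Linux
    let s_ifreg = 0o100000u32; // libc::S_IFREG on Linux

    let a = s_ifreg | 0o644; // regular file, rw-r--r--
    let b = s_ifreg | 0o755; // regular file, rwxr-xr-x

    assert_ne!(a, b); // raw modes differ...
    assert_eq!(a & s_ifmt, b & s_ifmt); // ...but both are regular files
}
```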
@@ -578,18 +606,26 @@ impl Iterator for ReadDir {
target_os = "illumos"
))]
fn next(&mut self) -> Option<io::Result<DirEntry>> {
+ if self.end_of_stream {
+ return None;
+ }
+
unsafe {
loop {
// As of POSIX.1-2017, readdir() is not required to be thread safe; only
// readdir_r() is. However, readdir_r() cannot correctly handle platforms
- // with unlimited or variable NAME_MAX. Many modern platforms guarantee
+ // with unlimited or variable NAME_MAX. Many modern platforms guarantee
// thread safety for readdir() as long an individual DIR* is not accessed
// concurrently, which is sufficient for Rust.
super::os::set_errno(0);
let entry_ptr = readdir64(self.inner.dirp.0);
if entry_ptr.is_null() {
- // null can mean either the end is reached or an error occurred.
- // So we had to clear errno beforehand to check for an error now.
+ // We either encountered an error, or reached the end. Either way,
+ // the next call to next() should return None.
+ self.end_of_stream = true;
+
+ // To distinguish between errors and end-of-directory, we had to clear
+ // errno beforehand to check for an error now.
return match super::os::errno() {
0 => None,
e => Some(Err(Error::from_raw_os_error(e))),
@@ -1347,18 +1383,7 @@ pub fn readdir(path: &Path) -> io::Result<ReadDir> {
} else {
let root = path.to_path_buf();
let inner = InnerReadDir { dirp: Dir(ptr), root };
- Ok(ReadDir {
- inner: Arc::new(inner),
- #[cfg(not(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "illumos",
- target_os = "fuchsia",
- target_os = "redox",
- )))]
- end_of_stream: false,
- })
+ Ok(ReadDir::new(inner))
}
}
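Tracking `end_of_stream` on every platform makes `ReadDir` a fused iterator: once `readdir64` returns null, whether for end-of-directory or an error, later `next()` calls return `None` rather than calling into libc again on a stream that may already be exhausted. The guard is the standard fusing pattern:

```rust
// Sketch: the fusing guard used by ReadDir::next.
struct Fused<I> {
    inner: I,
    done: bool,
}

impl<I: Iterator> Iterator for Fused<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        if self.done {
            return None; // never consult the underlying source again
        }
        let item = self.inner.next();
        if item.is_none() {
            self.done = true;
        }
        item
    }
}

fn main() {
    let mut it = Fused { inner: [1, 2].into_iter(), done: false };
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    assert_eq!(it.next(), None);
    assert_eq!(it.next(), None); // fused: inner is left alone
}
```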
@@ -1739,12 +1764,16 @@ mod remove_dir_impl {
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use crate::os::unix::prelude::{OwnedFd, RawFd};
use crate::path::{Path, PathBuf};
- use crate::sync::Arc;
use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::{cvt, cvt_r};
- #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))]
+ #[cfg(not(any(
+ all(target_os = "linux", target_env = "gnu"),
+ all(target_os = "macos", not(target_arch = "aarch64"))
+ )))]
use libc::{fdopendir, openat, unlinkat};
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::{fdopendir, openat64 as openat, unlinkat};
#[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
use macos_weak::{fdopendir, openat, unlinkat};
@@ -1811,21 +1840,8 @@ mod remove_dir_impl {
// a valid root is not needed because we do not call any functions involving the full path
// of the DirEntrys.
let dummy_root = PathBuf::new();
- Ok((
- ReadDir {
- inner: Arc::new(InnerReadDir { dirp, root: dummy_root }),
- #[cfg(not(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "illumos",
- target_os = "fuchsia",
- target_os = "redox",
- )))]
- end_of_stream: false,
- },
- new_parent_fd,
- ))
+ let inner = InnerReadDir { dirp, root: dummy_root };
+ Ok((ReadDir::new(inner), new_parent_fd))
}
#[cfg(any(
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 94546ca09..73b9bef7e 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -61,6 +61,10 @@ use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use crate::sys::cvt;
use crate::sys::weak::syscall;
+#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+use libc::sendfile as sendfile64;
+#[cfg(all(target_os = "linux", target_env = "gnu"))]
+use libc::sendfile64;
use libc::{EBADF, EINVAL, ENOSYS, EOPNOTSUPP, EOVERFLOW, EPERM, EXDEV};
#[cfg(test)]
@@ -583,7 +587,7 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
// - copy_file_range file is immutable or syscall is blocked by seccomp¹ (EPERM)
// - copy_file_range cannot be used with pipes or device nodes (EINVAL)
// - the writer fd was opened with O_APPEND (EBADF²)
- // and no bytes were written successfully yet. (All these errnos should
+ // and no bytes were written successfully yet. (All these errnos should
// not be returned if something was already written, but they happen in
// the wild, see #91152.)
//
@@ -647,7 +651,7 @@ fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) ->
let result = match mode {
SpliceMode::Sendfile => {
- cvt(unsafe { libc::sendfile(writer, reader, ptr::null_mut(), chunk_size) })
+ cvt(unsafe { sendfile64(writer, reader, ptr::null_mut(), chunk_size) })
}
SpliceMode::Splice => cvt(unsafe {
splice(reader, ptr::null_mut(), writer, ptr::null_mut(), chunk_size, 0)
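This is the large-file-support trick used throughout the commit (`open64` and `mmap64` below follow suit): on linux-gnu the explicit 64-bit symbol is imported directly, everywhere else the plain symbol is renamed on import, so every call site uses the `*64` name unconditionally. The shape, extracted (Linux-only, matching the module above):

```rust
// Sketch: choose the LFS symbol at compile time, one uniform call site.
#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
use libc::sendfile as sendfile64;
#[cfg(all(target_os = "linux", target_env = "gnu"))]
use libc::sendfile64;

fn send_chunk(writer: i32, reader: i32, len: usize) -> isize {
    // Identical on every target; only the import above differs.
    unsafe { sendfile64(writer, reader, std::ptr::null_mut(), len) }
}
```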
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 1ddb09905..6be1abc2b 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -2,6 +2,7 @@ use crate::cell::UnsafeCell;
use crate::ptr;
use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
use crate::sys::locks::{pthread_mutex, Mutex};
+use crate::sys::time::TIMESPEC_MAX;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::time::Duration;
@@ -12,13 +13,6 @@ pub struct Condvar {
mutex: AtomicPtr<libc::pthread_mutex_t>,
}
-const TIMESPEC_MAX: libc::timespec =
- libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
-
-fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
- if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
-}
-
#[inline]
fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
c.inner.0.get()
@@ -133,26 +127,15 @@ impl Condvar {
target_os = "horizon"
)))]
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- use crate::mem;
+ use crate::sys::time::Timespec;
let mutex = pthread_mutex::raw(mutex);
self.verify(mutex);
- let mut now: libc::timespec = mem::zeroed();
- let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
- assert_eq!(r, 0);
-
- // Nanosecond calculations can't overflow because both values are below 1e9.
- let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
-
- let sec = saturating_cast_to_time_t(dur.as_secs())
- .checked_add((nsec / 1_000_000_000) as libc::time_t)
- .and_then(|s| s.checked_add(now.tv_sec));
- let nsec = nsec % 1_000_000_000;
-
- let timeout =
- sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
-
+ let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
+ .checked_add_duration(&dur)
+ .and_then(|t| t.to_timespec())
+ .unwrap_or(TIMESPEC_MAX);
let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
assert!(r == libc::ETIMEDOUT || r == 0);
r == 0
@@ -169,57 +152,41 @@ impl Condvar {
target_os = "espidf",
target_os = "horizon"
))]
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ use crate::sys::time::SystemTime;
use crate::time::Instant;
let mutex = pthread_mutex::raw(mutex);
self.verify(mutex);
- // 1000 years
- let max_dur = Duration::from_secs(1000 * 365 * 86400);
-
- if dur > max_dur {
- // OSX implementation of `pthread_cond_timedwait` is buggy
- // with super long durations. When duration is greater than
- // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
- // in macOS Sierra return error 316.
- //
- // This program demonstrates the issue:
- // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
- //
- // To work around this issue, and possible bugs of other OSes, timeout
- // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
- // because of spurious wakeups.
-
- dur = max_dur;
- }
-
- // First, figure out what time it currently is, in both system and
- // stable time. pthread_cond_timedwait uses system time, but we want to
- // report timeout based on stable time.
- let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
- let stable_now = Instant::now();
- let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
- assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error());
-
- let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
- let extra = (nsec / 1_000_000_000) as libc::time_t;
- let nsec = nsec % 1_000_000_000;
- let seconds = saturating_cast_to_time_t(dur.as_secs());
-
- let timeout = sys_now
- .tv_sec
- .checked_add(extra)
- .and_then(|s| s.checked_add(seconds))
- .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
+ // OSX implementation of `pthread_cond_timedwait` is buggy
+ // with super long durations. When duration is greater than
+ // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+ // in macOS Sierra returns error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
+ // because of spurious wakeups.
+ let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400));
+
+ // pthread_cond_timedwait uses system time, but we want to report timeout
+ // based on stable time.
+ let now = Instant::now();
+
+ let timeout = SystemTime::now()
+ .t
+ .checked_add_duration(&dur)
+ .and_then(|t| t.to_timespec())
.unwrap_or(TIMESPEC_MAX);
- // And wait!
let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
debug_assert!(r == libc::ETIMEDOUT || r == 0);
// ETIMEDOUT is not a totally reliable method of determining timeout due
// to clock shifts, so do the check ourselves
- stable_now.elapsed() < dur
+ now.elapsed() < dur
}
}
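Both branches now compute the absolute deadline the same way: add the duration to the current time with a checked operation and saturate to `TIMESPEC_MAX` on overflow, so "wait effectively forever" becomes a defined outcome rather than an arithmetic hazard; the 1000-year clamp and the `Instant`-based recheck are kept for the macOS `pthread_cond_timedwait` bug and for clock shifts. The saturating shape, in public std types:

```rust
// Sketch: saturating deadline; Instant stands in for the internal
// Timespec and `far_future` for TIMESPEC_MAX.
use std::time::{Duration, Instant};

fn deadline(dur: Duration) -> Instant {
    let far_future = Instant::now() + Duration::from_secs(100 * 365 * 86400);
    // On overflow, saturate to "so late the wait never times out".
    Instant::now().checked_add(dur).unwrap_or(far_future)
}

fn main() {
    assert!(deadline(Duration::MAX) >= deadline(Duration::from_secs(1)));
}
```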
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 9055a011c..30a96be14 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -40,7 +40,7 @@ pub mod stdio;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
-pub mod thread_parker;
+pub mod thread_parking;
pub mod time;
#[cfg(target_os = "espidf")]
@@ -95,6 +95,10 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
)))]
'poll: {
use crate::sys::os::errno;
+ #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+ use libc::open as open64;
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::open64;
let pfds: &mut [_] = &mut [
libc::pollfd { fd: 0, events: 0, revents: 0 },
libc::pollfd { fd: 1, events: 0, revents: 0 },
@@ -116,7 +120,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
if pfd.revents & libc::POLLNVAL == 0 {
continue;
}
- if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
// If the stream is closed but we failed to reopen it, abort the
// process. Otherwise we wouldn't preserve the safety of
// operations on the corresponding Rust object Stdin, Stdout, or
@@ -139,9 +143,13 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
)))]
{
use crate::sys::os::errno;
+ #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+ use libc::open as open64;
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::open64;
for fd in 0..3 {
if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF {
- if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
+ if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 {
// If the stream is closed but we failed to reopen it, abort the
// process. Otherwise we wouldn't preserve the safety of
// operations on the corresponding Rust object Stdin, Stdout, or
@@ -156,7 +164,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
unsafe fn reset_sigpipe(#[allow(unused_variables)] sigpipe: u8) {
#[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))]
{
- // We don't want to add this as a public type to libstd, nor do we
+ // We don't want to add this as a public type to std, nor do we
// want to `include!` a file from the compiler (which would break
// Miri and xargo for example), so we choose to duplicate these
// constants from `compiler/rustc_session/src/config/sigpipe.rs`.
@@ -176,12 +184,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
sigpipe::SIG_DFL => (true, Some(libc::SIG_DFL)),
_ => unreachable!(),
};
- // The bootstrap compiler doesn't know about sigpipe::DEFAULT, and always passes in
- // SIG_IGN. This causes some tests to fail because they expect SIGPIPE to be reset to
- // default on process spawning (which doesn't happen if #[unix_sigpipe] is specified).
- // Since we can't differentiate between the cases here, treat SIG_IGN as DEFAULT
- // unconditionally.
- if sigpipe_attr_specified && !(cfg!(bootstrap) && sigpipe == sigpipe::SIG_IGN) {
+ if sigpipe_attr_specified {
UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, crate::sync::atomic::Ordering::Relaxed);
}
if let Some(handler) = handler {
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index b84bf8f92..c86f80972 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -512,7 +512,7 @@ impl FromRawFd for Socket {
// A workaround for this bug is to call the res_init libc function, to clear
// the cached configs. Unfortunately, while we believe glibc's implementation
// of res_init is thread-safe, we know that other implementations are not
-// (https://github.com/rust-lang/rust/issues/43592). Code here in libstd could
+// (https://github.com/rust-lang/rust/issues/43592). Code here in std could
// try to synchronize its res_init calls with a Mutex, but that wouldn't
// protect programs that call into libc in other ways. So instead of calling
// res_init unconditionally, we call it only when we detect we're linking
diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs
index a56c275c9..a744d0ab6 100644
--- a/library/std/src/sys/unix/pipe.rs
+++ b/library/std/src/sys/unix/pipe.rs
@@ -58,6 +58,10 @@ impl AnonPipe {
self.0.is_read_vectored()
}
+ pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.0.read_to_end(buf)
+ }
+
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index 848adca78..afd03d79c 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -144,6 +144,7 @@ pub enum ChildStdio {
Null,
}
+#[derive(Debug)]
pub enum Stdio {
Inherit,
Null,
@@ -510,16 +511,68 @@ impl ChildStdio {
}
impl fmt::Debug for Command {
+ // show all attributes but `self.closures` which does not implement `Debug`
+ // and `self.argv` which is not useful for debugging
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if self.program != self.args[0] {
- write!(f, "[{:?}] ", self.program)?;
- }
- write!(f, "{:?}", self.args[0])?;
+ if f.alternate() {
+ let mut debug_command = f.debug_struct("Command");
+ debug_command.field("program", &self.program).field("args", &self.args);
+ if !self.env.is_unchanged() {
+ debug_command.field("env", &self.env);
+ }
+
+ if self.cwd.is_some() {
+ debug_command.field("cwd", &self.cwd);
+ }
+ if self.uid.is_some() {
+ debug_command.field("uid", &self.uid);
+ }
+ if self.gid.is_some() {
+ debug_command.field("gid", &self.gid);
+ }
+
+ if self.groups.is_some() {
+ debug_command.field("groups", &self.groups);
+ }
+
+ if self.stdin.is_some() {
+ debug_command.field("stdin", &self.stdin);
+ }
+ if self.stdout.is_some() {
+ debug_command.field("stdout", &self.stdout);
+ }
+ if self.stderr.is_some() {
+ debug_command.field("stderr", &self.stderr);
+ }
+ if self.pgroup.is_some() {
+ debug_command.field("pgroup", &self.pgroup);
+ }
+
+ #[cfg(target_os = "linux")]
+ {
+ debug_command.field("create_pidfd", &self.create_pidfd);
+ }
- for arg in &self.args[1..] {
- write!(f, " {:?}", arg)?;
+ debug_command.finish()
+ } else {
+ if let Some(ref cwd) = self.cwd {
+ write!(f, "cd {cwd:?} && ")?;
+ }
+ for (key, value_opt) in self.get_envs() {
+ if let Some(value) = value_opt {
+ write!(f, "{}={value:?} ", key.to_string_lossy())?;
+ }
+ }
+ if self.program != self.args[0] {
+ write!(f, "[{:?}] ", self.program)?;
+ }
+ write!(f, "{:?}", self.args[0])?;
+
+ for arg in &self.args[1..] {
+ write!(f, " {:?}", arg)?;
+ }
+ Ok(())
}
- Ok(())
}
}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
index 66ea3db20..d4c7e58b3 100644
--- a/library/std/src/sys/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -35,6 +35,11 @@ impl Command {
Ok((Process { handle: Handle::new(process_handle) }, ours))
}
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?;
+ crate::sys_common::process::wait_with_output(proc, pipes)
+ }
+
pub fn exec(&mut self, default: Stdio) -> io::Error {
if self.saw_nul() {
return io::const_io_error!(
@@ -257,7 +262,7 @@ impl ExitStatus {
// available on Fuchsia.
//
// It does not appear that Fuchsia is Unix-like enough to implement ExitStatus (or indeed many
- // other things from std::os::unix) properly. This veneer is always going to be a bodge. So
+ // other things from std::os::unix) properly. This veneer is always going to be a bodge. So
// while I don't know if these implementations are actually correct, I think they will do for
// now at least.
pub fn core_dumped(&self) -> bool {
@@ -272,9 +277,9 @@ impl ExitStatus {
pub fn into_raw(&self) -> c_int {
// We don't know what someone who calls into_raw() will do with this value, but it should
- // have the conventional Unix representation. Despite the fact that this is not
+ // have the conventional Unix representation. Despite the fact that this is not
// standardised in SuS or POSIX, all Unix systems encode the signal and exit status the
- // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every
+ // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every
// Unix.)
//
// The caller of `std::os::unix::into_raw` is probably wanting a Unix exit status, and may
@@ -282,14 +287,14 @@ impl ExitStatus {
// different Unix variant.
//
// The other view would be to say that the caller on Fuchsia ought to know that `into_raw`
- // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is
+ // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is
// not possible here because we must return a c_int because that's what Unix (including
// SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't
// necessarily fit.
//
// It seems to me that the right answer would be to provide std::os::fuchsia with its
 // own ExitStatusExt, rather than trying to provide a not very convincing imitation of
 - // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But
 + // Unix. Ie, std::os::unix::process::ExitStatusExt ought not to exist on Fuchsia. But
// fixing this up that is beyond the scope of my efforts now.
let exit_status_as_if_unix: u8 = self.0.try_into().expect("Fuchsia process return code bigger than 8 bits, but std::os::unix::ExitStatusExt::into_raw() was called to try to convert the value into a traditional Unix-style wait status, which cannot represent values greater than 255.");
let wait_status_as_if_unix = (exit_status_as_if_unix as c_int) << 8;
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 56a805cef..3bc17b775 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -66,14 +66,15 @@ impl Command {
//
// Note that as soon as we're done with the fork there's no need to hold
// a lock any more because the parent won't do anything and the child is
- // in its own process. Thus the parent drops the lock guard while the child
- // forgets it to avoid unlocking it on a new thread, which would be invalid.
+ // in its own process. Thus the parent drops the lock guard immediately.
+ // The child calls `mem::forget` to leak the lock, which is crucial because
+ // releasing a lock is not async-signal-safe.
let env_lock = sys::os::env_read_lock();
let (pid, pidfd) = unsafe { self.do_fork()? };
if pid == 0 {
crate::panic::always_abort();
- mem::forget(env_lock);
+ mem::forget(env_lock); // avoid non-async-signal-safe unlocking
drop(input);
let Err(err) = unsafe { self.do_exec(theirs, envp.as_ref()) };
let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32;
@@ -132,6 +133,11 @@ impl Command {
}
}
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?;
+ crate::sys_common::process::wait_with_output(proc, pipes)
+ }
+
// Attempts to fork the process. If successful, returns Ok((0, -1))
// in the child, and Ok((child_pid, -1)) in the parent.
#[cfg(not(target_os = "linux"))]
@@ -660,11 +666,11 @@ impl ExitStatus {
}
pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
// true on all actual versions of Unix, is widely assumed, and is specified in SuS
- // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not
// true for a platform pretending to be Unix, the tests (our doctests, and also
- // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
 + // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
match NonZero_c_int::try_from(self.0) {
/* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
/* was zero, couldn't convert */ Err(_) => Ok(()),
@@ -740,6 +746,8 @@ fn signal_string(signal: i32) -> &'static str {
libc::SIGWINCH => " (SIGWINCH)",
#[cfg(not(target_os = "haiku"))]
libc::SIGIO => " (SIGIO)",
+ #[cfg(target_os = "haiku")]
+ libc::SIGPOLL => " (SIGPOLL)",
libc::SIGSYS => " (SIGSYS)",
// For information on Linux signals, run `man 7 signal`
#[cfg(all(
diff --git a/library/std/src/sys/unix/process/process_unix/tests.rs b/library/std/src/sys/unix/process/process_unix/tests.rs
index e0e2d478f..e5e1f956b 100644
--- a/library/std/src/sys/unix/process/process_unix/tests.rs
+++ b/library/std/src/sys/unix/process/process_unix/tests.rs
@@ -3,7 +3,7 @@ use crate::panic::catch_unwind;
use crate::process::Command;
// Many of the other aspects of this situation, including heap alloc concurrency
-// safety etc., are tested in src/test/ui/process/process-panic-after-fork.rs
+// safety etc., are tested in tests/ui/process/process-panic-after-fork.rs
#[test]
fn exitstatus_display_tests() {
@@ -19,17 +19,17 @@ fn exitstatus_display_tests() {
t(0x00000, "exit status: 0");
t(0x0ff00, "exit status: 255");
- // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar.
+ // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar.
// https://github.com/rust-lang/rust/pull/82749#issuecomment-790525956
// The purpose of this test is to test our string formatting, not our understanding of the wait
- // status magic numbers. So restrict these to Linux.
+ // status magic numbers. So restrict these to Linux.
if cfg!(target_os = "linux") {
t(0x0137f, "stopped (not terminated) by signal: 19 (SIGSTOP)");
t(0x0ffff, "continued (WIFCONTINUED)");
}
// Testing "unrecognised wait status" is hard because the wait.h macros typically
- // assume that the value came from wait and isn't mad. With the glibc I have here
+ // assume that the value came from wait and isn't mad. With the glibc I have here
// this works:
if cfg!(all(target_os = "linux", target_env = "gnu")) {
t(0x000ff, "unrecognised wait status: 255 0xff");
diff --git a/library/std/src/sys/unix/process/process_unsupported.rs b/library/std/src/sys/unix/process/process_unsupported.rs
index 72f9f3f9c..f28ca58d0 100644
--- a/library/std/src/sys/unix/process/process_unsupported.rs
+++ b/library/std/src/sys/unix/process/process_unsupported.rs
@@ -20,6 +20,10 @@ impl Command {
unsupported()
}
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ unsupported()
+ }
+
pub fn exec(&mut self, _default: Stdio) -> io::Error {
unsupported_err()
}
diff --git a/library/std/src/sys/unix/process/process_vxworks.rs b/library/std/src/sys/unix/process/process_vxworks.rs
index 200ef6719..569a4b149 100644
--- a/library/std/src/sys/unix/process/process_vxworks.rs
+++ b/library/std/src/sys/unix/process/process_vxworks.rs
@@ -108,6 +108,11 @@ impl Command {
}
}
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?;
+ crate::sys_common::process::wait_with_output(proc, pipes)
+ }
+
pub fn exec(&mut self, default: Stdio) -> io::Error {
let ret = Command::spawn(self, default, false);
match ret {
@@ -190,11 +195,11 @@ impl ExitStatus {
}
pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
// true on all actual versions of Unix, is widely assumed, and is specified in SuS
- // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not
// true for a platform pretending to be Unix, the tests (our doctests, and also
- // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
 + // process_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
match NonZero_c_int::try_from(self.0) {
Ok(failure) => Err(ExitStatusError(failure)),
Err(_) => Ok(()),
diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs
index 75a5c0f92..b59d4ba26 100644
--- a/library/std/src/sys/unix/stack_overflow.rs
+++ b/library/std/src/sys/unix/stack_overflow.rs
@@ -45,7 +45,10 @@ mod imp {
use crate::thread;
use libc::MAP_FAILED;
- use libc::{mmap, munmap};
+ #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+ use libc::{mmap as mmap64, munmap};
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::{mmap64, munmap};
use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIG_DFL};
use libc::{sigaltstack, SIGSTKSZ, SS_DISABLE};
use libc::{MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SIGSEGV};
@@ -135,7 +138,7 @@ mod imp {
#[cfg(not(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",)))]
let flags = MAP_PRIVATE | MAP_ANON;
let stackp =
- mmap(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0);
+ mmap64(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0);
if stackp == MAP_FAILED {
panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
}
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index c1d30dd9d..2a1830d06 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -73,7 +73,7 @@ impl Thread {
n => {
assert_eq!(n, libc::EINVAL);
// EINVAL means |stack_size| is either too small or not a
- // multiple of the system page size. Because it's definitely
+ // multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
@@ -136,7 +136,7 @@ impl Thread {
unsafe {
// Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
- let name = truncate_cstr(name, TASK_COMM_LEN);
+ let name = truncate_cstr::<{ TASK_COMM_LEN }>(name);
let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
// We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
debug_assert_eq!(res, 0);
@@ -153,7 +153,7 @@ impl Thread {
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
pub fn set_name(name: &CStr) {
unsafe {
- let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE);
+ let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name);
let res = libc::pthread_setname_np(name.as_ptr());
// We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
debug_assert_eq!(res, 0);
@@ -285,17 +285,12 @@ impl Drop for Thread {
}
#[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios", target_os = "watchos"))]
-fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> crate::borrow::Cow<'_, CStr> {
- use crate::{borrow::Cow, ffi::CString};
-
- if cstr.to_bytes_with_nul().len() > max_with_nul {
- let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
- // SAFETY: the non-nul bytes came straight from a CStr.
- // (CString will add the terminating nul.)
- Cow::Owned(unsafe { CString::from_vec_unchecked(bytes) })
- } else {
- Cow::Borrowed(cstr)
+fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
+ let mut result = [0; MAX_WITH_NUL];
+ for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) {
+ *dst = *src as libc::c_char;
}
+ result
}
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
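Returning a fixed `[c_char; MAX_WITH_NUL]` drops the `Cow`/`CString` allocation on the truncation path and guarantees the terminator by construction: the buffer starts zeroed and the copy stops at `MAX_WITH_NUL - 1`, so the final byte is always NUL. The same logic over plain bytes:

```rust
// Sketch: fixed-size, always-NUL-terminated truncation.
fn truncate<const MAX_WITH_NUL: usize>(bytes: &[u8]) -> [u8; MAX_WITH_NUL] {
    let mut out = [0u8; MAX_WITH_NUL];
    // zip() stops at the shorter side: at most MAX_WITH_NUL - 1 bytes
    // are copied, so out[MAX_WITH_NUL - 1] always stays 0 (the NUL).
    for (src, dst) in bytes.iter().zip(&mut out[..MAX_WITH_NUL - 1]) {
        *dst = *src;
    }
    out
}

fn main() {
    assert_eq!(&truncate::<4>(b"thread-name"), b"thr\0");
    assert_eq!(&truncate::<16>(b"short")[..6], b"short\0");
}
```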
@@ -510,7 +505,7 @@ mod cgroups {
let limit = raw_quota.next()?;
let period = raw_quota.next()?;
match (limit.parse::<usize>(), period.parse::<usize>()) {
- (Ok(limit), Ok(period)) => {
+ (Ok(limit), Ok(period)) if period > 0 => {
quota = quota.min(limit / period);
}
_ => {}
@@ -570,7 +565,7 @@ mod cgroups {
let period = parse_file("cpu.cfs_period_us");
match (limit, period) {
- (Some(limit), Some(period)) => quota = quota.min(limit / period),
+ (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period),
_ => {}
}
@@ -658,7 +653,10 @@ pub mod guard {
))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
- use libc::{mmap, mprotect};
+ #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+ use libc::{mmap as mmap64, mprotect};
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::{mmap64, mprotect};
use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use crate::io;
@@ -757,10 +755,10 @@ pub mod guard {
if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
// Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault
- // when growing too close to an existing mapping. If we map
+ // when growing too close to an existing mapping. If we map
// our own guard, then the kernel starts enforcing a rather
// large gap above that, rendering much of the possible
- // stack space useless. See #43052.
+ // stack space useless. See #43052.
//
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
@@ -776,14 +774,14 @@ pub mod guard {
None
} else if cfg!(target_os = "freebsd") {
// FreeBSD's stack autogrows, and optionally includes a guard page
- // at the bottom. If we try to remap the bottom of the stack
- // ourselves, FreeBSD's guard page moves upwards. So we'll just use
+ // at the bottom. If we try to remap the bottom of the stack
+ // ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl, but there are
- // few reasons to change it from the default. The default value has
+ // few reasons to change it from the default. The default value has
// been 1 ever since FreeBSD 11.1 and 10.4.
const GUARD_PAGES: usize = 1;
let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
@@ -808,7 +806,7 @@ pub mod guard {
// read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313.
let stackptr = get_stack_start_aligned()?;
- let result = mmap(
+ let result = mmap64(
stackptr,
page_size,
PROT_READ | PROT_WRITE,
@@ -879,9 +877,9 @@ pub mod guard {
} else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
{
// glibc used to include the guard area within the stack, as noted in the BUGS
- // section of `man pthread_attr_getguardsize`. This has been corrected starting
+ // section of `man pthread_attr_getguardsize`. This has been corrected starting
// with glibc 2.27, and in some distro backports, so the guard is now placed at the
- // end (below) the stack. There's no easy way for us to know which we have at
+ // end (below) the stack. There's no easy way for us to know which we have at
// runtime, so we'll just match any fault in the range right above or below the
// stack base to call that fault a stack overflow.
Some(stackaddr - guardsize..stackaddr + guardsize)
diff --git a/library/std/src/sys/unix/thread_parker/mod.rs b/library/std/src/sys/unix/thread_parker/mod.rs
deleted file mode 100644
index 35f1e68a8..000000000
--- a/library/std/src/sys/unix/thread_parker/mod.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-//! Thread parking on systems without futex support.
-
-#![cfg(not(any(
- target_os = "linux",
- target_os = "android",
- all(target_os = "emscripten", target_feature = "atomics"),
- target_os = "freebsd",
- target_os = "openbsd",
- target_os = "dragonfly",
- target_os = "fuchsia",
-)))]
-
-cfg_if::cfg_if! {
- if #[cfg(all(
- any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "tvos",
- ),
- not(miri),
- ))] {
- mod darwin;
- pub use darwin::Parker;
- } else if #[cfg(target_os = "netbsd")] {
- mod netbsd;
- pub use netbsd::Parker;
- } else {
- mod pthread;
- pub use pthread::Parker;
- }
-}
diff --git a/library/std/src/sys/unix/thread_parker/netbsd.rs b/library/std/src/sys/unix/thread_parker/netbsd.rs
deleted file mode 100644
index 7657605b5..000000000
--- a/library/std/src/sys/unix/thread_parker/netbsd.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-use crate::ffi::{c_int, c_void};
-use crate::pin::Pin;
-use crate::ptr::{null, null_mut};
-use crate::sync::atomic::{
- AtomicU64,
- Ordering::{Acquire, Relaxed, Release},
-};
-use crate::time::Duration;
-use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC};
-
-extern "C" {
- fn ___lwp_park60(
- clock_id: clockid_t,
- flags: c_int,
- ts: *mut timespec,
- unpark: lwpid_t,
- hint: *const c_void,
- unparkhint: *const c_void,
- ) -> c_int;
- fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int;
-}
-
-/// The thread is not parked and the token is not available.
-///
-/// Zero cannot be a valid LWP id, since it is used as empty value for the unpark
-/// argument in _lwp_park.
-const EMPTY: u64 = 0;
-/// The token is available. Do not park anymore.
-const NOTIFIED: u64 = u64::MAX;
-
-pub struct Parker {
- /// The parker state. Contains either one of the two state values above or the LWP
- /// id of the parked thread.
- state: AtomicU64,
-}
-
-impl Parker {
- pub unsafe fn new(parker: *mut Parker) {
- parker.write(Parker { state: AtomicU64::new(EMPTY) })
- }
-
- // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
- pub unsafe fn park(self: Pin<&Self>) {
- // If the token has already been made available, we can skip
- // a bit of work, so check for it here.
- if self.state.load(Acquire) != NOTIFIED {
- let parked = _lwp_self() as u64;
- let hint = self.state.as_mut_ptr().cast();
- if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
- // Loop to guard against spurious wakeups.
- loop {
- ___lwp_park60(0, 0, null_mut(), 0, hint, null());
- if self.state.load(Acquire) == NOTIFIED {
- break;
- }
- }
- }
- }
-
- // At this point, the change to NOTIFIED has always been observed with acquire
- // ordering, so we can just use a relaxed store here (instead of a swap).
- self.state.store(EMPTY, Relaxed);
- }
-
- // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
- pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
- if self.state.load(Acquire) != NOTIFIED {
- let parked = _lwp_self() as u64;
- let hint = self.state.as_mut_ptr().cast();
- let mut timeout = timespec {
- // Saturate so that the operation will definitely time out
- // (even if it is after the heat death of the universe).
- tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX),
- tv_nsec: dur.subsec_nanos().into(),
- };
-
- if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
- // Timeout needs to be mutable since it is modified on NetBSD 9.0 and
- // above.
- ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, hint, null());
- // Use a swap to get acquire ordering even if the token was set after
- // the timeout occurred.
- self.state.swap(EMPTY, Acquire);
- return;
- }
- }
-
- self.state.store(EMPTY, Relaxed);
- }
-
- // Does not actually need `Pin`, but the pthread implementation does.
- pub fn unpark(self: Pin<&Self>) {
- let state = self.state.swap(NOTIFIED, Release);
- if !matches!(state, EMPTY | NOTIFIED) {
- let lwp = state as lwpid_t;
- let hint = self.state.as_mut_ptr().cast();
-
- // If the parking thread terminated and did not actually park, this will
- // probably return an error, which is OK. In the worst case, another
- // thread has received the same LWP id. It will then receive a spurious
- // wakeup, but those are allowable per the API contract. The same reasoning
- // applies if a timeout occurred before this call, but the state was not
- // yet reset.
-
- // SAFETY:
- // The syscall has no invariants to hold. Only unsafe because it is an
- // extern function.
- unsafe {
- _lwp_unpark(lwp, hint);
- }
- }
- }
-}
diff --git a/library/std/src/sys/unix/thread_parker/darwin.rs b/library/std/src/sys/unix/thread_parking/darwin.rs
index 2f5356fe2..b709fada3 100644
--- a/library/std/src/sys/unix/thread_parker/darwin.rs
+++ b/library/std/src/sys/unix/thread_parking/darwin.rs
@@ -46,7 +46,7 @@ unsafe impl Sync for Parker {}
unsafe impl Send for Parker {}
impl Parker {
- pub unsafe fn new(parker: *mut Parker) {
+ pub unsafe fn new_in_place(parker: *mut Parker) {
let semaphore = dispatch_semaphore_create(0);
assert!(
!semaphore.is_null(),
diff --git a/library/std/src/sys/unix/thread_parking/mod.rs b/library/std/src/sys/unix/thread_parking/mod.rs
new file mode 100644
index 000000000..185333c07
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parking/mod.rs
@@ -0,0 +1,32 @@
+//! Thread parking on systems without futex support.
+
+#![cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+)))]
+
+cfg_if::cfg_if! {
+ if #[cfg(all(
+ any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "tvos",
+ ),
+ not(miri),
+ ))] {
+ mod darwin;
+ pub use darwin::Parker;
+ } else if #[cfg(target_os = "netbsd")] {
+ mod netbsd;
+ pub use netbsd::{current, park, park_timeout, unpark, ThreadId};
+ } else {
+ mod pthread;
+ pub use pthread::Parker;
+ }
+}
diff --git a/library/std/src/sys/unix/thread_parking/netbsd.rs b/library/std/src/sys/unix/thread_parking/netbsd.rs
new file mode 100644
index 000000000..3be081221
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parking/netbsd.rs
@@ -0,0 +1,52 @@
+use crate::ffi::{c_int, c_void};
+use crate::ptr;
+use crate::time::Duration;
+use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC};
+
+extern "C" {
+ fn ___lwp_park60(
+ clock_id: clockid_t,
+ flags: c_int,
+ ts: *mut timespec,
+ unpark: lwpid_t,
+ hint: *const c_void,
+ unparkhint: *const c_void,
+ ) -> c_int;
+ fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int;
+}
+
+pub type ThreadId = lwpid_t;
+
+#[inline]
+pub fn current() -> ThreadId {
+ unsafe { _lwp_self() }
+}
+
+#[inline]
+pub fn park(hint: usize) {
+ unsafe {
+ ___lwp_park60(0, 0, ptr::null_mut(), 0, ptr::invalid(hint), ptr::null());
+ }
+}
+
+pub fn park_timeout(dur: Duration, hint: usize) {
+ let mut timeout = timespec {
+ // Saturate so that the operation will definitely time out
+ // (even if it is after the heat death of the universe).
+ tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX),
+ tv_nsec: dur.subsec_nanos().into(),
+ };
+
+ // Timeout needs to be mutable since it is modified on NetBSD 9.0 and
+ // above.
+ unsafe {
+ ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, ptr::invalid(hint), ptr::null());
+ }
+}
+
+#[inline]
+pub fn unpark(tid: ThreadId, hint: usize) {
+ unsafe {
+ _lwp_unpark(tid, ptr::invalid(hint));
+ }
+}
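
The new NetBSD file exports bare id-based primitives (current/park/unpark plus
a hint word) instead of a whole Parker, leaving the parker state machine to
platform-independent code. Below is a minimal sketch of that layering,
mirroring the deleted AtomicU64 logic above; the pal module is a hypothetical
stand-in (yield-based, so it compiles anywhere), not the real
_lwp_self/___lwp_park60/_lwp_unpark bindings:

use std::sync::atomic::{
    AtomicI64,
    Ordering::{Acquire, Relaxed, Release},
};

// Hypothetical platform layer; on NetBSD these would wrap _lwp_self,
// ___lwp_park60 and _lwp_unpark. The bodies are placeholders.
mod pal {
    pub type ThreadId = i64;
    pub fn current() -> ThreadId {
        1 // assume real ids are never 0 (EMPTY) or -1 (NOTIFIED)
    }
    pub fn park(_hint: usize) {
        std::thread::yield_now();
    }
    pub fn unpark(_tid: ThreadId, _hint: usize) {}
}

const EMPTY: i64 = 0; // no parked thread, no token
const NOTIFIED: i64 = -1; // token available

pub struct Parker {
    state: AtomicI64,
}

impl Parker {
    pub const fn new() -> Self {
        Parker { state: AtomicI64::new(EMPTY) }
    }

    pub fn park(&self) {
        let hint = &self.state as *const _ as usize;
        // Publish our id; if a token is already present, skip the wait.
        if self.state.compare_exchange(EMPTY, pal::current(), Relaxed, Acquire).is_ok() {
            // Loop to guard against spurious wakeups.
            while self.state.load(Acquire) != NOTIFIED {
                pal::park(hint);
            }
        }
        self.state.store(EMPTY, Relaxed);
    }

    pub fn unpark(&self) {
        let state = self.state.swap(NOTIFIED, Release);
        if state != EMPTY && state != NOTIFIED {
            // A thread published its id before blocking: wake it.
            pal::unpark(state, &self.state as *const _ as usize);
        }
    }
}

As in the deleted implementation, the state word doubles as the wakeup target:
EMPTY and NOTIFIED are reserved, and any other value is the id of the parked
thread.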
diff --git a/library/std/src/sys/unix/thread_parker/pthread.rs b/library/std/src/sys/unix/thread_parking/pthread.rs
index 3dfc0026e..082d25e68 100644
--- a/library/std/src/sys/unix/thread_parker/pthread.rs
+++ b/library/std/src/sys/unix/thread_parking/pthread.rs
@@ -6,6 +6,7 @@ use crate::pin::Pin;
use crate::ptr::addr_of_mut;
use crate::sync::atomic::AtomicUsize;
use crate::sync::atomic::Ordering::SeqCst;
+use crate::sys::time::TIMESPEC_MAX;
use crate::time::Duration;
const EMPTY: usize = 0;
@@ -32,9 +33,6 @@ unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t
debug_assert_eq!(r, 0);
}
-const TIMESPEC_MAX: libc::timespec =
- libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
-
unsafe fn wait_timeout(
cond: *mut libc::pthread_cond_t,
lock: *mut libc::pthread_mutex_t,
@@ -46,7 +44,8 @@ unsafe fn wait_timeout(
target_os = "macos",
target_os = "ios",
target_os = "watchos",
- target_os = "espidf"
+ target_os = "espidf",
+ target_os = "horizon",
))]
let (now, dur) = {
use crate::cmp::min;
@@ -72,7 +71,8 @@ unsafe fn wait_timeout(
target_os = "macos",
target_os = "ios",
target_os = "watchos",
- target_os = "espidf"
+ target_os = "espidf",
+ target_os = "horizon",
)))]
let (now, dur) = {
use crate::sys::time::Timespec;
@@ -99,7 +99,7 @@ impl Parker {
///
/// # Safety
/// The constructed parker must never be moved.
- pub unsafe fn new(parker: *mut Parker) {
+ pub unsafe fn new_in_place(parker: *mut Parker) {
// Use the default mutex implementation to allow for simpler initialization.
// This could lead to undefined behaviour when deadlocking. This is avoided
// by not deadlocking. Note in particular the unlocking operation before any
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index d5abd9b58..2daad981b 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -5,6 +5,9 @@ pub use self::inner::Instant;
const NSEC_PER_SEC: u64 = 1_000_000_000;
pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
+#[allow(dead_code)] // Used for pthread condvar timeouts
+pub const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
index f5a4ce929..62ffee70b 100644
--- a/library/std/src/sys/unix/weak.rs
+++ b/library/std/src/sys/unix/weak.rs
@@ -1,9 +1,8 @@
//! Support for "weak linkage" to symbols on Unix
//!
-//! Some I/O operations we do in libstd require newer versions of OSes but we
-//! need to maintain binary compatibility with older releases for now. In order
-//! to use the new functionality when available we use this module for
-//! detection.
+//! Some I/O operations we do in std require newer versions of OSes but we need
+//! to maintain binary compatibility with older releases for now. In order to
+//! use the new functionality when available we use this module for detection.
//!
//! One option to use here is weak linkage, but that is unfortunately only
//! really workable with ELF. Otherwise, use dlsym to get the symbol value at
@@ -29,7 +28,7 @@ use crate::ptr;
use crate::sync::atomic::{self, AtomicPtr, Ordering};
// We can use true weak linkage on ELF targets.
-#[cfg(all(not(any(target_os = "macos", target_os = "ios")), not(bootstrap)))]
+#[cfg(not(any(target_os = "macos", target_os = "ios")))]
pub(crate) macro weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
@@ -43,30 +42,14 @@ pub(crate) macro weak {
)
}
-#[cfg(all(not(any(target_os = "macos", target_os = "ios")), bootstrap))]
-pub(crate) macro weak {
- (fn $name:ident($($t:ty),*) -> $ret:ty) => (
- let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
- extern "C" {
- #[linkage = "extern_weak"]
- static $name: *const libc::c_void;
- }
- #[allow(unused_unsafe)]
- ExternWeak::new(unsafe { $name })
- };
- )
-}
-
// On non-ELF targets, use the dlsym approximation of weak linkage.
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::dlsym as weak;
-#[cfg(not(bootstrap))]
pub(crate) struct ExternWeak<F: Copy> {
weak_ptr: Option<F>,
}
-#[cfg(not(bootstrap))]
impl<F: Copy> ExternWeak<F> {
#[inline]
pub(crate) fn new(weak_ptr: Option<F>) -> Self {
@@ -79,34 +62,6 @@ impl<F: Copy> ExternWeak<F> {
}
}
-#[cfg(bootstrap)]
-pub(crate) struct ExternWeak<F> {
- weak_ptr: *const libc::c_void,
- _marker: PhantomData<F>,
-}
-
-#[cfg(bootstrap)]
-impl<F> ExternWeak<F> {
- #[inline]
- pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self {
- ExternWeak { weak_ptr, _marker: PhantomData }
- }
-}
-
-#[cfg(bootstrap)]
-impl<F> ExternWeak<F> {
- #[inline]
- pub(crate) fn get(&self) -> Option<F> {
- unsafe {
- if self.weak_ptr.is_null() {
- None
- } else {
- Some(mem::transmute_copy::<*const libc::c_void, F>(&self.weak_ptr))
- }
- }
- }
-}
-
pub(crate) macro dlsym {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
dlsym!(fn $name($($t),*) -> $ret, stringify!($name));
diff --git a/library/std/src/sys/unsupported/mod.rs b/library/std/src/sys/unsupported/mod.rs
index 7bf6d40b7..15b22c620 100644
--- a/library/std/src/sys/unsupported/mod.rs
+++ b/library/std/src/sys/unsupported/mod.rs
@@ -9,6 +9,7 @@ pub mod fs;
pub mod io;
pub mod locks;
pub mod net;
+pub mod once;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
diff --git a/library/std/src/sys/unsupported/once.rs b/library/std/src/sys/unsupported/once.rs
new file mode 100644
index 000000000..b4bb4975f
--- /dev/null
+++ b/library/std/src/sys/unsupported/once.rs
@@ -0,0 +1,89 @@
+use crate::cell::Cell;
+use crate::sync as public;
+
+pub struct Once {
+ state: Cell<State>,
+}
+
+pub struct OnceState {
+ poisoned: bool,
+ set_state_to: Cell<State>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum State {
+ Incomplete,
+ Poisoned,
+ Running,
+ Complete,
+}
+
+struct CompletionGuard<'a> {
+ state: &'a Cell<State>,
+ set_state_on_drop_to: State,
+}
+
+impl<'a> Drop for CompletionGuard<'a> {
+ fn drop(&mut self) {
+ self.state.set(self.set_state_on_drop_to);
+ }
+}
+
+// Safety: threads are not supported on this platform.
+unsafe impl Sync for Once {}
+
+impl Once {
+ #[inline]
+ #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
+ pub const fn new() -> Once {
+ Once { state: Cell::new(State::Incomplete) }
+ }
+
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ self.state.get() == State::Complete
+ }
+
+ #[cold]
+ #[track_caller]
+ pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) {
+ let state = self.state.get();
+ match state {
+ State::Poisoned if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ State::Incomplete | State::Poisoned => {
+ self.state.set(State::Running);
+ // `guard` will set the new state on drop.
+ let mut guard =
+ CompletionGuard { state: &self.state, set_state_on_drop_to: State::Poisoned };
+ // Run the function, letting it know if we're poisoned or not.
+ let f_state = public::OnceState {
+ inner: OnceState {
+ poisoned: state == State::Poisoned,
+ set_state_to: Cell::new(State::Complete),
+ },
+ };
+ f(&f_state);
+ guard.set_state_on_drop_to = f_state.inner.set_state_to.get();
+ }
+ State::Running => {
+ panic!("one-time initialization may not be performed recursively");
+ }
+ State::Complete => {}
+ }
+ }
+}
+
+impl OnceState {
+ #[inline]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ #[inline]
+ pub fn poison(&self) {
+ self.set_state_to.set(State::Poisoned)
+ }
+}
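
The Incomplete/Running/Complete/Poisoned transitions above give this
single-threaded Once the same observable semantics as the futex and queue
implementations. A short example of how poisoning surfaces through the stable
public API (std::sync::Once::call_once_force and OnceState):

use std::sync::Once;

static INIT: Once = Once::new();

fn main() {
    // A panicking initializer poisons the Once...
    let _ = std::panic::catch_unwind(|| {
        INIT.call_once(|| panic!("init failed"));
    });

    // ...so a plain call_once would now panic. call_once_force runs anyway
    // and can observe the poison through OnceState.
    INIT.call_once_force(|state| {
        assert!(state.is_poisoned());
        // Returning normally here clears the poison.
    });

    assert!(INIT.is_completed());
}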
diff --git a/library/std/src/sys/unsupported/pipe.rs b/library/std/src/sys/unsupported/pipe.rs
index 25514c232..0bba673b4 100644
--- a/library/std/src/sys/unsupported/pipe.rs
+++ b/library/std/src/sys/unsupported/pipe.rs
@@ -15,6 +15,10 @@ impl AnonPipe {
self.0
}
+ pub fn read_to_end(&self, _buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.0
+ }
+
pub fn write(&self, _buf: &[u8]) -> io::Result<usize> {
self.0
}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
index 633f17c05..a494f2d6b 100644
--- a/library/std/src/sys/unsupported/process.rs
+++ b/library/std/src/sys/unsupported/process.rs
@@ -75,6 +75,10 @@ impl Command {
) -> io::Result<(Process, StdioPipes)> {
unsupported()
}
+
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ unsupported()
+ }
}
impl From<AnonPipe> for Stdio {
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index c8c47763a..1dc3f2b20 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -32,6 +32,8 @@ pub mod io;
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
pub mod net;
+#[path = "../unsupported/once.rs"]
+pub mod once;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod os_str;
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index d68c3e5f1..77ebe3c4a 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -66,6 +66,8 @@ cfg_if::cfg_if! {
} else {
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
+ #[path = "../unsupported/once.rs"]
+ pub mod once;
#[path = "../unsupported/thread.rs"]
pub mod thread;
}
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 81461de4f..f58dcf128 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -295,8 +295,6 @@ pub fn nt_success(status: NTSTATUS) -> bool {
status >= 0
}
-// "RNG\0"
-pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0];
pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002;
#[repr(C)]
@@ -834,6 +832,10 @@ if #[cfg(not(target_vendor = "uwp"))] {
#[link(name = "advapi32")]
extern "system" {
+ // Forbidden when targeting UWP
+ #[link_name = "SystemFunction036"]
+ pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN;
+
// Allowed but unused by UWP
pub fn OpenProcessToken(
ProcessHandle: HANDLE,
@@ -1258,13 +1260,6 @@ extern "system" {
cbBuffer: ULONG,
dwFlags: ULONG,
) -> NTSTATUS;
- pub fn BCryptOpenAlgorithmProvider(
- phalgorithm: *mut BCRYPT_ALG_HANDLE,
- pszAlgId: LPCWSTR,
- pszimplementation: LPCWSTR,
- dwflags: ULONG,
- ) -> NTSTATUS;
- pub fn BCryptCloseAlgorithmProvider(hAlgorithm: BCRYPT_ALG_HANDLE, dwFlags: ULONG) -> NTSTATUS;
}
// Functions that aren't available on every version of Windows that we support,
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index e67411e16..77359abe4 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -33,7 +33,7 @@ pub mod stdio;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
-pub mod thread_parker;
+pub mod thread_parking;
pub mod time;
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index 352337ba3..d7adeb266 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -157,7 +157,7 @@ impl<'a> Iterator for SplitPaths<'a> {
// Double quotes are used as a way of introducing literal semicolons
// (since c:\some;dir is a valid Windows path). Double quotes are not
// themselves permitted in path names, so there is no way to escape a
- // double quote.  Quoted regions can appear in arbitrary locations, so
+ // double quote. Quoted regions can appear in arbitrary locations, so
//
// c:\foo;c:\som"e;di"r;c:\bar
//
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index 9f26acc45..7b25edaa5 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -1,7 +1,7 @@
use crate::os::windows::prelude::*;
use crate::ffi::OsStr;
-use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{self, IoSlice, IoSliceMut, Read};
use crate::mem;
use crate::path::Path;
use crate::ptr;
@@ -261,6 +261,10 @@ impl AnonPipe {
self.inner.is_read_vectored()
}
+ pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.handle().read_to_end(buf)
+ }
+
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
unsafe {
let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 31e9b34fb..10bc949e1 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -351,6 +351,11 @@ impl Command {
))
}
}
+
+ pub fn output(&mut self) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?;
+ crate::sys_common::process::wait_with_output(proc, pipes)
+ }
}
impl fmt::Debug for Command {
diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs
index b5a49489d..cdf37cfe9 100644
--- a/library/std/src/sys/windows/rand.rs
+++ b/library/std/src/sys/windows/rand.rs
@@ -1,106 +1,39 @@
-//! # Random key generation
-//!
-//! This module wraps the RNG provided by the OS. There are a few different
-//! ways to interface with the OS RNG so it's worth exploring each of the options.
-//! Note that at the time of writing these all go through the (undocumented)
-//! `bcryptPrimitives.dll` but they use different route to get there.
-//!
-//! Originally we were using [`RtlGenRandom`], however that function is
-//! deprecated and warns it "may be altered or unavailable in subsequent versions".
-//!
-//! So we switched to [`BCryptGenRandom`] with the `BCRYPT_USE_SYSTEM_PREFERRED_RNG`
-//! flag to query and find the system configured RNG. However, this change caused a small
-//! but significant number of users to experience panics caused by a failure of
-//! this function. See [#94098].
-//!
-//! The current version falls back to using `BCryptOpenAlgorithmProvider` if
-//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason.
-//!
-//! [#94098]: https://github.com/rust-lang/rust/issues/94098
-//! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom
-//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
+use crate::io;
use crate::mem;
use crate::ptr;
use crate::sys::c;
-/// Generates high quality secure random keys for use by [`HashMap`].
-///
-/// This is used to seed the default [`RandomState`].
-///
-/// [`HashMap`]: crate::collections::HashMap
-/// [`RandomState`]: crate::collections::hash_map::RandomState
pub fn hashmap_random_keys() -> (u64, u64) {
- Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng)
+ let mut v = (0, 0);
+ let ret = unsafe {
+ c::BCryptGenRandom(
+ ptr::null_mut(),
+ &mut v as *mut _ as *mut u8,
+ mem::size_of_val(&v) as c::ULONG,
+ c::BCRYPT_USE_SYSTEM_PREFERRED_RNG,
+ )
+ };
+ if c::nt_success(ret) { v } else { fallback_rng() }
}
-struct Rng {
- algorithm: c::BCRYPT_ALG_HANDLE,
- flags: u32,
-}
-impl Rng {
- const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) };
-
- /// Create the RNG from an existing algorithm handle.
- ///
- /// # Safety
- ///
- /// The handle must either be null or a valid algorithm handle.
- const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self {
- Self { algorithm, flags }
- }
-
- /// Open a handle to the RNG algorithm.
- fn open() -> Result<Self, c::NTSTATUS> {
- use crate::sync::atomic::AtomicPtr;
- use crate::sync::atomic::Ordering::{Acquire, Release};
-
- // An atomic is used so we don't need to reopen the handle every time.
- static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(ptr::null_mut());
-
- let mut handle = HANDLE.load(Acquire);
- if handle.is_null() {
- let status = unsafe {
- c::BCryptOpenAlgorithmProvider(
- &mut handle,
- c::BCRYPT_RNG_ALGORITHM.as_ptr(),
- ptr::null(),
- 0,
- )
- };
- if c::nt_success(status) {
- // If another thread opens a handle first then use that handle instead.
- let result = HANDLE.compare_exchange(ptr::null_mut(), handle, Release, Acquire);
- if let Err(previous_handle) = result {
- // Close our handle and return the previous one.
- unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) };
- handle = previous_handle;
- }
- Ok(unsafe { Self::new(handle, 0) })
- } else {
- Err(status)
- }
- } else {
- Ok(unsafe { Self::new(handle, 0) })
- }
- }
+/// Generate random numbers using the fallback RNG function (RtlGenRandom)
+///
+/// This is necessary because of a failure to load the SysWOW64 variant of the
+/// bcryptprimitives.dll library from code that lives in bcrypt.dll
+/// See <https://bugzilla.mozilla.org/show_bug.cgi?id=1788004#c9>
+#[cfg(not(target_vendor = "uwp"))]
+#[inline(never)]
+fn fallback_rng() -> (u64, u64) {
+ let mut v = (0, 0);
+ let ret =
+ unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) };
- fn gen_random_keys(self) -> Result<(u64, u64), c::NTSTATUS> {
- let mut v = (0, 0);
- let status = unsafe {
- let size = mem::size_of_val(&v).try_into().unwrap();
- c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags)
- };
- if c::nt_success(status) { Ok(v) } else { Err(status) }
- }
+ if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) }
}
-/// Generate random numbers using the fallback RNG function
+/// We can't use RtlGenRandom with UWP, so there is no fallback
+#[cfg(target_vendor = "uwp")]
#[inline(never)]
-fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) {
- match Rng::open().and_then(|rng| rng.gen_random_keys()) {
- Ok(keys) => keys,
- Err(status) => {
- panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}")
- }
- }
+fn fallback_rng() -> (u64, u64) {
+ panic!("fallback RNG broken: RtlGenRandom() not supported on UWP");
}
diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs
index c5c9e97e6..1cb576c95 100644
--- a/library/std/src/sys/windows/thread.rs
+++ b/library/std/src/sys/windows/thread.rs
@@ -26,7 +26,7 @@ impl Thread {
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
- // PTHREAD_STACK_MIN bytes big.  Windows has no such lower limit, it's
+ // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
let ret = c::CreateThread(
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
deleted file mode 100644
index 2f7ae863b..000000000
--- a/library/std/src/sys/windows/thread_parker.rs
+++ /dev/null
@@ -1,253 +0,0 @@
-// Thread parker implementation for Windows.
-//
-// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+).
-// This modern API is exactly the same as the futex syscalls the Linux thread
-// parker uses. When These APIs are available, the implementation of this
-// thread parker matches the Linux thread parker exactly.
-//
-// However, when the modern API is not available, this implementation falls
-// back to NT Keyed Events, which are similar, but have some important
-// differences. These are available since Windows XP.
-//
-// WaitOnAddress first checks the state of the thread parker to make sure it no
-// WakeByAddressSingle calls can be missed between updating the parker state
-// and calling the function.
-//
-// NtWaitForKeyedEvent does not have this option, and unconditionally blocks
-// without checking the parker state first. Instead, NtReleaseKeyedEvent
-// (unlike WakeByAddressSingle) *blocks* until it woke up a thread waiting for
-// it by NtWaitForKeyedEvent. This way, we can be sure no events are missed,
-// but we need to be careful not to block unpark() if park_timeout() was woken
-// up by a timeout instead of unpark().
-//
-// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a
-// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure
-// a successfully awoken park() was awoken by unpark() and not a
-// NtReleaseKeyedEvent call from some other code, as these events are not only
-// matched by the key (address of the parker (state)), but also by this HANDLE.
-// We lazily allocate this handle the first time it is needed.
-//
-// The fast path (calling park() after unpark() was already called) and the
-// possible states are the same for both implementations. This is used here to
-// make sure the fast path does not even check which API to use, but can return
-// right away, independent of the used API. Only the slow paths (which will
-// actually block/wake a thread) check which API is available and have
-// different implementations.
-//
-// Unfortunately, NT Keyed Events are an undocumented Windows API. However:
-// - This API is relatively simple with obvious behaviour, and there are
-// several (unofficial) articles documenting the details. [1]
-// - `parking_lot` has been using this API for years (on Windows versions
-// before Windows 8). [2] Many big projects extensively use parking_lot,
-// such as servo and the Rust compiler itself.
-// - It is the underlying API used by Windows SRW locks and Windows critical
-// sections. [3] [4]
-// - The source code of the implementations of Wine, ReactOs, and Windows XP
-// are available and match the expected behaviour.
-// - The main risk with an undocumented API is that it might change in the
-// future. But since we only use it for older versions of Windows, that's not
-// a problem.
-// - Even if these functions do not block or wake as we expect (which is
-// unlikely, see all previous points), this implementation would still be
-// memory safe. The NT Keyed Events API is only used to sleep/block in the
-// right place.
-//
-// [1]: http://www.locklessinc.com/articles/keyed_events/
-// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e
-// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c
-// [4]: Windows Internals, Part 1, ISBN 9780735671300
-
-use crate::pin::Pin;
-use crate::ptr;
-use crate::sync::atomic::{
- AtomicI8, AtomicPtr,
- Ordering::{Acquire, Relaxed, Release},
-};
-use crate::sys::{c, dur2timeout};
-use crate::time::Duration;
-
-pub struct Parker {
- state: AtomicI8,
-}
-
-const PARKED: i8 = -1;
-const EMPTY: i8 = 0;
-const NOTIFIED: i8 = 1;
-
-// Notes about memory ordering:
-//
-// Memory ordering is only relevant for the relative ordering of operations
-// between different variables. Even Ordering::Relaxed guarantees a
-// monotonic/consistent order when looking at just a single atomic variable.
-//
-// So, since this parker is just a single atomic variable, we only need to look
-// at the ordering guarantees we need to provide to the 'outside world'.
-//
-// The only memory ordering guarantee that parking and unparking provide, is
-// that things which happened before unpark() are visible on the thread
-// returning from park() afterwards. Otherwise, it was effectively unparked
-// before unpark() was called while still consuming the 'token'.
-//
-// In other words, unpark() needs to synchronize with the part of park() that
-// consumes the token and returns.
-//
-// This is done with a release-acquire synchronization, by using
-// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
-// Ordering::Acquire when reading this state in park() after waking up.
-impl Parker {
- /// Construct the Windows parker. The UNIX parker implementation
- /// requires this to happen in-place.
- pub unsafe fn new(parker: *mut Parker) {
- parker.write(Self { state: AtomicI8::new(EMPTY) });
- }
-
- // Assumes this is only called by the thread that owns the Parker,
- // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
- // but other implementations do.
- pub unsafe fn park(self: Pin<&Self>) {
- // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
- // first case.
- if self.state.fetch_sub(1, Acquire) == NOTIFIED {
- return;
- }
-
- if let Some(wait_on_address) = c::WaitOnAddress::option() {
- loop {
- // Wait for something to happen, assuming it's still set to PARKED.
- wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE);
- // Change NOTIFIED=>EMPTY but leave PARKED alone.
- if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
- // Actually woken up by unpark().
- return;
- } else {
- // Spurious wake up. We loop to try again.
- }
- }
- } else {
- // Wait for unpark() to produce this event.
- c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
- // Set the state back to EMPTY (from either PARKED or NOTIFIED).
- // Note that we don't just write EMPTY, but use swap() to also
- // include an acquire-ordered read to synchronize with unpark()'s
- // release-ordered write.
- self.state.swap(EMPTY, Acquire);
- }
- }
-
- // Assumes this is only called by the thread that owns the Parker,
- // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
- // but other implementations do.
- pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
- // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
- // first case.
- if self.state.fetch_sub(1, Acquire) == NOTIFIED {
- return;
- }
-
- if let Some(wait_on_address) = c::WaitOnAddress::option() {
- // Wait for something to happen, assuming it's still set to PARKED.
- wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout));
- // Set the state back to EMPTY (from either PARKED or NOTIFIED).
- // Note that we don't just write EMPTY, but use swap() to also
- // include an acquire-ordered read to synchronize with unpark()'s
- // release-ordered write.
- if self.state.swap(EMPTY, Acquire) == NOTIFIED {
- // Actually woken up by unpark().
- } else {
- // Timeout or spurious wake up.
- // We return either way, because we can't easily tell if it was the
- // timeout or not.
- }
- } else {
- // Need to wait for unpark() using NtWaitForKeyedEvent.
- let handle = keyed_event_handle();
-
- // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative
- // values to indicate a relative time on the monotonic clock.
- // This is documented here for the underlying KeWaitForSingleObject function:
- // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
- let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) {
- Ok(t) => -t,
- Err(_) => i64::MIN,
- };
-
- // Wait for unpark() to produce this event.
- let unparked =
- c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS;
-
- // Set the state back to EMPTY (from either PARKED or NOTIFIED).
- let prev_state = self.state.swap(EMPTY, Acquire);
-
- if !unparked && prev_state == NOTIFIED {
- // We were awoken by a timeout, not by unpark(), but the state
- // was set to NOTIFIED, which means we *just* missed an
- // unpark(), which is now blocked on us to wait for it.
- // Wait for it to consume the event and unblock that thread.
- c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut());
- }
- }
- }
-
- // This implementation doesn't require `Pin`, but other implementations do.
- pub fn unpark(self: Pin<&Self>) {
- // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
- // wake the thread in the first case.
- //
- // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
- // purpose, to make sure every unpark() has a release-acquire ordering
- // with park().
- if self.state.swap(NOTIFIED, Release) == PARKED {
- unsafe {
- if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
- wake_by_address_single(self.ptr());
- } else {
- // If we run NtReleaseKeyedEvent before the waiting thread runs
- // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
- // If the waiting thread wakes up before we run NtReleaseKeyedEvent
- // (e.g. due to a timeout), this blocks until we do wake up a thread.
- // To prevent this thread from blocking indefinitely in that case,
- // park_impl() will, after seeing the state set to NOTIFIED after
- // waking up, call NtWaitForKeyedEvent again to unblock us.
- c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
- }
- }
- }
- }
-
- fn ptr(&self) -> c::LPVOID {
- &self.state as *const _ as c::LPVOID
- }
-}
-
-fn keyed_event_handle() -> c::HANDLE {
- const INVALID: c::HANDLE = ptr::invalid_mut(!0);
- static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
- match HANDLE.load(Relaxed) {
- INVALID => {
- let mut handle = c::INVALID_HANDLE_VALUE;
- unsafe {
- match c::NtCreateKeyedEvent(
- &mut handle,
- c::GENERIC_READ | c::GENERIC_WRITE,
- ptr::null_mut(),
- 0,
- ) {
- c::STATUS_SUCCESS => {}
- r => panic!("Unable to create keyed event handle: error {r}"),
- }
- }
- match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) {
- Ok(_) => handle,
- Err(h) => {
- // Lost the race to another thread initializing HANDLE before we did.
- // Closing our handle and using theirs instead.
- unsafe {
- c::CloseHandle(handle);
- }
- h
- }
- }
- }
- handle => handle,
- }
-}
diff --git a/library/std/src/sys/windows/thread_parking.rs b/library/std/src/sys/windows/thread_parking.rs
new file mode 100644
index 000000000..5d43676ad
--- /dev/null
+++ b/library/std/src/sys/windows/thread_parking.rs
@@ -0,0 +1,253 @@
+// Thread parker implementation for Windows.
+//
+// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+).
+// This modern API is exactly the same as the futex syscalls the Linux thread
+// parker uses. When these APIs are available, the implementation of this
+// thread parker matches the Linux thread parker exactly.
+//
+// However, when the modern API is not available, this implementation falls
+// back to NT Keyed Events, which are similar, but have some important
+// differences. These are available since Windows XP.
+//
+// WaitOnAddress first checks the state of the thread parker to make sure no
+// WakeByAddressSingle calls can be missed between updating the parker state
+// and calling the function.
+//
+// NtWaitForKeyedEvent does not have this option, and unconditionally blocks
+// without checking the parker state first. Instead, NtReleaseKeyedEvent
+// (unlike WakeByAddressSingle) *blocks* until it has woken up a thread waiting
+// for it via NtWaitForKeyedEvent. This way, we can be sure no events are missed,
+// but we need to be careful not to block unpark() if park_timeout() was woken
+// up by a timeout instead of unpark().
+//
+// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a
+// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure
+// a successfully awoken park() was awoken by unpark() and not a
+// NtReleaseKeyedEvent call from some other code, as these events are not only
+// matched by the key (address of the parker (state)), but also by this HANDLE.
+// We lazily allocate this handle the first time it is needed.
+//
+// The fast path (calling park() after unpark() was already called) and the
+// possible states are the same for both implementations. This is used here to
+// make sure the fast path does not even check which API to use, but can return
+// right away, independent of the used API. Only the slow paths (which will
+// actually block/wake a thread) check which API is available and have
+// different implementations.
+//
+// Unfortunately, NT Keyed Events are an undocumented Windows API. However:
+// - This API is relatively simple with obvious behaviour, and there are
+// several (unofficial) articles documenting the details. [1]
+// - `parking_lot` has been using this API for years (on Windows versions
+// before Windows 8). [2] Many big projects extensively use parking_lot,
+// such as servo and the Rust compiler itself.
+// - It is the underlying API used by Windows SRW locks and Windows critical
+// sections. [3] [4]
+// - The source code of the implementations of Wine, ReactOS, and Windows XP
+// are available and match the expected behaviour.
+// - The main risk with an undocumented API is that it might change in the
+// future. But since we only use it for older versions of Windows, that's not
+// a problem.
+// - Even if these functions do not block or wake as we expect (which is
+// unlikely, see all previous points), this implementation would still be
+// memory safe. The NT Keyed Events API is only used to sleep/block in the
+// right place.
+//
+// [1]: http://www.locklessinc.com/articles/keyed_events/
+// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e
+// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c
+// [4]: Windows Internals, Part 1, ISBN 9780735671300
+
+use crate::pin::Pin;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicI8, AtomicPtr,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::{c, dur2timeout};
+use crate::time::Duration;
+
+pub struct Parker {
+ state: AtomicI8,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide, is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, it was effectively unparked
+// before unpark() was called while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when reading this state in park() after waking up.
+impl Parker {
+ /// Construct the Windows parker. The UNIX parker implementation
+ /// requires this to happen in-place.
+ pub unsafe fn new_in_place(parker: *mut Parker) {
+ parker.write(Self { state: AtomicI8::new(EMPTY) });
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
+ // but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if let Some(wait_on_address) = c::WaitOnAddress::option() {
+ loop {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE);
+ // Change NOTIFIED=>EMPTY but leave PARKED alone.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
+ // Actually woken up by unpark().
+ return;
+ } else {
+ // Spurious wake up. We loop to try again.
+ }
+ }
+ } else {
+ // Wait for unpark() to produce this event.
+ c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ self.state.swap(EMPTY, Acquire);
+ }
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`. This implementation doesn't require `Pin`,
+ // but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if let Some(wait_on_address) = c::WaitOnAddress::option() {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout));
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+ // Actually woken up by unpark().
+ } else {
+ // Timeout or spurious wake up.
+ // We return either way, because we can't easily tell if it was the
+ // timeout or not.
+ }
+ } else {
+ // Need to wait for unpark() using NtWaitForKeyedEvent.
+ let handle = keyed_event_handle();
+
+ // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative
+ // values to indicate a relative time on the monotonic clock.
+ // This is documented here for the underlying KeWaitForSingleObject function:
+ // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
+ let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) {
+ Ok(t) => -t,
+ Err(_) => i64::MIN,
+ };
+
+ // Wait for unpark() to produce this event.
+ let unparked =
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS;
+
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ let prev_state = self.state.swap(EMPTY, Acquire);
+
+ if !unparked && prev_state == NOTIFIED {
+ // We were awoken by a timeout, not by unpark(), but the state
+ // was set to NOTIFIED, which means we *just* missed an
+ // unpark(), which is now blocked until we receive its event.
+ // Wait on the event to consume it and unblock the unparking thread.
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+ // wake the thread in the first case.
+ //
+ // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+ // purpose, to make sure every unpark() has a release-acquire ordering
+ // with park().
+ if self.state.swap(NOTIFIED, Release) == PARKED {
+ unsafe {
+ if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
+ wake_by_address_single(self.ptr());
+ } else {
+ // If we run NtReleaseKeyedEvent before the waiting thread runs
+ // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+ // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+ // (e.g. due to a timeout), this blocks until we do wake up a thread.
+ // To prevent this thread from blocking indefinitely in that case,
+ // park_impl() will, after seeing the state set to NOTIFIED after
+ // waking up, call NtWaitForKeyedEvent again to unblock us.
+ c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+ }
+
+ fn ptr(&self) -> c::LPVOID {
+ &self.state as *const _ as c::LPVOID
+ }
+}
+
+fn keyed_event_handle() -> c::HANDLE {
+ const INVALID: c::HANDLE = ptr::invalid_mut(!0);
+ static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
+ match HANDLE.load(Relaxed) {
+ INVALID => {
+ let mut handle = c::INVALID_HANDLE_VALUE;
+ unsafe {
+ match c::NtCreateKeyedEvent(
+ &mut handle,
+ c::GENERIC_READ | c::GENERIC_WRITE,
+ ptr::null_mut(),
+ 0,
+ ) {
+ c::STATUS_SUCCESS => {}
+ r => panic!("Unable to create keyed event handle: error {r}"),
+ }
+ }
+ match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) {
+ Ok(_) => handle,
+ Err(h) => {
+ // Lost the race to another thread initializing HANDLE before we did.
+ // Close our handle and use theirs instead.
+ unsafe {
+ c::CloseHandle(handle);
+ }
+ h
+ }
+ }
+ }
+ handle => handle,
+ }
+}
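
The EMPTY/PARKED/NOTIFIED protocol described in the comments can be modeled
without any OS wait primitive. Here is a self-contained sketch in which a spin
loop stands in for WaitOnAddress/NtWaitForKeyedEvent and the wakeup becomes a
no-op; it illustrates the state transitions and orderings only and is not a
usable parker:

use std::sync::atomic::{
    AtomicI8,
    Ordering::{Acquire, Release},
};

const PARKED: i8 = -1;
const EMPTY: i8 = 0;
const NOTIFIED: i8 = 1;

pub struct Parker {
    state: AtomicI8,
}

impl Parker {
    pub const fn new() -> Self {
        Parker { state: AtomicI8::new(EMPTY) }
    }

    // Only the owning thread calls this, so state is never PARKED on entry.
    pub fn park(&self) {
        // NOTIFIED => EMPTY consumes the token; EMPTY => PARKED records intent.
        if self.state.fetch_sub(1, Acquire) == NOTIFIED {
            return;
        }
        // A real parker blocks here instead of spinning.
        while self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_err() {
            std::hint::spin_loop();
        }
    }

    pub fn unpark(&self) {
        // Always write NOTIFIED, even over NOTIFIED, so every unpark()
        // release-synchronizes with the park() that consumes the token.
        if self.state.swap(NOTIFIED, Release) == PARKED {
            // A real implementation wakes the parked thread here
            // (WakeByAddressSingle / NtReleaseKeyedEvent).
        }
    }
}

fn main() {
    let p = std::sync::Arc::new(Parker::new());
    let p2 = p.clone();
    let t = std::thread::spawn(move || p2.park());
    p.unpark();
    t.join().unwrap();
}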
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 8807077cb..f1d804ef4 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -20,7 +20,7 @@ pub fn lock() -> impl Drop {
/// Prints the current backtrace.
pub fn print(w: &mut dyn Write, format: PrintFmt) -> io::Result<()> {
// There are issues currently linking libbacktrace into tests, and in
- // general during libstd's own unit tests we're not testing this path. In
+ // general during std's own unit tests we're not testing this path. In
// test mode immediately return here to optimize away any references to the
// libbacktrace symbols
if cfg!(test) {
@@ -111,7 +111,7 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
}
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
-/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// this is only inline(never) when backtraces in std are enabled, otherwise
/// it's fine to optimize away.
#[cfg_attr(feature = "backtrace", inline(never))]
pub fn __rust_begin_short_backtrace<F, T>(f: F) -> T
@@ -127,7 +127,7 @@ where
}
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
-/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// this is only inline(never) when backtraces in std are enabled, otherwise
/// it's fine to optimize away.
#[cfg_attr(feature = "backtrace", inline(never))]
pub fn __rust_end_short_backtrace<F, T>(f: F) -> T
diff --git a/library/std/src/sys_common/io.rs b/library/std/src/sys_common/io.rs
index d1e9fed41..4a42ff3c6 100644
--- a/library/std/src/sys_common/io.rs
+++ b/library/std/src/sys_common/io.rs
@@ -39,9 +39,10 @@ pub mod test {
}
}
+ #[track_caller] // for `test_rng`
pub fn tmpdir() -> TempDir {
let p = env::temp_dir();
- let mut r = rand::thread_rng();
+ let mut r = crate::test_helpers::test_rng();
let ret = p.join(&format!("rust-{}", r.next_u32()));
fs::create_dir(&ret).unwrap();
TempDir(ret)
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index b1987aa0f..6b24b0e9a 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -27,11 +27,10 @@ pub mod lazy_box;
pub mod memchr;
pub mod once;
pub mod process;
-pub mod remutex;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
-pub mod thread_parker;
+pub mod thread_parking;
pub mod wstr;
pub mod wtf8;
diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs
index 8742e68cc..359697d83 100644
--- a/library/std/src/sys_common/once/mod.rs
+++ b/library/std/src/sys_common/once/mod.rs
@@ -6,22 +6,6 @@
// As a result, we end up implementing it ourselves in the standard library.
// This also gives us the opportunity to optimize the implementation a bit which
// should help the fast path on call sites.
-//
-// So to recap, the guarantees of a Once are that it will call the
-// initialization closure at most once, and it will never return until the one
-// that's running has finished running. This means that we need some form of
-// blocking here while the custom callback is running at the very least.
-// Additionally, we add on the restriction of **poisoning**. Whenever an
-// initialization closure panics, the Once enters a "poisoned" state which means
-// that all future calls will immediately panic as well.
-//
-// So to implement this, one might first reach for a `Mutex`, but those cannot
-// be put into a `static`. It also gets a lot harder with poisoning to figure
-// out when the mutex needs to be deallocated because it's not after the closure
-// finishes, but after the first successful closure finishes.
-//
-// All in all, this is instead implemented with atomics and lock-free
-// operations! Whee!
cfg_if::cfg_if! {
if #[cfg(any(
@@ -36,8 +20,15 @@ cfg_if::cfg_if! {
))] {
mod futex;
pub use futex::{Once, OnceState};
+ } else if #[cfg(any(
+ windows,
+ target_family = "unix",
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ target_os = "solid_asp3",
+ ))] {
+ mod queue;
+ pub use queue::{Once, OnceState};
} else {
- mod generic;
- pub use generic::{Once, OnceState};
+ pub use crate::sys::once::{Once, OnceState};
}
}
diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/queue.rs
index d953a6745..d953a6745 100644
--- a/library/std/src/sys_common/once/generic.rs
+++ b/library/std/src/sys_common/once/queue.rs
diff --git a/library/std/src/sys_common/process.rs b/library/std/src/sys_common/process.rs
index 9f978789a..18883048d 100644
--- a/library/std/src/sys_common/process.rs
+++ b/library/std/src/sys_common/process.rs
@@ -4,10 +4,13 @@
use crate::collections::BTreeMap;
use crate::env;
use crate::ffi::{OsStr, OsString};
-use crate::sys::process::EnvKey;
+use crate::fmt;
+use crate::io;
+use crate::sys::pipe::read2;
+use crate::sys::process::{EnvKey, ExitStatus, Process, StdioPipes};
// Stores a set of changes to an environment
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub struct CommandEnv {
clear: bool,
saw_path: bool,
@@ -20,6 +23,14 @@ impl Default for CommandEnv {
}
}
+impl fmt::Debug for CommandEnv {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut debug_command_env = f.debug_struct("CommandEnv");
+ debug_command_env.field("clear", &self.clear).field("vars", &self.vars);
+ debug_command_env.finish()
+ }
+}
+
impl CommandEnv {
// Capture the current environment with these changes applied
pub fn capture(&self) -> BTreeMap<EnvKey, OsString> {
@@ -117,3 +128,30 @@ impl<'a> ExactSizeIterator for CommandEnvs<'a> {
self.iter.is_empty()
}
}
+
+pub fn wait_with_output(
+ mut process: Process,
+ mut pipes: StdioPipes,
+) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+ drop(pipes.stdin.take());
+
+ let (mut stdout, mut stderr) = (Vec::new(), Vec::new());
+ match (pipes.stdout.take(), pipes.stderr.take()) {
+ (None, None) => {}
+ (Some(out), None) => {
+ let res = out.read_to_end(&mut stdout);
+ res.unwrap();
+ }
+ (None, Some(err)) => {
+ let res = err.read_to_end(&mut stderr);
+ res.unwrap();
+ }
+ (Some(out), Some(err)) => {
+ let res = read2(out, &mut stdout, err, &mut stderr);
+ res.unwrap();
+ }
+ }
+
+ let status = process.wait()?;
+ Ok((status, stdout, stderr))
+}
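
wait_with_output is the shared plumbing behind the public Command::output: the
Windows backend above routes its output() through it, and read2 drains stdout
and stderr concurrently before the child is reaped. A short example of the API
it serves, assuming a Unix-like system with an echo binary:

use std::process::Command;

fn main() -> std::io::Result<()> {
    let out = Command::new("echo").arg("hello").output()?;
    // Both pipes were fully drained before wait() returned the status.
    assert!(out.status.success());
    assert_eq!(String::from_utf8_lossy(&out.stdout).trim(), "hello");
    Ok(())
}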
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
deleted file mode 100644
index 8e97ce11c..000000000
--- a/library/std/src/sys_common/remutex/tests.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use crate::cell::RefCell;
-use crate::sync::Arc;
-use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
-use crate::thread;
-
-#[test]
-fn smoke() {
- let m = ReentrantMutex::new(());
- {
- let a = m.lock();
- {
- let b = m.lock();
- {
- let c = m.lock();
- assert_eq!(*c, ());
- }
- assert_eq!(*b, ());
- }
- assert_eq!(*a, ());
- }
-}
-
-#[test]
-fn is_mutex() {
- let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
- let m2 = m.clone();
- let lock = m.lock();
- let child = thread::spawn(move || {
- let lock = m2.lock();
- assert_eq!(*lock.borrow(), 4950);
- });
- for i in 0..100 {
- let lock = m.lock();
- *lock.borrow_mut() += i;
- }
- drop(lock);
- child.join().unwrap();
-}
-
-#[test]
-fn trylock_works() {
- let m = Arc::new(ReentrantMutex::new(()));
- let m2 = m.clone();
- let _lock = m.try_lock();
- let _lock2 = m.try_lock();
- thread::spawn(move || {
- let lock = m2.try_lock();
- assert!(lock.is_none());
- })
- .join()
- .unwrap();
- let _lock3 = m.try_lock();
-}
-
-pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
-impl Drop for Answer<'_> {
- fn drop(&mut self) {
- *self.0.borrow_mut() = 42;
- }
-}
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 747579f17..2672a2a75 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -117,10 +117,14 @@ pub struct Key {
/// This value specifies no destructor by default.
pub const INIT: StaticKey = StaticKey::new(None);
+// Define a sentinel value that is unlikely to be returned
+// as a TLS key (though the platform may still return it).
+const KEY_SENTVAL: usize = 0;
+
impl StaticKey {
#[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
pub const fn new(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> StaticKey {
- StaticKey { key: atomic::AtomicUsize::new(0), dtor }
+ StaticKey { key: atomic::AtomicUsize::new(KEY_SENTVAL), dtor }
}
/// Gets the value associated with this TLS key
@@ -144,31 +148,36 @@ impl StaticKey {
#[inline]
unsafe fn key(&self) -> imp::Key {
match self.key.load(Ordering::Relaxed) {
- 0 => self.lazy_init() as imp::Key,
+ KEY_SENTVAL => self.lazy_init() as imp::Key,
n => n as imp::Key,
}
}
unsafe fn lazy_init(&self) -> usize {
- // POSIX allows the key created here to be 0, but the compare_exchange
- // below relies on using 0 as a sentinel value to check who won the
+ // POSIX allows the key created here to be KEY_SENTVAL, but the compare_exchange
+ // below relies on using KEY_SENTVAL as a sentinel value to check who won the
// race to set the shared TLS key. As far as I know, there is no
// guaranteed value that cannot be returned as a posix_key_create key,
// so there is no value we can initialize the inner key with to
// prove that it has not yet been set. As such, we'll continue using a
- // value of 0, but with some gyrations to make sure we have a non-0
+ // value of KEY_SENTVAL, but with some gyrations to make sure we have a non-KEY_SENTVAL
// value returned from the creation routine.
// FIXME: this is clearly a hack, and should be cleaned up.
let key1 = imp::create(self.dtor);
- let key = if key1 != 0 {
+ let key = if key1 as usize != KEY_SENTVAL {
key1
} else {
let key2 = imp::create(self.dtor);
imp::destroy(key1);
key2
};
- rtassert!(key != 0);
- match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) {
+ rtassert!(key as usize != KEY_SENTVAL);
+ match self.key.compare_exchange(
+ KEY_SENTVAL,
+ key as usize,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ ) {
// The CAS succeeded, so we've created the actual key
Ok(_) => key as usize,
// If someone beat us to the punch, use their key instead
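
Naming the sentinel as KEY_SENTVAL makes the scheme explicit: CAS-based lazy
init needs an "uninitialized" marker, but POSIX reserves no key value for that
purpose, hence the allocate-twice gyration. A standalone sketch of the same
retry-and-CAS pattern, with create_resource/destroy_resource as hypothetical
stand-ins for imp::create/imp::destroy:

use std::sync::atomic::{
    AtomicUsize,
    Ordering::{Relaxed, SeqCst},
};

const SENTINEL: usize = 0;
static KEY: AtomicUsize = AtomicUsize::new(SENTINEL);

// Hypothetical allocator; like pthread_key_create, it could in principle
// hand out the sentinel value itself.
fn create_resource() -> usize {
    42
}
fn destroy_resource(_key: usize) {}

fn key() -> usize {
    match KEY.load(Relaxed) {
        SENTINEL => lazy_init(),
        k => k,
    }
}

fn lazy_init() -> usize {
    // If the allocator returned the sentinel, allocate again and free the
    // first one, guaranteeing a non-SENTINEL value.
    let key1 = create_resource();
    let key = if key1 != SENTINEL {
        key1
    } else {
        let key2 = create_resource();
        destroy_resource(key1);
        key2
    };
    assert!(key != SENTINEL);
    match KEY.compare_exchange(SENTINEL, key, SeqCst, SeqCst) {
        // The CAS succeeded: our value is now the shared one.
        Ok(_) => key,
        // Another thread won the race: free ours and use theirs.
        Err(theirs) => {
            destroy_resource(key);
            theirs
        }
    }
}

fn main() {
    assert_eq!(key(), 42);
}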
diff --git a/library/std/src/sys_common/thread_parker/generic.rs b/library/std/src/sys_common/thread_parker/generic.rs
deleted file mode 100644
index f3d8b34d3..000000000
--- a/library/std/src/sys_common/thread_parker/generic.rs
+++ /dev/null
@@ -1,125 +0,0 @@
-//! Parker implementation based on a Mutex and Condvar.
-
-use crate::pin::Pin;
-use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
-use crate::sync::{Condvar, Mutex};
-use crate::time::Duration;
-
-const EMPTY: usize = 0;
-const PARKED: usize = 1;
-const NOTIFIED: usize = 2;
-
-pub struct Parker {
- state: AtomicUsize,
- lock: Mutex<()>,
- cvar: Condvar,
-}
-
-impl Parker {
- /// Construct the generic parker. The UNIX parker implementation
- /// requires this to happen in-place.
- pub unsafe fn new(parker: *mut Parker) {
- parker.write(Parker {
- state: AtomicUsize::new(EMPTY),
- lock: Mutex::new(()),
- cvar: Condvar::new(),
- });
- }
-
- // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
- pub unsafe fn park(self: Pin<&Self>) {
- // If we were previously notified then we consume this notification and
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
-
- // Otherwise we need to coordinate going to sleep
- let mut m = self.lock.lock().unwrap();
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read here, even though we know it will be `NOTIFIED`.
- // This is because `unpark` may have been called again since we read
- // `NOTIFIED` in the `compare_exchange` above. We must perform an
- // acquire operation that synchronizes with that `unpark` to observe
- // any writes it made before the call to unpark. To do that we must
- // read from the write it made to `state`.
- let old = self.state.swap(EMPTY, SeqCst);
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => panic!("inconsistent park state"),
- }
- loop {
- m = self.cvar.wait(m).unwrap();
- match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
- Ok(_) => return, // got a notification
- Err(_) => {} // spurious wakeup, go back to sleep
- }
- }
- }
-
- // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
- pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
- // Like `park` above we have a fast path for an already-notified thread, and
- // afterwards we start coordinating for a sleep.
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
- let m = self.lock.lock().unwrap();
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read again here, see `park`.
- let old = self.state.swap(EMPTY, SeqCst);
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => panic!("inconsistent park_timeout state"),
- }
-
- // Wait with a timeout, and if we spuriously wake up or otherwise wake up
- // from a notification we just want to unconditionally set the state back to
- // empty, either consuming a notification or un-flagging ourselves as
- // parked.
- let (_m, _result) = self.cvar.wait_timeout(m, dur).unwrap();
- match self.state.swap(EMPTY, SeqCst) {
- NOTIFIED => {} // got a notification, hurray!
- PARKED => {} // no notification, alas
- n => panic!("inconsistent park_timeout state: {n}"),
- }
- }
-
- // This implementation doesn't require `Pin`, but other implementations do.
- pub fn unpark(self: Pin<&Self>) {
- // To ensure the unparked thread will observe any writes we made
- // before this call, we must perform a release operation that `park`
- // can synchronize with. To do that we must write `NOTIFIED` even if
- // `state` is already `NOTIFIED`. That is why this must be a swap
- // rather than a compare-and-swap that returns if it reads `NOTIFIED`
- // on failure.
- match self.state.swap(NOTIFIED, SeqCst) {
- EMPTY => return, // no one was waiting
- NOTIFIED => return, // already unparked
- PARKED => {} // gotta go wake someone up
- _ => panic!("inconsistent state in unpark"),
- }
-
- // There is a period between when the parked thread sets `state` to
- // `PARKED` (or last checked `state` in the case of a spurious wake
- // up) and when it actually waits on `cvar`. If we were to notify
- // during this period it would be ignored and then when the parked
- // thread went to sleep it would never wake up. Fortunately, it has
- // `lock` locked at this stage so we can acquire `lock` to wait until
- // it is ready to receive the notification.
- //
- // Releasing `lock` before the call to `notify_one` means that when the
- // parked thread wakes it doesn't get woken only to have to wait for us
- // to release `lock`.
- drop(self.lock.lock().unwrap());
- self.cvar.notify_one()
- }
-}
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
deleted file mode 100644
index f86a9a555..000000000
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-cfg_if::cfg_if! {
- if #[cfg(any(
- target_os = "linux",
- target_os = "android",
- all(target_arch = "wasm32", target_feature = "atomics"),
- target_os = "freebsd",
- target_os = "openbsd",
- target_os = "dragonfly",
- target_os = "fuchsia",
- target_os = "hermit",
- ))] {
- mod futex;
- pub use futex::Parker;
- } else if #[cfg(target_os = "solid_asp3")] {
- mod wait_flag;
- pub use wait_flag::Parker;
- } else if #[cfg(any(windows, target_family = "unix"))] {
- pub use crate::sys::thread_parker::Parker;
- } else {
- mod generic;
- pub use generic::Parker;
- }
-}
diff --git a/library/std/src/sys_common/thread_parker/futex.rs b/library/std/src/sys_common/thread_parking/futex.rs
index d9e2f39e3..588e7b278 100644
--- a/library/std/src/sys_common/thread_parker/futex.rs
+++ b/library/std/src/sys_common/thread_parking/futex.rs
@@ -35,7 +35,7 @@ pub struct Parker {
impl Parker {
/// Construct the futex parker. The UNIX parker implementation
/// requires this to happen in-place.
- pub unsafe fn new(parker: *mut Parker) {
+ pub unsafe fn new_in_place(parker: *mut Parker) {
parker.write(Self { state: AtomicU32::new(EMPTY) });
}
diff --git a/library/std/src/sys_common/thread_parking/generic.rs b/library/std/src/sys_common/thread_parking/generic.rs
new file mode 100644
index 000000000..3209bffe3
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/generic.rs
@@ -0,0 +1,125 @@
+//! Parker implementation based on a Mutex and Condvar.
+
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::{Condvar, Mutex};
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+pub struct Parker {
+ state: AtomicUsize,
+ lock: Mutex<()>,
+ cvar: Condvar,
+}
+
+impl Parker {
+ /// Construct the generic parker. The UNIX parker implementation
+ /// requires this to happen in-place.
+ pub unsafe fn new_in_place(parker: *mut Parker) {
+ parker.write(Parker {
+ state: AtomicUsize::new(EMPTY),
+ lock: Mutex::new(()),
+ cvar: Condvar::new(),
+ });
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ let mut m = self.lock.lock().unwrap();
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => panic!("inconsistent park state"),
+ }
+ loop {
+ m = self.cvar.wait(m).unwrap();
+ match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+ Ok(_) => return, // got a notification
+ Err(_) => {} // spurious wakeup, go back to sleep
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        // Like `park` above, we have a fast path for an already-notified thread:
+        // consume the notification and return quickly. Otherwise we start
+        // coordinating for a sleep.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+ let m = self.lock.lock().unwrap();
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => panic!("inconsistent park_timeout state"),
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ let (_m, _result) = self.cvar.wait_timeout(m, dur).unwrap();
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => {} // got a notification, hurray!
+ PARKED => {} // no notification, alas
+ n => panic!("inconsistent park_timeout state: {n}"),
+ }
+ }
+
+ // This implementation doesn't require `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ // To ensure the unparked thread will observe any writes we made
+ // before this call, we must perform a release operation that `park`
+ // can synchronize with. To do that we must write `NOTIFIED` even if
+ // `state` is already `NOTIFIED`. That is why this must be a swap
+ // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+ // on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ drop(self.lock.lock().unwrap());
+ self.cvar.notify_one()
+ }
+}
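The EMPTY/PARKED/NOTIFIED protocol above is worth seeing without the Pin and in-place-construction scaffolding. Below is a stripped-down, runnable sketch of the same idea (single parking thread assumed, fast path omitted); it is an illustration, not the std implementation:

    use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;
    use std::time::Duration;

    const EMPTY: usize = 0;
    const PARKED: usize = 1;
    const NOTIFIED: usize = 2;

    struct MiniParker {
        state: AtomicUsize,
        lock: Mutex<()>,
        cvar: Condvar,
    }

    impl MiniParker {
        fn new() -> Self {
            MiniParker { state: AtomicUsize::new(EMPTY), lock: Mutex::new(()), cvar: Condvar::new() }
        }

        // Only one thread may park at a time.
        fn park(&self) {
            let mut m = self.lock.lock().unwrap();
            if self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst).is_err() {
                // Already NOTIFIED: consume the token and return.
                self.state.store(EMPTY, SeqCst);
                return;
            }
            // Loop to tolerate spurious wakeups: only NOTIFIED lets us out.
            while self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_err() {
                m = self.cvar.wait(m).unwrap();
            }
        }

        fn unpark(&self) {
            if self.state.swap(NOTIFIED, SeqCst) == PARKED {
                // Take the lock so we never notify in the window between the
                // parked thread setting PARKED and actually waiting on `cvar`.
                drop(self.lock.lock().unwrap());
                self.cvar.notify_one();
            }
        }
    }

    fn main() {
        let p = Arc::new(MiniParker::new());
        let p2 = Arc::clone(&p);
        let t = thread::spawn(move || p2.park());
        thread::sleep(Duration::from_millis(50));
        p.unpark();
        t.join().unwrap();
        println!("parked and unparked");
    }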
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
new file mode 100644
index 000000000..e98169597
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -0,0 +1,108 @@
+//! Thread parking using thread ids.
+//!
+//! Some platforms (notably NetBSD) have thread parking primitives whose semantics
+//! match those offered by `thread::park`, with the difference that the thread to
+//! be unparked is referenced by a platform-specific thread id. Since the thread
+//! parker is constructed before that id is known, an atomic state variable is used
+//! to manage the park state and propagate the thread id. This also avoids platform
+//! calls in the case where `unpark` is called before `park`.
+
+use crate::cell::UnsafeCell;
+use crate::pin::Pin;
+use crate::sync::atomic::{
+ fence, AtomicI8,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::thread_parking::{current, park, park_timeout, unpark, ThreadId};
+use crate::time::Duration;
+
+pub struct Parker {
+ state: AtomicI8,
+ tid: UnsafeCell<Option<ThreadId>>,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+impl Parker {
+ pub fn new() -> Parker {
+ Parker { state: AtomicI8::new(EMPTY), tid: UnsafeCell::new(None) }
+ }
+
+ /// Create a new thread parker. UNIX requires this to happen in-place.
+ pub unsafe fn new_in_place(parker: *mut Parker) {
+ parker.write(Parker::new())
+ }
+
+ /// # Safety
+ /// * must always be called from the same thread
+ /// * must be called before the state is set to PARKED
+ unsafe fn init_tid(&self) {
+ // The field is only ever written to from this thread, so we don't need
+ // synchronization to read it here.
+ if self.tid.get().read().is_none() {
+ // Because this point is only reached once, before the state is set
+            // to PARKED for the first time, the non-atomic write here cannot
+ // conflict with reads by other threads.
+ self.tid.get().write(Some(current()));
+ // Ensure that the write can be observed by all threads reading the
+ // state. Synchronizes with the acquire barrier in `unpark`.
+ fence(Release);
+ }
+ }
+
+ pub unsafe fn park(self: Pin<&Self>) {
+ self.init_tid();
+
+ // Changes NOTIFIED to EMPTY and EMPTY to PARKED.
+ let mut state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+ if state == PARKED {
+ // Loop to guard against spurious wakeups.
+ while state == PARKED {
+ park(self.state.as_mut_ptr().addr());
+ state = self.state.load(Acquire);
+ }
+
+ // Since the state change has already been observed with acquire
+ // ordering, the state can be reset with a relaxed store instead
+ // of a swap.
+ self.state.store(EMPTY, Relaxed);
+ }
+ }
+
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ self.init_tid();
+
+ let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+ if state == PARKED {
+ park_timeout(dur, self.state.as_mut_ptr().addr());
+ // Swap to ensure that we observe all state changes with acquire
+ // ordering, even if the state has been changed after the timeout
+            // occurred.
+ self.state.swap(EMPTY, Acquire);
+ }
+ }
+
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if state == PARKED {
+ // Synchronize with the release fence in `init_tid` to observe the
+ // write to `tid`.
+ fence(Acquire);
+ // # Safety
+ // The thread id is initialized before the state is set to `PARKED`
+ // for the first time and is not written to from that point on
+ // (negating the need for an atomic read).
+ let tid = unsafe { self.tid.get().read().unwrap_unchecked() };
+ // It is possible that the waiting thread woke up because of a timeout
+ // and terminated before this call is made. This call then returns an
+ // error or wakes up an unrelated thread. The platform API and
+            // environment do allow this, however.
+ unpark(tid, self.state.as_mut_ptr().addr());
+ }
+ }
+}
+
+unsafe impl Send for Parker {}
+unsafe impl Sync for Parker {}
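A detail worth calling out in `park` above: because the states are consecutive integers (PARKED = -1, EMPTY = 0, NOTIFIED = 1), a single fetch_sub(1) both consumes a pending token (NOTIFIED to EMPTY) and registers intent to sleep (EMPTY to PARKED), so one atomic RMW covers both entry cases:

    use std::sync::atomic::{AtomicI8, Ordering::Acquire};

    const PARKED: i8 = -1;
    const EMPTY: i8 = 0;
    const NOTIFIED: i8 = 1;

    fn main() {
        // A pending token is consumed: NOTIFIED -> EMPTY, no need to sleep.
        let state = AtomicI8::new(NOTIFIED);
        assert_eq!(state.fetch_sub(1, Acquire).wrapping_sub(1), EMPTY);

        // No token: announce ourselves as parked: EMPTY -> PARKED.
        let state = AtomicI8::new(EMPTY);
        assert_eq!(state.fetch_sub(1, Acquire).wrapping_sub(1), PARKED);

        println!("one atomic RMW handles both park entry cases");
    }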
diff --git a/library/std/src/sys_common/thread_parking/mod.rs b/library/std/src/sys_common/thread_parking/mod.rs
new file mode 100644
index 000000000..0ead6633c
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/mod.rs
@@ -0,0 +1,29 @@
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_arch = "wasm32", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+ target_os = "hermit",
+ ))] {
+ mod futex;
+ pub use futex::Parker;
+ } else if #[cfg(any(
+ target_os = "netbsd",
+ all(target_vendor = "fortanix", target_env = "sgx"),
+ ))] {
+ mod id;
+ pub use id::Parker;
+ } else if #[cfg(target_os = "solid_asp3")] {
+ mod wait_flag;
+ pub use wait_flag::Parker;
+ } else if #[cfg(any(windows, target_family = "unix"))] {
+ pub use crate::sys::thread_parking::Parker;
+ } else {
+ mod generic;
+ pub use generic::Parker;
+ }
+}
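Each arm of this cfg_if! chain resolves to exactly one mod/pub use pair per target, so downstream code always imports a single Parker from one path. A toy sketch of the same dispatch shape (target list illustrative, not the real one; assumes the cfg-if crate as a dependency):

    use cfg_if::cfg_if;

    cfg_if! {
        if #[cfg(target_os = "linux")] {
            mod futex_parker {
                pub struct Parker; // futex-backed on this target
            }
            pub use futex_parker::Parker;
        } else {
            mod generic_parker {
                pub struct Parker; // Mutex + Condvar fallback elsewhere
            }
            pub use generic_parker::Parker;
        }
    }

    fn main() {
        // Exactly one definition of `Parker` survives cfg expansion.
        let _parker = Parker;
    }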
diff --git a/library/std/src/sys_common/thread_parker/wait_flag.rs b/library/std/src/sys_common/thread_parking/wait_flag.rs
index 6561c1866..d0f8899a9 100644
--- a/library/std/src/sys_common/thread_parker/wait_flag.rs
+++ b/library/std/src/sys_common/thread_parking/wait_flag.rs
@@ -41,7 +41,7 @@ pub struct Parker {
impl Parker {
/// Construct a parker for the current thread. The UNIX parker
/// implementation requires this to happen in-place.
- pub unsafe fn new(parker: *mut Parker) {
+ pub unsafe fn new_in_place(parker: *mut Parker) {
parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() })
}
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index 5d267891b..b30bb7b77 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -905,9 +905,8 @@ pub mod statik {
pub mod fast {
use super::lazy::LazyKeyInner;
use crate::cell::Cell;
- use crate::fmt;
- use crate::mem;
use crate::sys::thread_local_dtor::register_dtor;
+ use crate::{fmt, mem, panic};
#[derive(Copy, Clone)]
enum DtorState {
@@ -950,7 +949,7 @@ pub mod fast {
// note that this is just a publicly-callable function only for the
// const-initialized form of thread locals, basically a way to call the
- // free `register_dtor` function defined elsewhere in libstd.
+ // free `register_dtor` function defined elsewhere in std.
pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
unsafe {
register_dtor(a, dtor);
@@ -1028,10 +1027,15 @@ pub mod fast {
// `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
// causes future calls to `get` to run `try_initialize_drop` again,
// which will now fail, and return `None`.
- unsafe {
+ //
+ // Wrap the call in a catch to ensure unwinding is caught in the event
+ // a panic takes place in a destructor.
+ if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
let value = (*ptr).inner.take();
(*ptr).dtor_state.set(DtorState::RunningOrHasRun);
drop(value);
+ })) {
+ rtabort!("thread local panicked on drop");
}
}
}
@@ -1044,10 +1048,8 @@ pub mod fast {
pub mod os {
use super::lazy::LazyKeyInner;
use crate::cell::Cell;
- use crate::fmt;
- use crate::marker;
- use crate::ptr;
use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
+ use crate::{fmt, marker, panic, ptr};
/// Use a regular global static to store this key; the state provided will then be
/// thread-local.
@@ -1137,12 +1139,17 @@ pub mod os {
//
// Note that to prevent an infinite loop we reset it back to null right
// before we return from the destructor ourselves.
- unsafe {
+ //
+ // Wrap the call in a catch to ensure unwinding is caught in the event
+ // a panic takes place in a destructor.
+ if let Err(_) = panic::catch_unwind(|| unsafe {
let ptr = Box::from_raw(ptr as *mut Value<T>);
let key = ptr.key;
key.os.set(ptr::invalid_mut(1));
drop(ptr);
key.os.set(ptr::null_mut());
+ }) {
+ rtabort!("thread local panicked on drop");
}
}
}
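Both hunks above apply the same fix: the destructor body is wrapped in panic::catch_unwind, and a caught panic aborts via rtabort!, since unwinding out of a TLS destructor would escape into runtime code that must not unwind. Outside std the same shape looks roughly like this, with std::process::abort standing in for rtabort!:

    use std::panic;

    struct Loud;

    impl Drop for Loud {
        fn drop(&mut self) {
            println!("destructor ran");
        }
    }

    fn run_dtor(slot: &mut Option<Loud>) {
        // Wrap the drop in a catch so a panicking destructor cannot unwind
        // through code that must not unwind; turn it into an abort instead.
        if panic::catch_unwind(panic::AssertUnwindSafe(|| {
            let value = slot.take();
            drop(value);
        }))
        .is_err()
        {
            // std uses `rtabort!` here; a normal program can only abort.
            std::process::abort();
        }
    }

    fn main() {
        let mut slot = Some(Loud);
        run_dtor(&mut slot);
        assert!(slot.is_none());
    }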
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
index 80dc4c038..964c7fc5b 100644
--- a/library/std/src/thread/local/tests.rs
+++ b/library/std/src/thread/local/tests.rs
@@ -23,11 +23,11 @@ impl Signal {
}
}
-struct Foo(Signal);
+struct NotifyOnDrop(Signal);
-impl Drop for Foo {
+impl Drop for NotifyOnDrop {
fn drop(&mut self) {
- let Foo(ref f) = *self;
+ let NotifyOnDrop(ref f) = *self;
f.notify();
}
}
@@ -82,18 +82,18 @@ fn states() {
#[test]
fn smoke_dtor() {
- thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+ thread_local!(static FOO: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
run(&FOO);
- thread_local!(static FOO2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+ thread_local!(static FOO2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
run(&FOO2);
- fn run(key: &'static LocalKey<UnsafeCell<Option<Foo>>>) {
+ fn run(key: &'static LocalKey<UnsafeCell<Option<NotifyOnDrop>>>) {
let signal = Signal::default();
let signal2 = signal.clone();
let t = thread::spawn(move || unsafe {
let mut signal = Some(signal2);
key.with(|f| {
- *f.get() = Some(Foo(signal.take().unwrap()));
+ *f.get() = Some(NotifyOnDrop(signal.take().unwrap()));
});
});
signal.wait();
@@ -187,13 +187,13 @@ fn self_referential() {
fn dtors_in_dtors_in_dtors() {
struct S1(Signal);
thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
- thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+ thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
impl Drop for S1 {
fn drop(&mut self) {
let S1(ref signal) = *self;
unsafe {
- let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
+ let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
}
}
}
@@ -211,13 +211,13 @@ fn dtors_in_dtors_in_dtors() {
fn dtors_in_dtors_in_dtors_const_init() {
struct S1(Signal);
thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
- thread_local!(static K2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+ thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
impl Drop for S1 {
fn drop(&mut self) {
let S1(ref signal) = *self;
unsafe {
- let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
+ let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
}
}
}
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 34bdb8bd4..692ff0cbc 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -173,10 +173,16 @@ use crate::sync::Arc;
use crate::sys::thread as imp;
use crate::sys_common::thread;
use crate::sys_common::thread_info;
-use crate::sys_common::thread_parker::Parker;
+use crate::sys_common::thread_parking::Parker;
use crate::sys_common::{AsInner, IntoInner};
use crate::time::Duration;
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+mod scoped;
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub use scoped::{scope, Scope, ScopedJoinHandle};
+
////////////////////////////////////////////////////////////////////////////////
// Thread-local storage
////////////////////////////////////////////////////////////////////////////////
@@ -184,12 +190,6 @@ use crate::time::Duration;
#[macro_use]
mod local;
-#[stable(feature = "scoped_threads", since = "1.63.0")]
-mod scoped;
-
-#[stable(feature = "scoped_threads", since = "1.63.0")]
-pub use scoped::{scope, Scope, ScopedJoinHandle};
-
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::local::{AccessError, LocalKey};
@@ -209,7 +209,6 @@ pub use self::local::{AccessError, LocalKey};
))]
#[doc(hidden)]
pub use self::local::fast::Key as __FastLocalKeyInner;
-
// when building for tests, use real std's type
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(test)]
@@ -220,12 +219,21 @@ pub use self::local::fast::Key as __FastLocalKeyInner;
pub use realstd::thread::__FastLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(not(test))]
#[cfg(all(
not(target_thread_local),
not(all(target_family = "wasm", not(target_feature = "atomics"))),
))]
#[doc(hidden)]
pub use self::local::os::Key as __OsLocalKeyInner;
+// when building for tests, use real std's type
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(test)]
+#[cfg(all(
+ not(target_thread_local),
+ not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
+pub use realstd::thread::__OsLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
@@ -1216,7 +1224,7 @@ impl Thread {
let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr();
addr_of_mut!((*ptr).name).write(name);
addr_of_mut!((*ptr).id).write(ThreadId::new());
- Parker::new(addr_of_mut!((*ptr).parker));
+ Parker::new_in_place(addr_of_mut!((*ptr).parker));
Pin::new_unchecked(arc.assume_init())
};
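This is the call-site counterpart of the new_in_place rename: Thread's inner data is allocated uninitialized inside an Arc, each field is written through addr_of_mut!, the parker is constructed in place, and only then is the whole thing pinned. A reduced sketch of that initialization pattern with a hypothetical Inner type (an atomic stands in for the parker; Arc::new_uninit/assume_init were stabilized after this diff, and std itself uses the unstable Arc::get_mut_unchecked instead of get_mut):

    use std::mem::MaybeUninit;
    use std::pin::Pin;
    use std::ptr::addr_of_mut;
    use std::sync::atomic::AtomicU32;
    use std::sync::Arc;

    struct Inner {
        name: Option<String>,
        state: AtomicU32, // stand-in for the in-place-constructed Parker
    }

    fn new_inner(name: Option<String>) -> Pin<Arc<Inner>> {
        let mut arc: Arc<MaybeUninit<Inner>> = Arc::new_uninit();
        unsafe {
            // Sole owner at this point, so the mutable access is unique.
            let ptr = Arc::get_mut(&mut arc).unwrap().as_mut_ptr();
            // Initialize field by field; the value never moves afterwards.
            addr_of_mut!((*ptr).name).write(name);
            addr_of_mut!((*ptr).state).write(AtomicU32::new(0));
            // SAFETY: all fields are initialized and the value stays put.
            Pin::new_unchecked(arc.assume_init())
        }
    }

    fn main() {
        let inner = new_inner(Some("main".into()));
        println!("{:?}", inner.name);
    }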
diff --git a/library/std/tests/env.rs b/library/std/tests/env.rs
index b095c2dde..aae2c49d8 100644
--- a/library/std/tests/env.rs
+++ b/library/std/tests/env.rs
@@ -1,12 +1,24 @@
use std::env::*;
use std::ffi::{OsStr, OsString};
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+use rand::distributions::{Alphanumeric, DistString};
+
+/// Copied from `std::test_helpers::test_rng`, since these tests rely on the
+/// seed not being the same for every RNG invocation too.
+#[track_caller]
+pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use core::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ core::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+}
+#[track_caller]
fn make_rand_name() -> OsString {
- let rng = thread_rng();
- let n = format!("TEST{}", rng.sample_iter(&Alphanumeric).take(10).collect::<String>());
+ let n = format!("TEST{}", Alphanumeric.sample_string(&mut test_rng(), 10));
let n = OsString::from(n);
assert!(var_os(&n).is_none());
n
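The helper copied in above turns the call site into entropy: a randomly keyed RandomState hashes the caller's panic::Location, and the 64-bit hash is widened to the 16-byte seed XorShiftRng expects. The seed derivation in isolation (no rand dependency needed to see the idea):

    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hash, Hasher};

    #[track_caller]
    fn seed_from_caller() -> [u8; 16] {
        // RandomState is freshly keyed; Location pins the call site.
        let mut hasher = RandomState::new().build_hasher();
        std::panic::Location::caller().hash(&mut hasher);
        let hc64 = hasher.finish();
        // Widen the 8-byte hash to the 16 bytes XorShiftRng expects,
        // padding with 0..8 as the original helper does.
        let mut seed = [0u8; 16];
        seed[..8].copy_from_slice(&hc64.to_le_bytes());
        seed[8..].copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7]);
        seed
    }

    fn main() {
        let a = seed_from_caller();
        let b = seed_from_caller(); // different line, different Location
        assert_ne!(a, b);
        println!("{a:?}");
    }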
diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs
index 02c076f1b..1a2c12556 100644
--- a/library/std/tests/run-time-detect.rs
+++ b/library/std/tests/run-time-detect.rs
@@ -1,9 +1,8 @@
-//! These tests just check that the macros are available in libstd.
+//! These tests just check that the macros are available in std.
#![cfg_attr(
any(
all(target_arch = "arm", any(target_os = "linux", target_os = "android")),
- all(bootstrap, target_arch = "aarch64", any(target_os = "linux", target_os = "android")),
all(target_arch = "powerpc", target_os = "linux"),
all(target_arch = "powerpc64", target_os = "linux"),
),
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
index 524658bce..796796e07 100644
--- a/library/test/src/cli.rs
+++ b/library/test/src/cli.rs
@@ -354,8 +354,7 @@ fn get_shuffle_seed(matches: &getopts::Matches, allow_unstable: bool) -> OptPart
Err(e) => {
return Err(format!(
"argument for --shuffle-seed must be a number \
- (error: {})",
- e
+ (error: {e})"
));
}
},
@@ -383,8 +382,7 @@ fn get_test_threads(matches: &getopts::Matches) -> OptPartRes<Option<usize>> {
Err(e) => {
return Err(format!(
"argument for --test-threads must be a number > 0 \
- (error: {})",
- e
+ (error: {e})"
));
}
},
@@ -418,8 +416,7 @@ fn get_format(
Some(v) => {
return Err(format!(
"argument for --format must be pretty, terse, json or junit (was \
- {})",
- v
+ {v})"
));
}
};
@@ -436,8 +433,7 @@ fn get_color_config(matches: &getopts::Matches) -> OptPartRes<ColorConfig> {
Some(v) => {
return Err(format!(
"argument for --color must be auto, always, or never (was \
- {})",
- v
+ {v})"
));
}
};
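These hunks are mechanical migrations to inlined format arguments (Rust 1.58+): an identifier in scope can be captured directly inside the braces instead of being passed positionally. The two spellings produce identical strings:

    fn main() {
        let e = "invalid digit found in string";
        let old = format!("argument for --shuffle-seed must be a number (error: {})", e);
        let new = format!("argument for --shuffle-seed must be a number (error: {e})");
        assert_eq!(old, new);
        println!("{new}");
    }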
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index a3c39f71f..24cbe035f 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -147,7 +147,7 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Res
let mut ntest = 0;
let mut nbench = 0;
- for test in filter_tests(&opts, tests).into_iter() {
+ for test in filter_tests(opts, tests).into_iter() {
use crate::TestFn::*;
let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
@@ -244,7 +244,7 @@ fn on_test_event(
let stdout = &completed_test.stdout;
st.write_log_result(test, result, exec_time.as_ref())?;
- out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
+ out.write_result(test, result, exec_time.as_ref(), stdout, st)?;
handle_test_result(st, completed_test);
}
}
@@ -262,7 +262,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
let max_name_len = tests
.iter()
- .max_by_key(|t| len_if_padded(*t))
+ .max_by_key(|t| len_if_padded(t))
.map(|t| t.desc.name.as_slice().len())
.unwrap_or(0);
diff --git a/library/test/src/formatters/json.rs b/library/test/src/formatters/json.rs
index c07fdafb1..95d2faf25 100644
--- a/library/test/src/formatters/json.rs
+++ b/library/test/src/formatters/json.rs
@@ -40,20 +40,20 @@ impl<T: Write> JsonFormatter<T> {
extra: Option<&str>,
) -> io::Result<()> {
// A doc test's name includes a filename which must be escaped for correct json.
- self.write_message(&*format!(
+ self.write_message(&format!(
r#"{{ "type": "{}", "name": "{}", "event": "{}""#,
ty,
EscapedString(name),
evt
))?;
if let Some(exec_time) = exec_time {
- self.write_message(&*format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?;
+ self.write_message(&format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?;
}
if let Some(stdout) = stdout {
- self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
+ self.write_message(&format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
}
if let Some(extra) = extra {
- self.write_message(&*format!(r#", {}"#, extra))?;
+ self.write_message(&format!(r#", {extra}"#))?;
}
self.writeln_message(" }")
}
@@ -62,18 +62,17 @@ impl<T: Write> JsonFormatter<T> {
impl<T: Write> OutputFormatter for JsonFormatter<T> {
fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
let shuffle_seed_json = if let Some(shuffle_seed) = shuffle_seed {
- format!(r#", "shuffle_seed": {}"#, shuffle_seed)
+ format!(r#", "shuffle_seed": {shuffle_seed}"#)
} else {
String::new()
};
- self.writeln_message(&*format!(
- r#"{{ "type": "suite", "event": "started", "test_count": {}{} }}"#,
- test_count, shuffle_seed_json
+ self.writeln_message(&format!(
+ r#"{{ "type": "suite", "event": "started", "test_count": {test_count}{shuffle_seed_json} }}"#
))
}
fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
- self.writeln_message(&*format!(
+ self.writeln_message(&format!(
r#"{{ "type": "test", "event": "started", "name": "{}" }}"#,
EscapedString(desc.name.as_slice())
))
@@ -152,20 +151,20 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
mbps
);
- self.writeln_message(&*line)
+ self.writeln_message(&line)
}
}
}
fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
- self.writeln_message(&*format!(
+ self.writeln_message(&format!(
r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#,
EscapedString(desc.name.as_slice())
))
}
fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
- self.write_message(&*format!(
+ self.write_message(&format!(
"{{ \"type\": \"suite\", \
\"event\": \"{}\", \
\"passed\": {}, \
diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs
index e6fb4f570..7a40ce33c 100644
--- a/library/test/src/formatters/junit.rs
+++ b/library/test/src/formatters/junit.rs
@@ -64,7 +64,7 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
self.write_message("<testsuites>")?;
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testsuite name=\"test\" package=\"test\" id=\"0\" \
errors=\"0\" \
failures=\"{}\" \
@@ -73,12 +73,12 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
>",
state.failed, state.total, state.ignored
))?;
- for (desc, result, duration) in std::mem::replace(&mut self.results, Vec::new()) {
+ for (desc, result, duration) in std::mem::take(&mut self.results) {
let (class_name, test_name) = parse_class_name(&desc);
match result {
TestResult::TrIgnored => { /* no-op */ }
TestResult::TrFailed => {
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testcase classname=\"{}\" \
name=\"{}\" time=\"{}\">",
class_name,
@@ -90,19 +90,19 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
}
TestResult::TrFailedMsg(ref m) => {
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testcase classname=\"{}\" \
name=\"{}\" time=\"{}\">",
class_name,
test_name,
duration.as_secs_f64()
))?;
- self.write_message(&*format!("<failure message=\"{m}\" type=\"assert\"/>"))?;
+ self.write_message(&format!("<failure message=\"{m}\" type=\"assert\"/>"))?;
self.write_message("</testcase>")?;
}
TestResult::TrTimedFail => {
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testcase classname=\"{}\" \
name=\"{}\" time=\"{}\">",
class_name,
@@ -114,7 +114,7 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
}
TestResult::TrBench(ref b) => {
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testcase classname=\"benchmark::{}\" \
name=\"{}\" time=\"{}\" />",
class_name, test_name, b.ns_iter_summ.sum
@@ -122,7 +122,7 @@ impl<T: Write> OutputFormatter for JunitFormatter<T> {
}
TestResult::TrOk => {
- self.write_message(&*format!(
+ self.write_message(&format!(
"<testcase classname=\"{}\" \
name=\"{}\" time=\"{}\"/>",
class_name,
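std::mem::take(&mut self.results) says the same thing as mem::replace(&mut self.results, Vec::new()) more directly: it swaps in the type's Default value and hands back the old contents:

    fn main() {
        let mut results = vec!["a", "b", "c"];
        // Equivalent to `std::mem::replace(&mut results, Vec::new())`.
        let drained = std::mem::take(&mut results);
        assert_eq!(drained, ["a", "b", "c"]);
        assert!(results.is_empty());
    }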
diff --git a/library/test/src/formatters/mod.rs b/library/test/src/formatters/mod.rs
index cb8085975..cb67b6491 100644
--- a/library/test/src/formatters/mod.rs
+++ b/library/test/src/formatters/mod.rs
@@ -38,5 +38,5 @@ pub(crate) fn write_stderr_delimiter(test_output: &mut Vec<u8>, test_name: &Test
Some(_) => test_output.push(b'\n'),
None => (),
}
- writeln!(test_output, "---- {} stderr ----", test_name).unwrap();
+ writeln!(test_output, "---- {test_name} stderr ----").unwrap();
}
diff --git a/library/test/src/formatters/pretty.rs b/library/test/src/formatters/pretty.rs
index 694202229..247778e51 100644
--- a/library/test/src/formatters/pretty.rs
+++ b/library/test/src/formatters/pretty.rs
@@ -47,7 +47,7 @@ impl<T: Write> PrettyFormatter<T> {
pub fn write_ignored(&mut self, message: Option<&'static str>) -> io::Result<()> {
if let Some(message) = message {
- self.write_short_result(&format!("ignored, {}", message), term::color::YELLOW)
+ self.write_short_result(&format!("ignored, {message}"), term::color::YELLOW)
} else {
self.write_short_result("ignored", term::color::YELLOW)
}
@@ -134,7 +134,7 @@ impl<T: Write> PrettyFormatter<T> {
let mut results = Vec::new();
let mut stdouts = String::new();
- for &(ref f, ref stdout) in inputs {
+ for (f, stdout) in inputs {
results.push(f.name.to_string());
if !stdout.is_empty() {
stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
@@ -171,9 +171,9 @@ impl<T: Write> PrettyFormatter<T> {
fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
let name = desc.padded_name(self.max_name_len, desc.name.padding());
if let Some(test_mode) = desc.test_mode() {
- self.write_plain(&format!("test {name} - {test_mode} ... "))?;
+ self.write_plain(format!("test {name} - {test_mode} ... "))?;
} else {
- self.write_plain(&format!("test {name} ... "))?;
+ self.write_plain(format!("test {name} ... "))?;
}
Ok(())
@@ -188,7 +188,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
} else {
String::new()
};
- self.write_plain(&format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
+ self.write_plain(format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
}
fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
@@ -221,7 +221,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
TestResult::TrIgnored => self.write_ignored(desc.ignore_message)?,
TestResult::TrBench(ref bs) => {
self.write_bench()?;
- self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?;
+ self.write_plain(format!(": {}", fmt_bench_samples(bs)))?;
}
TestResult::TrTimedFail => self.write_time_failed()?,
}
@@ -231,7 +231,7 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
}
fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
- self.write_plain(&format!(
+ self.write_plain(format!(
"test {} has been running for over {} seconds\n",
desc.name,
time::TEST_WARN_TIMEOUT_S
@@ -267,11 +267,11 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
state.passed, state.failed, state.ignored, state.measured, state.filtered_out
);
- self.write_plain(&s)?;
+ self.write_plain(s)?;
if let Some(ref exec_time) = state.exec_time {
let time_str = format!("; finished in {exec_time}");
- self.write_plain(&time_str)?;
+ self.write_plain(time_str)?;
}
self.write_plain("\n\n")?;
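The &format!(...) to format!(...) changes suggest write_plain now accepts anything string-like by value (e.g. impl AsRef<str>) rather than only &str; that signature is an assumption here, not confirmed by the hunks, but the shape would be:

    use std::io::{self, Write};

    // Hypothetical signature: accepts &str and String alike.
    fn write_plain<S: AsRef<str>>(out: &mut impl Write, s: S) -> io::Result<()> {
        out.write_all(s.as_ref().as_bytes())
    }

    fn main() -> io::Result<()> {
        let mut buf = Vec::new();
        write_plain(&mut buf, "literal, ")?;       // borrowed
        write_plain(&mut buf, format!("{}", 42))?; // owned, no extra `&`
        assert_eq!(buf, b"literal, 42");
        Ok(())
    }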
diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs
index 5dace8bae..0837ab169 100644
--- a/library/test/src/formatters/terse.rs
+++ b/library/test/src/formatters/terse.rs
@@ -70,7 +70,7 @@ impl<T: Write> TerseFormatter<T> {
// screen when dealing with line-buffered output (e.g., piping to
// `stamp` in the rust CI).
let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count);
- self.write_plain(&out)?;
+ self.write_plain(out)?;
}
self.test_count += 1;
@@ -106,7 +106,7 @@ impl<T: Write> TerseFormatter<T> {
self.write_plain("\nsuccesses:\n")?;
let mut successes = Vec::new();
let mut stdouts = String::new();
- for &(ref f, ref stdout) in &state.not_failures {
+ for (f, stdout) in &state.not_failures {
successes.push(f.name.to_string());
if !stdout.is_empty() {
stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
@@ -132,7 +132,7 @@ impl<T: Write> TerseFormatter<T> {
self.write_plain("\nfailures:\n")?;
let mut failures = Vec::new();
let mut fail_out = String::new();
- for &(ref f, ref stdout) in &state.failures {
+ for (f, stdout) in &state.failures {
failures.push(f.name.to_string());
if !stdout.is_empty() {
fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
@@ -157,9 +157,9 @@ impl<T: Write> TerseFormatter<T> {
fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
let name = desc.padded_name(self.max_name_len, desc.name.padding());
if let Some(test_mode) = desc.test_mode() {
- self.write_plain(&format!("test {name} - {test_mode} ... "))?;
+ self.write_plain(format!("test {name} - {test_mode} ... "))?;
} else {
- self.write_plain(&format!("test {name} ... "))?;
+ self.write_plain(format!("test {name} ... "))?;
}
Ok(())
@@ -175,7 +175,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
} else {
String::new()
};
- self.write_plain(&format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
+ self.write_plain(format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
}
fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
@@ -209,13 +209,13 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
self.write_test_name(desc)?;
}
self.write_bench()?;
- self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
+ self.write_plain(format!(": {}\n", fmt_bench_samples(bs)))
}
}
}
fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
- self.write_plain(&format!(
+ self.write_plain(format!(
"test {} has been running for over {} seconds\n",
desc.name,
time::TEST_WARN_TIMEOUT_S
@@ -245,11 +245,11 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
state.passed, state.failed, state.ignored, state.measured, state.filtered_out
);
- self.write_plain(&s)?;
+ self.write_plain(s)?;
if let Some(ref exec_time) = state.exec_time {
let time_str = format!("; finished in {exec_time}");
- self.write_plain(&time_str)?;
+ self.write_plain(time_str)?;
}
self.write_plain("\n\n")?;
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 256c9e8d1..69fb529d7 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -116,7 +116,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Opt
} else {
if !opts.nocapture {
// If we encounter a non-unwinding panic, flush any captured output from the current test,
- // and stop capturing output to ensure that the non-unwinding panic message is visible.
+ // and stop capturing output to ensure that the non-unwinding panic message is visible.
// We also acquire the locks for both output streams to prevent output from other threads
// from interleaving with the panic message or appearing after it.
let builtin_panic_hook = panic::take_hook();
@@ -213,8 +213,7 @@ pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
} else {
Err(format!(
"the test returned a termination value with a non-zero status code \
- ({}) which indicates a failure",
- code
+ ({code}) which indicates a failure"
))
}
}
@@ -750,7 +749,7 @@ fn spawn_test_subprocess(
})() {
Ok(r) => r,
Err(e) => {
- write!(&mut test_output, "Unexpected error: {}", e).unwrap();
+ write!(&mut test_output, "Unexpected error: {e}").unwrap();
TrFailed
}
};
@@ -790,7 +789,7 @@ fn run_test_in_spawned_subprocess(
}
});
let record_result2 = record_result.clone();
- panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
+ panic::set_hook(Box::new(move |info| record_result2(Some(info))));
if let Err(message) = testfn() {
panic!("{}", message);
}
diff --git a/library/test/src/term/terminfo/mod.rs b/library/test/src/term/terminfo/mod.rs
index 355859019..67ba89410 100644
--- a/library/test/src/term/terminfo/mod.rs
+++ b/library/test/src/term/terminfo/mod.rs
@@ -149,7 +149,7 @@ impl<T: Write + Send> Terminal for TerminfoTerminal<T> {
// are there any terminals that have color/attrs and not sgr0?
// Try falling back to sgr, then op
let cmd = match ["sgr0", "sgr", "op"].iter().find_map(|cap| self.ti.strings.get(*cap)) {
- Some(op) => match expand(&op, &[], &mut Variables::new()) {
+ Some(op) => match expand(op, &[], &mut Variables::new()) {
Ok(cmd) => cmd,
Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidData, e)),
},
@@ -180,12 +180,12 @@ impl<T: Write + Send> TerminfoTerminal<T> {
}
fn dim_if_necessary(&self, color: color::Color) -> color::Color {
- if color >= self.num_colors && color >= 8 && color < 16 { color - 8 } else { color }
+ if color >= self.num_colors && (8..16).contains(&color) { color - 8 } else { color }
}
fn apply_cap(&mut self, cmd: &str, params: &[Param]) -> io::Result<bool> {
match self.ti.strings.get(cmd) {
- Some(cmd) => match expand(&cmd, params, &mut Variables::new()) {
+ Some(cmd) => match expand(cmd, params, &mut Variables::new()) {
Ok(s) => self.out.write_all(&s).and(Ok(true)),
Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e)),
},
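(8..16).contains(&color) replaces the pair of comparisons without changing behavior: bright colors 8..16 are mapped down to their dim counterparts when the terminal reports fewer colors. With the color type simplified to a plain integer:

    fn dim_if_necessary(num_colors: u32, color: u32) -> u32 {
        if color >= num_colors && (8..16).contains(&color) { color - 8 } else { color }
    }

    fn main() {
        assert_eq!(dim_if_necessary(8, 12), 4);   // bright color dimmed on 8-color terminals
        assert_eq!(dim_if_necessary(16, 12), 12); // supported, left alone
        assert_eq!(dim_if_necessary(8, 3), 3);    // dim colors never change
    }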
diff --git a/library/test/src/term/terminfo/parm.rs b/library/test/src/term/terminfo/parm.rs
index 0756c8374..2815f6cfc 100644
--- a/library/test/src/term/terminfo/parm.rs
+++ b/library/test/src/term/terminfo/parm.rs
@@ -282,14 +282,14 @@ pub(crate) fn expand(
);
}
SetVar => {
- if cur >= 'A' && cur <= 'Z' {
+ if cur.is_ascii_uppercase() {
if let Some(arg) = stack.pop() {
let idx = (cur as u8) - b'A';
vars.sta_va[idx as usize] = arg;
} else {
return Err("stack is empty".to_string());
}
- } else if cur >= 'a' && cur <= 'z' {
+ } else if cur.is_ascii_lowercase() {
if let Some(arg) = stack.pop() {
let idx = (cur as u8) - b'a';
vars.dyn_va[idx as usize] = arg;
@@ -301,10 +301,10 @@ pub(crate) fn expand(
}
}
GetVar => {
- if cur >= 'A' && cur <= 'Z' {
+ if cur.is_ascii_uppercase() {
let idx = (cur as u8) - b'A';
stack.push(vars.sta_va[idx as usize].clone());
- } else if cur >= 'a' && cur <= 'z' {
+ } else if cur.is_ascii_lowercase() {
let idx = (cur as u8) - b'a';
stack.push(vars.dyn_va[idx as usize].clone());
} else {
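The is_ascii_uppercase/is_ascii_lowercase helpers are exact replacements for the manual range checks, and the index arithmetic that follows is unchanged; static variables live in A..=Z, dynamic ones in a..=z:

    fn var_index(cur: char) -> Option<(bool, usize)> {
        if cur.is_ascii_uppercase() {
            Some((true, ((cur as u8) - b'A') as usize))  // static variable slot
        } else if cur.is_ascii_lowercase() {
            Some((false, ((cur as u8) - b'a') as usize)) // dynamic variable slot
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(var_index('C'), Some((true, 2)));
        assert_eq!(var_index('c'), Some((false, 2)));
        assert_eq!(var_index('3'), None);
    }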
diff --git a/library/test/src/term/terminfo/searcher.rs b/library/test/src/term/terminfo/searcher.rs
index 68e181a68..3e8ccc91a 100644
--- a/library/test/src/term/terminfo/searcher.rs
+++ b/library/test/src/term/terminfo/searcher.rs
@@ -22,7 +22,7 @@ pub(crate) fn get_dbpath_for_term(term: &str) -> Option<PathBuf> {
if let Ok(dirs) = env::var("TERMINFO_DIRS") {
for i in dirs.split(':') {
- if i == "" {
+ if i.is_empty() {
dirs_to_search.push(PathBuf::from("/usr/share/terminfo"));
} else {
dirs_to_search.push(PathBuf::from(i));
@@ -30,7 +30,7 @@ pub(crate) fn get_dbpath_for_term(term: &str) -> Option<PathBuf> {
}
} else {
// Found nothing in TERMINFO_DIRS, use the default paths:
- // According to /etc/terminfo/README, after looking at
+ // According to /etc/terminfo/README, after looking at
// ~/.terminfo, ncurses will search /etc/terminfo, then
// /lib/terminfo, and eventually /usr/share/terminfo.
// On Haiku the database can be found at /boot/system/data/terminfo
@@ -49,7 +49,7 @@ pub(crate) fn get_dbpath_for_term(term: &str) -> Option<PathBuf> {
for mut p in dirs_to_search {
if fs::metadata(&p).is_ok() {
p.push(&first_char.to_string());
- p.push(&term);
+ p.push(term);
if fs::metadata(&p).is_ok() {
return Some(p);
}
diff --git a/library/test/src/test_result.rs b/library/test/src/test_result.rs
index 7f44d6e3d..1da238e3e 100644
--- a/library/test/src/test_result.rs
+++ b/library/test/src/test_result.rs
@@ -33,7 +33,7 @@ pub fn calc_result<'a>(
) -> TestResult {
let result = match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
- (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
+ (&ShouldPanic::YesWithMessage(msg), Err(err)) => {
let maybe_panic_str = err
.downcast_ref::<String>()
.map(|e| &**e)
@@ -44,16 +44,15 @@ pub fn calc_result<'a>(
} else if let Some(panic_str) = maybe_panic_str {
TestResult::TrFailedMsg(format!(
r#"panic did not contain expected string
- panic message: `{:?}`,
- expected substring: `{:?}`"#,
- panic_str, msg
+ panic message: `{panic_str:?}`,
+ expected substring: `{msg:?}`"#
))
} else {
TestResult::TrFailedMsg(format!(
r#"expected panic with string value,
found non-string value: `{:?}`
expected substring: `{:?}`"#,
- (**err).type_id(),
+ (*err).type_id(),
msg
))
}
diff --git a/library/test/src/time.rs b/library/test/src/time.rs
index 8c64e5d1b..7fd69d7f7 100644
--- a/library/test/src/time.rs
+++ b/library/test/src/time.rs
@@ -107,16 +107,14 @@ impl TimeThreshold {
let durations_str = env::var(env_var_name).ok()?;
let (warn_str, critical_str) = durations_str.split_once(',').unwrap_or_else(|| {
panic!(
- "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
- env_var_name, durations_str
+ "Duration variable {env_var_name} expected to have 2 numbers separated by comma, but got {durations_str}"
)
});
let parse_u64 = |v| {
u64::from_str(v).unwrap_or_else(|_| {
panic!(
- "Duration value in variable {} is expected to be a number, but got {}",
- env_var_name, v
+ "Duration value in variable {env_var_name} is expected to be a number, but got {v}"
)
})
};
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
index 888afff79..6f2e03309 100644
--- a/library/test/src/types.rs
+++ b/library/test/src/types.rs
@@ -47,7 +47,7 @@ impl TestName {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s,
- AlignedTestName(ref s, _) => &*s,
+ AlignedTestName(ref s, _) => s,
}
}
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs
index 0fa11f376..036a35869 100644
--- a/library/unwind/src/libunwind.rs
+++ b/library/unwind/src/libunwind.rs
@@ -88,7 +88,7 @@ pub type _Unwind_Exception_Cleanup_Fn =
extern "C" fn(unwind_code: _Unwind_Reason_Code, exception: *mut _Unwind_Exception);
// FIXME: The `#[link]` attributes on `extern "C"` block marks those symbols declared in
-// the block are reexported in dylib build of libstd. This is needed when build rustc with
+// the block are reexported in the dylib build of std. This is needed when building rustc with
// feature `llvm-libunwind`, as no other cdylib will provide those _Unwind_* symbols.
// However, the `link` attribute is duplicated multiple times and does not just export symbols;
// a better way to manually export symbols would be another attribute like `#[export]`.