From 64d98f8ee037282c35007b64c2649055c56af1db Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:19:03 +0200
Subject: Merging upstream version 1.68.2+dfsg1.

Signed-off-by: Daniel Baumann
---
 library/std/src/alloc.rs                            |   2 +-
 library/std/src/backtrace.rs                        |   8 +-
 library/std/src/collections/hash/map/tests.rs       |   9 +-
 library/std/src/collections/hash/set.rs             |   2 +-
 library/std/src/env.rs                              |   9 +-
 library/std/src/fs.rs                               |  12 +-
 library/std/src/fs/tests.rs                         |  50 +++-
 library/std/src/io/buffered/tests.rs                |   4 +-
 library/std/src/io/error/repr_bitpacked.rs          |  16 +-
 library/std/src/io/mod.rs                           |  10 +-
 library/std/src/io/stdio.rs                         |   3 +-
 library/std/src/lib.rs                              |  37 ++-
 library/std/src/macros.rs                           |   1 +
 library/std/src/net/ip_addr.rs                      |   9 +
 library/std/src/os/fd/mod.rs                        |   6 +-
 library/std/src/os/fd/owned.rs                      |   2 +-
 library/std/src/os/net/linux_ext/addr.rs            |   2 +-
 library/std/src/os/unix/net/addr.rs                 |   2 +-
 library/std/src/panic.rs                            |   3 +
 library/std/src/panicking.rs                        |  12 +-
 library/std/src/path.rs                             | 120 ++++++---
 library/std/src/personality/dwarf/eh.rs             |  31 ++-
 library/std/src/prelude/v1.rs                       |   9 +-
 library/std/src/process.rs                          |  36 +--
 library/std/src/process/tests.rs                    |  94 +++++++
 library/std/src/rt.rs                               |   6 +-
 library/std/src/sync/lazy_lock.rs                   |  10 +-
 library/std/src/sync/lazy_lock/tests.rs             |   6 +
 library/std/src/sync/mod.rs                         |   3 +
 library/std/src/sync/mutex/tests.rs                 |   2 +-
 library/std/src/sync/once_lock.rs                   |  17 +-
 library/std/src/sync/remutex.rs                     | 178 +++++++++++++
 library/std/src/sync/remutex/tests.rs               |  60 +++++
 library/std/src/sync/rwlock/tests.rs                |   4 +-
 library/std/src/sys/itron/thread.rs                 |  27 +-
 library/std/src/sys/sgx/mod.rs                      |   1 +
 library/std/src/sys/sgx/thread.rs                   |  21 +-
 library/std/src/sys/sgx/thread_parking.rs           |  23 ++
 library/std/src/sys/unix/android.rs                 |   4 +-
 library/std/src/sys/unix/fs.rs                      | 154 ++++++-----
 library/std/src/sys/unix/kernel_copy.rs             |   8 +-
 library/std/src/sys/unix/locks/pthread_condvar.rs   |  93 +++----
 library/std/src/sys/unix/mod.rs                     |  23 +-
 library/std/src/sys/unix/net.rs                     |   2 +-
 library/std/src/sys/unix/pipe.rs                    |   4 +
 library/std/src/sys/unix/process/process_common.rs  |  67 ++++-
 .../std/src/sys/unix/process/process_fuchsia.rs     |  15 +-
 library/std/src/sys/unix/process/process_unix.rs    |  20 +-
 .../std/src/sys/unix/process/process_unix/tests.rs  |   8 +-
 .../src/sys/unix/process/process_unsupported.rs     |   4 +
 .../std/src/sys/unix/process/process_vxworks.rs     |  11 +-
 library/std/src/sys/unix/stack_overflow.rs          |   7 +-
 library/std/src/sys/unix/thread.rs                  |  46 ++--
 library/std/src/sys/unix/thread_parker/darwin.rs    | 131 ----------
 library/std/src/sys/unix/thread_parker/mod.rs       |  32 ---
 library/std/src/sys/unix/thread_parker/netbsd.rs    | 113 --------
 library/std/src/sys/unix/thread_parker/pthread.rs   | 271 --------------------
 library/std/src/sys/unix/thread_parking/darwin.rs   | 131 ++++++++++
 library/std/src/sys/unix/thread_parking/mod.rs      |  32 +++
 library/std/src/sys/unix/thread_parking/netbsd.rs   |  52 ++++
 library/std/src/sys/unix/thread_parking/pthread.rs  | 271 ++++++++++++++++++++
 library/std/src/sys/unix/time.rs                    |   3 +
 library/std/src/sys/unix/weak.rs                    |  53 +---
 library/std/src/sys/unsupported/mod.rs              |   1 +
 library/std/src/sys/unsupported/once.rs             |  89 +++++++
 library/std/src/sys/unsupported/pipe.rs             |   4 +
 library/std/src/sys/unsupported/process.rs          |   4 +
 library/std/src/sys/wasi/mod.rs                     |   2 +
 library/std/src/sys/wasm/mod.rs                     |   2 +
 library/std/src/sys/windows/c.rs                    |  13 +-
 library/std/src/sys/windows/mod.rs                  |   2 +-
 library/std/src/sys/windows/os.rs                   |   2 +-
 library/std/src/sys/windows/pipe.rs                 |   6 +-
 library/std/src/sys/windows/process.rs              |   5 +
 library/std/src/sys/windows/rand.rs                 | 121 ++-------
 library/std/src/sys/windows/thread.rs               |   2 +-
 library/std/src/sys/windows/thread_parker.rs        | 253 ------------------
 library/std/src/sys/windows/thread_parking.rs       | 253 ++++++++++++++++++
 library/std/src/sys_common/backtrace.rs             |   6 +-
 library/std/src/sys_common/io.rs                    |   3 +-
 library/std/src/sys_common/mod.rs                   |   3 +-
 library/std/src/sys_common/once/generic.rs          | 283 ---------------------
 library/std/src/sys_common/once/mod.rs              |  27 +-
 library/std/src/sys_common/once/queue.rs            | 283 +++++++++++++++++++++
 library/std/src/sys_common/process.rs               |  42 ++-
 library/std/src/sys_common/remutex.rs               | 178 -------------
 library/std/src/sys_common/remutex/tests.rs         |  60 -----
 library/std/src/sys_common/thread_local_key.rs      |  25 +-
 library/std/src/sys_common/thread_parker/futex.rs   |  97 -------
 .../std/src/sys_common/thread_parker/generic.rs     | 125 ---------
 library/std/src/sys_common/thread_parker/mod.rs     |  23 --
 .../std/src/sys_common/thread_parker/wait_flag.rs   | 102 --------
 library/std/src/sys_common/thread_parking/futex.rs  |  97 +++++++
 .../std/src/sys_common/thread_parking/generic.rs    | 125 +++++++++
 library/std/src/sys_common/thread_parking/id.rs     | 108 ++++++++
 library/std/src/sys_common/thread_parking/mod.rs    |  29 +++
 .../std/src/sys_common/thread_parking/wait_flag.rs  | 102 ++++++++
 library/std/src/thread/local.rs                     |  23 +-
 library/std/src/thread/local/tests.rs               |  22 +-
 library/std/src/thread/mod.rs                       |  26 +-
 100 files changed, 2715 insertions(+), 2235 deletions(-)
 create mode 100644 library/std/src/sync/remutex.rs
 create mode 100644 library/std/src/sync/remutex/tests.rs
 create mode 100644 library/std/src/sys/sgx/thread_parking.rs
 delete mode 100644 library/std/src/sys/unix/thread_parker/darwin.rs
 delete mode 100644 library/std/src/sys/unix/thread_parker/mod.rs
 delete mode 100644 library/std/src/sys/unix/thread_parker/netbsd.rs
 delete mode 100644 library/std/src/sys/unix/thread_parker/pthread.rs
 create mode 100644 library/std/src/sys/unix/thread_parking/darwin.rs
 create mode 100644 library/std/src/sys/unix/thread_parking/mod.rs
 create mode 100644 library/std/src/sys/unix/thread_parking/netbsd.rs
 create mode 100644 library/std/src/sys/unix/thread_parking/pthread.rs
 create mode 100644 library/std/src/sys/unsupported/once.rs
 delete mode 100644 library/std/src/sys/windows/thread_parker.rs
 create mode 100644 library/std/src/sys/windows/thread_parking.rs
 delete mode 100644 library/std/src/sys_common/once/generic.rs
 create mode 100644 library/std/src/sys_common/once/queue.rs
 delete mode 100644 library/std/src/sys_common/remutex.rs
 delete mode 100644 library/std/src/sys_common/remutex/tests.rs
 delete mode 100644 library/std/src/sys_common/thread_parker/futex.rs
 delete mode 100644 library/std/src/sys_common/thread_parker/generic.rs
 delete mode 100644 library/std/src/sys_common/thread_parker/mod.rs
 delete mode 100644 library/std/src/sys_common/thread_parker/wait_flag.rs
 create mode 100644 library/std/src/sys_common/thread_parking/futex.rs
 create mode 100644 library/std/src/sys_common/thread_parking/generic.rs
 create mode 100644 library/std/src/sys_common/thread_parking/id.rs
 create mode 100644 library/std/src/sys_common/thread_parking/mod.rs
 create mode 100644 library/std/src/sys_common/thread_parking/wait_flag.rs

(limited to 'library/std/src')

diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index 61c1ff578..c5a5991cc 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -338,7 +338,7 @@ fn default_alloc_error_hook(layout: Layout) {
     #[allow(unused_unsafe)]
     if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
-        panic!("memory allocation of {} bytes failed\n", layout.size());
+        panic!("memory allocation of {} bytes failed", layout.size());
     } else {
         rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
     }
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 9cb74f951..7543ffadd 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -23,10 +23,10 @@
 //!
 //! ## Platform support
 //!
-//! Not all platforms that libstd compiles for support capturing backtraces.
-//! Some platforms simply do nothing when capturing a backtrace. To check
-//! whether the platform supports capturing backtraces you can consult the
-//! `BacktraceStatus` enum as a result of `Backtrace::status`.
+//! Not all platforms that std compiles for support capturing backtraces. Some
+//! platforms simply do nothing when capturing a backtrace. To check whether the
+//! platform supports capturing backtraces you can consult the `BacktraceStatus`
+//! enum as a result of `Backtrace::status`.
 //!
 //! Like above with accuracy platform support is done on a best effort basis.
 //! Sometimes libraries might not be available at runtime or something may go
diff --git a/library/std/src/collections/hash/map/tests.rs b/library/std/src/collections/hash/map/tests.rs
index 65634f206..6b89518e2 100644
--- a/library/std/src/collections/hash/map/tests.rs
+++ b/library/std/src/collections/hash/map/tests.rs
@@ -3,7 +3,8 @@ use super::HashMap;
 use super::RandomState;
 use crate::assert_matches::assert_matches;
 use crate::cell::RefCell;
-use rand::{thread_rng, Rng};
+use crate::test_helpers::test_rng;
+use rand::Rng;
 use realstd::collections::TryReserveErrorKind::*;
 
 // https://github.com/rust-lang/rust/issues/62301
@@ -710,16 +711,16 @@ fn test_entry_take_doesnt_corrupt() {
     }
     let mut m = HashMap::new();
-    let mut rng = thread_rng();
+    let mut rng = test_rng();
 
     // Populate the map with some items.
     for _ in 0..50 {
-        let x = rng.gen_range(-10, 10);
+        let x = rng.gen_range(-10..10);
         m.insert(x, ());
     }
 
     for _ in 0..1000 {
-        let x = rng.gen_range(-10, 10);
+        let x = rng.gen_range(-10..10);
         match m.entry(x) {
             Vacant(_) => {}
             Occupied(e) => {
diff --git a/library/std/src/collections/hash/set.rs b/library/std/src/collections/hash/set.rs
index cee884145..b59f89d32 100644
--- a/library/std/src/collections/hash/set.rs
+++ b/library/std/src/collections/hash/set.rs
@@ -317,7 +317,7 @@ impl<T, S> HashSet<T, S> {
     ///
     /// let mut set = HashSet::from([1, 2, 3, 4, 5, 6]);
     /// set.retain(|&k| k % 2 == 0);
-    /// assert_eq!(set.len(), 3);
+    /// assert_eq!(set, HashSet::from([2, 4, 6]));
     /// ```
     ///
     /// # Performance
diff --git a/library/std/src/env.rs b/library/std/src/env.rs
index 6eb7cbea6..183f9ab3b 100644
--- a/library/std/src/env.rs
+++ b/library/std/src/env.rs
@@ -570,6 +570,13 @@ impl Error for JoinPathsError {
 ///
 /// [msdn]: https://docs.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-getuserprofiledirectorya
 ///
+/// # Deprecation
+///
+/// This function is deprecated because the behaviour on Windows is not correct.
+/// The 'HOME' environment variable is not standard on Windows, and may not produce
+/// desired results; for instance, under Cygwin or Mingw it will return `/home/you`
+/// when it should return `C:\Users\you`.
+///
 /// # Examples
 ///
 /// ```
@@ -582,7 +589,7 @@ impl Error for JoinPathsError {
 /// ```
 #[deprecated(
     since = "1.29.0",
-    note = "This function's behavior is unexpected and probably not what you want. \
+    note = "This function's behavior may be unexpected on Windows. \
             Consider using a crate from crates.io instead."
 )]
 #[must_use]
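To make the pitfall in the deprecation note concrete, here is a minimal sketch (not part of the patch; `home_dir_fallback` is a hypothetical helper) of a lookup that respects each platform's own convention — on Windows the conventional variable is `USERPROFILE`, not `HOME`:

```rust
use std::env;
use std::path::PathBuf;

/// Reads the platform's conventional home-directory variable.
fn home_dir_fallback() -> Option<PathBuf> {
    #[cfg(windows)]
    let var = "USERPROFILE"; // yields `C:\Users\you`, unlike Cygwin/MinGW-style `HOME`
    #[cfg(not(windows))]
    let var = "HOME"; // POSIX convention
    env::var_os(var).map(PathBuf::from)
}
```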
diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs
index f357d505f..286ad68fd 100644
--- a/library/std/src/fs.rs
+++ b/library/std/src/fs.rs
@@ -249,8 +249,9 @@ pub struct DirBuilder {
 pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
     fn inner(path: &Path) -> io::Result<Vec<u8>> {
         let mut file = File::open(path)?;
-        let mut bytes = Vec::new();
-        file.read_to_end(&mut bytes)?;
+        let size = file.metadata().map(|m| m.len()).unwrap_or(0);
+        let mut bytes = Vec::with_capacity(size as usize);
+        io::default_read_to_end(&mut file, &mut bytes)?;
         Ok(bytes)
     }
     inner(path.as_ref())
@@ -288,8 +289,9 @@ pub fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
 pub fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
     fn inner(path: &Path) -> io::Result<String> {
         let mut file = File::open(path)?;
-        let mut string = String::new();
-        file.read_to_string(&mut string)?;
+        let size = file.metadata().map(|m| m.len()).unwrap_or(0);
+        let mut string = String::with_capacity(size as usize);
+        io::default_read_to_string(&mut file, &mut string)?;
         Ok(string)
     }
     inner(path.as_ref())
@@ -1510,7 +1512,7 @@ impl FileType {
     }
 
     /// Tests whether this file type represents a regular file.
-    /// The result is mutually exclusive to the results of
+    /// The result is mutually exclusive to the results of
     /// [`is_dir`] and [`is_symlink`]; only zero or one of these
     /// tests may pass.
     ///
diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs
index b8959316d..eb582be01 100644
--- a/library/std/src/fs/tests.rs
+++ b/library/std/src/fs/tests.rs
@@ -10,7 +10,7 @@ use crate::sys_common::io::test::{tmpdir, TempDir};
 use crate::thread;
 use crate::time::{Duration, Instant};
 
-use rand::{rngs::StdRng, RngCore, SeedableRng};
+use rand::RngCore;
 
 #[cfg(unix)]
 use crate::os::unix::fs::symlink as symlink_dir;
@@ -1181,7 +1181,7 @@ fn _assert_send_sync() {
 #[test]
 fn binary_file() {
     let mut bytes = [0; 1024];
-    StdRng::from_entropy().fill_bytes(&mut bytes);
+    crate::test_helpers::test_rng().fill_bytes(&mut bytes);
 
     let tmpdir = tmpdir();
 
@@ -1194,7 +1194,7 @@ fn binary_file() {
 #[test]
 fn write_then_read() {
     let mut bytes = [0; 1024];
-    StdRng::from_entropy().fill_bytes(&mut bytes);
+    crate::test_helpers::test_rng().fill_bytes(&mut bytes);
 
     let tmpdir = tmpdir();
 
@@ -1551,3 +1551,47 @@ fn hiberfil_sys() {
     fs::metadata(hiberfil).unwrap();
     assert_eq!(true, hiberfil.exists());
 }
+
+/// Test that two different ways of obtaining the FileType give the same result.
+/// Cf. https://github.com/rust-lang/rust/issues/104900
+#[test]
+fn test_eq_direntry_metadata() {
+    let tmpdir = tmpdir();
+    let file_path = tmpdir.join("file");
+    File::create(file_path).unwrap();
+    for e in fs::read_dir(tmpdir.path()).unwrap() {
+        let e = e.unwrap();
+        let p = e.path();
+        let ft1 = e.file_type().unwrap();
+        let ft2 = p.metadata().unwrap().file_type();
+        assert_eq!(ft1, ft2);
+    }
+}
+
+/// Regression test for https://github.com/rust-lang/rust/issues/50619.
+#[test]
+#[cfg(target_os = "linux")]
+fn test_read_dir_infinite_loop() {
+    use crate::io::ErrorKind;
+    use crate::process::Command;
+
+    // Create a zombie child process
+    let Ok(mut child) = Command::new("echo").spawn() else { return };
+
+    // Make sure the process is (un)dead
+    match child.kill() {
+        // InvalidInput means the child already exited
+        Err(e) if e.kind() != ErrorKind::InvalidInput => return,
+        _ => {}
+    }
+
+    // open() on this path will succeed, but readdir() will fail
+    let id = child.id();
+    let path = format!("/proc/{id}/net");
+
+    // Skip the test if we can't open the directory in the first place
+    let Ok(dir) = fs::read_dir(path) else { return };
+
+    // Check for duplicate errors
+    assert!(dir.filter(|e| e.is_err()).take(2).count() < 2);
+}
diff --git a/library/std/src/io/buffered/tests.rs b/library/std/src/io/buffered/tests.rs
index f4e688eb9..4c1b7d576 100644
--- a/library/std/src/io/buffered/tests.rs
+++ b/library/std/src/io/buffered/tests.rs
@@ -288,8 +288,8 @@ fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
     let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
     assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));
 
-    // The following seek will require two underlying seeks.  The first will
-    // succeed but the second will fail.  This should still invalidate the
+    // The following seek will require two underlying seeks. The first will
+    // succeed but the second will fail. This should still invalidate the
     // buffer.
     assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
     assert_eq!(reader.buffer().len(), 0);
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 781ae03ad..358148405 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -166,7 +166,7 @@ impl Repr {
         // `new_unchecked` is safe.
         let res = Self(unsafe { NonNull::new_unchecked(tagged) }, PhantomData);
         // quickly smoke-check we encoded the right thing (This generally will
-        // only run in libstd's tests, unless the user uses -Zbuild-std)
+        // only run in std's tests, unless the user uses -Zbuild-std)
         debug_assert!(matches!(res.data(), ErrorData::Custom(_)), "repr(custom) encoding failed");
         res
     }
@@ -177,7 +177,7 @@ impl Repr {
         // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
         let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
         // quickly smoke-check we encoded the right thing (This generally will
-        // only run in libstd's tests, unless the user uses -Zbuild-std)
+        // only run in std's tests, unless the user uses -Zbuild-std)
         debug_assert!(
             matches!(res.data(), ErrorData::Os(c) if c == code),
             "repr(os) encoding failed for {code}"
@@ -191,7 +191,7 @@ impl Repr {
         // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
         let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
         // quickly smoke-check we encoded the right thing (This generally will
-        // only run in libstd's tests, unless the user uses -Zbuild-std)
+        // only run in std's tests, unless the user uses -Zbuild-std)
         debug_assert!(
             matches!(res.data(), ErrorData::Simple(k) if k == kind),
             "repr(simple) encoding failed {:?}",
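The smoke checks above all guard the same trick: a tag stored in the low bits of a well-aligned pointer. A freestanding sketch of the idea (illustrative only — the real `Repr` also encodes errno values and `ErrorKind`s as non-pointer payloads):

```rust
// With 4-byte (or stricter) alignment, the low two bits of an address are
// always zero, so a 2-bit tag can be OR'ed in and masked back out.
const TAG_MASK: usize = 0b11;
const TAG_CUSTOM: usize = 0b01;

fn pack(ptr: *const u64) -> usize {
    let addr = ptr as usize;
    debug_assert_eq!(addr & TAG_MASK, 0, "payload must be at least 4-byte aligned");
    addr | TAG_CUSTOM
}

fn unpack(tagged: usize) -> (*const u64, usize) {
    ((tagged & !TAG_MASK) as *const u64, tagged & TAG_MASK)
}
```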
@@ -348,7 +348,7 @@ fn kind_from_prim(ek: u32) -> Option<ErrorKind> {
 // that our encoding relies on for correctness and soundness. (Some of these are
 // a bit overly thorough/cautious, admittedly)
 //
-// If any of these are hit on a platform that libstd supports, we should likely
+// If any of these are hit on a platform that std supports, we should likely
 // just use `repr_unpacked.rs` there instead (unless the fix is easy).
 macro_rules! static_assert {
     ($condition:expr) => {
@@ -374,10 +374,10 @@ static_assert!((TAG_MASK + 1).is_power_of_two());
 static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1);
 static_assert!(align_of::<Custom>() >= TAG_MASK + 1);
 
-static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE_MESSAGE), TAG_SIMPLE_MESSAGE);
-static_assert!(@usize_eq: (TAG_MASK & TAG_CUSTOM), TAG_CUSTOM);
-static_assert!(@usize_eq: (TAG_MASK & TAG_OS), TAG_OS);
-static_assert!(@usize_eq: (TAG_MASK & TAG_SIMPLE), TAG_SIMPLE);
+static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE);
+static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM);
+static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS);
+static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE, TAG_SIMPLE);
 
 // This is obviously true (`TAG_CUSTOM` is `0b01`), but in `Repr::new_custom` we
 // offset a pointer by this value, and expect it to both be within the same
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index 23a13523f..de528e853 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -2137,8 +2137,10 @@ pub trait BufRead: Read {
     }
 
     /// Read all bytes until a newline (the `0xA` byte) is reached, and append
-    /// them to the provided buffer. You do not need to clear the buffer before
-    /// appending.
+    /// them to the provided `String` buffer.
+    ///
+    /// Previous content of the buffer will be preserved. To avoid appending to
+    /// the buffer, you need to [`clear`] it first.
     ///
     /// This function will read bytes from the underlying stream until the
     /// newline delimiter (the `0xA` byte) or EOF is found. Once found, all bytes
@@ -2151,9 +2153,11 @@ pub trait BufRead: Read {
     ///
     /// This function is blocking and should be used carefully: it is possible for
     /// an attacker to continuously send bytes without ever sending a newline
-    /// or EOF.
+    /// or EOF. You can use [`take`] to limit the maximum number of bytes read.
     ///
     /// [`Ok(0)`]: Ok
+    /// [`clear`]: String::clear
+    /// [`take`]: crate::io::Read::take
     ///
     /// # Errors
     ///
diff --git a/library/std/src/io/stdio.rs b/library/std/src/io/stdio.rs
index 1141a957d..14bfef4c7 100644
--- a/library/std/src/io/stdio.rs
+++ b/library/std/src/io/stdio.rs
@@ -10,9 +10,8 @@ use crate::fmt;
 use crate::fs::File;
 use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
 use crate::sync::atomic::{AtomicBool, Ordering};
-use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
+use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard};
 use crate::sys::stdio;
-use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
 
 type LocalStream = Arc<Mutex<Vec<u8>>>;
 
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 65d4c3c89..a7e13f5b8 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -14,7 +14,7 @@
 //! # How to read this documentation
 //!
 //! If you already know the name of what you are looking for, the fastest way to
-//! find it is to use the search
+//! find it is to use the search
 //! bar at the top of the page.
 //!
 //! Otherwise, you may want to jump to one of these useful sections:
@@ -202,7 +202,7 @@
     no_global_oom_handling,
     not(no_global_oom_handling)
 ))]
-// To run libstd tests without x.py without ending up with two copies of libstd, Miri needs to be
+// To run std tests without x.py without ending up with two copies of std, Miri needs to be
 // able to "empty" this crate. See .
 // rustc itself never sets the feature, so this line has no effect there.
 #![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -532,7 +532,7 @@ pub mod process;
 pub mod sync;
 pub mod time;
 
-// Pull in `std_float` crate into libstd. The contents of
+// Pull in `std_float` crate into std. The contents of
 // `std_float` are in a different repository: rust-lang/portable-simd.
 #[path = "../../portable-simd/crates/std_float/src/lib.rs"]
 #[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
@@ -602,7 +602,7 @@ mod personality;
 #[allow(dead_code, unused_attributes, fuzzy_provenance_casts)]
 mod backtrace_rs;
 
-// Re-export macros defined in libcore.
+// Re-export macros defined in core.
 #[stable(feature = "rust1", since = "1.0.0")]
 #[allow(deprecated, deprecated_in_future)]
 pub use core::{
@@ -610,7 +610,7 @@ pub use core::{
     unimplemented, unreachable, write, writeln,
 };
 
-// Re-export built-in macros defined through libcore.
+// Re-export built-in macros defined through core.
 #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
 #[allow(deprecated)]
 pub use core::{
@@ -652,3 +652,30 @@ mod sealed {
     #[unstable(feature = "sealed", issue = "none")]
     pub trait Sealed {}
 }
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations.
+pub(crate) mod test_helpers {
+    /// Test-only replacement for `rand::thread_rng()`, which is unusable for
+    /// us, as we want to allow running stdlib tests on tier-3 targets which may
+    /// not have `getrandom` support.
+    ///
+    /// Does a bit of a song and dance to ensure that the seed is different on
+    /// each call (as some tests sadly rely on this), but doesn't try that hard.
+    ///
+    /// This is duplicated in the `core`, `alloc` test suites (as well as
+    /// `std`'s integration tests), but figuring out a mechanism to share these
+    /// seems far more painful than copy-pasting a 7 line function a couple
+    /// times, given that even under a perma-unstable feature, I don't think we
+    /// want to expose types from `rand` from `std`.
+    #[track_caller]
+    pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+        use core::hash::{BuildHasher, Hash, Hasher};
+        let mut hasher = crate::collections::hash_map::RandomState::new().build_hasher();
+        core::panic::Location::caller().hash(&mut hasher);
+        let hc64 = hasher.finish();
+        let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>();
+        let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+        rand::SeedableRng::from_seed(seed)
+    }
+}
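A quick illustration of what `test_rng` guarantees (a hypothetical in-tree test, not part of the patch): every call builds a fresh `RandomState` and hashes the caller's `Location`, so the streams are deterministic and `getrandom`-free, yet almost certainly distinct between calls:

```rust
#[test]
fn test_rng_streams_differ() {
    use rand::RngCore;
    // Two separate calls construct two differently-seeded XorShift generators.
    let a = crate::test_helpers::test_rng().next_u64();
    let b = crate::test_helpers::test_rng().next_u64();
    assert_ne!(a, b); // distinct seeds (a collision is astronomically unlikely)
}
```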
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index 6e4ba1404..fcc5cfafd 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -3,6 +3,7 @@
 //! This module contains a set of macros which are exported from the standard
 //! library. Each macro is available for use when linking against the standard
 //! library.
+// ignore-tidy-dbg
 
 #[doc = include_str!("../../core/src/macros/panic.md")]
 #[macro_export]
diff --git a/library/std/src/net/ip_addr.rs b/library/std/src/net/ip_addr.rs
index 5453853e1..07f08c1b5 100644
--- a/library/std/src/net/ip_addr.rs
+++ b/library/std/src/net/ip_addr.rs
@@ -1195,6 +1195,9 @@ impl Ipv6Addr {
 
     /// An IPv6 address representing localhost: `::1`.
     ///
+    /// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other
+    /// languages.
+    ///
     /// # Examples
     ///
     /// ```
@@ -1203,11 +1206,15 @@ impl Ipv6Addr {
     /// let addr = Ipv6Addr::LOCALHOST;
     /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1));
     /// ```
+    #[doc(alias = "IN6ADDR_LOOPBACK_INIT")]
+    #[doc(alias = "in6addr_loopback")]
     #[stable(feature = "ip_constructors", since = "1.30.0")]
     pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
 
     /// An IPv6 address representing the unspecified address: `::`
     ///
+    /// This corresponds to constant `IN6ADDR_ANY_INIT` or `in6addr_any` in other languages.
+    ///
     /// # Examples
     ///
     /// ```
@@ -1216,6 +1223,8 @@ impl Ipv6Addr {
     /// let addr = Ipv6Addr::UNSPECIFIED;
     /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
     /// ```
+    #[doc(alias = "IN6ADDR_ANY_INIT")]
+    #[doc(alias = "in6addr_any")]
     #[stable(feature = "ip_constructors", since = "1.30.0")]
     pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0);
 
diff --git a/library/std/src/os/fd/mod.rs b/library/std/src/os/fd/mod.rs
index c6aa7c77d..35de4860f 100644
--- a/library/std/src/os/fd/mod.rs
+++ b/library/std/src/os/fd/mod.rs
@@ -3,7 +3,7 @@
 //! This module is supported on Unix platforms and WASI, which both use a
 //! similar file descriptor system for referencing OS resources.
 
-#![stable(feature = "io_safety", since = "1.63.0")]
+#![stable(feature = "os_fd", since = "1.66.0")]
 #![deny(unsafe_op_in_unsafe_fn)]
 
 // `RawFd`, `AsRawFd`, etc.
@@ -19,7 +19,7 @@ mod net;
 mod tests;
 
 // Export the types and traits for the public API.
-#[unstable(feature = "os_fd", issue = "98699")]
+#[stable(feature = "os_fd", since = "1.66.0")]
 pub use owned::*;
-#[unstable(feature = "os_fd", issue = "98699")]
+#[stable(feature = "os_fd", since = "1.66.0")]
 pub use raw::*;
diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs
index c16518577..c41e093a7 100644
--- a/library/std/src/os/fd/owned.rs
+++ b/library/std/src/os/fd/owned.rs
@@ -100,7 +100,7 @@ impl BorrowedFd<'_> {
 
         // For ESP-IDF, F_DUPFD is used instead, because the CLOEXEC semantics
         // will never be supported, as this is a bare metal framework with
-        // no capabilities for multi-process execution.  While F_DUPFD is also
+        // no capabilities for multi-process execution. While F_DUPFD is also
         // not supported yet, it might be (currently it returns ENOSYS).
         #[cfg(target_os = "espidf")]
         let cmd = libc::F_DUPFD;
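With `os_fd` stabilized, the io-safety types above are usable on stable Rust. A small Unix-only sketch of the duplication path this hunk touches (`dup_stdin` is a hypothetical helper, not from the patch):

```rust
use std::io;
use std::os::fd::{AsFd, OwnedFd};

/// Borrows stdin's descriptor and duplicates it into an independently owned fd.
fn dup_stdin() -> io::Result<OwnedFd> {
    let stdin = io::stdin();
    let borrowed = stdin.as_fd(); // BorrowedFd<'_>: no ownership taken, cannot dangle
    borrowed.try_clone_to_owned() // dup(2)-style duplication (F_DUPFD_CLOEXEC where available)
}
```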
#[cfg(target_os = "espidf")] let cmd = libc::F_DUPFD; diff --git a/library/std/src/os/net/linux_ext/addr.rs b/library/std/src/os/net/linux_ext/addr.rs index df3fc8e6a..85065984f 100644 --- a/library/std/src/os/net/linux_ext/addr.rs +++ b/library/std/src/os/net/linux_ext/addr.rs @@ -38,7 +38,7 @@ pub trait SocketAddrExt: Sealed { /// Ok(()) /// } /// ``` - fn from_abstract_name(name: &N) -> crate::io::Result + fn from_abstract_name(name: N) -> crate::io::Result where N: AsRef<[u8]>; diff --git a/library/std/src/os/unix/net/addr.rs b/library/std/src/os/unix/net/addr.rs index 81ac829d2..ece2b33bd 100644 --- a/library/std/src/os/unix/net/addr.rs +++ b/library/std/src/os/unix/net/addr.rs @@ -256,7 +256,7 @@ impl linux_ext::addr::SocketAddrExt for SocketAddr { if let AddressKind::Abstract(name) = self.address() { Some(name) } else { None } } - fn from_abstract_name(name: &N) -> crate::io::Result + fn from_abstract_name(name: N) -> crate::io::Result where N: AsRef<[u8]>, { diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs index c4f022de0..9fa8f5702 100644 --- a/library/std/src/panic.rs +++ b/library/std/src/panic.rs @@ -114,6 +114,9 @@ where /// aborting the process as well. This function *only* catches unwinding panics, /// not those that abort the process. /// +/// Note that if a custom panic hook has been set, it will be invoked before +/// the panic is caught, before unwinding. +/// /// Also note that unwinding into Rust code with a foreign exception (e.g. /// an exception thrown from C++ code) is undefined behavior. /// diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index 1039835bb..b0db3112e 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -306,11 +306,11 @@ pub mod panic_count { // and after increase and decrease, but not necessarily during their execution. // // Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG) - // records whether panic::always_abort() has been called. This can only be + // records whether panic::always_abort() has been called. This can only be // set, never cleared. // panic::always_abort() is usually called to prevent memory allocations done by // the panic handling in the child created by `libc::fork`. - // Memory allocations performed in a child created with `libc::fork` are undefined + // Memory allocations performed in a child created with `libc::fork` are undefined // behavior in most operating systems. // Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory // allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is @@ -517,7 +517,7 @@ pub fn panicking() -> bool { !panic_count::count_is_zero() } -/// Entry point of panics from the libcore crate (`panic_impl` lang item). +/// Entry point of panics from the core crate (`panic_impl` lang item). #[cfg(not(test))] #[panic_handler] pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! { @@ -699,7 +699,11 @@ fn rust_panic_with_hook( // have limited options. Currently our preference is to // just abort. In the future we may consider resuming // unwinding or otherwise exiting the thread cleanly. - rtprintpanic!("thread panicked while panicking. aborting.\n"); + if !can_unwind { + rtprintpanic!("thread caused non-unwinding panic. aborting.\n"); + } else { + rtprintpanic!("thread panicked while panicking. 
aborting.\n"); + } crate::sys::abort_internal(); } diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 6c957c2fa..4778114b4 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -271,7 +271,7 @@ pub const MAIN_SEPARATOR: char = crate::sys::path::MAIN_SEP; /// The primary separator of path components for the current platform. /// /// For example, `/` on Unix and `\` on Windows. -#[unstable(feature = "main_separator_str", issue = "94071")] +#[stable(feature = "main_separator_str", since = "1.68.0")] pub const MAIN_SEPARATOR_STR: &str = crate::sys::path::MAIN_SEP_STR; //////////////////////////////////////////////////////////////////////////////// @@ -306,7 +306,7 @@ unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr { // This casts are safe as OsStr is internally a wrapper around [u8] on all // platforms. // - // Note that currently this relies on the special knowledge that libstd has; + // Note that currently this relies on the special knowledge that std has; // these types are single-element structs but are not marked // repr(transparent) or repr(C) which would make these casts not allowable // outside std. @@ -607,7 +607,7 @@ pub struct Components<'a> { // true if path *physically* has a root separator; for most Windows // prefixes, it may have a "logical" root separator for the purposes of - // normalization, e.g., \\server\share == \\server\share\. + // normalization, e.g., \\server\share == \\server\share\. has_physical_root: bool, // The iterator is double-ended, and these two states keep track of what has @@ -1246,6 +1246,9 @@ impl PathBuf { /// and `path` is not empty, the new path is normalized: all references /// to `.` and `..` are removed. /// + /// Consider using [`Path::join`] if you need a new `PathBuf` instead of + /// using this function on a cloned `PathBuf`. + /// /// # Examples /// /// Pushing a relative path extends the existing path: @@ -1411,7 +1414,8 @@ impl PathBuf { self.push(file_name); } - /// Updates [`self.extension`] to `extension`. + /// Updates [`self.extension`] to `Some(extension)` or to `None` if + /// `extension` is empty. /// /// Returns `false` and does nothing if [`self.file_name`] is [`None`], /// returns `true` and updates the extension otherwise. @@ -1419,6 +1423,20 @@ impl PathBuf { /// If [`self.extension`] is [`None`], the extension is added; otherwise /// it is replaced. /// + /// If `extension` is the empty string, [`self.extension`] will be [`None`] + /// afterwards, not `Some("")`. + /// + /// # Caveats + /// + /// The new `extension` may contain dots and will be used in its entirety, + /// but only the part after the final dot will be reflected in + /// [`self.extension`]. + /// + /// If the file stem contains internal dots and `extension` is empty, part + /// of the old file stem will be considered the new [`self.extension`]. + /// + /// See the examples below. 
+    ///
     /// [`self.file_name`]: Path::file_name
     /// [`self.extension`]: Path::extension
     ///
@@ -1432,8 +1450,20 @@ impl PathBuf {
     /// p.set_extension("force");
     /// assert_eq!(Path::new("/feel/the.force"), p.as_path());
     ///
-    /// p.set_extension("dark_side");
-    /// assert_eq!(Path::new("/feel/the.dark_side"), p.as_path());
+    /// p.set_extension("dark.side");
+    /// assert_eq!(Path::new("/feel/the.dark.side"), p.as_path());
+    ///
+    /// p.set_extension("cookie");
+    /// assert_eq!(Path::new("/feel/the.dark.cookie"), p.as_path());
+    ///
+    /// p.set_extension("");
+    /// assert_eq!(Path::new("/feel/the.dark"), p.as_path());
+    ///
+    /// p.set_extension("");
+    /// assert_eq!(Path::new("/feel/the"), p.as_path());
+    ///
+    /// p.set_extension("");
+    /// assert_eq!(Path::new("/feel/the"), p.as_path());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn set_extension<S: AsRef<OsStr>>(&mut self, extension: S) -> bool {
@@ -1748,6 +1778,14 @@ impl ops::Deref for PathBuf {
     }
 }
 
+#[stable(feature = "path_buf_deref_mut", since = "1.68.0")]
+impl ops::DerefMut for PathBuf {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut Path {
+        Path::from_inner_mut(&mut self.inner)
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Borrow<Path> for PathBuf {
     #[inline]
@@ -2000,6 +2038,12 @@ impl Path {
         unsafe { &*(s.as_ref() as *const OsStr as *const Path) }
     }
 
+    fn from_inner_mut(inner: &mut OsStr) -> &mut Path {
+        // SAFETY: Path is just a wrapper around OsStr,
+        // therefore converting &mut OsStr to &mut Path is safe.
+        unsafe { &mut *(inner as *mut OsStr as *mut Path) }
+    }
+
     /// Yields the underlying [`OsStr`] slice.
     ///
     /// # Examples
@@ -2025,12 +2069,12 @@ impl Path {
     /// #![feature(path_as_mut_os_str)]
     /// use std::path::{Path, PathBuf};
     ///
-    /// let mut path = PathBuf::from("/Foo.TXT").into_boxed_path();
+    /// let mut path = PathBuf::from("Foo.TXT");
     ///
-    /// assert_ne!(&*path, Path::new("/foo.txt"));
+    /// assert_ne!(path, Path::new("foo.txt"));
     ///
     /// path.as_mut_os_str().make_ascii_lowercase();
-    /// assert_eq!(&*path, Path::new("/foo.txt"));
+    /// assert_eq!(path, Path::new("foo.txt"));
     /// ```
     #[unstable(feature = "path_as_mut_os_str", issue = "105021")]
     #[must_use]
@@ -3133,9 +3177,9 @@ impl<'a> IntoIterator for &'a Path {
 }
 
 macro_rules! impl_cmp {
-    ($lhs:ty, $rhs: ty) => {
+    (<$($life:lifetime),*> $lhs:ty, $rhs: ty) => {
         #[stable(feature = "partialeq_path", since = "1.6.0")]
-        impl<'a, 'b> PartialEq<$rhs> for $lhs {
+        impl<$($life),*> PartialEq<$rhs> for $lhs {
             #[inline]
             fn eq(&self, other: &$rhs) -> bool {
                 <Path as PartialEq>::eq(self, other)
@@ -3143,7 +3187,7 @@ macro_rules! impl_cmp {
         }
 
         #[stable(feature = "partialeq_path", since = "1.6.0")]
-        impl<'a, 'b> PartialEq<$lhs> for $rhs {
+        impl<$($life),*> PartialEq<$lhs> for $rhs {
             #[inline]
             fn eq(&self, other: &$lhs) -> bool {
                 <Path as PartialEq>::eq(self, other)
@@ -3151,7 +3195,7 @@ macro_rules! impl_cmp {
         }
 
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+        impl<$($life),*> PartialOrd<$rhs> for $lhs {
             #[inline]
             fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
                 <Path as PartialOrd>::partial_cmp(self, other)
@@ -3159,7 +3203,7 @@ macro_rules! impl_cmp {
         }
 
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+        impl<$($life),*> PartialOrd<$lhs> for $rhs {
             #[inline]
             fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
                 <Path as PartialOrd>::partial_cmp(self, other)
@@ -3168,16 +3212,16 @@ macro_rules! impl_cmp {
         }
     };
 }
 
-impl_cmp!(PathBuf, Path);
-impl_cmp!(PathBuf, &'a Path);
-impl_cmp!(Cow<'a, Path>, Path);
-impl_cmp!(Cow<'a, Path>, &'b Path);
-impl_cmp!(Cow<'a, Path>, PathBuf);
+impl_cmp!(<> PathBuf, Path);
+impl_cmp!(<'a> PathBuf, &'a Path);
+impl_cmp!(<'a> Cow<'a, Path>, Path);
+impl_cmp!(<'a, 'b> Cow<'a, Path>, &'b Path);
+impl_cmp!(<'a> Cow<'a, Path>, PathBuf);
 
 macro_rules! impl_cmp_os_str {
-    ($lhs:ty, $rhs: ty) => {
+    (<$($life:lifetime),*> $lhs:ty, $rhs: ty) => {
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialEq<$rhs> for $lhs {
+        impl<$($life),*> PartialEq<$rhs> for $lhs {
             #[inline]
             fn eq(&self, other: &$rhs) -> bool {
                 <Path as PartialEq>::eq(self, other.as_ref())
@@ -3185,7 +3229,7 @@ macro_rules! impl_cmp_os_str {
         }
 
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialEq<$lhs> for $rhs {
+        impl<$($life),*> PartialEq<$lhs> for $rhs {
             #[inline]
             fn eq(&self, other: &$lhs) -> bool {
                 <Path as PartialEq>::eq(self.as_ref(), other)
@@ -3193,7 +3237,7 @@ macro_rules! impl_cmp_os_str {
         }
 
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialOrd<$rhs> for $lhs {
+        impl<$($life),*> PartialOrd<$rhs> for $lhs {
             #[inline]
             fn partial_cmp(&self, other: &$rhs) -> Option<cmp::Ordering> {
                 <Path as PartialOrd>::partial_cmp(self, other.as_ref())
@@ -3201,7 +3245,7 @@ macro_rules! impl_cmp_os_str {
         }
 
         #[stable(feature = "cmp_path", since = "1.8.0")]
-        impl<'a, 'b> PartialOrd<$lhs> for $rhs {
+        impl<$($life),*> PartialOrd<$lhs> for $rhs {
             #[inline]
             fn partial_cmp(&self, other: &$lhs) -> Option<cmp::Ordering> {
                 <Path as PartialOrd>::partial_cmp(self.as_ref(), other)
@@ -3210,20 +3254,20 @@ macro_rules! impl_cmp_os_str {
         }
     };
 }
 
-impl_cmp_os_str!(PathBuf, OsStr);
-impl_cmp_os_str!(PathBuf, &'a OsStr);
-impl_cmp_os_str!(PathBuf, Cow<'a, OsStr>);
-impl_cmp_os_str!(PathBuf, OsString);
-impl_cmp_os_str!(Path, OsStr);
-impl_cmp_os_str!(Path, &'a OsStr);
-impl_cmp_os_str!(Path, Cow<'a, OsStr>);
-impl_cmp_os_str!(Path, OsString);
-impl_cmp_os_str!(&'a Path, OsStr);
-impl_cmp_os_str!(&'a Path, Cow<'b, OsStr>);
-impl_cmp_os_str!(&'a Path, OsString);
-impl_cmp_os_str!(Cow<'a, Path>, OsStr);
-impl_cmp_os_str!(Cow<'a, Path>, &'b OsStr);
-impl_cmp_os_str!(Cow<'a, Path>, OsString);
+impl_cmp_os_str!(<> PathBuf, OsStr);
+impl_cmp_os_str!(<'a> PathBuf, &'a OsStr);
+impl_cmp_os_str!(<'a> PathBuf, Cow<'a, OsStr>);
+impl_cmp_os_str!(<> PathBuf, OsString);
+impl_cmp_os_str!(<> Path, OsStr);
+impl_cmp_os_str!(<'a> Path, &'a OsStr);
+impl_cmp_os_str!(<'a> Path, Cow<'a, OsStr>);
+impl_cmp_os_str!(<> Path, OsString);
+impl_cmp_os_str!(<'a> &'a Path, OsStr);
+impl_cmp_os_str!(<'a, 'b> &'a Path, Cow<'b, OsStr>);
+impl_cmp_os_str!(<'a> &'a Path, OsString);
+impl_cmp_os_str!(<'a> Cow<'a, Path>, OsStr);
+impl_cmp_os_str!(<'a, 'b> Cow<'a, Path>, &'b OsStr);
+impl_cmp_os_str!(<'a> Cow<'a, Path>, OsString);
 
 #[stable(since = "1.7.0", feature = "strip_prefix")]
 impl fmt::Display for StripPrefixError {
diff --git a/library/std/src/personality/dwarf/eh.rs b/library/std/src/personality/dwarf/eh.rs
index a783e1870..87585a8fc 100644
--- a/library/std/src/personality/dwarf/eh.rs
+++ b/library/std/src/personality/dwarf/eh.rs
@@ -84,7 +84,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
             let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
             let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
             let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding)?;
-            let cs_action = reader.read_uleb128();
+            let cs_action_entry = reader.read_uleb128();
             // Callsite table is sorted by cs_start, so if we've passed the ip, we
             // may stop searching.
             if ip < func_start + cs_start {
                 break;
             }
@@ -95,7 +95,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
                     return Ok(EHAction::None);
                 } else {
                     let lpad = lpad_base + cs_lpad;
-                    return Ok(interpret_cs_action(cs_action, lpad));
+                    return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
                 }
             }
         }
@@ -113,26 +113,39 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result
         let mut idx = ip;
         loop {
             let cs_lpad = reader.read_uleb128();
-            let cs_action = reader.read_uleb128();
+            let cs_action_entry = reader.read_uleb128();
             idx -= 1;
             if idx == 0 {
                 // Can never have null landing pad for sjlj -- that would have
                 // been indicated by a -1 call site index.
                 let lpad = (cs_lpad + 1) as usize;
-                return Ok(interpret_cs_action(cs_action, lpad));
+                return Ok(interpret_cs_action(action_table as *mut u8, cs_action_entry, lpad));
             }
         }
     }
 }
 
-fn interpret_cs_action(cs_action: u64, lpad: usize) -> EHAction {
-    if cs_action == 0 {
-        // If cs_action is 0 then this is a cleanup (Drop::drop). We run these
+unsafe fn interpret_cs_action(
+    action_table: *mut u8,
+    cs_action_entry: u64,
+    lpad: usize,
+) -> EHAction {
+    if cs_action_entry == 0 {
+        // If cs_action_entry is 0 then this is a cleanup (Drop::drop). We run these
         // for both Rust panics and foreign exceptions.
         EHAction::Cleanup(lpad)
     } else {
-        // Stop unwinding Rust panics at catch_unwind.
-        EHAction::Catch(lpad)
+        // If lpad != 0 and cs_action_entry != 0, we have to check ttype_index.
+        // If ttype_index == 0 under the condition, we take cleanup action.
+        let action_record = (action_table as *mut u8).offset(cs_action_entry as isize - 1);
+        let mut action_reader = DwarfReader::new(action_record);
+        let ttype_index = action_reader.read_sleb128();
+        if ttype_index == 0 {
+            EHAction::Cleanup(lpad)
+        } else {
+            // Stop unwinding Rust panics at catch_unwind.
+            EHAction::Catch(lpad)
+        }
     }
 }
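For orientation: `cs_action_entry` is a ULEB128-encoded value, and `cs_action_entry - 1` is the byte offset of the action record whose leading SLEB128 field is the `ttype_index` tested above. A freestanding sketch of ULEB128 decoding — the same scheme `read_uleb128` implements over raw pointers:

```rust
/// Decodes one unsigned LEB128 value starting at `pos`, advancing `pos`.
fn read_uleb128(bytes: &[u8], pos: &mut usize) -> u64 {
    let mut result: u64 = 0;
    let mut shift = 0;
    loop {
        let byte = bytes[*pos];
        *pos += 1;
        result |= u64::from(byte & 0x7f) << shift; // low 7 bits carry payload
        if byte & 0x80 == 0 {
            return result; // a clear high bit marks the final byte
        }
        shift += 7;
    }
}
```

For example, `read_uleb128(&[0xe5, 0x8e, 0x26], &mut 0)` yields `624485`, the classic DWARF-spec example.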
diff --git a/library/std/src/prelude/v1.rs b/library/std/src/prelude/v1.rs
index a5a798078..2aefd7c51 100644
--- a/library/std/src/prelude/v1.rs
+++ b/library/std/src/prelude/v1.rs
@@ -59,14 +59,12 @@ pub use core::prelude::v1::{RustcDecodable, RustcEncodable};
 
 // Do not `doc(no_inline)` so that they become doc items on their own
 // (no public module for them to be re-exported from).
-#[cfg(not(bootstrap))]
 #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use core::prelude::v1::alloc_error_handler;
-#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-pub use core::prelude::v1::{bench, derive, global_allocator, test, test_case};
+pub use core::prelude::v1::{
+    alloc_error_handler, bench, derive, global_allocator, test, test_case,
+};
 
 #[unstable(feature = "derive_const", issue = "none")]
-#[cfg(not(bootstrap))]
 pub use core::prelude::v1::derive_const;
 
 // Do not `doc(no_inline)` either.
@@ -91,7 +89,6 @@ pub use core::prelude::v1::cfg_eval;
     issue = "23416",
     reason = "placeholder syntax for type ascription"
 )]
-#[cfg(not(bootstrap))]
 pub use core::prelude::v1::type_ascribe;
 
 // The file so far is equivalent to src/libcore/prelude/v1.rs,
diff --git a/library/std/src/process.rs b/library/std/src/process.rs
index 400d25beb..62ce2cb33 100644
--- a/library/std/src/process.rs
+++ b/library/std/src/process.rs
@@ -362,6 +362,10 @@ impl Read for ChildStdout {
     fn is_read_vectored(&self) -> bool {
         self.inner.is_read_vectored()
     }
+
+    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+        self.inner.read_to_end(buf)
+    }
 }
 
 impl AsInner<imp::ChildStdout> for ChildStdout {
@@ -907,10 +911,8 @@ impl Command {
     /// ```
     #[stable(feature = "process", since = "1.0.0")]
     pub fn output(&mut self) -> io::Result<Output> {
-        self.inner
-            .spawn(imp::Stdio::MakePipe, false)
-            .map(Child::from_inner)
-            .and_then(|p| p.wait_with_output())
+        let (status, stdout, stderr) = self.inner.output()?;
+        Ok(Output { status: ExitStatus(status), stdout, stderr })
     }
 
     /// Executes a command as a child process, waiting for it to finish and
@@ -1036,6 +1038,15 @@ impl fmt::Debug for Command {
     /// Format the program and arguments of a Command for display. Any
     /// non-utf8 data is lossily converted using the utf8 replacement
     /// character.
+    ///
+    /// The default format approximates a shell invocation of the program along with its
+    /// arguments. It does not include most of the other command properties. The output is not
+    /// guaranteed to work (e.g. due to lack of shell-escaping or differences in path resolution).
+    /// On some platforms you can use [the alternate syntax] to show more fields.
+    ///
+    /// Note that the debug implementation is platform-specific.
+    ///
+    /// [the alternate syntax]: fmt#sign0
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         self.inner.fmt(f)
     }
@@ -2153,18 +2164,11 @@ pub fn id() -> u32 {
 /// to provide similar functionality.
 #[cfg_attr(not(test), lang = "termination")]
 #[stable(feature = "termination_trait_lib", since = "1.61.0")]
-#[rustc_on_unimplemented(
-    on(
-        all(not(bootstrap), cause = "MainFunctionType"),
-        message = "`main` has invalid return type `{Self}`",
-        label = "`main` can only return types that implement `{Termination}`"
-    ),
-    on(
-        bootstrap,
-        message = "`main` has invalid return type `{Self}`",
-        label = "`main` can only return types that implement `{Termination}`"
-    )
-)]
+#[rustc_on_unimplemented(on(
+    cause = "MainFunctionType",
+    message = "`main` has invalid return type `{Self}`",
+    label = "`main` can only return types that implement `{Termination}`"
+))]
 pub trait Termination {
     /// Is called to get the representation of the value as status code.
     /// This status code is returned to the operating system.
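Since the diagnostic above fires when `main`'s return type lacks a `Termination` impl, a minimal conforming program (a plain illustration, not from the patch) looks like this — `ExitCode` implements `Termination`, so `main` may return it directly:

```rust
use std::process::ExitCode;

fn main() -> ExitCode {
    // Any type implementing `Termination` works; `ExitCode` maps straight
    // to the process exit status handed back to the operating system.
    if std::env::args().count() > 1 { ExitCode::SUCCESS } else { ExitCode::from(2) }
}
```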
diff --git a/library/std/src/process/tests.rs b/library/std/src/process/tests.rs
index 955ad6891..b4f6cc2da 100644
--- a/library/std/src/process/tests.rs
+++ b/library/std/src/process/tests.rs
@@ -417,6 +417,100 @@ fn env_empty() {
     assert!(p.is_ok());
 }
 
+#[test]
+#[cfg(not(windows))]
+#[cfg_attr(any(target_os = "emscripten", target_env = "sgx"), ignore)]
+fn debug_print() {
+    const PIDFD: &'static str =
+        if cfg!(target_os = "linux") { " create_pidfd: false,\n" } else { "" };
+
+    let mut command = Command::new("some-boring-name");
+
+    assert_eq!(format!("{command:?}"), format!(r#""some-boring-name""#));
+
+    assert_eq!(
+        format!("{command:#?}"),
+        format!(
+            r#"Command {{
+    program: "some-boring-name",
+    args: [
+        "some-boring-name",
+    ],
+{PIDFD}}}"#
+        )
+    );
+
+    command.args(&["1", "2", "3"]);
+
+    assert_eq!(format!("{command:?}"), format!(r#""some-boring-name" "1" "2" "3""#));
+
+    assert_eq!(
+        format!("{command:#?}"),
+        format!(
+            r#"Command {{
+    program: "some-boring-name",
+    args: [
+        "some-boring-name",
+        "1",
+        "2",
+        "3",
+    ],
+{PIDFD}}}"#
+        )
+    );
+
+    crate::os::unix::process::CommandExt::arg0(&mut command, "exciting-name");
+
+    assert_eq!(
+        format!("{command:?}"),
+        format!(r#"["some-boring-name"] "exciting-name" "1" "2" "3""#)
+    );
+
+    assert_eq!(
+        format!("{command:#?}"),
+        format!(
+            r#"Command {{
+    program: "some-boring-name",
+    args: [
+        "exciting-name",
+        "1",
+        "2",
+        "3",
+    ],
+{PIDFD}}}"#
+        )
+    );
+
+    let mut command_with_env_and_cwd = Command::new("boring-name");
+    command_with_env_and_cwd.current_dir("/some/path").env("FOO", "bar");
+    assert_eq!(
+        format!("{command_with_env_and_cwd:?}"),
+        r#"cd "/some/path" && FOO="bar" "boring-name""#
+    );
+    assert_eq!(
+        format!("{command_with_env_and_cwd:#?}"),
+        format!(
+            r#"Command {{
+    program: "boring-name",
+    args: [
+        "boring-name",
+    ],
+    env: CommandEnv {{
+        clear: false,
+        vars: {{
+            "FOO": Some(
+                "bar",
+            ),
+        }},
+    }},
+    cwd: Some(
+        "/some/path",
+    ),
+{PIDFD}}}"#
+        )
+    );
+}
+
 // See issue #91991
 #[test]
 #[cfg(windows)]
diff --git a/library/std/src/rt.rs b/library/std/src/rt.rs
index 9c2f0c1dd..f1eeb75be 100644
--- a/library/std/src/rt.rs
+++ b/library/std/src/rt.rs
@@ -139,9 +139,9 @@ fn lang_start_internal(
     // mechanism itself.
     //
     // There are a couple of instances where unwinding can begin. First is inside of the
-    // `rt::init`, `rt::cleanup` and similar functions controlled by libstd. In those instances a
-    // panic is a libstd implementation bug. A quite likely one too, as there isn't any way to
-    // prevent libstd from accidentally introducing a panic to these functions. Another is from
+    // `rt::init`, `rt::cleanup` and similar functions controlled by std. In those instances a
+    // panic is a std implementation bug. A quite likely one too, as there isn't any way to
+    // prevent std from accidentally introducing a panic to these functions. Another is from
     // user code from `main` or, more nefariously, as described in e.g. issue #86030.
     // SAFETY: Only called once during runtime initialization.
     panic::catch_unwind(move || unsafe { init(argc, argv, sigpipe) }).map_err(rt_abort)?;
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index c8d3289ca..4a1530530 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -46,17 +46,15 @@ pub struct LazyLock<T, F = fn() -> T> {
     cell: OnceLock<T>,
     init: Cell<Option<F>>,
 }
-
-impl<T, F> LazyLock<T, F> {
+impl<T, F: FnOnce() -> T> LazyLock<T, F> {
     /// Creates a new lazy value with the given initializing
     /// function.
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub const fn new(f: F) -> LazyLock<T, F> {
         LazyLock { cell: OnceLock::new(), init: Cell::new(Some(f)) }
     }
-}
 
-impl<T, F: FnOnce() -> T> LazyLock<T, F> {
     /// Forces the evaluation of this lazy value and
     /// returns a reference to result. This is equivalent
     /// to the `Deref` impl, but is explicit.
@@ -73,6 +71,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
     /// assert_eq!(LazyLock::force(&lazy), &92);
     /// assert_eq!(&*lazy, &92);
     /// ```
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn force(this: &LazyLock<T, F>) -> &T {
         this.cell.get_or_init(|| match this.init.take() {
@@ -85,6 +84,8 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
 #[unstable(feature = "once_cell", issue = "74465")]
 impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
     type Target = T;
+
+    #[inline]
     fn deref(&self) -> &T {
         LazyLock::force(self)
     }
@@ -93,6 +94,7 @@ impl<T, F: FnOnce() -> T> Deref for LazyLock<T, F> {
 #[unstable(feature = "once_cell", issue = "74465")]
 impl<T: Default> Default for LazyLock<T> {
     /// Creates a new lazy value using `Default` as the initializing function.
+    #[inline]
     fn default() -> LazyLock<T> {
         LazyLock::new(T::default)
     }
diff --git a/library/std/src/sync/lazy_lock/tests.rs b/library/std/src/sync/lazy_lock/tests.rs
index f11b66bfc..a5d4e25c5 100644
--- a/library/std/src/sync/lazy_lock/tests.rs
+++ b/library/std/src/sync/lazy_lock/tests.rs
@@ -136,6 +136,12 @@ fn sync_lazy_poisoning() {
     }
 }
 
+// Check that we can infer `T` from closure's type.
+#[test]
+fn lazy_type_inference() {
+    let _ = LazyCell::new(|| ());
+}
+
 #[test]
 fn is_sync_send() {
     fn assert_traits<T: Send + Sync>() {}
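For context, this is the usage pattern the `LazyLock`/`OnceLock` changes above serve. At this point both types are still nightly-only behind `#![feature(once_cell)]`; the statics here are illustrative:

```rust
#![feature(once_cell)]
use std::sync::{LazyLock, OnceLock};

// Computed on first dereference, at most once, thread-safely.
static HOSTNAME_PATTERN: LazyLock<String> = LazyLock::new(|| "[a-z0-9-]+".to_string());

// Explicitly initialized at a point of the caller's choosing.
static CONFIG_DIR: OnceLock<String> = OnceLock::new();

fn main() {
    println!("{}", &*HOSTNAME_PATTERN);
    let dir = CONFIG_DIR.get_or_init(|| "/etc/myapp".to_string());
    println!("{dir}");
}
```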
diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs
index 4fee8d3e9..ba20bab87 100644
--- a/library/std/src/sync/mod.rs
+++ b/library/std/src/sync/mod.rs
@@ -177,6 +177,8 @@ pub use self::lazy_lock::LazyLock;
 #[unstable(feature = "once_cell", issue = "74465")]
 pub use self::once_lock::OnceLock;
 
+pub(crate) use self::remutex::{ReentrantMutex, ReentrantMutexGuard};
+
 pub mod mpsc;
 
 mod barrier;
@@ -187,4 +189,5 @@ mod mutex;
 mod once;
 mod once_lock;
 mod poison;
+mod remutex;
 mod rwlock;
diff --git a/library/std/src/sync/mutex/tests.rs b/library/std/src/sync/mutex/tests.rs
index 93900566f..1786a3c09 100644
--- a/library/std/src/sync/mutex/tests.rs
+++ b/library/std/src/sync/mutex/tests.rs
@@ -181,7 +181,7 @@ fn test_mutex_arc_poison() {
     let arc2 = arc.clone();
     let _ = thread::spawn(move || {
         let lock = arc2.lock().unwrap();
-        assert_eq!(*lock, 2);
+        assert_eq!(*lock, 2); // deliberate assertion failure to poison the mutex
     })
     .join();
     assert!(arc.lock().is_err());
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 16d1fd2a5..ed339ca5d 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -61,8 +61,9 @@ pub struct OnceLock<T> {
 
 impl<T> OnceLock<T> {
     /// Creates a new empty cell.
-    #[unstable(feature = "once_cell", issue = "74465")]
+    #[inline]
     #[must_use]
+    #[unstable(feature = "once_cell", issue = "74465")]
     pub const fn new() -> OnceLock<T> {
         OnceLock {
             once: Once::new(),
@@ -75,6 +76,7 @@ impl<T> OnceLock<T> {
     ///
     /// Returns `None` if the cell is empty, or being initialized. This
     /// method never blocks.
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn get(&self) -> Option<&T> {
         if self.is_initialized() {
@@ -88,6 +90,7 @@ impl<T> OnceLock<T> {
     /// Gets the mutable reference to the underlying value.
     ///
     /// Returns `None` if the cell is empty. This method never blocks.
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn get_mut(&mut self) -> Option<&mut T> {
         if self.is_initialized() {
@@ -125,6 +128,7 @@ impl<T> OnceLock<T> {
     ///     assert_eq!(CELL.get(), Some(&92));
     /// }
     /// ```
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn set(&self, value: T) -> Result<(), T> {
         let mut value = Some(value);
@@ -164,6 +168,7 @@ impl<T> OnceLock<T> {
     /// let value = cell.get_or_init(|| unreachable!());
     /// assert_eq!(value, &92);
     /// ```
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn get_or_init<F>(&self, f: F) -> &T
     where
@@ -203,6 +208,7 @@ impl<T> OnceLock<T> {
     /// assert_eq!(value, Ok(&92));
     /// assert_eq!(cell.get(), Some(&92))
     /// ```
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
     where
@@ -241,6 +247,7 @@ impl<T> OnceLock<T> {
     /// cell.set("hello".to_string()).unwrap();
     /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
     /// ```
+    #[inline]
    #[unstable(feature = "once_cell", issue = "74465")]
     pub fn into_inner(mut self) -> Option<T> {
         self.take()
@@ -267,6 +274,7 @@ impl<T> OnceLock<T> {
     /// assert_eq!(cell.take(), Some("hello".to_string()));
     /// assert_eq!(cell.get(), None);
     /// ```
+    #[inline]
     #[unstable(feature = "once_cell", issue = "74465")]
     pub fn take(&mut self) -> Option<T> {
         if self.is_initialized() {
@@ -315,6 +323,7 @@ impl<T> OnceLock<T> {
     /// # Safety
     ///
     /// The value must be initialized
+    #[inline]
     unsafe fn get_unchecked(&self) -> &T {
         debug_assert!(self.is_initialized());
         (&*self.value.get()).assume_init_ref()
@@ -323,6 +332,7 @@ impl<T> OnceLock<T> {
     /// # Safety
     ///
     /// The value must be initialized
+    #[inline]
     unsafe fn get_unchecked_mut(&mut self) -> &mut T {
         debug_assert!(self.is_initialized());
         (&mut *self.value.get()).assume_init_mut()
@@ -360,6 +370,7 @@ impl<T> const Default for OnceLock<T> {
     /// assert_eq!(OnceLock::<()>::new(), OnceLock::default());
     /// }
     /// ```
+    #[inline]
     fn default() -> OnceLock<T> {
         OnceLock::new()
     }
@@ -377,6 +388,7 @@ impl<T: fmt::Debug> fmt::Debug for OnceLock<T> {
 
 #[unstable(feature = "once_cell", issue = "74465")]
 impl<T: Clone> Clone for OnceLock<T> {
+    #[inline]
     fn clone(&self) -> OnceLock<T> {
         let cell = Self::new();
         if let Some(value) = self.get() {
@@ -408,6 +420,7 @@ impl<T> From<T> for OnceLock<T> {
     ///     Ok(())
     /// # }
     /// ```
+    #[inline]
     fn from(value: T) -> Self {
         let cell = Self::new();
         match cell.set(value) {
@@ -419,6 +432,7 @@ impl<T> From<T> for OnceLock<T> {
 
 #[unstable(feature = "once_cell", issue = "74465")]
 impl<T: PartialEq> PartialEq for OnceLock<T> {
+    #[inline]
     fn eq(&self, other: &OnceLock<T>) -> bool {
         self.get() == other.get()
     }
@@ -429,6 +443,7 @@ impl<T: Eq> Eq for OnceLock<T> {}
 
 #[unstable(feature = "once_cell", issue = "74465")]
 unsafe impl<#[may_dangle] T> Drop for OnceLock<T> {
+    #[inline]
     fn drop(&mut self) {
         if self.is_initialized() {
             // SAFETY: The cell is initialized and being dropped, so it can't
diff --git a/library/std/src/sync/remutex.rs b/library/std/src/sync/remutex.rs
new file mode 100644
index 000000000..4c054da64
--- /dev/null
+++ b/library/std/src/sync/remutex.rs
@@ -0,0 +1,178 @@
+#[cfg(all(test, not(target_os = "emscripten")))]
+mod tests;
+
+use crate::cell::UnsafeCell;
+use crate::ops::Deref;
+use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use crate::sys::locks as sys;
+
+/// A re-entrant mutual exclusion
+///
+/// This mutex will block *other* threads waiting for the lock to become
+/// available. The thread which has already locked the mutex can lock it
+/// multiple times without blocking, preventing a common source of deadlocks.
+///
+/// This is used by stdout().lock() and friends.
+///
+/// ## Implementation details
+///
+/// The 'owner' field tracks which thread has locked the mutex.
+///
+/// We use current_thread_unique_ptr() as the thread identifier,
+/// which is just the address of a thread local variable.
+///
+/// If `owner` is set to the identifier of the current thread,
+/// we assume the mutex is already locked and instead of locking it again,
+/// we increment `lock_count`.
+///
+/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
+/// it reaches zero.
+///
+/// `lock_count` is protected by the mutex and only accessed by the thread that has
+/// locked the mutex, so needs no synchronization.
+///
+/// `owner` can be checked by other threads that want to see if they already
+/// hold the lock, so needs to be atomic. If it compares equal, we're on the
+/// same thread that holds the mutex and memory access can use relaxed ordering
+/// since we're not dealing with multiple threads. If it compares unequal,
+/// synchronization is left to the mutex, making relaxed memory ordering for
+/// the `owner` field fine in all cases.
+pub struct ReentrantMutex<T> {
+    mutex: sys::Mutex,
+    owner: AtomicUsize,
+    lock_count: UnsafeCell<u32>,
+    data: T,
+}
+
+unsafe impl<T: Send> Send for ReentrantMutex<T> {}
+unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
+
+impl<T> UnwindSafe for ReentrantMutex<T> {}
+impl<T> RefUnwindSafe for ReentrantMutex<T> {}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// Deref implementation.
+///
+/// # Mutability
+///
+/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
+/// because implementation of the trait would violate Rust’s reference aliasing
+/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
+/// guarded data.
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct ReentrantMutexGuard<'a, T: 'a> {
+    lock: &'a ReentrantMutex<T>,
+}
+
+impl<T> !Send for ReentrantMutexGuard<'_, T> {}
+
+impl<T> ReentrantMutex<T> {
+    /// Creates a new reentrant mutex in an unlocked state.
+    pub const fn new(t: T) -> ReentrantMutex<T> {
+        ReentrantMutex {
+            mutex: sys::Mutex::new(),
+            owner: AtomicUsize::new(0),
+            lock_count: UnsafeCell::new(0),
+            data: t,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the caller until it is available to acquire the mutex.
+    /// Upon returning, the thread is the only thread with the mutex held. When the thread
+    /// calling this method already holds the lock, the call shall succeed without
+    /// blocking.
+    ///
+    /// # Errors
+    ///
+    /// If another user of this mutex panicked while holding the mutex, then
+    /// this call will return failure if the mutex would otherwise be
+    /// acquired.
+    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
+        let this_thread = current_thread_unique_ptr();
+        // Safety: We only touch lock_count when we own the lock.
+ unsafe { + if self.owner.load(Relaxed) == this_thread { + self.increment_lock_count(); + } else { + self.mutex.lock(); + self.owner.store(this_thread, Relaxed); + debug_assert_eq!(*self.lock_count.get(), 0); + *self.lock_count.get() = 1; + } + } + ReentrantMutexGuard { lock: self } + } + + /// Attempts to acquire this lock. + /// + /// If the lock could not be acquired at this time, then `Err` is returned. + /// Otherwise, an RAII guard is returned. + /// + /// This function does not block. + /// + /// # Errors + /// + /// If another user of this mutex panicked while holding the mutex, then + /// this call will return failure if the mutex would otherwise be + /// acquired. + pub fn try_lock(&self) -> Option> { + let this_thread = current_thread_unique_ptr(); + // Safety: We only touch lock_count when we own the lock. + unsafe { + if self.owner.load(Relaxed) == this_thread { + self.increment_lock_count(); + Some(ReentrantMutexGuard { lock: self }) + } else if self.mutex.try_lock() { + self.owner.store(this_thread, Relaxed); + debug_assert_eq!(*self.lock_count.get(), 0); + *self.lock_count.get() = 1; + Some(ReentrantMutexGuard { lock: self }) + } else { + None + } + } + } + + unsafe fn increment_lock_count(&self) { + *self.lock_count.get() = (*self.lock_count.get()) + .checked_add(1) + .expect("lock count overflow in reentrant mutex"); + } +} + +impl Deref for ReentrantMutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.lock.data + } +} + +impl Drop for ReentrantMutexGuard<'_, T> { + #[inline] + fn drop(&mut self) { + // Safety: We own the lock. + unsafe { + *self.lock.lock_count.get() -= 1; + if *self.lock.lock_count.get() == 0 { + self.lock.owner.store(0, Relaxed); + self.lock.mutex.unlock(); + } + } + } +} + +/// Get an address that is unique per running thread. +/// +/// This can be used as a non-null usize-sized ID. +pub fn current_thread_unique_ptr() -> usize { + // Use a non-drop type to make sure it's still available during thread destruction. + thread_local! 
{ static X: u8 = const { 0 } } + X.with(|x| <*const _>::addr(x)) +} diff --git a/library/std/src/sync/remutex/tests.rs b/library/std/src/sync/remutex/tests.rs new file mode 100644 index 000000000..fc553081d --- /dev/null +++ b/library/std/src/sync/remutex/tests.rs @@ -0,0 +1,60 @@ +use super::{ReentrantMutex, ReentrantMutexGuard}; +use crate::cell::RefCell; +use crate::sync::Arc; +use crate::thread; + +#[test] +fn smoke() { + let m = ReentrantMutex::new(()); + { + let a = m.lock(); + { + let b = m.lock(); + { + let c = m.lock(); + assert_eq!(*c, ()); + } + assert_eq!(*b, ()); + } + assert_eq!(*a, ()); + } +} + +#[test] +fn is_mutex() { + let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); + let m2 = m.clone(); + let lock = m.lock(); + let child = thread::spawn(move || { + let lock = m2.lock(); + assert_eq!(*lock.borrow(), 4950); + }); + for i in 0..100 { + let lock = m.lock(); + *lock.borrow_mut() += i; + } + drop(lock); + child.join().unwrap(); +} + +#[test] +fn trylock_works() { + let m = Arc::new(ReentrantMutex::new(())); + let m2 = m.clone(); + let _lock = m.try_lock(); + let _lock2 = m.try_lock(); + thread::spawn(move || { + let lock = m2.try_lock(); + assert!(lock.is_none()); + }) + .join() + .unwrap(); + let _lock3 = m.try_lock(); +} + +pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell>); +impl Drop for Answer<'_> { + fn drop(&mut self) { + *self.0.borrow_mut() = 42; + } +} diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs index b5b3ad989..1a9d3d3f1 100644 --- a/library/std/src/sync/rwlock/tests.rs +++ b/library/std/src/sync/rwlock/tests.rs @@ -2,7 +2,7 @@ use crate::sync::atomic::{AtomicUsize, Ordering}; use crate::sync::mpsc::channel; use crate::sync::{Arc, RwLock, RwLockReadGuard, TryLockError}; use crate::thread; -use rand::{self, Rng}; +use rand::Rng; #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); @@ -28,7 +28,7 @@ fn frob() { let tx = tx.clone(); let r = r.clone(); thread::spawn(move || { - let mut rng = rand::thread_rng(); + let mut rng = crate::test_helpers::test_rng(); for _ in 0..M { if rng.gen_bool(1.0 / (N as f64)) { drop(r.write().unwrap()); diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs index c2b366808..19350b83f 100644 --- a/library/std/src/sys/itron/thread.rs +++ b/library/std/src/sys/itron/thread.rs @@ -119,7 +119,7 @@ impl Thread { let old_lifecycle = inner .lifecycle - .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::Release); + .swap(LIFECYCLE_EXITED_OR_FINISHED_OR_JOIN_FINALIZE, Ordering::AcqRel); match old_lifecycle { LIFECYCLE_DETACHED => { @@ -129,9 +129,9 @@ impl Thread { // In this case, `*p_inner`'s ownership has been moved to // us, and we are responsible for dropping it. The acquire - // ordering is not necessary because the parent thread made - // no memory access needing synchronization since the call - // to `acre_tsk`. + // ordering ensures that the swap operation that wrote + // `LIFECYCLE_DETACHED` happens-before `Box::from_raw( + // p_inner)`. // Safety: See above. let _ = unsafe { Box::from_raw(p_inner) }; @@ -151,6 +151,9 @@ impl Thread { // Since the parent might drop `*inner` and terminate us as // soon as it sees `JOIN_FINALIZE`, the release ordering // must be used in the above `swap` call. + // + // To make the task referred to by `parent_tid` visible, we + // must use the acquire ordering in the above `swap` call. // [JOINING → JOIN_FINALIZE] // Wake up the parent task. 
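To see why these swaps want `AcqRel` rather than a one-sided ordering, the ownership-handoff idea can be restated in isolation. The sketch below is not part of the patch, and the names `STATE` and `finish` are illustrative: whichever of two racing parties swaps second takes over the shared allocation, and it needs the release half to publish its own prior writes and the acquire half to observe the other party's writes before freeing.

    use std::sync::atomic::{AtomicUsize, Ordering};

    /// 0 = neither side has finished yet, 1 = one side finished and published.
    static STATE: AtomicUsize = AtomicUsize::new(0);

    // Both sides call `finish` exactly once with the same pointer. The second
    // caller observes 1 and becomes responsible for freeing the allocation;
    // the release half of the AcqRel swap publishes each side's prior writes,
    // and the acquire half lets the second caller observe them before `drop`.
    unsafe fn finish(data: *mut String) {
        if STATE.swap(1, Ordering::AcqRel) == 1 {
            drop(Box::from_raw(data));
        }
    }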
@@ -218,11 +221,15 @@ impl Thread { let current_task = current_task as usize; - match inner.lifecycle.swap(current_task, Ordering::Acquire) { + match inner.lifecycle.swap(current_task, Ordering::AcqRel) { LIFECYCLE_INIT => { // [INIT → JOINING] // The child task will transition the state to `JOIN_FINALIZE` // and wake us up. + // + // To make the task referred to by `current_task` visible from + // the child task's point of view, we must use the release + // ordering in the above `swap` call. loop { expect_success_aborting(unsafe { abi::slp_tsk() }, &"slp_tsk"); // To synchronize with the child task's memory accesses to @@ -267,15 +274,15 @@ impl Drop for Thread { let inner = unsafe { self.p_inner.as_ref() }; // Detach the thread. - match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) { + match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::AcqRel) { LIFECYCLE_INIT => { // [INIT → DETACHED] // When the time comes, the child will figure out that no // one will ever join it. // The ownership of `*p_inner` is moved to the child thread. - // However, the release ordering is not necessary because we - // made no memory access needing synchronization since the call - // to `acre_tsk`. + // The release ordering ensures that the above swap operation on + // `lifecycle` happens-before the child thread's + // `Box::from_raw(p_inner)`. } LIFECYCLE_FINISHED => { // [FINISHED → JOINED] @@ -287,7 +294,7 @@ impl Drop for Thread { // Terminate and delete the task // Safety: `self.task` still represents a task we own (because // this method or `join_inner` is called only once for - // each `Thread`). The task indicated that it's safe to + // each `Thread`). The task indicated that it's safe to // delete by entering the `FINISHED` state. unsafe { terminate_and_delete_task(self.task) }; diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs index 01e4ffe3d..9865a945b 100644 --- a/library/std/src/sys/sgx/mod.rs +++ b/library/std/src/sys/sgx/mod.rs @@ -34,6 +34,7 @@ pub mod process; pub mod stdio; pub mod thread; pub mod thread_local_key; +pub mod thread_parking; pub mod time; mod condvar; diff --git a/library/std/src/sys/sgx/thread.rs b/library/std/src/sys/sgx/thread.rs index d745a6196..1608b8cb6 100644 --- a/library/std/src/sys/sgx/thread.rs +++ b/library/std/src/sys/sgx/thread.rs @@ -65,39 +65,36 @@ mod task_queue { /// execution. The signal is sent once all TLS destructors have finished at /// which point no new thread locals should be created. pub mod wait_notify { - use super::super::waitqueue::{SpinMutex, WaitQueue, WaitVariable}; + use crate::pin::Pin; use crate::sync::Arc; + use crate::sys_common::thread_parking::Parker; - pub struct Notifier(Arc>>); + pub struct Notifier(Arc); impl Notifier { /// Notify the waiter. The waiter is either notified right away (if /// currently blocked in `Waiter::wait()`) or later when it calls the /// `Waiter::wait()` method. pub fn notify(self) { - let mut guard = self.0.lock(); - *guard.lock_var_mut() = true; - let _ = WaitQueue::notify_one(guard); + Pin::new(&*self.0).unpark() } } - pub struct Waiter(Arc>>); + pub struct Waiter(Arc); impl Waiter { /// Wait for a notification. If `Notifier::notify()` has already been /// called, this will return immediately, otherwise the current thread /// is blocked until notified. pub fn wait(self) { - let guard = self.0.lock(); - if *guard.lock_var() { - return; - } - WaitQueue::wait(guard, || {}); + // SAFETY: + // This is only ever called on one thread. 
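+            // (`wait` consumes `self`, so each `Waiter` parks at most once,
+            // which is what makes the single-thread claim above hold even
+            // though the `Parker` is shared through an `Arc`.)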
+ unsafe { Pin::new(&*self.0).park() } } } pub fn new() -> (Notifier, Waiter) { - let inner = Arc::new(SpinMutex::new(WaitVariable::new(false))); + let inner = Arc::new(Parker::new()); (Notifier(inner.clone()), Waiter(inner)) } } diff --git a/library/std/src/sys/sgx/thread_parking.rs b/library/std/src/sys/sgx/thread_parking.rs new file mode 100644 index 000000000..0006cd4f1 --- /dev/null +++ b/library/std/src/sys/sgx/thread_parking.rs @@ -0,0 +1,23 @@ +use super::abi::usercalls; +use crate::io::ErrorKind; +use crate::time::Duration; +use fortanix_sgx_abi::{EV_UNPARK, WAIT_INDEFINITE}; + +pub type ThreadId = fortanix_sgx_abi::Tcs; + +pub use super::abi::thread::current; + +pub fn park(_hint: usize) { + usercalls::wait(EV_UNPARK, WAIT_INDEFINITE).unwrap(); +} + +pub fn park_timeout(dur: Duration, _hint: usize) { + let timeout = u128::min(dur.as_nanos(), WAIT_INDEFINITE as u128 - 1) as u64; + if let Err(e) = usercalls::wait(EV_UNPARK, timeout) { + assert!(matches!(e.kind(), ErrorKind::TimedOut | ErrorKind::WouldBlock)) + } +} + +pub fn unpark(tid: ThreadId, _hint: usize) { + let _ = usercalls::send(EV_UNPARK, Some(tid)); +} diff --git a/library/std/src/sys/unix/android.rs b/library/std/src/sys/unix/android.rs index 73ff10ab8..0f704994f 100644 --- a/library/std/src/sys/unix/android.rs +++ b/library/std/src/sys/unix/android.rs @@ -1,7 +1,7 @@ //! Android ABI-compatibility module //! -//! The ABI of Android has changed quite a bit over time, and libstd attempts to -//! be both forwards and backwards compatible as much as possible. We want to +//! The ABI of Android has changed quite a bit over time, and std attempts to be +//! both forwards and backwards compatible as much as possible. We want to //! always work with the most recent version of Android, but we also want to //! work with older versions of Android for whenever projects need to. //! diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index 37a49f2d7..8e1f35d6c 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -149,12 +149,13 @@ cfg_has_statx! {{ ) -> Option> { use crate::sync::atomic::{AtomicU8, Ordering}; - // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx` - // We store the availability in global to avoid unnecessary syscalls. - // 0: Unknown - // 1: Not available - // 2: Available - static STATX_STATE: AtomicU8 = AtomicU8::new(0); + // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`. + // We check for it on first failure and remember availability to avoid having to + // do it again. + #[repr(u8)] + enum STATX_STATE{ Unknown = 0, Present, Unavailable } + static STATX_SAVED_STATE: AtomicU8 = AtomicU8::new(STATX_STATE::Unknown as u8); + syscall! { fn statx( fd: c_int, @@ -165,31 +166,44 @@ cfg_has_statx! {{ ) -> c_int } - match STATX_STATE.load(Ordering::Relaxed) { - 0 => { - // It is a trick to call `statx` with null pointers to check if the syscall - // is available. According to the manual, it is expected to fail with EFAULT. - // We do this mainly for performance, since it is nearly hundreds times - // faster than a normal successful call. - let err = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut())) - .err() - .and_then(|e| e.raw_os_error()); - // We don't check `err == Some(libc::ENOSYS)` because the syscall may be limited - // and returns `EPERM`. Listing all possible errors seems not a good idea. 
- // See: https://github.com/rust-lang/rust/issues/65662 - if err != Some(libc::EFAULT) { - STATX_STATE.store(1, Ordering::Relaxed); - return None; - } - STATX_STATE.store(2, Ordering::Relaxed); - } - 1 => return None, - _ => {} + if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Unavailable as u8 { + return None; } let mut buf: libc::statx = mem::zeroed(); if let Err(err) = cvt(statx(fd, path, flags, mask, &mut buf)) { - return Some(Err(err)); + if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Present as u8 { + return Some(Err(err)); + } + + // Availability not checked yet. + // + // First try the cheap way. + if err.raw_os_error() == Some(libc::ENOSYS) { + STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed); + return None; + } + + // Error other than `ENOSYS` is not a good enough indicator -- it is + // known that `EPERM` can be returned as a result of using seccomp to + // block the syscall. + // Availability is checked by performing a call which expects `EFAULT` + // if the syscall is usable. + // See: https://github.com/rust-lang/rust/issues/65662 + // FIXME this can probably just do the call if `EPERM` was received, but + // previous iteration of the code checked it for all errors and for now + // this is retained. + // FIXME what about transient conditions like `ENOMEM`? + let err2 = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut())) + .err() + .and_then(|e| e.raw_os_error()); + if err2 == Some(libc::EFAULT) { + STATX_SAVED_STATE.store(STATX_STATE::Present as u8, Ordering::Relaxed); + return Some(Err(err)); + } else { + STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed); + return None; + } } // We cannot fill `stat64` exhaustively because of private padding fields. @@ -243,17 +257,15 @@ struct InnerReadDir { pub struct ReadDir { inner: Arc, - #[cfg(not(any( - target_os = "android", - target_os = "linux", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia", - target_os = "redox", - )))] end_of_stream: bool, } +impl ReadDir { + fn new(inner: InnerReadDir) -> Self { + Self { inner: Arc::new(inner), end_of_stream: false } + } +} + struct Dir(*mut libc::DIR); unsafe impl Send for Dir {} @@ -332,11 +344,23 @@ pub struct FileTimes { modified: Option, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, Eq, Debug)] pub struct FileType { mode: mode_t, } +impl PartialEq for FileType { + fn eq(&self, other: &Self) -> bool { + self.masked() == other.masked() + } +} + +impl core::hash::Hash for FileType { + fn hash(&self, state: &mut H) { + self.masked().hash(state); + } +} + #[derive(Debug)] pub struct DirBuilder { mode: mode_t, @@ -548,7 +572,11 @@ impl FileType { } pub fn is(&self, mode: mode_t) -> bool { - self.mode & libc::S_IFMT == mode + self.masked() == mode + } + + fn masked(&self) -> mode_t { + self.mode & libc::S_IFMT } } @@ -578,18 +606,26 @@ impl Iterator for ReadDir { target_os = "illumos" ))] fn next(&mut self) -> Option> { + if self.end_of_stream { + return None; + } + unsafe { loop { // As of POSIX.1-2017, readdir() is not required to be thread safe; only // readdir_r() is. However, readdir_r() cannot correctly handle platforms - // with unlimited or variable NAME_MAX. Many modern platforms guarantee + // with unlimited or variable NAME_MAX. Many modern platforms guarantee // thread safety for readdir() as long an individual DIR* is not accessed // concurrently, which is sufficient for Rust. 
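Returning to the `statx` rework above: the availability cache amounts to a small state machine that is worth seeing standalone. Everything below is an illustrative sketch, not part of the patch; `call` and `probe` are hypothetical stand-ins for the real syscall wrapper and the null-pointer probe, and the `ENOSYS` fast path is omitted for brevity.

    use std::sync::atomic::{AtomicU8, Ordering::Relaxed};

    const UNKNOWN: u8 = 0;
    const PRESENT: u8 = 1;
    const UNAVAILABLE: u8 = 2;

    static STATE: AtomicU8 = AtomicU8::new(UNKNOWN);

    // `call` stands in for the real statx wrapper; `probe` is the deliberate
    // null-pointer invocation that fails with EFAULT iff the syscall exists.
    fn stat_fast(
        call: impl Fn() -> Result<u64, i32>,
        probe: impl Fn() -> bool,
    ) -> Option<Result<u64, i32>> {
        if STATE.load(Relaxed) == UNAVAILABLE {
            return None; // fall back to plain stat()
        }
        match call() {
            Ok(v) => Some(Ok(v)),
            // A failure from a known-present syscall is a real error.
            Err(e) if STATE.load(Relaxed) == PRESENT => Some(Err(e)),
            // First failure ever: decide once whether the syscall exists,
            // and cache the verdict so later calls skip the probe.
            Err(e) => {
                if probe() {
                    STATE.store(PRESENT, Relaxed);
                    Some(Err(e))
                } else {
                    STATE.store(UNAVAILABLE, Relaxed);
                    None
                }
            }
        }
    }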
super::os::set_errno(0); let entry_ptr = readdir64(self.inner.dirp.0); if entry_ptr.is_null() { - // null can mean either the end is reached or an error occurred. - // So we had to clear errno beforehand to check for an error now. + // We either encountered an error, or reached the end. Either way, + // the next call to next() should return None. + self.end_of_stream = true; + + // To distinguish between errors and end-of-directory, we had to clear + // errno beforehand to check for an error now. return match super::os::errno() { 0 => None, e => Some(Err(Error::from_raw_os_error(e))), @@ -1347,18 +1383,7 @@ pub fn readdir(path: &Path) -> io::Result { } else { let root = path.to_path_buf(); let inner = InnerReadDir { dirp: Dir(ptr), root }; - Ok(ReadDir { - inner: Arc::new(inner), - #[cfg(not(any( - target_os = "android", - target_os = "linux", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia", - target_os = "redox", - )))] - end_of_stream: false, - }) + Ok(ReadDir::new(inner)) } } @@ -1739,12 +1764,16 @@ mod remove_dir_impl { use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; use crate::os::unix::prelude::{OwnedFd, RawFd}; use crate::path::{Path, PathBuf}; - use crate::sync::Arc; use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::{cvt, cvt_r}; - #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))] + #[cfg(not(any( + all(target_os = "linux", target_env = "gnu"), + all(target_os = "macos", not(target_arch = "aarch64")) + )))] use libc::{fdopendir, openat, unlinkat}; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::{fdopendir, openat64 as openat, unlinkat}; #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))] use macos_weak::{fdopendir, openat, unlinkat}; @@ -1811,21 +1840,8 @@ mod remove_dir_impl { // a valid root is not needed because we do not call any functions involving the full path // of the DirEntrys. let dummy_root = PathBuf::new(); - Ok(( - ReadDir { - inner: Arc::new(InnerReadDir { dirp, root: dummy_root }), - #[cfg(not(any( - target_os = "android", - target_os = "linux", - target_os = "solaris", - target_os = "illumos", - target_os = "fuchsia", - target_os = "redox", - )))] - end_of_stream: false, - }, - new_parent_fd, - )) + let inner = InnerReadDir { dirp, root: dummy_root }; + Ok((ReadDir::new(inner), new_parent_fd)) } #[cfg(any( diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs index 94546ca09..73b9bef7e 100644 --- a/library/std/src/sys/unix/kernel_copy.rs +++ b/library/std/src/sys/unix/kernel_copy.rs @@ -61,6 +61,10 @@ use crate::ptr; use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering}; use crate::sys::cvt; use crate::sys::weak::syscall; +#[cfg(not(all(target_os = "linux", target_env = "gnu")))] +use libc::sendfile as sendfile64; +#[cfg(all(target_os = "linux", target_env = "gnu"))] +use libc::sendfile64; use libc::{EBADF, EINVAL, ENOSYS, EOPNOTSUPP, EOVERFLOW, EPERM, EXDEV}; #[cfg(test)] @@ -583,7 +587,7 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> // - copy_file_range file is immutable or syscall is blocked by seccomp¹ (EPERM) // - copy_file_range cannot be used with pipes or device nodes (EINVAL) // - the writer fd was opened with O_APPEND (EBADF²) - // and no bytes were written successfully yet. (All these errnos should + // and no bytes were written successfully yet. 
(All these errnos should // not be returned if something was already written, but they happen in // the wild, see #91152.) // @@ -647,7 +651,7 @@ fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> let result = match mode { SpliceMode::Sendfile => { - cvt(unsafe { libc::sendfile(writer, reader, ptr::null_mut(), chunk_size) }) + cvt(unsafe { sendfile64(writer, reader, ptr::null_mut(), chunk_size) }) } SpliceMode::Splice => cvt(unsafe { splice(reader, ptr::null_mut(), writer, ptr::null_mut(), chunk_size, 0) diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs index 1ddb09905..6be1abc2b 100644 --- a/library/std/src/sys/unix/locks/pthread_condvar.rs +++ b/library/std/src/sys/unix/locks/pthread_condvar.rs @@ -2,6 +2,7 @@ use crate::cell::UnsafeCell; use crate::ptr; use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed}; use crate::sys::locks::{pthread_mutex, Mutex}; +use crate::sys::time::TIMESPEC_MAX; use crate::sys_common::lazy_box::{LazyBox, LazyInit}; use crate::time::Duration; @@ -12,13 +13,6 @@ pub struct Condvar { mutex: AtomicPtr, } -const TIMESPEC_MAX: libc::timespec = - libc::timespec { tv_sec: ::MAX, tv_nsec: 1_000_000_000 - 1 }; - -fn saturating_cast_to_time_t(value: u64) -> libc::time_t { - if value > ::MAX as u64 { ::MAX } else { value as libc::time_t } -} - #[inline] fn raw(c: &Condvar) -> *mut libc::pthread_cond_t { c.inner.0.get() @@ -133,26 +127,15 @@ impl Condvar { target_os = "horizon" )))] pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { - use crate::mem; + use crate::sys::time::Timespec; let mutex = pthread_mutex::raw(mutex); self.verify(mutex); - let mut now: libc::timespec = mem::zeroed(); - let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now); - assert_eq!(r, 0); - - // Nanosecond calculations can't overflow because both values are below 1e9. - let nsec = dur.subsec_nanos() + now.tv_nsec as u32; - - let sec = saturating_cast_to_time_t(dur.as_secs()) - .checked_add((nsec / 1_000_000_000) as libc::time_t) - .and_then(|s| s.checked_add(now.tv_sec)); - let nsec = nsec % 1_000_000_000; - - let timeout = - sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX); - + let timeout = Timespec::now(libc::CLOCK_MONOTONIC) + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec()) + .unwrap_or(TIMESPEC_MAX); let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); assert!(r == libc::ETIMEDOUT || r == 0); r == 0 @@ -169,57 +152,41 @@ impl Condvar { target_os = "espidf", target_os = "horizon" ))] - pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool { + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + use crate::sys::time::SystemTime; use crate::time::Instant; let mutex = pthread_mutex::raw(mutex); self.verify(mutex); - // 1000 years - let max_dur = Duration::from_secs(1000 * 365 * 86400); - - if dur > max_dur { - // OSX implementation of `pthread_cond_timedwait` is buggy - // with super long durations. When duration is greater than - // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` - // in macOS Sierra return error 316. - // - // This program demonstrates the issue: - // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c - // - // To work around this issue, and possible bugs of other OSes, timeout - // is clamped to 1000 years, which is allowable per the API of `wait_timeout` - // because of spurious wakeups. 
- - dur = max_dur; - } - - // First, figure out what time it currently is, in both system and - // stable time. pthread_cond_timedwait uses system time, but we want to - // report timeout based on stable time. - let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 }; - let stable_now = Instant::now(); - let r = libc::gettimeofday(&mut sys_now, ptr::null_mut()); - assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error()); - - let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long; - let extra = (nsec / 1_000_000_000) as libc::time_t; - let nsec = nsec % 1_000_000_000; - let seconds = saturating_cast_to_time_t(dur.as_secs()); - - let timeout = sys_now - .tv_sec - .checked_add(extra) - .and_then(|s| s.checked_add(seconds)) - .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec }) + // OSX implementation of `pthread_cond_timedwait` is buggy + // with super long durations. When duration is greater than + // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` + // in macOS Sierra returns error 316. + // + // This program demonstrates the issue: + // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c + // + // To work around this issue, and possible bugs of other OSes, timeout + // is clamped to 1000 years, which is allowable per the API of `wait_timeout` + // because of spurious wakeups. + let dur = Duration::min(dur, Duration::from_secs(1000 * 365 * 86400)); + + // pthread_cond_timedwait uses system time, but we want to report timeout + // based on stable time. + let now = Instant::now(); + + let timeout = SystemTime::now() + .t + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec()) .unwrap_or(TIMESPEC_MAX); - // And wait! let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout); debug_assert!(r == libc::ETIMEDOUT || r == 0); // ETIMEDOUT is not a totally reliable method of determining timeout due // to clock shifts, so do the check ourselves - stable_now.elapsed() < dur + now.elapsed() < dur } } diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs index 9055a011c..30a96be14 100644 --- a/library/std/src/sys/unix/mod.rs +++ b/library/std/src/sys/unix/mod.rs @@ -40,7 +40,7 @@ pub mod stdio; pub mod thread; pub mod thread_local_dtor; pub mod thread_local_key; -pub mod thread_parker; +pub mod thread_parking; pub mod time; #[cfg(target_os = "espidf")] @@ -95,6 +95,10 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { )))] 'poll: { use crate::sys::os::errno; + #[cfg(not(all(target_os = "linux", target_env = "gnu")))] + use libc::open as open64; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::open64; let pfds: &mut [_] = &mut [ libc::pollfd { fd: 0, events: 0, revents: 0 }, libc::pollfd { fd: 1, events: 0, revents: 0 }, @@ -116,7 +120,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { if pfd.revents & libc::POLLNVAL == 0 { continue; } - if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 { + if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 { // If the stream is closed but we failed to reopen it, abort the // process. 
Otherwise we wouldn't preserve the safety of // operations on the corresponding Rust object Stdin, Stdout, or @@ -139,9 +143,13 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { )))] { use crate::sys::os::errno; + #[cfg(not(all(target_os = "linux", target_env = "gnu")))] + use libc::open as open64; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::open64; for fd in 0..3 { if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF { - if libc::open("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 { + if open64("/dev/null\0".as_ptr().cast(), libc::O_RDWR, 0) == -1 { // If the stream is closed but we failed to reopen it, abort the // process. Otherwise we wouldn't preserve the safety of // operations on the corresponding Rust object Stdin, Stdout, or @@ -156,7 +164,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { unsafe fn reset_sigpipe(#[allow(unused_variables)] sigpipe: u8) { #[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))] { - // We don't want to add this as a public type to libstd, nor do we + // We don't want to add this as a public type to std, nor do we // want to `include!` a file from the compiler (which would break // Miri and xargo for example), so we choose to duplicate these // constants from `compiler/rustc_session/src/config/sigpipe.rs`. @@ -176,12 +184,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) { sigpipe::SIG_DFL => (true, Some(libc::SIG_DFL)), _ => unreachable!(), }; - // The bootstrap compiler doesn't know about sigpipe::DEFAULT, and always passes in - // SIG_IGN. This causes some tests to fail because they expect SIGPIPE to be reset to - // default on process spawning (which doesn't happen if #[unix_sigpipe] is specified). - // Since we can't differentiate between the cases here, treat SIG_IGN as DEFAULT - // unconditionally. - if sigpipe_attr_specified && !(cfg!(bootstrap) && sigpipe == sigpipe::SIG_IGN) { + if sigpipe_attr_specified { UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, crate::sync::atomic::Ordering::Relaxed); } if let Some(handler) = handler { diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs index b84bf8f92..c86f80972 100644 --- a/library/std/src/sys/unix/net.rs +++ b/library/std/src/sys/unix/net.rs @@ -512,7 +512,7 @@ impl FromRawFd for Socket { // A workaround for this bug is to call the res_init libc function, to clear // the cached configs. Unfortunately, while we believe glibc's implementation // of res_init is thread-safe, we know that other implementations are not -// (https://github.com/rust-lang/rust/issues/43592). Code here in libstd could +// (https://github.com/rust-lang/rust/issues/43592). Code here in std could // try to synchronize its res_init calls with a Mutex, but that wouldn't // protect programs that call into libc in other ways. 
So instead of calling // res_init unconditionally, we call it only when we detect we're linking diff --git a/library/std/src/sys/unix/pipe.rs b/library/std/src/sys/unix/pipe.rs index a56c275c9..a744d0ab6 100644 --- a/library/std/src/sys/unix/pipe.rs +++ b/library/std/src/sys/unix/pipe.rs @@ -58,6 +58,10 @@ impl AnonPipe { self.0.is_read_vectored() } + pub fn read_to_end(&self, buf: &mut Vec) -> io::Result { + self.0.read_to_end(buf) + } + pub fn write(&self, buf: &[u8]) -> io::Result { self.0.write(buf) } diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs index 848adca78..afd03d79c 100644 --- a/library/std/src/sys/unix/process/process_common.rs +++ b/library/std/src/sys/unix/process/process_common.rs @@ -144,6 +144,7 @@ pub enum ChildStdio { Null, } +#[derive(Debug)] pub enum Stdio { Inherit, Null, @@ -510,16 +511,68 @@ impl ChildStdio { } impl fmt::Debug for Command { + // show all attributes but `self.closures` which does not implement `Debug` + // and `self.argv` which is not useful for debugging fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.program != self.args[0] { - write!(f, "[{:?}] ", self.program)?; - } - write!(f, "{:?}", self.args[0])?; + if f.alternate() { + let mut debug_command = f.debug_struct("Command"); + debug_command.field("program", &self.program).field("args", &self.args); + if !self.env.is_unchanged() { + debug_command.field("env", &self.env); + } + + if self.cwd.is_some() { + debug_command.field("cwd", &self.cwd); + } + if self.uid.is_some() { + debug_command.field("uid", &self.uid); + } + if self.gid.is_some() { + debug_command.field("gid", &self.gid); + } + + if self.groups.is_some() { + debug_command.field("groups", &self.groups); + } + + if self.stdin.is_some() { + debug_command.field("stdin", &self.stdin); + } + if self.stdout.is_some() { + debug_command.field("stdout", &self.stdout); + } + if self.stderr.is_some() { + debug_command.field("stderr", &self.stderr); + } + if self.pgroup.is_some() { + debug_command.field("pgroup", &self.pgroup); + } + + #[cfg(target_os = "linux")] + { + debug_command.field("create_pidfd", &self.create_pidfd); + } - for arg in &self.args[1..] { - write!(f, " {:?}", arg)?; + debug_command.finish() + } else { + if let Some(ref cwd) = self.cwd { + write!(f, "cd {cwd:?} && ")?; + } + for (key, value_opt) in self.get_envs() { + if let Some(value) = value_opt { + write!(f, "{}={value:?} ", key.to_string_lossy())?; + } + } + if self.program != self.args[0] { + write!(f, "[{:?}] ", self.program)?; + } + write!(f, "{:?}", self.args[0])?; + + for arg in &self.args[1..] { + write!(f, " {:?}", arg)?; + } + Ok(()) } - Ok(()) } } diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs index 66ea3db20..d4c7e58b3 100644 --- a/library/std/src/sys/unix/process/process_fuchsia.rs +++ b/library/std/src/sys/unix/process/process_fuchsia.rs @@ -35,6 +35,11 @@ impl Command { Ok((Process { handle: Handle::new(process_handle) }, ours)) } + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?; + crate::sys_common::process::wait_with_output(proc, pipes) + } + pub fn exec(&mut self, default: Stdio) -> io::Error { if self.saw_nul() { return io::const_io_error!( @@ -257,7 +262,7 @@ impl ExitStatus { // available on Fuchsia. 
// // It does not appear that Fuchsia is Unix-like enough to implement ExitStatus (or indeed many - // other things from std::os::unix) properly. This veneer is always going to be a bodge. So + // other things from std::os::unix) properly. This veneer is always going to be a bodge. So // while I don't know if these implementations are actually correct, I think they will do for // now at least. pub fn core_dumped(&self) -> bool { @@ -272,9 +277,9 @@ impl ExitStatus { pub fn into_raw(&self) -> c_int { // We don't know what someone who calls into_raw() will do with this value, but it should - // have the conventional Unix representation. Despite the fact that this is not + // have the conventional Unix representation. Despite the fact that this is not // standardised in SuS or POSIX, all Unix systems encode the signal and exit status the - // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every + // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every // Unix.) // // The caller of `std::os::unix::into_raw` is probably wanting a Unix exit status, and may @@ -282,14 +287,14 @@ impl ExitStatus { // different Unix variant. // // The other view would be to say that the caller on Fuchsia ought to know that `into_raw` - // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is + // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is // not possible here because we must return a c_int because that's what Unix (including // SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't // necessarily fit. // // It seems to me that the right answer would be to provide std::os::fuchsia with its // own ExitStatusExt, rather that trying to provide a not very convincing imitation of - // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But + // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But // fixing this up that is beyond the scope of my efforts now. let exit_status_as_if_unix: u8 = self.0.try_into().expect("Fuchsia process return code bigger than 8 bits, but std::os::unix::ExitStatusExt::into_raw() was called to try to convert the value into a traditional Unix-style wait status, which cannot represent values greater than 255."); let wait_status_as_if_unix = (exit_status_as_if_unix as c_int) << 8; diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs index 56a805cef..3bc17b775 100644 --- a/library/std/src/sys/unix/process/process_unix.rs +++ b/library/std/src/sys/unix/process/process_unix.rs @@ -66,14 +66,15 @@ impl Command { // // Note that as soon as we're done with the fork there's no need to hold // a lock any more because the parent won't do anything and the child is - // in its own process. Thus the parent drops the lock guard while the child - // forgets it to avoid unlocking it on a new thread, which would be invalid. + // in its own process. Thus the parent drops the lock guard immediately. + // The child calls `mem::forget` to leak the lock, which is crucial because + // releasing a lock is not async-signal-safe. let env_lock = sys::os::env_read_lock(); let (pid, pidfd) = unsafe { self.do_fork()? 
}; if pid == 0 { crate::panic::always_abort(); - mem::forget(env_lock); + mem::forget(env_lock); // avoid non-async-signal-safe unlocking drop(input); let Err(err) = unsafe { self.do_exec(theirs, envp.as_ref()) }; let errno = err.raw_os_error().unwrap_or(libc::EINVAL) as u32; @@ -132,6 +133,11 @@ impl Command { } } + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?; + crate::sys_common::process::wait_with_output(proc, pipes) + } + // Attempts to fork the process. If successful, returns Ok((0, -1)) // in the child, and Ok((child_pid, -1)) in the parent. #[cfg(not(target_os = "linux"))] @@ -660,11 +666,11 @@ impl ExitStatus { } pub fn exit_ok(&self) -> Result<(), ExitStatusError> { - // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is + // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is // true on all actual versions of Unix, is widely assumed, and is specified in SuS - // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not // true for a platform pretending to be Unix, the tests (our doctests, and also - // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. + // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. match NonZero_c_int::try_from(self.0) { /* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)), /* was zero, couldn't convert */ Err(_) => Ok(()), @@ -740,6 +746,8 @@ fn signal_string(signal: i32) -> &'static str { libc::SIGWINCH => " (SIGWINCH)", #[cfg(not(target_os = "haiku"))] libc::SIGIO => " (SIGIO)", + #[cfg(target_os = "haiku")] + libc::SIGPOLL => " (SIGPOLL)", libc::SIGSYS => " (SIGSYS)", // For information on Linux signals, run `man 7 signal` #[cfg(all( diff --git a/library/std/src/sys/unix/process/process_unix/tests.rs b/library/std/src/sys/unix/process/process_unix/tests.rs index e0e2d478f..e5e1f956b 100644 --- a/library/std/src/sys/unix/process/process_unix/tests.rs +++ b/library/std/src/sys/unix/process/process_unix/tests.rs @@ -3,7 +3,7 @@ use crate::panic::catch_unwind; use crate::process::Command; // Many of the other aspects of this situation, including heap alloc concurrency -// safety etc., are tested in src/test/ui/process/process-panic-after-fork.rs +// safety etc., are tested in tests/ui/process/process-panic-after-fork.rs #[test] fn exitstatus_display_tests() { @@ -19,17 +19,17 @@ fn exitstatus_display_tests() { t(0x00000, "exit status: 0"); t(0x0ff00, "exit status: 255"); - // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar. + // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar. // https://github.com/rust-lang/rust/pull/82749#issuecomment-790525956 // The purpose of this test is to test our string formatting, not our understanding of the wait - // status magic numbers. So restrict these to Linux. + // status magic numbers. So restrict these to Linux. if cfg!(target_os = "linux") { t(0x0137f, "stopped (not terminated) by signal: 19 (SIGSTOP)"); t(0x0ffff, "continued (WIFCONTINUED)"); } // Testing "unrecognised wait status" is hard because the wait.h macros typically - // assume that the value came from wait and isn't mad. With the glibc I have here + // assume that the value came from wait and isn't mad. 
With the glibc I have here // this works: if cfg!(all(target_os = "linux", target_env = "gnu")) { t(0x000ff, "unrecognised wait status: 255 0xff"); diff --git a/library/std/src/sys/unix/process/process_unsupported.rs b/library/std/src/sys/unix/process/process_unsupported.rs index 72f9f3f9c..f28ca58d0 100644 --- a/library/std/src/sys/unix/process/process_unsupported.rs +++ b/library/std/src/sys/unix/process/process_unsupported.rs @@ -20,6 +20,10 @@ impl Command { unsupported() } + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + unsupported() + } + pub fn exec(&mut self, _default: Stdio) -> io::Error { unsupported_err() } diff --git a/library/std/src/sys/unix/process/process_vxworks.rs b/library/std/src/sys/unix/process/process_vxworks.rs index 200ef6719..569a4b149 100644 --- a/library/std/src/sys/unix/process/process_vxworks.rs +++ b/library/std/src/sys/unix/process/process_vxworks.rs @@ -108,6 +108,11 @@ impl Command { } } + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?; + crate::sys_common::process::wait_with_output(proc, pipes) + } + pub fn exec(&mut self, default: Stdio) -> io::Error { let ret = Command::spawn(self, default, false); match ret { @@ -190,11 +195,11 @@ impl ExitStatus { } pub fn exit_ok(&self) -> Result<(), ExitStatusError> { - // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is + // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is // true on all actual versions of Unix, is widely assumed, and is specified in SuS - // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not + // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not // true for a platform pretending to be Unix, the tests (our doctests, and also - // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. + // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too. 
match NonZero_c_int::try_from(self.0) { Ok(failure) => Err(ExitStatusError(failure)), Err(_) => Ok(()), diff --git a/library/std/src/sys/unix/stack_overflow.rs b/library/std/src/sys/unix/stack_overflow.rs index 75a5c0f92..b59d4ba26 100644 --- a/library/std/src/sys/unix/stack_overflow.rs +++ b/library/std/src/sys/unix/stack_overflow.rs @@ -45,7 +45,10 @@ mod imp { use crate::thread; use libc::MAP_FAILED; - use libc::{mmap, munmap}; + #[cfg(not(all(target_os = "linux", target_env = "gnu")))] + use libc::{mmap as mmap64, munmap}; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::{mmap64, munmap}; use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIG_DFL}; use libc::{sigaltstack, SIGSTKSZ, SS_DISABLE}; use libc::{MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SIGSEGV}; @@ -135,7 +138,7 @@ mod imp { #[cfg(not(any(target_os = "openbsd", target_os = "netbsd", target_os = "linux",)))] let flags = MAP_PRIVATE | MAP_ANON; let stackp = - mmap(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0); + mmap64(ptr::null_mut(), SIGSTKSZ + page_size(), PROT_READ | PROT_WRITE, flags, -1, 0); if stackp == MAP_FAILED { panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error()); } diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs index c1d30dd9d..2a1830d06 100644 --- a/library/std/src/sys/unix/thread.rs +++ b/library/std/src/sys/unix/thread.rs @@ -73,7 +73,7 @@ impl Thread { n => { assert_eq!(n, libc::EINVAL); // EINVAL means |stack_size| is either too small or not a - // multiple of the system page size. Because it's definitely + // multiple of the system page size. Because it's definitely // >= PTHREAD_STACK_MIN, it must be an alignment issue. // Round up to the nearest page and try again. let page_size = os::page_size(); @@ -136,7 +136,7 @@ impl Thread { unsafe { // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20. - let name = truncate_cstr(name, TASK_COMM_LEN); + let name = truncate_cstr::<{ TASK_COMM_LEN }>(name); let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()); // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. debug_assert_eq!(res, 0); @@ -153,7 +153,7 @@ impl Thread { #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))] pub fn set_name(name: &CStr) { unsafe { - let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE); + let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name); let res = libc::pthread_setname_np(name.as_ptr()); // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked. debug_assert_eq!(res, 0); @@ -285,17 +285,12 @@ impl Drop for Thread { } #[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios", target_os = "watchos"))] -fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> crate::borrow::Cow<'_, CStr> { - use crate::{borrow::Cow, ffi::CString}; - - if cstr.to_bytes_with_nul().len() > max_with_nul { - let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec(); - // SAFETY: the non-nul bytes came straight from a CStr. - // (CString will add the terminating nul.) 
- Cow::Owned(unsafe { CString::from_vec_unchecked(bytes) }) - } else { - Cow::Borrowed(cstr) +fn truncate_cstr(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] { + let mut result = [0; MAX_WITH_NUL]; + for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) { + *dst = *src as libc::c_char; } + result } pub fn available_parallelism() -> io::Result { @@ -510,7 +505,7 @@ mod cgroups { let limit = raw_quota.next()?; let period = raw_quota.next()?; match (limit.parse::(), period.parse::()) { - (Ok(limit), Ok(period)) => { + (Ok(limit), Ok(period)) if period > 0 => { quota = quota.min(limit / period); } _ => {} @@ -570,7 +565,7 @@ mod cgroups { let period = parse_file("cpu.cfs_period_us"); match (limit, period) { - (Some(limit), Some(period)) => quota = quota.min(limit / period), + (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period), _ => {} } @@ -658,7 +653,10 @@ pub mod guard { ))] #[cfg_attr(test, allow(dead_code))] pub mod guard { - use libc::{mmap, mprotect}; + #[cfg(not(all(target_os = "linux", target_env = "gnu")))] + use libc::{mmap as mmap64, mprotect}; + #[cfg(all(target_os = "linux", target_env = "gnu"))] + use libc::{mmap64, mprotect}; use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE}; use crate::io; @@ -757,10 +755,10 @@ pub mod guard { if cfg!(all(target_os = "linux", not(target_env = "musl"))) { // Linux doesn't allocate the whole stack right away, and // the kernel has its own stack-guard mechanism to fault - // when growing too close to an existing mapping. If we map + // when growing too close to an existing mapping. If we map // our own guard, then the kernel starts enforcing a rather // large gap above that, rendering much of the possible - // stack space useless. See #43052. + // stack space useless. See #43052. // // Instead, we'll just note where we expect rlimit to start // faulting, so our handler can report "stack overflow", and @@ -776,14 +774,14 @@ pub mod guard { None } else if cfg!(target_os = "freebsd") { // FreeBSD's stack autogrows, and optionally includes a guard page - // at the bottom. If we try to remap the bottom of the stack - // ourselves, FreeBSD's guard page moves upwards. So we'll just use + // at the bottom. If we try to remap the bottom of the stack + // ourselves, FreeBSD's guard page moves upwards. So we'll just use // the builtin guard page. let stackptr = get_stack_start_aligned()?; let guardaddr = stackptr.addr(); // Technically the number of guard pages is tunable and controlled // by the security.bsd.stack_guard_page sysctl, but there are - // few reasons to change it from the default. The default value has + // few reasons to change it from the default. The default value has // been 1 ever since FreeBSD 11.1 and 10.4. const GUARD_PAGES: usize = 1; let guard = guardaddr..guardaddr + GUARD_PAGES * page_size; @@ -808,7 +806,7 @@ pub mod guard { // read/write permissions and only then mprotect() it to // no permissions at all. See issue #50313. let stackptr = get_stack_start_aligned()?; - let result = mmap( + let result = mmap64( stackptr, page_size, PROT_READ | PROT_WRITE, @@ -879,9 +877,9 @@ pub mod guard { } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc"))) { // glibc used to include the guard area within the stack, as noted in the BUGS - // section of `man pthread_attr_getguardsize`. This has been corrected starting + // section of `man pthread_attr_getguardsize`. 
This has been corrected starting // with glibc 2.27, and in some distro backports, so the guard is now placed at the - // end (below) the stack. There's no easy way for us to know which we have at + // end (below) the stack. There's no easy way for us to know which we have at // runtime, so we'll just match any fault in the range right above or below the // stack base to call that fault a stack overflow. Some(stackaddr - guardsize..stackaddr + guardsize) diff --git a/library/std/src/sys/unix/thread_parker/darwin.rs b/library/std/src/sys/unix/thread_parker/darwin.rs deleted file mode 100644 index 2f5356fe2..000000000 --- a/library/std/src/sys/unix/thread_parker/darwin.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! Thread parking for Darwin-based systems. -//! -//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they -//! cannot be used in `std` because they are non-public (their use will lead to -//! rejection from the App Store) and because they are only available starting -//! with macOS version 10.12, even though the minimum target version is 10.7. -//! -//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin -//! supports semaphores, which allow us to implement the behaviour we need with -//! only one primitive (as opposed to a mutex-condvar pair). We use the semaphore -//! provided by libdispatch, as the underlying Mach semaphore is only dubiously -//! public. - -use crate::pin::Pin; -use crate::sync::atomic::{ - AtomicI8, - Ordering::{Acquire, Release}, -}; -use crate::time::Duration; - -type dispatch_semaphore_t = *mut crate::ffi::c_void; -type dispatch_time_t = u64; - -const DISPATCH_TIME_NOW: dispatch_time_t = 0; -const DISPATCH_TIME_FOREVER: dispatch_time_t = !0; - -// Contained in libSystem.dylib, which is linked by default. -extern "C" { - fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t; - fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t; - fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize; - fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize; - fn dispatch_release(object: *mut crate::ffi::c_void); -} - -const EMPTY: i8 = 0; -const NOTIFIED: i8 = 1; -const PARKED: i8 = -1; - -pub struct Parker { - semaphore: dispatch_semaphore_t, - state: AtomicI8, -} - -unsafe impl Sync for Parker {} -unsafe impl Send for Parker {} - -impl Parker { - pub unsafe fn new(parker: *mut Parker) { - let semaphore = dispatch_semaphore_create(0); - assert!( - !semaphore.is_null(), - "failed to create dispatch semaphore for thread synchronization" - ); - parker.write(Parker { semaphore, state: AtomicI8::new(EMPTY) }) - } - - // Does not need `Pin`, but other implementation do. - pub unsafe fn park(self: Pin<&Self>) { - // The semaphore counter must be zero at this point, because unparking - // threads will not actually increase it until we signalled that we - // are waiting. - - // Change NOTIFIED to EMPTY and EMPTY to PARKED. - if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - - // Another thread may increase the semaphore counter from this point on. - // If it is faster than us, we will decrement it again immediately below. - // If we are faster, we wait. - - // Ensure that the semaphore counter has actually been decremented, even - // if the call timed out for some reason. - while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} - - // At this point, the semaphore counter is zero again. 
- - // We were definitely woken up, so we don't need to check the state. - // Still, we need to reset the state using a swap to observe the state - // change with acquire ordering. - self.state.swap(EMPTY, Acquire); - } - - // Does not need `Pin`, but other implementation do. - pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - - let nanos = dur.as_nanos().try_into().unwrap_or(i64::MAX); - let timeout = dispatch_time(DISPATCH_TIME_NOW, nanos); - - let timeout = dispatch_semaphore_wait(self.semaphore, timeout) != 0; - - let state = self.state.swap(EMPTY, Acquire); - if state == NOTIFIED && timeout { - // If the state was NOTIFIED but semaphore_wait returned without - // decrementing the count because of a timeout, it means another - // thread is about to call semaphore_signal. We must wait for that - // to happen to ensure the semaphore count is reset. - while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} - } else { - // Either a timeout occurred and we reset the state before any thread - // tried to wake us up, or we were woken up and reset the state, - // making sure to observe the state change with acquire ordering. - // Either way, the semaphore counter is now zero again. - } - } - - // Does not need `Pin`, but other implementation do. - pub fn unpark(self: Pin<&Self>) { - let state = self.state.swap(NOTIFIED, Release); - if state == PARKED { - unsafe { - dispatch_semaphore_signal(self.semaphore); - } - } - } -} - -impl Drop for Parker { - fn drop(&mut self) { - // SAFETY: - // We always ensure that the semaphore count is reset, so this will - // never cause an exception. - unsafe { - dispatch_release(self.semaphore); - } - } -} diff --git a/library/std/src/sys/unix/thread_parker/mod.rs b/library/std/src/sys/unix/thread_parker/mod.rs deleted file mode 100644 index 35f1e68a8..000000000 --- a/library/std/src/sys/unix/thread_parker/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Thread parking on systems without futex support. - -#![cfg(not(any( - target_os = "linux", - target_os = "android", - all(target_os = "emscripten", target_feature = "atomics"), - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - target_os = "fuchsia", -)))] - -cfg_if::cfg_if! { - if #[cfg(all( - any( - target_os = "macos", - target_os = "ios", - target_os = "watchos", - target_os = "tvos", - ), - not(miri), - ))] { - mod darwin; - pub use darwin::Parker; - } else if #[cfg(target_os = "netbsd")] { - mod netbsd; - pub use netbsd::Parker; - } else { - mod pthread; - pub use pthread::Parker; - } -} diff --git a/library/std/src/sys/unix/thread_parker/netbsd.rs b/library/std/src/sys/unix/thread_parker/netbsd.rs deleted file mode 100644 index 7657605b5..000000000 --- a/library/std/src/sys/unix/thread_parker/netbsd.rs +++ /dev/null @@ -1,113 +0,0 @@ -use crate::ffi::{c_int, c_void}; -use crate::pin::Pin; -use crate::ptr::{null, null_mut}; -use crate::sync::atomic::{ - AtomicU64, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::time::Duration; -use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC}; - -extern "C" { - fn ___lwp_park60( - clock_id: clockid_t, - flags: c_int, - ts: *mut timespec, - unpark: lwpid_t, - hint: *const c_void, - unparkhint: *const c_void, - ) -> c_int; - fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int; -} - -/// The thread is not parked and the token is not available. 
-/// -/// Zero cannot be a valid LWP id, since it is used as empty value for the unpark -/// argument in _lwp_park. -const EMPTY: u64 = 0; -/// The token is available. Do not park anymore. -const NOTIFIED: u64 = u64::MAX; - -pub struct Parker { - /// The parker state. Contains either one of the two state values above or the LWP - /// id of the parked thread. - state: AtomicU64, -} - -impl Parker { - pub unsafe fn new(parker: *mut Parker) { - parker.write(Parker { state: AtomicU64::new(EMPTY) }) - } - - // Does not actually need `unsafe` or `Pin`, but the pthread implementation does. - pub unsafe fn park(self: Pin<&Self>) { - // If the token has already been made available, we can skip - // a bit of work, so check for it here. - if self.state.load(Acquire) != NOTIFIED { - let parked = _lwp_self() as u64; - let hint = self.state.as_mut_ptr().cast(); - if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() { - // Loop to guard against spurious wakeups. - loop { - ___lwp_park60(0, 0, null_mut(), 0, hint, null()); - if self.state.load(Acquire) == NOTIFIED { - break; - } - } - } - } - - // At this point, the change to NOTIFIED has always been observed with acquire - // ordering, so we can just use a relaxed store here (instead of a swap). - self.state.store(EMPTY, Relaxed); - } - - // Does not actually need `unsafe` or `Pin`, but the pthread implementation does. - pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - if self.state.load(Acquire) != NOTIFIED { - let parked = _lwp_self() as u64; - let hint = self.state.as_mut_ptr().cast(); - let mut timeout = timespec { - // Saturate so that the operation will definitely time out - // (even if it is after the heat death of the universe). - tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX), - tv_nsec: dur.subsec_nanos().into(), - }; - - if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() { - // Timeout needs to be mutable since it is modified on NetBSD 9.0 and - // above. - ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, hint, null()); - // Use a swap to get acquire ordering even if the token was set after - // the timeout occurred. - self.state.swap(EMPTY, Acquire); - return; - } - } - - self.state.store(EMPTY, Relaxed); - } - - // Does not actually need `Pin`, but the pthread implementation does. - pub fn unpark(self: Pin<&Self>) { - let state = self.state.swap(NOTIFIED, Release); - if !matches!(state, EMPTY | NOTIFIED) { - let lwp = state as lwpid_t; - let hint = self.state.as_mut_ptr().cast(); - - // If the parking thread terminated and did not actually park, this will - // probably return an error, which is OK. In the worst case, another - // thread has received the same LWP id. It will then receive a spurious - // wakeup, but those are allowable per the API contract. The same reasoning - // applies if a timeout occurred before this call, but the state was not - // yet reset. - - // SAFETY: - // The syscall has no invariants to hold. Only unsafe because it is an - // extern function. - unsafe { - _lwp_unpark(lwp, hint); - } - } - } -} diff --git a/library/std/src/sys/unix/thread_parker/pthread.rs b/library/std/src/sys/unix/thread_parker/pthread.rs deleted file mode 100644 index 3dfc0026e..000000000 --- a/library/std/src/sys/unix/thread_parker/pthread.rs +++ /dev/null @@ -1,271 +0,0 @@ -//! Thread parking without `futex` using the `pthread` synchronization primitives. 
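For orientation while reading this implementation (deleted here but re-created under `thread_parking/`, matching the `pub mod thread_parker` to `thread_parking` rename in `unix/mod.rs` above): the parker is a three-state machine. A compact restatement of the states and transitions used below, not part of the patch itself:

    // The three parker states and their transitions:
    const EMPTY: usize = 0;    // no token, no waiter
    const PARKED: usize = 1;   // a thread is blocked (or about to block)
    const NOTIFIED: usize = 2; // a wake-up token is available
    // park():   NOTIFIED -> EMPTY (consume the token, return immediately)
    //           EMPTY    -> PARKED, then block until NOTIFIED -> EMPTY
    // unpark(): any      -> NOTIFIED, signalling the condvar if it saw PARKED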
- -use crate::cell::UnsafeCell; -use crate::marker::PhantomPinned; -use crate::pin::Pin; -use crate::ptr::addr_of_mut; -use crate::sync::atomic::AtomicUsize; -use crate::sync::atomic::Ordering::SeqCst; -use crate::time::Duration; - -const EMPTY: usize = 0; -const PARKED: usize = 1; -const NOTIFIED: usize = 2; - -unsafe fn lock(lock: *mut libc::pthread_mutex_t) { - let r = libc::pthread_mutex_lock(lock); - debug_assert_eq!(r, 0); -} - -unsafe fn unlock(lock: *mut libc::pthread_mutex_t) { - let r = libc::pthread_mutex_unlock(lock); - debug_assert_eq!(r, 0); -} - -unsafe fn notify_one(cond: *mut libc::pthread_cond_t) { - let r = libc::pthread_cond_signal(cond); - debug_assert_eq!(r, 0); -} - -unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) { - let r = libc::pthread_cond_wait(cond, lock); - debug_assert_eq!(r, 0); -} - -const TIMESPEC_MAX: libc::timespec = - libc::timespec { tv_sec: ::MAX, tv_nsec: 1_000_000_000 - 1 }; - -unsafe fn wait_timeout( - cond: *mut libc::pthread_cond_t, - lock: *mut libc::pthread_mutex_t, - dur: Duration, -) { - // Use the system clock on systems that do not support pthread_condattr_setclock. - // This unfortunately results in problems when the system time changes. - #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "watchos", - target_os = "espidf" - ))] - let (now, dur) = { - use crate::cmp::min; - use crate::sys::time::SystemTime; - - // OSX implementation of `pthread_cond_timedwait` is buggy - // with super long durations. When duration is greater than - // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` - // in macOS Sierra return error 316. - // - // This program demonstrates the issue: - // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c - // - // To work around this issue, and possible bugs of other OSes, timeout - // is clamped to 1000 years, which is allowable per the API of `park_timeout` - // because of spurious wakeups. - let dur = min(dur, Duration::from_secs(1000 * 365 * 86400)); - let now = SystemTime::now().t; - (now, dur) - }; - // Use the monotonic clock on other systems. - #[cfg(not(any( - target_os = "macos", - target_os = "ios", - target_os = "watchos", - target_os = "espidf" - )))] - let (now, dur) = { - use crate::sys::time::Timespec; - - (Timespec::now(libc::CLOCK_MONOTONIC), dur) - }; - - let timeout = - now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX); - let r = libc::pthread_cond_timedwait(cond, lock, &timeout); - debug_assert!(r == libc::ETIMEDOUT || r == 0); -} - -pub struct Parker { - state: AtomicUsize, - lock: UnsafeCell, - cvar: UnsafeCell, - // The `pthread` primitives require a stable address, so make this struct `!Unpin`. - _pinned: PhantomPinned, -} - -impl Parker { - /// Construct the UNIX parker in-place. - /// - /// # Safety - /// The constructed parker must never be moved. - pub unsafe fn new(parker: *mut Parker) { - // Use the default mutex implementation to allow for simpler initialization. - // This could lead to undefined behaviour when deadlocking. This is avoided - // by not deadlocking. Note in particular the unlocking operation before any - // panic, as code after the panic could try to park again. - addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY)); - addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)); - - cfg_if::cfg_if! 
{ - if #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "watchos", - target_os = "l4re", - target_os = "android", - target_os = "redox" - ))] { - addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)); - } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] { - let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null()); - assert_eq!(r, 0); - } else { - use crate::mem::MaybeUninit; - let mut attr = MaybeUninit::::uninit(); - let r = libc::pthread_condattr_init(attr.as_mut_ptr()); - assert_eq!(r, 0); - let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); - assert_eq!(r, 0); - let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr()); - assert_eq!(r, 0); - let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); - assert_eq!(r, 0); - } - } - } - - // This implementation doesn't require `unsafe`, but other implementations - // may assume this is only called by the thread that owns the Parker. - pub unsafe fn park(self: Pin<&Self>) { - // If we were previously notified then we consume this notification and - // return quickly. - if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { - return; - } - - // Otherwise we need to coordinate going to sleep - lock(self.lock.get()); - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - - unlock(self.lock.get()); - - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. - Err(_) => { - unlock(self.lock.get()); - - panic!("inconsistent park state") - } - } - - loop { - wait(self.cvar.get(), self.lock.get()); - - match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { - Ok(_) => break, // got a notification - Err(_) => {} // spurious wakeup, go back to sleep - } - } - - unlock(self.lock.get()); - } - - // This implementation doesn't require `unsafe`, but other implementations - // may assume this is only called by the thread that owns the Parker. Use - // `Pin` to guarantee a stable address for the mutex and condition variable. - pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - // Like `park` above we have a fast path for an already-notified thread, and - // afterwards we start coordinating for a sleep. - // return quickly. - if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { - return; - } - - lock(self.lock.get()); - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read again here, see `park`. - let old = self.state.swap(EMPTY, SeqCst); - unlock(self.lock.get()); - - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. 
- Err(_) => { - unlock(self.lock.get()); - panic!("inconsistent park_timeout state") - } - } - - // Wait with a timeout, and if we spuriously wake up or otherwise wake up - // from a notification we just want to unconditionally set the state back to - // empty, either consuming a notification or un-flagging ourselves as - // parked. - wait_timeout(self.cvar.get(), self.lock.get(), dur); - - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => unlock(self.lock.get()), // got a notification, hurray! - PARKED => unlock(self.lock.get()), // no notification, alas - n => { - unlock(self.lock.get()); - panic!("inconsistent park_timeout state: {n}") - } - } - } - - pub fn unpark(self: Pin<&Self>) { - // To ensure the unparked thread will observe any writes we made - // before this call, we must perform a release operation that `park` - // can synchronize with. To do that we must write `NOTIFIED` even if - // `state` is already `NOTIFIED`. That is why this must be a swap - // rather than a compare-and-swap that returns if it reads `NOTIFIED` - // on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => return, // no one was waiting - NOTIFIED => return, // already unparked - PARKED => {} // gotta go wake someone up - _ => panic!("inconsistent state in unpark"), - } - - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - unsafe { - lock(self.lock.get()); - unlock(self.lock.get()); - notify_one(self.cvar.get()); - } - } -} - -impl Drop for Parker { - fn drop(&mut self) { - unsafe { - libc::pthread_cond_destroy(self.cvar.get_mut()); - libc::pthread_mutex_destroy(self.lock.get_mut()); - } - } -} - -unsafe impl Sync for Parker {} -unsafe impl Send for Parker {} diff --git a/library/std/src/sys/unix/thread_parking/darwin.rs b/library/std/src/sys/unix/thread_parking/darwin.rs new file mode 100644 index 000000000..b709fada3 --- /dev/null +++ b/library/std/src/sys/unix/thread_parking/darwin.rs @@ -0,0 +1,131 @@ +//! Thread parking for Darwin-based systems. +//! +//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they +//! cannot be used in `std` because they are non-public (their use will lead to +//! rejection from the App Store) and because they are only available starting +//! with macOS version 10.12, even though the minimum target version is 10.7. +//! +//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin +//! supports semaphores, which allow us to implement the behaviour we need with +//! only one primitive (as opposed to a mutex-condvar pair). We use the semaphore +//! provided by libdispatch, as the underlying Mach semaphore is only dubiously +//! public. 
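A note on the timeout path of the Darwin parker that follows: `dispatch_time` takes a signed 64-bit nanosecond delta, so `park_timeout` below converts the `Duration` with `try_into` and saturates to `i64::MAX` on overflow, which makes absurdly long timeouts behave as "effectively forever". Extracted as a standalone sketch (the `to_dispatch_delta` name is invented for the example):

use std::time::Duration;

/// Saturating Duration -> i64 nanosecond conversion, as used for the
/// `dispatch_time` delta argument. i64::MAX nanoseconds is about 292 years.
fn to_dispatch_delta(dur: Duration) -> i64 {
    dur.as_nanos().try_into().unwrap_or(i64::MAX)
}

fn main() {
    assert_eq!(to_dispatch_delta(Duration::from_secs(1)), 1_000_000_000);
    // Durations that overflow an i64 nanosecond count saturate.
    assert_eq!(to_dispatch_delta(Duration::MAX), i64::MAX);
}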
+ +use crate::pin::Pin; +use crate::sync::atomic::{ + AtomicI8, + Ordering::{Acquire, Release}, +}; +use crate::time::Duration; + +type dispatch_semaphore_t = *mut crate::ffi::c_void; +type dispatch_time_t = u64; + +const DISPATCH_TIME_NOW: dispatch_time_t = 0; +const DISPATCH_TIME_FOREVER: dispatch_time_t = !0; + +// Contained in libSystem.dylib, which is linked by default. +extern "C" { + fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t; + fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t; + fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize; + fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize; + fn dispatch_release(object: *mut crate::ffi::c_void); +} + +const EMPTY: i8 = 0; +const NOTIFIED: i8 = 1; +const PARKED: i8 = -1; + +pub struct Parker { + semaphore: dispatch_semaphore_t, + state: AtomicI8, +} + +unsafe impl Sync for Parker {} +unsafe impl Send for Parker {} + +impl Parker { + pub unsafe fn new_in_place(parker: *mut Parker) { + let semaphore = dispatch_semaphore_create(0); + assert!( + !semaphore.is_null(), + "failed to create dispatch semaphore for thread synchronization" + ); + parker.write(Parker { semaphore, state: AtomicI8::new(EMPTY) }) + } + + // Does not need `Pin`, but other implementation do. + pub unsafe fn park(self: Pin<&Self>) { + // The semaphore counter must be zero at this point, because unparking + // threads will not actually increase it until we signalled that we + // are waiting. + + // Change NOTIFIED to EMPTY and EMPTY to PARKED. + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + // Another thread may increase the semaphore counter from this point on. + // If it is faster than us, we will decrement it again immediately below. + // If we are faster, we wait. + + // Ensure that the semaphore counter has actually been decremented, even + // if the call timed out for some reason. + while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} + + // At this point, the semaphore counter is zero again. + + // We were definitely woken up, so we don't need to check the state. + // Still, we need to reset the state using a swap to observe the state + // change with acquire ordering. + self.state.swap(EMPTY, Acquire); + } + + // Does not need `Pin`, but other implementation do. + pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + let nanos = dur.as_nanos().try_into().unwrap_or(i64::MAX); + let timeout = dispatch_time(DISPATCH_TIME_NOW, nanos); + + let timeout = dispatch_semaphore_wait(self.semaphore, timeout) != 0; + + let state = self.state.swap(EMPTY, Acquire); + if state == NOTIFIED && timeout { + // If the state was NOTIFIED but semaphore_wait returned without + // decrementing the count because of a timeout, it means another + // thread is about to call semaphore_signal. We must wait for that + // to happen to ensure the semaphore count is reset. + while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {} + } else { + // Either a timeout occurred and we reset the state before any thread + // tried to wake us up, or we were woken up and reset the state, + // making sure to observe the state change with acquire ordering. + // Either way, the semaphore counter is now zero again. + } + } + + // Does not need `Pin`, but other implementation do. 
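+    // State transitions performed by unpark():
+    //   EMPTY    -> NOTIFIED: store the token; the next park() will return
+    //                         immediately without touching the semaphore.
+    //   PARKED   -> NOTIFIED: the parker is blocked (or about to block) on
+    //                         the semaphore, so signal it as well.
+    //   NOTIFIED -> NOTIFIED: a token is already present; nothing more to do.
+    // The Release swap below pairs with the Acquire operations in park() and
+    // park_timeout() above.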
+ pub fn unpark(self: Pin<&Self>) { + let state = self.state.swap(NOTIFIED, Release); + if state == PARKED { + unsafe { + dispatch_semaphore_signal(self.semaphore); + } + } + } +} + +impl Drop for Parker { + fn drop(&mut self) { + // SAFETY: + // We always ensure that the semaphore count is reset, so this will + // never cause an exception. + unsafe { + dispatch_release(self.semaphore); + } + } +} diff --git a/library/std/src/sys/unix/thread_parking/mod.rs b/library/std/src/sys/unix/thread_parking/mod.rs new file mode 100644 index 000000000..185333c07 --- /dev/null +++ b/library/std/src/sys/unix/thread_parking/mod.rs @@ -0,0 +1,32 @@ +//! Thread parking on systems without futex support. + +#![cfg(not(any( + target_os = "linux", + target_os = "android", + all(target_os = "emscripten", target_feature = "atomics"), + target_os = "freebsd", + target_os = "openbsd", + target_os = "dragonfly", + target_os = "fuchsia", +)))] + +cfg_if::cfg_if! { + if #[cfg(all( + any( + target_os = "macos", + target_os = "ios", + target_os = "watchos", + target_os = "tvos", + ), + not(miri), + ))] { + mod darwin; + pub use darwin::Parker; + } else if #[cfg(target_os = "netbsd")] { + mod netbsd; + pub use netbsd::{current, park, park_timeout, unpark, ThreadId}; + } else { + mod pthread; + pub use pthread::Parker; + } +} diff --git a/library/std/src/sys/unix/thread_parking/netbsd.rs b/library/std/src/sys/unix/thread_parking/netbsd.rs new file mode 100644 index 000000000..3be081221 --- /dev/null +++ b/library/std/src/sys/unix/thread_parking/netbsd.rs @@ -0,0 +1,52 @@ +use crate::ffi::{c_int, c_void}; +use crate::ptr; +use crate::time::Duration; +use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC}; + +extern "C" { + fn ___lwp_park60( + clock_id: clockid_t, + flags: c_int, + ts: *mut timespec, + unpark: lwpid_t, + hint: *const c_void, + unparkhint: *const c_void, + ) -> c_int; + fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int; +} + +pub type ThreadId = lwpid_t; + +#[inline] +pub fn current() -> ThreadId { + unsafe { _lwp_self() } +} + +#[inline] +pub fn park(hint: usize) { + unsafe { + ___lwp_park60(0, 0, ptr::null_mut(), 0, ptr::invalid(hint), ptr::null()); + } +} + +pub fn park_timeout(dur: Duration, hint: usize) { + let mut timeout = timespec { + // Saturate so that the operation will definitely time out + // (even if it is after the heat death of the universe). + tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX), + tv_nsec: dur.subsec_nanos().into(), + }; + + // Timeout needs to be mutable since it is modified on NetBSD 9.0 and + // above. + unsafe { + ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, ptr::invalid(hint), ptr::null()); + } +} + +#[inline] +pub fn unpark(tid: ThreadId, hint: usize) { + unsafe { + _lwp_unpark(tid, ptr::invalid(hint)); + } +} diff --git a/library/std/src/sys/unix/thread_parking/pthread.rs b/library/std/src/sys/unix/thread_parking/pthread.rs new file mode 100644 index 000000000..082d25e68 --- /dev/null +++ b/library/std/src/sys/unix/thread_parking/pthread.rs @@ -0,0 +1,271 @@ +//! Thread parking without `futex` using the `pthread` synchronization primitives. 
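Note the shape change in the new `netbsd.rs` above: instead of exporting a `Parker` type, it exports bare `current`/`park`/`park_timeout`/`unpark` primitives keyed by a thread id and an opaque address `hint`, with the portable state machine living in `sys_common/thread_parking/id.rs`. As a rough model of that contract only (spurious wakeups are allowed, and unparking a stale id must be harmless), here is a toy version backed by `std::thread` parking instead of `_lwp_park`/`_lwp_unpark`; the registry and all names are invented for the sketch, and the real backend needs no user-space table because the kernel resolves the `lwpid_t` itself.

use std::collections::HashMap;
use std::sync::Mutex;
use std::thread::{self, Thread, ThreadId};
use std::time::Duration;

// Toy id-to-handle registry standing in for the kernel's LWP table.
static REGISTRY: Mutex<Option<HashMap<ThreadId, Thread>>> = Mutex::new(None);

pub fn current() -> ThreadId {
    let me = thread::current();
    let id = me.id();
    REGISTRY.lock().unwrap().get_or_insert_with(HashMap::new).insert(id, me);
    id
}

pub fn park(_hint: usize) {
    // Like ___lwp_park60, this may return spuriously; callers must loop
    // and re-check their own state.
    thread::park();
}

pub fn park_timeout(dur: Duration, _hint: usize) {
    thread::park_timeout(dur);
}

pub fn unpark(tid: ThreadId, _hint: usize) {
    // Unparking an id that never parked or has exited is a no-op here; the
    // real syscall may return an error, which is likewise ignored.
    if let Some(handle) = REGISTRY.lock().unwrap().get_or_insert_with(HashMap::new).get(&tid) {
        handle.unpark();
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    let t = thread::spawn(move || {
        tx.send(current()).unwrap();
        park(0); // a real caller would re-check shared state in a loop
    });
    let id = rx.recv().unwrap();
    unpark(id, 0);
    t.join().unwrap();
}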
+ +use crate::cell::UnsafeCell; +use crate::marker::PhantomPinned; +use crate::pin::Pin; +use crate::ptr::addr_of_mut; +use crate::sync::atomic::AtomicUsize; +use crate::sync::atomic::Ordering::SeqCst; +use crate::sys::time::TIMESPEC_MAX; +use crate::time::Duration; + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +unsafe fn lock(lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_mutex_lock(lock); + debug_assert_eq!(r, 0); +} + +unsafe fn unlock(lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_mutex_unlock(lock); + debug_assert_eq!(r, 0); +} + +unsafe fn notify_one(cond: *mut libc::pthread_cond_t) { + let r = libc::pthread_cond_signal(cond); + debug_assert_eq!(r, 0); +} + +unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) { + let r = libc::pthread_cond_wait(cond, lock); + debug_assert_eq!(r, 0); +} + +unsafe fn wait_timeout( + cond: *mut libc::pthread_cond_t, + lock: *mut libc::pthread_mutex_t, + dur: Duration, +) { + // Use the system clock on systems that do not support pthread_condattr_setclock. + // This unfortunately results in problems when the system time changes. + #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "watchos", + target_os = "espidf", + target_os = "horizon", + ))] + let (now, dur) = { + use crate::cmp::min; + use crate::sys::time::SystemTime; + + // OSX implementation of `pthread_cond_timedwait` is buggy + // with super long durations. When duration is greater than + // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait` + // in macOS Sierra return error 316. + // + // This program demonstrates the issue: + // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c + // + // To work around this issue, and possible bugs of other OSes, timeout + // is clamped to 1000 years, which is allowable per the API of `park_timeout` + // because of spurious wakeups. + let dur = min(dur, Duration::from_secs(1000 * 365 * 86400)); + let now = SystemTime::now().t; + (now, dur) + }; + // Use the monotonic clock on other systems. + #[cfg(not(any( + target_os = "macos", + target_os = "ios", + target_os = "watchos", + target_os = "espidf", + target_os = "horizon", + )))] + let (now, dur) = { + use crate::sys::time::Timespec; + + (Timespec::now(libc::CLOCK_MONOTONIC), dur) + }; + + let timeout = + now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX); + let r = libc::pthread_cond_timedwait(cond, lock, &timeout); + debug_assert!(r == libc::ETIMEDOUT || r == 0); +} + +pub struct Parker { + state: AtomicUsize, + lock: UnsafeCell, + cvar: UnsafeCell, + // The `pthread` primitives require a stable address, so make this struct `!Unpin`. + _pinned: PhantomPinned, +} + +impl Parker { + /// Construct the UNIX parker in-place. + /// + /// # Safety + /// The constructed parker must never be moved. + pub unsafe fn new_in_place(parker: *mut Parker) { + // Use the default mutex implementation to allow for simpler initialization. + // This could lead to undefined behaviour when deadlocking. This is avoided + // by not deadlocking. Note in particular the unlocking operation before any + // panic, as code after the panic could try to park again. + addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY)); + addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)); + + cfg_if::cfg_if! 
{ + if #[cfg(any( + target_os = "macos", + target_os = "ios", + target_os = "watchos", + target_os = "l4re", + target_os = "android", + target_os = "redox" + ))] { + addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)); + } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] { + let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null()); + assert_eq!(r, 0); + } else { + use crate::mem::MaybeUninit; + let mut attr = MaybeUninit::::uninit(); + let r = libc::pthread_condattr_init(attr.as_mut_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); + assert_eq!(r, 0); + let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr()); + assert_eq!(r, 0); + let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); + assert_eq!(r, 0); + } + } + } + + // This implementation doesn't require `unsafe`, but other implementations + // may assume this is only called by the thread that owns the Parker. + pub unsafe fn park(self: Pin<&Self>) { + // If we were previously notified then we consume this notification and + // return quickly. + if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { + return; + } + + // Otherwise we need to coordinate going to sleep + lock(self.lock.get()); + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read here, even though we know it will be `NOTIFIED`. + // This is because `unpark` may have been called again since we read + // `NOTIFIED` in the `compare_exchange` above. We must perform an + // acquire operation that synchronizes with that `unpark` to observe + // any writes it made before the call to unpark. To do that we must + // read from the write it made to `state`. + let old = self.state.swap(EMPTY, SeqCst); + + unlock(self.lock.get()); + + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } // should consume this notification, so prohibit spurious wakeups in next park. + Err(_) => { + unlock(self.lock.get()); + + panic!("inconsistent park state") + } + } + + loop { + wait(self.cvar.get(), self.lock.get()); + + match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { + Ok(_) => break, // got a notification + Err(_) => {} // spurious wakeup, go back to sleep + } + } + + unlock(self.lock.get()); + } + + // This implementation doesn't require `unsafe`, but other implementations + // may assume this is only called by the thread that owns the Parker. Use + // `Pin` to guarantee a stable address for the mutex and condition variable. + pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { + // Like `park` above we have a fast path for an already-notified thread, and + // afterwards we start coordinating for a sleep. + // return quickly. + if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { + return; + } + + lock(self.lock.get()); + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read again here, see `park`. + let old = self.state.swap(EMPTY, SeqCst); + unlock(self.lock.get()); + + assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + return; + } // should consume this notification, so prohibit spurious wakeups in next park. 
+ Err(_) => { + unlock(self.lock.get()); + panic!("inconsistent park_timeout state") + } + } + + // Wait with a timeout, and if we spuriously wake up or otherwise wake up + // from a notification we just want to unconditionally set the state back to + // empty, either consuming a notification or un-flagging ourselves as + // parked. + wait_timeout(self.cvar.get(), self.lock.get(), dur); + + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED => unlock(self.lock.get()), // got a notification, hurray! + PARKED => unlock(self.lock.get()), // no notification, alas + n => { + unlock(self.lock.get()); + panic!("inconsistent park_timeout state: {n}") + } + } + } + + pub fn unpark(self: Pin<&Self>) { + // To ensure the unparked thread will observe any writes we made + // before this call, we must perform a release operation that `park` + // can synchronize with. To do that we must write `NOTIFIED` even if + // `state` is already `NOTIFIED`. That is why this must be a swap + // rather than a compare-and-swap that returns if it reads `NOTIFIED` + // on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, // no one was waiting + NOTIFIED => return, // already unparked + PARKED => {} // gotta go wake someone up + _ => panic!("inconsistent state in unpark"), + } + + // There is a period between when the parked thread sets `state` to + // `PARKED` (or last checked `state` in the case of a spurious wake + // up) and when it actually waits on `cvar`. If we were to notify + // during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has + // `lock` locked at this stage so we can acquire `lock` to wait until + // it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the + // parked thread wakes it doesn't get woken only to have to wait for us + // to release `lock`. + unsafe { + lock(self.lock.get()); + unlock(self.lock.get()); + notify_one(self.cvar.get()); + } + } +} + +impl Drop for Parker { + fn drop(&mut self) { + unsafe { + libc::pthread_cond_destroy(self.cvar.get_mut()); + libc::pthread_mutex_destroy(self.lock.get_mut()); + } + } +} + +unsafe impl Sync for Parker {} +unsafe impl Send for Parker {} diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs index d5abd9b58..2daad981b 100644 --- a/library/std/src/sys/unix/time.rs +++ b/library/std/src/sys/unix/time.rs @@ -5,6 +5,9 @@ pub use self::inner::Instant; const NSEC_PER_SEC: u64 = 1_000_000_000; pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; +#[allow(dead_code)] // Used for pthread condvar timeouts +pub const TIMESPEC_MAX: libc::timespec = + libc::timespec { tv_sec: ::MAX, tv_nsec: 1_000_000_000 - 1 }; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs index f5a4ce929..62ffee70b 100644 --- a/library/std/src/sys/unix/weak.rs +++ b/library/std/src/sys/unix/weak.rs @@ -1,9 +1,8 @@ //! Support for "weak linkage" to symbols on Unix //! -//! Some I/O operations we do in libstd require newer versions of OSes but we -//! need to maintain binary compatibility with older releases for now. In order -//! to use the new functionality when available we use this module for -//! detection. +//! Some I/O operations we do in std require newer versions of OSes but we need +//! to maintain binary compatibility with older releases for now. In order to +//! 
use the new functionality when available we use this module for detection. //! //! One option to use here is weak linkage, but that is unfortunately only //! really workable with ELF. Otherwise, use dlsym to get the symbol value at @@ -29,7 +28,7 @@ use crate::ptr; use crate::sync::atomic::{self, AtomicPtr, Ordering}; // We can use true weak linkage on ELF targets. -#[cfg(all(not(any(target_os = "macos", target_os = "ios")), not(bootstrap)))] +#[cfg(not(any(target_os = "macos", target_os = "ios")))] pub(crate) macro weak { (fn $name:ident($($t:ty),*) -> $ret:ty) => ( let ref $name: ExternWeak $ret> = { @@ -43,30 +42,14 @@ pub(crate) macro weak { ) } -#[cfg(all(not(any(target_os = "macos", target_os = "ios")), bootstrap))] -pub(crate) macro weak { - (fn $name:ident($($t:ty),*) -> $ret:ty) => ( - let ref $name: ExternWeak $ret> = { - extern "C" { - #[linkage = "extern_weak"] - static $name: *const libc::c_void; - } - #[allow(unused_unsafe)] - ExternWeak::new(unsafe { $name }) - }; - ) -} - // On non-ELF targets, use the dlsym approximation of weak linkage. #[cfg(any(target_os = "macos", target_os = "ios"))] pub(crate) use self::dlsym as weak; -#[cfg(not(bootstrap))] pub(crate) struct ExternWeak { weak_ptr: Option, } -#[cfg(not(bootstrap))] impl ExternWeak { #[inline] pub(crate) fn new(weak_ptr: Option) -> Self { @@ -79,34 +62,6 @@ impl ExternWeak { } } -#[cfg(bootstrap)] -pub(crate) struct ExternWeak { - weak_ptr: *const libc::c_void, - _marker: PhantomData, -} - -#[cfg(bootstrap)] -impl ExternWeak { - #[inline] - pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self { - ExternWeak { weak_ptr, _marker: PhantomData } - } -} - -#[cfg(bootstrap)] -impl ExternWeak { - #[inline] - pub(crate) fn get(&self) -> Option { - unsafe { - if self.weak_ptr.is_null() { - None - } else { - Some(mem::transmute_copy::<*const libc::c_void, F>(&self.weak_ptr)) - } - } - } -} - pub(crate) macro dlsym { (fn $name:ident($($t:ty),*) -> $ret:ty) => ( dlsym!(fn $name($($t),*) -> $ret, stringify!($name)); diff --git a/library/std/src/sys/unsupported/mod.rs b/library/std/src/sys/unsupported/mod.rs index 7bf6d40b7..15b22c620 100644 --- a/library/std/src/sys/unsupported/mod.rs +++ b/library/std/src/sys/unsupported/mod.rs @@ -9,6 +9,7 @@ pub mod fs; pub mod io; pub mod locks; pub mod net; +pub mod once; pub mod os; #[path = "../unix/os_str.rs"] pub mod os_str; diff --git a/library/std/src/sys/unsupported/once.rs b/library/std/src/sys/unsupported/once.rs new file mode 100644 index 000000000..b4bb4975f --- /dev/null +++ b/library/std/src/sys/unsupported/once.rs @@ -0,0 +1,89 @@ +use crate::cell::Cell; +use crate::sync as public; + +pub struct Once { + state: Cell, +} + +pub struct OnceState { + poisoned: bool, + set_state_to: Cell, +} + +#[derive(Clone, Copy, PartialEq, Eq)] +enum State { + Incomplete, + Poisoned, + Running, + Complete, +} + +struct CompletionGuard<'a> { + state: &'a Cell, + set_state_on_drop_to: State, +} + +impl<'a> Drop for CompletionGuard<'a> { + fn drop(&mut self) { + self.state.set(self.set_state_on_drop_to); + } +} + +// Safety: threads are not supported on this platform. 
+unsafe impl Sync for Once {} + +impl Once { + #[inline] + #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] + pub const fn new() -> Once { + Once { state: Cell::new(State::Incomplete) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + self.state.get() == State::Complete + } + + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { + let state = self.state.get(); + match state { + State::Poisoned if !ignore_poisoning => { + // Panic to propagate the poison. + panic!("Once instance has previously been poisoned"); + } + State::Incomplete | State::Poisoned => { + self.state.set(State::Running); + // `guard` will set the new state on drop. + let mut guard = + CompletionGuard { state: &self.state, set_state_on_drop_to: State::Poisoned }; + // Run the function, letting it know if we're poisoned or not. + let f_state = public::OnceState { + inner: OnceState { + poisoned: state == State::Poisoned, + set_state_to: Cell::new(State::Complete), + }, + }; + f(&f_state); + guard.set_state_on_drop_to = f_state.inner.set_state_to.get(); + } + State::Running => { + panic!("one-time initialization may not be performed recursively"); + } + State::Complete => {} + } + } +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_to.set(State::Poisoned) + } +} diff --git a/library/std/src/sys/unsupported/pipe.rs b/library/std/src/sys/unsupported/pipe.rs index 25514c232..0bba673b4 100644 --- a/library/std/src/sys/unsupported/pipe.rs +++ b/library/std/src/sys/unsupported/pipe.rs @@ -15,6 +15,10 @@ impl AnonPipe { self.0 } + pub fn read_to_end(&self, _buf: &mut Vec) -> io::Result { + self.0 + } + pub fn write(&self, _buf: &[u8]) -> io::Result { self.0 } diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs index 633f17c05..a494f2d6b 100644 --- a/library/std/src/sys/unsupported/process.rs +++ b/library/std/src/sys/unsupported/process.rs @@ -75,6 +75,10 @@ impl Command { ) -> io::Result<(Process, StdioPipes)> { unsupported() } + + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + unsupported() + } } impl From for Stdio { diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs index c8c47763a..1dc3f2b20 100644 --- a/library/std/src/sys/wasi/mod.rs +++ b/library/std/src/sys/wasi/mod.rs @@ -32,6 +32,8 @@ pub mod io; #[path = "../unsupported/locks/mod.rs"] pub mod locks; pub mod net; +#[path = "../unsupported/once.rs"] +pub mod once; pub mod os; #[path = "../unix/os_str.rs"] pub mod os_str; diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs index d68c3e5f1..77ebe3c4a 100644 --- a/library/std/src/sys/wasm/mod.rs +++ b/library/std/src/sys/wasm/mod.rs @@ -66,6 +66,8 @@ cfg_if::cfg_if! 
{ } else { #[path = "../unsupported/locks/mod.rs"] pub mod locks; + #[path = "../unsupported/once.rs"] + pub mod once; #[path = "../unsupported/thread.rs"] pub mod thread; } diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs index 81461de4f..f58dcf128 100644 --- a/library/std/src/sys/windows/c.rs +++ b/library/std/src/sys/windows/c.rs @@ -295,8 +295,6 @@ pub fn nt_success(status: NTSTATUS) -> bool { status >= 0 } -// "RNG\0" -pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0]; pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002; #[repr(C)] @@ -834,6 +832,10 @@ if #[cfg(not(target_vendor = "uwp"))] { #[link(name = "advapi32")] extern "system" { + // Forbidden when targeting UWP + #[link_name = "SystemFunction036"] + pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN; + // Allowed but unused by UWP pub fn OpenProcessToken( ProcessHandle: HANDLE, @@ -1258,13 +1260,6 @@ extern "system" { cbBuffer: ULONG, dwFlags: ULONG, ) -> NTSTATUS; - pub fn BCryptOpenAlgorithmProvider( - phalgorithm: *mut BCRYPT_ALG_HANDLE, - pszAlgId: LPCWSTR, - pszimplementation: LPCWSTR, - dwflags: ULONG, - ) -> NTSTATUS; - pub fn BCryptCloseAlgorithmProvider(hAlgorithm: BCRYPT_ALG_HANDLE, dwFlags: ULONG) -> NTSTATUS; } // Functions that aren't available on every version of Windows that we support, diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs index e67411e16..77359abe4 100644 --- a/library/std/src/sys/windows/mod.rs +++ b/library/std/src/sys/windows/mod.rs @@ -33,7 +33,7 @@ pub mod stdio; pub mod thread; pub mod thread_local_dtor; pub mod thread_local_key; -pub mod thread_parker; +pub mod thread_parking; pub mod time; cfg_if::cfg_if! { if #[cfg(not(target_vendor = "uwp"))] { diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs index 352337ba3..d7adeb266 100644 --- a/library/std/src/sys/windows/os.rs +++ b/library/std/src/sys/windows/os.rs @@ -157,7 +157,7 @@ impl<'a> Iterator for SplitPaths<'a> { // Double quotes are used as a way of introducing literal semicolons // (since c:\some;dir is a valid Windows path). Double quotes are not // themselves permitted in path names, so there is no way to escape a - // double quote. Quoted regions can appear in arbitrary locations, so + // double quote. 
Quoted regions can appear in arbitrary locations, so // // c:\foo;c:\som"e;di"r;c:\bar // diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs index 9f26acc45..7b25edaa5 100644 --- a/library/std/src/sys/windows/pipe.rs +++ b/library/std/src/sys/windows/pipe.rs @@ -1,7 +1,7 @@ use crate::os::windows::prelude::*; use crate::ffi::OsStr; -use crate::io::{self, IoSlice, IoSliceMut}; +use crate::io::{self, IoSlice, IoSliceMut, Read}; use crate::mem; use crate::path::Path; use crate::ptr; @@ -261,6 +261,10 @@ impl AnonPipe { self.inner.is_read_vectored() } + pub fn read_to_end(&self, buf: &mut Vec) -> io::Result { + self.handle().read_to_end(buf) + } + pub fn write(&self, buf: &[u8]) -> io::Result { unsafe { let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD; diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs index 31e9b34fb..10bc949e1 100644 --- a/library/std/src/sys/windows/process.rs +++ b/library/std/src/sys/windows/process.rs @@ -351,6 +351,11 @@ impl Command { )) } } + + pub fn output(&mut self) -> io::Result<(ExitStatus, Vec, Vec)> { + let (proc, pipes) = self.spawn(Stdio::MakePipe, false)?; + crate::sys_common::process::wait_with_output(proc, pipes) + } } impl fmt::Debug for Command { diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs index b5a49489d..cdf37cfe9 100644 --- a/library/std/src/sys/windows/rand.rs +++ b/library/std/src/sys/windows/rand.rs @@ -1,106 +1,39 @@ -//! # Random key generation -//! -//! This module wraps the RNG provided by the OS. There are a few different -//! ways to interface with the OS RNG so it's worth exploring each of the options. -//! Note that at the time of writing these all go through the (undocumented) -//! `bcryptPrimitives.dll` but they use different route to get there. -//! -//! Originally we were using [`RtlGenRandom`], however that function is -//! deprecated and warns it "may be altered or unavailable in subsequent versions". -//! -//! So we switched to [`BCryptGenRandom`] with the `BCRYPT_USE_SYSTEM_PREFERRED_RNG` -//! flag to query and find the system configured RNG. However, this change caused a small -//! but significant number of users to experience panics caused by a failure of -//! this function. See [#94098]. -//! -//! The current version falls back to using `BCryptOpenAlgorithmProvider` if -//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason. -//! -//! [#94098]: https://github.com/rust-lang/rust/issues/94098 -//! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom -//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom +use crate::io; use crate::mem; use crate::ptr; use crate::sys::c; -/// Generates high quality secure random keys for use by [`HashMap`]. -/// -/// This is used to seed the default [`RandomState`]. 
-/// -/// [`HashMap`]: crate::collections::HashMap -/// [`RandomState`]: crate::collections::hash_map::RandomState pub fn hashmap_random_keys() -> (u64, u64) { - Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng) + let mut v = (0, 0); + let ret = unsafe { + c::BCryptGenRandom( + ptr::null_mut(), + &mut v as *mut _ as *mut u8, + mem::size_of_val(&v) as c::ULONG, + c::BCRYPT_USE_SYSTEM_PREFERRED_RNG, + ) + }; + if c::nt_success(ret) { v } else { fallback_rng() } } -struct Rng { - algorithm: c::BCRYPT_ALG_HANDLE, - flags: u32, -} -impl Rng { - const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) }; - - /// Create the RNG from an existing algorithm handle. - /// - /// # Safety - /// - /// The handle must either be null or a valid algorithm handle. - const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self { - Self { algorithm, flags } - } - - /// Open a handle to the RNG algorithm. - fn open() -> Result { - use crate::sync::atomic::AtomicPtr; - use crate::sync::atomic::Ordering::{Acquire, Release}; - - // An atomic is used so we don't need to reopen the handle every time. - static HANDLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - - let mut handle = HANDLE.load(Acquire); - if handle.is_null() { - let status = unsafe { - c::BCryptOpenAlgorithmProvider( - &mut handle, - c::BCRYPT_RNG_ALGORITHM.as_ptr(), - ptr::null(), - 0, - ) - }; - if c::nt_success(status) { - // If another thread opens a handle first then use that handle instead. - let result = HANDLE.compare_exchange(ptr::null_mut(), handle, Release, Acquire); - if let Err(previous_handle) = result { - // Close our handle and return the previous one. - unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) }; - handle = previous_handle; - } - Ok(unsafe { Self::new(handle, 0) }) - } else { - Err(status) - } - } else { - Ok(unsafe { Self::new(handle, 0) }) - } - } +/// Generate random numbers using the fallback RNG function (RtlGenRandom) +/// +/// This is necessary because of a failure to load the SysWOW64 variant of the +/// bcryptprimitives.dll library from code that lives in bcrypt.dll +/// See +#[cfg(not(target_vendor = "uwp"))] +#[inline(never)] +fn fallback_rng() -> (u64, u64) { + let mut v = (0, 0); + let ret = + unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) }; - fn gen_random_keys(self) -> Result<(u64, u64), c::NTSTATUS> { - let mut v = (0, 0); - let status = unsafe { - let size = mem::size_of_val(&v).try_into().unwrap(); - c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags) - }; - if c::nt_success(status) { Ok(v) } else { Err(status) } - } + if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) } } -/// Generate random numbers using the fallback RNG function +/// We can't use RtlGenRandom with UWP, so there is no fallback +#[cfg(target_vendor = "uwp")] #[inline(never)] -fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) { - match Rng::open().and_then(|rng| rng.gen_random_keys()) { - Ok(keys) => keys, - Err(status) => { - panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}") - } - } +fn fallback_rng() -> (u64, u64) { + panic!("fallback RNG broken: RtlGenRandom() not supported on UWP"); } diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs index c5c9e97e6..1cb576c95 100644 --- a/library/std/src/sys/windows/thread.rs +++ b/library/std/src/sys/windows/thread.rs @@ -26,7 +26,7 @@ impl Thread { // FIXME On UNIX, 
we guard against stack sizes that are too small but // that's because pthreads enforces that stacks are at least - // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's + // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's // just that below a certain threshold you can't do anything useful. // That threshold is application and architecture-specific, however. let ret = c::CreateThread( diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs deleted file mode 100644 index 2f7ae863b..000000000 --- a/library/std/src/sys/windows/thread_parker.rs +++ /dev/null @@ -1,253 +0,0 @@ -// Thread parker implementation for Windows. -// -// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+). -// This modern API is exactly the same as the futex syscalls the Linux thread -// parker uses. When These APIs are available, the implementation of this -// thread parker matches the Linux thread parker exactly. -// -// However, when the modern API is not available, this implementation falls -// back to NT Keyed Events, which are similar, but have some important -// differences. These are available since Windows XP. -// -// WaitOnAddress first checks the state of the thread parker to make sure it no -// WakeByAddressSingle calls can be missed between updating the parker state -// and calling the function. -// -// NtWaitForKeyedEvent does not have this option, and unconditionally blocks -// without checking the parker state first. Instead, NtReleaseKeyedEvent -// (unlike WakeByAddressSingle) *blocks* until it woke up a thread waiting for -// it by NtWaitForKeyedEvent. This way, we can be sure no events are missed, -// but we need to be careful not to block unpark() if park_timeout() was woken -// up by a timeout instead of unpark(). -// -// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a -// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure -// a successfully awoken park() was awoken by unpark() and not a -// NtReleaseKeyedEvent call from some other code, as these events are not only -// matched by the key (address of the parker (state)), but also by this HANDLE. -// We lazily allocate this handle the first time it is needed. -// -// The fast path (calling park() after unpark() was already called) and the -// possible states are the same for both implementations. This is used here to -// make sure the fast path does not even check which API to use, but can return -// right away, independent of the used API. Only the slow paths (which will -// actually block/wake a thread) check which API is available and have -// different implementations. -// -// Unfortunately, NT Keyed Events are an undocumented Windows API. However: -// - This API is relatively simple with obvious behaviour, and there are -// several (unofficial) articles documenting the details. [1] -// - `parking_lot` has been using this API for years (on Windows versions -// before Windows 8). [2] Many big projects extensively use parking_lot, -// such as servo and the Rust compiler itself. -// - It is the underlying API used by Windows SRW locks and Windows critical -// sections. [3] [4] -// - The source code of the implementations of Wine, ReactOs, and Windows XP -// are available and match the expected behaviour. -// - The main risk with an undocumented API is that it might change in the -// future. But since we only use it for older versions of Windows, that's not -// a problem. 
-// - Even if these functions do not block or wake as we expect (which is -// unlikely, see all previous points), this implementation would still be -// memory safe. The NT Keyed Events API is only used to sleep/block in the -// right place. -// -// [1]: http://www.locklessinc.com/articles/keyed_events/ -// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e -// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c -// [4]: Windows Internals, Part 1, ISBN 9780735671300 - -use crate::pin::Pin; -use crate::ptr; -use crate::sync::atomic::{ - AtomicI8, AtomicPtr, - Ordering::{Acquire, Relaxed, Release}, -}; -use crate::sys::{c, dur2timeout}; -use crate::time::Duration; - -pub struct Parker { - state: AtomicI8, -} - -const PARKED: i8 = -1; -const EMPTY: i8 = 0; -const NOTIFIED: i8 = 1; - -// Notes about memory ordering: -// -// Memory ordering is only relevant for the relative ordering of operations -// between different variables. Even Ordering::Relaxed guarantees a -// monotonic/consistent order when looking at just a single atomic variable. -// -// So, since this parker is just a single atomic variable, we only need to look -// at the ordering guarantees we need to provide to the 'outside world'. -// -// The only memory ordering guarantee that parking and unparking provide, is -// that things which happened before unpark() are visible on the thread -// returning from park() afterwards. Otherwise, it was effectively unparked -// before unpark() was called while still consuming the 'token'. -// -// In other words, unpark() needs to synchronize with the part of park() that -// consumes the token and returns. -// -// This is done with a release-acquire synchronization, by using -// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using -// Ordering::Acquire when reading this state in park() after waking up. -impl Parker { - /// Construct the Windows parker. The UNIX parker implementation - /// requires this to happen in-place. - pub unsafe fn new(parker: *mut Parker) { - parker.write(Self { state: AtomicI8::new(EMPTY) }); - } - - // Assumes this is only called by the thread that owns the Parker, - // which means that `self.state != PARKED`. This implementation doesn't require `Pin`, - // but other implementations do. - pub unsafe fn park(self: Pin<&Self>) { - // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the - // first case. - if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - - if let Some(wait_on_address) = c::WaitOnAddress::option() { - loop { - // Wait for something to happen, assuming it's still set to PARKED. - wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE); - // Change NOTIFIED=>EMPTY but leave PARKED alone. - if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() { - // Actually woken up by unpark(). - return; - } else { - // Spurious wake up. We loop to try again. - } - } - } else { - // Wait for unpark() to produce this event. - c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut()); - // Set the state back to EMPTY (from either PARKED or NOTIFIED). - // Note that we don't just write EMPTY, but use swap() to also - // include an acquire-ordered read to synchronize with unpark()'s - // release-ordered write. - self.state.swap(EMPTY, Acquire); - } - } - - // Assumes this is only called by the thread that owns the Parker, - // which means that `self.state != PARKED`. 
This implementation doesn't require `Pin`, - // but other implementations do. - pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) { - // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the - // first case. - if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - - if let Some(wait_on_address) = c::WaitOnAddress::option() { - // Wait for something to happen, assuming it's still set to PARKED. - wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout)); - // Set the state back to EMPTY (from either PARKED or NOTIFIED). - // Note that we don't just write EMPTY, but use swap() to also - // include an acquire-ordered read to synchronize with unpark()'s - // release-ordered write. - if self.state.swap(EMPTY, Acquire) == NOTIFIED { - // Actually woken up by unpark(). - } else { - // Timeout or spurious wake up. - // We return either way, because we can't easily tell if it was the - // timeout or not. - } - } else { - // Need to wait for unpark() using NtWaitForKeyedEvent. - let handle = keyed_event_handle(); - - // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative - // values to indicate a relative time on the monotonic clock. - // This is documented here for the underlying KeWaitForSingleObject function: - // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject - let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) { - Ok(t) => -t, - Err(_) => i64::MIN, - }; - - // Wait for unpark() to produce this event. - let unparked = - c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS; - - // Set the state back to EMPTY (from either PARKED or NOTIFIED). - let prev_state = self.state.swap(EMPTY, Acquire); - - if !unparked && prev_state == NOTIFIED { - // We were awoken by a timeout, not by unpark(), but the state - // was set to NOTIFIED, which means we *just* missed an - // unpark(), which is now blocked on us to wait for it. - // Wait for it to consume the event and unblock that thread. - c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut()); - } - } - } - - // This implementation doesn't require `Pin`, but other implementations do. - pub fn unpark(self: Pin<&Self>) { - // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and - // wake the thread in the first case. - // - // Note that even NOTIFIED=>NOTIFIED results in a write. This is on - // purpose, to make sure every unpark() has a release-acquire ordering - // with park(). - if self.state.swap(NOTIFIED, Release) == PARKED { - unsafe { - if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() { - wake_by_address_single(self.ptr()); - } else { - // If we run NtReleaseKeyedEvent before the waiting thread runs - // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up. - // If the waiting thread wakes up before we run NtReleaseKeyedEvent - // (e.g. due to a timeout), this blocks until we do wake up a thread. - // To prevent this thread from blocking indefinitely in that case, - // park_impl() will, after seeing the state set to NOTIFIED after - // waking up, call NtWaitForKeyedEvent again to unblock us. 
- c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut()); - } - } - } - } - - fn ptr(&self) -> c::LPVOID { - &self.state as *const _ as c::LPVOID - } -} - -fn keyed_event_handle() -> c::HANDLE { - const INVALID: c::HANDLE = ptr::invalid_mut(!0); - static HANDLE: AtomicPtr = AtomicPtr::new(INVALID); - match HANDLE.load(Relaxed) { - INVALID => { - let mut handle = c::INVALID_HANDLE_VALUE; - unsafe { - match c::NtCreateKeyedEvent( - &mut handle, - c::GENERIC_READ | c::GENERIC_WRITE, - ptr::null_mut(), - 0, - ) { - c::STATUS_SUCCESS => {} - r => panic!("Unable to create keyed event handle: error {r}"), - } - } - match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) { - Ok(_) => handle, - Err(h) => { - // Lost the race to another thread initializing HANDLE before we did. - // Closing our handle and using theirs instead. - unsafe { - c::CloseHandle(handle); - } - h - } - } - } - handle => handle, - } -} diff --git a/library/std/src/sys/windows/thread_parking.rs b/library/std/src/sys/windows/thread_parking.rs new file mode 100644 index 000000000..5d43676ad --- /dev/null +++ b/library/std/src/sys/windows/thread_parking.rs @@ -0,0 +1,253 @@ +// Thread parker implementation for Windows. +// +// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+). +// This modern API is exactly the same as the futex syscalls the Linux thread +// parker uses. When These APIs are available, the implementation of this +// thread parker matches the Linux thread parker exactly. +// +// However, when the modern API is not available, this implementation falls +// back to NT Keyed Events, which are similar, but have some important +// differences. These are available since Windows XP. +// +// WaitOnAddress first checks the state of the thread parker to make sure it no +// WakeByAddressSingle calls can be missed between updating the parker state +// and calling the function. +// +// NtWaitForKeyedEvent does not have this option, and unconditionally blocks +// without checking the parker state first. Instead, NtReleaseKeyedEvent +// (unlike WakeByAddressSingle) *blocks* until it woke up a thread waiting for +// it by NtWaitForKeyedEvent. This way, we can be sure no events are missed, +// but we need to be careful not to block unpark() if park_timeout() was woken +// up by a timeout instead of unpark(). +// +// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a +// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure +// a successfully awoken park() was awoken by unpark() and not a +// NtReleaseKeyedEvent call from some other code, as these events are not only +// matched by the key (address of the parker (state)), but also by this HANDLE. +// We lazily allocate this handle the first time it is needed. +// +// The fast path (calling park() after unpark() was already called) and the +// possible states are the same for both implementations. This is used here to +// make sure the fast path does not even check which API to use, but can return +// right away, independent of the used API. Only the slow paths (which will +// actually block/wake a thread) check which API is available and have +// different implementations. +// +// Unfortunately, NT Keyed Events are an undocumented Windows API. However: +// - This API is relatively simple with obvious behaviour, and there are +// several (unofficial) articles documenting the details. [1] +// - `parking_lot` has been using this API for years (on Windows versions +// before Windows 8). 
[2] Many big projects extensively use parking_lot, +// such as servo and the Rust compiler itself. +// - It is the underlying API used by Windows SRW locks and Windows critical +// sections. [3] [4] +// - The source code of the implementations of Wine, ReactOs, and Windows XP +// are available and match the expected behaviour. +// - The main risk with an undocumented API is that it might change in the +// future. But since we only use it for older versions of Windows, that's not +// a problem. +// - Even if these functions do not block or wake as we expect (which is +// unlikely, see all previous points), this implementation would still be +// memory safe. The NT Keyed Events API is only used to sleep/block in the +// right place. +// +// [1]: http://www.locklessinc.com/articles/keyed_events/ +// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e +// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c +// [4]: Windows Internals, Part 1, ISBN 9780735671300 + +use crate::pin::Pin; +use crate::ptr; +use crate::sync::atomic::{ + AtomicI8, AtomicPtr, + Ordering::{Acquire, Relaxed, Release}, +}; +use crate::sys::{c, dur2timeout}; +use crate::time::Duration; + +pub struct Parker { + state: AtomicI8, +} + +const PARKED: i8 = -1; +const EMPTY: i8 = 0; +const NOTIFIED: i8 = 1; + +// Notes about memory ordering: +// +// Memory ordering is only relevant for the relative ordering of operations +// between different variables. Even Ordering::Relaxed guarantees a +// monotonic/consistent order when looking at just a single atomic variable. +// +// So, since this parker is just a single atomic variable, we only need to look +// at the ordering guarantees we need to provide to the 'outside world'. +// +// The only memory ordering guarantee that parking and unparking provide, is +// that things which happened before unpark() are visible on the thread +// returning from park() afterwards. Otherwise, it was effectively unparked +// before unpark() was called while still consuming the 'token'. +// +// In other words, unpark() needs to synchronize with the part of park() that +// consumes the token and returns. +// +// This is done with a release-acquire synchronization, by using +// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using +// Ordering::Acquire when reading this state in park() after waking up. +impl Parker { + /// Construct the Windows parker. The UNIX parker implementation + /// requires this to happen in-place. + pub unsafe fn new_in_place(parker: *mut Parker) { + parker.write(Self { state: AtomicI8::new(EMPTY) }); + } + + // Assumes this is only called by the thread that owns the Parker, + // which means that `self.state != PARKED`. This implementation doesn't require `Pin`, + // but other implementations do. + pub unsafe fn park(self: Pin<&Self>) { + // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the + // first case. + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + if let Some(wait_on_address) = c::WaitOnAddress::option() { + loop { + // Wait for something to happen, assuming it's still set to PARKED. + wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE); + // Change NOTIFIED=>EMPTY but leave PARKED alone. + if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() { + // Actually woken up by unpark(). + return; + } else { + // Spurious wake up. We loop to try again. 
+ } + } + } else { + // Wait for unpark() to produce this event. + c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut()); + // Set the state back to EMPTY (from either PARKED or NOTIFIED). + // Note that we don't just write EMPTY, but use swap() to also + // include an acquire-ordered read to synchronize with unpark()'s + // release-ordered write. + self.state.swap(EMPTY, Acquire); + } + } + + // Assumes this is only called by the thread that owns the Parker, + // which means that `self.state != PARKED`. This implementation doesn't require `Pin`, + // but other implementations do. + pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) { + // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the + // first case. + if self.state.fetch_sub(1, Acquire) == NOTIFIED { + return; + } + + if let Some(wait_on_address) = c::WaitOnAddress::option() { + // Wait for something to happen, assuming it's still set to PARKED. + wait_on_address(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout)); + // Set the state back to EMPTY (from either PARKED or NOTIFIED). + // Note that we don't just write EMPTY, but use swap() to also + // include an acquire-ordered read to synchronize with unpark()'s + // release-ordered write. + if self.state.swap(EMPTY, Acquire) == NOTIFIED { + // Actually woken up by unpark(). + } else { + // Timeout or spurious wake up. + // We return either way, because we can't easily tell if it was the + // timeout or not. + } + } else { + // Need to wait for unpark() using NtWaitForKeyedEvent. + let handle = keyed_event_handle(); + + // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative + // values to indicate a relative time on the monotonic clock. + // This is documented here for the underlying KeWaitForSingleObject function: + // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject + let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) { + Ok(t) => -t, + Err(_) => i64::MIN, + }; + + // Wait for unpark() to produce this event. + let unparked = + c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS; + + // Set the state back to EMPTY (from either PARKED or NOTIFIED). + let prev_state = self.state.swap(EMPTY, Acquire); + + if !unparked && prev_state == NOTIFIED { + // We were awoken by a timeout, not by unpark(), but the state + // was set to NOTIFIED, which means we *just* missed an + // unpark(), which is now blocked on us to wait for it. + // Wait for it to consume the event and unblock that thread. + c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut()); + } + } + } + + // This implementation doesn't require `Pin`, but other implementations do. + pub fn unpark(self: Pin<&Self>) { + // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and + // wake the thread in the first case. + // + // Note that even NOTIFIED=>NOTIFIED results in a write. This is on + // purpose, to make sure every unpark() has a release-acquire ordering + // with park(). + if self.state.swap(NOTIFIED, Release) == PARKED { + unsafe { + if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() { + wake_by_address_single(self.ptr()); + } else { + // If we run NtReleaseKeyedEvent before the waiting thread runs + // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up. + // If the waiting thread wakes up before we run NtReleaseKeyedEvent + // (e.g. due to a timeout), this blocks until we do wake up a thread. 
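The timeout conversion in park_timeout() above can be read in isolation: NT kernel waits count in 100 ns units, a negative value means a relative wait, and the division rounds up so a sleep never ends early. A self-contained sketch of the same arithmetic (nt_relative_timeout is an illustrative name, not an API):

use std::time::Duration;

// Convert a Duration to an NT relative timeout: 100 ns units, negated to
// mean "relative to now"; saturate to i64::MIN (longest wait) on overflow.
fn nt_relative_timeout(timeout: Duration) -> i64 {
    match i64::try_from((timeout.as_nanos() + 99) / 100) {
        Ok(t) => -t,
        Err(_) => i64::MIN,
    }
}

fn main() {
    assert_eq!(nt_relative_timeout(Duration::from_nanos(1)), -1); // rounds up
    assert_eq!(nt_relative_timeout(Duration::from_micros(1)), -10);
}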
+ // To prevent this thread from blocking indefinitely in that case, + // park_impl() will, after seeing the state set to NOTIFIED after + // waking up, call NtWaitForKeyedEvent again to unblock us. + c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut()); + } + } + } + } + + fn ptr(&self) -> c::LPVOID { + &self.state as *const _ as c::LPVOID + } +} + +fn keyed_event_handle() -> c::HANDLE { + const INVALID: c::HANDLE = ptr::invalid_mut(!0); + static HANDLE: AtomicPtr = AtomicPtr::new(INVALID); + match HANDLE.load(Relaxed) { + INVALID => { + let mut handle = c::INVALID_HANDLE_VALUE; + unsafe { + match c::NtCreateKeyedEvent( + &mut handle, + c::GENERIC_READ | c::GENERIC_WRITE, + ptr::null_mut(), + 0, + ) { + c::STATUS_SUCCESS => {} + r => panic!("Unable to create keyed event handle: error {r}"), + } + } + match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) { + Ok(_) => handle, + Err(h) => { + // Lost the race to another thread initializing HANDLE before we did. + // Closing our handle and using theirs instead. + unsafe { + c::CloseHandle(handle); + } + h + } + } + } + handle => handle, + } +} diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs index 8807077cb..f1d804ef4 100644 --- a/library/std/src/sys_common/backtrace.rs +++ b/library/std/src/sys_common/backtrace.rs @@ -20,7 +20,7 @@ pub fn lock() -> impl Drop { /// Prints the current backtrace. pub fn print(w: &mut dyn Write, format: PrintFmt) -> io::Result<()> { // There are issues currently linking libbacktrace into tests, and in - // general during libstd's own unit tests we're not testing this path. In + // general during std's own unit tests we're not testing this path. In // test mode immediately return here to optimize away any references to the // libbacktrace symbols if cfg!(test) { @@ -111,7 +111,7 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt:: } /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that -/// this is only inline(never) when backtraces in libstd are enabled, otherwise +/// this is only inline(never) when backtraces in std are enabled, otherwise /// it's fine to optimize away. #[cfg_attr(feature = "backtrace", inline(never))] pub fn __rust_begin_short_backtrace(f: F) -> T @@ -127,7 +127,7 @@ where } /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that -/// this is only inline(never) when backtraces in libstd are enabled, otherwise +/// this is only inline(never) when backtraces in std are enabled, otherwise /// it's fine to optimize away. 
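keyed_event_handle() above uses a classic lock-free initialization pattern: every racer creates a resource, one compare_exchange publishes a winner, and the losers destroy their own copy and adopt the winner's. A portable sketch of the same pattern, with a heap allocation standing in for the NT handle; unlike a kernel handle, the Box's contents are ordinary memory, so the Relaxed orderings are upgraded to acquire/release here:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Acquire}};

static SHARED: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

fn shared() -> *mut u32 {
    let p = SHARED.load(Acquire);
    if !p.is_null() {
        return p;
    }
    let new = Box::into_raw(Box::new(42));
    match SHARED.compare_exchange(ptr::null_mut(), new, AcqRel, Acquire) {
        // Won the race: our allocation is published (and intentionally
        // leaked for the life of the program, like the event handle).
        Ok(_) => new,
        Err(winner) => {
            // Lost the race: free our copy and use the winner's.
            drop(unsafe { Box::from_raw(new) });
            winner
        }
    }
}

fn main() {
    assert_eq!(unsafe { *shared() }, 42);
}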
#[cfg_attr(feature = "backtrace", inline(never))] pub fn __rust_end_short_backtrace(f: F) -> T diff --git a/library/std/src/sys_common/io.rs b/library/std/src/sys_common/io.rs index d1e9fed41..4a42ff3c6 100644 --- a/library/std/src/sys_common/io.rs +++ b/library/std/src/sys_common/io.rs @@ -39,9 +39,10 @@ pub mod test { } } + #[track_caller] // for `test_rng` pub fn tmpdir() -> TempDir { let p = env::temp_dir(); - let mut r = rand::thread_rng(); + let mut r = crate::test_helpers::test_rng(); let ret = p.join(&format!("rust-{}", r.next_u32())); fs::create_dir(&ret).unwrap(); TempDir(ret) diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index b1987aa0f..6b24b0e9a 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -27,11 +27,10 @@ pub mod lazy_box; pub mod memchr; pub mod once; pub mod process; -pub mod remutex; pub mod thread; pub mod thread_info; pub mod thread_local_dtor; -pub mod thread_parker; +pub mod thread_parking; pub mod wstr; pub mod wtf8; diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/generic.rs deleted file mode 100644 index d953a6745..000000000 --- a/library/std/src/sys_common/once/generic.rs +++ /dev/null @@ -1,283 +0,0 @@ -// Each `Once` has one word of atomic state, and this state is CAS'd on to -// determine what to do. There are four possible state of a `Once`: -// -// * Incomplete - no initialization has run yet, and no thread is currently -// using the Once. -// * Poisoned - some thread has previously attempted to initialize the Once, but -// it panicked, so the Once is now poisoned. There are no other -// threads currently accessing this Once. -// * Running - some thread is currently attempting to run initialization. It may -// succeed, so all future threads need to wait for it to finish. -// Note that this state is accompanied with a payload, described -// below. -// * Complete - initialization has completed and all future calls should finish -// immediately. -// -// With 4 states we need 2 bits to encode this, and we use the remaining bits -// in the word we have allocated as a queue of threads waiting for the thread -// responsible for entering the RUNNING state. This queue is just a linked list -// of Waiter nodes which is monotonically increasing in size. Each node is -// allocated on the stack, and whenever the running closure finishes it will -// consume the entire queue and notify all waiters they should try again. -// -// You'll find a few more details in the implementation, but that's the gist of -// it! -// -// Atomic orderings: -// When running `Once` we deal with multiple atomics: -// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. -// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the -// result of the `Once`, and (3) for synchronizing `Waiter` nodes. -// - At the end of the `call` function we have to make sure the result -// of the `Once` is acquired. So every load which can be the only one to -// load COMPLETED must have at least acquire ordering, which means all -// three of them. -// - `WaiterQueue::drop` is the only place that may store COMPLETED, and -// must do so with release ordering to make the result available. -// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and -// needs to make the nodes available with release ordering. The load in -// its `compare_exchange` can be relaxed because it only has to compare -// the atomic, not to read other data. 
-// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load -// `state_and_queue` with acquire ordering. -// - There is just one store where `state_and_queue` is used only as a -// state flag, without having to synchronize data: switching the state -// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed, -// but the read has to be Acquire because of the requirements mentioned -// above. -// * `Waiter.signaled` is both used as a flag, and to protect a field with -// interior mutability in `Waiter`. `Waiter.thread` is changed in -// `WaiterQueue::drop` which then sets `signaled` with release ordering. -// After `wait` loads `signaled` with acquire ordering and sees it is true, -// it needs to see the changes to drop the `Waiter` struct correctly. -// * There is one place where the two atomics `Once.state_and_queue` and -// `Waiter.signaled` come together, and might be reordered by the compiler or -// processor. Because both use acquire ordering such a reordering is not -// allowed, so no need for `SeqCst`. - -use crate::cell::Cell; -use crate::fmt; -use crate::ptr; -use crate::sync as public; -use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; -use crate::thread::{self, Thread}; - -type Masked = (); - -pub struct Once { - state_and_queue: AtomicPtr, -} - -pub struct OnceState { - poisoned: bool, - set_state_on_drop_to: Cell<*mut Masked>, -} - -// Four states that a Once can be in, encoded into the lower bits of -// `state_and_queue` in the Once structure. -const INCOMPLETE: usize = 0x0; -const POISONED: usize = 0x1; -const RUNNING: usize = 0x2; -const COMPLETE: usize = 0x3; - -// Mask to learn about the state. All other bits are the queue of waiters if -// this is in the RUNNING state. -const STATE_MASK: usize = 0x3; - -// Representation of a node in the linked list of waiters, used while in the -// RUNNING state. -// Note: `Waiter` can't hold a mutable pointer to the next thread, because then -// `wait` would both hand out a mutable reference to its `Waiter` node, and keep -// a shared reference to check `signaled`. Instead we hold shared references and -// use interior mutability. -#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. -struct Waiter { - thread: Cell>, - signaled: AtomicBool, - next: *const Waiter, -} - -// Head of a linked list of waiters. -// Every node is a struct on the stack of a waiting thread. -// Will wake up the waiters when it gets dropped, i.e. also on panic. -struct WaiterQueue<'a> { - state_and_queue: &'a AtomicPtr, - set_state_on_drop_to: *mut Masked, -} - -impl Once { - #[inline] - #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] - pub const fn new() -> Once { - Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) } - } - - #[inline] - pub fn is_completed(&self) -> bool { - // An `Acquire` load is enough because that makes all the initialization - // operations visible to us, and, this being a fast path, weaker - // ordering helps with performance. This `Acquire` synchronizes with - // `Release` operations on the slow path. - self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE - } - - // This is a non-generic function to reduce the monomorphization cost of - // using `call_once` (this isn't exactly a trivial or small implementation). - // - // Additionally, this is tagged with `#[cold]` as it should indeed be cold - // and it helps let LLVM know that calls to this function should be off the - // fast path. 
Essentially, this should help generate more straight line code - // in LLVM. - // - // Finally, this takes an `FnMut` instead of a `FnOnce` because there's - // currently no way to take an `FnOnce` and call it via virtual dispatch - // without some allocation overhead. - #[cold] - #[track_caller] - pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { - let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); - loop { - match state_and_queue.addr() { - COMPLETE => break, - POISONED if !ignore_poisoning => { - // Panic to propagate the poison. - panic!("Once instance has previously been poisoned"); - } - POISONED | INCOMPLETE => { - // Try to register this thread as the one RUNNING. - let exchange_result = self.state_and_queue.compare_exchange( - state_and_queue, - ptr::invalid_mut(RUNNING), - Ordering::Acquire, - Ordering::Acquire, - ); - if let Err(old) = exchange_result { - state_and_queue = old; - continue; - } - // `waiter_queue` will manage other waiting threads, and - // wake them up on drop. - let mut waiter_queue = WaiterQueue { - state_and_queue: &self.state_and_queue, - set_state_on_drop_to: ptr::invalid_mut(POISONED), - }; - // Run the initialization function, letting it know if we're - // poisoned or not. - let init_state = public::OnceState { - inner: OnceState { - poisoned: state_and_queue.addr() == POISONED, - set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)), - }, - }; - init(&init_state); - waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get(); - break; - } - _ => { - // All other values must be RUNNING with possibly a - // pointer to the waiter queue in the more significant bits. - assert!(state_and_queue.addr() & STATE_MASK == RUNNING); - wait(&self.state_and_queue, state_and_queue); - state_and_queue = self.state_and_queue.load(Ordering::Acquire); - } - } - } - } -} - -fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { - // Note: the following code was carefully written to avoid creating a - // mutable reference to `node` that gets aliased. - loop { - // Don't queue this thread if the status is no longer running, - // otherwise we will not be woken up. - if current_state.addr() & STATE_MASK != RUNNING { - return; - } - - // Create the node for our current thread. - let node = Waiter { - thread: Cell::new(Some(thread::current())), - signaled: AtomicBool::new(false), - next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, - }; - let me = &node as *const Waiter as *const Masked as *mut Masked; - - // Try to slide in the node at the head of the linked list, making sure - // that another thread didn't just replace the head of the linked list. - let exchange_result = state_and_queue.compare_exchange( - current_state, - me.with_addr(me.addr() | RUNNING), - Ordering::Release, - Ordering::Relaxed, - ); - if let Err(old) = exchange_result { - current_state = old; - continue; - } - - // We have enqueued ourselves, now lets wait. - // It is important not to return before being signaled, otherwise we - // would drop our `Waiter` node and leave a hole in the linked list - // (and a dangling reference). Guard against spurious wakeups by - // reparking ourselves until we are signaled. - while !node.signaled.load(Ordering::Acquire) { - // If the managing thread happens to signal and unpark us before we - // can park ourselves, the result could be this thread never gets - // unparked. 
Luckily `park` comes with the guarantee that if it got - // an `unpark` just before on an unparked thread it does not park. - thread::park(); - } - break; - } -} - -#[stable(feature = "std_debug", since = "1.16.0")] -impl fmt::Debug for Once { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Once").finish_non_exhaustive() - } -} - -impl Drop for WaiterQueue<'_> { - fn drop(&mut self) { - // Swap out our state with however we finished. - let state_and_queue = - self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); - - // We should only ever see an old state which was RUNNING. - assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); - - // Walk the entire linked list of waiters and wake them up (in lifo - // order, last to register is first to wake up). - unsafe { - // Right after setting `node.signaled = true` the other thread may - // free `node` if there happens to be has a spurious wakeup. - // So we have to take out the `thread` field and copy the pointer to - // `next` first. - let mut queue = - state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; - while !queue.is_null() { - let next = (*queue).next; - let thread = (*queue).thread.take().unwrap(); - (*queue).signaled.store(true, Ordering::Release); - // ^- FIXME (maybe): This is another case of issue #55005 - // `store()` has a potentially dangling ref to `signaled`. - queue = next; - thread.unpark(); - } - } - } -} - -impl OnceState { - #[inline] - pub fn is_poisoned(&self) -> bool { - self.poisoned - } - - #[inline] - pub fn poison(&self) { - self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED)); - } -} diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs index 8742e68cc..359697d83 100644 --- a/library/std/src/sys_common/once/mod.rs +++ b/library/std/src/sys_common/once/mod.rs @@ -6,22 +6,6 @@ // As a result, we end up implementing it ourselves in the standard library. // This also gives us the opportunity to optimize the implementation a bit which // should help the fast path on call sites. -// -// So to recap, the guarantees of a Once are that it will call the -// initialization closure at most once, and it will never return until the one -// that's running has finished running. This means that we need some form of -// blocking here while the custom callback is running at the very least. -// Additionally, we add on the restriction of **poisoning**. Whenever an -// initialization closure panics, the Once enters a "poisoned" state which means -// that all future calls will immediately panic as well. -// -// So to implement this, one might first reach for a `Mutex`, but those cannot -// be put into a `static`. It also gets a lot harder with poisoning to figure -// out when the mutex needs to be deallocated because it's not after the closure -// finishes, but after the first successful closure finishes. -// -// All in all, this is instead implemented with atomics and lock-free -// operations! Whee! cfg_if::cfg_if! { if #[cfg(any( @@ -36,8 +20,15 @@ cfg_if::cfg_if! 
{ ))] { mod futex; pub use futex::{Once, OnceState}; + } else if #[cfg(any( + windows, + target_family = "unix", + all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", + ))] { + mod queue; + pub use queue::{Once, OnceState}; } else { - mod generic; - pub use generic::{Once, OnceState}; + pub use crate::sys::once::{Once, OnceState}; } } diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs new file mode 100644 index 000000000..d953a6745 --- /dev/null +++ b/library/std/src/sys_common/once/queue.rs @@ -0,0 +1,283 @@ +// Each `Once` has one word of atomic state, and this state is CAS'd on to +// determine what to do. There are four possible states of a `Once`: +// +// * Incomplete - no initialization has run yet, and no thread is currently +// using the Once. +// * Poisoned - some thread has previously attempted to initialize the Once, but +// it panicked, so the Once is now poisoned. There are no other +// threads currently accessing this Once. +// * Running - some thread is currently attempting to run initialization. It may +// succeed, so all future threads need to wait for it to finish. +// Note that this state is accompanied by a payload, described +// below. +// * Complete - initialization has completed and all future calls should finish +// immediately. +// +// With 4 states we need 2 bits to encode this, and we use the remaining bits +// in the word we have allocated as a queue of threads waiting for the thread +// responsible for entering the RUNNING state. This queue is just a linked list +// of Waiter nodes which is monotonically increasing in size. Each node is +// allocated on the stack, and whenever the running closure finishes it will +// consume the entire queue and notify all waiters they should try again. +// +// You'll find a few more details in the implementation, but that's the gist of +// it! +// +// Atomic orderings: +// When running `Once` we deal with multiple atomics: +// `Once.state_and_queue` and an unknown number of `Waiter.signaled`. +// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the +// result of the `Once`, and (3) for synchronizing `Waiter` nodes. +// - At the end of the `call` function we have to make sure the result +// of the `Once` is acquired. So every load which can be the only one to +// load COMPLETE must have at least acquire ordering, which means all +// three of them. +// - `WaiterQueue::drop` is the only place that may store COMPLETE, and +// must do so with release ordering to make the result available. +// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and +// needs to make the nodes available with release ordering. The load in +// its `compare_exchange` can be relaxed because it only has to compare +// the atomic, not to read other data. +// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load +// `state_and_queue` with acquire ordering. +// - There is just one store where `state_and_queue` is used only as a +// state flag, without having to synchronize data: switching the state +// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed, +// but the read has to be Acquire because of the requirements mentioned +// above. +// * `Waiter.signaled` is used both as a flag, and to protect a field with +// interior mutability in `Waiter`. `Waiter.thread` is changed in +// `WaiterQueue::drop` which then sets `signaled` with release ordering.
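The two-bit encoding described above packs the state discriminant into the low bits of a 4-byte-aligned node address, with the remaining bits holding the head of the waiter queue. A sketch of just the packing arithmetic, with usize standing in for the tagged word (pack/unpack are illustrative helpers, not part of the module):

const STATE_MASK: usize = 0b11; // low two bits hold INCOMPLETE..COMPLETE
const RUNNING: usize = 0x2;

#[repr(align(4))] // alignment >= 4 keeps the low two bits of any address zero
struct Waiter {
    signaled: std::sync::atomic::AtomicBool,
}

fn pack(queue_head: *const Waiter, state: usize) -> usize {
    debug_assert_eq!(queue_head as usize & STATE_MASK, 0);
    queue_head as usize | state
}

fn unpack(word: usize) -> (*const Waiter, usize) {
    ((word & !STATE_MASK) as *const Waiter, word & STATE_MASK)
}

fn main() {
    let node = Waiter { signaled: std::sync::atomic::AtomicBool::new(false) };
    let word = pack(&node, RUNNING);
    let (head, state) = unpack(word);
    assert_eq!(head, &node as *const Waiter);
    assert_eq!(state, RUNNING);
}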
+// After `wait` loads `signaled` with acquire ordering and sees it is true, +// it needs to see the changes to drop the `Waiter` struct correctly. +// * There is one place where the two atomics `Once.state_and_queue` and +// `Waiter.signaled` come together, and might be reordered by the compiler or +// processor. Because both use acquire ordering such a reordering is not +// allowed, so no need for `SeqCst`. + +use crate::cell::Cell; +use crate::fmt; +use crate::ptr; +use crate::sync as public; +use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::thread::{self, Thread}; + +type Masked = (); + +pub struct Once { + state_and_queue: AtomicPtr<Masked>, +} + +pub struct OnceState { + poisoned: bool, + set_state_on_drop_to: Cell<*mut Masked>, +} + +// Four states that a Once can be in, encoded into the lower bits of +// `state_and_queue` in the Once structure. +const INCOMPLETE: usize = 0x0; +const POISONED: usize = 0x1; +const RUNNING: usize = 0x2; +const COMPLETE: usize = 0x3; + +// Mask to learn about the state. All other bits are the queue of waiters if +// this is in the RUNNING state. +const STATE_MASK: usize = 0x3; + +// Representation of a node in the linked list of waiters, used while in the +// RUNNING state. +// Note: `Waiter` can't hold a mutable pointer to the next thread, because then +// `wait` would both hand out a mutable reference to its `Waiter` node, and keep +// a shared reference to check `signaled`. Instead we hold shared references and +// use interior mutability. +#[repr(align(4))] // Ensure the two lower bits are free to use as state bits. +struct Waiter { + thread: Cell<Option<Thread>>, + signaled: AtomicBool, + next: *const Waiter, +} + +// Head of a linked list of waiters. +// Every node is a struct on the stack of a waiting thread. +// Will wake up the waiters when it gets dropped, i.e. also on panic. +struct WaiterQueue<'a> { + state_and_queue: &'a AtomicPtr<Masked>, + set_state_on_drop_to: *mut Masked, +} + +impl Once { + #[inline] + #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")] + pub const fn new() -> Once { + Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) } + } + + #[inline] + pub fn is_completed(&self) -> bool { + // An `Acquire` load is enough because that makes all the initialization + // operations visible to us, and, this being a fast path, weaker + // ordering helps with performance. This `Acquire` synchronizes with + // `Release` operations on the slow path. + self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE + } + + // This is a non-generic function to reduce the monomorphization cost of + // using `call_once` (this isn't exactly a trivial or small implementation). + // + // Additionally, this is tagged with `#[cold]` as it should indeed be cold + // and it helps let LLVM know that calls to this function should be off the + // fast path. Essentially, this should help generate more straight line code + // in LLVM. + // + // Finally, this takes an `FnMut` instead of a `FnOnce` because there's + // currently no way to take an `FnOnce` and call it via virtual dispatch + // without some allocation overhead. + #[cold] + #[track_caller] + pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) { + let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire); + loop { + match state_and_queue.addr() { + COMPLETE => break, + POISONED if !ignore_poisoning => { + // Panic to propagate the poison.
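Seen through the public std::sync::Once API, the poisoning logic here behaves as follows: a panicking initializer poisons the Once, a later plain call_once panics, and call_once_force can observe the poison and complete initialization anyway. An illustrative user-level demo:

use std::sync::Once;

fn main() {
    static INIT: Once = Once::new();

    // The initializer panics, leaving the Once poisoned.
    let _ = std::panic::catch_unwind(|| INIT.call_once(|| panic!("boom")));

    // call_once_force ignores the poison, reports it through OnceState,
    // and marks the Once complete once the closure succeeds.
    INIT.call_once_force(|state| assert!(state.is_poisoned()));
    assert!(INIT.is_completed());
}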
+ panic!("Once instance has previously been poisoned"); + } + POISONED | INCOMPLETE => { + // Try to register this thread as the one RUNNING. + let exchange_result = self.state_and_queue.compare_exchange( + state_and_queue, + ptr::invalid_mut(RUNNING), + Ordering::Acquire, + Ordering::Acquire, + ); + if let Err(old) = exchange_result { + state_and_queue = old; + continue; + } + // `waiter_queue` will manage other waiting threads, and + // wake them up on drop. + let mut waiter_queue = WaiterQueue { + state_and_queue: &self.state_and_queue, + set_state_on_drop_to: ptr::invalid_mut(POISONED), + }; + // Run the initialization function, letting it know if we're + // poisoned or not. + let init_state = public::OnceState { + inner: OnceState { + poisoned: state_and_queue.addr() == POISONED, + set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)), + }, + }; + init(&init_state); + waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get(); + break; + } + _ => { + // All other values must be RUNNING with possibly a + // pointer to the waiter queue in the more significant bits. + assert!(state_and_queue.addr() & STATE_MASK == RUNNING); + wait(&self.state_and_queue, state_and_queue); + state_and_queue = self.state_and_queue.load(Ordering::Acquire); + } + } + } + } +} + +fn wait(state_and_queue: &AtomicPtr, mut current_state: *mut Masked) { + // Note: the following code was carefully written to avoid creating a + // mutable reference to `node` that gets aliased. + loop { + // Don't queue this thread if the status is no longer running, + // otherwise we will not be woken up. + if current_state.addr() & STATE_MASK != RUNNING { + return; + } + + // Create the node for our current thread. + let node = Waiter { + thread: Cell::new(Some(thread::current())), + signaled: AtomicBool::new(false), + next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter, + }; + let me = &node as *const Waiter as *const Masked as *mut Masked; + + // Try to slide in the node at the head of the linked list, making sure + // that another thread didn't just replace the head of the linked list. + let exchange_result = state_and_queue.compare_exchange( + current_state, + me.with_addr(me.addr() | RUNNING), + Ordering::Release, + Ordering::Relaxed, + ); + if let Err(old) = exchange_result { + current_state = old; + continue; + } + + // We have enqueued ourselves, now lets wait. + // It is important not to return before being signaled, otherwise we + // would drop our `Waiter` node and leave a hole in the linked list + // (and a dangling reference). Guard against spurious wakeups by + // reparking ourselves until we are signaled. + while !node.signaled.load(Ordering::Acquire) { + // If the managing thread happens to signal and unpark us before we + // can park ourselves, the result could be this thread never gets + // unparked. Luckily `park` comes with the guarantee that if it got + // an `unpark` just before on an unparked thread it does not park. + thread::park(); + } + break; + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for Once { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Once").finish_non_exhaustive() + } +} + +impl Drop for WaiterQueue<'_> { + fn drop(&mut self) { + // Swap out our state with however we finished. + let state_and_queue = + self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel); + + // We should only ever see an old state which was RUNNING. 
+ assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING); + + // Walk the entire linked list of waiters and wake them up (in LIFO + // order, last to register is first to wake up). + unsafe { + // Right after setting `node.signaled = true` the other thread may + // free `node` if there happens to be a spurious wakeup. + // So we have to take out the `thread` field and copy the pointer to + // `next` first. + let mut queue = + state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter; + while !queue.is_null() { + let next = (*queue).next; + let thread = (*queue).thread.take().unwrap(); + (*queue).signaled.store(true, Ordering::Release); + // ^- FIXME (maybe): This is another case of issue #55005 + // `store()` has a potentially dangling ref to `signaled`. + queue = next; + thread.unpark(); + } + } + } +} + +impl OnceState { + #[inline] + pub fn is_poisoned(&self) -> bool { + self.poisoned + } + + #[inline] + pub fn poison(&self) { + self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED)); + } +} diff --git a/library/std/src/sys_common/process.rs b/library/std/src/sys_common/process.rs index 9f978789a..18883048d 100644 --- a/library/std/src/sys_common/process.rs +++ b/library/std/src/sys_common/process.rs @@ -4,10 +4,13 @@ use crate::collections::BTreeMap; use crate::env; use crate::ffi::{OsStr, OsString}; -use crate::sys::process::EnvKey; +use crate::fmt; +use crate::io; +use crate::sys::pipe::read2; +use crate::sys::process::{EnvKey, ExitStatus, Process, StdioPipes}; // Stores a set of changes to an environment -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct CommandEnv { clear: bool, saw_path: bool, @@ -20,6 +23,14 @@ impl Default for CommandEnv { } } +impl fmt::Debug for CommandEnv { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut debug_command_env = f.debug_struct("CommandEnv"); + debug_command_env.field("clear", &self.clear).field("vars", &self.vars); + debug_command_env.finish() + } +} + impl CommandEnv { // Capture the current environment with these changes applied pub fn capture(&self) -> BTreeMap<EnvKey, OsString> { @@ -117,3 +128,30 @@ impl<'a> ExactSizeIterator for CommandEnvs<'a> { self.iter.is_empty() } } + +pub fn wait_with_output( + mut process: Process, + mut pipes: StdioPipes, +) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> { + drop(pipes.stdin.take()); + + let (mut stdout, mut stderr) = (Vec::new(), Vec::new()); + match (pipes.stdout.take(), pipes.stderr.take()) { + (None, None) => {} + (Some(out), None) => { + let res = out.read_to_end(&mut stdout); + res.unwrap(); + } + (None, Some(err)) => { + let res = err.read_to_end(&mut stderr); + res.unwrap(); + } + (Some(out), Some(err)) => { + let res = read2(out, &mut stdout, err, &mut stderr); + res.unwrap(); + } + } + + let status = process.wait()?; + Ok((status, stdout, stderr)) +} diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs deleted file mode 100644 index 4c054da64..000000000 --- a/library/std/src/sys_common/remutex.rs +++ /dev/null @@ -1,178 +0,0 @@ -#[cfg(all(test, not(target_os = "emscripten")))] -mod tests; - -use crate::cell::UnsafeCell; -use crate::ops::Deref; -use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed}; -use crate::sys::locks as sys; - -/// A re-entrant mutual exclusion -/// -/// This mutex will block *other* threads waiting for the lock to become -/// available.
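The new wait_with_output helper above is effectively the platform-independent tail of Command::output: stdin is dropped, both capture pipes are drained (via read2 when both are present, so a full pipe cannot deadlock the child), and only then is the child reaped. From the public API this looks like the following, assuming a Unix-style echo on PATH:

use std::process::Command;

fn main() -> std::io::Result<()> {
    // Capturing both stdout and stderr exercises the read2 path above.
    let out = Command::new("echo").arg("hello").output()?;
    assert!(out.status.success());
    assert_eq!(String::from_utf8_lossy(&out.stdout).trim(), "hello");
    assert!(out.stderr.is_empty());
    Ok(())
}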
The thread which has already locked the mutex can lock it -/// multiple times without blocking, preventing a common source of deadlocks. -/// -/// This is used by stdout().lock() and friends. -/// -/// ## Implementation details -/// -/// The 'owner' field tracks which thread has locked the mutex. -/// -/// We use current_thread_unique_ptr() as the thread identifier, -/// which is just the address of a thread local variable. -/// -/// If `owner` is set to the identifier of the current thread, -/// we assume the mutex is already locked and instead of locking it again, -/// we increment `lock_count`. -/// -/// When unlocking, we decrement `lock_count`, and only unlock the mutex when -/// it reaches zero. -/// -/// `lock_count` is protected by the mutex and only accessed by the thread that has -/// locked the mutex, so needs no synchronization. -/// -/// `owner` can be checked by other threads that want to see if they already -/// hold the lock, so needs to be atomic. If it compares equal, we're on the -/// same thread that holds the mutex and memory access can use relaxed ordering -/// since we're not dealing with multiple threads. If it compares unequal, -/// synchronization is left to the mutex, making relaxed memory ordering for -/// the `owner` field fine in all cases. -pub struct ReentrantMutex { - mutex: sys::Mutex, - owner: AtomicUsize, - lock_count: UnsafeCell, - data: T, -} - -unsafe impl Send for ReentrantMutex {} -unsafe impl Sync for ReentrantMutex {} - -impl UnwindSafe for ReentrantMutex {} -impl RefUnwindSafe for ReentrantMutex {} - -/// An RAII implementation of a "scoped lock" of a mutex. When this structure is -/// dropped (falls out of scope), the lock will be unlocked. -/// -/// The data protected by the mutex can be accessed through this guard via its -/// Deref implementation. -/// -/// # Mutability -/// -/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`, -/// because implementation of the trait would violate Rust’s reference aliasing -/// rules. Use interior mutability (usually `RefCell`) in order to mutate the -/// guarded data. -#[must_use = "if unused the ReentrantMutex will immediately unlock"] -pub struct ReentrantMutexGuard<'a, T: 'a> { - lock: &'a ReentrantMutex, -} - -impl !Send for ReentrantMutexGuard<'_, T> {} - -impl ReentrantMutex { - /// Creates a new reentrant mutex in an unlocked state. - pub const fn new(t: T) -> ReentrantMutex { - ReentrantMutex { - mutex: sys::Mutex::new(), - owner: AtomicUsize::new(0), - lock_count: UnsafeCell::new(0), - data: t, - } - } - - /// Acquires a mutex, blocking the current thread until it is able to do so. - /// - /// This function will block the caller until it is available to acquire the mutex. - /// Upon returning, the thread is the only thread with the mutex held. When the thread - /// calling this method already holds the lock, the call shall succeed without - /// blocking. - /// - /// # Errors - /// - /// If another user of this mutex panicked while holding the mutex, then - /// this call will return failure if the mutex would otherwise be - /// acquired. - pub fn lock(&self) -> ReentrantMutexGuard<'_, T> { - let this_thread = current_thread_unique_ptr(); - // Safety: We only touch lock_count when we own the lock. 
- unsafe { - if self.owner.load(Relaxed) == this_thread { - self.increment_lock_count(); - } else { - self.mutex.lock(); - self.owner.store(this_thread, Relaxed); - debug_assert_eq!(*self.lock_count.get(), 0); - *self.lock_count.get() = 1; - } - } - ReentrantMutexGuard { lock: self } - } - - /// Attempts to acquire this lock. - /// - /// If the lock could not be acquired at this time, then `Err` is returned. - /// Otherwise, an RAII guard is returned. - /// - /// This function does not block. - /// - /// # Errors - /// - /// If another user of this mutex panicked while holding the mutex, then - /// this call will return failure if the mutex would otherwise be - /// acquired. - pub fn try_lock(&self) -> Option> { - let this_thread = current_thread_unique_ptr(); - // Safety: We only touch lock_count when we own the lock. - unsafe { - if self.owner.load(Relaxed) == this_thread { - self.increment_lock_count(); - Some(ReentrantMutexGuard { lock: self }) - } else if self.mutex.try_lock() { - self.owner.store(this_thread, Relaxed); - debug_assert_eq!(*self.lock_count.get(), 0); - *self.lock_count.get() = 1; - Some(ReentrantMutexGuard { lock: self }) - } else { - None - } - } - } - - unsafe fn increment_lock_count(&self) { - *self.lock_count.get() = (*self.lock_count.get()) - .checked_add(1) - .expect("lock count overflow in reentrant mutex"); - } -} - -impl Deref for ReentrantMutexGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - &self.lock.data - } -} - -impl Drop for ReentrantMutexGuard<'_, T> { - #[inline] - fn drop(&mut self) { - // Safety: We own the lock. - unsafe { - *self.lock.lock_count.get() -= 1; - if *self.lock.lock_count.get() == 0 { - self.lock.owner.store(0, Relaxed); - self.lock.mutex.unlock(); - } - } - } -} - -/// Get an address that is unique per running thread. -/// -/// This can be used as a non-null usize-sized ID. -pub fn current_thread_unique_ptr() -> usize { - // Use a non-drop type to make sure it's still available during thread destruction. - thread_local! 
{ static X: u8 = const { 0 } } - X.with(|x| <*const _>::addr(x)) -} diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs deleted file mode 100644 index 8e97ce11c..000000000 --- a/library/std/src/sys_common/remutex/tests.rs +++ /dev/null @@ -1,60 +0,0 @@ -use crate::cell::RefCell; -use crate::sync::Arc; -use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard}; -use crate::thread; - -#[test] -fn smoke() { - let m = ReentrantMutex::new(()); - { - let a = m.lock(); - { - let b = m.lock(); - { - let c = m.lock(); - assert_eq!(*c, ()); - } - assert_eq!(*b, ()); - } - assert_eq!(*a, ()); - } -} - -#[test] -fn is_mutex() { - let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); - let m2 = m.clone(); - let lock = m.lock(); - let child = thread::spawn(move || { - let lock = m2.lock(); - assert_eq!(*lock.borrow(), 4950); - }); - for i in 0..100 { - let lock = m.lock(); - *lock.borrow_mut() += i; - } - drop(lock); - child.join().unwrap(); -} - -#[test] -fn trylock_works() { - let m = Arc::new(ReentrantMutex::new(())); - let m2 = m.clone(); - let _lock = m.try_lock(); - let _lock2 = m.try_lock(); - thread::spawn(move || { - let lock = m2.try_lock(); - assert!(lock.is_none()); - }) - .join() - .unwrap(); - let _lock3 = m.try_lock(); -} - -pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell>); -impl Drop for Answer<'_> { - fn drop(&mut self) { - *self.0.borrow_mut() = 42; - } -} diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs index 747579f17..2672a2a75 100644 --- a/library/std/src/sys_common/thread_local_key.rs +++ b/library/std/src/sys_common/thread_local_key.rs @@ -117,10 +117,14 @@ pub struct Key { /// This value specifies no destructor by default. pub const INIT: StaticKey = StaticKey::new(None); +// Define a sentinel value that is unlikely to be returned +// as a TLS key (but it may be returned). +const KEY_SENTVAL: usize = 0; + impl StaticKey { #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")] pub const fn new(dtor: Option) -> StaticKey { - StaticKey { key: atomic::AtomicUsize::new(0), dtor } + StaticKey { key: atomic::AtomicUsize::new(KEY_SENTVAL), dtor } } /// Gets the value associated with this TLS key @@ -144,31 +148,36 @@ impl StaticKey { #[inline] unsafe fn key(&self) -> imp::Key { match self.key.load(Ordering::Relaxed) { - 0 => self.lazy_init() as imp::Key, + KEY_SENTVAL => self.lazy_init() as imp::Key, n => n as imp::Key, } } unsafe fn lazy_init(&self) -> usize { - // POSIX allows the key created here to be 0, but the compare_exchange - // below relies on using 0 as a sentinel value to check who won the + // POSIX allows the key created here to be KEY_SENTVAL, but the compare_exchange + // below relies on using KEY_SENTVAL as a sentinel value to check who won the // race to set the shared TLS key. As far as I know, there is no // guaranteed value that cannot be returned as a posix_key_create key, // so there is no value we can initialize the inner key with to // prove that it has not yet been set. As such, we'll continue using a - // value of 0, but with some gyrations to make sure we have a non-0 + // value of KEY_SENTVAL, but with some gyrations to make sure we have a non-KEY_SENTVAL // value returned from the creation routine. // FIXME: this is clearly a hack, and should be cleaned up. 
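current_thread_unique_ptr() above relies on the address of a thread-local being unique among concurrently live threads, which makes it usable as a usize-sized owner id for the re-entrant lock. A standalone sketch of the trick; the assertions are illustrative:

use std::thread;

fn current_thread_unique_ptr() -> usize {
    // A non-drop thread-local stays usable even during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| x as *const u8 as usize)
}

fn main() {
    let main_id = current_thread_unique_ptr();
    assert_eq!(main_id, current_thread_unique_ptr()); // stable within one thread
    let other = thread::spawn(current_thread_unique_ptr).join().unwrap();
    assert_ne!(main_id, other); // the two threads were alive concurrently
}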
let key1 = imp::create(self.dtor); - let key = if key1 != 0 { + let key = if key1 as usize != KEY_SENTVAL { key1 } else { let key2 = imp::create(self.dtor); imp::destroy(key1); key2 }; - rtassert!(key != 0); - match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) { + rtassert!(key as usize != KEY_SENTVAL); + match self.key.compare_exchange( + KEY_SENTVAL, + key as usize, + Ordering::SeqCst, + Ordering::SeqCst, + ) { // The CAS succeeded, so we've created the actual key Ok(_) => key as usize, // If someone beat us to the punch, use their key instead diff --git a/library/std/src/sys_common/thread_parker/futex.rs b/library/std/src/sys_common/thread_parker/futex.rs deleted file mode 100644 index d9e2f39e3..000000000 --- a/library/std/src/sys_common/thread_parker/futex.rs +++ /dev/null @@ -1,97 +0,0 @@ -use crate::pin::Pin; -use crate::sync::atomic::AtomicU32; -use crate::sync::atomic::Ordering::{Acquire, Release}; -use crate::sys::futex::{futex_wait, futex_wake}; -use crate::time::Duration; - -const PARKED: u32 = u32::MAX; -const EMPTY: u32 = 0; -const NOTIFIED: u32 = 1; - -pub struct Parker { - state: AtomicU32, -} - -// Notes about memory ordering: -// -// Memory ordering is only relevant for the relative ordering of operations -// between different variables. Even Ordering::Relaxed guarantees a -// monotonic/consistent order when looking at just a single atomic variable. -// -// So, since this parker is just a single atomic variable, we only need to look -// at the ordering guarantees we need to provide to the 'outside world'. -// -// The only memory ordering guarantee that parking and unparking provide, is -// that things which happened before unpark() are visible on the thread -// returning from park() afterwards. Otherwise, it was effectively unparked -// before unpark() was called while still consuming the 'token'. -// -// In other words, unpark() needs to synchronize with the part of park() that -// consumes the token and returns. -// -// This is done with a release-acquire synchronization, by using -// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using -// Ordering::Acquire when checking for this state in park(). -impl Parker { - /// Construct the futex parker. The UNIX parker implementation - /// requires this to happen in-place. - pub unsafe fn new(parker: *mut Parker) { - parker.write(Self { state: AtomicU32::new(EMPTY) }); - } - - // Assumes this is only called by the thread that owns the Parker, - // which means that `self.state != PARKED`. - pub unsafe fn park(self: Pin<&Self>) { - // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the - // first case. - if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - loop { - // Wait for something to happen, assuming it's still set to PARKED. - futex_wait(&self.state, PARKED, None); - // Change NOTIFIED=>EMPTY and return in that case. - if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() { - return; - } else { - // Spurious wake up. We loop to try again. - } - } - } - - // Assumes this is only called by the thread that owns the Parker, - // which means that `self.state != PARKED`. This implementation doesn't - // require `Pin`, but other implementations do. - pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) { - // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the - // first case. 
- if self.state.fetch_sub(1, Acquire) == NOTIFIED { - return; - } - // Wait for something to happen, assuming it's still set to PARKED. - futex_wait(&self.state, PARKED, Some(timeout)); - // This is not just a store, because we need to establish a - // release-acquire ordering with unpark(). - if self.state.swap(EMPTY, Acquire) == NOTIFIED { - // Woke up because of unpark(). - } else { - // Timeout or spurious wake up. - // We return either way, because we can't easily tell if it was the - // timeout or not. - } - } - - // This implementation doesn't require `Pin`, but other implementations do. - #[inline] - pub fn unpark(self: Pin<&Self>) { - // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and - // wake the thread in the first case. - // - // Note that even NOTIFIED=>NOTIFIED results in a write. This is on - // purpose, to make sure every unpark() has a release-acquire ordering - // with park(). - if self.state.swap(NOTIFIED, Release) == PARKED { - futex_wake(&self.state); - } - } -} diff --git a/library/std/src/sys_common/thread_parker/generic.rs b/library/std/src/sys_common/thread_parker/generic.rs deleted file mode 100644 index f3d8b34d3..000000000 --- a/library/std/src/sys_common/thread_parker/generic.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Parker implementation based on a Mutex and Condvar. - -use crate::pin::Pin; -use crate::sync::atomic::AtomicUsize; -use crate::sync::atomic::Ordering::SeqCst; -use crate::sync::{Condvar, Mutex}; -use crate::time::Duration; - -const EMPTY: usize = 0; -const PARKED: usize = 1; -const NOTIFIED: usize = 2; - -pub struct Parker { - state: AtomicUsize, - lock: Mutex<()>, - cvar: Condvar, -} - -impl Parker { - /// Construct the generic parker. The UNIX parker implementation - /// requires this to happen in-place. - pub unsafe fn new(parker: *mut Parker) { - parker.write(Parker { - state: AtomicUsize::new(EMPTY), - lock: Mutex::new(()), - cvar: Condvar::new(), - }); - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. - pub unsafe fn park(self: Pin<&Self>) { - // If we were previously notified then we consume this notification and - // return quickly. - if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { - return; - } - - // Otherwise we need to coordinate going to sleep - let mut m = self.lock.lock().unwrap(); - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read here, even though we know it will be `NOTIFIED`. - // This is because `unpark` may have been called again since we read - // `NOTIFIED` in the `compare_exchange` above. We must perform an - // acquire operation that synchronizes with that `unpark` to observe - // any writes it made before the call to unpark. To do that we must - // read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. - Err(_) => panic!("inconsistent park state"), - } - loop { - m = self.cvar.wait(m).unwrap(); - match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) { - Ok(_) => return, // got a notification - Err(_) => {} // spurious wakeup, go back to sleep - } - } - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. 
- pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - // Like `park` above we have a fast path for an already-notified thread, and - // afterwards we start coordinating for a sleep. - // return quickly. - if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { - return; - } - let m = self.lock.lock().unwrap(); - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - Err(NOTIFIED) => { - // We must read again here, see `park`. - let old = self.state.swap(EMPTY, SeqCst); - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return; - } // should consume this notification, so prohibit spurious wakeups in next park. - Err(_) => panic!("inconsistent park_timeout state"), - } - - // Wait with a timeout, and if we spuriously wake up or otherwise wake up - // from a notification we just want to unconditionally set the state back to - // empty, either consuming a notification or un-flagging ourselves as - // parked. - let (_m, _result) = self.cvar.wait_timeout(m, dur).unwrap(); - match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => {} // got a notification, hurray! - PARKED => {} // no notification, alas - n => panic!("inconsistent park_timeout state: {n}"), - } - } - - // This implementation doesn't require `Pin`, but other implementations do. - pub fn unpark(self: Pin<&Self>) { - // To ensure the unparked thread will observe any writes we made - // before this call, we must perform a release operation that `park` - // can synchronize with. To do that we must write `NOTIFIED` even if - // `state` is already `NOTIFIED`. That is why this must be a swap - // rather than a compare-and-swap that returns if it reads `NOTIFIED` - // on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => return, // no one was waiting - NOTIFIED => return, // already unparked - PARKED => {} // gotta go wake someone up - _ => panic!("inconsistent state in unpark"), - } - - // There is a period between when the parked thread sets `state` to - // `PARKED` (or last checked `state` in the case of a spurious wake - // up) and when it actually waits on `cvar`. If we were to notify - // during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has - // `lock` locked at this stage so we can acquire `lock` to wait until - // it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the - // parked thread wakes it doesn't get woken only to have to wait for us - // to release `lock`. - drop(self.lock.lock().unwrap()); - self.cvar.notify_one() - } -} diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs deleted file mode 100644 index f86a9a555..000000000 --- a/library/std/src/sys_common/thread_parker/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -cfg_if::cfg_if! 
{ - if #[cfg(any( - target_os = "linux", - target_os = "android", - all(target_arch = "wasm32", target_feature = "atomics"), - target_os = "freebsd", - target_os = "openbsd", - target_os = "dragonfly", - target_os = "fuchsia", - target_os = "hermit", - ))] { - mod futex; - pub use futex::Parker; - } else if #[cfg(target_os = "solid_asp3")] { - mod wait_flag; - pub use wait_flag::Parker; - } else if #[cfg(any(windows, target_family = "unix"))] { - pub use crate::sys::thread_parker::Parker; - } else { - mod generic; - pub use generic::Parker; - } -} diff --git a/library/std/src/sys_common/thread_parker/wait_flag.rs b/library/std/src/sys_common/thread_parker/wait_flag.rs deleted file mode 100644 index 6561c1866..000000000 --- a/library/std/src/sys_common/thread_parker/wait_flag.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! A wait-flag-based thread parker. -//! -//! Some operating systems provide low-level parking primitives like wait counts, -//! event flags or semaphores which are not susceptible to race conditions (meaning -//! the wakeup can occur before the wait operation). To implement the `std` thread -//! parker on top of these primitives, we only have to ensure that parking is fast -//! when the thread token is available, the atomic ordering guarantees are maintained -//! and spurious wakeups are minimized. -//! -//! To achieve this, this parker uses an atomic variable with three states: `EMPTY`, -//! `PARKED` and `NOTIFIED`: -//! * `EMPTY` means the token has not been made available, but the thread is not -//! currently waiting on it. -//! * `PARKED` means the token is not available and the thread is parked. -//! * `NOTIFIED` means the token is available. -//! -//! `park` and `park_timeout` change the state from `EMPTY` to `PARKED` and from -//! `NOTIFIED` to `EMPTY`. If the state was `NOTIFIED`, the thread was unparked and -//! execution can continue without calling into the OS. If the state was `EMPTY`, -//! the token is not available and the thread waits on the primitive (here called -//! "wait flag"). -//! -//! `unpark` changes the state to `NOTIFIED`. If the state was `PARKED`, the thread -//! is or will be sleeping on the wait flag, so we raise it. - -use crate::pin::Pin; -use crate::sync::atomic::AtomicI8; -use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release}; -use crate::sys::wait_flag::WaitFlag; -use crate::time::Duration; - -const EMPTY: i8 = 0; -const PARKED: i8 = -1; -const NOTIFIED: i8 = 1; - -pub struct Parker { - state: AtomicI8, - wait_flag: WaitFlag, -} - -impl Parker { - /// Construct a parker for the current thread. The UNIX parker - /// implementation requires this to happen in-place. - pub unsafe fn new(parker: *mut Parker) { - parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() }) - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. - pub unsafe fn park(self: Pin<&Self>) { - match self.state.fetch_sub(1, Acquire) { - // NOTIFIED => EMPTY - NOTIFIED => return, - // EMPTY => PARKED - EMPTY => (), - _ => panic!("inconsistent park state"), - } - - // Avoid waking up from spurious wakeups (these are quite likely, see below). - loop { - self.wait_flag.wait(); - - match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) { - Ok(_) => return, - Err(PARKED) => (), - Err(_) => panic!("inconsistent park state"), - } - } - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. 
-    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
-        match self.state.fetch_sub(1, Acquire) {
-            NOTIFIED => return,
-            EMPTY => (),
-            _ => panic!("inconsistent park state"),
-        }
-
-        self.wait_flag.wait_timeout(dur);
-
-        // Either a wakeup or a timeout occurred. Wakeups may be spurious, as there can be
-        // a race condition when `unpark` is performed between receiving the timeout and
-        // resetting the state, resulting in the eventflag being set unnecessarily. `park`
-        // is protected against this by looping until the token is actually given, but
-        // here we cannot easily tell.
-
-        // Use `swap` to provide acquire ordering.
-        match self.state.swap(EMPTY, Acquire) {
-            NOTIFIED => (),
-            PARKED => (),
-            _ => panic!("inconsistent park state"),
-        }
-    }
-
-    // This implementation doesn't require `Pin`, but other implementations do.
-    pub fn unpark(self: Pin<&Self>) {
-        let state = self.state.swap(NOTIFIED, Release);
-
-        if state == PARKED {
-            self.wait_flag.raise();
-        }
-    }
-}
diff --git a/library/std/src/sys_common/thread_parking/futex.rs b/library/std/src/sys_common/thread_parking/futex.rs
new file mode 100644
index 000000000..588e7b278
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/futex.rs
@@ -0,0 +1,97 @@
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicU32;
+use crate::sync::atomic::Ordering::{Acquire, Release};
+use crate::sys::futex::{futex_wait, futex_wake};
+use crate::time::Duration;
+
+const PARKED: u32 = u32::MAX;
+const EMPTY: u32 = 0;
+const NOTIFIED: u32 = 1;
+
+pub struct Parker {
+    state: AtomicU32,
+}
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide, is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, it was effectively unparked
+// before unpark() was called while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when checking for this state in park().
+impl Parker {
+    /// Construct the futex parker. The UNIX parker implementation
+    /// requires this to happen in-place.
+    pub unsafe fn new_in_place(parker: *mut Parker) {
+        parker.write(Self { state: AtomicU32::new(EMPTY) });
+    }
+
+    // Assumes this is only called by the thread that owns the Parker,
+    // which means that `self.state != PARKED`.
+    pub unsafe fn park(self: Pin<&Self>) {
+        // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+        // first case.
+        if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+            return;
+        }
+        loop {
+            // Wait for something to happen, assuming it's still set to PARKED.
+            futex_wait(&self.state, PARKED, None);
+            // Change NOTIFIED=>EMPTY and return in that case.
+            if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Acquire).is_ok() {
+                return;
+            } else {
+                // Spurious wake up. We loop to try again.
+            }
+        }
+    }
+
+    // Assumes this is only called by the thread that owns the Parker,
+    // which means that `self.state != PARKED`. This implementation doesn't
+    // require `Pin`, but other implementations do.
+    pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) {
+        // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+        // first case.
+        if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+            return;
+        }
+        // Wait for something to happen, assuming it's still set to PARKED.
+        futex_wait(&self.state, PARKED, Some(timeout));
+        // This is not just a store, because we need to establish a
+        // release-acquire ordering with unpark().
+        if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+            // Woke up because of unpark().
+        } else {
+            // Timeout or spurious wake up.
+            // We return either way, because we can't easily tell if it was the
+            // timeout or not.
+        }
+    }
+
+    // This implementation doesn't require `Pin`, but other implementations do.
+    #[inline]
+    pub fn unpark(self: Pin<&Self>) {
+        // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+        // wake the thread in the first case.
+        //
+        // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+        // purpose, to make sure every unpark() has a release-acquire ordering
+        // with park().
+        if self.state.swap(NOTIFIED, Release) == PARKED {
+            futex_wake(&self.state);
+        }
+    }
+}
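
The memory-ordering contract described in the notes above is observable through the public API. The following stand-alone sketch (not part of this patch; the `READY` flag and all names are illustrative) exercises it via `thread::park`/`Thread::unpark`:

    use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
    use std::thread;

    // READY is accessed with Relaxed ordering on purpose: the happens-before
    // edge between the two threads comes entirely from the unpark()/park()
    // pair, exactly as the ordering notes above describe.
    static READY: AtomicBool = AtomicBool::new(false);

    fn main() {
        let waiter = thread::spawn(|| {
            // park() may also wake spuriously, so the flag is re-checked in a
            // loop. Once park() returns because it consumed the token set by
            // unpark(), the store to READY below is guaranteed to be visible.
            while !READY.load(Relaxed) {
                thread::park();
            }
        });
        READY.store(true, Relaxed);
        waiter.thread().unpark();
        waiter.join().unwrap();
    }
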
diff --git a/library/std/src/sys_common/thread_parking/generic.rs b/library/std/src/sys_common/thread_parking/generic.rs
new file mode 100644
index 000000000..3209bffe3
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/generic.rs
@@ -0,0 +1,125 @@
+//! Parker implementation based on a Mutex and Condvar.
+
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::{Condvar, Mutex};
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+pub struct Parker {
+    state: AtomicUsize,
+    lock: Mutex<()>,
+    cvar: Condvar,
+}
+
+impl Parker {
+    /// Construct the generic parker. The UNIX parker implementation
+    /// requires this to happen in-place.
+    pub unsafe fn new_in_place(parker: *mut Parker) {
+        parker.write(Parker {
+            state: AtomicUsize::new(EMPTY),
+            lock: Mutex::new(()),
+            cvar: Condvar::new(),
+        });
+    }
+
+    // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+    pub unsafe fn park(self: Pin<&Self>) {
+        // If we were previously notified then we consume this notification and
+        // return quickly.
+        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+            return;
+        }
+
+        // Otherwise we need to coordinate going to sleep
+        let mut m = self.lock.lock().unwrap();
+        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+            Ok(_) => {}
+            Err(NOTIFIED) => {
+                // We must read here, even though we know it will be `NOTIFIED`.
+                // This is because `unpark` may have been called again since we read
+                // `NOTIFIED` in the `compare_exchange` above. We must perform an
+                // acquire operation that synchronizes with that `unpark` to observe
+                // any writes it made before the call to unpark. To do that we must
+                // read from the write it made to `state`.
+                let old = self.state.swap(EMPTY, SeqCst);
+                assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+                return;
+            } // should consume this notification, so prohibit spurious wakeups in next park.
+            Err(_) => panic!("inconsistent park state"),
+        }
+        loop {
+            m = self.cvar.wait(m).unwrap();
+            match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+                Ok(_) => return, // got a notification
+                Err(_) => {}     // spurious wakeup, go back to sleep
+            }
+        }
+    }
+
+    // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        // Like `park` above we have a fast path for an already-notified thread,
+        // and return quickly in that case; afterwards we start coordinating for
+        // a sleep.
+        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+            return;
+        }
+        let m = self.lock.lock().unwrap();
+        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+            Ok(_) => {}
+            Err(NOTIFIED) => {
+                // We must read again here, see `park`.
+                let old = self.state.swap(EMPTY, SeqCst);
+                assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+                return;
+            } // should consume this notification, so prohibit spurious wakeups in next park.
+            Err(_) => panic!("inconsistent park_timeout state"),
+        }
+
+        // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+        // from a notification we just want to unconditionally set the state back to
+        // empty, either consuming a notification or un-flagging ourselves as
+        // parked.
+        let (_m, _result) = self.cvar.wait_timeout(m, dur).unwrap();
+        match self.state.swap(EMPTY, SeqCst) {
+            NOTIFIED => {} // got a notification, hurray!
+            PARKED => {}   // no notification, alas
+            n => panic!("inconsistent park_timeout state: {n}"),
+        }
+    }
+
+    // This implementation doesn't require `Pin`, but other implementations do.
+    pub fn unpark(self: Pin<&Self>) {
+        // To ensure the unparked thread will observe any writes we made
+        // before this call, we must perform a release operation that `park`
+        // can synchronize with. To do that we must write `NOTIFIED` even if
+        // `state` is already `NOTIFIED`. That is why this must be a swap
+        // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+        // on failure.
+        match self.state.swap(NOTIFIED, SeqCst) {
+            EMPTY => return,    // no one was waiting
+            NOTIFIED => return, // already unparked
+            PARKED => {}        // gotta go wake someone up
+            _ => panic!("inconsistent state in unpark"),
+        }
+
+        // There is a period between when the parked thread sets `state` to
+        // `PARKED` (or last checked `state` in the case of a spurious wake
+        // up) and when it actually waits on `cvar`. If we were to notify
+        // during this period it would be ignored and then when the parked
+        // thread went to sleep it would never wake up. Fortunately, it has
+        // `lock` locked at this stage so we can acquire `lock` to wait until
+        // it is ready to receive the notification.
+        //
+        // Releasing `lock` before the call to `notify_one` means that when the
+        // parked thread wakes it doesn't get woken only to have to wait for us
+        // to release `lock`.
+        drop(self.lock.lock().unwrap());
+        self.cvar.notify_one()
+    }
+}
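
The reason `unpark` above acquires `lock` before calling `notify_one` is the classic lost-wakeup race between checking a predicate and blocking on the condition variable. A minimal stand-alone sketch of the same discipline follows; it is not taken from this patch, and the `Event` type is hypothetical:

    use std::sync::{Condvar, Mutex};

    // A one-shot event: wait() blocks until set() has been called. Because
    // the flag is read and written under the mutex, a set() that lands
    // between the predicate check and the wait cannot be lost -- the same
    // hazard unpark() guards against by locking `lock` before notify_one().
    struct Event {
        flag: Mutex<bool>,
        cvar: Condvar,
    }

    impl Event {
        fn new() -> Self {
            Event { flag: Mutex::new(false), cvar: Condvar::new() }
        }

        fn wait(&self) {
            let mut set = self.flag.lock().unwrap();
            // Condvar wakeups can be spurious; re-check the predicate in a loop.
            while !*set {
                set = self.cvar.wait(set).unwrap();
            }
        }

        fn set(&self) {
            *self.flag.lock().unwrap() = true;
            self.cvar.notify_all();
        }
    }
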
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
new file mode 100644
index 000000000..e98169597
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -0,0 +1,108 @@
+//! Thread parking using thread ids.
+//!
+//! Some platforms (notably NetBSD) have thread parking primitives whose semantics
+//! match those offered by `thread::park`, with the difference that the thread to
+//! be unparked is referenced by a platform-specific thread id. Since the thread
+//! parker is constructed before that id is known, an atomic state variable is used
+//! to manage the park state and propagate the thread id. This also avoids platform
+//! calls in the case where `unpark` is called before `park`.
+
+use crate::cell::UnsafeCell;
+use crate::pin::Pin;
+use crate::sync::atomic::{
+    fence, AtomicI8,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::thread_parking::{current, park, park_timeout, unpark, ThreadId};
+use crate::time::Duration;
+
+pub struct Parker {
+    state: AtomicI8,
+    tid: UnsafeCell<Option<ThreadId>>,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+impl Parker {
+    pub fn new() -> Parker {
+        Parker { state: AtomicI8::new(EMPTY), tid: UnsafeCell::new(None) }
+    }
+
+    /// Create a new thread parker. UNIX requires this to happen in-place.
+    pub unsafe fn new_in_place(parker: *mut Parker) {
+        parker.write(Parker::new())
+    }
+
+    /// # Safety
+    /// * must always be called from the same thread
+    /// * must be called before the state is set to PARKED
+    unsafe fn init_tid(&self) {
+        // The field is only ever written to from this thread, so we don't need
+        // synchronization to read it here.
+        if self.tid.get().read().is_none() {
+            // Because this point is only reached once, before the state is set
+            // to PARKED for the first time, the non-atomic write here can not
+            // conflict with reads by other threads.
+            self.tid.get().write(Some(current()));
+            // Ensure that the write can be observed by all threads reading the
+            // state. Synchronizes with the acquire barrier in `unpark`.
+            fence(Release);
+        }
+    }
+
+    pub unsafe fn park(self: Pin<&Self>) {
+        self.init_tid();
+
+        // Changes NOTIFIED to EMPTY and EMPTY to PARKED.
+        let mut state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+        if state == PARKED {
+            // Loop to guard against spurious wakeups.
+            while state == PARKED {
+                park(self.state.as_mut_ptr().addr());
+                state = self.state.load(Acquire);
+            }
+
+            // Since the state change has already been observed with acquire
+            // ordering, the state can be reset with a relaxed store instead
+            // of a swap.
+            self.state.store(EMPTY, Relaxed);
+        }
+    }
+
+    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        self.init_tid();
+
+        let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+        if state == PARKED {
+            park_timeout(dur, self.state.as_mut_ptr().addr());
+            // Swap to ensure that we observe all state changes with acquire
+            // ordering, even if the state has been changed after the timeout
+            // occurred.
+            self.state.swap(EMPTY, Acquire);
+        }
+    }
+
+    pub fn unpark(self: Pin<&Self>) {
+        let state = self.state.swap(NOTIFIED, Release);
+        if state == PARKED {
+            // Synchronize with the release fence in `init_tid` to observe the
+            // write to `tid`.
+            fence(Acquire);
+            // # Safety
+            // The thread id is initialized before the state is set to `PARKED`
+            // for the first time and is not written to from that point on
+            // (negating the need for an atomic read).
+            let tid = unsafe { self.tid.get().read().unwrap_unchecked() };
+            // It is possible that the waiting thread woke up because of a timeout
+            // and terminated before this call is made. This call then returns an
+            // error or wakes up an unrelated thread. The platform API and
+            // environment does allow this, however.
+            unpark(tid, self.state.as_mut_ptr().addr());
+        }
+    }
+}
+
+unsafe impl Send for Parker {}
+unsafe impl Sync for Parker {}
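
Both this parker and the futex parker encode their states so that a single `fetch_sub(1)` performs either of the two transitions a parking thread needs. A small stand-alone demonstration of that trick (illustrative, not from the patch):

    use std::sync::atomic::{AtomicI8, Ordering::{Acquire, Relaxed}};

    const PARKED: i8 = -1;
    const EMPTY: i8 = 0;
    const NOTIFIED: i8 = 1;

    fn main() {
        let state = AtomicI8::new(NOTIFIED);
        // Token available: fetch_sub consumes it (NOTIFIED => EMPTY), and the
        // returned previous value tells the caller to return immediately.
        assert_eq!(state.fetch_sub(1, Acquire), NOTIFIED);
        assert_eq!(state.load(Relaxed), EMPTY);
        // No token: the very same operation announces that the thread is
        // about to sleep (EMPTY => PARKED).
        assert_eq!(state.fetch_sub(1, Acquire), EMPTY);
        assert_eq!(state.load(Relaxed), PARKED);
    }
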
diff --git a/library/std/src/sys_common/thread_parking/mod.rs b/library/std/src/sys_common/thread_parking/mod.rs
new file mode 100644
index 000000000..0ead6633c
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/mod.rs
@@ -0,0 +1,29 @@
+cfg_if::cfg_if! {
+    if #[cfg(any(
+        target_os = "linux",
+        target_os = "android",
+        all(target_arch = "wasm32", target_feature = "atomics"),
+        target_os = "freebsd",
+        target_os = "openbsd",
+        target_os = "dragonfly",
+        target_os = "fuchsia",
+        target_os = "hermit",
+    ))] {
+        mod futex;
+        pub use futex::Parker;
+    } else if #[cfg(any(
+        target_os = "netbsd",
+        all(target_vendor = "fortanix", target_env = "sgx"),
+    ))] {
+        mod id;
+        pub use id::Parker;
+    } else if #[cfg(target_os = "solid_asp3")] {
+        mod wait_flag;
+        pub use wait_flag::Parker;
+    } else if #[cfg(any(windows, target_family = "unix"))] {
+        pub use crate::sys::thread_parking::Parker;
+    } else {
+        mod generic;
+        pub use generic::Parker;
+    }
+}
diff --git a/library/std/src/sys_common/thread_parking/wait_flag.rs b/library/std/src/sys_common/thread_parking/wait_flag.rs
new file mode 100644
index 000000000..d0f8899a9
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/wait_flag.rs
@@ -0,0 +1,102 @@
+//! A wait-flag-based thread parker.
+//!
+//! Some operating systems provide low-level parking primitives like wait counts,
+//! event flags or semaphores which are not susceptible to race conditions (meaning
+//! the wakeup can occur before the wait operation). To implement the `std` thread
+//! parker on top of these primitives, we only have to ensure that parking is fast
+//! when the thread token is available, the atomic ordering guarantees are maintained
+//! and spurious wakeups are minimized.
+//!
+//! To achieve this, this parker uses an atomic variable with three states: `EMPTY`,
+//! `PARKED` and `NOTIFIED`:
+//! * `EMPTY` means the token has not been made available, but the thread is not
+//!   currently waiting on it.
+//! * `PARKED` means the token is not available and the thread is parked.
+//! * `NOTIFIED` means the token is available.
+//!
+//! `park` and `park_timeout` change the state from `EMPTY` to `PARKED` and from
+//! `NOTIFIED` to `EMPTY`. If the state was `NOTIFIED`, the thread was unparked and
+//! execution can continue without calling into the OS. If the state was `EMPTY`,
+//! the token is not available and the thread waits on the primitive (here called
+//! "wait flag").
+//!
+//! `unpark` changes the state to `NOTIFIED`. If the state was `PARKED`, the thread
+//! is or will be sleeping on the wait flag, so we raise it.
+
+use crate::pin::Pin;
+use crate::sync::atomic::AtomicI8;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::sys::wait_flag::WaitFlag;
+use crate::time::Duration;
+
+const EMPTY: i8 = 0;
+const PARKED: i8 = -1;
+const NOTIFIED: i8 = 1;
+
+pub struct Parker {
+    state: AtomicI8,
+    wait_flag: WaitFlag,
+}
+
+impl Parker {
+    /// Construct a parker for the current thread. The UNIX parker
+    /// implementation requires this to happen in-place.
+    pub unsafe fn new_in_place(parker: *mut Parker) {
+        parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() })
+    }
+
+    // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+    pub unsafe fn park(self: Pin<&Self>) {
+        match self.state.fetch_sub(1, Acquire) {
+            // NOTIFIED => EMPTY
+            NOTIFIED => return,
+            // EMPTY => PARKED
+            EMPTY => (),
+            _ => panic!("inconsistent park state"),
+        }
+
+        // Avoid waking up from spurious wakeups (these are quite likely, see below).
+        loop {
+            self.wait_flag.wait();
+
+            match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) {
+                Ok(_) => return,
+                Err(PARKED) => (),
+                Err(_) => panic!("inconsistent park state"),
+            }
+        }
+    }
+
+    // This implementation doesn't require `unsafe` and `Pin`, but other implementations do.
+    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        match self.state.fetch_sub(1, Acquire) {
+            NOTIFIED => return,
+            EMPTY => (),
+            _ => panic!("inconsistent park state"),
+        }
+
+        self.wait_flag.wait_timeout(dur);
+
+        // Either a wakeup or a timeout occurred. Wakeups may be spurious, as there can be
+        // a race condition when `unpark` is performed between receiving the timeout and
+        // resetting the state, resulting in the eventflag being set unnecessarily. `park`
+        // is protected against this by looping until the token is actually given, but
+        // here we cannot easily tell.
+
+        // Use `swap` to provide acquire ordering.
+        match self.state.swap(EMPTY, Acquire) {
+            NOTIFIED => (),
+            PARKED => (),
+            _ => panic!("inconsistent park state"),
+        }
+    }
+
+    // This implementation doesn't require `Pin`, but other implementations do.
+    pub fn unpark(self: Pin<&Self>) {
+        let state = self.state.swap(NOTIFIED, Release);
+
+        if state == PARKED {
+            self.wait_flag.raise();
+        }
+    }
+}
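
All of the parkers above expose a `new_in_place` constructor because the `Parker` must be initialized directly in its final, pinned location rather than returned by value. A stand-alone sketch of that construction pattern (the `Parker` here is a hypothetical stand-in, not the std type):

    use std::mem::MaybeUninit;
    use std::pin::Pin;
    use std::sync::atomic::{AtomicI8, Ordering::Relaxed};

    struct Parker {
        state: AtomicI8,
    }

    impl Parker {
        /// Write a parker directly into uninitialized storage.
        unsafe fn new_in_place(parker: *mut Parker) {
            parker.write(Parker { state: AtomicI8::new(0) });
        }
    }

    fn main() {
        // Reserve uninitialized heap storage, initialize it in place, and only
        // then treat it as a live, pinned Parker. Once pinned, the value never
        // moves again, so platform code can hold raw pointers into it (such as
        // the address of `state`) across park()/unpark() calls.
        let slot = Box::into_raw(Box::new(MaybeUninit::<Parker>::uninit()));
        let parker: Pin<Box<Parker>> = unsafe {
            Parker::new_in_place(slot.cast::<Parker>());
            Pin::new_unchecked(Box::from_raw(slot.cast::<Parker>()))
        };
        assert_eq!(parker.state.load(Relaxed), 0);
    }
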
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index 5d267891b..b30bb7b77 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -905,9 +905,8 @@ pub mod statik {
 pub mod fast {
     use super::lazy::LazyKeyInner;
     use crate::cell::Cell;
-    use crate::fmt;
-    use crate::mem;
     use crate::sys::thread_local_dtor::register_dtor;
+    use crate::{fmt, mem, panic};
 
     #[derive(Copy, Clone)]
     enum DtorState {
@@ -950,7 +949,7 @@ pub mod fast {
 
     // note that this is just a publicly-callable function only for the
     // const-initialized form of thread locals, basically a way to call the
-    // free `register_dtor` function defined elsewhere in libstd.
+    // free `register_dtor` function defined elsewhere in std.
    pub unsafe fn register_dtor(a: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
        unsafe {
            register_dtor(a, dtor);
@@ -1028,10 +1027,15 @@ pub mod fast {
         // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
         // causes future calls to `get` to run `try_initialize_drop` again,
         // which will now fail, and return `None`.
-        unsafe {
+        //
+        // Wrap the call in a catch to ensure unwinding is caught in the event
+        // a panic takes place in a destructor.
+        if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| unsafe {
             let value = (*ptr).inner.take();
             (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
             drop(value);
+        })) {
+            rtabort!("thread local panicked on drop");
         }
     }
 }
@@ -1044,10 +1048,8 @@ pub mod fast {
 pub mod os {
     use super::lazy::LazyKeyInner;
     use crate::cell::Cell;
-    use crate::fmt;
-    use crate::marker;
-    use crate::ptr;
     use crate::sys_common::thread_local_key::StaticKey as OsStaticKey;
+    use crate::{fmt, marker, panic, ptr};
 
     /// Use a regular global static to store this key; the state provided will then be
     /// thread-local.
@@ -1137,12 +1139,17 @@ pub mod os {
         //
         // Note that to prevent an infinite loop we reset it back to null right
         // before we return from the destructor ourselves.
-        unsafe {
+        //
+        // Wrap the call in a catch to ensure unwinding is caught in the event
+        // a panic takes place in a destructor.
+        if let Err(_) = panic::catch_unwind(|| unsafe {
             let ptr = Box::from_raw(ptr as *mut Value<T>);
             let key = ptr.key;
             key.os.set(ptr::invalid_mut(1));
             drop(ptr);
             key.os.set(ptr::null_mut());
+        }) {
+            rtabort!("thread local panicked on drop");
         }
     }
 }
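
With this change, a panic in a thread-local destructor no longer unwinds out of the destructor glue; it is caught and converted into an abort. A stand-alone sketch of the newly specified behavior (illustrative; running it is expected to abort the process):

    use std::thread;

    // A destructor that panics. The catch_unwind wrappers added above catch
    // the unwind inside the TLS destructor glue and abort with
    // "thread local panicked on drop" instead of letting the panic escape a
    // context that cannot support unwinding.
    struct PanicsOnDrop;

    impl Drop for PanicsOnDrop {
        fn drop(&mut self) {
            panic!("panicking in a TLS destructor");
        }
    }

    thread_local! {
        static BOMB: PanicsOnDrop = PanicsOnDrop;
    }

    fn main() {
        // Touch the thread local so its destructor is scheduled for thread
        // exit; when the spawned thread exits, the process aborts.
        let t = thread::spawn(|| BOMB.with(|_| ()));
        let _ = t.join();
    }
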
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
index 80dc4c038..964c7fc5b 100644
--- a/library/std/src/thread/local/tests.rs
+++ b/library/std/src/thread/local/tests.rs
@@ -23,11 +23,11 @@ impl Signal {
     }
 }
 
-struct Foo(Signal);
+struct NotifyOnDrop(Signal);
 
-impl Drop for Foo {
+impl Drop for NotifyOnDrop {
     fn drop(&mut self) {
-        let Foo(ref f) = *self;
+        let NotifyOnDrop(ref f) = *self;
         f.notify();
     }
 }
@@ -82,18 +82,18 @@ fn states() {
 
 #[test]
 fn smoke_dtor() {
-    thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+    thread_local!(static FOO: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
     run(&FOO);
-    thread_local!(static FOO2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+    thread_local!(static FOO2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
     run(&FOO2);
 
-    fn run(key: &'static LocalKey<UnsafeCell<Option<Foo>>>) {
+    fn run(key: &'static LocalKey<UnsafeCell<Option<NotifyOnDrop>>>) {
         let signal = Signal::default();
         let signal2 = signal.clone();
         let t = thread::spawn(move || unsafe {
             let mut signal = Some(signal2);
             key.with(|f| {
-                *f.get() = Some(Foo(signal.take().unwrap()));
+                *f.get() = Some(NotifyOnDrop(signal.take().unwrap()));
             });
         });
         signal.wait();
@@ -187,13 +187,13 @@ fn self_referential() {
 fn dtors_in_dtors_in_dtors() {
     struct S1(Signal);
     thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
-    thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell::new(None));
+    thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = UnsafeCell::new(None));
 
     impl Drop for S1 {
         fn drop(&mut self) {
             let S1(ref signal) = *self;
             unsafe {
-                let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
+                let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
             }
         }
     }
@@ -211,13 +211,13 @@ fn dtors_in_dtors_in_dtors() {
 fn dtors_in_dtors_in_dtors_const_init() {
     struct S1(Signal);
     thread_local!(static K1: UnsafeCell<Option<S1>> = const { UnsafeCell::new(None) });
-    thread_local!(static K2: UnsafeCell<Option<Foo>> = const { UnsafeCell::new(None) });
+    thread_local!(static K2: UnsafeCell<Option<NotifyOnDrop>> = const { UnsafeCell::new(None) });
 
     impl Drop for S1 {
         fn drop(&mut self) {
             let S1(ref signal) = *self;
             unsafe {
-                let _ = K2.try_with(|s| *s.get() = Some(Foo(signal.clone())));
+                let _ = K2.try_with(|s| *s.get() = Some(NotifyOnDrop(signal.clone())));
             }
         }
     }
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index 34bdb8bd4..692ff0cbc 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -173,10 +173,16 @@ use crate::sync::Arc;
 use crate::sys::thread as imp;
 use crate::sys_common::thread;
 use crate::sys_common::thread_info;
-use crate::sys_common::thread_parker::Parker;
+use crate::sys_common::thread_parking::Parker;
 use crate::sys_common::{AsInner, IntoInner};
 use crate::time::Duration;
 
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+mod scoped;
+
+#[stable(feature = "scoped_threads", since = "1.63.0")]
+pub use scoped::{scope, Scope, ScopedJoinHandle};
+
 ////////////////////////////////////////////////////////////////////////////////
 // Thread-local storage
 ////////////////////////////////////////////////////////////////////////////////
@@ -184,12 +190,6 @@ use crate::time::Duration;
 
 #[macro_use]
 mod local;
 
-#[stable(feature = "scoped_threads", since = "1.63.0")]
-mod scoped;
-
-#[stable(feature = "scoped_threads", since = "1.63.0")]
-pub use scoped::{scope, Scope, ScopedJoinHandle};
-
 #[stable(feature = "rust1", since = "1.0.0")]
 pub use self::local::{AccessError, LocalKey};
@@ -209,7 +209,6 @@ pub use self::local::{AccessError, LocalKey};
 ))]
 #[doc(hidden)]
 pub use self::local::fast::Key as __FastLocalKeyInner;
-
 // when building for tests, use real std's type
 #[unstable(feature = "libstd_thread_internals", issue = "none")]
 #[cfg(test)]
@@ -220,12 +219,21 @@ pub use self::local::fast::Key as __FastLocalKeyInner;
 pub use realstd::thread::__FastLocalKeyInner;
 
 #[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(not(test))]
 #[cfg(all(
     not(target_thread_local),
     not(all(target_family = "wasm", not(target_feature = "atomics"))),
 ))]
 #[doc(hidden)]
 pub use self::local::os::Key as __OsLocalKeyInner;
+// when building for tests, use real std's type
+#[unstable(feature = "libstd_thread_internals", issue = "none")]
+#[cfg(test)]
+#[cfg(all(
+    not(target_thread_local),
+    not(all(target_family = "wasm", not(target_feature = "atomics"))),
+))]
+pub use realstd::thread::__OsLocalKeyInner;
 
 #[unstable(feature = "libstd_thread_internals", issue = "none")]
 #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
@@ -1216,7 +1224,7 @@ impl Thread {
             let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr();
             addr_of_mut!((*ptr).name).write(name);
             addr_of_mut!((*ptr).id).write(ThreadId::new());
-            Parker::new(addr_of_mut!((*ptr).parker));
+            Parker::new_in_place(addr_of_mut!((*ptr).parker));
             Pin::new_unchecked(arc.assume_init())
         };
 
-- 
cgit v1.2.3