Diffstat (limited to 'library/std/src/sys_common')
14 files changed, 190 insertions, 276 deletions
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 8807077cb..f1d804ef4 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -20,7 +20,7 @@ pub fn lock() -> impl Drop {
 /// Prints the current backtrace.
 pub fn print(w: &mut dyn Write, format: PrintFmt) -> io::Result<()> {
     // There are issues currently linking libbacktrace into tests, and in
-    // general during libstd's own unit tests we're not testing this path. In
+    // general during std's own unit tests we're not testing this path. In
     // test mode immediately return here to optimize away any references to the
     // libbacktrace symbols
     if cfg!(test) {
@@ -111,7 +111,7 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt::
 }
 
 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
-/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// this is only inline(never) when backtraces in std are enabled, otherwise
 /// it's fine to optimize away.
 #[cfg_attr(feature = "backtrace", inline(never))]
 pub fn __rust_begin_short_backtrace<F, T>(f: F) -> T
@@ -127,7 +127,7 @@ where
 }
 
 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. Note that
-/// this is only inline(never) when backtraces in libstd are enabled, otherwise
+/// this is only inline(never) when backtraces in std are enabled, otherwise
 /// it's fine to optimize away.
 #[cfg_attr(feature = "backtrace", inline(never))]
 pub fn __rust_end_short_backtrace<F, T>(f: F) -> T
diff --git a/library/std/src/sys_common/io.rs b/library/std/src/sys_common/io.rs
index d1e9fed41..4a42ff3c6 100644
--- a/library/std/src/sys_common/io.rs
+++ b/library/std/src/sys_common/io.rs
@@ -39,9 +39,10 @@ pub mod test {
         }
     }
 
+    #[track_caller] // for `test_rng`
    pub fn tmpdir() -> TempDir {
         let p = env::temp_dir();
-        let mut r = rand::thread_rng();
+        let mut r = crate::test_helpers::test_rng();
         let ret = p.join(&format!("rust-{}", r.next_u32()));
         fs::create_dir(&ret).unwrap();
         TempDir(ret)
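The io.rs change above swaps the OS-seeded rand::thread_rng() for a deterministic test_rng(), and the new #[track_caller] attribute suggests the helper seeds itself from the caller's source location. Below is a minimal sketch of that idea; the TestRng name, the location-hashing scheme, and the xorshift64* generator are all illustrative assumptions, not the actual std::test_helpers implementation.

use std::panic::Location;

pub struct TestRng(u64);

impl TestRng {
    #[track_caller]
    pub fn new() -> TestRng {
        // Derive a stable seed from the caller's source location so each
        // call site gets reproducible values across runs.
        let loc = Location::caller();
        let mut seed = 0xcafe_f00d_d15e_a5e5_u64;
        for b in loc.file().bytes() {
            seed = seed.rotate_left(7) ^ b as u64;
        }
        seed ^= (loc.line() as u64) << 32 | loc.column() as u64;
        if seed == 0 {
            seed = 1; // xorshift state must never be zero
        }
        TestRng(seed)
    }

    pub fn next_u32(&mut self) -> u32 {
        // One xorshift64* step.
        self.0 ^= self.0 << 13;
        self.0 ^= self.0 >> 7;
        self.0 ^= self.0 << 17;
        (self.0.wrapping_mul(0x2545_F491_4F6C_DD1D) >> 32) as u32
    }
}

fn main() {
    let mut rng = TestRng::new();
    println!("rust-{}", rng.next_u32()); // same value on every run
}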
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index b1987aa0f..6b24b0e9a 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -27,11 +27,10 @@ pub mod lazy_box;
 pub mod memchr;
 pub mod once;
 pub mod process;
-pub mod remutex;
 pub mod thread;
 pub mod thread_info;
 pub mod thread_local_dtor;
-pub mod thread_parker;
+pub mod thread_parking;
 pub mod wstr;
 pub mod wtf8;
diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs
index 8742e68cc..359697d83 100644
--- a/library/std/src/sys_common/once/mod.rs
+++ b/library/std/src/sys_common/once/mod.rs
@@ -6,22 +6,6 @@
 // As a result, we end up implementing it ourselves in the standard library.
 // This also gives us the opportunity to optimize the implementation a bit which
 // should help the fast path on call sites.
-//
-// So to recap, the guarantees of a Once are that it will call the
-// initialization closure at most once, and it will never return until the one
-// that's running has finished running. This means that we need some form of
-// blocking here while the custom callback is running at the very least.
-// Additionally, we add on the restriction of **poisoning**. Whenever an
-// initialization closure panics, the Once enters a "poisoned" state which means
-// that all future calls will immediately panic as well.
-//
-// So to implement this, one might first reach for a `Mutex`, but those cannot
-// be put into a `static`. It also gets a lot harder with poisoning to figure
-// out when the mutex needs to be deallocated because it's not after the closure
-// finishes, but after the first successful closure finishes.
-//
-// All in all, this is instead implemented with atomics and lock-free
-// operations! Whee!
 
 cfg_if::cfg_if! {
     if #[cfg(any(
@@ -36,8 +20,15 @@ cfg_if::cfg_if! {
     ))] {
         mod futex;
         pub use futex::{Once, OnceState};
+    } else if #[cfg(any(
+        windows,
+        target_family = "unix",
+        all(target_vendor = "fortanix", target_env = "sgx"),
+        target_os = "solid_asp3",
+    ))] {
+        mod queue;
+        pub use queue::{Once, OnceState};
     } else {
-        mod generic;
-        pub use generic::{Once, OnceState};
+        pub use crate::sys::once::{Once, OnceState};
     }
 }
diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/queue.rs
index d953a6745..d953a6745 100644
--- a/library/std/src/sys_common/once/generic.rs
+++ b/library/std/src/sys_common/once/queue.rs
diff --git a/library/std/src/sys_common/process.rs b/library/std/src/sys_common/process.rs
index 9f978789a..18883048d 100644
--- a/library/std/src/sys_common/process.rs
+++ b/library/std/src/sys_common/process.rs
@@ -4,10 +4,13 @@
 use crate::collections::BTreeMap;
 use crate::env;
 use crate::ffi::{OsStr, OsString};
-use crate::sys::process::EnvKey;
+use crate::fmt;
+use crate::io;
+use crate::sys::pipe::read2;
+use crate::sys::process::{EnvKey, ExitStatus, Process, StdioPipes};
 
 // Stores a set of changes to an environment
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub struct CommandEnv {
     clear: bool,
     saw_path: bool,
@@ -20,6 +23,14 @@ impl Default for CommandEnv {
     }
 }
 
+impl fmt::Debug for CommandEnv {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut debug_command_env = f.debug_struct("CommandEnv");
+        debug_command_env.field("clear", &self.clear).field("vars", &self.vars);
+        debug_command_env.finish()
+    }
+}
+
 impl CommandEnv {
     // Capture the current environment with these changes applied
     pub fn capture(&self) -> BTreeMap<EnvKey, OsString> {
@@ -117,3 +128,30 @@ impl<'a> ExactSizeIterator for CommandEnvs<'a> {
         self.iter.is_empty()
     }
 }
+
+pub fn wait_with_output(
+    mut process: Process,
+    mut pipes: StdioPipes,
+) -> io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
+    drop(pipes.stdin.take());
+
+    let (mut stdout, mut stderr) = (Vec::new(), Vec::new());
+    match (pipes.stdout.take(), pipes.stderr.take()) {
+        (None, None) => {}
+        (Some(out), None) => {
+            let res = out.read_to_end(&mut stdout);
+            res.unwrap();
+        }
+        (None, Some(err)) => {
+            let res = err.read_to_end(&mut stderr);
+            res.unwrap();
+        }
+        (Some(out), Some(err)) => {
+            let res = read2(out, &mut stdout, err, &mut stderr);
+            res.unwrap();
+        }
+    }
+
+    let status = process.wait()?;
+    Ok((status, stdout, stderr))
+}
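The new wait_with_output helper above centralizes the drain-pipes-then-reap sequence for the platform backends. The same pattern can be written against the public std::process API. In this sketch (run_and_capture is a hypothetical name) the pipes are read sequentially, which is simpler but can deadlock in cases the private read2 used by the diff handles correctly.

use std::io::Read;
use std::process::{Command, ExitStatus, Stdio};

fn run_and_capture(mut cmd: Command) -> std::io::Result<(ExitStatus, Vec<u8>, Vec<u8>)> {
    let mut child =
        cmd.stdin(Stdio::null()).stdout(Stdio::piped()).stderr(Stdio::piped()).spawn()?;

    let (mut out, mut err) = (Vec::new(), Vec::new());
    // Reading sequentially is simple but can deadlock if the child fills the
    // stderr pipe while we are still blocked on stdout; the private read2
    // helper used by wait_with_output polls both pipes to avoid exactly that.
    if let Some(mut stdout) = child.stdout.take() {
        stdout.read_to_end(&mut out)?;
    }
    if let Some(mut stderr) = child.stderr.take() {
        stderr.read_to_end(&mut err)?;
    }
    // Reap the exit status only after the pipes are drained, mirroring the
    // order in wait_with_output above.
    let status = child.wait()?;
    Ok((status, out, err))
}

fn main() -> std::io::Result<()> {
    let (status, out, _err) = run_and_capture(Command::new("echo"))?;
    println!("{status}: {}", String::from_utf8_lossy(&out));
    Ok(())
}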
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
deleted file mode 100644
index 4c054da64..000000000
--- a/library/std/src/sys_common/remutex.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-#[cfg(all(test, not(target_os = "emscripten")))]
-mod tests;
-
-use crate::cell::UnsafeCell;
-use crate::ops::Deref;
-use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sys::locks as sys;
-
-/// A re-entrant mutual exclusion
-///
-/// This mutex will block *other* threads waiting for the lock to become
-/// available. The thread which has already locked the mutex can lock it
-/// multiple times without blocking, preventing a common source of deadlocks.
-///
-/// This is used by stdout().lock() and friends.
-///
-/// ## Implementation details
-///
-/// The 'owner' field tracks which thread has locked the mutex.
-///
-/// We use current_thread_unique_ptr() as the thread identifier,
-/// which is just the address of a thread local variable.
-///
-/// If `owner` is set to the identifier of the current thread,
-/// we assume the mutex is already locked and instead of locking it again,
-/// we increment `lock_count`.
-///
-/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
-/// it reaches zero.
-///
-/// `lock_count` is protected by the mutex and only accessed by the thread that has
-/// locked the mutex, so needs no synchronization.
-///
-/// `owner` can be checked by other threads that want to see if they already
-/// hold the lock, so needs to be atomic. If it compares equal, we're on the
-/// same thread that holds the mutex and memory access can use relaxed ordering
-/// since we're not dealing with multiple threads. If it compares unequal,
-/// synchronization is left to the mutex, making relaxed memory ordering for
-/// the `owner` field fine in all cases.
-pub struct ReentrantMutex<T> {
-    mutex: sys::Mutex,
-    owner: AtomicUsize,
-    lock_count: UnsafeCell<u32>,
-    data: T,
-}
-
-unsafe impl<T: Send> Send for ReentrantMutex<T> {}
-unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
-
-impl<T> UnwindSafe for ReentrantMutex<T> {}
-impl<T> RefUnwindSafe for ReentrantMutex<T> {}
-
-/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
-/// dropped (falls out of scope), the lock will be unlocked.
-///
-/// The data protected by the mutex can be accessed through this guard via its
-/// Deref implementation.
-///
-/// # Mutability
-///
-/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
-/// because implementation of the trait would violate Rust’s reference aliasing
-/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
-/// guarded data.
-#[must_use = "if unused the ReentrantMutex will immediately unlock"]
-pub struct ReentrantMutexGuard<'a, T: 'a> {
-    lock: &'a ReentrantMutex<T>,
-}
-
-impl<T> !Send for ReentrantMutexGuard<'_, T> {}
-
-impl<T> ReentrantMutex<T> {
-    /// Creates a new reentrant mutex in an unlocked state.
-    pub const fn new(t: T) -> ReentrantMutex<T> {
-        ReentrantMutex {
-            mutex: sys::Mutex::new(),
-            owner: AtomicUsize::new(0),
-            lock_count: UnsafeCell::new(0),
-            data: t,
-        }
-    }
-
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This function will block the caller until it is available to acquire the mutex.
-    /// Upon returning, the thread is the only thread with the mutex held. When the thread
-    /// calling this method already holds the lock, the call shall succeed without
-    /// blocking.
-    ///
-    /// # Errors
-    ///
-    /// If another user of this mutex panicked while holding the mutex, then
-    /// this call will return failure if the mutex would otherwise be
-    /// acquired.
-    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
-        let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock.
-        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
-                self.increment_lock_count();
-            } else {
-                self.mutex.lock();
-                self.owner.store(this_thread, Relaxed);
-                debug_assert_eq!(*self.lock_count.get(), 0);
-                *self.lock_count.get() = 1;
-            }
-        }
-        ReentrantMutexGuard { lock: self }
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// If the lock could not be acquired at this time, then `Err` is returned.
-    /// Otherwise, an RAII guard is returned.
-    ///
-    /// This function does not block.
-    ///
-    /// # Errors
-    ///
-    /// If another user of this mutex panicked while holding the mutex, then
-    /// this call will return failure if the mutex would otherwise be
-    /// acquired.
-    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
-        let this_thread = current_thread_unique_ptr();
-        // Safety: We only touch lock_count when we own the lock.
-        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
-                self.increment_lock_count();
-                Some(ReentrantMutexGuard { lock: self })
-            } else if self.mutex.try_lock() {
-                self.owner.store(this_thread, Relaxed);
-                debug_assert_eq!(*self.lock_count.get(), 0);
-                *self.lock_count.get() = 1;
-                Some(ReentrantMutexGuard { lock: self })
-            } else {
-                None
-            }
-        }
-    }
-
-    unsafe fn increment_lock_count(&self) {
-        *self.lock_count.get() = (*self.lock_count.get())
-            .checked_add(1)
-            .expect("lock count overflow in reentrant mutex");
-    }
-}
-
-impl<T> Deref for ReentrantMutexGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &T {
-        &self.lock.data
-    }
-}
-
-impl<T> Drop for ReentrantMutexGuard<'_, T> {
-    #[inline]
-    fn drop(&mut self) {
-        // Safety: We own the lock.
-        unsafe {
-            *self.lock.lock_count.get() -= 1;
-            if *self.lock.lock_count.get() == 0 {
-                self.lock.owner.store(0, Relaxed);
-                self.lock.mutex.unlock();
-            }
-        }
-    }
-}
-
-/// Get an address that is unique per running thread.
-///
-/// This can be used as a non-null usize-sized ID.
-pub fn current_thread_unique_ptr() -> usize {
-    // Use a non-drop type to make sure it's still available during thread destruction.
-    thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
-}
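The deleted ReentrantMutex is presumably relocated rather than removed outright; the diffstat is limited to sys_common, so its new home is not visible here. Its ownership test rests on one small trick worth noting: the address of a thread-local serves as a cheap per-thread identifier. A standalone, runnable sketch of that trick follows, using a stable `as usize` cast in place of the original `<*const _>::addr`.

use std::thread;

/// Address of a thread-local, unique per live thread, usable as an owner id.
fn current_thread_unique_ptr() -> usize {
    // A non-drop type stays usable even during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| x as *const u8 as usize)
}

fn main() {
    let main_id = current_thread_unique_ptr();
    // Stable within one thread...
    assert_eq!(main_id, current_thread_unique_ptr());
    // ...and different across threads.
    let child_id = thread::spawn(current_thread_unique_ptr).join().unwrap();
    assert_ne!(main_id, child_id);
}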
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
deleted file mode 100644
index 8e97ce11c..000000000
--- a/library/std/src/sys_common/remutex/tests.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use crate::cell::RefCell;
-use crate::sync::Arc;
-use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
-use crate::thread;
-
-#[test]
-fn smoke() {
-    let m = ReentrantMutex::new(());
-    {
-        let a = m.lock();
-        {
-            let b = m.lock();
-            {
-                let c = m.lock();
-                assert_eq!(*c, ());
-            }
-            assert_eq!(*b, ());
-        }
-        assert_eq!(*a, ());
-    }
-}
-
-#[test]
-fn is_mutex() {
-    let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
-    let m2 = m.clone();
-    let lock = m.lock();
-    let child = thread::spawn(move || {
-        let lock = m2.lock();
-        assert_eq!(*lock.borrow(), 4950);
-    });
-    for i in 0..100 {
-        let lock = m.lock();
-        *lock.borrow_mut() += i;
-    }
-    drop(lock);
-    child.join().unwrap();
-}
-
-#[test]
-fn trylock_works() {
-    let m = Arc::new(ReentrantMutex::new(()));
-    let m2 = m.clone();
-    let _lock = m.try_lock();
-    let _lock2 = m.try_lock();
-    thread::spawn(move || {
-        let lock = m2.try_lock();
-        assert!(lock.is_none());
-    })
-    .join()
-    .unwrap();
-    let _lock3 = m.try_lock();
-}
-
-pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
-impl Drop for Answer<'_> {
-    fn drop(&mut self) {
-        *self.0.borrow_mut() = 42;
-    }
-}
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 747579f17..2672a2a75 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -117,10 +117,14 @@ pub struct Key {
 /// This value specifies no destructor by default.
 pub const INIT: StaticKey = StaticKey::new(None);
 
+// Define a sentinel value that is unlikely to be returned
+// as a TLS key (but it may be returned).
+const KEY_SENTVAL: usize = 0;
+
 impl StaticKey {
     #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")]
     pub const fn new(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> StaticKey {
-        StaticKey { key: atomic::AtomicUsize::new(0), dtor }
+        StaticKey { key: atomic::AtomicUsize::new(KEY_SENTVAL), dtor }
     }
 
     /// Gets the value associated with this TLS key
@@ -144,31 +148,36 @@ impl StaticKey {
     #[inline]
     unsafe fn key(&self) -> imp::Key {
         match self.key.load(Ordering::Relaxed) {
-            0 => self.lazy_init() as imp::Key,
+            KEY_SENTVAL => self.lazy_init() as imp::Key,
             n => n as imp::Key,
         }
     }
 
     unsafe fn lazy_init(&self) -> usize {
-        // POSIX allows the key created here to be 0, but the compare_exchange
-        // below relies on using 0 as a sentinel value to check who won the
+        // POSIX allows the key created here to be KEY_SENTVAL, but the compare_exchange
+        // below relies on using KEY_SENTVAL as a sentinel value to check who won the
         // race to set the shared TLS key. As far as I know, there is no
         // guaranteed value that cannot be returned as a posix_key_create key,
         // so there is no value we can initialize the inner key with to
         // prove that it has not yet been set. As such, we'll continue using a
-        // value of 0, but with some gyrations to make sure we have a non-0
+        // value of KEY_SENTVAL, but with some gyrations to make sure we have a non-KEY_SENTVAL
         // value returned from the creation routine.
         // FIXME: this is clearly a hack, and should be cleaned up.
         let key1 = imp::create(self.dtor);
-        let key = if key1 != 0 {
+        let key = if key1 as usize != KEY_SENTVAL {
             key1
         } else {
             let key2 = imp::create(self.dtor);
             imp::destroy(key1);
             key2
         };
-        rtassert!(key != 0);
-        match self.key.compare_exchange(0, key as usize, Ordering::SeqCst, Ordering::SeqCst) {
+        rtassert!(key as usize != KEY_SENTVAL);
+        match self.key.compare_exchange(
+            KEY_SENTVAL,
+            key as usize,
+            Ordering::SeqCst,
+            Ordering::SeqCst,
+        ) {
             // The CAS succeeded, so we've created the actual key
             Ok(_) => key as usize,
             // If someone beat us to the punch, use their key instead
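The KEY_SENTVAL rework above leaves the lazy_init protocol unchanged: whoever wins the compare_exchange publishes their key, and the loser destroys the key it just created and adopts the winner's. Below is a standalone model of that race, with a hypothetical integer allocator standing in for the platform's key-creation routine.

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const KEY_SENTVAL: usize = 0;
static KEY: AtomicUsize = AtomicUsize::new(KEY_SENTVAL);
static NEXT: AtomicUsize = AtomicUsize::new(1); // hypothetical key allocator

fn create() -> usize {
    // Stand-in for the platform key-creation call; the real code retries
    // if this ever hands back the sentinel value.
    NEXT.fetch_add(1, SeqCst)
}

fn destroy(_key: usize) {
    // Stand-in for the platform key-destruction call.
}

fn key() -> usize {
    match KEY.load(SeqCst) {
        KEY_SENTVAL => lazy_init(),
        n => n,
    }
}

fn lazy_init() -> usize {
    let key = create();
    match KEY.compare_exchange(KEY_SENTVAL, key, SeqCst, SeqCst) {
        // We won the race: everyone uses the key we just created.
        Ok(_) => key,
        // We lost: free our key and adopt the winner's.
        Err(winner) => {
            destroy(key);
            winner
        }
    }
}

fn main() {
    assert_eq!(key(), key()); // all callers converge on one key
}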
diff --git a/library/std/src/sys_common/thread_parker/futex.rs b/library/std/src/sys_common/thread_parking/futex.rs
index d9e2f39e3..588e7b278 100644
--- a/library/std/src/sys_common/thread_parker/futex.rs
+++ b/library/std/src/sys_common/thread_parking/futex.rs
@@ -35,7 +35,7 @@ pub struct Parker {
 impl Parker {
     /// Construct the futex parker. The UNIX parker implementation
     /// requires this to happen in-place.
-    pub unsafe fn new(parker: *mut Parker) {
+    pub unsafe fn new_in_place(parker: *mut Parker) {
         parker.write(Self { state: AtomicU32::new(EMPTY) });
     }
diff --git a/library/std/src/sys_common/thread_parker/generic.rs b/library/std/src/sys_common/thread_parking/generic.rs
index f3d8b34d3..3209bffe3 100644
--- a/library/std/src/sys_common/thread_parker/generic.rs
+++ b/library/std/src/sys_common/thread_parking/generic.rs
@@ -19,7 +19,7 @@ pub struct Parker {
 impl Parker {
     /// Construct the generic parker. The UNIX parker implementation
     /// requires this to happen in-place.
-    pub unsafe fn new(parker: *mut Parker) {
+    pub unsafe fn new_in_place(parker: *mut Parker) {
         parker.write(Parker {
             state: AtomicUsize::new(EMPTY),
             lock: Mutex::new(()),
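Both renames above only turn `new` into `new_in_place`; the signature keeps its raw out-pointer because the UNIX parker must be initialized at its final address and never moved afterwards. The sketch below shows the call-site pattern such an API implies; the Parker body and make_parker are placeholders, not the real std types.

use std::mem::MaybeUninit;
use std::pin::Pin;

struct Parker {
    // pthread mutex/condvar would live here in the real UNIX parker;
    // they must not move once initialized.
}

impl Parker {
    unsafe fn new_in_place(parker: *mut Parker) {
        // Initialize directly at the final address.
        parker.write(Parker {});
    }
}

fn make_parker() -> Pin<Box<Parker>> {
    let mut slot: Box<MaybeUninit<Parker>> = Box::new(MaybeUninit::uninit());
    unsafe {
        Parker::new_in_place(slot.as_mut_ptr());
        // The parker is initialized and stays pinned behind the Box from here on.
        Pin::new_unchecked(Box::from_raw(Box::into_raw(slot).cast::<Parker>()))
    }
}

fn main() {
    let parker = make_parker();
    let _stable_addr: *const Parker = &*parker;
}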
diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs
new file mode 100644
index 000000000..e98169597
--- /dev/null
+++ b/library/std/src/sys_common/thread_parking/id.rs
@@ -0,0 +1,108 @@
+//! Thread parking using thread ids.
+//!
+//! Some platforms (notably NetBSD) have thread parking primitives whose semantics
+//! match those offered by `thread::park`, with the difference that the thread to
+//! be unparked is referenced by a platform-specific thread id. Since the thread
+//! parker is constructed before that id is known, an atomic state variable is used
+//! to manage the park state and propagate the thread id. This also avoids platform
+//! calls in the case where `unpark` is called before `park`.
+
+use crate::cell::UnsafeCell;
+use crate::pin::Pin;
+use crate::sync::atomic::{
+    fence, AtomicI8,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::thread_parking::{current, park, park_timeout, unpark, ThreadId};
+use crate::time::Duration;
+
+pub struct Parker {
+    state: AtomicI8,
+    tid: UnsafeCell<Option<ThreadId>>,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+impl Parker {
+    pub fn new() -> Parker {
+        Parker { state: AtomicI8::new(EMPTY), tid: UnsafeCell::new(None) }
+    }
+
+    /// Create a new thread parker. UNIX requires this to happen in-place.
+    pub unsafe fn new_in_place(parker: *mut Parker) {
+        parker.write(Parker::new())
+    }
+
+    /// # Safety
+    /// * must always be called from the same thread
+    /// * must be called before the state is set to PARKED
+    unsafe fn init_tid(&self) {
+        // The field is only ever written to from this thread, so we don't need
+        // synchronization to read it here.
+        if self.tid.get().read().is_none() {
+            // Because this point is only reached once, before the state is set
+            // to PARKED for the first time, the non-atomic write here can not
+            // conflict with reads by other threads.
+            self.tid.get().write(Some(current()));
+            // Ensure that the write can be observed by all threads reading the
+            // state. Synchronizes with the acquire barrier in `unpark`.
+            fence(Release);
+        }
+    }
+
+    pub unsafe fn park(self: Pin<&Self>) {
+        self.init_tid();
+
+        // Changes NOTIFIED to EMPTY and EMPTY to PARKED.
+        let mut state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+        if state == PARKED {
+            // Loop to guard against spurious wakeups.
+            while state == PARKED {
+                park(self.state.as_mut_ptr().addr());
+                state = self.state.load(Acquire);
+            }
+
+            // Since the state change has already been observed with acquire
+            // ordering, the state can be reset with a relaxed store instead
+            // of a swap.
+            self.state.store(EMPTY, Relaxed);
+        }
+    }
+
+    pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+        self.init_tid();
+
+        let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1);
+        if state == PARKED {
+            park_timeout(dur, self.state.as_mut_ptr().addr());
+            // Swap to ensure that we observe all state changes with acquire
+            // ordering, even if the state has been changed after the timeout
+            // occurred.
+            self.state.swap(EMPTY, Acquire);
+        }
+    }
+
+    pub fn unpark(self: Pin<&Self>) {
+        let state = self.state.swap(NOTIFIED, Release);
+        if state == PARKED {
+            // Synchronize with the release fence in `init_tid` to observe the
+            // write to `tid`.
+            fence(Acquire);
+            // # Safety
+            // The thread id is initialized before the state is set to `PARKED`
+            // for the first time and is not written to from that point on
+            // (negating the need for an atomic read).
+            let tid = unsafe { self.tid.get().read().unwrap_unchecked() };
+            // It is possible that the waiting thread woke up because of a timeout
+            // and terminated before this call is made. This call then returns an
+            // error or wakes up an unrelated thread. The platform API and
+            // environment does allow this, however.
+            unpark(tid, self.state.as_mut_ptr().addr());
+        }
+    }
+}
+
+unsafe impl Send for Parker {}
+unsafe impl Sync for Parker {}
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parking/mod.rs
index f86a9a555..0ead6633c 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parking/mod.rs
@@ -11,11 +11,17 @@ cfg_if::cfg_if! {
     ))] {
         mod futex;
         pub use futex::Parker;
+    } else if #[cfg(any(
+        target_os = "netbsd",
+        all(target_vendor = "fortanix", target_env = "sgx"),
+    ))] {
+        mod id;
+        pub use id::Parker;
     } else if #[cfg(target_os = "solid_asp3")] {
         mod wait_flag;
         pub use wait_flag::Parker;
     } else if #[cfg(any(windows, target_family = "unix"))] {
-        pub use crate::sys::thread_parker::Parker;
+        pub use crate::sys::thread_parking::Parker;
     } else {
         mod generic;
         pub use generic::Parker;
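The new id-based parker packs its whole protocol into one atomic: a single fetch_sub both consumes a pending NOTIFIED token and announces PARKED intent. The short runnable check below exercises that state arithmetic with the same constants as the file above.

use std::sync::atomic::{AtomicI8, Ordering::Acquire};

const PARKED: i8 = -1;
const EMPTY: i8 = 0;
const NOTIFIED: i8 = 1;

fn main() {
    let state = AtomicI8::new(NOTIFIED);
    // A token was already posted by unpark: fetch_sub consumes it and the
    // caller returns without sleeping...
    assert_eq!(state.fetch_sub(1, Acquire).wrapping_sub(1), EMPTY);
    // ...while a park with no token pending moves the state to PARKED,
    // telling unpark that a real wakeup is required.
    assert_eq!(state.fetch_sub(1, Acquire).wrapping_sub(1), PARKED);
}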
diff --git a/library/std/src/sys_common/thread_parker/wait_flag.rs b/library/std/src/sys_common/thread_parking/wait_flag.rs
index 6561c1866..d0f8899a9 100644
--- a/library/std/src/sys_common/thread_parker/wait_flag.rs
+++ b/library/std/src/sys_common/thread_parking/wait_flag.rs
@@ -41,7 +41,7 @@ pub struct Parker {
 impl Parker {
     /// Construct a parker for the current thread. The UNIX parker
     /// implementation requires this to happen in-place.
-    pub unsafe fn new(parker: *mut Parker) {
+    pub unsafe fn new_in_place(parker: *mut Parker) {
         parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() })
     }