From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 library/std/src/sys/unix/locks/fuchsia_mutex.rs    | 165 +++++++++++
 library/std/src/sys/unix/locks/futex_condvar.rs    |  58 ++++
 library/std/src/sys/unix/locks/futex_mutex.rs      | 101 +++++++
 library/std/src/sys/unix/locks/futex_rwlock.rs     | 322 ++++++++++++++++++++++
 library/std/src/sys/unix/locks/mod.rs              |  31 +++
 library/std/src/sys/unix/locks/pthread_condvar.rs  | 222 +++++++++++++++
 library/std/src/sys/unix/locks/pthread_mutex.rs    | 135 +++++++++
 library/std/src/sys/unix/locks/pthread_rwlock.rs   | 173 ++++++++++++
 8 files changed, 1207 insertions(+)
 create mode 100644 library/std/src/sys/unix/locks/fuchsia_mutex.rs
 create mode 100644 library/std/src/sys/unix/locks/futex_condvar.rs
 create mode 100644 library/std/src/sys/unix/locks/futex_mutex.rs
 create mode 100644 library/std/src/sys/unix/locks/futex_rwlock.rs
 create mode 100644 library/std/src/sys/unix/locks/mod.rs
 create mode 100644 library/std/src/sys/unix/locks/pthread_condvar.rs
 create mode 100644 library/std/src/sys/unix/locks/pthread_mutex.rs
 create mode 100644 library/std/src/sys/unix/locks/pthread_rwlock.rs

diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
new file mode 100644
index 000000000..ce427599c
--- /dev/null
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -0,0 +1,165 @@
+//! A priority-inheriting mutex for Fuchsia.
+//!
+//! This is a port of the [mutex in Fuchsia's libsync]. Contrary to the original,
+//! it does not abort the process when reentrant locking is detected, but deadlocks.
+//!
+//! Priority inheritance is achieved by storing the owning thread's handle in an
+//! atomic variable. Fuchsia's futex operations support setting an owner thread
+//! for a futex, which can boost that thread's priority while the futex is waited
+//! upon.
+//!
+//! libsync is licensed under the following BSD-style licence:
+//!
+//! Copyright 2016 The Fuchsia Authors.
+//!
+//! Redistribution and use in source and binary forms, with or without
+//! modification, are permitted provided that the following conditions are
+//! met:
+//!
+//!   * Redistributions of source code must retain the above copyright
+//!     notice, this list of conditions and the following disclaimer.
+//!   * Redistributions in binary form must reproduce the above
+//!     copyright notice, this list of conditions and the following
+//!     disclaimer in the documentation and/or other materials provided
+//!     with the distribution.
+//!
+//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//! "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//! LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+//! A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+//! OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+//! SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+//! LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+//! DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+//! THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+//! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//!
+//! [mutex in Fuchsia's libsync]: https://cs.opensource.google/fuchsia/fuchsia/+/main:zircon/system/ulib/sync/mutex.c
+
+use crate::sync::atomic::{
+    AtomicU32,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::zircon::{
+    zx_futex_wait, zx_futex_wake_single_owner, zx_handle_t, zx_thread_self, ZX_ERR_BAD_HANDLE,
+    ZX_ERR_BAD_STATE, ZX_ERR_INVALID_ARGS, ZX_ERR_TIMED_OUT, ZX_ERR_WRONG_TYPE, ZX_OK,
+    ZX_TIME_INFINITE,
+};
+
+// The lowest two bits of a `zx_handle_t` are always set, so the lowest bit is used to mark the
+// mutex as contested by clearing it.
+const CONTESTED_BIT: u32 = 1;
+// This can never be a valid `zx_handle_t`.
+const UNLOCKED: u32 = 0;
+
+pub type MovableMutex = Mutex;
+
+pub struct Mutex {
+    futex: AtomicU32,
+}
+
+#[inline]
+fn to_state(owner: zx_handle_t) -> u32 {
+    owner
+}
+
+#[inline]
+fn to_owner(state: u32) -> zx_handle_t {
+    state | CONTESTED_BIT
+}
+
+#[inline]
+fn is_contested(state: u32) -> bool {
+    state & CONTESTED_BIT == 0
+}
+
+#[inline]
+fn mark_contested(state: u32) -> u32 {
+    state & !CONTESTED_BIT
+}
+
+impl Mutex {
+    #[inline]
+    pub const fn new() -> Mutex {
+        Mutex { futex: AtomicU32::new(UNLOCKED) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {}
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        let thread_self = zx_thread_self();
+        self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        let thread_self = zx_thread_self();
+        if let Err(state) =
+            self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
+        {
+            self.lock_contested(state, thread_self);
+        }
+    }
+
+    #[cold]
+    fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+        let owned_state = mark_contested(to_state(thread_self));
+        loop {
+            // Mark the mutex as contested if it is not already.
+            let contested = mark_contested(state);
+            if is_contested(state)
+                || self.futex.compare_exchange(state, contested, Relaxed, Relaxed).is_ok()
+            {
+                // The mutex has been marked as contested, wait for the state to change.
+                unsafe {
+                    match zx_futex_wait(
+                        &self.futex,
+                        AtomicU32::new(contested),
+                        to_owner(state),
+                        ZX_TIME_INFINITE,
+                    ) {
+                        ZX_OK | ZX_ERR_BAD_STATE | ZX_ERR_TIMED_OUT => (),
+                        // Note that if a thread handle is reused after its associated thread
+                        // exits without unlocking the mutex, an arbitrary thread's priority
+                        // could be boosted by the wait, but there is currently no way to
+                        // prevent that.
+                        ZX_ERR_INVALID_ARGS | ZX_ERR_BAD_HANDLE | ZX_ERR_WRONG_TYPE => {
+                            panic!(
+                                "either the current thread is trying to lock a mutex it has
+                                already locked, or the previous owner did not unlock the mutex
+                                before exiting"
+                            )
+                        }
+                        error => panic!("unexpected error in zx_futex_wait: {error}"),
+                    }
+                }
+            }
+
+            // The state has changed or a wakeup occurred, try to lock the mutex.
+            match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
+                Ok(_) => return,
+                Err(updated) => state = updated,
+            }
+        }
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        if is_contested(self.futex.swap(UNLOCKED, Release)) {
+            // The woken thread will mark the mutex as contested again when it
+            // locks it, and will itself return here on unlock; this repeats
+            // until there are no waiters left, at which point the wake below
+            // is a no-op.
+            self.wake();
+        }
+    }
+
+    #[cold]
+    fn wake(&self) {
+        unsafe {
+            zx_futex_wake_single_owner(&self.futex);
+        }
+    }
+}
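The encoding above hinges on a single Zircon invariant: the two lowest bits of a valid `zx_handle_t` are always set, so clearing bit 0 marks contention while leaving the owner handle recoverable for priority boosting. A self-contained sketch of that round-trip, using a made-up handle value in place of a real one:

    // Fuchsia-free model of the state encoding; `0b0111` is a hypothetical
    // owner handle (any valid handle has its two lowest bits set).
    fn main() {
        const CONTESTED_BIT: u32 = 1;
        let handle: u32 = 0b0111;

        let uncontested = handle;                     // to_state: store as-is
        let contested = uncontested & !CONTESTED_BIT; // mark_contested: clear bit 0

        assert!(uncontested & CONTESTED_BIT != 0);    // is_contested == false
        assert!(contested & CONTESTED_BIT == 0);      // is_contested == true

        // to_owner: setting the bit back recovers the handle in either case,
        // so the kernel always knows whose priority to boost.
        assert_eq!(contested | CONTESTED_BIT, handle);
        assert_eq!(uncontested | CONTESTED_BIT, handle);
    }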
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
new file mode 100644
index 000000000..c0576c178
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -0,0 +1,58 @@
+use super::Mutex;
+use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
+use crate::time::Duration;
+
+pub type MovableCondvar = Condvar;
+
+pub struct Condvar {
+    // The value of this atomic is simply incremented on every notification.
+    // This is used by `.wait()` to not miss any notifications after
+    // unlocking the mutex and before waiting for notifications.
+    futex: AtomicU32,
+}
+
+impl Condvar {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { futex: AtomicU32::new(0) }
+    }
+
+    // All the memory orderings here are `Relaxed`,
+    // because synchronization is done by unlocking and locking the mutex.
+
+    pub unsafe fn notify_one(&self) {
+        self.futex.fetch_add(1, Relaxed);
+        futex_wake(&self.futex);
+    }
+
+    pub unsafe fn notify_all(&self) {
+        self.futex.fetch_add(1, Relaxed);
+        futex_wake_all(&self.futex);
+    }
+
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        self.wait_optional_timeout(mutex, None);
+    }
+
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, timeout: Duration) -> bool {
+        self.wait_optional_timeout(mutex, Some(timeout))
+    }
+
+    unsafe fn wait_optional_timeout(&self, mutex: &Mutex, timeout: Option<Duration>) -> bool {
+        // Examine the notification counter _before_ we unlock the mutex.
+        let futex_value = self.futex.load(Relaxed);
+
+        // Unlock the mutex before going to sleep.
+        mutex.unlock();
+
+        // Wait, but only if there hasn't been any
+        // notification since we unlocked the mutex.
+        let r = futex_wait(&self.futex, futex_value, timeout);
+
+        // Lock the mutex again.
+        mutex.lock();
+
+        r
+    }
+}
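The notification counter is what closes the lost-wakeup window between `mutex.unlock()` and going to sleep: the sleeper snapshots the counter while still holding the mutex, and the compare built into `futex_wait` refuses to sleep if a notifier has bumped it since. A toy single-threaded model of that check (the spinning `futex_wait_model` is an invented stand-in for the real syscall, which blocks in the kernel):

    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};

    // Stand-in for futex_wait: only sleeps while the word still holds `expected`.
    // The real futex performs this comparison atomically in the kernel.
    fn futex_wait_model(futex: &AtomicU32, expected: u32) {
        while futex.load(Relaxed) == expected {
            std::hint::spin_loop(); // a real implementation blocks here
        }
    }

    fn main() {
        let futex = AtomicU32::new(0);
        let seq = futex.load(Relaxed); // snapshot taken before unlocking
        futex.fetch_add(1, Relaxed);   // a notification fires in the race window
        futex_wait_model(&futex, seq); // returns immediately: counter moved on
    }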
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
new file mode 100644
index 000000000..99ba86e5f
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -0,0 +1,101 @@
+use crate::sync::atomic::{
+    AtomicU32,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake};
+
+pub type MovableMutex = Mutex;
+
+pub struct Mutex {
+    /// 0: unlocked
+    /// 1: locked, no other threads waiting
+    /// 2: locked, and other threads waiting (contended)
+    futex: AtomicU32,
+}
+
+impl Mutex {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { futex: AtomicU32::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {}
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
+            self.lock_contended();
+        }
+    }
+
+    #[cold]
+    fn lock_contended(&self) {
+        // Spin first to speed things up if the lock is released quickly.
+        let mut state = self.spin();
+
+        // If it's unlocked now, attempt to take the lock
+        // without marking it as contended.
+        if state == 0 {
+            match self.futex.compare_exchange(0, 1, Acquire, Relaxed) {
+                Ok(_) => return, // Locked!
+                Err(s) => state = s,
+            }
+        }
+
+        loop {
+            // Put the lock in contended state.
+            // We avoid an unnecessary write if it was already set to 2,
+            // to be friendlier for the caches.
+            if state != 2 && self.futex.swap(2, Acquire) == 0 {
+                // We changed it from 0 to 2, so we just successfully locked it.
+                return;
+            }
+
+            // Wait for the futex to change state, assuming it is still 2.
+            futex_wait(&self.futex, 2, None);
+
+            // Spin again after waking up.
+            state = self.spin();
+        }
+    }
+
+    fn spin(&self) -> u32 {
+        let mut spin = 100;
+        loop {
+            // We only use `load` (and not `swap` or `compare_exchange`)
+            // while spinning, to be easier on the caches.
+            let state = self.futex.load(Relaxed);
+
+            // We stop spinning when the mutex is unlocked (0),
+            // but also when it's contended (2).
+            if state != 1 || spin == 0 {
+                return state;
+            }
+
+            crate::hint::spin_loop();
+            spin -= 1;
+        }
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        if self.futex.swap(0, Release) == 2 {
+            // We only wake up one thread. When that thread locks the mutex, it
+            // will mark the mutex as contended (2) (see lock_contended above),
+            // which makes sure that any other waiting threads will also be
+            // woken up eventually.
+            self.wake();
+        }
+    }
+
+    #[cold]
+    fn wake(&self) {
+        futex_wake(&self.futex);
+    }
+}
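The 0/1/2 protocol is independent of the blocking primitive; only the sleep/wake calls are futex-specific. A runnable toy that substitutes `thread::yield_now` for `futex_wait`/`futex_wake` (so it busy-waits, unlike the real thing) shows both the fast path and the contended `swap(2)` path:

    use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};
    use std::sync::Arc;
    use std::thread;

    // Same state machine as above: 0 = unlocked, 1 = locked, 2 = contended.
    struct ToyMutex { state: AtomicU32 }

    impl ToyMutex {
        fn lock(&self) {
            if self.state.compare_exchange(0, 1, Acquire, Relaxed).is_ok() {
                return; // fast path: 0 -> 1, uncontended
            }
            // Slow path: advertise contention by forcing the state to 2.
            while self.state.swap(2, Acquire) != 0 {
                thread::yield_now(); // a real futex would sleep here until woken
            }
            // swap returned 0: we moved 0 -> 2 and now own the lock.
        }

        fn unlock(&self) {
            // The real unlock calls futex_wake only when this swap returns 2.
            self.state.swap(0, Release);
        }
    }

    fn main() {
        let m = Arc::new(ToyMutex { state: AtomicU32::new(0) });
        let total = Arc::new(AtomicU32::new(0));
        let handles: Vec<_> = (0..4).map(|_| {
            let (m, total) = (m.clone(), total.clone());
            thread::spawn(move || for _ in 0..1000 {
                m.lock();
                total.fetch_add(1, Relaxed); // stand-in for a critical section
                m.unlock();
            })
        }).collect();
        handles.into_iter().for_each(|h| h.join().unwrap());
        assert_eq!(total.load(Relaxed), 4000);
    }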
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
new file mode 100644
index 000000000..b3bbbf743
--- /dev/null
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -0,0 +1,322 @@
+use crate::sync::atomic::{
+    AtomicU32,
+    Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
+
+pub type MovableRwLock = RwLock;
+
+pub struct RwLock {
+    // The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
+    // Bits 0..30:
+    //   0: Unlocked
+    //   1..=0x3FFF_FFFE: Locked by N readers
+    //   0x3FFF_FFFF: Write locked
+    // Bit 30: Readers are waiting on this futex.
+    // Bit 31: Writers are waiting on the writer_notify futex.
+    state: AtomicU32,
+    // The 'condition variable' to notify writers through.
+    // Incremented on every signal.
+    writer_notify: AtomicU32,
+}
+
+const READ_LOCKED: u32 = 1;
+const MASK: u32 = (1 << 30) - 1;
+const WRITE_LOCKED: u32 = MASK;
+const MAX_READERS: u32 = MASK - 1;
+const READERS_WAITING: u32 = 1 << 30;
+const WRITERS_WAITING: u32 = 1 << 31;
+
+#[inline]
+fn is_unlocked(state: u32) -> bool {
+    state & MASK == 0
+}
+
+#[inline]
+fn is_write_locked(state: u32) -> bool {
+    state & MASK == WRITE_LOCKED
+}
+
+#[inline]
+fn has_readers_waiting(state: u32) -> bool {
+    state & READERS_WAITING != 0
+}
+
+#[inline]
+fn has_writers_waiting(state: u32) -> bool {
+    state & WRITERS_WAITING != 0
+}
+
+#[inline]
+fn is_read_lockable(state: u32) -> bool {
+    // This also returns false if the counter could overflow if we tried to read lock it.
+    //
+    // We don't allow read-locking if there are readers waiting, even if the lock is unlocked
+    // and there are no writers waiting. The only situation when this happens is after unlocking,
+    // at which point the unlocking thread might be waking up writers, which have priority over readers.
+    // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
+    state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
+}
+
+#[inline]
+fn has_reached_max_readers(state: u32) -> bool {
+    state & MASK == MAX_READERS
+}
+
+impl RwLock {
+    #[inline]
+    pub const fn new() -> Self {
+        Self { state: AtomicU32::new(0), writer_notify: AtomicU32::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        self.state
+            .fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
+            .is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        let state = self.state.load(Relaxed);
+        if !is_read_lockable(state)
+            || self
+                .state
+                .compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
+                .is_err()
+        {
+            self.read_contended();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED;
+
+        // It's impossible for a reader to be waiting on a read-locked RwLock,
+        // except if there is also a writer waiting.
+        debug_assert!(!has_readers_waiting(state) || has_writers_waiting(state));
+
+        // Wake up a writer if we were the last reader and there's a writer waiting.
+        if is_unlocked(state) && has_writers_waiting(state) {
+            self.wake_writer_or_readers(state);
+        }
+    }
+
+    #[cold]
+    fn read_contended(&self) {
+        let mut state = self.spin_read();
+
+        loop {
+            // If we can lock it, lock it.
+            if is_read_lockable(state) {
+                match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
+                {
+                    Ok(_) => return, // Locked!
+                    Err(s) => {
+                        state = s;
+                        continue;
+                    }
+                }
+            }
+
+            // Check for overflow.
+            if has_reached_max_readers(state) {
+                panic!("too many active read locks on RwLock");
+            }
+
+            // Make sure the readers waiting bit is set before we go to sleep.
+            if !has_readers_waiting(state) {
+                if let Err(s) =
+                    self.state.compare_exchange(state, state | READERS_WAITING, Relaxed, Relaxed)
+                {
+                    state = s;
+                    continue;
+                }
+            }
+
+            // Wait for the state to change.
+            futex_wait(&self.state, state | READERS_WAITING, None);
+
+            // Spin again after waking up.
+            state = self.spin_read();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        self.state
+            .fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
+            .is_ok()
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
+            self.write_contended();
+        }
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED;
+
+        debug_assert!(is_unlocked(state));
+
+        if has_writers_waiting(state) || has_readers_waiting(state) {
+            self.wake_writer_or_readers(state);
+        }
+    }
+
+    #[cold]
+    fn write_contended(&self) {
+        let mut state = self.spin_write();
+
+        let mut other_writers_waiting = 0;
+
+        loop {
+            // If it's unlocked, we try to lock it.
+            if is_unlocked(state) {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state | WRITE_LOCKED | other_writers_waiting,
+                    Acquire,
+                    Relaxed,
+                ) {
+                    Ok(_) => return, // Locked!
+                    Err(s) => {
+                        state = s;
+                        continue;
+                    }
+                }
+            }
+
+            // Set the waiting bit indicating that we're waiting on it.
+            if !has_writers_waiting(state) {
+                if let Err(s) =
+                    self.state.compare_exchange(state, state | WRITERS_WAITING, Relaxed, Relaxed)
+                {
+                    state = s;
+                    continue;
+                }
+            }
+
+            // Other writers might be waiting now too, so we should make sure
+            // we keep that bit on once we manage to lock it.
+            other_writers_waiting = WRITERS_WAITING;
+
+            // Examine the notification counter before we check if `state` has changed,
+            // to make sure we don't miss any notifications.
+            let seq = self.writer_notify.load(Acquire);
+
+            // Don't go to sleep if the lock has become available,
+            // or if the writers waiting bit is no longer set.
+            state = self.state.load(Relaxed);
+            if is_unlocked(state) || !has_writers_waiting(state) {
+                continue;
+            }
+
+            // Wait for the state to change.
+            futex_wait(&self.writer_notify, seq, None);
+
+            // Spin again after waking up.
+            state = self.spin_write();
+        }
+    }
+
+    /// Wake up waiting threads after unlocking.
+    ///
+    /// If both are waiting, this will wake up only one writer, but will fall
+    /// back to waking up readers if there was no writer to wake up.
+    #[cold]
+    fn wake_writer_or_readers(&self, mut state: u32) {
+        assert!(is_unlocked(state));
+
+        // The readers waiting bit might be turned on at any point now,
+        // since readers will block when there's anything waiting.
+        // Writers will just lock the lock though, regardless of the waiting bits,
+        // so we don't have to worry about the writer waiting bit.
+        //
+        // If the lock gets locked in the meantime, we don't have to do
+        // anything, because then the thread that locked the lock will take
+        // care of waking up waiters when it unlocks.
+
+        // If only writers are waiting, wake one of them up.
+        if state == WRITERS_WAITING {
+            match self.state.compare_exchange(state, 0, Relaxed, Relaxed) {
+                Ok(_) => {
+                    self.wake_writer();
+                    return;
+                }
+                Err(s) => {
+                    // Maybe some readers are now waiting too. So, continue to the next `if`.
+                    state = s;
+                }
+            }
+        }
+
+        // If both writers and readers are waiting, leave the readers waiting
+        // and only wake up one writer.
+        if state == READERS_WAITING + WRITERS_WAITING {
+            if self.state.compare_exchange(state, READERS_WAITING, Relaxed, Relaxed).is_err() {
+                // The lock got locked. Not our problem anymore.
+                return;
+            }
+            if self.wake_writer() {
+                return;
+            }
+            // No writers were actually blocked on futex_wait, so we continue
+            // to wake up readers instead, since we can't be sure if we notified a writer.
+            state = READERS_WAITING;
+        }
+
+        // If readers are waiting, wake them all up.
+        if state == READERS_WAITING {
+            if self.state.compare_exchange(state, 0, Relaxed, Relaxed).is_ok() {
+                futex_wake_all(&self.state);
+            }
+        }
+    }
+
+    /// This wakes one writer and returns true if we woke up a writer that was
+    /// blocked on futex_wait.
+    ///
+    /// If this returns false, it might still be the case that we notified a
+    /// writer that was about to go to sleep.
+    fn wake_writer(&self) -> bool {
+        self.writer_notify.fetch_add(1, Release);
+        futex_wake(&self.writer_notify)
+        // Note that FreeBSD and DragonFlyBSD don't tell us whether they woke
+        // up any threads or not, and always return `false` here. That still
+        // results in correct behaviour: it just means readers get woken up as
+        // well in case both readers and writers were waiting.
+    }
+
+    /// Spin for a while, but stop directly at the given condition.
+    #[inline]
+    fn spin_until(&self, f: impl Fn(u32) -> bool) -> u32 {
+        let mut spin = 100; // Chosen by fair dice roll.
+        loop {
+            let state = self.state.load(Relaxed);
+            if f(state) || spin == 0 {
+                return state;
+            }
+            crate::hint::spin_loop();
+            spin -= 1;
+        }
+    }
+
+    #[inline]
+    fn spin_write(&self) -> u32 {
+        // Stop spinning when it's unlocked or when there are waiting writers, to keep things somewhat fair.
+        self.spin_until(|state| is_unlocked(state) || has_writers_waiting(state))
+    }
+
+    #[inline]
+    fn spin_read(&self) -> u32 {
+        // Stop spinning when it's unlocked or read locked, or when there are waiting threads.
+        self.spin_until(|state| {
+            !is_write_locked(state) || has_readers_waiting(state) || has_writers_waiting(state)
+        })
+    }
+}
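To make the bit layout concrete: the 30-bit counter, the all-ones write-locked value, and the two waiting flags never disturb one another, which is exactly what lets a single `u32` serve as the whole lock state. A quick standalone check of the constants copied from the file above:

    fn main() {
        const MASK: u32 = (1 << 30) - 1;      // bits 0..30: reader count
        const WRITE_LOCKED: u32 = MASK;       // counter at all-ones means a writer
        const READERS_WAITING: u32 = 1 << 30;
        const WRITERS_WAITING: u32 = 1 << 31;

        let three_readers: u32 = 3;
        assert_eq!(three_readers & MASK, 3);

        // A write lock plus both waiting flags fits in one u32, and the
        // flags leave the counter bits untouched.
        let state = WRITE_LOCKED | READERS_WAITING | WRITERS_WAITING;
        assert_eq!(state & MASK, WRITE_LOCKED);
        assert_ne!(state & READERS_WAITING, 0);
        assert_ne!(state & WRITERS_WAITING, 0);
    }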
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
new file mode 100644
index 000000000..f5f92f693
--- /dev/null
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -0,0 +1,31 @@
+cfg_if::cfg_if! {
+    if #[cfg(any(
+        target_os = "linux",
+        target_os = "android",
+        all(target_os = "emscripten", target_feature = "atomics"),
+        target_os = "freebsd",
+        target_os = "openbsd",
+        target_os = "dragonfly",
+    ))] {
+        mod futex_mutex;
+        mod futex_rwlock;
+        mod futex_condvar;
+        pub(crate) use futex_mutex::{Mutex, MovableMutex};
+        pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+        pub(crate) use futex_condvar::MovableCondvar;
+    } else if #[cfg(target_os = "fuchsia")] {
+        mod fuchsia_mutex;
+        mod futex_rwlock;
+        mod futex_condvar;
+        pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
+        pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+        pub(crate) use futex_condvar::MovableCondvar;
+    } else {
+        mod pthread_mutex;
+        mod pthread_rwlock;
+        mod pthread_condvar;
+        pub(crate) use pthread_mutex::{Mutex, MovableMutex};
+        pub(crate) use pthread_rwlock::{RwLock, MovableRwLock};
+        pub(crate) use pthread_condvar::MovableCondvar;
+    }
+}
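The module selects a backend entirely at compile time. Outside of std the same dispatch pattern is available through the `cfg-if` crate that this macro comes from; a minimal sketch (assumes `cfg-if = "1"` in Cargo.toml, and the `backend` function is invented for illustration):

    cfg_if::cfg_if! {
        if #[cfg(unix)] {
            fn backend() -> &'static str { "pthread/futex" }
        } else {
            fn backend() -> &'static str { "fallback" }
        }
    }

    fn main() {
        println!("lock backend: {}", backend());
    }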
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
new file mode 100644
index 000000000..abf27e7db
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -0,0 +1,222 @@
+use crate::cell::UnsafeCell;
+use crate::sys::locks::{pthread_mutex, Mutex};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+use crate::time::Duration;
+
+pub struct Condvar {
+    inner: UnsafeCell<libc::pthread_cond_t>,
+}
+
+pub(crate) type MovableCondvar = LazyBox<Condvar>;
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+const TIMESPEC_MAX: libc::timespec =
+    libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
+    if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
+}
+
+impl LazyInit for Condvar {
+    fn init() -> Box<Self> {
+        let mut condvar = Box::new(Self::new());
+        unsafe { condvar.init() };
+        condvar
+    }
+}
+
+impl Condvar {
+    pub const fn new() -> Condvar {
+        // Might be moved to a different address, so it is better to avoid
+        // initialization of potentially opaque OS data before it lands.
+        Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+    }
+
+    #[cfg(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "watchos",
+        target_os = "l4re",
+        target_os = "android",
+        target_os = "redox"
+    ))]
+    unsafe fn init(&mut self) {}
+
+    // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet,
+    // so on that platform, init() should always be called.
+    // Moreover, that platform does not have pthread_condattr_setclock support,
+    // hence that initialization should be skipped as well.
+    //
+    // Similar story for the 3DS (horizon).
+    #[cfg(any(target_os = "espidf", target_os = "horizon"))]
+    unsafe fn init(&mut self) {
+        let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
+        assert_eq!(r, 0);
+    }
+
+    #[cfg(not(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "watchos",
+        target_os = "l4re",
+        target_os = "android",
+        target_os = "redox",
+        target_os = "espidf",
+        target_os = "horizon"
+    )))]
+    unsafe fn init(&mut self) {
+        use crate::mem::MaybeUninit;
+        let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+        let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+        assert_eq!(r, 0);
+        let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+        assert_eq!(r, 0);
+        let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
+        assert_eq!(r, 0);
+        let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+        assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn notify_one(&self) {
+        let r = libc::pthread_cond_signal(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn notify_all(&self) {
+        let r = libc::pthread_cond_broadcast(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn wait(&self, mutex: &Mutex) {
+        let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+        debug_assert_eq!(r, 0);
+    }
+
+    // This implementation is used on systems that support pthread_condattr_setclock,
+    // where we configure the condition variable to use the monotonic clock (instead of
+    // the default system clock). This approach avoids all problems that result
+    // from changes made to the system time.
+    #[cfg(not(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "watchos",
+        target_os = "android",
+        target_os = "espidf",
+        target_os = "horizon"
+    )))]
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        use crate::mem;
+
+        let mut now: libc::timespec = mem::zeroed();
+        let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
+        assert_eq!(r, 0);
+
+        // Nanosecond calculations can't overflow because both values are below 1e9.
+        let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
+
+        let sec = saturating_cast_to_time_t(dur.as_secs())
+            .checked_add((nsec / 1_000_000_000) as libc::time_t)
+            .and_then(|s| s.checked_add(now.tv_sec));
+        let nsec = nsec % 1_000_000_000;
+
+        let timeout =
+            sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
+
+        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+        assert!(r == libc::ETIMEDOUT || r == 0);
+        r == 0
+    }
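The timeout path above converts a relative `Duration` into an absolute `CLOCK_MONOTONIC` deadline, saturating to `TIMESPEC_MAX` on overflow so that "too far in the future" degrades into "wait practically forever". A standalone sketch of that arithmetic against the `libc` crate (`deadline_after` is an invented name; error handling trimmed):

    use std::time::Duration;

    const TIMESPEC_MAX: libc::timespec =
        libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };

    fn deadline_after(dur: Duration) -> libc::timespec {
        let mut now = libc::timespec { tv_sec: 0, tv_nsec: 0 };
        unsafe { libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now) };

        // Both addends are < 1e9, so this sum cannot overflow a u32.
        let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
        let sec = (dur.as_secs().min(<libc::time_t>::MAX as u64) as libc::time_t)
            .checked_add((nsec / 1_000_000_000) as libc::time_t)
            .and_then(|s| s.checked_add(now.tv_sec));

        // On overflow, waiting "forever" is a faithful approximation.
        sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: (nsec % 1_000_000_000) as _ })
            .unwrap_or(TIMESPEC_MAX)
    }

    fn main() {
        let ts = deadline_after(Duration::from_millis(1500));
        println!("deadline: {}s {}ns", ts.tv_sec, ts.tv_nsec);
    }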
+    // This implementation is modeled after libcxx's condition_variable:
+    // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
+    // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
+    #[cfg(any(
+        target_os = "macos",
+        target_os = "ios",
+        target_os = "watchos",
+        target_os = "android",
+        target_os = "espidf",
+        target_os = "horizon"
+    ))]
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
+        use crate::ptr;
+        use crate::time::Instant;
+
+        // 1000 years
+        let max_dur = Duration::from_secs(1000 * 365 * 86400);
+
+        if dur > max_dur {
+            // The macOS implementation of `pthread_cond_timedwait` is buggy
+            // with very long durations. When the duration is greater than
+            // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+            // in macOS Sierra returns error 316.
+            //
+            // This program demonstrates the issue:
+            // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+            //
+            // To work around this issue, and possible bugs of other OSes, the timeout
+            // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
+            // because of spurious wakeups.
+            dur = max_dur;
+        }
+
+        // First, figure out what time it currently is, in both system and
+        // stable time. pthread_cond_timedwait uses system time, but we want to
+        // report timeout based on stable time.
+        let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
+        let stable_now = Instant::now();
+        let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
+        debug_assert_eq!(r, 0);
+
+        let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
+        let extra = (nsec / 1_000_000_000) as libc::time_t;
+        let nsec = nsec % 1_000_000_000;
+        let seconds = saturating_cast_to_time_t(dur.as_secs());
+
+        let timeout = sys_now
+            .tv_sec
+            .checked_add(extra)
+            .and_then(|s| s.checked_add(seconds))
+            .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
+            .unwrap_or(TIMESPEC_MAX);
+
+        // And wait!
+        let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+        debug_assert!(r == libc::ETIMEDOUT || r == 0);
+
+        // ETIMEDOUT is not a totally reliable method of determining timeout due
+        // to clock shifts, so do the check ourselves.
+        stable_now.elapsed() < dur
+    }
+
+    #[inline]
+    #[cfg(not(target_os = "dragonfly"))]
+    unsafe fn destroy(&mut self) {
+        let r = libc::pthread_cond_destroy(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    #[cfg(target_os = "dragonfly")]
+    unsafe fn destroy(&mut self) {
+        let r = libc::pthread_cond_destroy(self.inner.get());
+        // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+        // a condvar that was just initialized with
+        // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+        // pthread_cond_init() is called, this behaviour no longer occurs.
+        debug_assert!(r == 0 || r == libc::EINVAL);
+    }
+}
+
+impl Drop for Condvar {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { self.destroy() };
+    }
+}
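At the public API level this care shows up as timeouts judged against monotonic time, so a wait with no notifier times out correctly even if the wall clock is changed meanwhile. For example, with std's `Condvar` (using `wait_timeout_while` so spurious wakeups don't cut the wait short):

    use std::sync::{Condvar, Mutex};
    use std::time::{Duration, Instant};

    fn main() {
        let pair = (Mutex::new(()), Condvar::new());
        let guard = pair.0.lock().unwrap();

        let start = Instant::now();
        // No one ever notifies, so this must report a timeout...
        let (_guard, res) = pair
            .1
            .wait_timeout_while(guard, Duration::from_millis(100), |_| true)
            .unwrap();

        assert!(res.timed_out());
        // ...and the elapsed time is measured by a monotonic clock.
        assert!(start.elapsed() >= Duration::from_millis(100));
    }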
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
new file mode 100644
index 000000000..98afee69b
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -0,0 +1,135 @@
+use crate::cell::UnsafeCell;
+use crate::mem::{forget, MaybeUninit};
+use crate::sys::cvt_nz;
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct Mutex {
+    inner: UnsafeCell<libc::pthread_mutex_t>,
+}
+
+pub(crate) type MovableMutex = LazyBox<Mutex>;
+
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
+    m.inner.get()
+}
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+impl LazyInit for Mutex {
+    fn init() -> Box<Self> {
+        let mut mutex = Box::new(Self::new());
+        unsafe { mutex.init() };
+        mutex
+    }
+
+    fn destroy(mutex: Box<Self>) {
+        // We're not allowed to pthread_mutex_destroy a locked mutex,
+        // so check first if it's unlocked.
+        if unsafe { mutex.try_lock() } {
+            unsafe { mutex.unlock() };
+            drop(mutex);
+        } else {
+            // The mutex is locked. This happens if a MutexGuard is leaked.
+            // In this case, we just leak the Mutex too.
+            forget(mutex);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
+}
+
+impl Mutex {
+    pub const fn new() -> Mutex {
+        // Might be moved to a different address, so it is better to avoid
+        // initialization of potentially opaque OS data before it lands.
+        // Be very careful using this newly constructed `Mutex`: reentrant
+        // locking is undefined behavior until `init` is called!
+        Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&mut self) {
+        // Issue #33770
+        //
+        // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+        // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+        // try to re-lock it from the same thread when you already hold a lock
+        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
+        // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+        // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
+        // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
+        // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
+        // a Mutex where re-locking is UB.
+        //
+        // In practice, glibc takes advantage of this undefined behavior to
+        // implement hardware lock elision, which uses hardware transactional
+        // memory to avoid acquiring the lock. While a transaction is in
+        // progress, the lock appears to be unlocked. This isn't a problem for
+        // other threads since the transactional memory will abort if a conflict
+        // is detected, however no abort is generated when re-locking from the
+        // same thread.
+        //
+        // Since locking the same mutex twice will result in two aliasing &mut
+        // references, we instead create the mutex with type
+        // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
+        // re-lock it from the same thread, thus avoiding undefined behavior.
+        let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+        cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+        let attr = PthreadMutexAttr(&mut attr);
+        cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+            .unwrap();
+        cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+    }
+
+    #[inline]
+    pub unsafe fn lock(&self) {
+        let r = libc::pthread_mutex_lock(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        let r = libc::pthread_mutex_unlock(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn try_lock(&self) -> bool {
+        libc::pthread_mutex_trylock(self.inner.get()) == 0
+    }
+
+    #[inline]
+    #[cfg(not(target_os = "dragonfly"))]
+    unsafe fn destroy(&mut self) {
+        let r = libc::pthread_mutex_destroy(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    #[cfg(target_os = "dragonfly")]
+    unsafe fn destroy(&mut self) {
+        let r = libc::pthread_mutex_destroy(self.inner.get());
+        // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+        // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+        // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+        // this behaviour no longer occurs.
+        debug_assert!(r == 0 || r == libc::EINVAL);
+    }
+}
+
+impl Drop for Mutex {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { self.destroy() };
+    }
+}
+
+pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
+
+impl Drop for PthreadMutexAttr<'_> {
+    fn drop(&mut self) {
+        unsafe {
+            let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr());
+            debug_assert_eq!(result, 0);
+        }
+    }
+}
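A consequence visible through std's public `Mutex`: with the underlying pthread mutex forced to `PTHREAD_MUTEX_NORMAL`, a second acquisition from the owning thread is never handed out, so the reentrant case deadlocks instead of producing two aliasing guards. `try_lock` makes the refusal observable without hanging:

    use std::sync::Mutex;

    fn main() {
        let m = Mutex::new(0_i32);
        let _guard = m.lock().unwrap();

        // A blocking m.lock() here would deadlock; try_lock reports WouldBlock
        // instead of ever producing a second, aliasing guard.
        assert!(m.try_lock().is_err());
    }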
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
new file mode 100644
index 000000000..adfe2a883
--- /dev/null
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -0,0 +1,173 @@
+use crate::cell::UnsafeCell;
+use crate::mem::forget;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+
+pub struct RwLock {
+    inner: UnsafeCell<libc::pthread_rwlock_t>,
+    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
+    num_readers: AtomicUsize,
+}
+
+pub(crate) type MovableRwLock = LazyBox<RwLock>;
+
+unsafe impl Send for RwLock {}
+unsafe impl Sync for RwLock {}
+
+impl LazyInit for RwLock {
+    fn init() -> Box<Self> {
+        Box::new(Self::new())
+    }
+
+    fn destroy(mut rwlock: Box<Self>) {
+        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
+        // so check first if it's unlocked.
+        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
+            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
+            // In this case, we just leak the RwLock too.
+            forget(rwlock);
+        }
+    }
+
+    fn cancel_init(_: Box<Self>) {
+        // In this case, we can just drop it without any checks,
+        // since it cannot have been locked yet.
+    }
+}
+
+impl RwLock {
+    pub const fn new() -> RwLock {
+        RwLock {
+            inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+            write_locked: UnsafeCell::new(false),
+            num_readers: AtomicUsize::new(0),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn read(&self) {
+        let r = libc::pthread_rwlock_rdlock(self.inner.get());
+
+        // According to POSIX, when a thread tries to acquire this read lock
+        // while it already holds the write lock
+        // (or vice versa, or tries to acquire the write lock twice),
+        // "the call shall either deadlock or return [EDEADLK]"
+        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
+        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
+        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
+        // got the lock.
+        //
+        // However, (at least) glibc before version 2.25 does not conform to this spec,
+        // and can return `r == 0` even when this thread already holds the write lock.
+        // We thus check for this situation ourselves and panic when detecting that a thread
+        // got the write lock more than once, or got a read and a write lock.
+        if r == libc::EAGAIN {
+            panic!("rwlock maximum reader count exceeded");
+        } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+            // data races.
+            if r == 0 {
+                // `pthread_rwlock_rdlock` succeeded when it should not have.
+                self.raw_unlock();
+            }
+            panic!("rwlock read lock would result in deadlock");
+        } else {
+            // POSIX does not make guarantees about all the errors that may be returned.
+            // See issue #94705 for more details.
+            assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
+            self.num_readers.fetch_add(1, Ordering::Relaxed);
+        }
+    }
+
+    #[inline]
+    pub unsafe fn try_read(&self) -> bool {
+        let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+        if r == 0 {
+            if *self.write_locked.get() {
+                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
+                self.raw_unlock();
+                false
+            } else {
+                self.num_readers.fetch_add(1, Ordering::Relaxed);
+                true
+            }
+        } else {
+            false
+        }
+    }
+
+    #[inline]
+    pub unsafe fn write(&self) {
+        let r = libc::pthread_rwlock_wrlock(self.inner.get());
+        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
+        // we also need to check that there are no readers (tracked in `num_readers`).
+        if r == libc::EDEADLK
+            || (r == 0 && *self.write_locked.get())
+            || self.num_readers.load(Ordering::Relaxed) != 0
+        {
+            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+            // data races.
+            if r == 0 {
+                // `pthread_rwlock_wrlock` succeeded when it should not have.
+                self.raw_unlock();
+            }
+            panic!("rwlock write lock would result in deadlock");
+        } else {
+            // According to POSIX, for a properly initialized rwlock this can only
+            // return EDEADLK or 0. We rely on that.
+            debug_assert_eq!(r, 0);
+        }
+        *self.write_locked.get() = true;
+    }
+
+    #[inline]
+    pub unsafe fn try_write(&self) -> bool {
+        let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+        if r == 0 {
+            if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+                // `pthread_rwlock_trywrlock` succeeded when it should not have.
+                self.raw_unlock();
+                false
+            } else {
+                *self.write_locked.get() = true;
+                true
+            }
+        } else {
+            false
+        }
+    }
+
+    #[inline]
+    unsafe fn raw_unlock(&self) {
+        let r = libc::pthread_rwlock_unlock(self.inner.get());
+        debug_assert_eq!(r, 0);
+    }
+
+    #[inline]
+    pub unsafe fn read_unlock(&self) {
+        debug_assert!(!*self.write_locked.get());
+        self.num_readers.fetch_sub(1, Ordering::Relaxed);
+        self.raw_unlock();
+    }
+
+    #[inline]
+    pub unsafe fn write_unlock(&self) {
+        debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+        debug_assert!(*self.write_locked.get());
+        *self.write_locked.get() = false;
+        self.raw_unlock();
+    }
+
+    #[inline]
+    unsafe fn destroy(&mut self) {
+        let r = libc::pthread_rwlock_destroy(self.inner.get());
+        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+        // rwlock that was just initialized with
+        // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+        if cfg!(target_os = "dragonfly") {
+            debug_assert!(r == 0 || r == libc::EINVAL);
+        } else {
+            debug_assert_eq!(r, 0);
+        }
+    }
+}
+
+impl Drop for RwLock {
+    #[inline]
+    fn drop(&mut self) {
+        unsafe { self.destroy() };
+    }
+}
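The `write_locked`/`num_readers` bookkeeping above is what turns glibc's silent misbehavior into a panic, and it backs the guarantee std's public `RwLock` gives: a thread holding the write lock can never also obtain a read lock. That refusal can be observed safely with `try_read`:

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(5);
        let _w = lock.write().unwrap();

        // A blocking lock.read() here would deadlock (or panic where the
        // misbehavior described above is detected); try_read reports
        // WouldBlock instead.
        assert!(lock.try_read().is_err());
    }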