Diffstat:
 library/std/src/sys/unix/locks/fuchsia_mutex.rs   |  18
 library/std/src/sys/unix/locks/futex_condvar.rs   |   6
 library/std/src/sys/unix/locks/futex_mutex.rs     |   6
 library/std/src/sys/unix/locks/futex_rwlock.rs    |  10
 library/std/src/sys/unix/locks/mod.rs             |  18
 library/std/src/sys/unix/locks/pthread_condvar.rs | 179
 library/std/src/sys/unix/locks/pthread_mutex.rs   | 134
 library/std/src/sys/unix/locks/pthread_rwlock.rs  | 148
 library/std/src/sys/unix/time.rs                  |  25
 library/std/src/sys/unix/weak.rs                  |  37
 10 files changed, 320 insertions(+), 261 deletions(-)
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index 117611ce4..5d89e5a13 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -53,8 +53,6 @@ const CONTESTED_BIT: u32 = 1;
// This can never be a valid `zx_handle_t`.
const UNLOCKED: u32 = 0;
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
futex: AtomicU32,
}
@@ -86,23 +84,27 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let thread_self = zx_thread_self();
+ pub fn try_lock(&self) -> bool {
+ let thread_self = unsafe { zx_thread_self() };
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
- let thread_self = zx_thread_self();
+ pub fn lock(&self) {
+ let thread_self = unsafe { zx_thread_self() };
if let Err(state) =
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
{
- self.lock_contested(state, thread_self);
+ unsafe {
+ self.lock_contested(state, thread_self);
+ }
}
}
+ /// # Safety
+ /// `thread_self` must be the handle for the current thread.
#[cold]
- fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+ unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
let owned_state = mark_contested(to_state(thread_self));
loop {
// Mark the mutex as contested if it is not already.
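The fast path above is a single compare-exchange: swing the futex word from UNLOCKED to a state derived from the owning thread's handle, and fall into the contended path only on failure. A minimal, platform-neutral sketch of that shape (the owner states below are arbitrary nonzero values, not real zx_handle_t handles):

    use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};

    const UNLOCKED: u32 = 0;

    // Toy version of the fast path: CAS the futex word from UNLOCKED to an
    // owner-tagged state; a failed CAS means "contended" (where the real code
    // calls lock_contested and eventually sleeps on the futex).
    struct ToyMutex {
        futex: AtomicU32,
    }

    impl ToyMutex {
        const fn new() -> Self {
            Self { futex: AtomicU32::new(UNLOCKED) }
        }

        fn try_lock(&self, owner_state: u32) -> bool {
            self.futex.compare_exchange(UNLOCKED, owner_state, Acquire, Relaxed).is_ok()
        }

        fn unlock(&self, owner_state: u32) {
            // Release pairs with the Acquire CAS in try_lock.
            let _ = self.futex.compare_exchange(owner_state, UNLOCKED, Release, Relaxed);
        }
    }

    fn main() {
        let m = ToyMutex::new();
        assert!(m.try_lock(2)); // hypothetical owner state, not a real handle
        assert!(!m.try_lock(4)); // a second owner fails while the lock is held
        m.unlock(2);
        assert!(m.try_lock(4));
    }
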
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
index c0576c178..4bd65dd25 100644
--- a/library/std/src/sys/unix/locks/futex_condvar.rs
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -3,8 +3,6 @@ use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
use crate::time::Duration;
-pub type MovableCondvar = Condvar;
-
pub struct Condvar {
// The value of this atomic is simply incremented on every notification.
// This is used by `.wait()` to not miss any notifications after
@@ -21,12 +19,12 @@ impl Condvar {
// All the memory orderings here are `Relaxed`,
// because synchronization is done by unlocking and locking the mutex.
- pub unsafe fn notify_one(&self) {
+ pub fn notify_one(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake(&self.futex);
}
- pub unsafe fn notify_all(&self) {
+ pub fn notify_all(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake_all(&self.futex);
}
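The condvar here is just a notification counter: notify bumps it, and a waiter snapshots it before unlocking its mutex so that futex_wait can refuse to sleep if a notification landed in between. A sketch of that protocol with the futex syscalls stubbed out (std's futex_wait/futex_wake are internal, so this only models the counter):

    use std::sync::atomic::{AtomicU32, Ordering::Relaxed};

    struct ToyCondvar {
        futex: AtomicU32,
    }

    impl ToyCondvar {
        // Record that a notification happened; the real code follows this
        // with futex_wake(&self.futex) to rouse one sleeper.
        fn notify_one(&self) {
            self.futex.fetch_add(1, Relaxed);
        }

        // A waiter loads this *before* unlocking its mutex; futex_wait then
        // sleeps only if the counter is still unchanged, so a notification
        // arriving in the unlock-to-wait window is never lost.
        fn pre_wait_snapshot(&self) -> u32 {
            self.futex.load(Relaxed)
        }
    }
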
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 33b13dad4..c01229586 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake};
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
/// 0: unlocked
/// 1: locked, no other threads waiting
@@ -20,12 +18,12 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
self.lock_contended();
}
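A sketch of the same state machine with the futex wait replaced by spinning, so it runs anywhere; the real type adds a third state (2: locked with waiters) so that unlock only pays for a futex_wake when a sleeper may actually exist:

    use std::hint::spin_loop;
    use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};

    // 0: unlocked, 1: locked. (No waiter tracking in this toy.)
    struct SpinMutex {
        state: AtomicU32,
    }

    impl SpinMutex {
        const fn new() -> Self {
            Self { state: AtomicU32::new(0) }
        }

        fn lock(&self) {
            while self.state.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
                spin_loop(); // the real lock_contended parks in futex_wait here
            }
        }

        fn unlock(&self) {
            self.state.store(0, Release);
        }
    }
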
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index 0cc92244e..aa0de9002 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
-pub type MovableRwLock = RwLock;
-
pub struct RwLock {
// The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
// Bits 0..30:
@@ -70,14 +68,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let state = self.state.load(Relaxed);
if !is_read_lockable(state)
|| self
@@ -144,14 +142,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
self.write_contended();
}
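The constants below are an illustrative reconstruction of the layout described in the state comment above (a 30-bit reader count plus two wait flags); they are assumptions consistent with that comment, not lines copied from the file:

    const MASK: u32 = (1 << 30) - 1; // bits 0..30: reader count
    const WRITE_LOCKED: u32 = MASK; // a saturated reader field means "write-locked"
    const MAX_READERS: u32 = MASK - 1;
    const READERS_WAITING: u32 = 1 << 30;
    const WRITERS_WAITING: u32 = 1 << 31;
    const READ_LOCKED: u32 = 1; // each reader adds 1 to the state word

    fn is_unlocked(state: u32) -> bool {
        state & MASK == 0
    }

    fn is_read_lockable(state: u32) -> bool {
        // Room for another reader, not write-locked, and no writer waiting
        // (so writers are not starved). Illustrative; the source's exact
        // conditions may differ.
        state & MASK < MAX_READERS && state & WRITERS_WAITING == 0
    }
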
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index 9bb314b70..b2e0e49ad 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -10,22 +10,22 @@ cfg_if::cfg_if! {
mod futex_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else if #[cfg(target_os = "fuchsia")] {
mod fuchsia_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use fuchsia_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else {
mod pthread_mutex;
mod pthread_rwlock;
mod pthread_condvar;
- pub(crate) use pthread_mutex::{Mutex, MovableMutex};
- pub(crate) use pthread_rwlock::MovableRwLock;
- pub(crate) use pthread_condvar::MovableCondvar;
+ pub(crate) use pthread_mutex::Mutex;
+ pub(crate) use pthread_rwlock::RwLock;
+ pub(crate) use pthread_condvar::Condvar;
}
}
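The dispatch pattern, minimized: every backend module exports the same type names, so one conditional re-export gives the rest of the crate a single stable path regardless of platform. The same shape with plain #[cfg] attributes, which is roughly what cfg_if! expands to:

    // Linux/Android pick the futex backend; everything else is a stand-in here.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    mod imp {
        pub struct Mutex; // futex-based
    }
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    mod imp {
        pub struct Mutex; // pthread-based
    }
    pub use imp::Mutex; // callers name one path, whatever the backend
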
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 4741c0c67..1ddb09905 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -1,17 +1,17 @@
use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
use crate::sys::locks::{pthread_mutex, Mutex};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::time::Duration;
+struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
+
pub struct Condvar {
- inner: UnsafeCell<libc::pthread_cond_t>,
+ inner: LazyBox<AllocatedCondvar>,
+ mutex: AtomicPtr<libc::pthread_mutex_t>,
}
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
const TIMESPEC_MAX: libc::timespec =
libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
@@ -19,81 +19,104 @@ fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
}
-impl LazyInit for Condvar {
+#[inline]
+fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
+ c.inner.0.get()
+}
+
+unsafe impl Send for AllocatedCondvar {}
+unsafe impl Sync for AllocatedCondvar {}
+
+impl LazyInit for AllocatedCondvar {
fn init() -> Box<Self> {
- let mut condvar = Box::new(Self::new());
- unsafe { condvar.init() };
+ let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ // `pthread_condattr_setclock` is unfortunately not supported on these platforms.
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+            // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet,
+            // so on that platform pthread_cond_init() must always be called explicitly.
+            // Moreover, that platform lacks pthread_condattr_setclock support,
+            // so that part of the initialization is skipped as well.
+            //
+            // The same applies to the 3DS (horizon).
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ }
+ }
+
condvar
}
}
-impl Condvar {
- pub const fn new() -> Condvar {
- // Might be moved and address is changing it is better to avoid
- // initialization of potentially opaque OS data before it landed
- Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+impl Drop for AllocatedCondvar {
+ #[inline]
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+ // a condvar that was just initialized with
+ // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+ // pthread_cond_init() is called, this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))]
- unsafe fn init(&mut self) {}
-
- // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
- // So on that platform, init() should always be called
- // Moreover, that platform does not have pthread_condattr_setclock support,
- // hence that initialization should be skipped as well
- //
- // Similar story for the 3DS (horizon).
- #[cfg(any(target_os = "espidf", target_os = "horizon"))]
- unsafe fn init(&mut self) {
- let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
- assert_eq!(r, 0);
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
}
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox",
- target_os = "espidf",
- target_os = "horizon"
- )))]
- unsafe fn init(&mut self) {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
+ #[inline]
+ fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
+        // Relaxed is okay here because we never read through `self.mutex`, and only use it to
+        // compare addresses.
+ match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
+ Ok(_) => {} // Stored the address
+ Err(n) if n == mutex => {} // Lost a race to store the same address
+ _ => panic!("attempted to use a condition variable with two mutexes"),
+ }
}
#[inline]
- pub unsafe fn notify_one(&self) {
- let r = libc::pthread_cond_signal(self.inner.get());
+ pub fn notify_one(&self) {
+ let r = unsafe { libc::pthread_cond_signal(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
- pub unsafe fn notify_all(&self) {
- let r = libc::pthread_cond_broadcast(self.inner.get());
+ pub fn notify_all(&self) {
+ let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+ let r = libc::pthread_cond_wait(raw(self), mutex);
debug_assert_eq!(r, 0);
}
@@ -112,6 +135,9 @@ impl Condvar {
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
use crate::mem;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
let mut now: libc::timespec = mem::zeroed();
let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
assert_eq!(r, 0);
@@ -127,7 +153,7 @@ impl Condvar {
let timeout =
sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
assert!(r == libc::ETIMEDOUT || r == 0);
r == 0
}
@@ -144,9 +170,11 @@ impl Condvar {
target_os = "horizon"
))]
pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
- use crate::ptr;
use crate::time::Instant;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
// 1000 years
let max_dur = Duration::from_secs(1000 * 365 * 86400);
@@ -187,36 +215,11 @@ impl Condvar {
.unwrap_or(TIMESPEC_MAX);
// And wait!
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
debug_assert!(r == libc::ETIMEDOUT || r == 0);
// ETIMEDOUT is not a totally reliable method of determining timeout due
// to clock shifts, so do the check ourselves
stable_now.elapsed() < dur
}
-
- #[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- // On DragonFly pthread_cond_destroy() returns EINVAL if called on
- // a condvar that was just initialized with
- // libc::PTHREAD_COND_INITIALIZER. Once it is used or
- // pthread_cond_init() is called, this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
- }
-}
-
-impl Drop for Condvar {
- #[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
- }
}
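The new mutex: AtomicPtr<...> field enforces that a condvar is only ever paired with one mutex, which POSIX requires but does not check. The guard works for any address-identity check; a self-contained sketch (the BoundTo type is hypothetical, not part of std):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    struct BoundTo<T> {
        addr: AtomicPtr<T>,
    }

    impl<T> BoundTo<T> {
        const fn new() -> Self {
            Self { addr: AtomicPtr::new(ptr::null_mut()) }
        }

        // Relaxed suffices: the pointer is only compared, never dereferenced.
        fn verify(&self, p: *mut T) {
            match self.addr.compare_exchange(ptr::null_mut(), p, Relaxed, Relaxed) {
                Ok(_) => {}                  // first use: remember the address
                Err(prev) if prev == p => {} // same partner as before
                _ => panic!("used with two different partners"),
            }
        }
    }
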
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 5964935dd..8a78bc1fd 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -3,56 +3,24 @@ use crate::mem::{forget, MaybeUninit};
use crate::sys::cvt_nz;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+struct AllocatedMutex(UnsafeCell<libc::pthread_mutex_t>);
+
pub struct Mutex {
- inner: UnsafeCell<libc::pthread_mutex_t>,
+ inner: LazyBox<AllocatedMutex>,
}
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
#[inline]
pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
- m.inner.get()
+ m.inner.0.get()
}
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
+unsafe impl Send for AllocatedMutex {}
+unsafe impl Sync for AllocatedMutex {}
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
fn init() -> Box<Self> {
- let mut mutex = Box::new(Self::new());
- unsafe { mutex.init() };
- mutex
- }
-
- fn destroy(mutex: Box<Self>) {
- // We're not allowed to pthread_mutex_destroy a locked mutex,
- // so check first if it's unlocked.
- if unsafe { mutex.try_lock() } {
- unsafe { mutex.unlock() };
- drop(mutex);
- } else {
- // The mutex is locked. This happens if a MutexGuard is leaked.
- // In this case, we just leak the Mutex too.
- forget(mutex);
- }
- }
-
- fn cancel_init(_: Box<Self>) {
- // In this case, we can just drop it without any checks,
- // since it cannot have been locked yet.
- }
-}
+ let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
-impl Mutex {
- pub const fn new() -> Mutex {
- // Might be moved to a different address, so it is better to avoid
- // initialization of potentially opaque OS data before it landed.
- // Be very careful using this newly constructed `Mutex`, reentrant
- // locking is undefined behavior until `init` is called!
- Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
- }
- #[inline]
- unsafe fn init(&mut self) {
// Issue #33770
//
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
@@ -77,49 +45,77 @@ impl Mutex {
// references, we instead create the mutex with type
// PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
// re-lock it from the same thread, thus avoiding undefined behavior.
- let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
- cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
- let attr = PthreadMutexAttr(&mut attr);
- cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+ unsafe {
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(
+ attr.0.as_mut_ptr(),
+ libc::PTHREAD_MUTEX_NORMAL,
+ ))
.unwrap();
- cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap();
+ }
+
+ mutex
}
- #[inline]
- pub unsafe fn lock(&self) {
- let r = libc::pthread_mutex_lock(self.inner.get());
- debug_assert_eq!(r, 0);
+
+ fn destroy(mutex: Box<Self>) {
+ // We're not allowed to pthread_mutex_destroy a locked mutex,
+ // so check first if it's unlocked.
+ if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } {
+ unsafe { libc::pthread_mutex_unlock(mutex.0.get()) };
+ drop(mutex);
+ } else {
+ // The mutex is locked. This happens if a MutexGuard is leaked.
+ // In this case, we just leak the Mutex too.
+ forget(mutex);
+ }
}
+
+ fn cancel_init(_: Box<Self>) {
+ // In this case, we can just drop it without any checks,
+ // since it cannot have been locked yet.
+ }
+}
+
+impl Drop for AllocatedMutex {
#[inline]
- pub unsafe fn unlock(&self) {
- let r = libc::pthread_mutex_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
+
+impl Mutex {
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- libc::pthread_mutex_trylock(self.inner.get()) == 0
+ pub const fn new() -> Mutex {
+ Mutex { inner: LazyBox::new() }
}
+
#[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
+ pub unsafe fn lock(&self) {
+ let r = libc::pthread_mutex_lock(raw(self));
debug_assert_eq!(r, 0);
}
+
#[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
- // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
- // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
- // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
- // this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
+ pub unsafe fn unlock(&self) {
+ let r = libc::pthread_mutex_unlock(raw(self));
+ debug_assert_eq!(r, 0);
}
-}
-impl Drop for Mutex {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(raw(self)) == 0
}
}
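LazyBox is a std-internal helper, but the underlying idea is easy to sketch: keep an AtomicPtr that starts null, allocate the real object on first use, and resolve init races by freeing the losing allocation (LazyBox's cancel_init). This keeps the wrapper const-constructible and freely movable while the pthread object itself never moves. A rough stand-in, with Drop omitted for brevity (the real LazyBox frees the box, and for mutexes first checks that it is unlocked):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Acquire}};

    struct Lazy<T> {
        ptr: AtomicPtr<T>,
    }

    impl<T: Default> Lazy<T> {
        const fn new() -> Self {
            Self { ptr: AtomicPtr::new(ptr::null_mut()) }
        }

        fn get(&self) -> &T {
            let mut p = self.ptr.load(Acquire);
            if p.is_null() {
                let new = Box::into_raw(Box::new(T::default()));
                match self.ptr.compare_exchange(ptr::null_mut(), new, AcqRel, Acquire) {
                    Ok(_) => p = new,
                    Err(winner) => {
                        // Lost an init race: free our allocation and use the
                        // winner's instead.
                        drop(unsafe { Box::from_raw(new) });
                        p = winner;
                    }
                }
            }
            unsafe { &*p }
        }
    }
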
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index adfe2a883..04662be9d 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -3,20 +3,26 @@ use crate::mem::forget;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-pub struct RwLock {
+struct AllocatedRwLock {
inner: UnsafeCell<libc::pthread_rwlock_t>,
write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
num_readers: AtomicUsize,
}
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+unsafe impl Send for AllocatedRwLock {}
+unsafe impl Sync for AllocatedRwLock {}
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
+pub struct RwLock {
+ inner: LazyBox<AllocatedRwLock>,
+}
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedRwLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ })
}
fn destroy(mut rwlock: Box<Self>) {
@@ -35,17 +41,39 @@ impl LazyInit for RwLock {
}
}
+impl AllocatedRwLock {
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl Drop for AllocatedRwLock {
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) };
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
+ }
+}
+
impl RwLock {
+ #[inline]
pub const fn new() -> RwLock {
- RwLock {
- inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
- write_locked: UnsafeCell::new(false),
- num_readers: AtomicUsize::new(0),
- }
+ RwLock { inner: LazyBox::new() }
}
+
#[inline]
- pub unsafe fn read(&self) {
- let r = libc::pthread_rwlock_rdlock(self.inner.get());
+ pub fn read(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) };
// According to POSIX, when a thread tries to acquire this read lock
// while it already holds the write lock
@@ -62,51 +90,61 @@ impl RwLock {
// got the write lock more than once, or got a read and a write lock.
if r == libc::EAGAIN {
panic!("rwlock maximum reader count exceeded");
- } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+ } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_rdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock read lock would result in deadlock");
} else {
// POSIX does not make guarantees about all the errors that may be returned.
// See issue #94705 for more details.
assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
}
}
+
#[inline]
- pub unsafe fn try_read(&self) -> bool {
- let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ pub fn try_read(&self) -> bool {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) };
if r == 0 {
- if *self.write_locked.get() {
+ if unsafe { *lock.write_locked.get() } {
// `pthread_rwlock_tryrdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
false
} else {
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
true
}
} else {
false
}
}
+
#[inline]
- pub unsafe fn write(&self) {
- let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ pub fn write(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) };
// See comments above for why we check for EDEADLK and write_locked. For the same reason,
// we also need to check that there are no readers (tracked in `num_readers`).
if r == libc::EDEADLK
- || (r == 0 && *self.write_locked.get())
- || self.num_readers.load(Ordering::Relaxed) != 0
+ || (r == 0 && unsafe { *lock.write_locked.get() })
+ || lock.num_readers.load(Ordering::Relaxed) != 0
{
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_wrlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock write lock would result in deadlock");
} else {
@@ -114,60 +152,44 @@ impl RwLock {
// return EDEADLK or 0. We rely on that.
debug_assert_eq!(r, 0);
}
- *self.write_locked.get() = true;
+
+ unsafe {
+ *lock.write_locked.get() = true;
+ }
}
+
#[inline]
pub unsafe fn try_write(&self) -> bool {
- let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ let lock = &*self.inner;
+ let r = libc::pthread_rwlock_trywrlock(lock.inner.get());
if r == 0 {
- if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
// `pthread_rwlock_trywrlock` succeeded when it should not have.
- self.raw_unlock();
+ lock.raw_unlock();
false
} else {
- *self.write_locked.get() = true;
+ *lock.write_locked.get() = true;
true
}
} else {
false
}
}
- #[inline]
- unsafe fn raw_unlock(&self) {
- let r = libc::pthread_rwlock_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
+
#[inline]
pub unsafe fn read_unlock(&self) {
- debug_assert!(!*self.write_locked.get());
- self.num_readers.fetch_sub(1, Ordering::Relaxed);
- self.raw_unlock();
- }
- #[inline]
- pub unsafe fn write_unlock(&self) {
- debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
- debug_assert!(*self.write_locked.get());
- *self.write_locked.get() = false;
- self.raw_unlock();
+ let lock = &*self.inner;
+ debug_assert!(!*lock.write_locked.get());
+ lock.num_readers.fetch_sub(1, Ordering::Relaxed);
+ lock.raw_unlock();
}
- #[inline]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_rwlock_destroy(self.inner.get());
- // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
- // rwlock that was just initialized with
- // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
- // or pthread_rwlock_init() is called, this behaviour no longer occurs.
- if cfg!(target_os = "dragonfly") {
- debug_assert!(r == 0 || r == libc::EINVAL);
- } else {
- debug_assert_eq!(r, 0);
- }
- }
-}
-impl Drop for RwLock {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn write_unlock(&self) {
+ let lock = &*self.inner;
+ debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*lock.write_locked.get());
+ *lock.write_locked.get() = false;
+ lock.raw_unlock();
}
}
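POSIX cannot portably report "this thread already holds the lock", so the implementation tracks write_locked and num_readers beside the OS lock and backs out with raw_unlock whenever the OS call succeeds when it should not have. A single-threaded toy of that bookkeeping branch (Cell standing in for the UnsafeCell guarded by the OS lock):

    use std::cell::Cell;

    struct ToyBookkeeping {
        write_locked: Cell<bool>, // stands in for the UnsafeCell guarded by `inner`
        num_readers: Cell<usize>,
    }

    impl ToyBookkeeping {
        // Models the `r == 0` branch of try_read above: the OS lock call
        // succeeded, so consult our own records before admitting the reader.
        fn admit_reader(&self) -> bool {
            if self.write_locked.get() {
                false // the real code calls raw_unlock(), then reports failure
            } else {
                self.num_readers.set(self.num_readers.get() + 1);
                true
            }
        }
    }
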
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index cca9c6767..d5abd9b58 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -149,7 +149,11 @@ impl From<libc::timespec> for Timespec {
}
}
-#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+#[cfg(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+))]
mod inner {
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::sys::cvt;
@@ -265,7 +269,11 @@ mod inner {
}
}
-#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
+#[cfg(not(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+)))]
mod inner {
use crate::fmt;
use crate::mem::MaybeUninit;
@@ -281,7 +289,11 @@ mod inner {
impl Instant {
pub fn now() -> Instant {
- Instant { t: Timespec::now(libc::CLOCK_MONOTONIC) }
+ #[cfg(target_os = "macos")]
+ const clock_id: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
+ #[cfg(not(target_os = "macos"))]
+ const clock_id: libc::clockid_t = libc::CLOCK_MONOTONIC;
+ Instant { t: Timespec::now(clock_id) }
}
pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
@@ -312,13 +324,8 @@ mod inner {
}
}
- #[cfg(not(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon")))]
- pub type clock_t = libc::c_int;
- #[cfg(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon"))]
- pub type clock_t = libc::c_ulong;
-
impl Timespec {
- pub fn now(clock: clock_t) -> Timespec {
+ pub fn now(clock: libc::clockid_t) -> Timespec {
// Try to use 64-bit time in preparation for Y2038.
#[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))]
{
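CLOCK_UPTIME_RAW is Apple's monotonic clock that neither ticks while the system is asleep nor is subject to NTP slewing, matching mach_absolute_time semantics. A sketch of the same clock selection as a free function (assumes the libc crate; error handling reduced to an assert):

    fn now_monotonic() -> libc::timespec {
        // CLOCK_UPTIME_RAW only exists on Apple platforms; elsewhere fall
        // back to the portable monotonic clock, as in the diff above.
        #[cfg(target_os = "macos")]
        const CLOCK_ID: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
        #[cfg(not(target_os = "macos"))]
        const CLOCK_ID: libc::clockid_t = libc::CLOCK_MONOTONIC;

        let mut t = libc::timespec { tv_sec: 0, tv_nsec: 0 };
        let r = unsafe { libc::clock_gettime(CLOCK_ID, &mut t) };
        assert_eq!(r, 0, "clock_gettime failed");
        t
    }
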
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
index e4ff21b25..f5a4ce929 100644
--- a/library/std/src/sys/unix/weak.rs
+++ b/library/std/src/sys/unix/weak.rs
@@ -29,7 +29,21 @@ use crate::ptr;
use crate::sync::atomic::{self, AtomicPtr, Ordering};
// We can use true weak linkage on ELF targets.
-#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), not(bootstrap)))]
+pub(crate) macro weak {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static $name: Option<unsafe extern "C" fn($($t),*) -> $ret>;
+ }
+ #[allow(unused_unsafe)]
+ ExternWeak::new(unsafe { $name })
+ };
+ )
+}
+
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), bootstrap))]
pub(crate) macro weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
@@ -47,11 +61,31 @@ pub(crate) macro weak {
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::dlsym as weak;
+#[cfg(not(bootstrap))]
+pub(crate) struct ExternWeak<F: Copy> {
+ weak_ptr: Option<F>,
+}
+
+#[cfg(not(bootstrap))]
+impl<F: Copy> ExternWeak<F> {
+ #[inline]
+ pub(crate) fn new(weak_ptr: Option<F>) -> Self {
+ ExternWeak { weak_ptr }
+ }
+
+ #[inline]
+ pub(crate) fn get(&self) -> Option<F> {
+ self.weak_ptr
+ }
+}
+
+#[cfg(bootstrap)]
pub(crate) struct ExternWeak<F> {
weak_ptr: *const libc::c_void,
_marker: PhantomData<F>,
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self {
@@ -59,6 +93,7 @@ impl<F> ExternWeak<F> {
}
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn get(&self) -> Option<F> {
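On macOS and iOS, where ELF-style weak linkage is unavailable, the module falls back to dlsym (see the pub(crate) use self::dlsym as weak above). A standalone sketch of that runtime lookup for a hypothetical getrandom binding (assumes the libc crate; the symbol simply may not exist on older systems):

    use std::mem;

    type GetRandomFn =
        unsafe extern "C" fn(*mut libc::c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;

    // Look the symbol up at runtime; None means the OS does not provide it.
    unsafe fn find_getrandom() -> Option<GetRandomFn> {
        let sym = libc::dlsym(libc::RTLD_DEFAULT, b"getrandom\0".as_ptr().cast());
        if sym.is_null() { None } else { Some(mem::transmute(sym)) }
    }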