author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:21 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:21 +0000
commit     4e8199b572f2035b7749cba276ece3a26630d23e (patch)
tree       f09feeed6a0fe39d027b1908aa63ea6b35e4b631 /library/std/src/sys
parent     Adding upstream version 1.66.0+dfsg1. (diff)
Adding upstream version 1.67.1+dfsg1. (upstream/1.67.1+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/std/src/sys')
-rw-r--r--  library/std/src/sys/common/alloc.rs | 12
-rw-r--r--  library/std/src/sys/hermit/fs.rs | 4
-rw-r--r--  library/std/src/sys/hermit/mod.rs | 6
-rw-r--r--  library/std/src/sys/hermit/thread.rs | 3
-rw-r--r--  library/std/src/sys/itron/condvar.rs | 13
-rw-r--r--  library/std/src/sys/itron/mutex.rs | 8
-rw-r--r--  library/std/src/sys/itron/thread.rs | 48
-rw-r--r--  library/std/src/sys/sgx/condvar.rs | 29
-rw-r--r--  library/std/src/sys/sgx/mod.rs | 1
-rw-r--r--  library/std/src/sys/sgx/mutex.rs | 24
-rw-r--r--  library/std/src/sys/sgx/rwlock.rs | 78
-rw-r--r--  library/std/src/sys/sgx/rwlock/tests.rs | 16
-rw-r--r--  library/std/src/sys/solid/io.rs | 4
-rw-r--r--  library/std/src/sys/solid/os.rs | 3
-rw-r--r--  library/std/src/sys/solid/rwlock.rs | 10
-rw-r--r--  library/std/src/sys/unix/locks/fuchsia_mutex.rs | 18
-rw-r--r--  library/std/src/sys/unix/locks/futex_condvar.rs | 6
-rw-r--r--  library/std/src/sys/unix/locks/futex_mutex.rs | 6
-rw-r--r--  library/std/src/sys/unix/locks/futex_rwlock.rs | 10
-rw-r--r--  library/std/src/sys/unix/locks/mod.rs | 18
-rw-r--r--  library/std/src/sys/unix/locks/pthread_condvar.rs | 179
-rw-r--r--  library/std/src/sys/unix/locks/pthread_mutex.rs | 134
-rw-r--r--  library/std/src/sys/unix/locks/pthread_rwlock.rs | 148
-rw-r--r--  library/std/src/sys/unix/time.rs | 25
-rw-r--r--  library/std/src/sys/unix/weak.rs | 37
-rw-r--r--  library/std/src/sys/unsupported/locks/condvar.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/mod.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/mutex.rs | 6
-rw-r--r--  library/std/src/sys/unsupported/locks/rwlock.rs | 10
-rw-r--r--  library/std/src/sys/wasi/net.rs | 10
-rw-r--r--  library/std/src/sys/wasm/mod.rs | 6
-rw-r--r--  library/std/src/sys/windows/args.rs | 54
-rw-r--r--  library/std/src/sys/windows/c.rs | 59
-rw-r--r--  library/std/src/sys/windows/locks/condvar.rs | 10
-rw-r--r--  library/std/src/sys/windows/locks/mod.rs | 6
-rw-r--r--  library/std/src/sys/windows/locks/mutex.rs | 13
-rw-r--r--  library/std/src/sys/windows/locks/rwlock.rs | 18
-rw-r--r--  library/std/src/sys/windows/mod.rs | 4
-rw-r--r--  library/std/src/sys/windows/pipe.rs | 9
-rw-r--r--  library/std/src/sys/windows/process.rs | 34
-rw-r--r--  library/std/src/sys/windows/stdio_uwp.rs | 87
41 files changed, 552 insertions, 626 deletions
diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs
index e8e7c51cb..3edbe7280 100644
--- a/library/std/src/sys/common/alloc.rs
+++ b/library/std/src/sys/common/alloc.rs
@@ -4,7 +4,7 @@ use crate::ptr;
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values.
-#[cfg(all(any(
+#[cfg(any(
target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
@@ -16,9 +16,9 @@ use crate::ptr;
target_arch = "hexagon",
all(target_arch = "riscv32", not(target_os = "espidf")),
all(target_arch = "xtensa", not(target_os = "espidf")),
-)))]
+))]
pub const MIN_ALIGN: usize = 8;
-#[cfg(all(any(
+#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "mips64",
@@ -26,13 +26,13 @@ pub const MIN_ALIGN: usize = 8;
target_arch = "sparc64",
target_arch = "riscv64",
target_arch = "wasm64",
-)))]
+))]
pub const MIN_ALIGN: usize = 16;
// The allocator on the esp-idf platform guarantees 4 byte alignment.
-#[cfg(all(any(
+#[cfg(any(
all(target_arch = "riscv32", target_os = "espidf"),
all(target_arch = "xtensa", target_os = "espidf"),
-)))]
+))]
pub const MIN_ALIGN: usize = 4;
pub unsafe fn realloc_fallback(
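
The change in this hunk is purely a cfg cleanup: wrapping a single any(...) predicate in all(...) selects exactly the same configurations, so the wrapper can be dropped. A minimal stand-alone illustration of that equivalence follows; the constant names are invented and the two targets are just a subset of those in the hunk.

    // Both attributes gate their item on the same set of targets; the patch
    // simply removes the redundant all(...) around a lone any(...).
    #[cfg(all(any(target_arch = "x86_64", target_arch = "aarch64")))]
    const MIN_ALIGN_OLD: usize = 16;

    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    const MIN_ALIGN_NEW: usize = 16;

    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fn main() {
        assert_eq!(MIN_ALIGN_OLD, MIN_ALIGN_NEW);
    }

    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    fn main() {}
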
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index af297ff1e..6fb92c037 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -1,10 +1,8 @@
-use crate::convert::TryFrom;
-use crate::ffi::{CStr, CString, OsString};
+use crate::ffi::{CStr, OsString};
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::io::{self, Error, ErrorKind};
use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
-use crate::os::unix::ffi::OsStrExt;
use crate::path::{Path, PathBuf};
use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::cvt;
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index e6534df89..6811fadb0 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -51,9 +51,9 @@ pub mod locks {
mod futex_condvar;
mod futex_mutex;
mod futex_rwlock;
- pub(crate) use futex_condvar::MovableCondvar;
- pub(crate) use futex_mutex::{MovableMutex, Mutex};
- pub(crate) use futex_rwlock::{MovableRwLock, RwLock};
+ pub(crate) use futex_condvar::Condvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
}
use crate::io::ErrorKind;
diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs
index e53a1fea6..8f65544a9 100644
--- a/library/std/src/sys/hermit/thread.rs
+++ b/library/std/src/sys/hermit/thread.rs
@@ -5,6 +5,7 @@ use crate::ffi::CStr;
use crate::io;
use crate::mem;
use crate::num::NonZeroUsize;
+use crate::ptr;
use crate::sys::hermit::abi;
use crate::sys::hermit::thread_local_dtor::run_dtors;
use crate::time::Duration;
@@ -47,7 +48,7 @@ impl Thread {
extern "C" fn thread_start(main: usize) {
unsafe {
// Finally, let's run some code.
- Box::from_raw(main as *mut Box<dyn FnOnce()>)();
+ Box::from_raw(ptr::from_exposed_addr::<Box<dyn FnOnce()>>(main).cast_mut())();
// run all destructors
run_dtors();
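
For context on the thread_start change above: the task entry argument is a plain machine word, so the boxed closure has to be flattened to an address and rebuilt exactly once on the other side. Below is a stand-alone sketch of that round-trip; it uses ordinary integer casts instead of the unstable expose_addr/from_exposed_addr strict-provenance helpers the patched std code switches to, and the function names are illustrative only.

    // A boxed closure is smuggled through a usize "entry argument" and
    // reconstituted exactly once on the other side.
    fn spawn_like(entry: extern "C" fn(usize), f: Box<dyn FnOnce()>) {
        // Double-box so the wide Box<dyn FnOnce()> sits behind one thin pointer.
        let main = Box::into_raw(Box::new(f)) as usize;
        entry(main);
    }

    extern "C" fn thread_start(main: usize) {
        // Safety: `main` came from Box::into_raw above and is consumed only here.
        let f = unsafe { Box::from_raw(main as *mut Box<dyn FnOnce()>) };
        f();
    }

    fn main() {
        spawn_like(thread_start, Box::new(|| println!("closure ran")));
    }
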
diff --git a/library/std/src/sys/itron/condvar.rs b/library/std/src/sys/itron/condvar.rs
index 008cd8fb1..7a47cc669 100644
--- a/library/std/src/sys/itron/condvar.rs
+++ b/library/std/src/sys/itron/condvar.rs
@@ -12,18 +12,13 @@ pub struct Condvar {
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
-pub type MovableCondvar = Condvar;
-
impl Condvar {
#[inline]
pub const fn new() -> Condvar {
Condvar { waiters: SpinMutex::new(waiter_queue::WaiterQueue::new()) }
}
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- pub unsafe fn notify_one(&self) {
+ pub fn notify_one(&self) {
self.waiters.with_locked(|waiters| {
if let Some(task) = waiters.pop_front() {
// Unpark the task
@@ -39,7 +34,7 @@ impl Condvar {
});
}
- pub unsafe fn notify_all(&self) {
+ pub fn notify_all(&self) {
self.waiters.with_locked(|waiters| {
while let Some(task) = waiters.pop_front() {
// Unpark the task
@@ -76,7 +71,7 @@ impl Condvar {
}
}
- unsafe { mutex.lock() };
+ mutex.lock();
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
@@ -114,7 +109,7 @@ impl Condvar {
// we woke up because of `notify_*`.
let success = self.waiters.with_locked(|waiters| unsafe { !waiters.remove(waiter) });
- unsafe { mutex.lock() };
+ mutex.lock();
success
}
}
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
index 085662e6d..1f6cc4194 100644
--- a/library/std/src/sys/itron/mutex.rs
+++ b/library/std/src/sys/itron/mutex.rs
@@ -11,8 +11,6 @@ pub struct Mutex {
mtx: SpinIdOnceCell<()>,
}
-pub type MovableMutex = Mutex;
-
/// Create a mutex object. This function never panics.
fn new_mtx() -> Result<abi::ID, ItronError> {
ItronError::err_if_negative(unsafe {
@@ -39,7 +37,7 @@ impl Mutex {
}
}
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
let mtx = self.raw();
expect_success(unsafe { abi::loc_mtx(mtx) }, &"loc_mtx");
}
@@ -49,7 +47,7 @@ impl Mutex {
expect_success_aborting(unsafe { abi::unl_mtx(mtx) }, &"unl_mtx");
}
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
let mtx = self.raw();
match unsafe { abi::ploc_mtx(mtx) } {
abi::E_TMOUT => false,
@@ -74,7 +72,7 @@ pub(super) struct MutexGuard<'a>(&'a Mutex);
impl<'a> MutexGuard<'a> {
#[inline]
pub(super) fn lock(x: &'a Mutex) -> Self {
- unsafe { x.lock() };
+ x.lock();
Self(x)
}
}
diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs
index d28f57f33..c2b366808 100644
--- a/library/std/src/sys/itron/thread.rs
+++ b/library/std/src/sys/itron/thread.rs
@@ -11,18 +11,25 @@ use crate::{
ffi::CStr,
hint, io,
mem::ManuallyDrop,
+ ptr::NonNull,
sync::atomic::{AtomicUsize, Ordering},
sys::thread_local_dtor::run_dtors,
time::Duration,
};
pub struct Thread {
- inner: ManuallyDrop<Box<ThreadInner>>,
+ p_inner: NonNull<ThreadInner>,
/// The ID of the underlying task.
task: abi::ID,
}
+// Safety: There's nothing in `Thread` that ties it to the original creator. It
+// can be dropped by any threads.
+unsafe impl Send for Thread {}
+// Safety: `Thread` provides no methods that take `&self`.
+unsafe impl Sync for Thread {}
+
/// State data shared between a parent thread and child thread. It's dropped on
/// a transition to one of the final states.
struct ThreadInner {
@@ -90,8 +97,9 @@ impl Thread {
});
unsafe extern "C" fn trampoline(exinf: isize) {
+ let p_inner: *mut ThreadInner = crate::ptr::from_exposed_addr_mut(exinf as usize);
// Safety: `ThreadInner` is alive at this point
- let inner = unsafe { &*(exinf as *const ThreadInner) };
+ let inner = unsafe { &*p_inner };
// Safety: Since `trampoline` is called only once for each
// `ThreadInner` and only `trampoline` touches `start`,
@@ -119,13 +127,13 @@ impl Thread {
// No one will ever join, so we'll ask the collector task to
// delete the task.
- // In this case, `inner`'s ownership has been moved to us,
- // And we are responsible for dropping it. The acquire
+ // In this case, `*p_inner`'s ownership has been moved to
+ // us, and we are responsible for dropping it. The acquire
// ordering is not necessary because the parent thread made
// no memory access needing synchronization since the call
// to `acre_tsk`.
// Safety: See above.
- let _ = unsafe { Box::from_raw(inner as *const _ as *mut ThreadInner) };
+ let _ = unsafe { Box::from_raw(p_inner) };
// Safety: There are no pinned references to the stack
unsafe { terminate_and_delete_current_task() };
@@ -162,13 +170,14 @@ impl Thread {
}
}
- let inner_ptr = (&*inner) as *const ThreadInner;
+ // Safety: `Box::into_raw` returns a non-null pointer
+ let p_inner = unsafe { NonNull::new_unchecked(Box::into_raw(inner)) };
let new_task = ItronError::err_if_negative(unsafe {
abi::acre_tsk(&abi::T_CTSK {
// Activate this task immediately
tskatr: abi::TA_ACT,
- exinf: inner_ptr as abi::EXINF,
+ exinf: p_inner.as_ptr().expose_addr() as abi::EXINF,
// The entry point
task: Some(trampoline),
// Inherit the calling task's base priority
@@ -180,7 +189,7 @@ impl Thread {
})
.map_err(|e| e.as_io_error())?;
- Ok(Self { inner: ManuallyDrop::new(inner), task: new_task })
+ Ok(Self { p_inner, task: new_task })
}
pub fn yield_now() {
@@ -197,8 +206,9 @@ impl Thread {
}
}
- pub fn join(mut self) {
- let inner = &*self.inner;
+ pub fn join(self) {
+ // Safety: `ThreadInner` is alive at this point
+ let inner = unsafe { self.p_inner.as_ref() };
// Get the current task ID. Panicking here would cause a resource leak,
// so just abort on failure.
let current_task = task::current_task_id_aborting();
@@ -243,8 +253,8 @@ impl Thread {
unsafe { terminate_and_delete_task(self.task) };
// In either case, we are responsible for dropping `inner`.
- // Safety: The contents of `self.inner` will not be accessed hereafter
- let _inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+ // Safety: The contents of `*p_inner` will not be accessed hereafter
+ let _inner = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
// Skip the destructor (because it would attempt to detach the thread)
crate::mem::forget(self);
@@ -253,13 +263,16 @@ impl Thread {
impl Drop for Thread {
fn drop(&mut self) {
+ // Safety: `ThreadInner` is alive at this point
+ let inner = unsafe { self.p_inner.as_ref() };
+
// Detach the thread.
- match self.inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+ match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
LIFECYCLE_INIT => {
// [INIT → DETACHED]
// When the time comes, the child will figure out that no
// one will ever join it.
- // The ownership of `self.inner` is moved to the child thread.
+ // The ownership of `*p_inner` is moved to the child thread.
// However, the release ordering is not necessary because we
// made no memory access needing synchronization since the call
// to `acre_tsk`.
@@ -278,10 +291,9 @@ impl Drop for Thread {
// delete by entering the `FINISHED` state.
unsafe { terminate_and_delete_task(self.task) };
- // Wwe are responsible for dropping `inner`.
- // Safety: The contents of `self.inner` will not be accessed
- // hereafter
- unsafe { ManuallyDrop::drop(&mut self.inner) };
+ // We are responsible for dropping `*p_inner`.
+ // Safety: The contents of `*p_inner` will not be accessed hereafter
+ let _ = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
}
_ => unsafe { hint::unreachable_unchecked() },
}
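
The join/detach logic above hinges on a small atomic state machine: both the parent (in join/drop) and the child (in trampoline) swap the lifecycle word, and whichever side arrives second frees the shared ThreadInner. Here is a stripped-down sketch of that handshake with invented names and constants; the real code also tracks a "joining" state and performs the actual task-deletion calls, which are omitted.

    use std::sync::atomic::{AtomicUsize, Ordering};

    const LIFECYCLE_INIT: usize = 0;
    const LIFECYCLE_FINISHED: usize = 1;
    const LIFECYCLE_DETACHED_OR_JOINED: usize = 2;

    struct SharedState {
        lifecycle: AtomicUsize,
    }

    // Called by the child when it exits; returns true if the child must free
    // the shared allocation (the parent already detached or joined).
    fn child_finished(s: &SharedState) -> bool {
        s.lifecycle.swap(LIFECYCLE_FINISHED, Ordering::AcqRel) == LIFECYCLE_DETACHED_OR_JOINED
    }

    // Called by the parent on drop/join; returns true if the parent must free
    // the shared allocation (the child has already finished).
    fn parent_detached(s: &SharedState) -> bool {
        s.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::AcqRel) == LIFECYCLE_FINISHED
    }

    fn main() {
        let s = SharedState { lifecycle: AtomicUsize::new(LIFECYCLE_INIT) };
        assert!(!parent_detached(&s)); // child still running: child will clean up
        assert!(child_finished(&s));   // child sees the detach and cleans up
    }
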
diff --git a/library/std/src/sys/sgx/condvar.rs b/library/std/src/sys/sgx/condvar.rs
index 36534e0ef..aa1174664 100644
--- a/library/std/src/sys/sgx/condvar.rs
+++ b/library/std/src/sys/sgx/condvar.rs
@@ -4,42 +4,43 @@ use crate::time::Duration;
use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedCondvar(SpinMutex<WaitVariable<()>>);
+
pub struct Condvar {
- inner: SpinMutex<WaitVariable<()>>,
+ inner: LazyBox<AllocatedCondvar>,
}
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-impl LazyInit for Condvar {
+impl LazyInit for AllocatedCondvar {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedCondvar(SpinMutex::new(WaitVariable::new(()))))
}
}
impl Condvar {
pub const fn new() -> Condvar {
- Condvar { inner: SpinMutex::new(WaitVariable::new(())) }
+ Condvar { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn notify_one(&self) {
- let _ = WaitQueue::notify_one(self.inner.lock());
+ pub fn notify_one(&self) {
+ let _ = WaitQueue::notify_one(self.inner.0.lock());
}
#[inline]
- pub unsafe fn notify_all(&self) {
- let _ = WaitQueue::notify_all(self.inner.lock());
+ pub fn notify_all(&self) {
+ let _ = WaitQueue::notify_all(self.inner.0.lock());
}
pub unsafe fn wait(&self, mutex: &Mutex) {
- let guard = self.inner.lock();
+ let guard = self.inner.0.lock();
WaitQueue::wait(guard, || unsafe { mutex.unlock() });
- unsafe { mutex.lock() }
+ mutex.lock()
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let success = WaitQueue::wait_timeout(&self.inner, dur, || unsafe { mutex.unlock() });
- unsafe { mutex.lock() };
+ let success = WaitQueue::wait_timeout(&self.inner.0, dur, || unsafe { mutex.unlock() });
+ mutex.lock();
success
}
}
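
The recurring pattern in this and the following lock changes is moving the queue/OS state into a lazily heap-allocated box, so the public lock type itself is trivially movable while the inner state keeps a stable address. Below is a rough stand-alone approximation of that idea; std's real LazyBox/LazyInit live in sys_common::lazy_box and differ in detail, and LazyBoxSketch and its API are invented for illustration (Drop handling is omitted).

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct LazyBoxSketch<T> {
        ptr: AtomicPtr<T>,
    }

    impl<T> LazyBoxSketch<T> {
        const fn new() -> Self {
            Self { ptr: AtomicPtr::new(ptr::null_mut()) }
        }
    }

    impl<T: Default> LazyBoxSketch<T> {
        fn get(&self) -> &T {
            let mut p = self.ptr.load(Ordering::Acquire);
            if p.is_null() {
                let fresh = Box::into_raw(Box::new(T::default()));
                match self.ptr.compare_exchange(ptr::null_mut(), fresh, Ordering::AcqRel, Ordering::Acquire) {
                    Ok(_) => p = fresh,
                    Err(existing) => {
                        // Lost the race: free our allocation and use the winner's.
                        drop(unsafe { Box::from_raw(fresh) });
                        p = existing;
                    }
                }
            }
            // Once published, the allocation is never freed or moved here.
            unsafe { &*p }
        }
    }

    fn main() {
        let lazy: LazyBoxSketch<String> = LazyBoxSketch::new();
        println!("first access allocates: {:?}", lazy.get());
    }
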
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index b1d32929e..01e4ffe3d 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -3,6 +3,7 @@
//! This module contains the facade (aka platform-specific) implementations of
//! OS level functionality for Fortanix SGX.
#![deny(unsafe_op_in_unsafe_fn)]
+#![allow(fuzzy_provenance_casts)] // FIXME: this entire module systematically confuses pointers and integers
use crate::io::ErrorKind;
use crate::sync::atomic::{AtomicBool, Ordering};
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index aa747d56b..0dbf020eb 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -1,28 +1,28 @@
use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+/// FIXME: `UnsafeList` is not movable.
+struct AllocatedMutex(SpinMutex<WaitVariable<bool>>);
+
pub struct Mutex {
- inner: SpinMutex<WaitVariable<bool>>,
+ inner: LazyBox<AllocatedMutex>,
}
-// not movable: see UnsafeList implementation
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedMutex(SpinMutex::new(WaitVariable::new(false))))
}
}
// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
impl Mutex {
pub const fn new() -> Mutex {
- Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
+ Mutex { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn lock(&self) {
- let mut guard = self.inner.lock();
+ pub fn lock(&self) {
+ let mut guard = self.inner.0.lock();
if *guard.lock_var() {
// Another thread has the lock, wait
WaitQueue::wait(guard, || {})
@@ -35,7 +35,7 @@ impl Mutex {
#[inline]
pub unsafe fn unlock(&self) {
- let guard = self.inner.lock();
+ let guard = self.inner.0.lock();
if let Err(mut guard) = WaitQueue::notify_one(guard) {
// No other waiters, unlock
*guard.lock_var_mut() = false;
@@ -45,8 +45,8 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let mut guard = try_lock_or_false!(self.inner);
+ pub fn try_lock(&self) -> bool {
+ let mut guard = try_lock_or_false!(self.inner.0);
if *guard.lock_var() {
// Another thread has the lock
false
diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs
index a97fb9ab0..d89de18ca 100644
--- a/library/std/src/sys/sgx/rwlock.rs
+++ b/library/std/src/sys/sgx/rwlock.rs
@@ -7,42 +7,45 @@ use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use super::waitqueue::{
try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
};
-use crate::mem;
+use crate::alloc::Layout;
-pub struct RwLock {
+struct AllocatedRwLock {
readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
writer: SpinMutex<WaitVariable<bool>>,
}
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+pub struct RwLock {
+ inner: LazyBox<AllocatedRwLock>,
+}
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedRwLock {
+ readers: SpinMutex::new(WaitVariable::new(None)),
+ writer: SpinMutex::new(WaitVariable::new(false)),
+ })
}
}
-// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
-//
-// # Safety
-// Never called, as it is a compile time check.
-#[allow(dead_code)]
-unsafe fn rw_lock_size_assert(r: RwLock) {
- unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
-}
+// Check at compile time that RwLock's size and alignment matches the C definition
+// in libunwind (see also `test_c_rwlock_initializer` in `tests`).
+const _: () = {
+ let rust = Layout::new::<RwLock>();
+ let c = Layout::new::<*mut ()>();
+ assert!(rust.size() == c.size());
+ assert!(rust.align() == c.align());
+};
impl RwLock {
pub const fn new() -> RwLock {
- RwLock {
- readers: SpinMutex::new(WaitVariable::new(None)),
- writer: SpinMutex::new(WaitVariable::new(false)),
- }
+ RwLock { inner: LazyBox::new() }
}
#[inline]
- pub unsafe fn read(&self) {
- let mut rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ pub fn read(&self) {
+ let lock = &*self.inner;
+ let mut rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
if *wguard.lock_var() || !wguard.queue_empty() {
// Another thread has or is waiting for the write lock, wait
drop(wguard);
@@ -57,8 +60,9 @@ impl RwLock {
#[inline]
pub unsafe fn try_read(&self) -> bool {
- let mut rguard = try_lock_or_false!(self.readers);
- let wguard = try_lock_or_false!(self.writer);
+ let lock = &*self.inner;
+ let mut rguard = try_lock_or_false!(lock.readers);
+ let wguard = try_lock_or_false!(lock.writer);
if *wguard.lock_var() || !wguard.queue_empty() {
// Another thread has or is waiting for the write lock
false
@@ -71,9 +75,10 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
- let rguard = self.readers.lock();
- let mut wguard = self.writer.lock();
+ pub fn write(&self) {
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let mut wguard = lock.writer.lock();
if *wguard.lock_var() || rguard.lock_var().is_some() {
// Another thread has the lock, wait
drop(rguard);
@@ -86,9 +91,10 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
- let rguard = try_lock_or_false!(self.readers);
- let mut wguard = try_lock_or_false!(self.writer);
+ pub fn try_write(&self) -> bool {
+ let lock = &*self.inner;
+ let rguard = try_lock_or_false!(lock.readers);
+ let mut wguard = try_lock_or_false!(lock.writer);
if *wguard.lock_var() || rguard.lock_var().is_some() {
// Another thread has the lock
false
@@ -122,8 +128,9 @@ impl RwLock {
#[inline]
pub unsafe fn read_unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
unsafe { self.__read_unlock(rguard, wguard) };
}
@@ -158,8 +165,9 @@ impl RwLock {
#[inline]
pub unsafe fn write_unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
unsafe { self.__write_unlock(rguard, wguard) };
}
@@ -167,8 +175,9 @@ impl RwLock {
#[inline]
#[cfg_attr(test, allow(dead_code))]
unsafe fn unlock(&self) {
- let rguard = self.readers.lock();
- let wguard = self.writer.lock();
+ let lock = &*self.inner;
+ let rguard = lock.readers.lock();
+ let wguard = lock.writer.lock();
if *wguard.lock_var() == true {
unsafe { self.__write_unlock(rguard, wguard) };
} else {
@@ -201,6 +210,7 @@ pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
unsafe { (*p).write() };
return 0;
}
+
#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
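
The const _: () = { ... } block above replaces a transmute-based size check with a compile-time assertion. The same technique works in ordinary crates; here is a self-contained sketch in which FfiHandle is a made-up type standing in for RwLock, checked against the single-pointer layout the C side expects.

    use std::alloc::Layout;

    // A made-up FFI-facing type for illustration.
    #[allow(dead_code)]
    #[repr(transparent)]
    struct FfiHandle(*mut ());

    // Evaluated at compile time: a size or alignment mismatch aborts the build.
    const _: () = {
        let rust = Layout::new::<FfiHandle>();
        let c = Layout::new::<*mut ()>();
        assert!(rust.size() == c.size());
        assert!(rust.align() == c.align());
    };

    fn main() {
        println!("layout check passed at compile time");
    }
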
diff --git a/library/std/src/sys/sgx/rwlock/tests.rs b/library/std/src/sys/sgx/rwlock/tests.rs
index 479996115..5fd6670af 100644
--- a/library/std/src/sys/sgx/rwlock/tests.rs
+++ b/library/std/src/sys/sgx/rwlock/tests.rs
@@ -1,22 +1,12 @@
use super::*;
+use crate::ptr;
// Verify that the byte pattern libunwind uses to initialize an RwLock is
// equivalent to the value of RwLock::new(). If the value changes,
// `src/UnwindRustSgx.h` in libunwind needs to be changed too.
#[test]
fn test_c_rwlock_initializer() {
- #[rustfmt::skip]
- const C_RWLOCK_INIT: &[u8] = &[
- /* 0x00 */ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x10 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x20 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x30 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x40 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x50 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x60 */ 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x70 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- ];
+ const C_RWLOCK_INIT: *mut () = ptr::null_mut();
// For the test to work, we need the padding/unused bytes in RwLock to be
// initialized as 0. In practice, this is the case with statics.
@@ -26,6 +16,6 @@ fn test_c_rwlock_initializer() {
// If the assertion fails, that not necessarily an issue with the value
// of C_RWLOCK_INIT. It might just be an issue with the way padding
// bytes are initialized in the test code.
- assert_eq!(&crate::mem::transmute_copy::<_, [u8; 144]>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
+ assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
};
}
diff --git a/library/std/src/sys/solid/io.rs b/library/std/src/sys/solid/io.rs
index 9eb17a10d..a862bb787 100644
--- a/library/std/src/sys/solid/io.rs
+++ b/library/std/src/sys/solid/io.rs
@@ -75,3 +75,7 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
}
}
+
+pub fn is_terminal<T>(_: &T) -> bool {
+ false
+}
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index 4906c6268..6135921f0 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -1,7 +1,6 @@
use super::unsupported;
-use crate::convert::TryFrom;
use crate::error::Error as StdError;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::os::{
diff --git a/library/std/src/sys/solid/rwlock.rs b/library/std/src/sys/solid/rwlock.rs
index 0a770cf03..ecb4eb83b 100644
--- a/library/std/src/sys/solid/rwlock.rs
+++ b/library/std/src/sys/solid/rwlock.rs
@@ -12,8 +12,6 @@ pub struct RwLock {
rwl: SpinIdOnceCell<()>,
}
-pub type MovableRwLock = RwLock;
-
// Safety: `num_readers` is protected by `mtx_num_readers`
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
@@ -37,13 +35,13 @@ impl RwLock {
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let rwl = self.raw();
expect_success(unsafe { abi::rwl_loc_rdl(rwl) }, &"rwl_loc_rdl");
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
let rwl = self.raw();
match unsafe { abi::rwl_ploc_rdl(rwl) } {
abi::E_TMOUT => false,
@@ -55,13 +53,13 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
let rwl = self.raw();
expect_success(unsafe { abi::rwl_loc_wrl(rwl) }, &"rwl_loc_wrl");
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
let rwl = self.raw();
match unsafe { abi::rwl_ploc_wrl(rwl) } {
abi::E_TMOUT => false,
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index 117611ce4..5d89e5a13 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -53,8 +53,6 @@ const CONTESTED_BIT: u32 = 1;
// This can never be a valid `zx_handle_t`.
const UNLOCKED: u32 = 0;
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
futex: AtomicU32,
}
@@ -86,23 +84,27 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let thread_self = zx_thread_self();
+ pub fn try_lock(&self) -> bool {
+ let thread_self = unsafe { zx_thread_self() };
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
- let thread_self = zx_thread_self();
+ pub fn lock(&self) {
+ let thread_self = unsafe { zx_thread_self() };
if let Err(state) =
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed)
{
- self.lock_contested(state, thread_self);
+ unsafe {
+ self.lock_contested(state, thread_self);
+ }
}
}
+ /// # Safety
+ /// `thread_self` must be the handle for the current thread.
#[cold]
- fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
+ unsafe fn lock_contested(&self, mut state: u32, thread_self: zx_handle_t) {
let owned_state = mark_contested(to_state(thread_self));
loop {
// Mark the mutex as contested if it is not already.
diff --git a/library/std/src/sys/unix/locks/futex_condvar.rs b/library/std/src/sys/unix/locks/futex_condvar.rs
index c0576c178..4bd65dd25 100644
--- a/library/std/src/sys/unix/locks/futex_condvar.rs
+++ b/library/std/src/sys/unix/locks/futex_condvar.rs
@@ -3,8 +3,6 @@ use crate::sync::atomic::{AtomicU32, Ordering::Relaxed};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
use crate::time::Duration;
-pub type MovableCondvar = Condvar;
-
pub struct Condvar {
// The value of this atomic is simply incremented on every notification.
// This is used by `.wait()` to not miss any notifications after
@@ -21,12 +19,12 @@ impl Condvar {
// All the memory orderings here are `Relaxed`,
// because synchronization is done by unlocking and locking the mutex.
- pub unsafe fn notify_one(&self) {
+ pub fn notify_one(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake(&self.futex);
}
- pub unsafe fn notify_all(&self) {
+ pub fn notify_all(&self) {
self.futex.fetch_add(1, Relaxed);
futex_wake_all(&self.futex);
}
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 33b13dad4..c01229586 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake};
-pub type MovableMutex = Mutex;
-
pub struct Mutex {
/// 0: unlocked
/// 1: locked, no other threads waiting
@@ -20,12 +18,12 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
}
#[inline]
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
if self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_err() {
self.lock_contended();
}
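
The futex mutex above keeps its whole state in one AtomicU32 (0 = unlocked, 1 = locked, 2 = locked with waiters) and takes the lock with a single compare-exchange. A stand-alone sketch of that fast path follows; where std would call futex_wait/futex_wake on contention, this illustration merely spins, and the type name is invented.

    use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};

    struct FutexLikeMutex {
        // 0: unlocked, 1: locked (a real futex mutex also uses 2 for
        // "locked, other threads waiting").
        state: AtomicU32,
    }

    impl FutexLikeMutex {
        const fn new() -> Self {
            Self { state: AtomicU32::new(0) }
        }

        fn try_lock(&self) -> bool {
            self.state.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
        }

        fn lock(&self) {
            while !self.try_lock() {
                // std parks the thread with futex_wait here; we just spin.
                std::hint::spin_loop();
            }
        }

        fn unlock(&self) {
            // std would futex_wake a waiter when the state was 2.
            self.state.store(0, Release);
        }
    }

    fn main() {
        let m = FutexLikeMutex::new();
        m.lock();
        assert!(!m.try_lock());
        m.unlock();
        assert!(m.try_lock());
    }
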
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index 0cc92244e..aa0de9002 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -4,8 +4,6 @@ use crate::sync::atomic::{
};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
-pub type MovableRwLock = RwLock;
-
pub struct RwLock {
// The state consists of a 30-bit reader counter, a 'readers waiting' flag, and a 'writers waiting' flag.
// Bits 0..30:
@@ -70,14 +68,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_read_lockable(s).then(|| s + READ_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let state = self.state.load(Relaxed);
if !is_read_lockable(state)
|| self
@@ -144,14 +142,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
self.state
.fetch_update(Acquire, Relaxed, |s| is_unlocked(s).then(|| s + WRITE_LOCKED))
.is_ok()
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
if self.state.compare_exchange_weak(0, WRITE_LOCKED, Acquire, Relaxed).is_err() {
self.write_contended();
}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index 9bb314b70..b2e0e49ad 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -10,22 +10,22 @@ cfg_if::cfg_if! {
mod futex_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else if #[cfg(target_os = "fuchsia")] {
mod fuchsia_mutex;
mod futex_rwlock;
mod futex_condvar;
- pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
- pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use fuchsia_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
+ pub(crate) use futex_condvar::Condvar;
} else {
mod pthread_mutex;
mod pthread_rwlock;
mod pthread_condvar;
- pub(crate) use pthread_mutex::{Mutex, MovableMutex};
- pub(crate) use pthread_rwlock::MovableRwLock;
- pub(crate) use pthread_condvar::MovableCondvar;
+ pub(crate) use pthread_mutex::Mutex;
+ pub(crate) use pthread_rwlock::RwLock;
+ pub(crate) use pthread_condvar::Condvar;
}
}
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 4741c0c67..1ddb09905 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -1,17 +1,17 @@
use crate::cell::UnsafeCell;
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
use crate::sys::locks::{pthread_mutex, Mutex};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::time::Duration;
+struct AllocatedCondvar(UnsafeCell<libc::pthread_cond_t>);
+
pub struct Condvar {
- inner: UnsafeCell<libc::pthread_cond_t>,
+ inner: LazyBox<AllocatedCondvar>,
+ mutex: AtomicPtr<libc::pthread_mutex_t>,
}
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
const TIMESPEC_MAX: libc::timespec =
libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
@@ -19,81 +19,104 @@ fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
}
-impl LazyInit for Condvar {
+#[inline]
+fn raw(c: &Condvar) -> *mut libc::pthread_cond_t {
+ c.inner.0.get()
+}
+
+unsafe impl Send for AllocatedCondvar {}
+unsafe impl Sync for AllocatedCondvar {}
+
+impl LazyInit for AllocatedCondvar {
fn init() -> Box<Self> {
- let mut condvar = Box::new(Self::new());
- unsafe { condvar.init() };
+ let condvar = Box::new(AllocatedCondvar(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER)));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ // `pthread_condattr_setclock` is unfortunately not supported on these platforms.
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
+ // So on that platform, init() should always be called
+ // Moreover, that platform does not have pthread_condattr_setclock support,
+ // hence that initialization should be skipped as well
+ //
+ // Similar story for the 3DS (horizon).
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), crate::ptr::null()) };
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = unsafe { libc::pthread_condattr_init(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_cond_init(condvar.0.get(), attr.as_ptr()) };
+ assert_eq!(r, 0);
+ let r = unsafe { libc::pthread_condattr_destroy(attr.as_mut_ptr()) };
+ assert_eq!(r, 0);
+ }
+ }
+
condvar
}
}
-impl Condvar {
- pub const fn new() -> Condvar {
- // Might be moved and address is changing it is better to avoid
- // initialization of potentially opaque OS data before it landed
- Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+impl Drop for AllocatedCondvar {
+ #[inline]
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_cond_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+ // a condvar that was just initialized with
+ // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+ // pthread_cond_init() is called, this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))]
- unsafe fn init(&mut self) {}
-
- // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
- // So on that platform, init() should always be called
- // Moreover, that platform does not have pthread_condattr_setclock support,
- // hence that initialization should be skipped as well
- //
- // Similar story for the 3DS (horizon).
- #[cfg(any(target_os = "espidf", target_os = "horizon"))]
- unsafe fn init(&mut self) {
- let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
- assert_eq!(r, 0);
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: LazyBox::new(), mutex: AtomicPtr::new(ptr::null_mut()) }
}
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox",
- target_os = "espidf",
- target_os = "horizon"
- )))]
- unsafe fn init(&mut self) {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
+ #[inline]
+ fn verify(&self, mutex: *mut libc::pthread_mutex_t) {
+ // Relaxed is okay here because we never read through `self.addr`, and only use it to
+ // compare addresses.
+ match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
+ Ok(_) => {} // Stored the address
+ Err(n) if n == mutex => {} // Lost a race to store the same address
+ _ => panic!("attempted to use a condition variable with two mutexes"),
+ }
}
#[inline]
- pub unsafe fn notify_one(&self) {
- let r = libc::pthread_cond_signal(self.inner.get());
+ pub fn notify_one(&self) {
+ let r = unsafe { libc::pthread_cond_signal(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
- pub unsafe fn notify_all(&self) {
- let r = libc::pthread_cond_broadcast(self.inner.get());
+ pub fn notify_all(&self) {
+ let r = unsafe { libc::pthread_cond_broadcast(raw(self)) };
debug_assert_eq!(r, 0);
}
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+ let r = libc::pthread_cond_wait(raw(self), mutex);
debug_assert_eq!(r, 0);
}
@@ -112,6 +135,9 @@ impl Condvar {
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
use crate::mem;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
let mut now: libc::timespec = mem::zeroed();
let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
assert_eq!(r, 0);
@@ -127,7 +153,7 @@ impl Condvar {
let timeout =
sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
assert!(r == libc::ETIMEDOUT || r == 0);
r == 0
}
@@ -144,9 +170,11 @@ impl Condvar {
target_os = "horizon"
))]
pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
- use crate::ptr;
use crate::time::Instant;
+ let mutex = pthread_mutex::raw(mutex);
+ self.verify(mutex);
+
// 1000 years
let max_dur = Duration::from_secs(1000 * 365 * 86400);
@@ -187,36 +215,11 @@ impl Condvar {
.unwrap_or(TIMESPEC_MAX);
// And wait!
- let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
debug_assert!(r == libc::ETIMEDOUT || r == 0);
// ETIMEDOUT is not a totally reliable method of determining timeout due
// to clock shifts, so do the check ourselves
stable_now.elapsed() < dur
}
-
- #[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- // On DragonFly pthread_cond_destroy() returns EINVAL if called on
- // a condvar that was just initialized with
- // libc::PTHREAD_COND_INITIALIZER. Once it is used or
- // pthread_cond_init() is called, this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
- }
-}
-
-impl Drop for Condvar {
- #[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
- }
}
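
The new verify method above records the first mutex the condvar is paired with and panics if a different one shows up later, since waiting on one condition variable with two different mutexes is not allowed by POSIX. A self-contained sketch of just that check; the type name and the example address are made up.

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    struct MutexBinding {
        mutex: AtomicPtr<()>,
    }

    impl MutexBinding {
        const fn new() -> Self {
            Self { mutex: AtomicPtr::new(ptr::null_mut()) }
        }

        fn verify(&self, mutex: *mut ()) {
            // Relaxed is enough: the stored pointer is only compared, never dereferenced.
            match self.mutex.compare_exchange(ptr::null_mut(), mutex, Relaxed, Relaxed) {
                Ok(_) => {}                // first use: remember this mutex
                Err(p) if p == mutex => {} // same mutex as last time
                _ => panic!("attempted to use a condition variable with two mutexes"),
            }
        }
    }

    fn main() {
        let binding = MutexBinding::new();
        let fake_mutex = 0x1000 as *mut (); // placeholder address for the demo
        binding.verify(fake_mutex);
        binding.verify(fake_mutex); // fine: same mutex as before
        // binding.verify(0x2000 as *mut ()); // would panic
    }
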
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 5964935dd..8a78bc1fd 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -3,56 +3,24 @@ use crate::mem::{forget, MaybeUninit};
use crate::sys::cvt_nz;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
+struct AllocatedMutex(UnsafeCell<libc::pthread_mutex_t>);
+
pub struct Mutex {
- inner: UnsafeCell<libc::pthread_mutex_t>,
+ inner: LazyBox<AllocatedMutex>,
}
-pub(crate) type MovableMutex = LazyBox<Mutex>;
-
#[inline]
pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
- m.inner.get()
+ m.inner.0.get()
}
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
+unsafe impl Send for AllocatedMutex {}
+unsafe impl Sync for AllocatedMutex {}
-impl LazyInit for Mutex {
+impl LazyInit for AllocatedMutex {
fn init() -> Box<Self> {
- let mut mutex = Box::new(Self::new());
- unsafe { mutex.init() };
- mutex
- }
-
- fn destroy(mutex: Box<Self>) {
- // We're not allowed to pthread_mutex_destroy a locked mutex,
- // so check first if it's unlocked.
- if unsafe { mutex.try_lock() } {
- unsafe { mutex.unlock() };
- drop(mutex);
- } else {
- // The mutex is locked. This happens if a MutexGuard is leaked.
- // In this case, we just leak the Mutex too.
- forget(mutex);
- }
- }
-
- fn cancel_init(_: Box<Self>) {
- // In this case, we can just drop it without any checks,
- // since it cannot have been locked yet.
- }
-}
+ let mutex = Box::new(AllocatedMutex(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER)));
-impl Mutex {
- pub const fn new() -> Mutex {
- // Might be moved to a different address, so it is better to avoid
- // initialization of potentially opaque OS data before it landed.
- // Be very careful using this newly constructed `Mutex`, reentrant
- // locking is undefined behavior until `init` is called!
- Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
- }
- #[inline]
- unsafe fn init(&mut self) {
// Issue #33770
//
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
@@ -77,49 +45,77 @@ impl Mutex {
// references, we instead create the mutex with type
// PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
// re-lock it from the same thread, thus avoiding undefined behavior.
- let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
- cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
- let attr = PthreadMutexAttr(&mut attr);
- cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+ unsafe {
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(
+ attr.0.as_mut_ptr(),
+ libc::PTHREAD_MUTEX_NORMAL,
+ ))
.unwrap();
- cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ cvt_nz(libc::pthread_mutex_init(mutex.0.get(), attr.0.as_ptr())).unwrap();
+ }
+
+ mutex
}
- #[inline]
- pub unsafe fn lock(&self) {
- let r = libc::pthread_mutex_lock(self.inner.get());
- debug_assert_eq!(r, 0);
+
+ fn destroy(mutex: Box<Self>) {
+ // We're not allowed to pthread_mutex_destroy a locked mutex,
+ // so check first if it's unlocked.
+ if unsafe { libc::pthread_mutex_trylock(mutex.0.get()) == 0 } {
+ unsafe { libc::pthread_mutex_unlock(mutex.0.get()) };
+ drop(mutex);
+ } else {
+ // The mutex is locked. This happens if a MutexGuard is leaked.
+ // In this case, we just leak the Mutex too.
+ forget(mutex);
+ }
}
+
+ fn cancel_init(_: Box<Self>) {
+ // In this case, we can just drop it without any checks,
+ // since it cannot have been locked yet.
+ }
+}
+
+impl Drop for AllocatedMutex {
#[inline]
- pub unsafe fn unlock(&self) {
- let r = libc::pthread_mutex_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_mutex_destroy(self.0.get()) };
+ if cfg!(target_os = "dragonfly") {
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
+}
+
+impl Mutex {
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- libc::pthread_mutex_trylock(self.inner.get()) == 0
+ pub const fn new() -> Mutex {
+ Mutex { inner: LazyBox::new() }
}
+
#[inline]
- #[cfg(not(target_os = "dragonfly"))]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
+ pub unsafe fn lock(&self) {
+ let r = libc::pthread_mutex_lock(raw(self));
debug_assert_eq!(r, 0);
}
+
#[inline]
- #[cfg(target_os = "dragonfly")]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
- // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
- // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
- // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
- // this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
+ pub unsafe fn unlock(&self) {
+ let r = libc::pthread_mutex_unlock(raw(self));
+ debug_assert_eq!(r, 0);
}
-}
-impl Drop for Mutex {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(raw(self)) == 0
}
}
diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs
index adfe2a883..04662be9d 100644
--- a/library/std/src/sys/unix/locks/pthread_rwlock.rs
+++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs
@@ -3,20 +3,26 @@ use crate::mem::forget;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-pub struct RwLock {
+struct AllocatedRwLock {
inner: UnsafeCell<libc::pthread_rwlock_t>,
write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
num_readers: AtomicUsize,
}
-pub(crate) type MovableRwLock = LazyBox<RwLock>;
+unsafe impl Send for AllocatedRwLock {}
+unsafe impl Sync for AllocatedRwLock {}
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
+pub struct RwLock {
+ inner: LazyBox<AllocatedRwLock>,
+}
-impl LazyInit for RwLock {
+impl LazyInit for AllocatedRwLock {
fn init() -> Box<Self> {
- Box::new(Self::new())
+ Box::new(AllocatedRwLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ })
}
fn destroy(mut rwlock: Box<Self>) {
@@ -35,17 +41,39 @@ impl LazyInit for RwLock {
}
}
+impl AllocatedRwLock {
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl Drop for AllocatedRwLock {
+ fn drop(&mut self) {
+ let r = unsafe { libc::pthread_rwlock_destroy(self.inner.get()) };
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
+ }
+}
+
impl RwLock {
+ #[inline]
pub const fn new() -> RwLock {
- RwLock {
- inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
- write_locked: UnsafeCell::new(false),
- num_readers: AtomicUsize::new(0),
- }
+ RwLock { inner: LazyBox::new() }
}
+
#[inline]
- pub unsafe fn read(&self) {
- let r = libc::pthread_rwlock_rdlock(self.inner.get());
+ pub fn read(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_rdlock(lock.inner.get()) };
// According to POSIX, when a thread tries to acquire this read lock
// while it already holds the write lock
@@ -62,51 +90,61 @@ impl RwLock {
// got the write lock more than once, or got a read and a write lock.
if r == libc::EAGAIN {
panic!("rwlock maximum reader count exceeded");
- } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+ } else if r == libc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_rdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock read lock would result in deadlock");
} else {
// POSIX does not make guarantees about all the errors that may be returned.
// See issue #94705 for more details.
assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
}
}
+
#[inline]
- pub unsafe fn try_read(&self) -> bool {
- let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ pub fn try_read(&self) -> bool {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_tryrdlock(lock.inner.get()) };
if r == 0 {
- if *self.write_locked.get() {
+ if unsafe { *lock.write_locked.get() } {
// `pthread_rwlock_tryrdlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
false
} else {
- self.num_readers.fetch_add(1, Ordering::Relaxed);
+ lock.num_readers.fetch_add(1, Ordering::Relaxed);
true
}
} else {
false
}
}
+
#[inline]
- pub unsafe fn write(&self) {
- let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ pub fn write(&self) {
+ let lock = &*self.inner;
+ let r = unsafe { libc::pthread_rwlock_wrlock(lock.inner.get()) };
// See comments above for why we check for EDEADLK and write_locked. For the same reason,
// we also need to check that there are no readers (tracked in `num_readers`).
if r == libc::EDEADLK
- || (r == 0 && *self.write_locked.get())
- || self.num_readers.load(Ordering::Relaxed) != 0
+ || (r == 0 && unsafe { *lock.write_locked.get() })
+ || lock.num_readers.load(Ordering::Relaxed) != 0
{
// Above, we make sure to only access `write_locked` when `r == 0` to avoid
// data races.
if r == 0 {
// `pthread_rwlock_wrlock` succeeded when it should not have.
- self.raw_unlock();
+ unsafe {
+ lock.raw_unlock();
+ }
}
panic!("rwlock write lock would result in deadlock");
} else {
@@ -114,60 +152,44 @@ impl RwLock {
// return EDEADLK or 0. We rely on that.
debug_assert_eq!(r, 0);
}
- *self.write_locked.get() = true;
+
+ unsafe {
+ *lock.write_locked.get() = true;
+ }
}
+
#[inline]
pub unsafe fn try_write(&self) -> bool {
- let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ let lock = &*self.inner;
+ let r = libc::pthread_rwlock_trywrlock(lock.inner.get());
if r == 0 {
- if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
// `pthread_rwlock_trywrlock` succeeded when it should not have.
- self.raw_unlock();
+ lock.raw_unlock();
false
} else {
- *self.write_locked.get() = true;
+ *lock.write_locked.get() = true;
true
}
} else {
false
}
}
- #[inline]
- unsafe fn raw_unlock(&self) {
- let r = libc::pthread_rwlock_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
+
#[inline]
pub unsafe fn read_unlock(&self) {
- debug_assert!(!*self.write_locked.get());
- self.num_readers.fetch_sub(1, Ordering::Relaxed);
- self.raw_unlock();
- }
- #[inline]
- pub unsafe fn write_unlock(&self) {
- debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
- debug_assert!(*self.write_locked.get());
- *self.write_locked.get() = false;
- self.raw_unlock();
+ let lock = &*self.inner;
+ debug_assert!(!*lock.write_locked.get());
+ lock.num_readers.fetch_sub(1, Ordering::Relaxed);
+ lock.raw_unlock();
}
- #[inline]
- unsafe fn destroy(&mut self) {
- let r = libc::pthread_rwlock_destroy(self.inner.get());
- // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
- // rwlock that was just initialized with
- // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
- // or pthread_rwlock_init() is called, this behaviour no longer occurs.
- if cfg!(target_os = "dragonfly") {
- debug_assert!(r == 0 || r == libc::EINVAL);
- } else {
- debug_assert_eq!(r, 0);
- }
- }
-}
-impl Drop for RwLock {
#[inline]
- fn drop(&mut self) {
- unsafe { self.destroy() };
+ pub unsafe fn write_unlock(&self) {
+ let lock = &*self.inner;
+ debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*lock.write_locked.get());
+ *lock.write_locked.get() = false;
+ lock.raw_unlock();
}
}
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index cca9c6767..d5abd9b58 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -149,7 +149,11 @@ impl From<libc::timespec> for Timespec {
}
}
-#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
+#[cfg(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+))]
mod inner {
use crate::sync::atomic::{AtomicU64, Ordering};
use crate::sys::cvt;
@@ -265,7 +269,11 @@ mod inner {
}
}
-#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "watchos")))]
+#[cfg(not(any(
+ all(target_os = "macos", any(not(target_arch = "aarch64"))),
+ target_os = "ios",
+ target_os = "watchos"
+)))]
mod inner {
use crate::fmt;
use crate::mem::MaybeUninit;
@@ -281,7 +289,11 @@ mod inner {
impl Instant {
pub fn now() -> Instant {
- Instant { t: Timespec::now(libc::CLOCK_MONOTONIC) }
+ #[cfg(target_os = "macos")]
+ const clock_id: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
+ #[cfg(not(target_os = "macos"))]
+ const clock_id: libc::clockid_t = libc::CLOCK_MONOTONIC;
+ Instant { t: Timespec::now(clock_id) }
}
pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
@@ -312,13 +324,8 @@ mod inner {
}
}
- #[cfg(not(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon")))]
- pub type clock_t = libc::c_int;
- #[cfg(any(target_os = "dragonfly", target_os = "espidf", target_os = "horizon"))]
- pub type clock_t = libc::c_ulong;
-
impl Timespec {
- pub fn now(clock: clock_t) -> Timespec {
+ pub fn now(clock: libc::clockid_t) -> Timespec {
// Try to use 64-bit time in preparation for Y2038.
#[cfg(all(target_os = "linux", target_env = "gnu", target_pointer_width = "32"))]
{
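
The Instant::now hunk above selects the clock id with a cfg at the use site instead of a per-platform clock_t alias. A small stand-alone version of the same pattern is below; it assumes the libc crate as a dependency and a Unix-like target, just as std's implementation does, and the function name is illustrative.

    use std::mem;

    #[cfg(target_os = "macos")]
    const CLOCK_ID: libc::clockid_t = libc::CLOCK_UPTIME_RAW;
    #[cfg(not(target_os = "macos"))]
    const CLOCK_ID: libc::clockid_t = libc::CLOCK_MONOTONIC;

    fn monotonic_now() -> (i64, i64) {
        // Zero-initialize to cope with platforms whose timespec has extra fields.
        let mut ts: libc::timespec = unsafe { mem::zeroed() };
        let r = unsafe { libc::clock_gettime(CLOCK_ID, &mut ts) };
        assert_eq!(r, 0);
        (ts.tv_sec as i64, ts.tv_nsec as i64)
    }

    fn main() {
        let (sec, nsec) = monotonic_now();
        println!("monotonic time: {sec}s {nsec}ns");
    }
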
diff --git a/library/std/src/sys/unix/weak.rs b/library/std/src/sys/unix/weak.rs
index e4ff21b25..f5a4ce929 100644
--- a/library/std/src/sys/unix/weak.rs
+++ b/library/std/src/sys/unix/weak.rs
@@ -29,7 +29,21 @@ use crate::ptr;
use crate::sync::atomic::{self, AtomicPtr, Ordering};
// We can use true weak linkage on ELF targets.
-#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), not(bootstrap)))]
+pub(crate) macro weak {
+ (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+ let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
+ extern "C" {
+ #[linkage = "extern_weak"]
+ static $name: Option<unsafe extern "C" fn($($t),*) -> $ret>;
+ }
+ #[allow(unused_unsafe)]
+ ExternWeak::new(unsafe { $name })
+ };
+ )
+}
+
+#[cfg(all(not(any(target_os = "macos", target_os = "ios")), bootstrap))]
pub(crate) macro weak {
(fn $name:ident($($t:ty),*) -> $ret:ty) => (
let ref $name: ExternWeak<unsafe extern "C" fn($($t),*) -> $ret> = {
@@ -47,11 +61,31 @@ pub(crate) macro weak {
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) use self::dlsym as weak;
+#[cfg(not(bootstrap))]
+pub(crate) struct ExternWeak<F: Copy> {
+ weak_ptr: Option<F>,
+}
+
+#[cfg(not(bootstrap))]
+impl<F: Copy> ExternWeak<F> {
+ #[inline]
+ pub(crate) fn new(weak_ptr: Option<F>) -> Self {
+ ExternWeak { weak_ptr }
+ }
+
+ #[inline]
+ pub(crate) fn get(&self) -> Option<F> {
+ self.weak_ptr
+ }
+}
+
+#[cfg(bootstrap)]
pub(crate) struct ExternWeak<F> {
weak_ptr: *const libc::c_void,
_marker: PhantomData<F>,
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn new(weak_ptr: *const libc::c_void) -> Self {
@@ -59,6 +93,7 @@ impl<F> ExternWeak<F> {
}
}
+#[cfg(bootstrap)]
impl<F> ExternWeak<F> {
#[inline]
pub(crate) fn get(&self) -> Option<F> {
diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs
index 527a26a12..3f0943b50 100644
--- a/library/std/src/sys/unsupported/locks/condvar.rs
+++ b/library/std/src/sys/unsupported/locks/condvar.rs
@@ -3,8 +3,6 @@ use crate::time::Duration;
pub struct Condvar {}
-pub type MovableCondvar = Condvar;
-
impl Condvar {
#[inline]
#[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
@@ -13,10 +11,10 @@ impl Condvar {
}
#[inline]
- pub unsafe fn notify_one(&self) {}
+ pub fn notify_one(&self) {}
#[inline]
- pub unsafe fn notify_all(&self) {}
+ pub fn notify_all(&self) {}
pub unsafe fn wait(&self, _mutex: &Mutex) {
panic!("condvar wait not supported")
diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs
index 602a2d623..0e0f9eccb 100644
--- a/library/std/src/sys/unsupported/locks/mod.rs
+++ b/library/std/src/sys/unsupported/locks/mod.rs
@@ -1,6 +1,6 @@
mod condvar;
mod mutex;
mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs
index 87ea475c6..4a13c55fb 100644
--- a/library/std/src/sys/unsupported/locks/mutex.rs
+++ b/library/std/src/sys/unsupported/locks/mutex.rs
@@ -5,8 +5,6 @@ pub struct Mutex {
locked: Cell<bool>,
}
-pub type MovableMutex = Mutex;
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {} // no threads on this platform
@@ -18,7 +16,7 @@ impl Mutex {
}
#[inline]
- pub unsafe fn lock(&self) {
+ pub fn lock(&self) {
assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
}
@@ -28,7 +26,7 @@ impl Mutex {
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.locked.replace(true) == false
}
}
diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs
index 5292691b9..789ef9b29 100644
--- a/library/std/src/sys/unsupported/locks/rwlock.rs
+++ b/library/std/src/sys/unsupported/locks/rwlock.rs
@@ -5,8 +5,6 @@ pub struct RwLock {
mode: Cell<isize>,
}
-pub type MovableRwLock = RwLock;
-
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {} // no threads on this platform
@@ -18,7 +16,7 @@ impl RwLock {
}
#[inline]
- pub unsafe fn read(&self) {
+ pub fn read(&self) {
let m = self.mode.get();
if m >= 0 {
self.mode.set(m + 1);
@@ -28,7 +26,7 @@ impl RwLock {
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
+ pub fn try_read(&self) -> bool {
let m = self.mode.get();
if m >= 0 {
self.mode.set(m + 1);
@@ -39,14 +37,14 @@ impl RwLock {
}
#[inline]
- pub unsafe fn write(&self) {
+ pub fn write(&self) {
if self.mode.replace(-1) != 0 {
rtabort!("rwlock locked for reading")
}
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
+ pub fn try_write(&self) -> bool {
if self.mode.get() == 0 {
self.mode.set(-1);
true
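
Across the unsupported backend (and, below, Windows and wasm) the Movable* aliases disappear and the lock/notify operations become safe methods; unsafety is kept only where it is genuinely required. The public std API is untouched, as a trivial reminder:

    use std::sync::{Condvar, Mutex};

    fn main() {
        // Locking is a safe call at every layer; the platform shims above now
        // mirror the public signatures.
        let m = Mutex::new(0);
        *m.lock().unwrap() += 1;

        let cv = Condvar::new();
        cv.notify_one(); // a safe no-op when nobody is waiting
        assert_eq!(*m.lock().unwrap(), 1);
    }
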
diff --git a/library/std/src/sys/wasi/net.rs b/library/std/src/sys/wasi/net.rs
index 590d268c3..cf4ebba1a 100644
--- a/library/std/src/sys/wasi/net.rs
+++ b/library/std/src/sys/wasi/net.rs
@@ -119,8 +119,14 @@ impl TcpStream {
unsupported()
}
- pub fn shutdown(&self, _: Shutdown) -> io::Result<()> {
- unsupported()
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ let wasi_how = match how {
+ Shutdown::Read => wasi::SDFLAGS_RD,
+ Shutdown::Write => wasi::SDFLAGS_WR,
+ Shutdown::Both => wasi::SDFLAGS_RD | wasi::SDFLAGS_WR,
+ };
+
+ unsafe { wasi::sock_shutdown(self.socket().as_raw_fd() as _, wasi_how).map_err(err2io) }
}
pub fn duplicate(&self) -> io::Result<TcpStream> {
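
With the hunk above, TcpStream::shutdown on WASI maps Shutdown to SDFLAGS and calls wasi::sock_shutdown instead of returning an "unsupported" error. A small usage sketch at the std level (assumes a stream that was already connected, e.g. handed in over a preopened descriptor):

    use std::io::{self, Write};
    use std::net::{Shutdown, TcpStream};

    // Half-close the write side once the request body has been sent; on WASI
    // this now becomes sock_shutdown(fd, SDFLAGS_WR).
    fn finish_request(stream: &mut TcpStream, body: &[u8]) -> io::Result<()> {
        stream.write_all(body)?;
        stream.shutdown(Shutdown::Write)
    }
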
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 93838390b..d68c3e5f1 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -55,9 +55,9 @@ cfg_if::cfg_if! {
mod futex_condvar;
mod futex_mutex;
mod futex_rwlock;
- pub(crate) use futex_condvar::{Condvar, MovableCondvar};
- pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::MovableRwLock;
+ pub(crate) use futex_condvar::Condvar;
+ pub(crate) use futex_mutex::Mutex;
+ pub(crate) use futex_rwlock::RwLock;
}
#[path = "atomics/futex.rs"]
pub mod futex;
diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs
index 01f262982..6741ae46d 100644
--- a/library/std/src/sys/windows/args.rs
+++ b/library/std/src/sys/windows/args.rs
@@ -9,17 +9,16 @@ mod tests;
use crate::ffi::OsString;
use crate::fmt;
use crate::io;
-use crate::marker::PhantomData;
use crate::num::NonZeroU16;
use crate::os::windows::prelude::*;
use crate::path::PathBuf;
-use crate::ptr::NonNull;
use crate::sys::c;
use crate::sys::process::ensure_no_nuls;
use crate::sys::windows::os::current_exe;
+use crate::sys_common::wstr::WStrUnits;
use crate::vec;
-use core::iter;
+use crate::iter;
/// This is the const equivalent to `NonZeroU16::new(n).unwrap()`
///
@@ -199,55 +198,6 @@ impl ExactSizeIterator for Args {
}
}
-/// A safe iterator over a LPWSTR
-/// (aka a pointer to a series of UTF-16 code units terminated by a NULL).
-struct WStrUnits<'a> {
- // The pointer must never be null...
- lpwstr: NonNull<u16>,
- // ...and the memory it points to must be valid for this lifetime.
- lifetime: PhantomData<&'a [u16]>,
-}
-impl WStrUnits<'_> {
- /// Create the iterator. Returns `None` if `lpwstr` is null.
- ///
- /// SAFETY: `lpwstr` must point to a null-terminated wide string that lives
- /// at least as long as the lifetime of this struct.
- unsafe fn new(lpwstr: *const u16) -> Option<Self> {
- Some(Self { lpwstr: NonNull::new(lpwstr as _)?, lifetime: PhantomData })
- }
- fn peek(&self) -> Option<NonZeroU16> {
- // SAFETY: It's always safe to read the current item because we don't
- // ever move out of the array's bounds.
- unsafe { NonZeroU16::new(*self.lpwstr.as_ptr()) }
- }
- /// Advance the iterator while `predicate` returns true.
- /// Returns the number of items it advanced by.
- fn advance_while<P: FnMut(NonZeroU16) -> bool>(&mut self, mut predicate: P) -> usize {
- let mut counter = 0;
- while let Some(w) = self.peek() {
- if !predicate(w) {
- break;
- }
- counter += 1;
- self.next();
- }
- counter
- }
-}
-impl Iterator for WStrUnits<'_> {
- // This can never return zero as that marks the end of the string.
- type Item = NonZeroU16;
- fn next(&mut self) -> Option<NonZeroU16> {
- // SAFETY: If NULL is reached we immediately return.
- // Therefore it's safe to advance the pointer after that.
- unsafe {
- let next = self.peek()?;
- self.lpwstr = NonNull::new_unchecked(self.lpwstr.as_ptr().add(1));
- Some(next)
- }
- }
-}
-
#[derive(Debug)]
pub(crate) enum Arg {
/// Add quotes (if needed)
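
The WStrUnits iterator deleted here is not dropped from the library; it moves to crate::sys_common::wstr (note the new import at the top of this file) so other Windows code can share it. A standalone sketch of the same idea, walking the UTF-16 units of a NUL-terminated wide string, without the internal type:

    // Illustrative only; std keeps its own WStrUnits in sys_common::wstr.
    fn wide_units(mut p: *const u16) -> Vec<u16> {
        let mut out = Vec::new();
        // SAFETY (assumed by this sketch): `p` is non-null and points to a
        // NUL-terminated buffer that outlives the loop.
        unsafe {
            while *p != 0 {
                out.push(*p);
                p = p.add(1);
            }
        }
        out
    }

    fn main() {
        let buf: Vec<u16> = "ab".encode_utf16().chain([0]).collect();
        assert_eq!(wide_units(buf.as_ptr()), "ab".encode_utf16().collect::<Vec<u16>>());
    }
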
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index be6fc2ebb..81461de4f 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -56,6 +56,7 @@ pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION;
pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
pub type LPSTARTUPINFO = *mut STARTUPINFO;
pub type LPVOID = *mut c_void;
+pub type LPCVOID = *const c_void;
pub type LPWCH = *mut WCHAR;
pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
pub type LPWSADATA = *mut WSADATA;
@@ -362,7 +363,7 @@ impl IO_STATUS_BLOCK {
pub type LPOVERLAPPED_COMPLETION_ROUTINE = unsafe extern "system" fn(
dwErrorCode: DWORD,
- dwNumberOfBytesTransfered: DWORD,
+ dwNumberOfBytesTransferred: DWORD,
lpOverlapped: *mut OVERLAPPED,
);
@@ -773,6 +774,16 @@ pub struct timeval {
pub tv_usec: c_long,
}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CONSOLE_READCONSOLE_CONTROL {
+ pub nLength: ULONG,
+ pub nInitialChars: ULONG,
+ pub dwCtrlWakeupMask: ULONG,
+ pub dwControlKeyState: ULONG,
+}
+pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
+
// Desktop specific functions & types
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
@@ -802,17 +813,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
extern "system" fn(ExceptionInfo: *mut EXCEPTION_POINTERS) -> LONG;
#[repr(C)]
- #[derive(Copy, Clone)]
- pub struct CONSOLE_READCONSOLE_CONTROL {
- pub nLength: ULONG,
- pub nInitialChars: ULONG,
- pub dwCtrlWakeupMask: ULONG,
- pub dwControlKeyState: ULONG,
- }
-
- pub type PCONSOLE_READCONSOLE_CONTROL = *mut CONSOLE_READCONSOLE_CONTROL;
-
- #[repr(C)]
pub struct BY_HANDLE_FILE_INFORMATION {
pub dwFileAttributes: DWORD,
pub ftCreationTime: FILETIME,
@@ -827,7 +827,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
}
pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
- pub type LPCVOID = *const c_void;
pub const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
@@ -855,24 +854,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
#[link(name = "kernel32")]
extern "system" {
- // Functions forbidden when targeting UWP
- pub fn ReadConsoleW(
- hConsoleInput: HANDLE,
- lpBuffer: LPVOID,
- nNumberOfCharsToRead: DWORD,
- lpNumberOfCharsRead: LPDWORD,
- pInputControl: PCONSOLE_READCONSOLE_CONTROL,
- ) -> BOOL;
-
- pub fn WriteConsoleW(
- hConsoleOutput: HANDLE,
- lpBuffer: LPCVOID,
- nNumberOfCharsToWrite: DWORD,
- lpNumberOfCharsWritten: LPDWORD,
- lpReserved: LPVOID,
- ) -> BOOL;
-
- pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// Allowed but unused by UWP
pub fn GetFileInformationByHandle(
hFile: HANDLE,
@@ -914,6 +895,22 @@ if #[cfg(target_vendor = "uwp")] {
extern "system" {
pub fn GetCurrentProcessId() -> DWORD;
+ pub fn ReadConsoleW(
+ hConsoleInput: HANDLE,
+ lpBuffer: LPVOID,
+ nNumberOfCharsToRead: DWORD,
+ lpNumberOfCharsRead: LPDWORD,
+ pInputControl: PCONSOLE_READCONSOLE_CONTROL,
+ ) -> BOOL;
+ pub fn WriteConsoleW(
+ hConsoleOutput: HANDLE,
+ lpBuffer: LPCVOID,
+ nNumberOfCharsToWrite: DWORD,
+ lpNumberOfCharsWritten: LPDWORD,
+ lpReserved: LPVOID,
+ ) -> BOOL;
+ pub fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+
pub fn GetSystemDirectoryW(lpBuffer: LPWSTR, uSize: UINT) -> UINT;
pub fn RemoveDirectoryW(lpPathName: LPCWSTR) -> BOOL;
pub fn SetFileAttributesW(lpFileName: LPCWSTR, dwFileAttributes: DWORD) -> BOOL;
diff --git a/library/std/src/sys/windows/locks/condvar.rs b/library/std/src/sys/windows/locks/condvar.rs
index be9a2abbe..66fafa2c0 100644
--- a/library/std/src/sys/windows/locks/condvar.rs
+++ b/library/std/src/sys/windows/locks/condvar.rs
@@ -8,8 +8,6 @@ pub struct Condvar {
inner: UnsafeCell<c::CONDITION_VARIABLE>,
}
-pub type MovableCondvar = Condvar;
-
unsafe impl Send for Condvar {}
unsafe impl Sync for Condvar {}
@@ -41,12 +39,12 @@ impl Condvar {
}
#[inline]
- pub unsafe fn notify_one(&self) {
- c::WakeConditionVariable(self.inner.get())
+ pub fn notify_one(&self) {
+ unsafe { c::WakeConditionVariable(self.inner.get()) }
}
#[inline]
- pub unsafe fn notify_all(&self) {
- c::WakeAllConditionVariable(self.inner.get())
+ pub fn notify_all(&self) {
+ unsafe { c::WakeAllConditionVariable(self.inner.get()) }
}
}
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
index 602a2d623..0e0f9eccb 100644
--- a/library/std/src/sys/windows/locks/mod.rs
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -1,6 +1,6 @@
mod condvar;
mod mutex;
mod rwlock;
-pub use condvar::{Condvar, MovableCondvar};
-pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::MovableRwLock;
+pub use condvar::Condvar;
+pub use mutex::Mutex;
+pub use rwlock::RwLock;
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
index 91207f5f4..ef2f84082 100644
--- a/library/std/src/sys/windows/locks/mutex.rs
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -21,9 +21,6 @@ pub struct Mutex {
srwlock: UnsafeCell<c::SRWLOCK>,
}
-// Windows SRW Locks are movable (while not borrowed).
-pub type MovableMutex = Mutex;
-
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
@@ -39,13 +36,15 @@ impl Mutex {
}
#[inline]
- pub unsafe fn lock(&self) {
- c::AcquireSRWLockExclusive(raw(self));
+ pub fn lock(&self) {
+ unsafe {
+ c::AcquireSRWLockExclusive(raw(self));
+ }
}
#[inline]
- pub unsafe fn try_lock(&self) -> bool {
- c::TryAcquireSRWLockExclusive(raw(self)) != 0
+ pub fn try_lock(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockExclusive(raw(self)) != 0 }
}
#[inline]
diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs
index fa5ffe574..e69415baa 100644
--- a/library/std/src/sys/windows/locks/rwlock.rs
+++ b/library/std/src/sys/windows/locks/rwlock.rs
@@ -5,8 +5,6 @@ pub struct RwLock {
inner: UnsafeCell<c::SRWLOCK>,
}
-pub type MovableRwLock = RwLock;
-
unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}
@@ -16,20 +14,20 @@ impl RwLock {
RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
}
#[inline]
- pub unsafe fn read(&self) {
- c::AcquireSRWLockShared(self.inner.get())
+ pub fn read(&self) {
+ unsafe { c::AcquireSRWLockShared(self.inner.get()) }
}
#[inline]
- pub unsafe fn try_read(&self) -> bool {
- c::TryAcquireSRWLockShared(self.inner.get()) != 0
+ pub fn try_read(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
}
#[inline]
- pub unsafe fn write(&self) {
- c::AcquireSRWLockExclusive(self.inner.get())
+ pub fn write(&self) {
+ unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
}
#[inline]
- pub unsafe fn try_write(&self) -> bool {
- c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ pub fn try_write(&self) -> bool {
+ unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
}
#[inline]
pub unsafe fn read_unlock(&self) {
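
As on the other platforms, the SRW-lock acquire paths become safe methods that wrap the kernel32 calls in internal unsafe blocks; only the unlock methods stay unsafe, because the wrapper cannot verify that the caller actually holds the lock. None of this is visible through std::sync; a minimal sketch of the public RwLock these shims back on Windows:

    use std::sync::RwLock;

    fn main() {
        let data = RwLock::new(vec![1, 2, 3]);
        {
            // A shared read maps to AcquireSRWLockShared on Windows.
            let r = data.read().unwrap();
            assert_eq!(r.len(), 3);
        } // dropping the guard releases the shared lock
        // An exclusive write maps to AcquireSRWLockExclusive.
        data.write().unwrap().push(4);
        assert_eq!(data.read().unwrap().len(), 4);
    }
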
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index eab9b9612..e67411e16 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -29,6 +29,7 @@ pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
+pub mod stdio;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
@@ -36,12 +37,9 @@ pub mod thread_parker;
pub mod time;
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
- pub mod stdio;
pub mod stack_overflow;
} else {
- pub mod stdio_uwp;
pub mod stack_overflow_uwp;
- pub use self::stdio_uwp as stdio;
pub use self::stack_overflow_uwp as stack_overflow;
}
}
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index 013c776c4..9f26acc45 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -324,17 +324,18 @@ impl AnonPipe {
let mut async_result: Option<AsyncResult> = None;
struct AsyncResult {
error: u32,
- transfered: u32,
+ transferred: u32,
}
// STEP 3: The callback.
unsafe extern "system" fn callback(
dwErrorCode: u32,
- dwNumberOfBytesTransfered: u32,
+ dwNumberOfBytesTransferred: u32,
lpOverlapped: *mut c::OVERLAPPED,
) {
// Set `async_result` using a pointer smuggled through `hEvent`.
- let result = AsyncResult { error: dwErrorCode, transfered: dwNumberOfBytesTransfered };
+ let result =
+ AsyncResult { error: dwErrorCode, transferred: dwNumberOfBytesTransferred };
*(*lpOverlapped).hEvent.cast::<Option<AsyncResult>>() = Some(result);
}
@@ -365,7 +366,7 @@ impl AnonPipe {
// STEP 4: Return the result.
// `async_result` is always `Some` at this point
match result.error {
- c::ERROR_SUCCESS => Ok(result.transfered as usize),
+ c::ERROR_SUCCESS => Ok(result.transferred as usize),
error => Err(io::Error::from_raw_os_error(error as _)),
}
}
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 9cbb4ef19..31e9b34fb 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -252,10 +252,6 @@ impl Command {
) -> io::Result<(Process, StdioPipes)> {
let maybe_env = self.env.capture_if_changed();
- let mut si = zeroed_startupinfo();
- si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD;
- si.dwFlags = c::STARTF_USESTDHANDLES;
-
let child_paths = if let Some(env) = maybe_env.as_ref() {
env.get(&EnvKey::new("PATH")).map(|s| s.as_os_str())
} else {
@@ -314,9 +310,21 @@ impl Command {
let stdin = stdin.to_handle(c::STD_INPUT_HANDLE, &mut pipes.stdin)?;
let stdout = stdout.to_handle(c::STD_OUTPUT_HANDLE, &mut pipes.stdout)?;
let stderr = stderr.to_handle(c::STD_ERROR_HANDLE, &mut pipes.stderr)?;
- si.hStdInput = stdin.as_raw_handle();
- si.hStdOutput = stdout.as_raw_handle();
- si.hStdError = stderr.as_raw_handle();
+
+ let mut si = zeroed_startupinfo();
+ si.cb = mem::size_of::<c::STARTUPINFO>() as c::DWORD;
+
+ // If at least one of stdin, stdout or stderr are set (i.e. are non null)
+ // then set the `hStd` fields in `STARTUPINFO`.
+ // Otherwise skip this and allow the OS to apply its default behaviour.
+ // This provides more consistent behaviour between Win7 and Win8+.
+ let is_set = |stdio: &Handle| !stdio.as_raw_handle().is_null();
+ if is_set(&stderr) || is_set(&stdout) || is_set(&stdin) {
+ si.dwFlags |= c::STARTF_USESTDHANDLES;
+ si.hStdInput = stdin.as_raw_handle();
+ si.hStdOutput = stdout.as_raw_handle();
+ si.hStdError = stderr.as_raw_handle();
+ }
unsafe {
cvt(c::CreateProcessW(
@@ -513,9 +521,6 @@ fn program_exists(path: &Path) -> Option<Vec<u16>> {
impl Stdio {
fn to_handle(&self, stdio_id: c::DWORD, pipe: &mut Option<AnonPipe>) -> io::Result<Handle> {
match *self {
- // If no stdio handle is available, then inherit means that it
- // should still be unavailable so propagate the
- // INVALID_HANDLE_VALUE.
Stdio::Inherit => match stdio::get_handle(stdio_id) {
Ok(io) => unsafe {
let io = Handle::from_raw_handle(io);
@@ -523,7 +528,8 @@ impl Stdio {
io.into_raw_handle();
ret
},
- Err(..) => unsafe { Ok(Handle::from_raw_handle(c::INVALID_HANDLE_VALUE)) },
+ // If no stdio handle is available, then propagate the null value.
+ Err(..) => unsafe { Ok(Handle::from_raw_handle(ptr::null_mut())) },
},
Stdio::MakePipe => {
@@ -730,9 +736,9 @@ fn zeroed_startupinfo() -> c::STARTUPINFO {
wShowWindow: 0,
cbReserved2: 0,
lpReserved2: ptr::null_mut(),
- hStdInput: c::INVALID_HANDLE_VALUE,
- hStdOutput: c::INVALID_HANDLE_VALUE,
- hStdError: c::INVALID_HANDLE_VALUE,
+ hStdInput: ptr::null_mut(),
+ hStdOutput: ptr::null_mut(),
+ hStdError: ptr::null_mut(),
}
}
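
The process hunk builds STARTUPINFO only after the stdio handles are known, sets STARTF_USESTDHANDLES only when at least one handle is non-null, and uses null instead of INVALID_HANDLE_VALUE as the "not provided" marker so that CreateProcessW can apply its default behaviour. The Command API itself is unchanged; a small usage sketch:

    use std::process::{Command, Stdio};

    fn main() -> std::io::Result<()> {
        // Explicitly inheriting all three streams; whether the hStd* fields end
        // up filled in or left null (e.g. no console available) is now decided
        // by the logic in the hunk above, not by the caller.
        let status = Command::new("cmd")
            .args(["/C", "echo hello"])
            .stdin(Stdio::inherit())
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit())
            .status()?;
        println!("child exited with {status}");
        Ok(())
    }
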
diff --git a/library/std/src/sys/windows/stdio_uwp.rs b/library/std/src/sys/windows/stdio_uwp.rs
deleted file mode 100644
index 32550f796..000000000
--- a/library/std/src/sys/windows/stdio_uwp.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-#![unstable(issue = "none", feature = "windows_stdio")]
-
-use crate::io;
-use crate::mem::ManuallyDrop;
-use crate::os::windows::io::FromRawHandle;
-use crate::sys::c;
-use crate::sys::handle::Handle;
-
-pub struct Stdin {}
-pub struct Stdout;
-pub struct Stderr;
-
-const MAX_BUFFER_SIZE: usize = 8192;
-pub const STDIN_BUF_SIZE: usize = MAX_BUFFER_SIZE / 2 * 3;
-
-pub fn get_handle(handle_id: c::DWORD) -> io::Result<c::HANDLE> {
- let handle = unsafe { c::GetStdHandle(handle_id) };
- if handle == c::INVALID_HANDLE_VALUE {
- Err(io::Error::last_os_error())
- } else if handle.is_null() {
- Err(io::Error::from_raw_os_error(c::ERROR_INVALID_HANDLE as i32))
- } else {
- Ok(handle)
- }
-}
-
-fn write(handle_id: c::DWORD, data: &[u8]) -> io::Result<usize> {
- let handle = get_handle(handle_id)?;
- // SAFETY: The handle returned from `get_handle` must be valid and non-null.
- let handle = unsafe { Handle::from_raw_handle(handle) };
- ManuallyDrop::new(handle).write(data)
-}
-
-impl Stdin {
- pub const fn new() -> Stdin {
- Stdin {}
- }
-}
-
-impl io::Read for Stdin {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- let handle = get_handle(c::STD_INPUT_HANDLE)?;
- // SAFETY: The handle returned from `get_handle` must be valid and non-null.
- let handle = unsafe { Handle::from_raw_handle(handle) };
- ManuallyDrop::new(handle).read(buf)
- }
-}
-
-impl Stdout {
- pub const fn new() -> Stdout {
- Stdout
- }
-}
-
-impl io::Write for Stdout {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- write(c::STD_OUTPUT_HANDLE, buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- Ok(())
- }
-}
-
-impl Stderr {
- pub const fn new() -> Stderr {
- Stderr
- }
-}
-
-impl io::Write for Stderr {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- write(c::STD_ERROR_HANDLE, buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- Ok(())
- }
-}
-
-pub fn is_ebadf(err: &io::Error) -> bool {
- err.raw_os_error() == Some(c::ERROR_INVALID_HANDLE as i32)
-}
-
-pub fn panic_output() -> Option<impl io::Write> {
- Some(Stderr::new())
-}
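
stdio_uwp.rs can be deleted because the console functions it worked around (ReadConsoleW, WriteConsoleW, GetConsoleMode) are now declared for UWP targets as well (see the c.rs hunk above), so UWP builds share the regular sys::windows::stdio module. User-visible behaviour should be unchanged: writing to stdout takes the console path when a console is attached and falls back to a plain handle write otherwise, as in this trivial sketch:

    use std::io::{self, Write};

    fn main() -> io::Result<()> {
        // Same code whether stdout is a real console or redirected to a pipe.
        let mut out = io::stdout().lock();
        out.write_all(b"hello from std\n")?;
        out.flush()
    }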