Diffstat (limited to 'library/std/src/sys')
-rw-r--r--  library/std/src/sys/common/mod.rs | 4
-rw-r--r--  library/std/src/sys/common/small_c_string.rs | 58
-rw-r--r--  library/std/src/sys/common/tests.rs | 66
-rw-r--r--  library/std/src/sys/hermit/args.rs | 74
-rw-r--r--  library/std/src/sys/hermit/condvar.rs | 90
-rw-r--r--  library/std/src/sys/hermit/fs.rs | 31
-rw-r--r--  library/std/src/sys/hermit/futex.rs | 39
-rw-r--r--  library/std/src/sys/hermit/mod.rs | 21
-rw-r--r--  library/std/src/sys/hermit/mutex.rs | 216
-rw-r--r--  library/std/src/sys/hermit/net.rs | 2
-rw-r--r--  library/std/src/sys/hermit/rwlock.rs | 144
-rw-r--r--  library/std/src/sys/itron/mutex.rs | 6
-rw-r--r--  library/std/src/sys/mod.rs | 2
-rw-r--r--  library/std/src/sys/sgx/abi/thread.rs | 8
-rw-r--r--  library/std/src/sys/sgx/abi/tls/mod.rs | 1
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/alloc.rs | 183
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/mod.rs | 8
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/raw.rs | 24
-rw-r--r--  library/std/src/sys/sgx/abi/usercalls/tests.rs | 34
-rw-r--r--  library/std/src/sys/sgx/mod.rs | 2
-rw-r--r--  library/std/src/sys/sgx/mutex.rs | 3
-rw-r--r--  library/std/src/sys/sgx/thread_local_key.rs | 5
-rw-r--r--  library/std/src/sys/solid/fs.rs | 44
-rw-r--r--  library/std/src/sys/solid/mod.rs | 2
-rw-r--r--  library/std/src/sys/solid/os.rs | 44
-rw-r--r--  library/std/src/sys/solid/thread_local_key.rs | 5
-rw-r--r--  library/std/src/sys/unix/fd.rs | 11
-rw-r--r--  library/std/src/sys/unix/fs.rs | 497
-rw-r--r--  library/std/src/sys/unix/io.rs | 6
-rw-r--r--  library/std/src/sys/unix/kernel_copy.rs | 2
-rw-r--r--  library/std/src/sys/unix/locks/fuchsia_mutex.rs | 5
-rw-r--r--  library/std/src/sys/unix/locks/futex_mutex.rs | 5
-rw-r--r--  library/std/src/sys/unix/locks/futex_rwlock.rs | 2
-rw-r--r--  library/std/src/sys/unix/locks/mod.rs | 6
-rw-r--r--  library/std/src/sys/unix/locks/pthread_condvar.rs | 2
-rw-r--r--  library/std/src/sys/unix/locks/pthread_mutex.rs | 2
-rw-r--r--  library/std/src/sys/unix/mod.rs | 76
-rw-r--r--  library/std/src/sys/unix/net.rs | 22
-rw-r--r--  library/std/src/sys/unix/os.rs | 59
-rw-r--r--  library/std/src/sys/unix/os_str.rs | 40
-rw-r--r--  library/std/src/sys/unix/os_str/tests.rs | 8
-rw-r--r--  library/std/src/sys/unix/process/process_common.rs | 61
-rw-r--r--  library/std/src/sys/unix/process/process_common/tests.rs | 103
-rw-r--r--  library/std/src/sys/unix/process/process_fuchsia.rs | 2
-rw-r--r--  library/std/src/sys/unix/process/process_unix.rs | 80
-rw-r--r--  library/std/src/sys/unix/rand.rs | 18
-rw-r--r--  library/std/src/sys/unix/stdio.rs | 50
-rw-r--r--  library/std/src/sys/unix/thread.rs | 59
-rw-r--r--  library/std/src/sys/unix/thread_local_dtor.rs | 1
-rw-r--r--  library/std/src/sys/unix/thread_local_key.rs | 5
-rw-r--r--  library/std/src/sys/unix/thread_parker.rs | 281
-rw-r--r--  library/std/src/sys/unix/thread_parker/darwin.rs | 131
-rw-r--r--  library/std/src/sys/unix/thread_parker/mod.rs | 32
-rw-r--r--  library/std/src/sys/unix/thread_parker/netbsd.rs | 113
-rw-r--r--  library/std/src/sys/unix/thread_parker/pthread.rs | 271
-rw-r--r--  library/std/src/sys/unix/time.rs | 34
-rw-r--r--  library/std/src/sys/unsupported/alloc.rs | 7
-rw-r--r--  library/std/src/sys/unsupported/common.rs | 2
-rw-r--r--  library/std/src/sys/unsupported/fs.rs | 4
-rw-r--r--  library/std/src/sys/unsupported/io.rs | 4
-rw-r--r--  library/std/src/sys/unsupported/locks/condvar.rs | 1
-rw-r--r--  library/std/src/sys/unsupported/locks/mod.rs | 2
-rw-r--r--  library/std/src/sys/unsupported/locks/mutex.rs | 4
-rw-r--r--  library/std/src/sys/unsupported/locks/rwlock.rs | 1
-rw-r--r--  library/std/src/sys/unsupported/process.rs | 3
-rw-r--r--  library/std/src/sys/unsupported/thread_local_dtor.rs | 1
-rw-r--r--  library/std/src/sys/unsupported/thread_local_key.rs | 5
-rw-r--r--  library/std/src/sys/wasi/fs.rs | 111
-rw-r--r--  library/std/src/sys/wasi/io.rs | 6
-rw-r--r--  library/std/src/sys/wasi/mod.rs | 3
-rw-r--r--  library/std/src/sys/wasi/os.rs | 81
-rw-r--r--  library/std/src/sys/wasi/time.rs | 4
-rw-r--r--  library/std/src/sys/wasm/mod.rs | 2
-rw-r--r--  library/std/src/sys/windows/alloc.rs | 5
-rw-r--r--  library/std/src/sys/windows/c.rs | 65
-rw-r--r--  library/std/src/sys/windows/cmath.rs | 2
-rw-r--r--  library/std/src/sys/windows/compat.rs | 232
-rw-r--r--  library/std/src/sys/windows/fs.rs | 156
-rw-r--r--  library/std/src/sys/windows/handle.rs | 12
-rw-r--r--  library/std/src/sys/windows/io.rs | 76
-rw-r--r--  library/std/src/sys/windows/locks/mod.rs | 2
-rw-r--r--  library/std/src/sys/windows/locks/mutex.rs | 2
-rw-r--r--  library/std/src/sys/windows/mod.rs | 28
-rw-r--r--  library/std/src/sys/windows/os.rs | 6
-rw-r--r--  library/std/src/sys/windows/os_str.rs | 4
-rw-r--r--  library/std/src/sys/windows/path/tests.rs | 2
-rw-r--r--  library/std/src/sys/windows/process.rs | 6
-rw-r--r--  library/std/src/sys/windows/rand.rs | 117
-rw-r--r--  library/std/src/sys/windows/stdio.rs | 41
-rw-r--r--  library/std/src/sys/windows/thread_local_dtor.rs | 4
-rw-r--r--  library/std/src/sys/windows/thread_local_key.rs | 196
-rw-r--r--  library/std/src/sys/windows/thread_local_key/tests.rs | 53
-rw-r--r--  library/std/src/sys/windows/thread_parker.rs | 22
-rw-r--r--  library/std/src/sys_common/backtrace.rs | 9
-rw-r--r--  library/std/src/sys_common/condvar.rs | 1
-rw-r--r--  library/std/src/sys_common/condvar/check.rs | 1
-rw-r--r--  library/std/src/sys_common/mod.rs | 10
-rw-r--r--  library/std/src/sys_common/mutex.rs | 45
-rw-r--r--  library/std/src/sys_common/net.rs | 28
-rw-r--r--  library/std/src/sys_common/once/futex.rs | 134
-rw-r--r--  library/std/src/sys_common/once/generic.rs | 282
-rw-r--r--  library/std/src/sys_common/once/mod.rs | 43
-rw-r--r--  library/std/src/sys_common/remutex.rs | 46
-rw-r--r--  library/std/src/sys_common/remutex/tests.rs | 37
-rw-r--r--  library/std/src/sys_common/rwlock.rs | 61
-rw-r--r--  library/std/src/sys_common/thread_local_key.rs | 26
-rw-r--r--  library/std/src/sys_common/thread_local_key/tests.rs | 9
-rw-r--r--  library/std/src/sys_common/thread_parker/mod.rs | 1
-rw-r--r--  library/std/src/sys_common/wtf8.rs | 95
-rw-r--r--  library/std/src/sys_common/wtf8/tests.rs | 295
110 files changed, 3431 insertions, 2064 deletions
diff --git a/library/std/src/sys/common/mod.rs b/library/std/src/sys/common/mod.rs
index ff64d2aa8..29fc0835d 100644
--- a/library/std/src/sys/common/mod.rs
+++ b/library/std/src/sys/common/mod.rs
@@ -11,3 +11,7 @@
#![allow(dead_code)]
pub mod alloc;
+pub mod small_c_string;
+
+#[cfg(test)]
+mod tests;
diff --git a/library/std/src/sys/common/small_c_string.rs b/library/std/src/sys/common/small_c_string.rs
new file mode 100644
index 000000000..01acd5191
--- /dev/null
+++ b/library/std/src/sys/common/small_c_string.rs
@@ -0,0 +1,58 @@
+use crate::ffi::{CStr, CString};
+use crate::mem::MaybeUninit;
+use crate::path::Path;
+use crate::slice;
+use crate::{io, ptr};
+
+// Make sure to stay under 4096 so the compiler doesn't insert a probe frame:
+// https://docs.rs/compiler_builtins/latest/compiler_builtins/probestack/index.html
+#[cfg(not(target_os = "espidf"))]
+const MAX_STACK_ALLOCATION: usize = 384;
+#[cfg(target_os = "espidf")]
+const MAX_STACK_ALLOCATION: usize = 32;
+
+const NUL_ERR: io::Error =
+ io::const_io_error!(io::ErrorKind::InvalidInput, "file name contained an unexpected NUL byte");
+
+#[inline]
+pub fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ run_with_cstr(path.as_os_str().bytes(), f)
+}
+
+#[inline]
+pub fn run_with_cstr<T, F>(bytes: &[u8], f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ if bytes.len() >= MAX_STACK_ALLOCATION {
+ return run_with_cstr_allocating(bytes, f);
+ }
+
+ let mut buf = MaybeUninit::<[u8; MAX_STACK_ALLOCATION]>::uninit();
+ let buf_ptr = buf.as_mut_ptr() as *mut u8;
+
+ unsafe {
+ ptr::copy_nonoverlapping(bytes.as_ptr(), buf_ptr, bytes.len());
+ buf_ptr.add(bytes.len()).write(0);
+ }
+
+ match CStr::from_bytes_with_nul(unsafe { slice::from_raw_parts(buf_ptr, bytes.len() + 1) }) {
+ Ok(s) => f(s),
+ Err(_) => Err(NUL_ERR),
+ }
+}
+
+#[cold]
+#[inline(never)]
+fn run_with_cstr_allocating<T, F>(bytes: &[u8], f: F) -> io::Result<T>
+where
+ F: FnOnce(&CStr) -> io::Result<T>,
+{
+ match CString::new(bytes) {
+ Ok(s) => f(&s),
+ Err(_) => Err(NUL_ERR),
+ }
+}
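A minimal usage sketch of the helper added above, mirroring the pattern the rest of this patch adopts (e.g. the hermit `unlink` change below). The `chdir` wrapper and its `libc::chdir`/`cvt` call site are illustrative assumptions, not part of this commit; short paths borrow the stack buffer, longer ones fall back to the allocating path.

    use crate::io;
    use crate::path::Path;
    use crate::sys::common::small_c_string::run_path_with_cstr;
    use crate::sys::cvt;

    // Hypothetical in-tree call site: convert a Path to a NUL-terminated
    // C string without heap allocation for the common (short) case.
    pub fn chdir(p: &Path) -> io::Result<()> {
        run_path_with_cstr(p, |p| cvt(unsafe { libc::chdir(p.as_ptr()) }).map(drop))
    }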
diff --git a/library/std/src/sys/common/tests.rs b/library/std/src/sys/common/tests.rs
new file mode 100644
index 000000000..fb6f5d6af
--- /dev/null
+++ b/library/std/src/sys/common/tests.rs
@@ -0,0 +1,66 @@
+use crate::ffi::CString;
+use crate::hint::black_box;
+use crate::path::Path;
+use crate::sys::common::small_c_string::run_path_with_cstr;
+use core::iter::repeat;
+
+#[test]
+fn stack_allocation_works() {
+ let path = Path::new("abc");
+ let result = run_path_with_cstr(path, |p| {
+ assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap());
+ Ok(42)
+ });
+ assert_eq!(result.unwrap(), 42);
+}
+
+#[test]
+fn stack_allocation_fails() {
+ let path = Path::new("ab\0");
+ assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err());
+}
+
+#[test]
+fn heap_allocation_works() {
+ let path = repeat("a").take(384).collect::<String>();
+ let path = Path::new(&path);
+ let result = run_path_with_cstr(path, |p| {
+ assert_eq!(p, &*CString::new(path.as_os_str().bytes()).unwrap());
+ Ok(42)
+ });
+ assert_eq!(result.unwrap(), 42);
+}
+
+#[test]
+fn heap_allocation_fails() {
+ let mut path = repeat("a").take(384).collect::<String>();
+ path.push('\0');
+ let path = Path::new(&path);
+ assert!(run_path_with_cstr::<(), _>(path, |_| unreachable!()).is_err());
+}
+
+#[bench]
+fn bench_stack_path_alloc(b: &mut test::Bencher) {
+ let path = repeat("a").take(383).collect::<String>();
+ let p = Path::new(&path);
+ b.iter(|| {
+ run_path_with_cstr(p, |cstr| {
+ black_box(cstr);
+ Ok(())
+ })
+ .unwrap();
+ });
+}
+
+#[bench]
+fn bench_heap_path_alloc(b: &mut test::Bencher) {
+ let path = repeat("a").take(384).collect::<String>();
+ let p = Path::new(&path);
+ b.iter(|| {
+ run_path_with_cstr(p, |cstr| {
+ black_box(cstr);
+ Ok(())
+ })
+ .unwrap();
+ });
+}
diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs
index 1c7e1dd8d..afcae6c90 100644
--- a/library/std/src/sys/hermit/args.rs
+++ b/library/std/src/sys/hermit/args.rs
@@ -1,20 +1,37 @@
-use crate::ffi::OsString;
+use crate::ffi::{c_char, CStr, OsString};
use crate::fmt;
+use crate::os::unix::ffi::OsStringExt;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicIsize, AtomicPtr,
+ Ordering::{Acquire, Relaxed, Release},
+};
use crate::vec;
+static ARGC: AtomicIsize = AtomicIsize::new(0);
+static ARGV: AtomicPtr<*const u8> = AtomicPtr::new(ptr::null_mut());
+
/// One-time global initialization.
pub unsafe fn init(argc: isize, argv: *const *const u8) {
- imp::init(argc, argv)
-}
-
-/// One-time global cleanup.
-pub unsafe fn cleanup() {
- imp::cleanup()
+ ARGC.store(argc, Relaxed);
+ // Use release ordering here to broadcast writes by the OS.
+ ARGV.store(argv as *mut *const u8, Release);
}
/// Returns the command line arguments
pub fn args() -> Args {
- imp::args()
+ // Synchronize with the store above.
+ let argv = ARGV.load(Acquire);
+ // If argv has not been initialized yet, do not return any arguments.
+ let argc = if argv.is_null() { 0 } else { ARGC.load(Relaxed) };
+ let args: Vec<OsString> = (0..argc)
+ .map(|i| unsafe {
+ let cstr = CStr::from_ptr(*argv.offset(i) as *const c_char);
+ OsStringExt::from_vec(cstr.to_bytes().to_vec())
+ })
+ .collect();
+
+ Args { iter: args.into_iter() }
}
pub struct Args {
@@ -51,44 +68,3 @@ impl DoubleEndedIterator for Args {
self.iter.next_back()
}
}
-
-mod imp {
- use super::Args;
- use crate::ffi::{CStr, OsString};
- use crate::os::unix::ffi::OsStringExt;
- use crate::ptr;
-
- use crate::sys_common::mutex::StaticMutex;
-
- static mut ARGC: isize = 0;
- static mut ARGV: *const *const u8 = ptr::null();
- static LOCK: StaticMutex = StaticMutex::new();
-
- pub unsafe fn init(argc: isize, argv: *const *const u8) {
- let _guard = LOCK.lock();
- ARGC = argc;
- ARGV = argv;
- }
-
- pub unsafe fn cleanup() {
- let _guard = LOCK.lock();
- ARGC = 0;
- ARGV = ptr::null();
- }
-
- pub fn args() -> Args {
- Args { iter: clone().into_iter() }
- }
-
- fn clone() -> Vec<OsString> {
- unsafe {
- let _guard = LOCK.lock();
- (0..ARGC)
- .map(|i| {
- let cstr = CStr::from_ptr(*ARGV.offset(i) as *const i8);
- OsStringExt::from_vec(cstr.to_bytes().to_vec())
- })
- .collect()
- }
- }
-}
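A reduced sketch of the publish/consume handoff used in the new `args::init`/`args` pair above, assuming a hypothetical `u32` payload rather than the real argv pointer: the Release store makes everything written before it visible to any thread that later observes the pointer with an Acquire load.

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Release}};

    static DATA: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

    // Writes to *p happen-before the Release store, so they are visible
    // to any reader that loads the same pointer with Acquire.
    unsafe fn publish(p: *mut u32) {
        DATA.store(p, Release);
    }

    fn consume() -> Option<u32> {
        let p = DATA.load(Acquire); // synchronizes-with the Release store
        if p.is_null() { None } else { Some(unsafe { *p }) }
    }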
diff --git a/library/std/src/sys/hermit/condvar.rs b/library/std/src/sys/hermit/condvar.rs
deleted file mode 100644
index 22059ca0d..000000000
--- a/library/std/src/sys/hermit/condvar.rs
+++ /dev/null
@@ -1,90 +0,0 @@
-use crate::ffi::c_void;
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::hermit::abi;
-use crate::sys::locks::Mutex;
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-use crate::time::Duration;
-
-// The implementation is inspired by Andrew D. Birrell's paper
-// "Implementing Condition Variables with Semaphores"
-
-pub struct Condvar {
- counter: AtomicUsize,
- sem1: *const c_void,
- sem2: *const c_void,
-}
-
-pub(crate) type MovableCondvar = LazyBox<Condvar>;
-
-impl LazyInit for Condvar {
- fn init() -> Box<Self> {
- Box::new(Self::new())
- }
-}
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
-impl Condvar {
- pub fn new() -> Self {
- let mut condvar =
- Self { counter: AtomicUsize::new(0), sem1: ptr::null(), sem2: ptr::null() };
- unsafe {
- let _ = abi::sem_init(&mut condvar.sem1, 0);
- let _ = abi::sem_init(&mut condvar.sem2, 0);
- }
- condvar
- }
-
- pub unsafe fn notify_one(&self) {
- if self.counter.load(SeqCst) > 0 {
- self.counter.fetch_sub(1, SeqCst);
- abi::sem_post(self.sem1);
- abi::sem_timedwait(self.sem2, 0);
- }
- }
-
- pub unsafe fn notify_all(&self) {
- let counter = self.counter.swap(0, SeqCst);
- for _ in 0..counter {
- abi::sem_post(self.sem1);
- }
- for _ in 0..counter {
- abi::sem_timedwait(self.sem2, 0);
- }
- }
-
- pub unsafe fn wait(&self, mutex: &Mutex) {
- self.counter.fetch_add(1, SeqCst);
- mutex.unlock();
- abi::sem_timedwait(self.sem1, 0);
- abi::sem_post(self.sem2);
- mutex.lock();
- }
-
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- self.counter.fetch_add(1, SeqCst);
- mutex.unlock();
- let millis = dur.as_millis().min(u32::MAX as u128) as u32;
-
- let res = if millis > 0 {
- abi::sem_timedwait(self.sem1, millis)
- } else {
- abi::sem_trywait(self.sem1)
- };
-
- abi::sem_post(self.sem2);
- mutex.lock();
- res == 0
- }
-}
-
-impl Drop for Condvar {
- fn drop(&mut self) {
- unsafe {
- let _ = abi::sem_destroy(self.sem1);
- let _ = abi::sem_destroy(self.sem2);
- }
- }
-}
diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs
index fa9a7fb19..af297ff1e 100644
--- a/library/std/src/sys/hermit/fs.rs
+++ b/library/std/src/sys/hermit/fs.rs
@@ -1,10 +1,12 @@
+use crate::convert::TryFrom;
use crate::ffi::{CStr, CString, OsString};
use crate::fmt;
use crate::hash::{Hash, Hasher};
use crate::io::{self, Error, ErrorKind};
-use crate::io::{IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::os::unix::ffi::OsStrExt;
use crate::path::{Path, PathBuf};
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::cvt;
use crate::sys::hermit::abi;
use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY};
@@ -15,10 +17,6 @@ use crate::sys::unsupported;
pub use crate::sys_common::fs::{copy, try_exists};
//pub use crate::sys_common::fs::remove_dir_all;
-fn cstr(path: &Path) -> io::Result<CString> {
- Ok(CString::new(path.as_os_str().as_bytes())?)
-}
-
#[derive(Debug)]
pub struct File(FileDesc);
@@ -41,6 +39,9 @@ pub struct OpenOptions {
mode: i32,
}
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {}
+
pub struct FilePermissions(!);
pub struct FileType(!);
@@ -110,6 +111,11 @@ impl fmt::Debug for FilePermissions {
}
}
+impl FileTimes {
+ pub fn set_accessed(&mut self, _t: SystemTime) {}
+ pub fn set_modified(&mut self, _t: SystemTime) {}
+}
+
impl FileType {
pub fn is_dir(&self) -> bool {
self.0
@@ -264,8 +270,7 @@ impl OpenOptions {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
- let path = cstr(path)?;
- File::open_c(&path, opts)
+ run_path_with_cstr(path, |path| File::open_c(&path, opts))
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
@@ -312,8 +317,8 @@ impl File {
false
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- crate::io::default_read_buf(|buf| self.read(buf), buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -344,6 +349,10 @@ impl File {
pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
Err(Error::from_raw_os_error(22))
}
+
+ pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
+ Err(Error::from_raw_os_error(22))
+ }
}
impl DirBuilder {
@@ -361,9 +370,7 @@ pub fn readdir(_p: &Path) -> io::Result<ReadDir> {
}
pub fn unlink(path: &Path) -> io::Result<()> {
- let name = cstr(path)?;
- let _ = unsafe { cvt(abi::unlink(name.as_ptr()))? };
- Ok(())
+ run_path_with_cstr(path, |path| cvt(unsafe { abi::unlink(path.as_ptr()) }).map(|_| ()))
}
pub fn rename(_old: &Path, _new: &Path) -> io::Result<()> {
diff --git a/library/std/src/sys/hermit/futex.rs b/library/std/src/sys/hermit/futex.rs
new file mode 100644
index 000000000..b64c174b0
--- /dev/null
+++ b/library/std/src/sys/hermit/futex.rs
@@ -0,0 +1,39 @@
+use super::abi;
+use crate::ptr::null;
+use crate::sync::atomic::AtomicU32;
+use crate::time::Duration;
+
+pub fn futex_wait(futex: &AtomicU32, expected: u32, timeout: Option<Duration>) -> bool {
+ // Calculate the timeout as a relative timespec.
+ //
+ // Overflows are rounded up to an infinite timeout (None).
+ let timespec = timeout.and_then(|dur| {
+ Some(abi::timespec {
+ tv_sec: dur.as_secs().try_into().ok()?,
+ tv_nsec: dur.subsec_nanos().into(),
+ })
+ });
+
+ let r = unsafe {
+ abi::futex_wait(
+ futex.as_mut_ptr(),
+ expected,
+ timespec.as_ref().map_or(null(), |t| t as *const abi::timespec),
+ abi::FUTEX_RELATIVE_TIMEOUT,
+ )
+ };
+
+ r != -abi::errno::ETIMEDOUT
+}
+
+#[inline]
+pub fn futex_wake(futex: &AtomicU32) -> bool {
+ unsafe { abi::futex_wake(futex.as_mut_ptr(), 1) > 0 }
+}
+
+#[inline]
+pub fn futex_wake_all(futex: &AtomicU32) {
+ unsafe {
+ abi::futex_wake(futex.as_mut_ptr(), i32::MAX);
+ }
+}
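A hedged sketch of how primitives like these are typically consumed (the `Event` type and the import path are illustrative, not part of this patch): a waiter blocks while the futex word is still 0, and `set` flips it and wakes all waiters. The wait is re-checked in a loop because `futex_wait` returns immediately if the value has already changed and may also wake spuriously.

    use crate::sync::atomic::{AtomicU32, Ordering::{Acquire, Release}};
    use crate::sys::futex::{futex_wait, futex_wake_all};

    pub struct Event(AtomicU32);

    impl Event {
        pub const fn new() -> Self {
            Event(AtomicU32::new(0))
        }

        pub fn set(&self) {
            self.0.store(1, Release);
            futex_wake_all(&self.0);
        }

        pub fn wait(&self) {
            while self.0.load(Acquire) == 0 {
                // Returns early if the word is no longer 0; loop handles
                // spurious wakeups.
                futex_wait(&self.0, 0, None);
            }
        }
    }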
diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs
index 60b7a973c..e6534df89 100644
--- a/library/std/src/sys/hermit/mod.rs
+++ b/library/std/src/sys/hermit/mod.rs
@@ -25,6 +25,7 @@ pub mod cmath;
pub mod env;
pub mod fd;
pub mod fs;
+pub mod futex;
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod memchr;
@@ -45,14 +46,14 @@ pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod time;
-mod condvar;
-mod mutex;
-mod rwlock;
-
+#[path = "../unix/locks"]
pub mod locks {
- pub use super::condvar::*;
- pub use super::mutex::*;
- pub use super::rwlock::*;
+ mod futex_condvar;
+ mod futex_mutex;
+ mod futex_rwlock;
+ pub(crate) use futex_condvar::MovableCondvar;
+ pub(crate) use futex_mutex::{MovableMutex, Mutex};
+ pub(crate) use futex_rwlock::{MovableRwLock, RwLock};
}
use crate::io::ErrorKind;
@@ -98,16 +99,14 @@ pub extern "C" fn __rust_abort() {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
let _ = net::init();
args::init(argc, argv);
}
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
-pub unsafe fn cleanup() {
- args::cleanup();
-}
+pub unsafe fn cleanup() {}
#[cfg(not(test))]
#[no_mangle]
diff --git a/library/std/src/sys/hermit/mutex.rs b/library/std/src/sys/hermit/mutex.rs
deleted file mode 100644
index eb15a04ff..000000000
--- a/library/std/src/sys/hermit/mutex.rs
+++ /dev/null
@@ -1,216 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::collections::VecDeque;
-use crate::hint;
-use crate::ops::{Deref, DerefMut, Drop};
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering};
-use crate::sys::hermit::abi;
-
-/// This type provides a lock based on busy waiting to realize mutual exclusion
-///
-/// # Description
-///
-/// This structure behaves a lot like a common mutex. There are some differences:
-///
-/// - By using busy waiting, it can be used outside the runtime.
-/// - It is a so called ticket lock and is completely fair.
-#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
-#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
-struct Spinlock<T: ?Sized> {
- queue: AtomicUsize,
- dequeue: AtomicUsize,
- data: UnsafeCell<T>,
-}
-
-unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
-unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
-
-/// A guard to which the protected data can be accessed
-///
-/// When the guard falls out of scope it will release the lock.
-struct SpinlockGuard<'a, T: ?Sized + 'a> {
- dequeue: &'a AtomicUsize,
- data: &'a mut T,
-}
-
-impl<T> Spinlock<T> {
- pub const fn new(user_data: T) -> Spinlock<T> {
- Spinlock {
- queue: AtomicUsize::new(0),
- dequeue: AtomicUsize::new(1),
- data: UnsafeCell::new(user_data),
- }
- }
-
- #[inline]
- fn obtain_lock(&self) {
- let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
- let mut counter: u16 = 0;
- while self.dequeue.load(Ordering::SeqCst) != ticket {
- counter += 1;
- if counter < 100 {
- hint::spin_loop();
- } else {
- counter = 0;
- unsafe {
- abi::yield_now();
- }
- }
- }
- }
-
- #[inline]
- pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
- self.obtain_lock();
- SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
- }
-}
-
-impl<T: ?Sized + Default> Default for Spinlock<T> {
- fn default() -> Spinlock<T> {
- Spinlock::new(Default::default())
- }
-}
-
-impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
- type Target = T;
- fn deref(&self) -> &T {
- &*self.data
- }
-}
-
-impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
- fn deref_mut(&mut self) -> &mut T {
- &mut *self.data
- }
-}
-
-impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
- /// The dropping of the SpinlockGuard will release the lock it was created from.
- fn drop(&mut self) {
- self.dequeue.fetch_add(1, Ordering::SeqCst);
- }
-}
-
-/// Realize a priority queue for tasks
-struct PriorityQueue {
- queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
- prio_bitmap: u64,
-}
-
-impl PriorityQueue {
- pub const fn new() -> PriorityQueue {
- PriorityQueue {
- queues: [
- None, None, None, None, None, None, None, None, None, None, None, None, None, None,
- None, None, None, None, None, None, None, None, None, None, None, None, None, None,
- None, None, None,
- ],
- prio_bitmap: 0,
- }
- }
-
- /// Add a task id by its priority to the queue
- pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
- let i: usize = prio.into().into();
- self.prio_bitmap |= (1 << i) as u64;
- if let Some(queue) = &mut self.queues[i] {
- queue.push_back(id);
- } else {
- let mut queue = VecDeque::new();
- queue.push_back(id);
- self.queues[i] = Some(queue);
- }
- }
-
- fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
- if let Some(queue) = &mut self.queues[queue_index] {
- let id = queue.pop_front();
-
- if queue.is_empty() {
- self.prio_bitmap &= !(1 << queue_index as u64);
- }
-
- id
- } else {
- None
- }
- }
-
- /// Pop the task handle with the highest priority from the queue
- pub fn pop(&mut self) -> Option<abi::Tid> {
- for i in 0..abi::NO_PRIORITIES {
- if self.prio_bitmap & (1 << i) != 0 {
- return self.pop_from_queue(i);
- }
- }
-
- None
- }
-}
-
-struct MutexInner {
- locked: bool,
- blocked_task: PriorityQueue,
-}
-
-impl MutexInner {
- pub const fn new() -> MutexInner {
- MutexInner { locked: false, blocked_task: PriorityQueue::new() }
- }
-}
-
-pub struct Mutex {
- inner: Spinlock<MutexInner>,
-}
-
-pub type MovableMutex = Mutex;
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
-
-impl Mutex {
- pub const fn new() -> Mutex {
- Mutex { inner: Spinlock::new(MutexInner::new()) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn lock(&self) {
- loop {
- let mut guard = self.inner.lock();
- if guard.locked == false {
- guard.locked = true;
- return;
- } else {
- let prio = abi::get_priority();
- let id = abi::getpid();
-
- guard.blocked_task.push(prio, id);
- abi::block_current_task();
- drop(guard);
- abi::yield_now();
- }
- }
- }
-
- #[inline]
- pub unsafe fn unlock(&self) {
- let mut guard = self.inner.lock();
- guard.locked = false;
- if let Some(tid) = guard.blocked_task.pop() {
- abi::wakeup_task(tid);
- }
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- let mut guard = self.inner.lock();
- if guard.locked == false {
- guard.locked = true;
- }
- guard.locked
- }
-}
diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs
index 745476171..8a13879d8 100644
--- a/library/std/src/sys/hermit/net.rs
+++ b/library/std/src/sys/hermit/net.rs
@@ -487,6 +487,4 @@ pub mod netc {
#[derive(Copy, Clone)]
pub struct sockaddr {}
-
- pub type socklen_t = usize;
}
diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs
deleted file mode 100644
index 9701bab1f..000000000
--- a/library/std/src/sys/hermit/rwlock.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use crate::cell::UnsafeCell;
-use crate::sys::locks::{MovableCondvar, Mutex};
-use crate::sys_common::lazy_box::{LazyBox, LazyInit};
-
-pub struct RwLock {
- lock: Mutex,
- cond: MovableCondvar,
- state: UnsafeCell<State>,
-}
-
-pub type MovableRwLock = RwLock;
-
-enum State {
- Unlocked,
- Reading(usize),
- Writing,
-}
-
-unsafe impl Send for RwLock {}
-unsafe impl Sync for RwLock {}
-
-// This rwlock implementation is a relatively simple implementation which has a
-// condition variable for readers/writers as well as a mutex protecting the
-// internal state of the lock. A current downside of the implementation is that
-// unlocking the lock will notify *all* waiters rather than just readers or just
-// writers. This can cause lots of "thundering stampede" problems. While
-// hopefully correct this implementation is very likely to want to be changed in
-// the future.
-
-impl RwLock {
- pub const fn new() -> RwLock {
- RwLock {
- lock: Mutex::new(),
- cond: MovableCondvar::new(),
- state: UnsafeCell::new(State::Unlocked),
- }
- }
-
- #[inline]
- pub unsafe fn read(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_readers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_readers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn write(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_writers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_writers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn read_unlock(&self) {
- self.lock.lock();
- let notify = (*self.state.get()).dec_readers();
- self.lock.unlock();
- if notify {
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
- }
-
- #[inline]
- pub unsafe fn write_unlock(&self) {
- self.lock.lock();
- (*self.state.get()).dec_writers();
- self.lock.unlock();
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
-}
-
-impl State {
- fn inc_readers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Reading(1);
- true
- }
- State::Reading(ref mut cnt) => {
- *cnt += 1;
- true
- }
- State::Writing => false,
- }
- }
-
- fn inc_writers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Writing;
- true
- }
- State::Reading(_) | State::Writing => false,
- }
- }
-
- fn dec_readers(&mut self) -> bool {
- let zero = match *self {
- State::Reading(ref mut cnt) => {
- *cnt -= 1;
- *cnt == 0
- }
- State::Unlocked | State::Writing => invalid(),
- };
- if zero {
- *self = State::Unlocked;
- }
- zero
- }
-
- fn dec_writers(&mut self) {
- match *self {
- State::Writing => {}
- State::Unlocked | State::Reading(_) => invalid(),
- }
- *self = State::Unlocked;
- }
-}
-
-fn invalid() -> ! {
- panic!("inconsistent rwlock");
-}
diff --git a/library/std/src/sys/itron/mutex.rs b/library/std/src/sys/itron/mutex.rs
index 715e94c3b..085662e6d 100644
--- a/library/std/src/sys/itron/mutex.rs
+++ b/library/std/src/sys/itron/mutex.rs
@@ -31,12 +31,6 @@ impl Mutex {
Mutex { mtx: SpinIdOnceCell::new() }
}
- pub unsafe fn init(&mut self) {
- // Initialize `self.mtx` eagerly
- let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
- unsafe { self.mtx.set_unchecked((id, ())) };
- }
-
/// Get the inner mutex's ID, which is lazily created.
fn raw(&self) -> abi::ID {
match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {
diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs
index 167c918c9..c080c176a 100644
--- a/library/std/src/sys/mod.rs
+++ b/library/std/src/sys/mod.rs
@@ -22,7 +22,7 @@
#![allow(missing_debug_implementations)]
-mod common;
+pub mod common;
cfg_if::cfg_if! {
if #[cfg(unix)] {
diff --git a/library/std/src/sys/sgx/abi/thread.rs b/library/std/src/sys/sgx/abi/thread.rs
index ef55b821a..2b23e368c 100644
--- a/library/std/src/sys/sgx/abi/thread.rs
+++ b/library/std/src/sys/sgx/abi/thread.rs
@@ -7,7 +7,11 @@ use fortanix_sgx_abi::Tcs;
#[unstable(feature = "sgx_platform", issue = "56975")]
pub fn current() -> Tcs {
extern "C" {
- fn get_tcs_addr() -> Tcs;
+ fn get_tcs_addr() -> *mut u8;
+ }
+ let addr = unsafe { get_tcs_addr() };
+ match Tcs::new(addr) {
+ Some(tcs) => tcs,
+ None => rtabort!("TCS must not be placed at address zero (this is a linker error)"),
}
- unsafe { get_tcs_addr() }
}
diff --git a/library/std/src/sys/sgx/abi/tls/mod.rs b/library/std/src/sys/sgx/abi/tls/mod.rs
index 13d96e9a6..09c4ab3d3 100644
--- a/library/std/src/sys/sgx/abi/tls/mod.rs
+++ b/library/std/src/sys/sgx/abi/tls/mod.rs
@@ -111,6 +111,7 @@ impl Tls {
rtabort!("TLS limit exceeded")
};
TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
+ unsafe { Self::current() }.data[index].set(ptr::null_mut());
Key::from_index(index)
}
diff --git a/library/std/src/sys/sgx/abi/usercalls/alloc.rs b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
index ea24fedd0..0d934318c 100644
--- a/library/std/src/sys/sgx/abi/usercalls/alloc.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/alloc.rs
@@ -56,6 +56,8 @@ unsafe impl UserSafeSized for Usercall {}
#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl UserSafeSized for Return {}
#[unstable(feature = "sgx_platform", issue = "56975")]
+unsafe impl UserSafeSized for Cancel {}
+#[unstable(feature = "sgx_platform", issue = "56975")]
unsafe impl<T: UserSafeSized> UserSafeSized for [T; 2] {}
/// A type that can be represented in memory as one or more `UserSafeSized`s.
@@ -115,7 +117,7 @@ pub unsafe trait UserSafe {
/// * the pointer is null.
/// * the pointed-to range is not in user memory.
unsafe fn check_ptr(ptr: *const Self) {
- let is_aligned = |p| -> bool { 0 == (p as usize) & (Self::align_of() - 1) };
+ let is_aligned = |p: *const u8| -> bool { p.is_aligned_to(Self::align_of()) };
assert!(is_aligned(ptr as *const u8));
assert!(is_user_range(ptr as _, mem::size_of_val(unsafe { &*ptr })));
@@ -305,6 +307,34 @@ where
}
}
+// Split a memory region ptr..ptr + len into three parts:
+// +--------+
+// | small0 | Chunk smaller than 8 bytes
+// +--------+
+// | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
+// +--------+
+// | small1 | Chunk smaller than 8 bytes
+// +--------+
+fn region_as_aligned_chunks(ptr: *const u8, len: usize) -> (usize, usize, usize) {
+ let small0_size = if ptr.is_aligned_to(8) { 0 } else { 8 - ptr.addr() % 8 };
+ let small1_size = (len - small0_size) % 8;
+ let big_size = len - small0_size - small1_size;
+
+ (small0_size, big_size, small1_size)
+}
+
+unsafe fn copy_quadwords(src: *const u8, dst: *mut u8, len: usize) {
+ unsafe {
+ asm!(
+ "rep movsq (%rsi), (%rdi)",
+ inout("rcx") len / 8 => _,
+ inout("rdi") dst => _,
+ inout("rsi") src => _,
+ options(att_syntax, nostack, preserves_flags)
+ );
+ }
+}
+
/// Copies `len` bytes of data from enclave pointer `src` to userspace `dst`
///
/// This function mitigates stale data vulnerabilities by ensuring all writes to untrusted memory are either:
@@ -334,8 +364,8 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
mfence
lfence
",
- val = in(reg_byte) *src.offset(off as isize),
- dst = in(reg) dst.offset(off as isize),
+ val = in(reg_byte) *src.add(off),
+ dst = in(reg) dst.add(off),
seg_sel = in(reg) &mut seg_sel,
options(nostack, att_syntax)
);
@@ -343,34 +373,23 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
}
}
- unsafe fn copy_aligned_quadwords_to_userspace(src: *const u8, dst: *mut u8, len: usize) {
- unsafe {
- asm!(
- "rep movsq (%rsi), (%rdi)",
- inout("rcx") len / 8 => _,
- inout("rdi") dst => _,
- inout("rsi") src => _,
- options(att_syntax, nostack, preserves_flags)
- );
- }
- }
assert!(!src.is_null());
assert!(!dst.is_null());
assert!(is_enclave_range(src, len));
assert!(is_user_range(dst, len));
assert!(len < isize::MAX as usize);
- assert!(!(src as usize).overflowing_add(len).1);
- assert!(!(dst as usize).overflowing_add(len).1);
+ assert!(!src.addr().overflowing_add(len).1);
+ assert!(!dst.addr().overflowing_add(len).1);
if len < 8 {
// Can't align on 8 byte boundary: copy safely byte per byte
unsafe {
copy_bytewise_to_userspace(src, dst, len);
}
- } else if len % 8 == 0 && dst as usize % 8 == 0 {
+ } else if len % 8 == 0 && dst.is_aligned_to(8) {
// Copying 8-byte aligned quadwords: copy quad word per quad word
unsafe {
- copy_aligned_quadwords_to_userspace(src, dst, len);
+ copy_quadwords(src, dst, len);
}
} else {
// Split copies into three parts:
@@ -381,25 +400,121 @@ pub(crate) unsafe fn copy_to_userspace(src: *const u8, dst: *mut u8, len: usize)
// +--------+
// | small1 | Chunk smaller than 8 bytes
// +--------+
+ let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
unsafe {
// Copy small0
- let small0_size = (8 - dst as usize % 8) as u8;
- let small0_src = src;
- let small0_dst = dst;
- copy_bytewise_to_userspace(small0_src as _, small0_dst, small0_size as _);
+ copy_bytewise_to_userspace(src, dst, small0_size);
// Copy big
- let small1_size = ((len - small0_size as usize) % 8) as u8;
- let big_size = len - small0_size as usize - small1_size as usize;
- let big_src = src.offset(small0_size as _);
- let big_dst = dst.offset(small0_size as _);
- copy_aligned_quadwords_to_userspace(big_src as _, big_dst, big_size);
+ let big_src = src.add(small0_size);
+ let big_dst = dst.add(small0_size);
+ copy_quadwords(big_src, big_dst, big_size);
// Copy small1
- let small1_src = src.offset(big_size as isize + small0_size as isize);
- let small1_dst = dst.offset(big_size as isize + small0_size as isize);
- copy_bytewise_to_userspace(small1_src, small1_dst, small1_size as _);
+ let small1_src = src.add(big_size + small0_size);
+ let small1_dst = dst.add(big_size + small0_size);
+ copy_bytewise_to_userspace(small1_src, small1_dst, small1_size);
+ }
+ }
+}
+
+/// Copies `len` bytes of data from userspace pointer `src` to enclave pointer `dst`
+///
+/// This function mitigates AEPIC leak vulnerabilities by ensuring all reads from untrusted memory are 8-byte aligned
+///
+/// # Panics
+/// This function panics if:
+///
+/// * The `src` pointer is null
+/// * The `dst` pointer is null
+/// * The `src` memory range is not in user memory
+/// * The `dst` memory range is not in enclave memory
+///
+/// # References
+/// - https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00657.html
+/// - https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/stale-data-read-from-xapic.html
+pub(crate) unsafe fn copy_from_userspace(src: *const u8, dst: *mut u8, len: usize) {
+ // Copies memory region `src..src + len` to the enclave at `dst`. The source memory region
+ // is:
+ // - strictly less than 8 bytes in size and may be
+ // - located at a misaligned memory location
+ fn copy_misaligned_chunk_to_enclave(src: *const u8, dst: *mut u8, len: usize) {
+ let mut tmp_buff = [0u8; 16];
+
+ unsafe {
+ // Compute an aligned memory region to read from
+ // +--------+ <-- aligned_src + aligned_len (8B-aligned)
+ // | pad1 |
+ // +--------+ <-- src + len (misaligned)
+ // | |
+ // | |
+ // | |
+ // +--------+ <-- src (misaligned)
+ // | pad0 |
+ // +--------+ <-- aligned_src (8B-aligned)
+ let pad0_size = src as usize % 8;
+ let aligned_src = src.sub(pad0_size);
+
+ let pad1_size = 8 - (src.add(len) as usize % 8);
+ let aligned_len = pad0_size + len + pad1_size;
+
+ debug_assert!(len < 8);
+ debug_assert_eq!(aligned_src as usize % 8, 0);
+ debug_assert_eq!(aligned_len % 8, 0);
+ debug_assert!(aligned_len <= 16);
+
+ // Copy the aligned buffer to a temporary buffer
+ // Note: copying from a slightly different memory location is a bit odd. In this case it
+ // can't lead to page faults or inadvertent copying from the enclave as we only ensured
+ // that the `src` pointer is aligned at an 8 byte boundary. As pages are 4096 bytes
+ // aligned, `aligned_src` must be on the same page as `src`. A similar argument can be made
+ // for `src + len`
+ copy_quadwords(aligned_src as _, tmp_buff.as_mut_ptr(), aligned_len);
+
+ // Copy the correct parts of the temporary buffer to the destination
+ ptr::copy(tmp_buff.as_ptr().add(pad0_size), dst, len);
+ }
+ }
+
+ assert!(!src.is_null());
+ assert!(!dst.is_null());
+ assert!(is_user_range(src, len));
+ assert!(is_enclave_range(dst, len));
+ assert!(!(src as usize).overflowing_add(len + 8).1);
+ assert!(!(dst as usize).overflowing_add(len + 8).1);
+
+ if len < 8 {
+ copy_misaligned_chunk_to_enclave(src, dst, len);
+ } else if len % 8 == 0 && src as usize % 8 == 0 {
+ // Copying 8-byte aligned quadwords: copy quad word per quad word
+ unsafe {
+ copy_quadwords(src, dst, len);
+ }
+ } else {
+ // Split copies into three parts:
+ // +--------+
+ // | small0 | Chunk smaller than 8 bytes
+ // +--------+
+ // | big | Chunk 8-byte aligned, and size a multiple of 8 bytes
+ // +--------+
+ // | small1 | Chunk smaller than 8 bytes
+ // +--------+
+ let (small0_size, big_size, small1_size) = region_as_aligned_chunks(dst, len);
+
+ unsafe {
+ // Copy small0
+ copy_misaligned_chunk_to_enclave(src, dst, small0_size);
+
+ // Copy big
+ let big_src = src.add(small0_size);
+ let big_dst = dst.add(small0_size);
+ copy_quadwords(big_src, big_dst, big_size);
+
+ // Copy small1
+ let small1_src = src.add(big_size + small0_size);
+ let small1_dst = dst.add(big_size + small0_size);
+ copy_misaligned_chunk_to_enclave(small1_src, small1_dst, small1_size);
}
}
}
@@ -468,7 +583,7 @@ where
pub fn copy_to_enclave(&self, dest: &mut T) {
unsafe {
assert_eq!(mem::size_of_val(dest), mem::size_of_val(&*self.0.get()));
- ptr::copy(
+ copy_from_userspace(
self.0.get() as *const T as *const u8,
dest as *mut T as *mut u8,
mem::size_of_val(dest),
@@ -494,7 +609,11 @@ where
{
/// Copies the value from user memory into enclave memory.
pub fn to_enclave(&self) -> T {
- unsafe { ptr::read(self.0.get()) }
+ unsafe {
+ let mut data: T = mem::MaybeUninit::uninit().assume_init();
+ copy_from_userspace(self.0.get() as _, &mut data as *mut T as _, mem::size_of::<T>());
+ data
+ }
}
}
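A worked example of the three-way split performed by `region_as_aligned_chunks` above, with the address and length chosen purely for illustration: a destination ending in ...3 needs 5 bytes to reach an 8-byte boundary, 24 bytes can then be copied as quadwords, and 1 byte remains.

    fn example_split() {
        let ptr = 0x1003 as *const u8;       // ptr as usize % 8 == 3
        let len = 30;
        let small0 = 8 - (ptr as usize % 8); // 5 bytes up to the boundary
        let small1 = (len - small0) % 8;     // 1 trailing byte
        let big = len - small0 - small1;     // 24 bytes, a multiple of 8
        assert_eq!((small0, big, small1), (5, 24, 1));
    }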
diff --git a/library/std/src/sys/sgx/abi/usercalls/mod.rs b/library/std/src/sys/sgx/abi/usercalls/mod.rs
index 79d1db5e1..e19e84326 100644
--- a/library/std/src/sys/sgx/abi/usercalls/mod.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/mod.rs
@@ -292,12 +292,17 @@ fn check_os_error(err: Result) -> i32 {
}
}
-trait FromSgxResult {
+/// Translate the raw result of an SGX usercall.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait FromSgxResult {
+ /// Return type
type Return;
+ /// Translate the raw result of an SGX usercall.
fn from_sgx_result(self) -> IoResult<Self::Return>;
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T> FromSgxResult for (Result, T) {
type Return = T;
@@ -310,6 +315,7 @@ impl<T> FromSgxResult for (Result, T) {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl FromSgxResult for Result {
type Return = ();
diff --git a/library/std/src/sys/sgx/abi/usercalls/raw.rs b/library/std/src/sys/sgx/abi/usercalls/raw.rs
index 4267b96cc..10c1456d4 100644
--- a/library/std/src/sys/sgx/abi/usercalls/raw.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/raw.rs
@@ -37,14 +37,23 @@ pub unsafe fn do_usercall(
(a, b)
}
-type Register = u64;
+/// A value passed or returned in a CPU register.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub type Register = u64;
-trait RegisterArgument {
+/// Translate a type from/to Register to be used as an argument.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait RegisterArgument {
+ /// Translate a Register to Self.
fn from_register(_: Register) -> Self;
+ /// Translate self to a Register.
fn into_register(self) -> Register;
}
-trait ReturnValue {
+/// Translate a pair of Registers to the raw usercall return value.
+#[unstable(feature = "sgx_platform", issue = "56975")]
+pub trait ReturnValue {
+ /// Translate a pair of Registers to the raw usercall return value.
fn from_registers(call: &'static str, regs: (Register, Register)) -> Self;
}
@@ -68,6 +77,7 @@ macro_rules! define_usercalls {
macro_rules! define_ra {
(< $i:ident > $t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl<$i> RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as _
@@ -78,6 +88,7 @@ macro_rules! define_ra {
}
};
($i:ty as $t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as $i as _
@@ -88,6 +99,7 @@ macro_rules! define_ra {
}
};
($t:ty) => {
+ #[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for $t {
fn from_register(a: Register) -> Self {
a as _
@@ -112,6 +124,7 @@ define_ra!(usize as isize);
define_ra!(<T> *const T);
define_ra!(<T> *mut T);
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl RegisterArgument for bool {
fn from_register(a: Register) -> bool {
if a != 0 { true } else { false }
@@ -121,6 +134,7 @@ impl RegisterArgument for bool {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
fn from_register(a: Register) -> Option<NonNull<T>> {
NonNull::new(a as _)
@@ -130,12 +144,14 @@ impl<T: RegisterArgument> RegisterArgument for Option<NonNull<T>> {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl ReturnValue for ! {
fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self {
rtabort!("Usercall {call}: did not expect to be re-entered");
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl ReturnValue for () {
fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
rtassert!(usercall_retval.0 == 0);
@@ -144,6 +160,7 @@ impl ReturnValue for () {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument> ReturnValue for T {
fn from_registers(call: &'static str, usercall_retval: (Register, Register)) -> Self {
rtassert!(usercall_retval.1 == 0);
@@ -151,6 +168,7 @@ impl<T: RegisterArgument> ReturnValue for T {
}
}
+#[unstable(feature = "sgx_platform", issue = "56975")]
impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) {
fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self {
(T::from_register(regs.0), U::from_register(regs.1))
diff --git a/library/std/src/sys/sgx/abi/usercalls/tests.rs b/library/std/src/sys/sgx/abi/usercalls/tests.rs
index cbf7d7d54..58b8eb215 100644
--- a/library/std/src/sys/sgx/abi/usercalls/tests.rs
+++ b/library/std/src/sys/sgx/abi/usercalls/tests.rs
@@ -1,8 +1,8 @@
-use super::alloc::copy_to_userspace;
use super::alloc::User;
+use super::alloc::{copy_from_userspace, copy_to_userspace};
#[test]
-fn test_copy_function() {
+fn test_copy_to_userspace_function() {
let mut src = [0u8; 100];
let mut dst = User::<[u8]>::uninitialized(100);
@@ -17,12 +17,38 @@ fn test_copy_function() {
dst.copy_from_enclave(&[0u8; 100]);
// Copy src[0..size] to dst + offset
- unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().offset(offset), size) };
+ unsafe { copy_to_userspace(src.as_ptr(), dst.as_mut_ptr().add(offset), size) };
// Verify copy
for byte in 0..size {
unsafe {
- assert_eq!(*dst.as_ptr().offset(offset + byte as isize), src[byte as usize]);
+ assert_eq!(*dst.as_ptr().add(offset + byte), src[byte as usize]);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_copy_from_userspace_function() {
+ let mut dst = [0u8; 100];
+ let mut src = User::<[u8]>::uninitialized(100);
+
+ src.copy_from_enclave(&[0u8; 100]);
+
+ for size in 0..48 {
+ // For all possible alignment
+ for offset in 0..8 {
+ // overwrite complete dst
+ dst = [0u8; 100];
+
+ // Copy src[0..size] to dst + offset
+ unsafe { copy_from_userspace(src.as_ptr().offset(offset), dst.as_mut_ptr(), size) };
+
+ // Verify copy
+ for byte in 0..size {
+ unsafe {
+ assert_eq!(dst[byte as usize], *src.as_ptr().offset(offset + byte as isize));
}
}
}
diff --git a/library/std/src/sys/sgx/mod.rs b/library/std/src/sys/sgx/mod.rs
index 696400670..b1d32929e 100644
--- a/library/std/src/sys/sgx/mod.rs
+++ b/library/std/src/sys/sgx/mod.rs
@@ -47,7 +47,7 @@ pub mod locks {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+pub unsafe fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {
unsafe {
args::init(argc, argv);
}
diff --git a/library/std/src/sys/sgx/mutex.rs b/library/std/src/sys/sgx/mutex.rs
index 513cd77fd..aa747d56b 100644
--- a/library/std/src/sys/sgx/mutex.rs
+++ b/library/std/src/sys/sgx/mutex.rs
@@ -21,9 +21,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn lock(&self) {
let mut guard = self.inner.lock();
if *guard.lock_var() {
diff --git a/library/std/src/sys/sgx/thread_local_key.rs b/library/std/src/sys/sgx/thread_local_key.rs
index b21784475..c7a57d3a3 100644
--- a/library/std/src/sys/sgx/thread_local_key.rs
+++ b/library/std/src/sys/sgx/thread_local_key.rs
@@ -21,8 +21,3 @@ pub unsafe fn get(key: Key) -> *mut u8 {
pub unsafe fn destroy(key: Key) {
Tls::destroy(AbiKey::from_usize(key))
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- false
-}
diff --git a/library/std/src/sys/solid/fs.rs b/library/std/src/sys/solid/fs.rs
index a2cbee4dc..6c66b93a3 100644
--- a/library/std/src/sys/solid/fs.rs
+++ b/library/std/src/sys/solid/fs.rs
@@ -2,7 +2,7 @@ use super::{abi, error};
use crate::{
ffi::{CStr, CString, OsStr, OsString},
fmt,
- io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom},
+ io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom},
mem::MaybeUninit,
os::raw::{c_int, c_short},
os::solid::ffi::OsStrExt,
@@ -77,6 +77,9 @@ pub struct OpenOptions {
custom_flags: i32,
}
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {}
+
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FilePermissions(c_short);
@@ -126,6 +129,11 @@ impl FilePermissions {
}
}
+impl FileTimes {
+ pub fn set_accessed(&mut self, _t: SystemTime) {}
+ pub fn set_modified(&mut self, _t: SystemTime) {}
+}
+
impl FileType {
pub fn is_dir(&self) -> bool {
self.is(abi::S_IFDIR)
@@ -167,15 +175,19 @@ impl Iterator for ReadDir {
type Item = io::Result<DirEntry>;
fn next(&mut self) -> Option<io::Result<DirEntry>> {
- unsafe {
- let mut out_dirent = MaybeUninit::uninit();
- error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir(
+ let entry = unsafe {
+ let mut out_entry = MaybeUninit::uninit();
+ match error::SolidError::err_if_negative(abi::SOLID_FS_ReadDir(
self.inner.dirp,
- out_dirent.as_mut_ptr(),
- ))
- .ok()?;
- Some(Ok(DirEntry { entry: out_dirent.assume_init(), inner: Arc::clone(&self.inner) }))
- }
+ out_entry.as_mut_ptr(),
+ )) {
+ Ok(_) => out_entry.assume_init(),
+ Err(e) if e.as_raw() == abi::SOLID_ERR_NOTFOUND => return None,
+ Err(e) => return Some(Err(e.as_io_error())),
+ }
+ };
+
+ (entry.d_name[0] != 0).then(|| Ok(DirEntry { entry, inner: Arc::clone(&self.inner) }))
}
}
@@ -358,13 +370,13 @@ impl File {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
unsafe {
- let len = buf.remaining();
+ let len = cursor.capacity();
let mut out_num_bytes = MaybeUninit::uninit();
error::SolidError::err_if_negative(abi::SOLID_FS_Read(
self.fd.raw(),
- buf.unfilled_mut().as_mut_ptr() as *mut u8,
+ cursor.as_mut().as_mut_ptr() as *mut u8,
len,
out_num_bytes.as_mut_ptr(),
))
@@ -376,9 +388,7 @@ impl File {
// Safety: `num_bytes_read` bytes were written to the unfilled
// portion of the buffer
- buf.assume_init(num_bytes_read);
-
- buf.add_filled(num_bytes_read);
+ cursor.advance(num_bytes_read);
Ok(())
}
@@ -452,6 +462,10 @@ impl File {
pub fn set_permissions(&self, _perm: FilePermissions) -> io::Result<()> {
unsupported()
}
+
+ pub fn set_times(&self, _times: FileTimes) -> io::Result<()> {
+ unsupported()
+ }
}
impl Drop for File {
diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs
index 778a589d1..5867979a2 100644
--- a/library/std/src/sys/solid/mod.rs
+++ b/library/std/src/sys/solid/mod.rs
@@ -56,7 +56,7 @@ pub mod locks {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {}
// SAFETY: must be called only once during runtime cleanup.
pub unsafe fn cleanup() {}
diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs
index b5649d6e0..4906c6268 100644
--- a/library/std/src/sys/solid/os.rs
+++ b/library/std/src/sys/solid/os.rs
@@ -1,4 +1,5 @@
use super::unsupported;
+use crate::convert::TryFrom;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
@@ -8,7 +9,8 @@ use crate::os::{
solid::ffi::{OsStrExt, OsStringExt},
};
use crate::path::{self, PathBuf};
-use crate::sys_common::rwlock::StaticRwLock;
+use crate::sync::RwLock;
+use crate::sys::common::small_c_string::run_with_cstr;
use crate::vec;
use super::{error, itron, memchr};
@@ -78,7 +80,7 @@ pub fn current_exe() -> io::Result<PathBuf> {
unsupported()
}
-static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+static ENV_LOCK: RwLock<()> = RwLock::new(());
pub struct Env {
iter: vec::IntoIter<(OsString, OsString)>,
@@ -139,35 +141,33 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
+ let s = run_with_cstr(k.as_bytes(), |k| {
let _guard = ENV_LOCK.read();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
+ })
+ .ok()?;
+
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = ENV_LOCK.write();
- cvt_env(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| {
+ let _guard = ENV_LOCK.write();
+ cvt_env(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
+ run_with_cstr(n.as_bytes(), |nbuf| {
let _guard = ENV_LOCK.write();
- cvt_env(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ cvt_env(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop)
+ })
}
/// In kmclib, `setenv` and `unsetenv` don't always set `errno`, so this
diff --git a/library/std/src/sys/solid/thread_local_key.rs b/library/std/src/sys/solid/thread_local_key.rs
index b17521f70..b37bf9996 100644
--- a/library/std/src/sys/solid/thread_local_key.rs
+++ b/library/std/src/sys/solid/thread_local_key.rs
@@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 {
pub unsafe fn destroy(_key: Key) {
panic!("should not be used on the solid target");
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- panic!("should not be used on the solid target");
-}
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index 30812dabb..dbaa3c33e 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -4,7 +4,7 @@
mod tests;
use crate::cmp;
-use crate::io::{self, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, Read};
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
use crate::sys::cvt;
use crate::sys_common::{AsInner, FromInner, IntoInner};
@@ -131,20 +131,19 @@ impl FileDesc {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let ret = cvt(unsafe {
libc::read(
self.as_raw_fd(),
- buf.unfilled_mut().as_mut_ptr() as *mut libc::c_void,
- cmp::min(buf.remaining(), READ_LIMIT),
+ cursor.as_mut().as_mut_ptr() as *mut libc::c_void,
+ cmp::min(cursor.capacity(), READ_LIMIT),
)
})?;
// Safety: `ret` bytes were written to the initialized portion of the buffer
unsafe {
- buf.assume_init(ret as usize);
+ cursor.advance(ret as usize);
}
- buf.add_filled(ret as usize);
Ok(())
}
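A hedged sketch of how a caller drives the new `read_buf` signature, from the user-facing side rather than the sys layer shown above: a `BorrowedBuf` wraps possibly uninitialized storage and hands out a `BorrowedCursor` that tracks how much has been initialized and filled. These APIs were nightly-only at the time of this patch; the feature gate name is an assumption.

    #![feature(read_buf)]
    use std::io::{BorrowedBuf, Read, Result};
    use std::mem::MaybeUninit;

    fn read_into_uninit<R: Read>(r: &mut R) -> Result<usize> {
        let mut storage = [MaybeUninit::<u8>::uninit(); 64];
        let mut buf = BorrowedBuf::from(&mut storage[..]);
        r.read_buf(buf.unfilled())?; // takes a BorrowedCursor<'_>
        Ok(buf.len())                // number of bytes filled so far
    }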
diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs
index b5cc8038c..37a49f2d7 100644
--- a/library/std/src/sys/unix/fs.rs
+++ b/library/std/src/sys/unix/fs.rs
@@ -1,13 +1,26 @@
+// miri has some special hacks here that make things unused.
+#![cfg_attr(miri, allow(unused))]
+
use crate::os::unix::prelude::*;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
-use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
use crate::mem;
+#[cfg(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "fuchsia",
+ target_os = "redox",
+ target_os = "illumos"
+))]
+use crate::mem::MaybeUninit;
use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd};
use crate::path::{Path, PathBuf};
use crate::ptr;
use crate::sync::Arc;
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::fd::FileDesc;
use crate::sys::time::SystemTime;
use crate::sys::{cvt, cvt_r};
@@ -260,7 +273,7 @@ pub struct DirEntry {
// We need to store an owned copy of the entry name on platforms that use
// readdir() (not readdir_r()), because a) struct dirent may use a flexible
// array to store the name, b) it lives only until the next readdir() call.
- name: CString,
+ name: crate::ffi::CString,
}
// Define a minimal subset of fields we need from `dirent64`, especially since
@@ -313,8 +326,11 @@ pub struct FilePermissions {
mode: mode_t,
}
-#[derive(Copy, Clone)]
-pub struct FileTimes([libc::timespec; 2]);
+#[derive(Copy, Clone, Debug, Default)]
+pub struct FileTimes {
+ accessed: Option<SystemTime>,
+ modified: Option<SystemTime>,
+}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct FileType {
@@ -512,45 +528,11 @@ impl FilePermissions {
impl FileTimes {
pub fn set_accessed(&mut self, t: SystemTime) {
- self.0[0] = t.t.to_timespec().expect("Invalid system time");
+ self.accessed = Some(t);
}
pub fn set_modified(&mut self, t: SystemTime) {
- self.0[1] = t.t.to_timespec().expect("Invalid system time");
- }
-}
-
-struct TimespecDebugAdapter<'a>(&'a libc::timespec);
-
-impl fmt::Debug for TimespecDebugAdapter<'_> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("timespec")
- .field("tv_sec", &self.0.tv_sec)
- .field("tv_nsec", &self.0.tv_nsec)
- .finish()
- }
-}
-
-impl fmt::Debug for FileTimes {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("FileTimes")
- .field("accessed", &TimespecDebugAdapter(&self.0[0]))
- .field("modified", &TimespecDebugAdapter(&self.0[1]))
- .finish()
- }
-}
-
-impl Default for FileTimes {
- fn default() -> Self {
- // Redox doesn't appear to support `UTIME_OMIT`, so we stub it out here, and always return
- // an error in `set_times`.
- // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
- // the same as for Redox.
- #[cfg(any(target_os = "redox", target_os = "espidf"))]
- let omit = libc::timespec { tv_sec: 0, tv_nsec: 0 };
- #[cfg(not(any(target_os = "redox", target_os = "espidf")))]
- let omit = libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ };
- Self([omit; 2])
+ self.modified = Some(t);
}
}
@@ -614,33 +596,69 @@ impl Iterator for ReadDir {
};
}
- // Only d_reclen bytes of *entry_ptr are valid, so we can't just copy the
- // whole thing (#93384). Instead, copy everything except the name.
- let mut copy: dirent64 = mem::zeroed();
- // Can't dereference entry_ptr, so use the local entry to get
- // offsetof(struct dirent, d_name)
- let copy_bytes = &mut copy as *mut _ as *mut u8;
- let copy_name = &mut copy.d_name as *mut _ as *mut u8;
- let name_offset = copy_name.offset_from(copy_bytes) as usize;
- let entry_bytes = entry_ptr as *const u8;
- let entry_name = entry_bytes.add(name_offset);
- ptr::copy_nonoverlapping(entry_bytes, copy_bytes, name_offset);
+ // The dirent64 struct is a weird imaginary thing that isn't ever supposed
+ // to be worked with by value. Its trailing d_name field is declared
+ // variously as [c_char; 256] or [c_char; 1] on different systems but
+ // either way that size is meaningless; only the offset of d_name is
+ // meaningful. The dirent64 pointers that libc returns from readdir64 are
+ // allowed to point to allocations smaller _or_ LARGER than implied by the
+ // definition of the struct.
+ //
+ // As such, we need to be even more careful with dirent64 than if its
+ // contents were "simply" partially initialized data.
+ //
+ // Like for uninitialized contents, converting entry_ptr to `&dirent64`
+ // would not be legal. However, unique to dirent64 is that we don't even
+ // get to use `addr_of!((*entry_ptr).d_name)` because that operation
+ // requires the full extent of *entry_ptr to be in bounds of the same
+ // allocation, which is not necessarily the case here.
+ //
+ // Absent any other way to obtain a pointer to `(*entry_ptr).d_name`
+ // legally in Rust analogously to how it would be done in C, we instead
+ // need to make our own non-libc allocation that conforms to the weird
+ // imaginary definition of dirent64, and use that for a field offset
+ // computation.
+ macro_rules! offset_ptr {
+ ($entry_ptr:expr, $field:ident) => {{
+ const OFFSET: isize = {
+ let delusion = MaybeUninit::<dirent64>::uninit();
+ let entry_ptr = delusion.as_ptr();
+ unsafe {
+ ptr::addr_of!((*entry_ptr).$field)
+ .cast::<u8>()
+ .offset_from(entry_ptr.cast::<u8>())
+ }
+ };
+ if true {
+ // Cast to the same type determined by the else branch.
+ $entry_ptr.byte_offset(OFFSET).cast::<_>()
+ } else {
+ #[allow(deref_nullptr)]
+ {
+ ptr::addr_of!((*ptr::null::<dirent64>()).$field)
+ }
+ }
+ }};
+ }
+
+ // d_name is guaranteed to be null-terminated.
+ let name = CStr::from_ptr(offset_ptr!(entry_ptr, d_name).cast());
+ let name_bytes = name.to_bytes();
+ if name_bytes == b"." || name_bytes == b".." {
+ continue;
+ }
let entry = dirent64_min {
- d_ino: copy.d_ino as u64,
+ d_ino: *offset_ptr!(entry_ptr, d_ino) as u64,
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
- d_type: copy.d_type as u8,
+ d_type: *offset_ptr!(entry_ptr, d_type) as u8,
};
- let ret = DirEntry {
+ return Some(Ok(DirEntry {
entry,
- // d_name is guaranteed to be null-terminated.
- name: CStr::from_ptr(entry_name as *const _).to_owned(),
+ name: name.to_owned(),
dir: Arc::clone(&self.inner),
- };
- if ret.name_bytes() != b"." && ret.name_bytes() != b".." {
- return Some(Ok(ret));
- }
+ }));
}
}
}
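
As a standalone illustration of the trick offset_ptr! relies on: the field offset is computed from a MaybeUninit value we own, so no pointer arithmetic ever touches the possibly-short allocation returned by readdir64. The struct below is hypothetical, standing in for dirent64:

    use std::mem::MaybeUninit;
    use std::ptr;

    // Hypothetical record with a trailing name field, in the spirit of dirent64.
    #[repr(C)]
    struct Record {
        id: u64,
        tag: u8,
        name: [u8; 1],
    }

    fn main() {
        // Compute offsetof(Record, name) on storage we control.
        let dummy = MaybeUninit::<Record>::uninit();
        let base = dummy.as_ptr();
        let name_offset = unsafe {
            // addr_of! takes the field's address without reading the uninitialized data.
            ptr::addr_of!((*base).name).cast::<u8>().offset_from(base.cast::<u8>())
        };
        // repr(C): 8 bytes of `id`, `tag` at offset 8, so `name` starts at 9.
        assert_eq!(name_offset, 9);
    }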
@@ -687,7 +705,11 @@ impl Iterator for ReadDir {
impl Drop for Dir {
fn drop(&mut self) {
let r = unsafe { libc::closedir(self.0) };
- debug_assert_eq!(r, 0);
+ assert!(
+ r == 0 || crate::io::Error::last_os_error().kind() == crate::io::ErrorKind::Interrupted,
+ "unexpected error during closedir: {:?}",
+ crate::io::Error::last_os_error()
+ );
}
}
@@ -700,7 +722,10 @@ impl DirEntry {
self.file_name_os_str().to_os_string()
}
- #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "android"))]
+ #[cfg(all(
+ any(target_os = "linux", target_os = "emscripten", target_os = "android"),
+ not(miri)
+ ))]
pub fn metadata(&self) -> io::Result<FileAttr> {
let fd = cvt(unsafe { dirfd(self.dir.dirp.0) })?;
let name = self.name_cstr().as_ptr();
@@ -721,7 +746,10 @@ impl DirEntry {
Ok(FileAttr::from_stat64(stat))
}
- #[cfg(not(any(target_os = "linux", target_os = "emscripten", target_os = "android")))]
+ #[cfg(any(
+ not(any(target_os = "linux", target_os = "emscripten", target_os = "android")),
+ miri
+ ))]
pub fn metadata(&self) -> io::Result<FileAttr> {
lstat(&self.path())
}
@@ -925,8 +953,7 @@ impl OpenOptions {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
- let path = cstr(path)?;
- File::open_c(&path, opts)
+ run_path_with_cstr(path, |path| File::open_c(path, opts))
}
pub fn open_c(path: &CStr, opts: &OpenOptions) -> io::Result<File> {
@@ -1031,8 +1058,8 @@ impl File {
self.0.read_at(buf, offset)
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.0.read_buf(buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.0.read_buf(cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -1078,10 +1105,21 @@ impl File {
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))]
+ let to_timespec = |time: Option<SystemTime>| {
+ match time {
+ Some(time) if let Some(ts) = time.t.to_timespec() => Ok(ts),
+ Some(time) if time > crate::sys::time::UNIX_EPOCH => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
+ Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too small to set as a file time")),
+ None => Ok(libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT as _ }),
+ }
+ };
+ #[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon")))]
+ let times = [to_timespec(times.accessed)?, to_timespec(times.modified)?];
cfg_if::cfg_if! {
- if #[cfg(any(target_os = "redox", target_os = "espidf"))] {
+ if #[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon"))] {
// Redox doesn't appear to support `UTIME_OMIT`.
- // ESP-IDF does not support `futimens` at all and the behavior for that OS is therefore
+ // ESP-IDF and HorizonOS do not support `futimens` at all and the behavior for those OS is therefore
// the same as for Redox.
drop(times);
Err(io::const_io_error!(
@@ -1093,7 +1131,7 @@ impl File {
cvt(unsafe {
weak!(fn futimens(c_int, *const libc::timespec) -> c_int);
match futimens.get() {
- Some(futimens) => futimens(self.as_raw_fd(), times.0.as_ptr()),
+ Some(futimens) => futimens(self.as_raw_fd(), times.as_ptr()),
#[cfg(target_os = "macos")]
None => {
fn ts_to_tv(ts: &libc::timespec) -> libc::timeval {
@@ -1102,7 +1140,7 @@ impl File {
tv_usec: (ts.tv_nsec / 1000) as _
}
}
- let timevals = [ts_to_tv(&times.0[0]), ts_to_tv(&times.0[1])];
+ let timevals = [ts_to_tv(&times[0]), ts_to_tv(&times[1])];
libc::futimes(self.as_raw_fd(), timevals.as_ptr())
}
// futimes requires even newer Android.
@@ -1115,7 +1153,7 @@ impl File {
})?;
Ok(())
} else {
- cvt(unsafe { libc::futimens(self.as_raw_fd(), times.0.as_ptr()) })?;
+ cvt(unsafe { libc::futimens(self.as_raw_fd(), times.as_ptr()) })?;
Ok(())
}
}
@@ -1128,9 +1166,7 @@ impl DirBuilder {
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::mkdir(p.as_ptr(), self.mode) }).map(|_| ()))
}
pub fn set_mode(&mut self, mode: u32) {
@@ -1138,10 +1174,6 @@ impl DirBuilder {
}
}
-fn cstr(path: &Path) -> io::Result<CString> {
- Ok(CString::new(path.as_os_str().as_bytes())?)
-}
-
impl AsInner<FileDesc> for File {
fn as_inner(&self) -> &FileDesc {
&self.0
@@ -1192,7 +1224,12 @@ impl FromRawFd for File {
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- #[cfg(any(target_os = "linux", target_os = "netbsd"))]
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "netbsd",
+ target_os = "illumos",
+ target_os = "solaris"
+ ))]
fn get_path(fd: c_int) -> Option<PathBuf> {
let mut p = PathBuf::from("/proc/self/fd");
p.push(&fd.to_string());
@@ -1247,14 +1284,23 @@ impl fmt::Debug for File {
target_os = "macos",
target_os = "vxworks",
all(target_os = "freebsd", target_arch = "x86_64"),
- target_os = "netbsd"
+ target_os = "netbsd",
+ target_os = "illumos",
+ target_os = "solaris"
)))]
fn get_path(_fd: c_int) -> Option<PathBuf> {
// FIXME(#24570): implement this for other Unix platforms
None
}
- #[cfg(any(target_os = "linux", target_os = "macos", target_os = "vxworks"))]
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "vxworks"
+ ))]
fn get_mode(fd: c_int) -> Option<(bool, bool)> {
let mode = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if mode == -1 {
@@ -1268,7 +1314,14 @@ impl fmt::Debug for File {
}
}
- #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "vxworks")))]
+ #[cfg(not(any(
+ target_os = "linux",
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "vxworks"
+ )))]
fn get_mode(_fd: c_int) -> Option<(bool, bool)> {
// FIXME(#24570): implement this for other Unix platforms
None
@@ -1287,173 +1340,170 @@ impl fmt::Debug for File {
}
}
-pub fn readdir(p: &Path) -> io::Result<ReadDir> {
- let root = p.to_path_buf();
- let p = cstr(p)?;
- unsafe {
- let ptr = libc::opendir(p.as_ptr());
- if ptr.is_null() {
- Err(Error::last_os_error())
- } else {
- let inner = InnerReadDir { dirp: Dir(ptr), root };
- Ok(ReadDir {
- inner: Arc::new(inner),
- #[cfg(not(any(
- target_os = "android",
- target_os = "linux",
- target_os = "solaris",
- target_os = "illumos",
- target_os = "fuchsia",
- target_os = "redox",
- )))]
- end_of_stream: false,
- })
- }
+pub fn readdir(path: &Path) -> io::Result<ReadDir> {
+ let ptr = run_path_with_cstr(path, |p| unsafe { Ok(libc::opendir(p.as_ptr())) })?;
+ if ptr.is_null() {
+ Err(Error::last_os_error())
+ } else {
+ let root = path.to_path_buf();
+ let inner = InnerReadDir { dirp: Dir(ptr), root };
+ Ok(ReadDir {
+ inner: Arc::new(inner),
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "linux",
+ target_os = "solaris",
+ target_os = "illumos",
+ target_os = "fuchsia",
+ target_os = "redox",
+ )))]
+ end_of_stream: false,
+ })
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::unlink(p.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::unlink(p.as_ptr()) }).map(|_| ()))
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
- let old = cstr(old)?;
- let new = cstr(new)?;
- cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(old, |old| {
+ run_path_with_cstr(new, |new| {
+ cvt(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) }).map(|_| ())
+ })
+ })
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
- let p = cstr(p)?;
- cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt_r(|| unsafe { libc::chmod(p.as_ptr(), perm.mode) }).map(|_| ()))
}
pub fn rmdir(p: &Path) -> io::Result<()> {
- let p = cstr(p)?;
- cvt(unsafe { libc::rmdir(p.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(p, |p| cvt(unsafe { libc::rmdir(p.as_ptr()) }).map(|_| ()))
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
- let c_path = cstr(p)?;
- let p = c_path.as_ptr();
+ run_path_with_cstr(p, |c_path| {
+ let p = c_path.as_ptr();
- let mut buf = Vec::with_capacity(256);
+ let mut buf = Vec::with_capacity(256);
- loop {
- let buf_read =
- cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })? as usize;
+ loop {
+ let buf_read =
+ cvt(unsafe { libc::readlink(p, buf.as_mut_ptr() as *mut _, buf.capacity()) })?
+ as usize;
- unsafe {
- buf.set_len(buf_read);
- }
+ unsafe {
+ buf.set_len(buf_read);
+ }
- if buf_read != buf.capacity() {
- buf.shrink_to_fit();
+ if buf_read != buf.capacity() {
+ buf.shrink_to_fit();
- return Ok(PathBuf::from(OsString::from_vec(buf)));
- }
+ return Ok(PathBuf::from(OsString::from_vec(buf)));
+ }
- // Trigger the internal buffer resizing logic of `Vec` by requiring
- // more space than the current capacity. The length is guaranteed to be
- // the same as the capacity due to the if statement above.
- buf.reserve(1);
- }
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity. The length is guaranteed to be
+ // the same as the capacity due to the if statement above.
+ buf.reserve(1);
+ }
+ })
}
pub fn symlink(original: &Path, link: &Path) -> io::Result<()> {
- let original = cstr(original)?;
- let link = cstr(link)?;
- cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(original, |original| {
+ run_path_with_cstr(link, |link| {
+ cvt(unsafe { libc::symlink(original.as_ptr(), link.as_ptr()) }).map(|_| ())
+ })
+ })
}
pub fn link(original: &Path, link: &Path) -> io::Result<()> {
- let original = cstr(original)?;
- let link = cstr(link)?;
- cfg_if::cfg_if! {
- if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] {
- // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves
- // it implementation-defined whether `link` follows symlinks, so rely on the
- // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior.
- // Android has `linkat` on newer versions, but we happen to know `link`
- // always has the correct behavior, so it's here as well.
- cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
- } else if #[cfg(target_os = "macos")] {
- // On MacOS, older versions (<=10.9) lack support for linkat while newer
- // versions have it. We want to use linkat if it is available, so we use weak!
- // to check. `linkat` is preferable to `link` because it gives us a flag to
- // specify how symlinks should be handled. We pass 0 as the flags argument,
- // meaning it shouldn't follow symlinks.
- weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
-
- if let Some(f) = linkat.get() {
- cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
- } else {
- cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
- };
- } else {
- // Where we can, use `linkat` instead of `link`; see the comment above
- // this one for details on why.
- cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
- }
- }
- Ok(())
+ run_path_with_cstr(original, |original| {
+ run_path_with_cstr(link, |link| {
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "vxworks", target_os = "redox", target_os = "android", target_os = "espidf", target_os = "horizon"))] {
+ // VxWorks, Redox and ESP-IDF lack `linkat`, so use `link` instead. POSIX leaves
+ // it implementation-defined whether `link` follows symlinks, so rely on the
+ // `symlink_hard_link` test in library/std/src/fs/tests.rs to check the behavior.
+ // Android has `linkat` on newer versions, but we happen to know `link`
+ // always has the correct behavior, so it's here as well.
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ } else if #[cfg(target_os = "macos")] {
+ // On MacOS, older versions (<=10.9) lack support for linkat while newer
+ // versions have it. We want to use linkat if it is available, so we use weak!
+ // to check. `linkat` is preferable to `link` because it gives us a flag to
+ // specify how symlinks should be handled. We pass 0 as the flags argument,
+ // meaning it shouldn't follow symlinks.
+ weak!(fn linkat(c_int, *const c_char, c_int, *const c_char, c_int) -> c_int);
+
+ if let Some(f) = linkat.get() {
+ cvt(unsafe { f(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ } else {
+ cvt(unsafe { libc::link(original.as_ptr(), link.as_ptr()) })?;
+ };
+ } else {
+ // Where we can, use `linkat` instead of `link`; see the comment above
+ // this one for details on why.
+ cvt(unsafe { libc::linkat(libc::AT_FDCWD, original.as_ptr(), libc::AT_FDCWD, link.as_ptr(), 0) })?;
+ }
+ }
+ Ok(())
+ })
+ })
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
- let p = cstr(p)?;
-
- cfg_has_statx! {
- if let Some(ret) = unsafe { try_statx(
- libc::AT_FDCWD,
- p.as_ptr(),
- libc::AT_STATX_SYNC_AS_STAT,
- libc::STATX_ALL,
- ) } {
- return ret;
+ run_path_with_cstr(p, |p| {
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
}
- }
- let mut stat: stat64 = unsafe { mem::zeroed() };
- cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?;
- Ok(FileAttr::from_stat64(stat))
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { stat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+ })
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
- let p = cstr(p)?;
-
- cfg_has_statx! {
- if let Some(ret) = unsafe { try_statx(
- libc::AT_FDCWD,
- p.as_ptr(),
- libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
- libc::STATX_ALL,
- ) } {
- return ret;
+ run_path_with_cstr(p, |p| {
+ cfg_has_statx! {
+ if let Some(ret) = unsafe { try_statx(
+ libc::AT_FDCWD,
+ p.as_ptr(),
+ libc::AT_SYMLINK_NOFOLLOW | libc::AT_STATX_SYNC_AS_STAT,
+ libc::STATX_ALL,
+ ) } {
+ return ret;
+ }
}
- }
- let mut stat: stat64 = unsafe { mem::zeroed() };
- cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?;
- Ok(FileAttr::from_stat64(stat))
+ let mut stat: stat64 = unsafe { mem::zeroed() };
+ cvt(unsafe { lstat64(p.as_ptr(), &mut stat) })?;
+ Ok(FileAttr::from_stat64(stat))
+ })
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
- let path = CString::new(p.as_os_str().as_bytes())?;
- let buf;
- unsafe {
- let r = libc::realpath(path.as_ptr(), ptr::null_mut());
- if r.is_null() {
- return Err(io::Error::last_os_error());
- }
- buf = CStr::from_ptr(r).to_bytes().to_vec();
- libc::free(r as *mut _);
+ let r = run_path_with_cstr(p, |path| unsafe {
+ Ok(libc::realpath(path.as_ptr(), ptr::null_mut()))
+ })?;
+ if r.is_null() {
+ return Err(io::Error::last_os_error());
}
- Ok(PathBuf::from(OsString::from_vec(buf)))
+ Ok(PathBuf::from(OsString::from_vec(unsafe {
+ let buf = CStr::from_ptr(r).to_bytes().to_vec();
+ libc::free(r as *mut _);
+ buf
+ })))
}
fn open_from(from: &Path) -> io::Result<(crate::fs::File, crate::fs::Metadata)> {
@@ -1603,9 +1653,9 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
// Opportunistically attempt to create a copy-on-write clone of `from`
// using `fclonefileat`.
if HAS_FCLONEFILEAT.load(Ordering::Relaxed) {
- let to = cstr(to)?;
- let clonefile_result =
- cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) });
+ let clonefile_result = run_path_with_cstr(to, |to| {
+ cvt(unsafe { fclonefileat(reader.as_raw_fd(), libc::AT_FDCWD, to.as_ptr(), 0) })
+ });
match clonefile_result {
Ok(_) => return Ok(reader_metadata.len()),
Err(err) => match err.raw_os_error() {
@@ -1649,9 +1699,10 @@ pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
- let path = cstr(path)?;
- cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
- Ok(())
+ run_path_with_cstr(path, |path| {
+ cvt(unsafe { libc::chown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })
+ .map(|_| ())
+ })
}
pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> {
@@ -1660,16 +1711,15 @@ pub fn fchown(fd: c_int, uid: u32, gid: u32) -> io::Result<()> {
}
pub fn lchown(path: &Path, uid: u32, gid: u32) -> io::Result<()> {
- let path = cstr(path)?;
- cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })?;
- Ok(())
+ run_path_with_cstr(path, |path| {
+ cvt(unsafe { libc::lchown(path.as_ptr(), uid as libc::uid_t, gid as libc::gid_t) })
+ .map(|_| ())
+ })
}
#[cfg(not(any(target_os = "fuchsia", target_os = "vxworks")))]
pub fn chroot(dir: &Path) -> io::Result<()> {
- let dir = cstr(dir)?;
- cvt(unsafe { libc::chroot(dir.as_ptr()) })?;
- Ok(())
+ run_path_with_cstr(dir, |dir| cvt(unsafe { libc::chroot(dir.as_ptr()) }).map(|_| ()))
}
pub use remove_dir_impl::remove_dir_all;
@@ -1683,13 +1733,14 @@ mod remove_dir_impl {
// Modern implementation using openat(), unlinkat() and fdopendir()
#[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri)))]
mod remove_dir_impl {
- use super::{cstr, lstat, Dir, DirEntry, InnerReadDir, ReadDir};
+ use super::{lstat, Dir, DirEntry, InnerReadDir, ReadDir};
use crate::ffi::CStr;
use crate::io;
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use crate::os::unix::prelude::{OwnedFd, RawFd};
use crate::path::{Path, PathBuf};
use crate::sync::Arc;
+ use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::{cvt, cvt_r};
#[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))]
@@ -1856,7 +1907,7 @@ mod remove_dir_impl {
if attr.file_type().is_symlink() {
crate::fs::remove_file(p)
} else {
- remove_dir_all_recursive(None, &cstr(p)?)
+ run_path_with_cstr(p, |p| remove_dir_all_recursive(None, &p))
}
}
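
Throughout this file the old cstr() helper, which heap-allocated a CString for every syscall, is replaced by run_path_with_cstr from sys/common/small_c_string.rs, which passes a borrowed CStr to a closure (and, as the module name suggests, can avoid the allocation for short paths). A simplified sketch of the pattern, always heap-allocating and with an assumed error message, shown only for the shape of the API:

    use std::ffi::{CStr, CString};
    use std::io;
    use std::os::unix::ffi::OsStrExt;
    use std::path::Path;

    // Simplified stand-in for sys::common::small_c_string::run_path_with_cstr.
    fn run_path_with_cstr<T, F>(path: &Path, f: F) -> io::Result<T>
    where
        F: FnOnce(&CStr) -> io::Result<T>,
    {
        let cstr = CString::new(path.as_os_str().as_bytes())
            .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "path contained a NUL byte"))?;
        f(&cstr)
    }

    fn main() -> io::Result<()> {
        // Same shape as the unlink/rmdir/chmod call sites above, with the libc
        // call replaced by a print so the sketch runs anywhere.
        run_path_with_cstr(Path::new("/tmp/example"), |p| {
            println!("would hand {:?} to a libc call", p);
            Ok(())
        })
    }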
diff --git a/library/std/src/sys/unix/io.rs b/library/std/src/sys/unix/io.rs
index deb5ee76b..29c340dd3 100644
--- a/library/std/src/sys/unix/io.rs
+++ b/library/std/src/sys/unix/io.rs
@@ -1,4 +1,5 @@
use crate::marker::PhantomData;
+use crate::os::fd::{AsFd, AsRawFd};
use crate::slice;
use libc::{c_void, iovec};
@@ -74,3 +75,8 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.iov_base as *mut u8, self.vec.iov_len) }
}
}
+
+pub fn is_terminal(fd: &impl AsFd) -> bool {
+ let fd = fd.as_fd();
+ unsafe { libc::isatty(fd.as_raw_fd()) != 0 }
+}
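
A user-space equivalent of the helper added above, written against the libc crate (an assumed dependency here; inside std the symbol is linked directly):

    use std::os::unix::io::{AsFd, AsRawFd};

    fn is_terminal(fd: &impl AsFd) -> bool {
        // isatty(3) returns 1 for a terminal and 0 otherwise.
        unsafe { libc::isatty(fd.as_fd().as_raw_fd()) != 0 }
    }

    fn main() {
        println!("stdout is a terminal: {}", is_terminal(&std::io::stdout()));
    }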
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 8f7abb55e..94546ca09 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -20,7 +20,7 @@
//! Since those syscalls have requirements that cannot be fully checked in advance and
//! gathering additional information about file descriptors would require additional syscalls
//! anyway it simply attempts to use them one after another (guided by inaccurate hints) to
-//! figure out which one works and and falls back to the generic read-write copy loop if none of them
+//! figure out which one works and falls back to the generic read-write copy loop if none of them
//! does.
//! Once a working syscall is found for a pair of file descriptors it will be called in a loop
//! until the copy operation is completed.
diff --git a/library/std/src/sys/unix/locks/fuchsia_mutex.rs b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
index ce427599c..117611ce4 100644
--- a/library/std/src/sys/unix/locks/fuchsia_mutex.rs
+++ b/library/std/src/sys/unix/locks/fuchsia_mutex.rs
@@ -86,9 +86,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn try_lock(&self) -> bool {
let thread_self = zx_thread_self();
self.futex.compare_exchange(UNLOCKED, to_state(thread_self), Acquire, Relaxed).is_ok()
@@ -138,7 +135,7 @@ impl Mutex {
}
}
- // The state has changed or a wakeup occured, try to lock the mutex.
+ // The state has changed or a wakeup occurred, try to lock the mutex.
match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
Ok(_) => return,
Err(updated) => state = updated,
diff --git a/library/std/src/sys/unix/locks/futex_mutex.rs b/library/std/src/sys/unix/locks/futex_mutex.rs
index 99ba86e5f..33b13dad4 100644
--- a/library/std/src/sys/unix/locks/futex_mutex.rs
+++ b/library/std/src/sys/unix/locks/futex_mutex.rs
@@ -20,9 +20,6 @@ impl Mutex {
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn try_lock(&self) -> bool {
self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
}
@@ -53,7 +50,7 @@ impl Mutex {
// We avoid an unnecessary write if it as already set to 2,
// to be friendlier for the caches.
if state != 2 && self.futex.swap(2, Acquire) == 0 {
- // We changed it from 0 to 2, so we just succesfully locked it.
+ // We changed it from 0 to 2, so we just successfully locked it.
return;
}
diff --git a/library/std/src/sys/unix/locks/futex_rwlock.rs b/library/std/src/sys/unix/locks/futex_rwlock.rs
index b3bbbf743..0cc92244e 100644
--- a/library/std/src/sys/unix/locks/futex_rwlock.rs
+++ b/library/std/src/sys/unix/locks/futex_rwlock.rs
@@ -54,7 +54,7 @@ fn is_read_lockable(state: u32) -> bool {
// We don't allow read-locking if there's readers waiting, even if the lock is unlocked
// and there's no writers waiting. The only situation when this happens is after unlocking,
// at which point the unlocking thread might be waking up writers, which have priority over readers.
- // The unlocking thread will clear the readers waiting bit and wake up readers, if necssary.
+ // The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
}
diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs
index f5f92f693..9bb314b70 100644
--- a/library/std/src/sys/unix/locks/mod.rs
+++ b/library/std/src/sys/unix/locks/mod.rs
@@ -11,21 +11,21 @@ cfg_if::cfg_if! {
mod futex_rwlock;
mod futex_condvar;
pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
pub(crate) use futex_condvar::MovableCondvar;
} else if #[cfg(target_os = "fuchsia")] {
mod fuchsia_mutex;
mod futex_rwlock;
mod futex_condvar;
pub(crate) use fuchsia_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
pub(crate) use futex_condvar::MovableCondvar;
} else {
mod pthread_mutex;
mod pthread_rwlock;
mod pthread_condvar;
pub(crate) use pthread_mutex::{Mutex, MovableMutex};
- pub(crate) use pthread_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use pthread_rwlock::MovableRwLock;
pub(crate) use pthread_condvar::MovableCondvar;
}
}
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index abf27e7db..4741c0c67 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -172,7 +172,7 @@ impl Condvar {
let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
let stable_now = Instant::now();
let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
- debug_assert_eq!(r, 0);
+ assert_eq!(r, 0, "unexpected error: {:?}", crate::io::Error::last_os_error());
let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
let extra = (nsec / 1_000_000_000) as libc::time_t;
diff --git a/library/std/src/sys/unix/locks/pthread_mutex.rs b/library/std/src/sys/unix/locks/pthread_mutex.rs
index 98afee69b..5964935dd 100644
--- a/library/std/src/sys/unix/locks/pthread_mutex.rs
+++ b/library/std/src/sys/unix/locks/pthread_mutex.rs
@@ -52,7 +52,7 @@ impl Mutex {
Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
#[inline]
- pub unsafe fn init(&mut self) {
+ unsafe fn init(&mut self) {
// Issue #33770
//
// A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 3d0d91460..9055a011c 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -44,12 +44,13 @@ pub mod thread_parker;
pub mod time;
#[cfg(target_os = "espidf")]
-pub fn init(argc: isize, argv: *const *const u8) {}
+pub fn init(argc: isize, argv: *const *const u8, _sigpipe: u8) {}
#[cfg(not(target_os = "espidf"))]
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(argc: isize, argv: *const *const u8) {
+// See `fn init()` in `library/std/src/rt.rs` for docs on `sigpipe`.
+pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
// The standard streams might be closed on application startup. To prevent
// std::io::{stdin, stdout,stderr} objects from using other unrelated file
// resources opened later, we reopen standards streams when they are closed.
@@ -61,8 +62,9 @@ pub unsafe fn init(argc: isize, argv: *const *const u8) {
// want!
//
// Hence, we set SIGPIPE to ignore when the program starts up in order
- // to prevent this problem.
- reset_sigpipe();
+ // to prevent this problem. Add `#[unix_sigpipe = "..."]` above `fn main()` to
+ // alter this behavior.
+ reset_sigpipe(sigpipe);
stack_overflow::init();
args::init(argc, argv);
@@ -151,12 +153,64 @@ pub unsafe fn init(argc: isize, argv: *const *const u8) {
}
}
- unsafe fn reset_sigpipe() {
+ unsafe fn reset_sigpipe(#[allow(unused_variables)] sigpipe: u8) {
#[cfg(not(any(target_os = "emscripten", target_os = "fuchsia", target_os = "horizon")))]
- rtassert!(signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR);
+ {
+ // We don't want to add this as a public type to libstd, nor do we
+ // want to `include!` a file from the compiler (which would break
+ // Miri and xargo for example), so we choose to duplicate these
+ // constants from `compiler/rustc_session/src/config/sigpipe.rs`.
+ // See the other file for docs. NOTE: Make sure to keep them in
+ // sync!
+ mod sigpipe {
+ pub const DEFAULT: u8 = 0;
+ pub const INHERIT: u8 = 1;
+ pub const SIG_IGN: u8 = 2;
+ pub const SIG_DFL: u8 = 3;
+ }
+
+ let (sigpipe_attr_specified, handler) = match sigpipe {
+ sigpipe::DEFAULT => (false, Some(libc::SIG_IGN)),
+ sigpipe::INHERIT => (true, None),
+ sigpipe::SIG_IGN => (true, Some(libc::SIG_IGN)),
+ sigpipe::SIG_DFL => (true, Some(libc::SIG_DFL)),
+ _ => unreachable!(),
+ };
+ // The bootstrap compiler doesn't know about sigpipe::DEFAULT, and always passes in
+ // SIG_IGN. This causes some tests to fail because they expect SIGPIPE to be reset to
+ // default on process spawning (which doesn't happen if #[unix_sigpipe] is specified).
+ // Since we can't differentiate between the cases here, treat SIG_IGN as DEFAULT
+ // unconditionally.
+ if sigpipe_attr_specified && !(cfg!(bootstrap) && sigpipe == sigpipe::SIG_IGN) {
+ UNIX_SIGPIPE_ATTR_SPECIFIED.store(true, crate::sync::atomic::Ordering::Relaxed);
+ }
+ if let Some(handler) = handler {
+ rtassert!(signal(libc::SIGPIPE, handler) != libc::SIG_ERR);
+ }
+ }
}
}
+// This is set (up to once) in reset_sigpipe.
+#[cfg(not(any(
+ target_os = "espidf",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "horizon"
+)))]
+static UNIX_SIGPIPE_ATTR_SPECIFIED: crate::sync::atomic::AtomicBool =
+ crate::sync::atomic::AtomicBool::new(false);
+
+#[cfg(not(any(
+ target_os = "espidf",
+ target_os = "emscripten",
+ target_os = "fuchsia",
+ target_os = "horizon"
+)))]
+pub(crate) fn unix_sigpipe_attr_specified() -> bool {
+ UNIX_SIGPIPE_ATTR_SPECIFIED.load(crate::sync::atomic::Ordering::Relaxed)
+}
+
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
pub unsafe fn cleanup() {
@@ -295,8 +349,10 @@ pub fn abort_internal() -> ! {
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
- #[link(name = "dl")]
- #[link(name = "log")]
+ #[link(name = "dl", kind = "static", modifiers = "-bundle",
+ cfg(target_feature = "crt-static"))]
+ #[link(name = "dl", cfg(not(target_feature = "crt-static")))]
+ #[link(name = "log", cfg(not(target_feature = "crt-static")))]
extern "C" {}
} else if #[cfg(target_os = "freebsd")] {
#[link(name = "execinfo")]
@@ -326,16 +382,12 @@ cfg_if::cfg_if! {
extern "C" {}
} else if #[cfg(target_os = "macos")] {
#[link(name = "System")]
- // res_init and friends require -lresolv on macOS/iOS.
- // See #41582 and https://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
- #[link(name = "resolv")]
extern "C" {}
} else if #[cfg(any(target_os = "ios", target_os = "watchos"))] {
#[link(name = "System")]
#[link(name = "objc")]
#[link(name = "Security", kind = "framework")]
#[link(name = "Foundation", kind = "framework")]
- #[link(name = "resolv")]
extern "C" {}
} else if #[cfg(target_os = "fuchsia")] {
#[link(name = "zircon")]
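
The practical difference between the handler choices above shows up when a program writes to a pipe whose read end is gone: with SIG_IGN (the historical libstd default) the write surfaces as an EPIPE io::Error, while with SIG_DFL the process is killed by SIGPIPE before any error is seen. A small safe-code way to observe the ignored-signal case:

    use std::io::Write;
    use std::process::{Command, Stdio};

    fn main() -> std::io::Result<()> {
        // `true` never reads stdin and exits immediately, closing the read end.
        let mut child = Command::new("true").stdin(Stdio::piped()).spawn()?;
        let mut stdin = child.stdin.take().unwrap();
        child.wait()?;

        // With SIGPIPE ignored this fails with ErrorKind::BrokenPipe; under the
        // default disposition the signal would terminate the process instead.
        match stdin.write_all(b"hello") {
            Err(e) => println!("write failed as expected: {e}"),
            Ok(()) => println!("write unexpectedly succeeded"),
        }
        Ok(())
    }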
diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs
index 462a45b01..b84bf8f92 100644
--- a/library/std/src/sys/unix/net.rs
+++ b/library/std/src/sys/unix/net.rs
@@ -393,6 +393,17 @@ impl Socket {
}
#[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn set_quickack(&self, quickack: bool) -> io::Result<()> {
+ setsockopt(self, libc::IPPROTO_TCP, libc::TCP_QUICKACK, quickack as c_int)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
+ pub fn quickack(&self) -> io::Result<bool> {
+ let raw: c_int = getsockopt(self, libc::IPPROTO_TCP, libc::TCP_QUICKACK)?;
+ Ok(raw != 0)
+ }
+
+ #[cfg(any(target_os = "android", target_os = "linux",))]
pub fn set_passcred(&self, passcred: bool) -> io::Result<()> {
setsockopt(self, libc::SOL_SOCKET, libc::SO_PASSCRED, passcred as libc::c_int)
}
@@ -427,6 +438,17 @@ impl Socket {
self.0.set_nonblocking(nonblocking)
}
+ #[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"))]
+ pub fn set_mark(&self, mark: u32) -> io::Result<()> {
+ #[cfg(target_os = "linux")]
+ let option = libc::SO_MARK;
+ #[cfg(target_os = "freebsd")]
+ let option = libc::SO_USER_COOKIE;
+ #[cfg(target_os = "openbsd")]
+ let option = libc::SO_RTABLE;
+ setsockopt(self, libc::SOL_SOCKET, option, mark as libc::c_int)
+ }
+
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
let raw: c_int = getsockopt(self, libc::SOL_SOCKET, libc::SO_ERROR)?;
if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
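
A rough user-space equivalent of the new Socket::set_quickack, going through the libc crate (an assumed dependency) on a plain TcpStream; inside std the same call is routed through the internal setsockopt helper. Linux/Android only:

    use std::io;
    use std::mem;
    use std::net::TcpStream;
    use std::os::unix::io::AsRawFd;

    fn set_quickack(stream: &TcpStream, enabled: bool) -> io::Result<()> {
        let val: libc::c_int = enabled as libc::c_int;
        let ret = unsafe {
            libc::setsockopt(
                stream.as_raw_fd(),
                libc::IPPROTO_TCP,
                libc::TCP_QUICKACK,
                &val as *const libc::c_int as *const libc::c_void,
                mem::size_of::<libc::c_int>() as libc::socklen_t,
            )
        };
        if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(()) }
    }

    fn main() -> io::Result<()> {
        // Placeholder address; substitute a reachable peer to try this out.
        let stream = TcpStream::connect("127.0.0.1:8080")?;
        set_quickack(&stream, true)
    }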
diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs
index 46545a083..2f2663db6 100644
--- a/library/std/src/sys/unix/os.rs
+++ b/library/std/src/sys/unix/os.rs
@@ -7,6 +7,7 @@ mod tests;
use crate::os::unix::prelude::*;
+use crate::convert::TryFrom;
use crate::error::Error as StdError;
use crate::ffi::{CStr, CString, OsStr, OsString};
use crate::fmt;
@@ -17,10 +18,11 @@ use crate::path::{self, PathBuf};
use crate::ptr;
use crate::slice;
use crate::str;
+use crate::sync::{PoisonError, RwLock};
+use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr};
use crate::sys::cvt;
use crate::sys::fd;
use crate::sys::memchr;
-use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard};
use crate::vec;
#[cfg(all(target_env = "gnu", not(target_os = "vxworks")))]
@@ -125,7 +127,9 @@ pub fn error_string(errno: i32) -> String {
}
let p = p as *const _;
- str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned()
+ // We can't always expect a UTF-8 environment. When we don't get that luxury,
+ // it's better to give a low-quality error message than none at all.
+ String::from_utf8_lossy(CStr::from_ptr(p).to_bytes()).into()
}
}
@@ -168,12 +172,8 @@ pub fn chdir(p: &path::Path) -> io::Result<()> {
#[cfg(not(target_os = "espidf"))]
pub fn chdir(p: &path::Path) -> io::Result<()> {
- let p: &OsStr = p.as_ref();
- let p = CString::new(p.as_bytes())?;
- if unsafe { libc::chdir(p.as_ptr()) } != 0 {
- return Err(io::Error::last_os_error());
- }
- Ok(())
+ let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?;
+ if result == 0 { Ok(()) } else { Err(io::Error::last_os_error()) }
}
pub struct SplitPaths<'a> {
@@ -501,10 +501,10 @@ pub unsafe fn environ() -> *mut *const *const c_char {
ptr::addr_of_mut!(environ)
}
-static ENV_LOCK: StaticRwLock = StaticRwLock::new();
+static ENV_LOCK: RwLock<()> = RwLock::new(());
-pub fn env_read_lock() -> StaticRwLockReadGuard {
- ENV_LOCK.read()
+pub fn env_read_lock() -> impl Drop {
+ ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
}
/// Returns a vector of (variable, value) byte-vector pairs for all the
@@ -546,35 +546,32 @@ pub fn env() -> Env {
pub fn getenv(k: &OsStr) -> Option<OsString> {
// environment variables with a nul byte can't be set, so their value is
// always None as well
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
+ let s = run_with_cstr(k.as_bytes(), |k| {
let _guard = env_read_lock();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ Ok(unsafe { libc::getenv(k.as_ptr()) } as *const libc::c_char)
+ })
+ .ok()?;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = ENV_LOCK.write();
- cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| {
+ let _guard = ENV_LOCK.write();
+ cvt(unsafe { libc::setenv(k.as_ptr(), v.as_ptr(), 1) }).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
+ run_with_cstr(n.as_bytes(), |nbuf| {
let _guard = ENV_LOCK.write();
- cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ cvt(unsafe { libc::unsetenv(nbuf.as_ptr()) }).map(drop)
+ })
}
#[cfg(not(target_os = "espidf"))]
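
The ENV_LOCK change above swaps the dedicated StaticRwLock for an ordinary const-constructible RwLock<()>, recovering the guard from a PoisonError so that a panicking env caller does not poison every later access. The pattern in isolation:

    use std::sync::{PoisonError, RwLock};

    static LOCK: RwLock<()> = RwLock::new(());

    // Like env_read_lock() above: return an opaque guard and ignore poisoning.
    fn read_lock() -> impl Drop {
        LOCK.read().unwrap_or_else(PoisonError::into_inner)
    }

    fn main() {
        let _guard = read_lock();
        // ... call getenv-style code while the guard is alive ...
    }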
diff --git a/library/std/src/sys/unix/os_str.rs b/library/std/src/sys/unix/os_str.rs
index ccbc18224..017e2af29 100644
--- a/library/std/src/sys/unix/os_str.rs
+++ b/library/std/src/sys/unix/os_str.rs
@@ -11,7 +11,7 @@ use crate::str;
use crate::sync::Arc;
use crate::sys_common::{AsInner, IntoInner};
-use core::str::lossy::{Utf8Lossy, Utf8LossyChunk};
+use core::str::Utf8Chunks;
#[cfg(test)]
#[path = "../unix/os_str/tests.rs"]
@@ -29,26 +29,32 @@ pub struct Slice {
}
impl fmt::Debug for Slice {
- fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
- // Writes out a valid unicode string with the correct escape sequences
-
- formatter.write_str("\"")?;
- for Utf8LossyChunk { valid, broken } in Utf8Lossy::from_bytes(&self.inner).chunks() {
- for c in valid.chars().flat_map(|c| c.escape_debug()) {
- formatter.write_char(c)?
- }
-
- for b in broken {
- write!(formatter, "\\x{:02X}", b)?;
- }
- }
- formatter.write_str("\"")
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&Utf8Chunks::new(&self.inner).debug(), f)
}
}
impl fmt::Display for Slice {
- fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&Utf8Lossy::from_bytes(&self.inner), formatter)
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // If we're the empty string then our iterator won't actually yield
+ // anything, so perform the formatting manually
+ if self.inner.is_empty() {
+ return "".fmt(f);
+ }
+
+ for chunk in Utf8Chunks::new(&self.inner) {
+ let valid = chunk.valid();
+ // If we successfully decoded the whole chunk as a valid string then
+ // we can return a direct formatting of the string which will also
+ // respect various formatting flags if possible.
+ if chunk.invalid().is_empty() {
+ return valid.fmt(f);
+ }
+
+ f.write_str(valid)?;
+ f.write_char(char::REPLACEMENT_CHARACTER)?;
+ }
+ Ok(())
}
}
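
The new Display impl walks the byte slice chunk by chunk and emits one replacement character per invalid sub-sequence. The same idea written against the public byte-slice API (utf8_chunks() on [u8], stable in recent Rust; the std-internal code above had to use the core iterator directly while it was still unstable):

    use std::fmt::{self, Write};

    struct LossyDisplay<'a>(&'a [u8]);

    impl fmt::Display for LossyDisplay<'_> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            for chunk in self.0.utf8_chunks() {
                f.write_str(chunk.valid())?;
                if !chunk.invalid().is_empty() {
                    // One U+FFFD per invalid sub-sequence reported by the iterator.
                    f.write_char(char::REPLACEMENT_CHARACTER)?;
                }
            }
            Ok(())
        }
    }

    fn main() {
        assert_eq!(
            LossyDisplay(b"Hello\xC0\x80 There").to_string(),
            "Hello\u{FFFD}\u{FFFD} There"
        );
    }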
diff --git a/library/std/src/sys/unix/os_str/tests.rs b/library/std/src/sys/unix/os_str/tests.rs
index 213277f01..22ba0c923 100644
--- a/library/std/src/sys/unix/os_str/tests.rs
+++ b/library/std/src/sys/unix/os_str/tests.rs
@@ -8,3 +8,11 @@ fn slice_debug_output() {
assert_eq!(output, expected);
}
+
+#[test]
+fn display() {
+ assert_eq!(
+ "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
+ Slice::from_u8_slice(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string(),
+ );
+}
diff --git a/library/std/src/sys/unix/process/process_common.rs b/library/std/src/sys/unix/process/process_common.rs
index bca1b65a7..848adca78 100644
--- a/library/std/src/sys/unix/process/process_common.rs
+++ b/library/std/src/sys/unix/process/process_common.rs
@@ -39,17 +39,39 @@ cfg_if::cfg_if! {
// https://github.com/aosp-mirror/platform_bionic/blob/ad8dcd6023294b646e5a8288c0ed431b0845da49/libc/include/android/legacy_signal_inlines.h
cfg_if::cfg_if! {
if #[cfg(target_os = "android")] {
+ #[allow(dead_code)]
pub unsafe fn sigemptyset(set: *mut libc::sigset_t) -> libc::c_int {
set.write_bytes(0u8, 1);
return 0;
}
+
#[allow(dead_code)]
pub unsafe fn sigaddset(set: *mut libc::sigset_t, signum: libc::c_int) -> libc::c_int {
- use crate::{slice, mem};
+ use crate::{
+ mem::{align_of, size_of},
+ slice,
+ };
+ use libc::{c_ulong, sigset_t};
+
+ // The implementations from bionic (android libc) type pun `sigset_t` as an
+ // array of `c_ulong`. This works, but lets add a smoke check to make sure
+ // that doesn't change.
+ const _: () = assert!(
+ align_of::<c_ulong>() == align_of::<sigset_t>()
+ && (size_of::<sigset_t>() % size_of::<c_ulong>()) == 0
+ );
- let raw = slice::from_raw_parts_mut(set as *mut u8, mem::size_of::<libc::sigset_t>());
let bit = (signum - 1) as usize;
- raw[bit / 8] |= 1 << (bit % 8);
+ if set.is_null() || bit >= (8 * size_of::<sigset_t>()) {
+ crate::sys::unix::os::set_errno(libc::EINVAL);
+ return -1;
+ }
+ let raw = slice::from_raw_parts_mut(
+ set as *mut c_ulong,
+ size_of::<sigset_t>() / size_of::<c_ulong>(),
+ );
+ const LONG_BIT: usize = size_of::<c_ulong>() * 8;
+ raw[bit / LONG_BIT] |= 1 << (bit % LONG_BIT);
return 0;
}
} else {
@@ -72,6 +94,7 @@ pub struct Command {
argv: Argv,
env: CommandEnv,
+ program_kind: ProgramKind,
cwd: Option<CString>,
uid: Option<uid_t>,
gid: Option<gid_t>,
@@ -128,15 +151,40 @@ pub enum Stdio {
Fd(FileDesc),
}
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum ProgramKind {
+ /// A program that would be looked up on the PATH (e.g. `ls`)
+ PathLookup,
+ /// A relative path (e.g. `my-dir/foo`, `../foo`, `./foo`)
+ Relative,
+ /// An absolute path.
+ Absolute,
+}
+
+impl ProgramKind {
+ fn new(program: &OsStr) -> Self {
+ if program.bytes().starts_with(b"/") {
+ Self::Absolute
+ } else if program.bytes().contains(&b'/') {
+ // If the program has more than one component in it, it is a relative path.
+ Self::Relative
+ } else {
+ Self::PathLookup
+ }
+ }
+}
+
impl Command {
#[cfg(not(target_os = "linux"))]
pub fn new(program: &OsStr) -> Command {
let mut saw_nul = false;
+ let program_kind = ProgramKind::new(program.as_ref());
let program = os2c(program, &mut saw_nul);
Command {
argv: Argv(vec![program.as_ptr(), ptr::null()]),
args: vec![program.clone()],
program,
+ program_kind,
env: Default::default(),
cwd: None,
uid: None,
@@ -154,11 +202,13 @@ impl Command {
#[cfg(target_os = "linux")]
pub fn new(program: &OsStr) -> Command {
let mut saw_nul = false;
+ let program_kind = ProgramKind::new(program.as_ref());
let program = os2c(program, &mut saw_nul);
Command {
argv: Argv(vec![program.as_ptr(), ptr::null()]),
args: vec![program.clone()],
program,
+ program_kind,
env: Default::default(),
cwd: None,
uid: None,
@@ -234,6 +284,11 @@ impl Command {
OsStr::from_bytes(self.program.as_bytes())
}
+ #[allow(dead_code)]
+ pub fn get_program_kind(&self) -> ProgramKind {
+ self.program_kind
+ }
+
pub fn get_args(&self) -> CommandArgs<'_> {
let mut iter = self.args.iter();
iter.next();
diff --git a/library/std/src/sys/unix/process/process_common/tests.rs b/library/std/src/sys/unix/process/process_common/tests.rs
index 1956b3692..03631e4e3 100644
--- a/library/std/src/sys/unix/process/process_common/tests.rs
+++ b/library/std/src/sys/unix/process/process_common/tests.rs
@@ -31,41 +31,54 @@ macro_rules! t {
ignore
)]
fn test_process_mask() {
- unsafe {
- // Test to make sure that a signal mask does not get inherited.
- let mut cmd = Command::new(OsStr::new("cat"));
-
- let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit();
- let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit();
- t!(cvt(sigemptyset(set.as_mut_ptr())));
- t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT)));
- t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), old_set.as_mut_ptr())));
-
- cmd.stdin(Stdio::MakePipe);
- cmd.stdout(Stdio::MakePipe);
-
- let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
- let stdin_write = pipes.stdin.take().unwrap();
- let stdout_read = pipes.stdout.take().unwrap();
-
- t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut())));
-
- t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT)));
- // We need to wait until SIGINT is definitely delivered. The
- // easiest way is to write something to cat, and try to read it
- // back: if SIGINT is unmasked, it'll get delivered when cat is
- // next scheduled.
- let _ = stdin_write.write(b"Hello");
- drop(stdin_write);
-
- // Either EOF or failure (EPIPE) is okay.
- let mut buf = [0; 5];
- if let Ok(ret) = stdout_read.read(&mut buf) {
- assert_eq!(ret, 0);
+ // Test to make sure that a signal mask *does* get inherited.
+ fn test_inner(mut cmd: Command) {
+ unsafe {
+ let mut set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ let mut old_set = mem::MaybeUninit::<libc::sigset_t>::uninit();
+ t!(cvt(sigemptyset(set.as_mut_ptr())));
+ t!(cvt(sigaddset(set.as_mut_ptr(), libc::SIGINT)));
+ t!(cvt_nz(libc::pthread_sigmask(
+ libc::SIG_SETMASK,
+ set.as_ptr(),
+ old_set.as_mut_ptr()
+ )));
+
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+
+ let (mut cat, mut pipes) = t!(cmd.spawn(Stdio::Null, true));
+ let stdin_write = pipes.stdin.take().unwrap();
+ let stdout_read = pipes.stdout.take().unwrap();
+
+ t!(cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, old_set.as_ptr(), ptr::null_mut())));
+
+ t!(cvt(libc::kill(cat.id() as libc::pid_t, libc::SIGINT)));
+ // We need to wait until SIGINT is definitely delivered. The
+ // easiest way is to write something to cat, and try to read it
+ // back: if SIGINT is unmasked, it'll get delivered when cat is
+ // next scheduled.
+ let _ = stdin_write.write(b"Hello");
+ drop(stdin_write);
+
+ // Exactly 5 bytes should be read.
+ let mut buf = [0; 5];
+ let ret = t!(stdout_read.read(&mut buf));
+ assert_eq!(ret, 5);
+ assert_eq!(&buf, b"Hello");
+
+ t!(cat.wait());
}
-
- t!(cat.wait());
}
+
+ // A plain `Command::new` uses the posix_spawn path on many platforms.
+ let cmd = Command::new(OsStr::new("cat"));
+ test_inner(cmd);
+
+ // Specifying `pre_exec` forces the fork/exec path.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ unsafe { cmd.pre_exec(Box::new(|| Ok(()))) };
+ test_inner(cmd);
}
#[test]
@@ -122,3 +135,27 @@ fn test_process_group_no_posix_spawn() {
t!(cat.wait());
}
}
+
+#[test]
+fn test_program_kind() {
+ let vectors = &[
+ ("foo", ProgramKind::PathLookup),
+ ("foo.out", ProgramKind::PathLookup),
+ ("./foo", ProgramKind::Relative),
+ ("../foo", ProgramKind::Relative),
+ ("dir/foo", ProgramKind::Relative),
+ // Note that paths on Unix can't contain / in them, so this is actually the directory "fo\\"
+ // followed by the file "o".
+ ("fo\\/o", ProgramKind::Relative),
+ ("/foo", ProgramKind::Absolute),
+ ("/dir/../foo", ProgramKind::Absolute),
+ ];
+
+ for (program, expected_kind) in vectors {
+ assert_eq!(
+ ProgramKind::new(program.as_ref()),
+ *expected_kind,
+ "actual != expected program kind for input {program}",
+ );
+ }
+}
diff --git a/library/std/src/sys/unix/process/process_fuchsia.rs b/library/std/src/sys/unix/process/process_fuchsia.rs
index 73f5d3a61..66ea3db20 100644
--- a/library/std/src/sys/unix/process/process_fuchsia.rs
+++ b/library/std/src/sys/unix/process/process_fuchsia.rs
@@ -287,7 +287,7 @@ impl ExitStatus {
// SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't
// necessarily fit.
//
- // It seems to me that that the right answer would be to provide std::os::fuchsia with its
+ // It seems to me that the right answer would be to provide std::os::fuchsia with its
// own ExitStatusExt, rather that trying to provide a not very convincing imitation of
// Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But
// fixing this up that is beyond the scope of my efforts now.
diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs
index 75bb92437..56a805cef 100644
--- a/library/std/src/sys/unix/process/process_unix.rs
+++ b/library/std/src/sys/unix/process/process_unix.rs
@@ -2,7 +2,6 @@ use crate::fmt;
use crate::io::{self, Error, ErrorKind};
use crate::mem;
use crate::num::NonZeroI32;
-use crate::ptr;
use crate::sys;
use crate::sys::cvt;
use crate::sys::process::process_common::*;
@@ -310,7 +309,7 @@ impl Command {
//FIXME: Redox kernel does not support setgroups yet
#[cfg(not(target_os = "redox"))]
if libc::getuid() == 0 && self.get_groups().is_none() {
- cvt(libc::setgroups(0, ptr::null()))?;
+ cvt(libc::setgroups(0, crate::ptr::null()))?;
}
cvt(libc::setuid(u as uid_t))?;
}
@@ -326,30 +325,26 @@ impl Command {
// emscripten has no signal support.
#[cfg(not(target_os = "emscripten"))]
{
- use crate::mem::MaybeUninit;
- use crate::sys::cvt_nz;
- // Reset signal handling so the child process starts in a
- // standardized state. libstd ignores SIGPIPE, and signal-handling
- // libraries often set a mask. Child processes inherit ignored
- // signals and the signal mask from their parent, but most
- // UNIX programs do not reset these things on their own, so we
- // need to clean things up now to avoid confusing the program
- // we're about to run.
- let mut set = MaybeUninit::<libc::sigset_t>::uninit();
- cvt(sigemptyset(set.as_mut_ptr()))?;
- cvt_nz(libc::pthread_sigmask(libc::SIG_SETMASK, set.as_ptr(), ptr::null_mut()))?;
-
- #[cfg(target_os = "android")] // see issue #88585
- {
- let mut action: libc::sigaction = mem::zeroed();
- action.sa_sigaction = libc::SIG_DFL;
- cvt(libc::sigaction(libc::SIGPIPE, &action, ptr::null_mut()))?;
- }
- #[cfg(not(target_os = "android"))]
- {
- let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL);
- if ret == libc::SIG_ERR {
- return Err(io::Error::last_os_error());
+ // Inherit the signal mask from the parent rather than resetting it (i.e. do not call
+ // pthread_sigmask).
+
+ // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL.
+ // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility.
+ //
+ // #[unix_sigpipe] is an opportunity to change the default here.
+ if !crate::sys::unix_sigpipe_attr_specified() {
+ #[cfg(target_os = "android")] // see issue #88585
+ {
+ let mut action: libc::sigaction = mem::zeroed();
+ action.sa_sigaction = libc::SIG_DFL;
+ cvt(libc::sigaction(libc::SIGPIPE, &action, crate::ptr::null_mut()))?;
+ }
+ #[cfg(not(target_os = "android"))]
+ {
+ let ret = sys::signal(libc::SIGPIPE, libc::SIG_DFL);
+ if ret == libc::SIG_ERR {
+ return Err(io::Error::last_os_error());
+ }
}
}
}
@@ -411,7 +406,7 @@ impl Command {
envp: Option<&CStringArray>,
) -> io::Result<Option<Process>> {
use crate::mem::MaybeUninit;
- use crate::sys::{self, cvt_nz};
+ use crate::sys::{self, cvt_nz, unix_sigpipe_attr_specified};
if self.get_gid().is_some()
|| self.get_uid().is_some()
@@ -453,7 +448,9 @@ impl Command {
// successfully launch the program, but erroneously return
// ENOENT when used with posix_spawn_file_actions_addchdir_np
// which was introduced in macOS 10.15.
- return Ok(None);
+ if self.get_program_kind() == ProgramKind::Relative {
+ return Ok(None);
+ }
}
match posix_spawn_file_actions_addchdir_np.get() {
Some(f) => Some((f, cwd)),
@@ -529,13 +526,24 @@ impl Command {
cvt_nz(libc::posix_spawnattr_setpgroup(attrs.0.as_mut_ptr(), pgroup))?;
}
- let mut set = MaybeUninit::<libc::sigset_t>::uninit();
- cvt(sigemptyset(set.as_mut_ptr()))?;
- cvt_nz(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?;
- cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?;
- cvt_nz(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?;
+ // Inherit the signal mask from this process rather than resetting it (i.e. do not call
+ // posix_spawnattr_setsigmask).
+
+ // If #[unix_sigpipe] is specified, don't reset SIGPIPE to SIG_DFL.
+ // If #[unix_sigpipe] is not specified, reset SIGPIPE to SIG_DFL for backward compatibility.
+ //
+ // #[unix_sigpipe] is an opportunity to change the default here.
+ if !unix_sigpipe_attr_specified() {
+ let mut default_set = MaybeUninit::<libc::sigset_t>::uninit();
+ cvt(sigemptyset(default_set.as_mut_ptr()))?;
+ cvt(sigaddset(default_set.as_mut_ptr(), libc::SIGPIPE))?;
+ cvt_nz(libc::posix_spawnattr_setsigdefault(
+ attrs.0.as_mut_ptr(),
+ default_set.as_ptr(),
+ ))?;
+ flags |= libc::POSIX_SPAWN_SETSIGDEF;
+ }
- flags |= libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK;
cvt_nz(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?;
// Make sure we synchronize access to the global `environ` resource
@@ -820,14 +828,14 @@ impl crate::os::linux::process::ChildExt for crate::process::Child {
self.handle
.pidfd
.as_ref()
- .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created."))
}
fn take_pidfd(&mut self) -> io::Result<PidFd> {
self.handle
.pidfd
.take()
- .ok_or_else(|| Error::new(ErrorKind::Other, "No pidfd was created."))
+ .ok_or_else(|| Error::new(ErrorKind::Uncategorized, "No pidfd was created."))
}
}
diff --git a/library/std/src/sys/unix/rand.rs b/library/std/src/sys/unix/rand.rs
index bf4920488..a6fe07873 100644
--- a/library/std/src/sys/unix/rand.rs
+++ b/library/std/src/sys/unix/rand.rs
@@ -1,13 +1,13 @@
-use crate::mem;
-use crate::slice;
-
pub fn hashmap_random_keys() -> (u64, u64) {
- let mut v = (0, 0);
- unsafe {
- let view = slice::from_raw_parts_mut(&mut v as *mut _ as *mut u8, mem::size_of_val(&v));
- imp::fill_bytes(view);
- }
- v
+ const KEY_LEN: usize = core::mem::size_of::<u64>();
+
+ let mut v = [0u8; KEY_LEN * 2];
+ imp::fill_bytes(&mut v);
+
+ let key1 = v[0..KEY_LEN].try_into().unwrap();
+ let key2 = v[KEY_LEN..].try_into().unwrap();
+
+ (u64::from_ne_bytes(key1), u64::from_ne_bytes(key2))
}
#[cfg(all(
diff --git a/library/std/src/sys/unix/stdio.rs b/library/std/src/sys/unix/stdio.rs
index 329f9433d..b3626c564 100644
--- a/library/std/src/sys/unix/stdio.rs
+++ b/library/std/src/sys/unix/stdio.rs
@@ -1,6 +1,6 @@
use crate::io::{self, IoSlice, IoSliceMut};
use crate::mem::ManuallyDrop;
-use crate::os::unix::io::{AsFd, BorrowedFd, FromRawFd};
+use crate::os::unix::io::FromRawFd;
use crate::sys::fd::FileDesc;
pub struct Stdin(());
@@ -91,51 +91,3 @@ pub const STDIN_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
pub fn panic_output() -> Option<impl io::Write> {
Some(Stderr::new())
}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stdin {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StdinLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDIN_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stdout {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StdoutLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDOUT_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl AsFd for io::Stderr {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
- }
-}
-
-#[stable(feature = "io_safety", since = "1.63.0")]
-impl<'a> AsFd for io::StderrLock<'a> {
- #[inline]
- fn as_fd(&self) -> BorrowedFd<'_> {
- unsafe { BorrowedFd::borrow_raw(libc::STDERR_FILENO) }
- }
-}
diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs
index 36a3fa602..c1d30dd9d 100644
--- a/library/std/src/sys/unix/thread.rs
+++ b/library/std/src/sys/unix/thread.rs
@@ -116,11 +116,9 @@ impl Thread {
debug_assert_eq!(ret, 0);
}
- #[cfg(any(target_os = "linux", target_os = "android"))]
+ #[cfg(target_os = "android")]
pub fn set_name(name: &CStr) {
const PR_SET_NAME: libc::c_int = 15;
- // pthread wrapper only appeared in glibc 2.12, so we use syscall
- // directly.
unsafe {
libc::prctl(
PR_SET_NAME,
@@ -132,6 +130,19 @@ impl Thread {
}
}
+ #[cfg(target_os = "linux")]
+ pub fn set_name(name: &CStr) {
+ const TASK_COMM_LEN: usize = 16;
+
+ unsafe {
+ // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
+ let name = truncate_cstr(name, TASK_COMM_LEN);
+ let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
+ // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
+ debug_assert_eq!(res, 0);
+ }
+ }
+
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
pub fn set_name(name: &CStr) {
unsafe {
@@ -142,20 +153,23 @@ impl Thread {
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
pub fn set_name(name: &CStr) {
unsafe {
- libc::pthread_setname_np(name.as_ptr());
+ let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE);
+ let res = libc::pthread_setname_np(name.as_ptr());
+ // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
+ debug_assert_eq!(res, 0);
}
}
#[cfg(target_os = "netbsd")]
pub fn set_name(name: &CStr) {
- use crate::ffi::CString;
- let cname = CString::new(&b"%s"[..]).unwrap();
unsafe {
- libc::pthread_setname_np(
+ let cname = CStr::from_bytes_with_nul_unchecked(b"%s\0".as_slice());
+ let res = libc::pthread_setname_np(
libc::pthread_self(),
cname.as_ptr(),
name.as_ptr() as *mut libc::c_void,
);
+ debug_assert_eq!(res, 0);
}
}
@@ -168,9 +182,8 @@ impl Thread {
}
if let Some(f) = pthread_setname_np.get() {
- unsafe {
- f(libc::pthread_self(), name.as_ptr());
- }
+ let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
+ debug_assert_eq!(res, 0);
}
}
@@ -271,6 +284,20 @@ impl Drop for Thread {
}
}
+#[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios", target_os = "watchos"))]
+fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> crate::borrow::Cow<'_, CStr> {
+ use crate::{borrow::Cow, ffi::CString};
+
+ if cstr.to_bytes_with_nul().len() > max_with_nul {
+ let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
+ // SAFETY: the non-nul bytes came straight from a CStr.
+ // (CString will add the terminating nul.)
+ Cow::Owned(unsafe { CString::from_vec_unchecked(bytes) })
+ } else {
+ Cow::Borrowed(cstr)
+ }
+}
+
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
cfg_if::cfg_if! {
if #[cfg(any(
@@ -423,7 +450,7 @@ mod cgroups {
Some(b"") => Cgroup::V2,
Some(controllers)
if from_utf8(controllers)
- .is_ok_and(|c| c.split(",").any(|c| c == "cpu")) =>
+ .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
{
Cgroup::V1
}
@@ -761,6 +788,16 @@ pub mod guard {
const GUARD_PAGES: usize = 1;
let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
Some(guard)
+ } else if cfg!(target_os = "openbsd") {
+ // OpenBSD stack already includes a guard page, and stack is
+ // immutable.
+ //
+ // We'll just note where we expect rlimit to start
+ // faulting, so our handler can report "stack overflow", and
+ // trust that the kernel's own stack guard will work.
+ let stackptr = get_stack_start_aligned()?;
+ let stackaddr = stackptr.addr();
+ Some(stackaddr - page_size..stackaddr)
} else {
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
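
As a side note on the truncation helper added above: it only allocates when the name exceeds the limit, and the cut keeps room for the trailing nul. A small usage sketch against the Linux `TASK_COMM_LEN` limit of 16 bytes, using `CString::new` instead of the `unsafe` constructor so it is runnable outside `std`:

    use std::borrow::Cow;
    use std::ffi::{CStr, CString};

    fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> Cow<'_, CStr> {
        if cstr.to_bytes_with_nul().len() > max_with_nul {
            // The bytes come from a CStr, so they contain no interior nul and
            // CString::new cannot fail here.
            let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
            Cow::Owned(CString::new(bytes).unwrap())
        } else {
            Cow::Borrowed(cstr)
        }
    }

    fn main() {
        let long = CString::new("a-rather-long-worker-thread-name").unwrap();
        let name = truncate_cstr(&long, 16);
        assert_eq!(name.to_bytes().len(), 15); // 15 bytes + implicit trailing nul
    }
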
diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs
index 6e8be2a91..d7fd2130f 100644
--- a/library/std/src/sys/unix/thread_local_dtor.rs
+++ b/library/std/src/sys/unix/thread_local_dtor.rs
@@ -17,6 +17,7 @@
target_os = "redox",
target_os = "emscripten"
))]
+#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten)
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
use crate::mem;
use crate::sys_common::thread_local_dtor::register_dtor_fallback;
diff --git a/library/std/src/sys/unix/thread_local_key.rs b/library/std/src/sys/unix/thread_local_key.rs
index 2c5b94b1e..2b2d079ee 100644
--- a/library/std/src/sys/unix/thread_local_key.rs
+++ b/library/std/src/sys/unix/thread_local_key.rs
@@ -27,8 +27,3 @@ pub unsafe fn destroy(key: Key) {
let r = libc::pthread_key_delete(key);
debug_assert_eq!(r, 0);
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- false
-}
diff --git a/library/std/src/sys/unix/thread_parker.rs b/library/std/src/sys/unix/thread_parker.rs
deleted file mode 100644
index ca1a7138f..000000000
--- a/library/std/src/sys/unix/thread_parker.rs
+++ /dev/null
@@ -1,281 +0,0 @@
-//! Thread parking without `futex` using the `pthread` synchronization primitives.
-
-#![cfg(not(any(
- target_os = "linux",
- target_os = "android",
- all(target_os = "emscripten", target_feature = "atomics"),
- target_os = "freebsd",
- target_os = "openbsd",
- target_os = "dragonfly",
- target_os = "fuchsia",
-)))]
-
-use crate::cell::UnsafeCell;
-use crate::marker::PhantomPinned;
-use crate::pin::Pin;
-use crate::ptr::addr_of_mut;
-use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
-use crate::time::Duration;
-
-const EMPTY: usize = 0;
-const PARKED: usize = 1;
-const NOTIFIED: usize = 2;
-
-unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_mutex_lock(lock);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_mutex_unlock(lock);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
- let r = libc::pthread_cond_signal(cond);
- debug_assert_eq!(r, 0);
-}
-
-unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
- let r = libc::pthread_cond_wait(cond, lock);
- debug_assert_eq!(r, 0);
-}
-
-const TIMESPEC_MAX: libc::timespec =
- libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
-
-unsafe fn wait_timeout(
- cond: *mut libc::pthread_cond_t,
- lock: *mut libc::pthread_mutex_t,
- dur: Duration,
-) {
- // Use the system clock on systems that do not support pthread_condattr_setclock.
- // This unfortunately results in problems when the system time changes.
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "espidf"
- ))]
- let (now, dur) = {
- use super::time::SystemTime;
- use crate::cmp::min;
-
- // OSX implementation of `pthread_cond_timedwait` is buggy
- // with super long durations. When duration is greater than
- // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
- // in macOS Sierra return error 316.
- //
- // This program demonstrates the issue:
- // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
- //
- // To work around this issue, and possible bugs of other OSes, timeout
- // is clamped to 1000 years, which is allowable per the API of `park_timeout`
- // because of spurious wakeups.
- let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
- let now = SystemTime::now().t;
- (now, dur)
- };
- // Use the monotonic clock on other systems.
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "espidf"
- )))]
- let (now, dur) = {
- use super::time::Timespec;
-
- (Timespec::now(libc::CLOCK_MONOTONIC), dur)
- };
-
- let timeout =
- now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
- let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
- debug_assert!(r == libc::ETIMEDOUT || r == 0);
-}
-
-pub struct Parker {
- state: AtomicUsize,
- lock: UnsafeCell<libc::pthread_mutex_t>,
- cvar: UnsafeCell<libc::pthread_cond_t>,
- // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
- _pinned: PhantomPinned,
-}
-
-impl Parker {
- /// Construct the UNIX parker in-place.
- ///
- /// # Safety
- /// The constructed parker must never be moved.
- pub unsafe fn new(parker: *mut Parker) {
- // Use the default mutex implementation to allow for simpler initialization.
- // This could lead to undefined behaviour when deadlocking. This is avoided
- // by not deadlocking. Note in particular the unlocking operation before any
- // panic, as code after the panic could try to park again.
- addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
- addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
-
- cfg_if::cfg_if! {
- if #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "watchos",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))] {
- addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
- } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
- let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
- assert_eq!(r, 0);
- } else {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
- }
- }
- }
-
- // This implementation doesn't require `unsafe`, but other implementations
- // may assume this is only called by the thread that owns the Parker.
- pub unsafe fn park(self: Pin<&Self>) {
- // If we were previously notified then we consume this notification and
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
-
- // Otherwise we need to coordinate going to sleep
- lock(self.lock.get());
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read here, even though we know it will be `NOTIFIED`.
- // This is because `unpark` may have been called again since we read
- // `NOTIFIED` in the `compare_exchange` above. We must perform an
- // acquire operation that synchronizes with that `unpark` to observe
- // any writes it made before the call to unpark. To do that we must
- // read from the write it made to `state`.
- let old = self.state.swap(EMPTY, SeqCst);
-
- unlock(self.lock.get());
-
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => {
- unlock(self.lock.get());
-
- panic!("inconsistent park state")
- }
- }
-
- loop {
- wait(self.cvar.get(), self.lock.get());
-
- match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
- Ok(_) => break, // got a notification
- Err(_) => {} // spurious wakeup, go back to sleep
- }
- }
-
- unlock(self.lock.get());
- }
-
- // This implementation doesn't require `unsafe`, but other implementations
- // may assume this is only called by the thread that owns the Parker. Use
- // `Pin` to guarantee a stable address for the mutex and condition variable.
- pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
- // Like `park` above we have a fast path for an already-notified thread, and
- // afterwards we start coordinating for a sleep.
- // return quickly.
- if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
- return;
- }
-
- lock(self.lock.get());
- match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
- Ok(_) => {}
- Err(NOTIFIED) => {
- // We must read again here, see `park`.
- let old = self.state.swap(EMPTY, SeqCst);
- unlock(self.lock.get());
-
- assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
- return;
- } // should consume this notification, so prohibit spurious wakeups in next park.
- Err(_) => {
- unlock(self.lock.get());
- panic!("inconsistent park_timeout state")
- }
- }
-
- // Wait with a timeout, and if we spuriously wake up or otherwise wake up
- // from a notification we just want to unconditionally set the state back to
- // empty, either consuming a notification or un-flagging ourselves as
- // parked.
- wait_timeout(self.cvar.get(), self.lock.get(), dur);
-
- match self.state.swap(EMPTY, SeqCst) {
- NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
- PARKED => unlock(self.lock.get()), // no notification, alas
- n => {
- unlock(self.lock.get());
- panic!("inconsistent park_timeout state: {n}")
- }
- }
- }
-
- pub fn unpark(self: Pin<&Self>) {
- // To ensure the unparked thread will observe any writes we made
- // before this call, we must perform a release operation that `park`
- // can synchronize with. To do that we must write `NOTIFIED` even if
- // `state` is already `NOTIFIED`. That is why this must be a swap
- // rather than a compare-and-swap that returns if it reads `NOTIFIED`
- // on failure.
- match self.state.swap(NOTIFIED, SeqCst) {
- EMPTY => return, // no one was waiting
- NOTIFIED => return, // already unparked
- PARKED => {} // gotta go wake someone up
- _ => panic!("inconsistent state in unpark"),
- }
-
- // There is a period between when the parked thread sets `state` to
- // `PARKED` (or last checked `state` in the case of a spurious wake
- // up) and when it actually waits on `cvar`. If we were to notify
- // during this period it would be ignored and then when the parked
- // thread went to sleep it would never wake up. Fortunately, it has
- // `lock` locked at this stage so we can acquire `lock` to wait until
- // it is ready to receive the notification.
- //
- // Releasing `lock` before the call to `notify_one` means that when the
- // parked thread wakes it doesn't get woken only to have to wait for us
- // to release `lock`.
- unsafe {
- lock(self.lock.get());
- unlock(self.lock.get());
- notify_one(self.cvar.get());
- }
- }
-}
-
-impl Drop for Parker {
- fn drop(&mut self) {
- unsafe {
- libc::pthread_cond_destroy(self.cvar.get_mut());
- libc::pthread_mutex_destroy(self.lock.get_mut());
- }
- }
-}
-
-unsafe impl Sync for Parker {}
-unsafe impl Send for Parker {}
diff --git a/library/std/src/sys/unix/thread_parker/darwin.rs b/library/std/src/sys/unix/thread_parker/darwin.rs
new file mode 100644
index 000000000..2f5356fe2
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/darwin.rs
@@ -0,0 +1,131 @@
+//! Thread parking for Darwin-based systems.
+//!
+//! Darwin actually has futex syscalls (`__ulock_wait`/`__ulock_wake`), but they
+//! cannot be used in `std` because they are non-public (their use will lead to
+//! rejection from the App Store) and because they are only available starting
+//! with macOS version 10.12, even though the minimum target version is 10.7.
+//!
+//! Therefore, we need to look for other synchronization primitives. Luckily, Darwin
+//! supports semaphores, which allow us to implement the behaviour we need with
+//! only one primitive (as opposed to a mutex-condvar pair). We use the semaphore
+//! provided by libdispatch, as the underlying Mach semaphore is only dubiously
+//! public.
+
+use crate::pin::Pin;
+use crate::sync::atomic::{
+ AtomicI8,
+ Ordering::{Acquire, Release},
+};
+use crate::time::Duration;
+
+type dispatch_semaphore_t = *mut crate::ffi::c_void;
+type dispatch_time_t = u64;
+
+const DISPATCH_TIME_NOW: dispatch_time_t = 0;
+const DISPATCH_TIME_FOREVER: dispatch_time_t = !0;
+
+// Contained in libSystem.dylib, which is linked by default.
+extern "C" {
+ fn dispatch_time(when: dispatch_time_t, delta: i64) -> dispatch_time_t;
+ fn dispatch_semaphore_create(val: isize) -> dispatch_semaphore_t;
+ fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) -> isize;
+ fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) -> isize;
+ fn dispatch_release(object: *mut crate::ffi::c_void);
+}
+
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+const PARKED: i8 = -1;
+
+pub struct Parker {
+ semaphore: dispatch_semaphore_t,
+ state: AtomicI8,
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
+
+impl Parker {
+ pub unsafe fn new(parker: *mut Parker) {
+ let semaphore = dispatch_semaphore_create(0);
+ assert!(
+ !semaphore.is_null(),
+ "failed to create dispatch semaphore for thread synchronization"
+ );
+ parker.write(Parker { semaphore, state: AtomicI8::new(EMPTY) })
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // The semaphore counter must be zero at this point, because unparking
+ // threads will not actually increase it until we signalled that we
+ // are waiting.
+
+ // Change NOTIFIED to EMPTY and EMPTY to PARKED.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ // Another thread may increase the semaphore counter from this point on.
+ // If it is faster than us, we will decrement it again immediately below.
+ // If we are faster, we wait.
+
+ // Ensure that the semaphore counter has actually been decremented, even
+ // if the call timed out for some reason.
+ while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {}
+
+ // At this point, the semaphore counter is zero again.
+
+ // We were definitely woken up, so we don't need to check the state.
+ // Still, we need to reset the state using a swap to observe the state
+ // change with acquire ordering.
+ self.state.swap(EMPTY, Acquire);
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ let nanos = dur.as_nanos().try_into().unwrap_or(i64::MAX);
+ let timeout = dispatch_time(DISPATCH_TIME_NOW, nanos);
+
+ let timeout = dispatch_semaphore_wait(self.semaphore, timeout) != 0;
+
+ let state = self.state.swap(EMPTY, Acquire);
+ if state == NOTIFIED && timeout {
+ // If the state was NOTIFIED but semaphore_wait returned without
+ // decrementing the count because of a timeout, it means another
+ // thread is about to call semaphore_signal. We must wait for that
+ // to happen to ensure the semaphore count is reset.
+ while dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER) != 0 {}
+ } else {
+ // Either a timeout occurred and we reset the state before any thread
+ // tried to wake us up, or we were woken up and reset the state,
+ // making sure to observe the state change with acquire ordering.
+ // Either way, the semaphore counter is now zero again.
+ }
+ }
+
+ // Does not need `Pin`, but other implementations do.
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if state == PARKED {
+ unsafe {
+ dispatch_semaphore_signal(self.semaphore);
+ }
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ // SAFETY:
+ // We always ensure that the semaphore count is reset, so this will
+ // never cause an exception.
+ unsafe {
+ dispatch_release(self.semaphore);
+ }
+ }
+}
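
The state handling above leans on `fetch_sub(1, Acquire)` doing double duty: it moves NOTIFIED (1) to EMPTY (0) and EMPTY (0) to PARKED (-1) in a single atomic step. A minimal sketch of just that transition, outside the parker:

    use std::sync::atomic::{AtomicI8, Ordering::Acquire};

    const EMPTY: i8 = 0;
    const NOTIFIED: i8 = 1;
    const PARKED: i8 = -1;

    fn begin_park(state: &AtomicI8) -> bool {
        // Returns true if a pending notification was consumed (no need to sleep);
        // otherwise the state is now PARKED and the caller must block.
        state.fetch_sub(1, Acquire) == NOTIFIED
    }

    fn main() {
        let state = AtomicI8::new(NOTIFIED);
        assert!(begin_park(&state));              // consumed the token
        assert_eq!(state.load(Acquire), EMPTY);

        assert!(!begin_park(&state));             // no token: now parked
        assert_eq!(state.load(Acquire), PARKED);
    }
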
diff --git a/library/std/src/sys/unix/thread_parker/mod.rs b/library/std/src/sys/unix/thread_parker/mod.rs
new file mode 100644
index 000000000..35f1e68a8
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/mod.rs
@@ -0,0 +1,32 @@
+//! Thread parking on systems without futex support.
+
+#![cfg(not(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_os = "emscripten", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+)))]
+
+cfg_if::cfg_if! {
+ if #[cfg(all(
+ any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "tvos",
+ ),
+ not(miri),
+ ))] {
+ mod darwin;
+ pub use darwin::Parker;
+ } else if #[cfg(target_os = "netbsd")] {
+ mod netbsd;
+ pub use netbsd::Parker;
+ } else {
+ mod pthread;
+ pub use pthread::Parker;
+ }
+}
diff --git a/library/std/src/sys/unix/thread_parker/netbsd.rs b/library/std/src/sys/unix/thread_parker/netbsd.rs
new file mode 100644
index 000000000..7657605b5
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/netbsd.rs
@@ -0,0 +1,113 @@
+use crate::ffi::{c_int, c_void};
+use crate::pin::Pin;
+use crate::ptr::{null, null_mut};
+use crate::sync::atomic::{
+ AtomicU64,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::time::Duration;
+use libc::{_lwp_self, clockid_t, lwpid_t, time_t, timespec, CLOCK_MONOTONIC};
+
+extern "C" {
+ fn ___lwp_park60(
+ clock_id: clockid_t,
+ flags: c_int,
+ ts: *mut timespec,
+ unpark: lwpid_t,
+ hint: *const c_void,
+ unparkhint: *const c_void,
+ ) -> c_int;
+ fn _lwp_unpark(lwp: lwpid_t, hint: *const c_void) -> c_int;
+}
+
+/// The thread is not parked and the token is not available.
+///
+/// Zero cannot be a valid LWP id, since it is used as the empty value for the unpark
+/// argument in _lwp_park.
+const EMPTY: u64 = 0;
+/// The token is available. Do not park anymore.
+const NOTIFIED: u64 = u64::MAX;
+
+pub struct Parker {
+ /// The parker state. Contains either one of the two state values above or the LWP
+ /// id of the parked thread.
+ state: AtomicU64,
+}
+
+impl Parker {
+ pub unsafe fn new(parker: *mut Parker) {
+ parker.write(Parker { state: AtomicU64::new(EMPTY) })
+ }
+
+ // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If the token has already been made available, we can skip
+ // a bit of work, so check for it here.
+ if self.state.load(Acquire) != NOTIFIED {
+ let parked = _lwp_self() as u64;
+ let hint = self.state.as_mut_ptr().cast();
+ if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
+ // Loop to guard against spurious wakeups.
+ loop {
+ ___lwp_park60(0, 0, null_mut(), 0, hint, null());
+ if self.state.load(Acquire) == NOTIFIED {
+ break;
+ }
+ }
+ }
+ }
+
+ // At this point, the change to NOTIFIED has always been observed with acquire
+ // ordering, so we can just use a relaxed store here (instead of a swap).
+ self.state.store(EMPTY, Relaxed);
+ }
+
+ // Does not actually need `unsafe` or `Pin`, but the pthread implementation does.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ if self.state.load(Acquire) != NOTIFIED {
+ let parked = _lwp_self() as u64;
+ let hint = self.state.as_mut_ptr().cast();
+ let mut timeout = timespec {
+ // Saturate so that the operation will definitely time out
+ // (even if it is after the heat death of the universe).
+ tv_sec: dur.as_secs().try_into().ok().unwrap_or(time_t::MAX),
+ tv_nsec: dur.subsec_nanos().into(),
+ };
+
+ if self.state.compare_exchange(EMPTY, parked, Relaxed, Acquire).is_ok() {
+ // Timeout needs to be mutable since it is modified on NetBSD 9.0 and
+ // above.
+ ___lwp_park60(CLOCK_MONOTONIC, 0, &mut timeout, 0, hint, null());
+ // Use a swap to get acquire ordering even if the token was set after
+ // the timeout occurred.
+ self.state.swap(EMPTY, Acquire);
+ return;
+ }
+ }
+
+ self.state.store(EMPTY, Relaxed);
+ }
+
+ // Does not actually need `Pin`, but the pthread implementation does.
+ pub fn unpark(self: Pin<&Self>) {
+ let state = self.state.swap(NOTIFIED, Release);
+ if !matches!(state, EMPTY | NOTIFIED) {
+ let lwp = state as lwpid_t;
+ let hint = self.state.as_mut_ptr().cast();
+
+ // If the parking thread terminated and did not actually park, this will
+ // probably return an error, which is OK. In the worst case, another
+ // thread has received the same LWP id. It will then receive a spurious
+ // wakeup, but those are allowable per the API contract. The same reasoning
+ // applies if a timeout occurred before this call, but the state was not
+ // yet reset.
+
+ // SAFETY:
+ // The syscall has no invariants to hold. Only unsafe because it is an
+ // extern function.
+ unsafe {
+ _lwp_unpark(lwp, hint);
+ }
+ }
+ }
+}
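
Unlike the other parkers, this one folds the waiter's identity into the state word itself: anything other than EMPTY (0) or NOTIFIED (u64::MAX) is the parked thread's LWP id. A small sketch of that encoding, with a made-up id standing in for `_lwp_self`:

    use std::sync::atomic::{AtomicU64, Ordering::{Acquire, Relaxed, Release}};

    const EMPTY: u64 = 0;
    const NOTIFIED: u64 = u64::MAX;

    /// Try to record `lwp_id` as the parked waiter; returns false if a
    /// notification already arrived and parking should be skipped.
    fn try_park(state: &AtomicU64, lwp_id: u64) -> bool {
        state.compare_exchange(EMPTY, lwp_id, Relaxed, Acquire).is_ok()
    }

    /// Publish a notification; returns the previously parked id, if any,
    /// which the real implementation passes to `_lwp_unpark`.
    fn unpark(state: &AtomicU64) -> Option<u64> {
        match state.swap(NOTIFIED, Release) {
            EMPTY | NOTIFIED => None,
            id => Some(id),
        }
    }

    fn main() {
        let state = AtomicU64::new(EMPTY);
        assert!(try_park(&state, 42));
        assert_eq!(unpark(&state), Some(42));
    }
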
diff --git a/library/std/src/sys/unix/thread_parker/pthread.rs b/library/std/src/sys/unix/thread_parker/pthread.rs
new file mode 100644
index 000000000..3dfc0026e
--- /dev/null
+++ b/library/std/src/sys/unix/thread_parker/pthread.rs
@@ -0,0 +1,271 @@
+//! Thread parking without `futex` using the `pthread` synchronization primitives.
+
+use crate::cell::UnsafeCell;
+use crate::marker::PhantomPinned;
+use crate::pin::Pin;
+use crate::ptr::addr_of_mut;
+use crate::sync::atomic::AtomicUsize;
+use crate::sync::atomic::Ordering::SeqCst;
+use crate::time::Duration;
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+unsafe fn lock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_lock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn unlock(lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_mutex_unlock(lock);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn notify_one(cond: *mut libc::pthread_cond_t) {
+ let r = libc::pthread_cond_signal(cond);
+ debug_assert_eq!(r, 0);
+}
+
+unsafe fn wait(cond: *mut libc::pthread_cond_t, lock: *mut libc::pthread_mutex_t) {
+ let r = libc::pthread_cond_wait(cond, lock);
+ debug_assert_eq!(r, 0);
+}
+
+const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+unsafe fn wait_timeout(
+ cond: *mut libc::pthread_cond_t,
+ lock: *mut libc::pthread_mutex_t,
+ dur: Duration,
+) {
+ // Use the system clock on systems that do not support pthread_condattr_setclock.
+ // This unfortunately results in problems when the system time changes.
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ ))]
+ let (now, dur) = {
+ use crate::cmp::min;
+ use crate::sys::time::SystemTime;
+
+ // OSX implementation of `pthread_cond_timedwait` is buggy
+ // with super long durations. When duration is greater than
+ // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+ // in macOS Sierra returns error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `park_timeout`
+ // because of spurious wakeups.
+ let dur = min(dur, Duration::from_secs(1000 * 365 * 86400));
+ let now = SystemTime::now().t;
+ (now, dur)
+ };
+ // Use the monotonic clock on other systems.
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "espidf"
+ )))]
+ let (now, dur) = {
+ use crate::sys::time::Timespec;
+
+ (Timespec::now(libc::CLOCK_MONOTONIC), dur)
+ };
+
+ let timeout =
+ now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX);
+ let r = libc::pthread_cond_timedwait(cond, lock, &timeout);
+ debug_assert!(r == libc::ETIMEDOUT || r == 0);
+}
+
+pub struct Parker {
+ state: AtomicUsize,
+ lock: UnsafeCell<libc::pthread_mutex_t>,
+ cvar: UnsafeCell<libc::pthread_cond_t>,
+ // The `pthread` primitives require a stable address, so make this struct `!Unpin`.
+ _pinned: PhantomPinned,
+}
+
+impl Parker {
+ /// Construct the UNIX parker in-place.
+ ///
+ /// # Safety
+ /// The constructed parker must never be moved.
+ pub unsafe fn new(parker: *mut Parker) {
+ // Use the default mutex implementation to allow for simpler initialization.
+ // This could lead to undefined behaviour when deadlocking. This is avoided
+ // by not deadlocking. Note in particular the unlocking operation before any
+ // panic, as code after the panic could try to park again.
+ addr_of_mut!((*parker).state).write(AtomicUsize::new(EMPTY));
+ addr_of_mut!((*parker).lock).write(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER));
+
+ cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "watchos",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))] {
+ addr_of_mut!((*parker).cvar).write(UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER));
+ } else if #[cfg(any(target_os = "espidf", target_os = "horizon"))] {
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), crate::ptr::null());
+ assert_eq!(r, 0);
+ } else {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(addr_of_mut!((*parker).cvar).cast(), attr.as_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ }
+ }
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker.
+ pub unsafe fn park(self: Pin<&Self>) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => {
+ unlock(self.lock.get());
+
+ panic!("inconsistent park state")
+ }
+ }
+
+ loop {
+ wait(self.cvar.get(), self.lock.get());
+
+ match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+ Ok(_) => break, // got a notification
+ Err(_) => {} // spurious wakeup, go back to sleep
+ }
+ }
+
+ unlock(self.lock.get());
+ }
+
+ // This implementation doesn't require `unsafe`, but other implementations
+ // may assume this is only called by the thread that owns the Parker. Use
+ // `Pin` to guarantee a stable address for the mutex and condition variable.
+ pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) {
+ // Like `park` above we have a fast path for an already-notified thread:
+ // consume the notification and return quickly. Otherwise we start
+ // coordinating for a sleep.
+ if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+ return;
+ }
+
+ lock(self.lock.get());
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ unlock(self.lock.get());
+
+ assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+ return;
+ } // should consume this notification, so prohibit spurious wakeups in next park.
+ Err(_) => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state")
+ }
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ wait_timeout(self.cvar.get(), self.lock.get(), dur);
+
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
+ PARKED => unlock(self.lock.get()), // no notification, alas
+ n => {
+ unlock(self.lock.get());
+ panic!("inconsistent park_timeout state: {n}")
+ }
+ }
+ }
+
+ pub fn unpark(self: Pin<&Self>) {
+ // To ensure the unparked thread will observe any writes we made
+ // before this call, we must perform a release operation that `park`
+ // can synchronize with. To do that we must write `NOTIFIED` even if
+ // `state` is already `NOTIFIED`. That is why this must be a swap
+ // rather than a compare-and-swap that returns if it reads `NOTIFIED`
+ // on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ unsafe {
+ lock(self.lock.get());
+ unlock(self.lock.get());
+ notify_one(self.cvar.get());
+ }
+ }
+}
+
+impl Drop for Parker {
+ fn drop(&mut self) {
+ unsafe {
+ libc::pthread_cond_destroy(self.cvar.get_mut());
+ libc::pthread_mutex_destroy(self.lock.get_mut());
+ }
+ }
+}
+
+unsafe impl Sync for Parker {}
+unsafe impl Send for Parker {}
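
One detail worth calling out: `wait_timeout` converts the relative `Duration` into an absolute deadline and saturates to `TIMESPEC_MAX` when the addition overflows, so an unrepresentable deadline degrades into "wait forever". A rough sketch of that saturation with plain seconds (the real code goes through `Timespec::checked_add_duration` and `to_timespec`):

    use std::time::Duration;

    const TV_SEC_MAX: i64 = i64::MAX;

    /// Returns the absolute deadline in whole seconds, saturating on overflow.
    fn deadline_secs(now_secs: i64, dur: Duration) -> i64 {
        i64::try_from(dur.as_secs())
            .ok()
            .and_then(|d| now_secs.checked_add(d))
            .unwrap_or(TV_SEC_MAX)
    }

    fn main() {
        assert_eq!(deadline_secs(100, Duration::from_secs(5)), 105);
        assert_eq!(deadline_secs(1, Duration::MAX), TV_SEC_MAX); // saturated
    }
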
diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs
index dff973f59..cca9c6767 100644
--- a/library/std/src/sys/unix/time.rs
+++ b/library/std/src/sys/unix/time.rs
@@ -7,6 +7,12 @@ const NSEC_PER_SEC: u64 = 1_000_000_000;
pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() };
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(0)]
+#[rustc_layout_scalar_valid_range_end(999_999_999)]
+struct Nanoseconds(u32);
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SystemTime {
pub(in crate::sys::unix) t: Timespec,
}
@@ -14,7 +20,7 @@ pub struct SystemTime {
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(in crate::sys::unix) struct Timespec {
tv_sec: i64,
- tv_nsec: i64,
+ tv_nsec: Nanoseconds,
}
impl SystemTime {
@@ -46,18 +52,20 @@ impl fmt::Debug for SystemTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemTime")
.field("tv_sec", &self.t.tv_sec)
- .field("tv_nsec", &self.t.tv_nsec)
+ .field("tv_nsec", &self.t.tv_nsec.0)
.finish()
}
}
impl Timespec {
pub const fn zero() -> Timespec {
- Timespec { tv_sec: 0, tv_nsec: 0 }
+ Timespec::new(0, 0)
}
- fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
- Timespec { tv_sec, tv_nsec }
+ const fn new(tv_sec: i64, tv_nsec: i64) -> Timespec {
+ assert!(tv_nsec >= 0 && tv_nsec < NSEC_PER_SEC as i64);
+ // SAFETY: The assert above checks tv_nsec is within the valid range
+ Timespec { tv_sec, tv_nsec: unsafe { Nanoseconds(tv_nsec as u32) } }
}
pub fn sub_timespec(&self, other: &Timespec) -> Result<Duration, Duration> {
@@ -75,12 +83,12 @@ impl Timespec {
//
// Ideally this code could be rearranged such that it more
// directly expresses the lower-cost behavior we want from it.
- let (secs, nsec) = if self.tv_nsec >= other.tv_nsec {
- ((self.tv_sec - other.tv_sec) as u64, (self.tv_nsec - other.tv_nsec) as u32)
+ let (secs, nsec) = if self.tv_nsec.0 >= other.tv_nsec.0 {
+ ((self.tv_sec - other.tv_sec) as u64, self.tv_nsec.0 - other.tv_nsec.0)
} else {
(
(self.tv_sec - other.tv_sec - 1) as u64,
- self.tv_nsec as u32 + (NSEC_PER_SEC as u32) - other.tv_nsec as u32,
+ self.tv_nsec.0 + (NSEC_PER_SEC as u32) - other.tv_nsec.0,
)
};
@@ -102,7 +110,7 @@ impl Timespec {
// Nano calculations can't overflow because nanos are <1B which fit
// in a u32.
- let mut nsec = other.subsec_nanos() + self.tv_nsec as u32;
+ let mut nsec = other.subsec_nanos() + self.tv_nsec.0;
if nsec >= NSEC_PER_SEC as u32 {
nsec -= NSEC_PER_SEC as u32;
secs = secs.checked_add(1)?;
@@ -118,7 +126,7 @@ impl Timespec {
.and_then(|secs| self.tv_sec.checked_sub(secs))?;
// Similar to above, nanos can't overflow.
- let mut nsec = self.tv_nsec as i32 - other.subsec_nanos() as i32;
+ let mut nsec = self.tv_nsec.0 as i32 - other.subsec_nanos() as i32;
if nsec < 0 {
nsec += NSEC_PER_SEC as i32;
secs = secs.checked_sub(1)?;
@@ -130,7 +138,7 @@ impl Timespec {
pub fn to_timespec(&self) -> Option<libc::timespec> {
Some(libc::timespec {
tv_sec: self.tv_sec.try_into().ok()?,
- tv_nsec: self.tv_nsec.try_into().ok()?,
+ tv_nsec: self.tv_nsec.0.try_into().ok()?,
})
}
}
@@ -293,7 +301,7 @@ mod inner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Instant")
.field("tv_sec", &self.t.tv_sec)
- .field("tv_nsec", &self.t.tv_nsec)
+ .field("tv_nsec", &self.t.tv_nsec.0)
.finish()
}
}
@@ -334,7 +342,7 @@ mod inner {
let mut t = MaybeUninit::uninit();
cvt(unsafe { clock_gettime64(clock, t.as_mut_ptr()) }).unwrap();
let t = unsafe { t.assume_init() };
- return Timespec { tv_sec: t.tv_sec, tv_nsec: t.tv_nsec as i64 };
+ return Timespec::new(t.tv_sec, t.tv_nsec as i64);
}
}
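
The `rustc_layout_scalar_valid_range_*` attributes used for `Nanoseconds` are internal to the compiler and standard library; outside `std` the same invariant is normally enforced with a checked constructor and no layout niche. A hedged sketch of that plainer equivalent:

    const NSEC_PER_SEC: u32 = 1_000_000_000;

    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
    struct Nanoseconds(u32);

    impl Nanoseconds {
        /// Only values in 0..NSEC_PER_SEC represent a valid sub-second part.
        const fn new(nanos: u32) -> Option<Self> {
            if nanos < NSEC_PER_SEC { Some(Nanoseconds(nanos)) } else { None }
        }
    }

    fn main() {
        assert!(Nanoseconds::new(999_999_999).is_some());
        assert!(Nanoseconds::new(1_000_000_000).is_none());
    }
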
diff --git a/library/std/src/sys/unsupported/alloc.rs b/library/std/src/sys/unsupported/alloc.rs
index 8d5d0a2f5..d715ae454 100644
--- a/library/std/src/sys/unsupported/alloc.rs
+++ b/library/std/src/sys/unsupported/alloc.rs
@@ -1,15 +1,16 @@
use crate::alloc::{GlobalAlloc, Layout, System};
+use crate::ptr::null_mut;
#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
#[inline]
unsafe fn alloc_zeroed(&self, _layout: Layout) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
#[inline]
@@ -17,6 +18,6 @@ unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn realloc(&self, _ptr: *mut u8, _layout: Layout, _new_size: usize) -> *mut u8 {
- 0 as *mut u8
+ null_mut()
}
}
diff --git a/library/std/src/sys/unsupported/common.rs b/library/std/src/sys/unsupported/common.rs
index 4c9ade4a8..5cd9e57de 100644
--- a/library/std/src/sys/unsupported/common.rs
+++ b/library/std/src/sys/unsupported/common.rs
@@ -6,7 +6,7 @@ pub mod memchr {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {}
// SAFETY: must be called only once during runtime cleanup.
// NOTE: this is not guaranteed to run, for example when the program aborts.
diff --git a/library/std/src/sys/unsupported/fs.rs b/library/std/src/sys/unsupported/fs.rs
index 0e1a6257e..6ac1b5d2b 100644
--- a/library/std/src/sys/unsupported/fs.rs
+++ b/library/std/src/sys/unsupported/fs.rs
@@ -1,7 +1,7 @@
use crate::ffi::OsString;
use crate::fmt;
use crate::hash::{Hash, Hasher};
-use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::path::{Path, PathBuf};
use crate::sys::time::SystemTime;
use crate::sys::unsupported;
@@ -214,7 +214,7 @@ impl File {
self.0
}
- pub fn read_buf(&self, _buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ pub fn read_buf(&self, _cursor: BorrowedCursor<'_>) -> io::Result<()> {
self.0
}
diff --git a/library/std/src/sys/unsupported/io.rs b/library/std/src/sys/unsupported/io.rs
index d5f475b43..82610ffab 100644
--- a/library/std/src/sys/unsupported/io.rs
+++ b/library/std/src/sys/unsupported/io.rs
@@ -45,3 +45,7 @@ impl<'a> IoSliceMut<'a> {
self.0
}
}
+
+pub fn is_terminal<T>(_: &T) -> bool {
+ false
+}
diff --git a/library/std/src/sys/unsupported/locks/condvar.rs b/library/std/src/sys/unsupported/locks/condvar.rs
index e703fd0d2..527a26a12 100644
--- a/library/std/src/sys/unsupported/locks/condvar.rs
+++ b/library/std/src/sys/unsupported/locks/condvar.rs
@@ -7,6 +7,7 @@ pub type MovableCondvar = Condvar;
impl Condvar {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Condvar {
Condvar {}
}
diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs
index d412ff152..602a2d623 100644
--- a/library/std/src/sys/unsupported/locks/mod.rs
+++ b/library/std/src/sys/unsupported/locks/mod.rs
@@ -3,4 +3,4 @@ mod mutex;
mod rwlock;
pub use condvar::{Condvar, MovableCondvar};
pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::{MovableRwLock, RwLock};
+pub use rwlock::MovableRwLock;
diff --git a/library/std/src/sys/unsupported/locks/mutex.rs b/library/std/src/sys/unsupported/locks/mutex.rs
index d7cb12e0c..87ea475c6 100644
--- a/library/std/src/sys/unsupported/locks/mutex.rs
+++ b/library/std/src/sys/unsupported/locks/mutex.rs
@@ -12,14 +12,12 @@ unsafe impl Sync for Mutex {} // no threads on this platform
impl Mutex {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Mutex {
Mutex { locked: Cell::new(false) }
}
#[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
pub unsafe fn lock(&self) {
assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
}
diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs
index aca5fb715..5292691b9 100644
--- a/library/std/src/sys/unsupported/locks/rwlock.rs
+++ b/library/std/src/sys/unsupported/locks/rwlock.rs
@@ -12,6 +12,7 @@ unsafe impl Sync for RwLock {} // no threads on this platform
impl RwLock {
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> RwLock {
RwLock { mode: Cell::new(0) }
}
diff --git a/library/std/src/sys/unsupported/process.rs b/library/std/src/sys/unsupported/process.rs
index 42a1ff730..633f17c05 100644
--- a/library/std/src/sys/unsupported/process.rs
+++ b/library/std/src/sys/unsupported/process.rs
@@ -200,6 +200,9 @@ impl<'a> Iterator for CommandArgs<'a> {
fn next(&mut self) -> Option<&'a OsStr> {
None
}
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
}
impl<'a> ExactSizeIterator for CommandArgs<'a> {}
diff --git a/library/std/src/sys/unsupported/thread_local_dtor.rs b/library/std/src/sys/unsupported/thread_local_dtor.rs
index 85d660983..84660ea58 100644
--- a/library/std/src/sys/unsupported/thread_local_dtor.rs
+++ b/library/std/src/sys/unsupported/thread_local_dtor.rs
@@ -1,5 +1,6 @@
#![unstable(feature = "thread_local_internals", issue = "none")]
+#[cfg_attr(target_family = "wasm", allow(unused))] // unused on wasm32-unknown-unknown
pub unsafe fn register_dtor(_t: *mut u8, _dtor: unsafe extern "C" fn(*mut u8)) {
// FIXME: right now there is no concept of "thread exit", but this is likely
// going to show up at some point in the form of an exported symbol that the
diff --git a/library/std/src/sys/unsupported/thread_local_key.rs b/library/std/src/sys/unsupported/thread_local_key.rs
index c31b61cbf..b6e5e4cd2 100644
--- a/library/std/src/sys/unsupported/thread_local_key.rs
+++ b/library/std/src/sys/unsupported/thread_local_key.rs
@@ -19,8 +19,3 @@ pub unsafe fn get(_key: Key) -> *mut u8 {
pub unsafe fn destroy(_key: Key) {
panic!("should not be used on this target");
}
-
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- panic!("should not be used on this target");
-}
diff --git a/library/std/src/sys/wasi/fs.rs b/library/std/src/sys/wasi/fs.rs
index 6614ae397..d4866bbc3 100644
--- a/library/std/src/sys/wasi/fs.rs
+++ b/library/std/src/sys/wasi/fs.rs
@@ -1,9 +1,9 @@
#![deny(unsafe_op_in_unsafe_fn)]
use super::fd::WasiFd;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
-use crate::io::{self, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
+use crate::io::{self, BorrowedCursor, IoSlice, IoSliceMut, SeekFrom};
use crate::iter;
use crate::mem::{self, ManuallyDrop};
use crate::os::raw::c_int;
@@ -12,6 +12,7 @@ use crate::os::wasi::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd
use crate::path::{Path, PathBuf};
use crate::ptr;
use crate::sync::Arc;
+use crate::sys::common::small_c_string::run_path_with_cstr;
use crate::sys::time::SystemTime;
use crate::sys::unsupported;
use crate::sys_common::{AsInner, FromInner, IntoInner};
@@ -65,8 +66,8 @@ pub struct FilePermissions {
#[derive(Copy, Clone, Debug, Default)]
pub struct FileTimes {
- accessed: Option<wasi::Timestamp>,
- modified: Option<wasi::Timestamp>,
+ accessed: Option<SystemTime>,
+ modified: Option<SystemTime>,
}
#[derive(PartialEq, Eq, Hash, Debug, Copy, Clone)]
@@ -120,11 +121,11 @@ impl FilePermissions {
impl FileTimes {
pub fn set_accessed(&mut self, t: SystemTime) {
- self.accessed = Some(t.to_wasi_timestamp_or_panic());
+ self.accessed = Some(t);
}
pub fn set_modified(&mut self, t: SystemTime) {
- self.modified = Some(t.to_wasi_timestamp_or_panic());
+ self.modified = Some(t);
}
}
@@ -439,8 +440,8 @@ impl File {
true
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- crate::io::default_read_buf(|buf| self.read(buf), buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ crate::io::default_read_buf(|buf| self.read(buf), cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -476,9 +477,16 @@ impl File {
}
pub fn set_times(&self, times: FileTimes) -> io::Result<()> {
+ let to_timestamp = |time: Option<SystemTime>| {
+ match time {
+ Some(time) if let Some(ts) = time.to_wasi_timestamp() => Ok(ts),
+ Some(_) => Err(io::const_io_error!(io::ErrorKind::InvalidInput, "timestamp is too large to set as a file time")),
+ None => Ok(0),
+ }
+ };
self.fd.filestat_set_times(
- times.accessed.unwrap_or(0),
- times.modified.unwrap_or(0),
+ to_timestamp(times.accessed)?,
+ to_timestamp(times.modified)?,
times.accessed.map_or(0, |_| wasi::FSTFLAGS_ATIM)
| times.modified.map_or(0, |_| wasi::FSTFLAGS_MTIM),
)
@@ -687,51 +695,52 @@ fn open_at(fd: &WasiFd, path: &Path, opts: &OpenOptions) -> io::Result<File> {
/// Note that this can fail if `p` doesn't look like it can be opened relative
/// to any pre-opened file descriptor.
fn open_parent(p: &Path) -> io::Result<(ManuallyDrop<WasiFd>, PathBuf)> {
- let p = CString::new(p.as_os_str().as_bytes())?;
- let mut buf = Vec::<u8>::with_capacity(512);
- loop {
- unsafe {
- let mut relative_path = buf.as_ptr().cast();
- let mut abs_prefix = ptr::null();
- let fd = __wasilibc_find_relpath(
- p.as_ptr(),
- &mut abs_prefix,
- &mut relative_path,
- buf.capacity(),
- );
- if fd == -1 {
- if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) {
- // Trigger the internal buffer resizing logic of `Vec` by requiring
- // more space than the current capacity.
- let cap = buf.capacity();
- buf.set_len(cap);
- buf.reserve(1);
- continue;
- }
- let msg = format!(
- "failed to find a pre-opened file descriptor \
- through which {:?} could be opened",
- p
+ run_path_with_cstr(p, |p| {
+ let mut buf = Vec::<u8>::with_capacity(512);
+ loop {
+ unsafe {
+ let mut relative_path = buf.as_ptr().cast();
+ let mut abs_prefix = ptr::null();
+ let fd = __wasilibc_find_relpath(
+ p.as_ptr(),
+ &mut abs_prefix,
+ &mut relative_path,
+ buf.capacity(),
);
- return Err(io::Error::new(io::ErrorKind::Uncategorized, msg));
- }
- let relative = CStr::from_ptr(relative_path).to_bytes().to_vec();
+ if fd == -1 {
+ if io::Error::last_os_error().raw_os_error() == Some(libc::ENOMEM) {
+ // Trigger the internal buffer resizing logic of `Vec` by requiring
+ // more space than the current capacity.
+ let cap = buf.capacity();
+ buf.set_len(cap);
+ buf.reserve(1);
+ continue;
+ }
+ let msg = format!(
+ "failed to find a pre-opened file descriptor \
+ through which {:?} could be opened",
+ p
+ );
+ return Err(io::Error::new(io::ErrorKind::Uncategorized, msg));
+ }
+ let relative = CStr::from_ptr(relative_path).to_bytes().to_vec();
- return Ok((
- ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)),
- PathBuf::from(OsString::from_vec(relative)),
- ));
+ return Ok((
+ ManuallyDrop::new(WasiFd::from_raw_fd(fd as c_int)),
+ PathBuf::from(OsString::from_vec(relative)),
+ ));
+ }
}
- }
- extern "C" {
- pub fn __wasilibc_find_relpath(
- path: *const libc::c_char,
- abs_prefix: *mut *const libc::c_char,
- relative_path: *mut *const libc::c_char,
- relative_path_len: libc::size_t,
- ) -> libc::c_int;
- }
+ extern "C" {
+ pub fn __wasilibc_find_relpath(
+ path: *const libc::c_char,
+ abs_prefix: *mut *const libc::c_char,
+ relative_path: *mut *const libc::c_char,
+ relative_path_len: libc::size_t,
+ ) -> libc::c_int;
+ }
+ })
}
pub fn osstr2str(f: &OsStr) -> io::Result<&str> {
diff --git a/library/std/src/sys/wasi/io.rs b/library/std/src/sys/wasi/io.rs
index ee017d13a..2cd45df88 100644
--- a/library/std/src/sys/wasi/io.rs
+++ b/library/std/src/sys/wasi/io.rs
@@ -1,6 +1,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::marker::PhantomData;
+use crate::os::fd::{AsFd, AsRawFd};
use crate::slice;
#[derive(Copy, Clone)]
@@ -71,3 +72,8 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.buf_len) }
}
}
+
+pub fn is_terminal(fd: &impl AsFd) -> bool {
+ let fd = fd.as_fd();
+ unsafe { libc::isatty(fd.as_raw_fd()) != 0 }
+}
diff --git a/library/std/src/sys/wasi/mod.rs b/library/std/src/sys/wasi/mod.rs
index 683a07a34..c8c47763a 100644
--- a/library/std/src/sys/wasi/mod.rs
+++ b/library/std/src/sys/wasi/mod.rs
@@ -25,6 +25,9 @@ pub mod cmath;
pub mod env;
pub mod fd;
pub mod fs;
+#[allow(unused)]
+#[path = "../wasm/atomics/futex.rs"]
+pub mod futex;
pub mod io;
#[path = "../unsupported/locks/mod.rs"]
pub mod locks;
diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs
index c5229a188..f5513e999 100644
--- a/library/std/src/sys/wasi/os.rs
+++ b/library/std/src/sys/wasi/os.rs
@@ -1,14 +1,15 @@
#![deny(unsafe_op_in_unsafe_fn)]
-use crate::any::Any;
use crate::error::Error as StdError;
-use crate::ffi::{CStr, CString, OsStr, OsString};
+use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
+use crate::ops::Drop;
use crate::os::wasi::prelude::*;
use crate::path::{self, PathBuf};
use crate::str;
+use crate::sys::common::small_c_string::{run_path_with_cstr, run_with_cstr};
use crate::sys::memchr;
use crate::sys::unsupported;
use crate::vec;
@@ -23,10 +24,26 @@ mod libc {
}
}
-#[cfg(not(target_feature = "atomics"))]
-pub unsafe fn env_lock() -> impl Any {
- // No need for a lock if we're single-threaded, but this function will need
- // to get implemented for multi-threaded scenarios
+cfg_if::cfg_if! {
+ if #[cfg(target_feature = "atomics")] {
+ // Access to the environment must be protected by a lock in multi-threaded scenarios.
+ use crate::sync::{PoisonError, RwLock};
+ static ENV_LOCK: RwLock<()> = RwLock::new(());
+ pub fn env_read_lock() -> impl Drop {
+ ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
+ }
+ pub fn env_write_lock() -> impl Drop {
+ ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner)
+ }
+ } else {
+ // No need for a lock if we are single-threaded.
+ pub fn env_read_lock() -> impl Drop {
+ Box::new(())
+ }
+ pub fn env_write_lock() -> impl Drop {
+ Box::new(())
+ }
+ }
}
pub fn errno() -> i32 {
@@ -77,13 +94,10 @@ pub fn getcwd() -> io::Result<PathBuf> {
}
pub fn chdir(p: &path::Path) -> io::Result<()> {
- let p: &OsStr = p.as_ref();
- let p = CString::new(p.as_bytes())?;
- unsafe {
- match libc::chdir(p.as_ptr()) == (0 as libc::c_int) {
- true => Ok(()),
- false => Err(io::Error::last_os_error()),
- }
+ let result = run_path_with_cstr(p, |p| unsafe { Ok(libc::chdir(p.as_ptr())) })?;
+ match result == (0 as libc::c_int) {
+ true => Ok(()),
+ false => Err(io::Error::last_os_error()),
}
}
@@ -146,7 +160,7 @@ impl Iterator for Env {
pub fn env() -> Env {
unsafe {
- let _guard = env_lock();
+ let _guard = env_read_lock();
let mut environ = libc::environ;
let mut result = Vec::new();
if !environ.is_null() {
@@ -176,35 +190,32 @@ pub fn env() -> Env {
}
pub fn getenv(k: &OsStr) -> Option<OsString> {
- let k = CString::new(k.as_bytes()).ok()?;
- unsafe {
- let _guard = env_lock();
- let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
- if s.is_null() {
- None
- } else {
- Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
- }
+ let s = run_with_cstr(k.as_bytes(), |k| unsafe {
+ let _guard = env_read_lock();
+ Ok(libc::getenv(k.as_ptr()) as *const libc::c_char)
+ })
+ .ok()?;
+ if s.is_null() {
+ None
+ } else {
+ Some(OsStringExt::from_vec(unsafe { CStr::from_ptr(s) }.to_bytes().to_vec()))
}
}
pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
- let k = CString::new(k.as_bytes())?;
- let v = CString::new(v.as_bytes())?;
-
- unsafe {
- let _guard = env_lock();
- cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
- }
+ run_with_cstr(k.as_bytes(), |k| {
+ run_with_cstr(v.as_bytes(), |v| unsafe {
+ let _guard = env_write_lock();
+ cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop)
+ })
+ })
}
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
- let nbuf = CString::new(n.as_bytes())?;
-
- unsafe {
- let _guard = env_lock();
+ run_with_cstr(n.as_bytes(), |nbuf| unsafe {
+ let _guard = env_write_lock();
cvt(libc::unsetenv(nbuf.as_ptr())).map(drop)
- }
+ })
}
pub fn temp_dir() -> PathBuf {
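
The new environment lock is an instance of a common pattern: a process-wide `RwLock<()>` whose guards exist only for their `Drop` side effect, with poisoning deliberately ignored. A standalone sketch of the same pattern:

    use std::sync::{PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard};

    static ENV_LOCK: RwLock<()> = RwLock::new(());

    fn env_read_lock() -> RwLockReadGuard<'static, ()> {
        // A panic while holding the lock only poisons it; the data is `()`,
        // so it is always safe to keep going.
        ENV_LOCK.read().unwrap_or_else(PoisonError::into_inner)
    }

    fn env_write_lock() -> RwLockWriteGuard<'static, ()> {
        ENV_LOCK.write().unwrap_or_else(PoisonError::into_inner)
    }

    fn main() {
        {
            let _guard = env_read_lock();
            // ... read the environment while the guard is alive ...
        }
        let _guard = env_write_lock();
        // ... mutate the environment while the guard is alive ...
    }
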
diff --git a/library/std/src/sys/wasi/time.rs b/library/std/src/sys/wasi/time.rs
index 3d326e491..016b06efb 100644
--- a/library/std/src/sys/wasi/time.rs
+++ b/library/std/src/sys/wasi/time.rs
@@ -47,8 +47,8 @@ impl SystemTime {
SystemTime(Duration::from_nanos(ts))
}
- pub fn to_wasi_timestamp_or_panic(&self) -> wasi::Timestamp {
- self.0.as_nanos().try_into().expect("time does not fit in WASI timestamp")
+ pub fn to_wasi_timestamp(&self) -> Option<wasi::Timestamp> {
+ self.0.as_nanos().try_into().ok()
}
pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
diff --git a/library/std/src/sys/wasm/mod.rs b/library/std/src/sys/wasm/mod.rs
index 4159efe2a..93838390b 100644
--- a/library/std/src/sys/wasm/mod.rs
+++ b/library/std/src/sys/wasm/mod.rs
@@ -57,7 +57,7 @@ cfg_if::cfg_if! {
mod futex_rwlock;
pub(crate) use futex_condvar::{Condvar, MovableCondvar};
pub(crate) use futex_mutex::{Mutex, MovableMutex};
- pub(crate) use futex_rwlock::{RwLock, MovableRwLock};
+ pub(crate) use futex_rwlock::MovableRwLock;
}
#[path = "atomics/futex.rs"]
pub mod futex;
diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs
index fdc81cdea..d53ea1600 100644
--- a/library/std/src/sys/windows/alloc.rs
+++ b/library/std/src/sys/windows/alloc.rs
@@ -16,6 +16,7 @@ mod tests;
// Flag to indicate that the memory returned by `HeapAlloc` should be zeroed.
const HEAP_ZERO_MEMORY: c::DWORD = 0x00000008;
+#[link(name = "kernel32")]
extern "system" {
// Get a handle to the default heap of the current process, or null if the operation fails.
//
@@ -168,7 +169,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
// SAFETY: Because the size and alignment of a header is <= `MIN_ALIGN` and `aligned`
// is aligned to at least `MIN_ALIGN` and has at least `MIN_ALIGN` bytes of padding before
// it, it is safe to write a header directly before it.
- unsafe { ptr::write((aligned as *mut Header).offset(-1), Header(ptr)) };
+ unsafe { ptr::write((aligned as *mut Header).sub(1), Header(ptr)) };
// SAFETY: The returned pointer does not point to the start of an allocated block,
// but there is a header readable directly before it containing the location of the start
@@ -213,7 +214,7 @@ unsafe impl GlobalAlloc for System {
// SAFETY: Because of the contract of `System`, `ptr` is guaranteed to be non-null
// and have a header readable directly before it.
- unsafe { ptr::read((ptr as *mut Header).offset(-1)).0 }
+ unsafe { ptr::read((ptr as *mut Header).sub(1)).0 }
}
};
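
`sub(1)` is the clearer spelling of `offset(-1)` for stepping back over the `Header` stored just before the aligned allocation. A tiny illustration of reading such a header, using a local array in place of `HeapAlloc`:

    #[repr(C)]
    #[derive(Copy, Clone, Debug, PartialEq)]
    struct Header(usize);

    fn main() {
        // Two consecutive headers standing in for "header, then user data".
        let buf = [Header(0xAAAA), Header(0xBBBB)];
        let user_ptr = &buf[1] as *const Header;

        // Step back over exactly one Header, as the allocator does with `sub(1)`.
        let header = unsafe { *user_ptr.sub(1) };
        assert_eq!(header, Header(0xAAAA));
    }
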
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 478068c73..be6fc2ebb 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -66,10 +66,12 @@ pub type LPSYSTEM_INFO = *mut SYSTEM_INFO;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut c_void;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *mut c_void;
+pub type BCRYPT_ALG_HANDLE = LPVOID;
pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
pub type PLARGE_INTEGER = *mut c_longlong;
pub type PSRWLOCK = *mut SRWLOCK;
+pub type LPINIT_ONCE = *mut INIT_ONCE;
pub type SOCKET = crate::os::windows::raw::SOCKET;
pub type socklen_t = c_int;
@@ -125,6 +127,10 @@ pub const SECURITY_SQOS_PRESENT: DWORD = 0x00100000;
pub const FIONBIO: c_ulong = 0x8004667e;
+pub const MAX_PATH: usize = 260;
+
+pub const FILE_TYPE_PIPE: u32 = 3;
+
#[repr(C)]
#[derive(Copy)]
pub struct WIN32_FIND_DATAW {
@@ -193,6 +199,9 @@ pub const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
pub const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { ptr: ptr::null_mut() };
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };
+pub const INIT_ONCE_STATIC_INIT: INIT_ONCE = INIT_ONCE { ptr: ptr::null_mut() };
+
+pub const INIT_ONCE_INIT_FAILED: DWORD = 0x00000004;
pub const DETACHED_PROCESS: DWORD = 0x00000008;
pub const CREATE_NEW_PROCESS_GROUP: DWORD = 0x00000200;
@@ -285,6 +294,8 @@ pub fn nt_success(status: NTSTATUS) -> bool {
status >= 0
}
+// "RNG\0"
+pub const BCRYPT_RNG_ALGORITHM: &[u16] = &[b'R' as u16, b'N' as u16, b'G' as u16, 0];
pub const BCRYPT_USE_SYSTEM_PREFERRED_RNG: DWORD = 0x00000002;
#[repr(C)]
@@ -455,6 +466,12 @@ pub enum FILE_INFO_BY_HANDLE_CLASS {
}
#[repr(C)]
+pub struct FILE_ATTRIBUTE_TAG_INFO {
+ pub FileAttributes: DWORD,
+ pub ReparseTag: DWORD,
+}
+
+#[repr(C)]
pub struct FILE_DISPOSITION_INFO {
pub DeleteFile: BOOLEAN,
}
@@ -501,6 +518,8 @@ pub struct FILE_END_OF_FILE_INFO {
pub EndOfFile: LARGE_INTEGER,
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `rest` field!
#[repr(C)]
pub struct REPARSE_DATA_BUFFER {
pub ReparseTag: c_uint,
@@ -509,6 +528,8 @@ pub struct REPARSE_DATA_BUFFER {
pub rest: (),
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `PathBuffer` field!
#[repr(C)]
pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
pub SubstituteNameOffset: c_ushort,
@@ -519,6 +540,14 @@ pub struct SYMBOLIC_LINK_REPARSE_BUFFER {
pub PathBuffer: WCHAR,
}
+/// NB: Use carefully! In general using this as a reference is likely to get the
+/// provenance wrong for the `PathBuffer` field!
+#[repr(C)]
+pub struct FILE_NAME_INFO {
+ pub FileNameLength: DWORD,
+ pub FileName: [WCHAR; 1],
+}
+
#[repr(C)]
pub struct MOUNT_POINT_REPARSE_BUFFER {
pub SubstituteNameOffset: c_ushort,
@@ -550,6 +579,10 @@ pub struct CONDITION_VARIABLE {
pub struct SRWLOCK {
pub ptr: LPVOID,
}
+#[repr(C)]
+pub struct INIT_ONCE {
+ pub ptr: LPVOID,
+}
#[repr(C)]
pub struct REPARSE_MOUNTPOINT_DATA_BUFFER {
@@ -802,10 +835,6 @@ if #[cfg(not(target_vendor = "uwp"))] {
#[link(name = "advapi32")]
extern "system" {
- // Forbidden when targeting UWP
- #[link_name = "SystemFunction036"]
- pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN;
-
// Allowed but unused by UWP
pub fn OpenProcessToken(
ProcessHandle: HANDLE,
@@ -944,6 +973,7 @@ extern "system" {
pub fn TlsAlloc() -> DWORD;
pub fn TlsGetValue(dwTlsIndex: DWORD) -> LPVOID;
pub fn TlsSetValue(dwTlsIndex: DWORD, lpTlsvalue: LPVOID) -> BOOL;
+ pub fn TlsFree(dwTlsIndex: DWORD) -> BOOL;
pub fn GetLastError() -> DWORD;
pub fn QueryPerformanceFrequency(lpFrequency: *mut LARGE_INTEGER) -> BOOL;
pub fn QueryPerformanceCounter(lpPerformanceCount: *mut LARGE_INTEGER) -> BOOL;
@@ -1086,6 +1116,7 @@ extern "system" {
lpFileInformation: LPVOID,
dwBufferSize: DWORD,
) -> BOOL;
+ pub fn GetFileType(hfile: HANDLE) -> DWORD;
pub fn SleepConditionVariableSRW(
ConditionVariable: PCONDITION_VARIABLE,
SRWLock: PSRWLOCK,
@@ -1103,6 +1134,14 @@ extern "system" {
pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
+ pub fn InitOnceBeginInitialize(
+ lpInitOnce: LPINIT_ONCE,
+ dwFlags: DWORD,
+ fPending: LPBOOL,
+ lpContext: *mut LPVOID,
+ ) -> BOOL;
+ pub fn InitOnceComplete(lpInitOnce: LPINIT_ONCE, dwFlags: DWORD, lpContext: LPVOID) -> BOOL;
+
pub fn CompareStringOrdinal(
lpString1: LPCWSTR,
cchCount1: c_int,
@@ -1217,11 +1256,18 @@ extern "system" {
// >= Vista / Server 2008
// https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
pub fn BCryptGenRandom(
- hAlgorithm: LPVOID,
+ hAlgorithm: BCRYPT_ALG_HANDLE,
pBuffer: *mut u8,
cbBuffer: ULONG,
dwFlags: ULONG,
) -> NTSTATUS;
+ pub fn BCryptOpenAlgorithmProvider(
+ phalgorithm: *mut BCRYPT_ALG_HANDLE,
+ pszAlgId: LPCWSTR,
+ pszimplementation: LPCWSTR,
+ dwflags: ULONG,
+ ) -> NTSTATUS;
+ pub fn BCryptCloseAlgorithmProvider(hAlgorithm: BCRYPT_ALG_HANDLE, dwFlags: ULONG) -> NTSTATUS;
}
// Functions that aren't available on every version of Windows that we support,
@@ -1251,17 +1297,14 @@ compat_fn_with_fallback! {
}
compat_fn_optional! {
- pub static SYNCH_API: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
-
- // >= Windows 8 / Server 2012
- // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitonaddress
+ crate::sys::compat::load_synch_functions();
pub fn WaitOnAddress(
Address: LPVOID,
CompareAddress: LPVOID,
AddressSize: SIZE_T,
dwMilliseconds: DWORD
- ) -> BOOL;
- pub fn WakeByAddressSingle(Address: LPVOID) -> ();
+ );
+ pub fn WakeByAddressSingle(Address: LPVOID);
}
compat_fn_with_fallback! {
diff --git a/library/std/src/sys/windows/cmath.rs b/library/std/src/sys/windows/cmath.rs
index 1a5421fac..43ab8c7ee 100644
--- a/library/std/src/sys/windows/cmath.rs
+++ b/library/std/src/sys/windows/cmath.rs
@@ -44,7 +44,7 @@ mod shims {
}
// On 32-bit x86 MSVC these functions aren't defined, so we just define shims
-// which promote everything fo f64, perform the calculation, and then demote
+// which promote everything to f64, perform the calculation, and then demote
// back to f32. While not precisely correct, this should be "correct enough" for now.
#[cfg(all(target_env = "msvc", target_arch = "x86"))]
mod shims {
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index ccc90177a..7dff81ecb 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -7,52 +7,66 @@
//! `GetModuleHandle` and `GetProcAddress` to look up DLL entry points at
//! runtime.
//!
-//! This implementation uses a static initializer to look up the DLL entry
-//! points. The CRT (C runtime) executes static initializers before `main`
-//! is called (for binaries) and before `DllMain` is called (for DLLs).
-//! This is the ideal time to look up DLL imports, because we are guaranteed
-//! that no other threads will attempt to call these entry points. Thus,
-//! we can look up the imports and store them in `static mut` fields
-//! without any synchronization.
+//! This is implemented simply by storing a function pointer in an atomic.
+//! Loading and calling this function will have little or no overhead
+//! compared with calling any other dynamically imported function.
//!
-//! This has an additional advantage: Because the DLL import lookup happens
-//! at module initialization, the cost of these lookups is deterministic,
-//! and is removed from the code paths that actually call the DLL imports.
-//! That is, there is no unpredictable "cache miss" that occurs when calling
-//! a DLL import. For applications that benefit from predictable delays,
-//! this is a benefit. This also eliminates the comparison-and-branch
-//! from the hot path.
-//!
-//! Currently, the standard library uses only a small number of dynamic
-//! DLL imports. If this number grows substantially, then the cost of
-//! performing all of the lookups at initialization time might become
-//! substantial.
-//!
-//! The mechanism of registering a static initializer with the CRT is
-//! documented in
-//! [CRT Initialization](https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization?view=msvc-160).
-//! It works by contributing a global symbol to the `.CRT$XCU` section.
-//! The linker builds a table of all static initializer functions.
-//! The CRT startup code then iterates that table, calling each
-//! initializer function.
-//!
-//! # **WARNING!!*
-//! The environment that a static initializer function runs in is highly
-//! constrained. There are **many** restrictions on what static initializers
-//! can safely do. Static initializer functions **MUST NOT** do any of the
-//! following (this list is not comprehensive):
-//! * touch any other static field that is used by a different static
-//! initializer, because the order that static initializers run in
-//! is not defined.
-//! * call `LoadLibrary` or any other function that acquires the DLL
-//! loader lock.
-//! * call any Rust function or CRT function that touches any static
-//! (global) state.
+//! The stored function pointer starts out as an importer function which will
+//! swap itself with the real function when it's called for the first time. If
+//! the real function can't be imported then a fallback function is used in its
+//! place. While this is low cost for the happy path (where the function is
+//! already loaded) it does mean there's some overhead the first time the
+//! function is called. In the worst case, multiple threads may all end up
+//! importing the same function unnecessarily.
use crate::ffi::{c_void, CStr};
use crate::ptr::NonNull;
+use crate::sync::atomic::Ordering;
use crate::sys::c;
+// This uses a static initializer to preload some imported functions.
+// The CRT (C runtime) executes static initializers before `main`
+// is called (for binaries) and before `DllMain` is called (for DLLs).
+//
+// It works by contributing a global symbol to the `.CRT$XCT` section.
+// The linker builds a table of all static initializer functions.
+// The CRT startup code then iterates that table, calling each
+// initializer function.
+//
+// NOTE: User code should instead use .CRT$XCU to reliably run after std's initializer.
+// If you're reading this and would like a guarantee here, please
+// file an issue for discussion; currently we don't guarantee any functionality
+// before main.
+// See https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-initialization?view=msvc-170
+#[used]
+#[link_section = ".CRT$XCT"]
+static INIT_TABLE_ENTRY: unsafe extern "C" fn() = init;
+
+/// Preload some imported functions.
+///
+/// Note that any functions included here will be unconditionally loaded in
+/// the final binary, regardless of whether or not they're actually used.
+///
+/// Therefore, this should be limited to `compat_fn_optional` functions which
+/// must be preloaded or any functions where lazier loading demonstrates a
+/// negative performance impact in practical situations.
+///
+/// Currently we only preload `WaitOnAddress` and `WakeByAddressSingle`.
+unsafe extern "C" fn init() {
+ // In an exe this code is executed before main() so is single threaded.
+ // In a DLL the system's loader lock will be held thereby synchronizing
+ // access. So the same best practices apply here as they do to running in DllMain:
+ // https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-best-practices
+ //
+ // DO NOT do anything interesting or complicated in this function! DO NOT call
+ // any Rust functions or CRT functions if those functions touch any global state,
+ // because this function runs during global initialization. For example, DO NOT
+ // do any dynamic allocation, don't call LoadLibrary, etc.
+
+ // Attempt to preload the synch functions.
+ load_synch_functions();
+}
+
/// Helper macro for creating CStrs from literals and symbol names.
macro_rules! ansi_str {
(sym $ident:ident) => {{
@@ -85,39 +99,6 @@ pub(crate) const fn const_cstr_from_bytes(bytes: &'static [u8]) -> &'static CStr
unsafe { crate::ffi::CStr::from_bytes_with_nul_unchecked(bytes) }
}
-#[used]
-#[link_section = ".CRT$XCU"]
-static INIT_TABLE_ENTRY: unsafe extern "C" fn() = init;
-
-/// This is where the magic preloading of symbols happens.
-///
-/// Note that any functions included here will be unconditionally included in
-/// the final binary, regardless of whether or not they're actually used.
-///
-/// Therefore, this is limited to `compat_fn_optional` functions which must be
-/// preloaded and any functions which may be more time sensitive, even for the first call.
-unsafe extern "C" fn init() {
- // There is no locking here. This code is executed before main() is entered, and
- // is guaranteed to be single-threaded.
- //
- // DO NOT do anything interesting or complicated in this function! DO NOT call
- // any Rust functions or CRT functions if those functions touch any global state,
- // because this function runs during global initialization. For example, DO NOT
- // do any dynamic allocation, don't call LoadLibrary, etc.
-
- if let Some(synch) = Module::new(c::SYNCH_API) {
- // These are optional and so we must manually attempt to load them
- // before they can be used.
- c::WaitOnAddress::preload(synch);
- c::WakeByAddressSingle::preload(synch);
- }
-
- if let Some(kernel32) = Module::new(c::KERNEL32) {
- // Preloading this means getting a precise time will be as fast as possible.
- c::GetSystemTimePreciseAsFileTime::preload(kernel32);
- }
-}
-
/// Represents a loaded module.
///
/// Note that the modules std depends on must not be unloaded.
@@ -151,7 +132,7 @@ impl Module {
macro_rules! compat_fn_with_fallback {
(pub static $module:ident: &CStr = $name:expr; $(
$(#[$meta:meta])*
- pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $fallback_body:block
+ $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty $fallback_body:block
)*) => (
pub static $module: &CStr = $name;
$(
@@ -196,11 +177,6 @@ macro_rules! compat_fn_with_fallback {
$fallback_body
}
- #[allow(unused)]
- pub(in crate::sys) fn preload(module: Module) {
- load_from_module(Some(module));
- }
-
#[inline(always)]
pub unsafe fn call($($argname: $argtype),*) -> $rettype {
let func: F = mem::transmute(PTR.load(Ordering::Relaxed));
@@ -208,66 +184,60 @@ macro_rules! compat_fn_with_fallback {
}
}
$(#[$meta])*
- pub use $symbol::call as $symbol;
+ $vis use $symbol::call as $symbol;
)*)
}
-/// A function that either exists or doesn't.
+/// Optionally loaded functions.
///
-/// NOTE: Optional functions must be preloaded in the `init` function above, or they will always be None.
+/// Actual loading of the function defers to $load_functions.
macro_rules! compat_fn_optional {
- (pub static $module:ident: &CStr = $name:expr; $(
- $(#[$meta:meta])*
- pub fn $symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty;
- )*) => (
- pub static $module: &CStr = $name;
+ ($load_functions:expr;
$(
- $(#[$meta])*
- pub mod $symbol {
- #[allow(unused_imports)]
- use super::*;
- use crate::mem;
- use crate::sync::atomic::{AtomicPtr, Ordering};
- use crate::sys::compat::Module;
- use crate::ptr::{self, NonNull};
-
- type F = unsafe extern "system" fn($($argtype),*) -> $rettype;
-
- /// `PTR` will either be `null()` or set to the loaded function.
- static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
-
- /// Only allow access to the function if it has loaded successfully.
- #[inline(always)]
- #[cfg(not(miri))]
- pub fn option() -> Option<F> {
- unsafe {
- NonNull::new(PTR.load(Ordering::Relaxed)).map(|f| mem::transmute(f))
+ $(#[$meta:meta])*
+ $vis:vis fn $symbol:ident($($argname:ident: $argtype:ty),*) $(-> $rettype:ty)?;
+ )+) => (
+ $(
+ pub mod $symbol {
+ use super::*;
+ use crate::ffi::c_void;
+ use crate::mem;
+ use crate::ptr::{self, NonNull};
+ use crate::sync::atomic::{AtomicPtr, Ordering};
+
+ pub(in crate::sys) static PTR: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
+
+ type F = unsafe extern "system" fn($($argtype),*) $(-> $rettype)?;
+
+ #[inline(always)]
+ pub fn option() -> Option<F> {
+ // Miri does not understand the way we do preloading
+ // therefore load the function here instead.
+ #[cfg(miri)] $load_functions;
+ NonNull::new(PTR.load(Ordering::Relaxed)).map(|f| unsafe { mem::transmute(f) })
}
}
+ )+
+ )
+}
- // Miri does not understand the way we do preloading
- // therefore load the function here instead.
- #[cfg(miri)]
- pub fn option() -> Option<F> {
- let mut func = NonNull::new(PTR.load(Ordering::Relaxed));
- if func.is_none() {
- unsafe { Module::new($module).map(preload) };
- func = NonNull::new(PTR.load(Ordering::Relaxed));
- }
- unsafe {
- func.map(|f| mem::transmute(f))
- }
- }
+/// Load all needed functions from "api-ms-win-core-synch-l1-2-0".
+pub(super) fn load_synch_functions() {
+ fn try_load() -> Option<()> {
+ const MODULE_NAME: &CStr = ansi_str!("api-ms-win-core-synch-l1-2-0");
+ const WAIT_ON_ADDRESS: &CStr = ansi_str!("WaitOnAddress");
+ const WAKE_BY_ADDRESS_SINGLE: &CStr = ansi_str!("WakeByAddressSingle");
+
+ // Try loading the library and all the required functions.
+ // If any step fails, then they all fail.
+ let library = unsafe { Module::new(MODULE_NAME) }?;
+ let wait_on_address = library.proc_address(WAIT_ON_ADDRESS)?;
+ let wake_by_address_single = library.proc_address(WAKE_BY_ADDRESS_SINGLE)?;
+
+ c::WaitOnAddress::PTR.store(wait_on_address.as_ptr(), Ordering::Relaxed);
+ c::WakeByAddressSingle::PTR.store(wake_by_address_single.as_ptr(), Ordering::Relaxed);
+ Some(())
+ }
- #[allow(unused)]
- pub(in crate::sys) fn preload(module: Module) {
- unsafe {
- static SYMBOL_NAME: &CStr = ansi_str!(sym $symbol);
- if let Some(f) = module.proc_address(SYMBOL_NAME) {
- PTR.store(f.as_ptr(), Ordering::Relaxed);
- }
- }
- }
- }
- )*)
+ try_load();
}
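
The rewritten `compat_fn_optional` boils down to an `AtomicPtr` per symbol: a loader fills it in, and `option()` hands out the function only if loading succeeded. A freestanding sketch of that pattern, assuming symbol lookup is stubbed out (`lookup` stands in for the module/`GetProcAddress` machinery and is not std's real loader):

    use std::ffi::c_void;
    use std::mem;
    use std::ptr::{self, NonNull};
    use std::sync::atomic::{AtomicPtr, Ordering};

    type WaitFn = unsafe extern "system" fn(*mut c_void, *mut c_void, usize, u32);

    // Null until a loader stores the real address; readers only ever see
    // null or a valid function pointer.
    static WAIT_ON_ADDRESS: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());

    // Stand-in for the real symbol lookup; always fails in this sketch.
    fn lookup(_symbol: &str) -> Option<NonNull<c_void>> {
        None
    }

    fn load_functions() {
        if let Some(f) = lookup("WaitOnAddress") {
            WAIT_ON_ADDRESS.store(f.as_ptr(), Ordering::Relaxed);
        }
    }

    fn wait_on_address() -> Option<WaitFn> {
        // Only hand out the function if loading succeeded.
        NonNull::new(WAIT_ON_ADDRESS.load(Ordering::Relaxed))
            .map(|f| unsafe { mem::transmute::<*mut c_void, WaitFn>(f.as_ptr()) })
    }

    fn main() {
        load_functions();
        assert!(wait_on_address().is_none());
    }
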
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index aed082b3e..378098038 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -1,9 +1,10 @@
use crate::os::windows::prelude::*;
+use crate::borrow::Cow;
use crate::ffi::OsString;
use crate::fmt;
-use crate::io::{self, Error, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
-use crate::mem;
+use crate::io::{self, BorrowedCursor, Error, IoSlice, IoSliceMut, SeekFrom};
+use crate::mem::{self, MaybeUninit};
use crate::os::windows::io::{AsHandle, BorrowedHandle};
use crate::path::{Path, PathBuf};
use crate::ptr;
@@ -11,7 +12,7 @@ use crate::slice;
use crate::sync::Arc;
use crate::sys::handle::Handle;
use crate::sys::time::SystemTime;
-use crate::sys::{c, cvt};
+use crate::sys::{c, cvt, Align8};
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::thread;
@@ -326,9 +327,15 @@ impl File {
cvt(c::GetFileInformationByHandle(self.handle.as_raw_handle(), &mut info))?;
let mut reparse_tag = 0;
if info.dwFileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
- let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- if let Ok((_, buf)) = self.reparse_point(&mut b) {
- reparse_tag = buf.ReparseTag;
+ let mut attr_tag: c::FILE_ATTRIBUTE_TAG_INFO = mem::zeroed();
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileAttributeTagInfo,
+ ptr::addr_of_mut!(attr_tag).cast(),
+ mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+ ))?;
+ if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ reparse_tag = attr_tag.ReparseTag;
}
}
Ok(FileAttr {
@@ -389,9 +396,15 @@ impl File {
attr.file_size = info.AllocationSize as u64;
attr.number_of_links = Some(info.NumberOfLinks);
if attr.file_type().is_reparse_point() {
- let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- if let Ok((_, buf)) = self.reparse_point(&mut b) {
- attr.reparse_tag = buf.ReparseTag;
+ let mut attr_tag: c::FILE_ATTRIBUTE_TAG_INFO = mem::zeroed();
+ cvt(c::GetFileInformationByHandleEx(
+ self.handle.as_raw_handle(),
+ c::FileAttributeTagInfo,
+ ptr::addr_of_mut!(attr_tag).cast(),
+ mem::size_of::<c::FILE_ATTRIBUTE_TAG_INFO>().try_into().unwrap(),
+ ))?;
+ if attr_tag.FileAttributes & c::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
+ attr.reparse_tag = attr_tag.ReparseTag;
}
}
Ok(attr)
@@ -415,8 +428,8 @@ impl File {
self.handle.read_at(buf, offset)
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- self.handle.read_buf(buf)
+ pub fn read_buf(&self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ self.handle.read_buf(cursor)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -458,38 +471,46 @@ impl File {
Ok(Self { handle: self.handle.try_clone()? })
}
- fn reparse_point<'a>(
+ // NB: returned pointer is derived from `space`, and has provenance to
+ // match. A raw pointer is returned rather than a reference in order to
+ // avoid narrowing provenance to the actual `REPARSE_DATA_BUFFER`.
+ fn reparse_point(
&self,
- space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE],
- ) -> io::Result<(c::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
+ space: &mut Align8<[MaybeUninit<u8>]>,
+ ) -> io::Result<(c::DWORD, *const c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
cvt({
+ // Grab this in advance to avoid it invalidating the pointer
+ // we get from `space.0.as_mut_ptr()`.
+ let len = space.0.len();
c::DeviceIoControl(
self.handle.as_raw_handle(),
c::FSCTL_GET_REPARSE_POINT,
ptr::null_mut(),
0,
- space.as_mut_ptr() as *mut _,
- space.len() as c::DWORD,
+ space.0.as_mut_ptr().cast(),
+ len as c::DWORD,
&mut bytes,
ptr::null_mut(),
)
})?;
- Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
+ const _: () = assert!(core::mem::align_of::<c::REPARSE_DATA_BUFFER>() <= 8);
+ Ok((bytes, space.0.as_ptr().cast::<c::REPARSE_DATA_BUFFER>()))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
- let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ let mut space = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]);
let (_bytes, buf) = self.reparse_point(&mut space)?;
unsafe {
- let (path_buffer, subst_off, subst_len, relative) = match buf.ReparseTag {
+ let (path_buffer, subst_off, subst_len, relative) = match (*buf).ReparseTag {
c::IO_REPARSE_TAG_SYMLINK => {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
- &buf.rest as *const _ as *const _;
+ ptr::addr_of!((*buf).rest).cast();
+ assert!(info.is_aligned());
(
- &(*info).PathBuffer as *const _ as *const u16,
+ ptr::addr_of!((*info).PathBuffer).cast::<u16>(),
(*info).SubstituteNameOffset / 2,
(*info).SubstituteNameLength / 2,
(*info).Flags & c::SYMLINK_FLAG_RELATIVE != 0,
@@ -497,9 +518,10 @@ impl File {
}
c::IO_REPARSE_TAG_MOUNT_POINT => {
let info: *const c::MOUNT_POINT_REPARSE_BUFFER =
- &buf.rest as *const _ as *const _;
+ ptr::addr_of!((*buf).rest).cast();
+ assert!(info.is_aligned());
(
- &(*info).PathBuffer as *const _ as *const u16,
+ ptr::addr_of!((*info).PathBuffer).cast::<u16>(),
(*info).SubstituteNameOffset / 2,
(*info).SubstituteNameLength / 2,
false,
@@ -512,7 +534,7 @@ impl File {
));
}
};
- let subst_ptr = path_buffer.offset(subst_off as isize);
+ let subst_ptr = path_buffer.add(subst_off.into());
let mut subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
// Absolute paths start with an NT internal namespace prefix `\??\`
// We should not let it leak through.
@@ -551,6 +573,14 @@ impl File {
"Cannot set file timestamp to 0",
));
}
+ let is_max =
+ |t: c::FILETIME| t.dwLowDateTime == c::DWORD::MAX && t.dwHighDateTime == c::DWORD::MAX;
+ if times.accessed.map_or(false, is_max) || times.modified.map_or(false, is_max) {
+ return Err(io::const_io_error!(
+ io::ErrorKind::InvalidInput,
+ "Cannot set file timestamp to 0xFFFF_FFFF_FFFF_FFFF",
+ ));
+ }
cvt(unsafe {
c::SetFileTime(self.as_handle(), None, times.accessed.as_ref(), times.modified.as_ref())
})?;
@@ -649,27 +679,31 @@ impl File {
/// A buffer for holding directory entries.
struct DirBuff {
- buffer: Vec<u8>,
+ buffer: Box<Align8<[MaybeUninit<u8>; Self::BUFFER_SIZE]>>,
}
impl DirBuff {
+ const BUFFER_SIZE: usize = 1024;
fn new() -> Self {
- const BUFFER_SIZE: usize = 1024;
- Self { buffer: vec![0_u8; BUFFER_SIZE] }
+ Self {
+ // Safety: `Align8<[MaybeUninit<u8>; N]>` does not need
+ // initialization.
+ buffer: unsafe { Box::new_uninit().assume_init() },
+ }
}
fn capacity(&self) -> usize {
- self.buffer.len()
+ self.buffer.0.len()
}
fn as_mut_ptr(&mut self) -> *mut u8 {
- self.buffer.as_mut_ptr().cast()
+ self.buffer.0.as_mut_ptr().cast()
}
/// Returns a `DirBuffIter`.
fn iter(&self) -> DirBuffIter<'_> {
DirBuffIter::new(self)
}
}
-impl AsRef<[u8]> for DirBuff {
- fn as_ref(&self) -> &[u8] {
- &self.buffer
+impl AsRef<[MaybeUninit<u8>]> for DirBuff {
+ fn as_ref(&self) -> &[MaybeUninit<u8>] {
+ &self.buffer.0
}
}
@@ -677,7 +711,7 @@ impl AsRef<[u8]> for DirBuff {
///
/// Currently only returns file names (UTF-16 encoded).
struct DirBuffIter<'a> {
- buffer: Option<&'a [u8]>,
+ buffer: Option<&'a [MaybeUninit<u8>]>,
cursor: usize,
}
impl<'a> DirBuffIter<'a> {
@@ -686,23 +720,34 @@ impl<'a> DirBuffIter<'a> {
}
}
impl<'a> Iterator for DirBuffIter<'a> {
- type Item = (&'a [u16], bool);
+ type Item = (Cow<'a, [u16]>, bool);
fn next(&mut self) -> Option<Self::Item> {
use crate::mem::size_of;
let buffer = &self.buffer?[self.cursor..];
// Get the name and next entry from the buffer.
- // SAFETY: The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the
- // last field (the file name) is unsized. So an offset has to be
- // used to get the file name slice.
+ // SAFETY:
+ // - The buffer contains a `FILE_ID_BOTH_DIR_INFO` struct but the last
+ // field (the file name) is unsized. So an offset has to be used to
+ // get the file name slice.
+ // - The OS has guaranteed initialization of the fields of
+ // `FILE_ID_BOTH_DIR_INFO` and the trailing filename (for at least
+ // `FileNameLength` bytes)
let (name, is_directory, next_entry) = unsafe {
let info = buffer.as_ptr().cast::<c::FILE_ID_BOTH_DIR_INFO>();
- let next_entry = (*info).NextEntryOffset as usize;
- let name = crate::slice::from_raw_parts(
- (*info).FileName.as_ptr().cast::<u16>(),
- (*info).FileNameLength as usize / size_of::<u16>(),
+ // While this is guaranteed to be aligned in documentation for
+ // https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_both_dir_info
+ // it does not seem that reality is so kind, and assuming this
+ // caused crashes in some cases (https://github.com/rust-lang/rust/issues/104530)
+ // presumably, this can be blamed on buggy filesystem drivers, but who knows.
+ let next_entry = ptr::addr_of!((*info).NextEntryOffset).read_unaligned() as usize;
+ let length = ptr::addr_of!((*info).FileNameLength).read_unaligned() as usize;
+ let attrs = ptr::addr_of!((*info).FileAttributes).read_unaligned();
+ let name = from_maybe_unaligned(
+ ptr::addr_of!((*info).FileName).cast::<u16>(),
+ length / size_of::<u16>(),
);
- let is_directory = ((*info).FileAttributes & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
+ let is_directory = (attrs & c::FILE_ATTRIBUTE_DIRECTORY) != 0;
(name, is_directory, next_entry)
};
@@ -715,13 +760,21 @@ impl<'a> Iterator for DirBuffIter<'a> {
// Skip `.` and `..` pseudo entries.
const DOT: u16 = b'.' as u16;
- match name {
+ match &name[..] {
[DOT] | [DOT, DOT] => self.next(),
_ => Some((name, is_directory)),
}
}
}
+unsafe fn from_maybe_unaligned<'a>(p: *const u16, len: usize) -> Cow<'a, [u16]> {
+ if p.is_aligned() {
+ Cow::Borrowed(crate::slice::from_raw_parts(p, len))
+ } else {
+ Cow::Owned((0..len).map(|i| p.add(i).read_unaligned()).collect())
+ }
+}
+
/// Open a link relative to the parent directory, ensure no symlinks are followed.
fn open_link_no_reparse(parent: &File, name: &[u16], access: u32) -> io::Result<File> {
// This is implemented using the lower level `NtCreateFile` function as
@@ -1077,13 +1130,13 @@ fn remove_dir_all_iterative(f: &File, delete: fn(&File) -> io::Result<()>) -> io
if is_directory {
let child_dir = open_link_no_reparse(
&dir,
- name,
+ &name,
c::SYNCHRONIZE | c::DELETE | c::FILE_LIST_DIRECTORY,
)?;
dirlist.push(child_dir);
} else {
for i in 1..=MAX_RETRIES {
- let result = open_link_no_reparse(&dir, name, c::SYNCHRONIZE | c::DELETE);
+ let result = open_link_no_reparse(&dir, &name, c::SYNCHRONIZE | c::DELETE);
match result {
Ok(f) => delete(&f)?,
// Already deleted, so skip.
@@ -1337,18 +1390,19 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
let h = f.as_inner().as_raw_handle();
unsafe {
- let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
- let db = data.as_mut_ptr() as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
- let buf = &mut (*db).ReparseTarget as *mut c::WCHAR;
+ let mut data = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]);
+ let data_ptr = data.0.as_mut_ptr();
+ let db = data_ptr.cast::<c::REPARSE_MOUNTPOINT_DATA_BUFFER>();
+ let buf = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>();
let mut i = 0;
// FIXME: this conversion is very hacky
let v = br"\??\";
let v = v.iter().map(|x| *x as u16);
for c in v.chain(original.as_os_str().encode_wide()) {
- *buf.offset(i) = c;
+ *buf.add(i) = c;
i += 1;
}
- *buf.offset(i) = 0;
+ *buf.add(i) = 0;
i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as c::WORD;
@@ -1359,7 +1413,7 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> {
cvt(c::DeviceIoControl(
h as *mut _,
c::FSCTL_SET_REPARSE_POINT,
- data.as_ptr() as *mut _,
+ data_ptr.cast(),
(*db).ReparseDataLength + 8,
ptr::null_mut(),
0,
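
Several of the hunks above replace reference-based field access with `ptr::addr_of!` plus `read_unaligned`, because the kernel-filled buffers do not reliably align `FILE_ID_BOTH_DIR_INFO`. A small, self-contained illustration of that access pattern on a packed struct (the struct and values are invented for the example):

    use std::ptr;

    #[repr(C, packed)]
    struct Entry {
        next_offset: u32,
        name_len: u32,
    }

    fn read_fields(e: *const Entry) -> (u32, u32) {
        // Creating a reference to a possibly misaligned field is not allowed;
        // `addr_of!` + `read_unaligned` reads it without ever forming one.
        unsafe {
            let next = ptr::addr_of!((*e).next_offset).read_unaligned();
            let len = ptr::addr_of!((*e).name_len).read_unaligned();
            (next, len)
        }
    }

    fn main() {
        let e = Entry { next_offset: 64, name_len: 10 };
        assert_eq!(read_fields(&e), (64, 10));
    }
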
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index e24b09cc9..ae33d48c6 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -4,7 +4,7 @@
mod tests;
use crate::cmp;
-use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, Read, ReadBuf};
+use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, Read};
use crate::mem;
use crate::os::windows::io::{
AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle, OwnedHandle, RawHandle,
@@ -112,18 +112,16 @@ impl Handle {
}
}
- pub fn read_buf(&self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
- let res = unsafe {
- self.synchronous_read(buf.unfilled_mut().as_mut_ptr(), buf.remaining(), None)
- };
+ pub fn read_buf(&self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
+ let res =
+ unsafe { self.synchronous_read(cursor.as_mut().as_mut_ptr(), cursor.capacity(), None) };
match res {
Ok(read) => {
// Safety: `read` bytes were written to the initialized portion of the buffer
unsafe {
- buf.assume_init(read as usize);
+ cursor.advance(read as usize);
}
- buf.add_filled(read as usize);
Ok(())
}
diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs
index fb06df1f8..2cc34c986 100644
--- a/library/std/src/sys/windows/io.rs
+++ b/library/std/src/sys/windows/io.rs
@@ -1,6 +1,10 @@
use crate::marker::PhantomData;
+use crate::mem::size_of;
+use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle};
use crate::slice;
-use crate::sys::c;
+use crate::sys::{c, Align8};
+use core;
+use libc;
#[derive(Copy, Clone)]
#[repr(transparent)]
@@ -78,3 +82,73 @@ impl<'a> IoSliceMut<'a> {
unsafe { slice::from_raw_parts_mut(self.vec.buf as *mut u8, self.vec.len as usize) }
}
}
+
+pub fn is_terminal(h: &impl AsHandle) -> bool {
+ unsafe { handle_is_console(h.as_handle()) }
+}
+
+unsafe fn handle_is_console(handle: BorrowedHandle<'_>) -> bool {
+ let handle = handle.as_raw_handle();
+
+ // A null handle means the process has no console.
+ if handle.is_null() {
+ return false;
+ }
+
+ let mut out = 0;
+ if c::GetConsoleMode(handle, &mut out) != 0 {
+ // False positives aren't possible. If we got a console then we definitely have a console.
+ return true;
+ }
+
+ // At this point, we *could* have a false negative. We can determine that this is a true
+ // negative if we can detect the presence of a console on any of the standard I/O streams. If
+ // another stream has a console, then we know we're in a Windows console and can therefore
+ // trust the negative.
+ for std_handle in [c::STD_INPUT_HANDLE, c::STD_OUTPUT_HANDLE, c::STD_ERROR_HANDLE] {
+ let std_handle = c::GetStdHandle(std_handle);
+ if !std_handle.is_null()
+ && std_handle != handle
+ && c::GetConsoleMode(std_handle, &mut out) != 0
+ {
+ return false;
+ }
+ }
+
+ // Otherwise, we fall back to an msys hack to see if we can detect the presence of a pty.
+ msys_tty_on(handle)
+}
+
+unsafe fn msys_tty_on(handle: c::HANDLE) -> bool {
+ // Early return if the handle is not a pipe.
+ if c::GetFileType(handle) != c::FILE_TYPE_PIPE {
+ return false;
+ }
+
+ const SIZE: usize = size_of::<c::FILE_NAME_INFO>() + c::MAX_PATH * size_of::<c::WCHAR>();
+ let mut name_info_bytes = Align8([0u8; SIZE]);
+ let res = c::GetFileInformationByHandleEx(
+ handle,
+ c::FileNameInfo,
+ name_info_bytes.0.as_mut_ptr() as *mut libc::c_void,
+ SIZE as u32,
+ );
+ if res == 0 {
+ return false;
+ }
+ let name_info: &c::FILE_NAME_INFO = &*(name_info_bytes.0.as_ptr() as *const c::FILE_NAME_INFO);
+ let name_len = name_info.FileNameLength as usize / 2;
+ // Offset to get the `FileName` field.
+ let name_ptr = name_info_bytes.0.as_ptr().offset(size_of::<c::DWORD>() as isize).cast::<u16>();
+ let s = core::slice::from_raw_parts(name_ptr, name_len);
+ let name = String::from_utf16_lossy(s);
+ // Get the file name only.
+ let name = name.rsplit('\\').next().unwrap_or(&name);
+ // This checks whether 'pty' exists in the file name, which indicates that
+ // a pseudo-terminal is attached. To mitigate against false positives
+ // (e.g., an actual file name that contains 'pty'), we also require that
+// the file name begins with either of the strings 'msys-' or 'cygwin-'.
+ let is_msys = name.starts_with("msys-") || name.starts_with("cygwin-");
+ let is_pty = name.contains("-pty");
+ is_msys && is_pty
+}
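
The pseudo-terminal detection ultimately reduces to a string check on the pipe's name. The same logic extracted as a pure function for illustration (the helper name and sample pipe names are made up; the real check lives in `msys_tty_on` above):

    fn looks_like_msys_pty(pipe_name: &str) -> bool {
        // Keep only the final path component of the pipe name.
        let name = pipe_name.rsplit('\\').next().unwrap_or(pipe_name);
        let is_msys = name.starts_with("msys-") || name.starts_with("cygwin-");
        let is_pty = name.contains("-pty");
        is_msys && is_pty
    }

    fn main() {
        assert!(looks_like_msys_pty(r"\msys-1888ae32e00d56aa-pty0-to-master"));
        assert!(!looks_like_msys_pty(r"\plain-file-with-pty-in-the-name"));
    }
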
diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs
index d412ff152..602a2d623 100644
--- a/library/std/src/sys/windows/locks/mod.rs
+++ b/library/std/src/sys/windows/locks/mod.rs
@@ -3,4 +3,4 @@ mod mutex;
mod rwlock;
pub use condvar::{Condvar, MovableCondvar};
pub use mutex::{MovableMutex, Mutex};
-pub use rwlock::{MovableRwLock, RwLock};
+pub use rwlock::MovableRwLock;
diff --git a/library/std/src/sys/windows/locks/mutex.rs b/library/std/src/sys/windows/locks/mutex.rs
index f91e8f9f5..91207f5f4 100644
--- a/library/std/src/sys/windows/locks/mutex.rs
+++ b/library/std/src/sys/windows/locks/mutex.rs
@@ -37,8 +37,6 @@ impl Mutex {
pub const fn new() -> Mutex {
Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
}
- #[inline]
- pub unsafe fn init(&mut self) {}
#[inline]
pub unsafe fn lock(&self) {
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index b3f6d2d0a..eab9b9612 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -2,6 +2,7 @@
use crate::ffi::{CStr, OsStr, OsString};
use crate::io::ErrorKind;
+use crate::mem::MaybeUninit;
use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::path::PathBuf;
use crate::time::Duration;
@@ -47,7 +48,7 @@ cfg_if::cfg_if! {
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
-pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
+pub unsafe fn init(_argc: isize, _argv: *const *const u8, _sigpipe: u8) {
stack_overflow::init();
// Normally, `thread::spawn` will call `Thread::set_name` but since this thread already
@@ -204,8 +205,8 @@ where
// This initial size also works around `GetFullPathNameW` returning
// incorrect size hints for some short paths:
// https://github.com/dylni/normpath/issues/5
- let mut stack_buf = [0u16; 512];
- let mut heap_buf = Vec::new();
+ let mut stack_buf: [MaybeUninit<u16>; 512] = MaybeUninit::uninit_array();
+ let mut heap_buf: Vec<MaybeUninit<u16>> = Vec::new();
unsafe {
let mut n = stack_buf.len();
loop {
@@ -214,6 +215,11 @@ where
} else {
let extra = n - heap_buf.len();
heap_buf.reserve(extra);
+ // We used `reserve` and not `reserve_exact`, so in theory we
+ // may have gotten more than requested. If so, we'd like to use
+ // it... so long as we won't cause overflow.
+ n = heap_buf.capacity().min(c::DWORD::MAX as usize);
+ // Safety: MaybeUninit<u16> does not need initialization
heap_buf.set_len(n);
&mut heap_buf[..]
};
@@ -228,13 +234,13 @@ where
// error" is still 0 then we interpret it as a 0 length buffer and
// not an actual error.
c::SetLastError(0);
- let k = match f1(buf.as_mut_ptr(), n as c::DWORD) {
+ let k = match f1(buf.as_mut_ptr().cast::<u16>(), n as c::DWORD) {
0 if c::GetLastError() == 0 => 0,
0 => return Err(crate::io::Error::last_os_error()),
n => n,
} as usize;
if k == n && c::GetLastError() == c::ERROR_INSUFFICIENT_BUFFER {
- n *= 2;
+ n = n.saturating_mul(2).min(c::DWORD::MAX as usize);
} else if k > n {
n = k;
} else if k == n {
@@ -244,7 +250,9 @@ where
// Therefore k never equals n.
unreachable!();
} else {
- return Ok(f2(&buf[..k]));
+ // Safety: First `k` values are initialized.
+ let slice: &[u16] = MaybeUninit::slice_assume_init_ref(&buf[..k]);
+ return Ok(f2(slice));
}
}
}
@@ -321,3 +329,11 @@ pub fn abort_internal() -> ! {
}
crate::intrinsics::abort();
}
+
+/// Align the inner value to 8 bytes.
+///
+/// This is enough for almost all of the buffers we're likely to work with in
+/// the Windows APIs we use.
+#[repr(C, align(8))]
+#[derive(Copy, Clone)]
+pub(crate) struct Align8<T: ?Sized>(pub T);
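
The wide-string helper now grows an uninitialized buffer, caps the requested length at `DWORD::MAX`, and only treats the first `k` values as initialized. A simplified, OS-free sketch of the same retry loop, assuming a `fill` callback that stands in for the Win32 call and reports the required capacity when the buffer is too small:

    use std::mem::MaybeUninit;

    // `fill` writes up to `buf.len()` values and returns how many it wrote,
    // or the required capacity if the buffer was too small.
    fn grow_and_fill(mut fill: impl FnMut(&mut [MaybeUninit<u16>]) -> usize) -> Vec<u16> {
        let mut buf: Vec<MaybeUninit<u16>> = Vec::with_capacity(512);
        loop {
            // Use the whole capacity; `MaybeUninit<u16>` needs no initialization.
            unsafe { buf.set_len(buf.capacity()) };
            let k = fill(&mut buf[..]);
            if k <= buf.len() {
                // Only the first `k` values were written by `fill`.
                return buf[..k].iter().map(|v| unsafe { v.assume_init() }).collect();
            }
            buf.reserve(k - buf.len()); // grow and retry
        }
    }

    fn main() {
        let data: Vec<u16> = (0..2000).collect();
        let out = grow_and_fill(|buf| {
            if buf.len() < data.len() {
                return data.len(); // too small: report the required size
            }
            for (dst, &src) in buf.iter_mut().zip(&data) {
                *dst = MaybeUninit::new(src);
            }
            data.len()
        });
        assert_eq!(out, data);
    }
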
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index bcac996c0..352337ba3 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -99,11 +99,11 @@ impl Iterator for Env {
}
let p = self.cur as *const u16;
let mut len = 0;
- while *p.offset(len) != 0 {
+ while *p.add(len) != 0 {
len += 1;
}
- let s = slice::from_raw_parts(p, len as usize);
- self.cur = self.cur.offset(len + 1);
+ let s = slice::from_raw_parts(p, len);
+ self.cur = self.cur.add(len + 1);
// Windows allows environment variables to start with an equals
// symbol (in any other position, this is the separator between
diff --git a/library/std/src/sys/windows/os_str.rs b/library/std/src/sys/windows/os_str.rs
index 11883f150..4bdd8c505 100644
--- a/library/std/src/sys/windows/os_str.rs
+++ b/library/std/src/sys/windows/os_str.rs
@@ -164,9 +164,7 @@ impl Slice {
}
pub fn to_owned(&self) -> Buf {
- let mut buf = Wtf8Buf::with_capacity(self.inner.len());
- buf.push_wtf8(&self.inner);
- Buf { inner: buf }
+ Buf { inner: self.inner.to_owned() }
}
pub fn clone_into(&self, buf: &mut Buf) {
diff --git a/library/std/src/sys/windows/path/tests.rs b/library/std/src/sys/windows/path/tests.rs
index 6eab38cab..623c62361 100644
--- a/library/std/src/sys/windows/path/tests.rs
+++ b/library/std/src/sys/windows/path/tests.rs
@@ -105,7 +105,7 @@ fn test_parse_prefix_verbatim_device() {
assert_eq!(prefix, parse_prefix(r"\\?/C:\windows\system32\notepad.exe"));
}
-// See #93586 for more infomation.
+// See #93586 for more information.
#[test]
fn test_windows_prefix_components() {
use crate::path::Path;
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 02d5af471..9cbb4ef19 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -16,6 +16,7 @@ use crate::os::windows::ffi::{OsStrExt, OsStringExt};
use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle, FromRawHandle, IntoRawHandle};
use crate::path::{Path, PathBuf};
use crate::ptr;
+use crate::sync::Mutex;
use crate::sys::args::{self, Arg};
use crate::sys::c;
use crate::sys::c::NonZeroDWORD;
@@ -25,7 +26,6 @@ use crate::sys::handle::Handle;
use crate::sys::path;
use crate::sys::pipe::{self, AnonPipe};
use crate::sys::stdio;
-use crate::sys_common::mutex::StaticMutex;
use crate::sys_common::process::{CommandEnv, CommandEnvs};
use crate::sys_common::IntoInner;
@@ -301,9 +301,9 @@ impl Command {
//
// For more information, msdn also has an article about this race:
// https://support.microsoft.com/kb/315939
- static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new();
+ static CREATE_PROCESS_LOCK: Mutex<()> = Mutex::new(());
- let _guard = unsafe { CREATE_PROCESS_LOCK.lock() };
+ let _guard = CREATE_PROCESS_LOCK.lock();
let mut pipes = StdioPipes { stdin: None, stdout: None, stderr: None };
let null = Stdio::Null;
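
With `Mutex::new` now usable in constants, the process-spawn lock becomes an ordinary `std::sync::Mutex` in a `static` instead of the internal `StaticMutex`. The pattern in miniature (the guarded call is a placeholder):

    use std::sync::Mutex;

    static CREATE_LOCK: Mutex<()> = Mutex::new(());

    fn spawn_guarded() {
        // Hold the guard for the duration of the non-thread-safe call.
        let _guard = CREATE_LOCK.lock().unwrap();
        // ... perform the racy OS call here ...
    }

    fn main() {
        spawn_guarded();
    }
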
diff --git a/library/std/src/sys/windows/rand.rs b/library/std/src/sys/windows/rand.rs
index f8fd93a73..b5a49489d 100644
--- a/library/std/src/sys/windows/rand.rs
+++ b/library/std/src/sys/windows/rand.rs
@@ -1,35 +1,106 @@
-use crate::io;
+//! # Random key generation
+//!
+//! This module wraps the RNG provided by the OS. There are a few different
+//! ways to interface with the OS RNG so it's worth exploring each of the options.
+//! Note that at the time of writing these all go through the (undocumented)
+//! `bcryptPrimitives.dll` but they use different routes to get there.
+//!
+//! Originally we were using [`RtlGenRandom`], however that function is
+//! deprecated and warns it "may be altered or unavailable in subsequent versions".
+//!
+//! So we switched to [`BCryptGenRandom`] with the `BCRYPT_USE_SYSTEM_PREFERRED_RNG`
+//! flag to query and find the system configured RNG. However, this change caused a small
+//! but significant number of users to experience panics caused by a failure of
+//! this function. See [#94098].
+//!
+//! The current version falls back to using `BCryptOpenAlgorithmProvider` if
+//! `BCRYPT_USE_SYSTEM_PREFERRED_RNG` fails for any reason.
+//!
+//! [#94098]: https://github.com/rust-lang/rust/issues/94098
+//! [`RtlGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom
+//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
use crate::mem;
use crate::ptr;
use crate::sys::c;
+/// Generates high quality secure random keys for use by [`HashMap`].
+///
+/// This is used to seed the default [`RandomState`].
+///
+/// [`HashMap`]: crate::collections::HashMap
+/// [`RandomState`]: crate::collections::hash_map::RandomState
pub fn hashmap_random_keys() -> (u64, u64) {
- let mut v = (0, 0);
- let ret = unsafe {
- c::BCryptGenRandom(
- ptr::null_mut(),
- &mut v as *mut _ as *mut u8,
- mem::size_of_val(&v) as c::ULONG,
- c::BCRYPT_USE_SYSTEM_PREFERRED_RNG,
- )
- };
- if ret != 0 { fallback_rng() } else { v }
+ Rng::SYSTEM.gen_random_keys().unwrap_or_else(fallback_rng)
}
-/// Generate random numbers using the fallback RNG function (RtlGenRandom)
-#[cfg(not(target_vendor = "uwp"))]
-#[inline(never)]
-fn fallback_rng() -> (u64, u64) {
- let mut v = (0, 0);
- let ret =
- unsafe { c::RtlGenRandom(&mut v as *mut _ as *mut u8, mem::size_of_val(&v) as c::ULONG) };
+struct Rng {
+ algorithm: c::BCRYPT_ALG_HANDLE,
+ flags: u32,
+}
+impl Rng {
+ const SYSTEM: Self = unsafe { Self::new(ptr::null_mut(), c::BCRYPT_USE_SYSTEM_PREFERRED_RNG) };
+
+ /// Create the RNG from an existing algorithm handle.
+ ///
+ /// # Safety
+ ///
+ /// The handle must either be null or a valid algorithm handle.
+ const unsafe fn new(algorithm: c::BCRYPT_ALG_HANDLE, flags: u32) -> Self {
+ Self { algorithm, flags }
+ }
+
+ /// Open a handle to the RNG algorithm.
+ fn open() -> Result<Self, c::NTSTATUS> {
+ use crate::sync::atomic::AtomicPtr;
+ use crate::sync::atomic::Ordering::{Acquire, Release};
+
+ // An atomic is used so we don't need to reopen the handle every time.
+ static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(ptr::null_mut());
+
+ let mut handle = HANDLE.load(Acquire);
+ if handle.is_null() {
+ let status = unsafe {
+ c::BCryptOpenAlgorithmProvider(
+ &mut handle,
+ c::BCRYPT_RNG_ALGORITHM.as_ptr(),
+ ptr::null(),
+ 0,
+ )
+ };
+ if c::nt_success(status) {
+ // If another thread opens a handle first then use that handle instead.
+ let result = HANDLE.compare_exchange(ptr::null_mut(), handle, Release, Acquire);
+ if let Err(previous_handle) = result {
+ // Close our handle and return the previous one.
+ unsafe { c::BCryptCloseAlgorithmProvider(handle, 0) };
+ handle = previous_handle;
+ }
+ Ok(unsafe { Self::new(handle, 0) })
+ } else {
+ Err(status)
+ }
+ } else {
+ Ok(unsafe { Self::new(handle, 0) })
+ }
+ }
- if ret != 0 { v } else { panic!("fallback RNG broken: {}", io::Error::last_os_error()) }
+ fn gen_random_keys(self) -> Result<(u64, u64), c::NTSTATUS> {
+ let mut v = (0, 0);
+ let status = unsafe {
+ let size = mem::size_of_val(&v).try_into().unwrap();
+ c::BCryptGenRandom(self.algorithm, ptr::addr_of_mut!(v).cast(), size, self.flags)
+ };
+ if c::nt_success(status) { Ok(v) } else { Err(status) }
+ }
}
-/// We can't use RtlGenRandom with UWP, so there is no fallback
-#[cfg(target_vendor = "uwp")]
+/// Generate random numbers using the fallback RNG function
#[inline(never)]
-fn fallback_rng() -> (u64, u64) {
- panic!("fallback RNG broken: RtlGenRandom() not supported on UWP");
+fn fallback_rng(rng_status: c::NTSTATUS) -> (u64, u64) {
+ match Rng::open().and_then(|rng| rng.gen_random_keys()) {
+ Ok(keys) => keys,
+ Err(status) => {
+ panic!("RNG broken: {rng_status:#x}, fallback RNG broken: {status:#x}")
+ }
+ }
}
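
Opening the fallback provider is cached in an `AtomicPtr`; if two threads race, the loser closes its freshly opened handle and adopts the winner's. The same first-one-wins idiom, with the OS handle replaced by a leaked `Box` so the sketch stays self-contained:

    use std::ptr;
    use std::sync::atomic::{
        AtomicPtr,
        Ordering::{Acquire, Release},
    };

    static CACHED: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

    fn get_handle() -> *mut u32 {
        let cur = CACHED.load(Acquire);
        if !cur.is_null() {
            return cur;
        }
        // Stand-in for opening a fresh OS handle.
        let new = Box::into_raw(Box::new(0xC0FFEE_u32));
        match CACHED.compare_exchange(ptr::null_mut(), new, Release, Acquire) {
            Ok(_) => new,
            Err(existing) => {
                // Another thread won the race: discard ours and use theirs.
                unsafe { drop(Box::from_raw(new)) };
                existing
            }
        }
    }

    fn main() {
        assert_eq!(get_handle(), get_handle());
    }
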
diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs
index a001d6b98..70c9b14a0 100644
--- a/library/std/src/sys/windows/stdio.rs
+++ b/library/std/src/sys/windows/stdio.rs
@@ -3,6 +3,7 @@
use crate::char::decode_utf16;
use crate::cmp;
use crate::io;
+use crate::mem::MaybeUninit;
use crate::os::windows::io::{FromRawHandle, IntoRawHandle};
use crate::ptr;
use crate::str;
@@ -169,13 +170,14 @@ fn write(
}
fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usize> {
- let mut utf16 = [0u16; MAX_BUFFER_SIZE / 2];
+ let mut utf16 = [MaybeUninit::<u16>::uninit(); MAX_BUFFER_SIZE / 2];
let mut len_utf16 = 0;
for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) {
- *dest = chr;
+ *dest = MaybeUninit::new(chr);
len_utf16 += 1;
}
- let utf16 = &utf16[..len_utf16];
+ // Safety: We've initialized `len_utf16` values.
+ let utf16: &[u16] = unsafe { MaybeUninit::slice_assume_init_ref(&utf16[..len_utf16]) };
let mut written = write_u16s(handle, &utf16)?;
@@ -250,11 +252,14 @@ impl io::Read for Stdin {
return Ok(bytes_copied);
} else if buf.len() - bytes_copied < 4 {
// Not enough space to get a UTF-8 byte. We will use the incomplete UTF8.
- let mut utf16_buf = [0u16; 1];
+ let mut utf16_buf = [MaybeUninit::new(0); 1];
// Read one u16 character.
let read = read_u16s_fixup_surrogates(handle, &mut utf16_buf, 1, &mut self.surrogate)?;
// Read bytes, using the (now-empty) self.incomplete_utf8 as extra space.
- let read_bytes = utf16_to_utf8(&utf16_buf[..read], &mut self.incomplete_utf8.bytes)?;
+ let read_bytes = utf16_to_utf8(
+ unsafe { MaybeUninit::slice_assume_init_ref(&utf16_buf[..read]) },
+ &mut self.incomplete_utf8.bytes,
+ )?;
// Read in the bytes from incomplete_utf8 until the buffer is full.
self.incomplete_utf8.len = read_bytes as u8;
@@ -262,15 +267,18 @@ impl io::Read for Stdin {
bytes_copied += self.incomplete_utf8.read(&mut buf[bytes_copied..]);
Ok(bytes_copied)
} else {
- let mut utf16_buf = [0u16; MAX_BUFFER_SIZE / 2];
+ let mut utf16_buf = [MaybeUninit::<u16>::uninit(); MAX_BUFFER_SIZE / 2];
+
// In the worst case, a UTF-8 string can take 3 bytes for every `u16` of a UTF-16. So
// we can read at most a third of `buf.len()` chars and uphold the guarantee no data gets
// lost.
let amount = cmp::min(buf.len() / 3, utf16_buf.len());
let read =
read_u16s_fixup_surrogates(handle, &mut utf16_buf, amount, &mut self.surrogate)?;
-
- match utf16_to_utf8(&utf16_buf[..read], buf) {
+ // Safety `read_u16s_fixup_surrogates` returns the number of items
+ // initialized.
+ let utf16s = unsafe { MaybeUninit::slice_assume_init_ref(&utf16_buf[..read]) };
+ match utf16_to_utf8(utf16s, buf) {
Ok(value) => return Ok(bytes_copied + value),
Err(e) => return Err(e),
}
@@ -283,14 +291,14 @@ impl io::Read for Stdin {
// This is a best effort, and might not work if we are not the only reader on Stdin.
fn read_u16s_fixup_surrogates(
handle: c::HANDLE,
- buf: &mut [u16],
+ buf: &mut [MaybeUninit<u16>],
mut amount: usize,
surrogate: &mut u16,
) -> io::Result<usize> {
// Insert possibly remaining unpaired surrogate from last read.
let mut start = 0;
if *surrogate != 0 {
- buf[0] = *surrogate;
+ buf[0] = MaybeUninit::new(*surrogate);
*surrogate = 0;
start = 1;
if amount == 1 {
@@ -303,7 +311,10 @@ fn read_u16s_fixup_surrogates(
let mut amount = read_u16s(handle, &mut buf[start..amount])? + start;
if amount > 0 {
- let last_char = buf[amount - 1];
+ // Safety: The returned `amount` is the number of values initialized,
+    // and it is not 0, so we know that `buf[amount - 1]` has been
+ // initialized.
+ let last_char = unsafe { buf[amount - 1].assume_init() };
if last_char >= 0xD800 && last_char <= 0xDBFF {
// high surrogate
*surrogate = last_char;
@@ -313,7 +324,8 @@ fn read_u16s_fixup_surrogates(
Ok(amount)
}
-fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
+// Returns `Ok(n)` if it initialized `n` values in `buf`.
+fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usize> {
// Configure the `pInputControl` parameter to not only return on `\r\n` but also Ctrl-Z, the
// traditional DOS method to indicate end of character stream / user input (SUB).
// See #38274 and https://stackoverflow.com/questions/43836040/win-api-readconsole.
@@ -346,8 +358,9 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [u16]) -> io::Result<usize> {
}
break;
}
-
- if amount > 0 && buf[amount as usize - 1] == CTRL_Z {
+ // Safety: if `amount > 0`, then that many bytes were written, so
+ // `buf[amount as usize - 1]` has been initialized.
+ if amount > 0 && unsafe { buf[amount as usize - 1].assume_init() } == CTRL_Z {
amount -= 1;
}
Ok(amount as usize)
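
The console read path now fills `MaybeUninit<u16>` buffers and treats only the prefix a read actually wrote as initialized. A compact sketch of that contract; `MaybeUninit::slice_assume_init_ref` is unstable, so the sketch converts element by element instead:

    use std::mem::MaybeUninit;

    // Fill at most `buf.len()` values and return how many were initialized.
    fn read_some(buf: &mut [MaybeUninit<u16>]) -> usize {
        let n = buf.len().min(3);
        for (i, slot) in buf[..n].iter_mut().enumerate() {
            *slot = MaybeUninit::new(i as u16);
        }
        n
    }

    fn main() {
        let mut buf = [MaybeUninit::<u16>::uninit(); 8];
        let n = read_some(&mut buf);
        // Safety: only the first `n` values were initialized by `read_some`.
        let filled: Vec<u16> = buf[..n].iter().map(|v| unsafe { v.assume_init() }).collect();
        assert_eq!(filled, [0, 1, 2]);
    }
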
diff --git a/library/std/src/sys/windows/thread_local_dtor.rs b/library/std/src/sys/windows/thread_local_dtor.rs
index 25d1c6e8e..9707a95df 100644
--- a/library/std/src/sys/windows/thread_local_dtor.rs
+++ b/library/std/src/sys/windows/thread_local_dtor.rs
@@ -8,10 +8,14 @@
#[thread_local]
static mut DESTRUCTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new();
+// Ensure this can never be inlined because otherwise this may break in dylibs.
+// See #44391.
+#[inline(never)]
pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
DESTRUCTORS.push((t, dtor));
}
+#[inline(never)] // See comment above
/// Runs destructors. This should not be called until thread exit.
pub unsafe fn run_keyless_dtors() {
// Drop all the destructors.
diff --git a/library/std/src/sys/windows/thread_local_key.rs b/library/std/src/sys/windows/thread_local_key.rs
index ec670238e..17628b757 100644
--- a/library/std/src/sys/windows/thread_local_key.rs
+++ b/library/std/src/sys/windows/thread_local_key.rs
@@ -1,11 +1,16 @@
-use crate::mem::ManuallyDrop;
+use crate::cell::UnsafeCell;
use crate::ptr;
-use crate::sync::atomic::AtomicPtr;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::{
+ AtomicPtr, AtomicU32,
+ Ordering::{AcqRel, Acquire, Relaxed, Release},
+};
use crate::sys::c;
-pub type Key = c::DWORD;
-pub type Dtor = unsafe extern "C" fn(*mut u8);
+#[cfg(test)]
+mod tests;
+
+type Key = c::DWORD;
+type Dtor = unsafe extern "C" fn(*mut u8);
// Turns out, like pretty much everything, Windows is pretty close to the
// functionality that Unix provides, but slightly different! In the case of
@@ -22,60 +27,109 @@ pub type Dtor = unsafe extern "C" fn(*mut u8);
// To accomplish this feat, we perform a number of tricks, all contained
// within this module:
//
-// * All TLS destructors are tracked by *us*, not the windows runtime. This
+// * All TLS destructors are tracked by *us*, not the Windows runtime. This
// means that we have a global list of destructors for each TLS key that
// we know about.
// * When a thread exits, we run over the entire list and run dtors for all
// non-null keys. This attempts to match Unix semantics in this regard.
//
-// This ends up having the overhead of using a global list, having some
-// locks here and there, and in general just adding some more code bloat. We
-// attempt to optimize runtime by forgetting keys that don't have
-// destructors, but this only gets us so far.
-//
// For more details and nitty-gritty, see the code sections below!
//
// [1]: https://www.codeproject.com/Articles/8113/Thread-Local-Storage-The-C-Way
-// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base
-// /threading/thread_local_storage_win.cc#L42
+// [2]: https://github.com/ChromiumWebApps/chromium/blob/master/base/threading/thread_local_storage_win.cc#L42
-// -------------------------------------------------------------------------
-// Native bindings
-//
-// This section is just raw bindings to the native functions that Windows
-// provides, There's a few extra calls to deal with destructors.
+pub struct StaticKey {
+ /// The key value shifted up by one. Since TLS_OUT_OF_INDEXES == DWORD::MAX
+ /// is not a valid key value, this allows us to use zero as sentinel value
+ /// without risking overflow.
+ key: AtomicU32,
+ dtor: Option<Dtor>,
+ next: AtomicPtr<StaticKey>,
+ /// Currently, destructors cannot be unregistered, so we cannot use racy
+    /// initialization for keys. Instead, we need synchronized initialization.
+ /// Use the Windows-provided `Once` since it does not require TLS.
+ once: UnsafeCell<c::INIT_ONCE>,
+}
-#[inline]
-pub unsafe fn create(dtor: Option<Dtor>) -> Key {
- let key = c::TlsAlloc();
- assert!(key != c::TLS_OUT_OF_INDEXES);
- if let Some(f) = dtor {
- register_dtor(key, f);
+impl StaticKey {
+ #[inline]
+ pub const fn new(dtor: Option<Dtor>) -> StaticKey {
+ StaticKey {
+ key: AtomicU32::new(0),
+ dtor,
+ next: AtomicPtr::new(ptr::null_mut()),
+ once: UnsafeCell::new(c::INIT_ONCE_STATIC_INIT),
+ }
}
- key
-}
-#[inline]
-pub unsafe fn set(key: Key, value: *mut u8) {
- let r = c::TlsSetValue(key, value as c::LPVOID);
- debug_assert!(r != 0);
-}
+ #[inline]
+ pub unsafe fn set(&'static self, val: *mut u8) {
+ let r = c::TlsSetValue(self.key(), val.cast());
+ debug_assert_eq!(r, c::TRUE);
+ }
-#[inline]
-pub unsafe fn get(key: Key) -> *mut u8 {
- c::TlsGetValue(key) as *mut u8
-}
+ #[inline]
+ pub unsafe fn get(&'static self) -> *mut u8 {
+ c::TlsGetValue(self.key()).cast()
+ }
-#[inline]
-pub unsafe fn destroy(_key: Key) {
- rtabort!("can't destroy tls keys on windows")
-}
+ #[inline]
+ unsafe fn key(&'static self) -> Key {
+ match self.key.load(Acquire) {
+ 0 => self.init(),
+ key => key - 1,
+ }
+ }
+
+ #[cold]
+ unsafe fn init(&'static self) -> Key {
+ if self.dtor.is_some() {
+ let mut pending = c::FALSE;
+ let r = c::InitOnceBeginInitialize(self.once.get(), 0, &mut pending, ptr::null_mut());
+ assert_eq!(r, c::TRUE);
-#[inline]
-pub fn requires_synchronized_create() -> bool {
- true
+ if pending == c::FALSE {
+ // Some other thread initialized the key, load it.
+ self.key.load(Relaxed) - 1
+ } else {
+ let key = c::TlsAlloc();
+ if key == c::TLS_OUT_OF_INDEXES {
+ // Wakeup the waiting threads before panicking to avoid deadlock.
+ c::InitOnceComplete(self.once.get(), c::INIT_ONCE_INIT_FAILED, ptr::null_mut());
+ panic!("out of TLS indexes");
+ }
+
+ self.key.store(key + 1, Release);
+ register_dtor(self);
+
+ let r = c::InitOnceComplete(self.once.get(), 0, ptr::null_mut());
+ debug_assert_eq!(r, c::TRUE);
+
+ key
+ }
+ } else {
+ // If there is no destructor to clean up, we can use racy initialization.
+
+ let key = c::TlsAlloc();
+ assert_ne!(key, c::TLS_OUT_OF_INDEXES, "out of TLS indexes");
+
+ match self.key.compare_exchange(0, key + 1, AcqRel, Acquire) {
+ Ok(_) => key,
+ Err(new) => {
+ // Some other thread completed initialization first, so destroy
+ // our key and use theirs.
+ let r = c::TlsFree(key);
+ debug_assert_eq!(r, c::TRUE);
+ new - 1
+ }
+ }
+ }
+ }
}
+unsafe impl Send for StaticKey {}
+unsafe impl Sync for StaticKey {}
+
// -------------------------------------------------------------------------
// Dtor registration
//
@@ -96,29 +150,21 @@ pub fn requires_synchronized_create() -> bool {
// Typically processes have a statically known set of TLS keys which is pretty
// small, and we'd want to keep this memory alive for the whole process anyway
// really.
-//
-// Perhaps one day we can fold the `Box` here into a static allocation,
-// expanding the `StaticKey` structure to contain not only a slot for the TLS
-// key but also a slot for the destructor queue on windows. An optimization for
-// another day!
-
-static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
-
-struct Node {
- dtor: Dtor,
- key: Key,
- next: *mut Node,
-}
-unsafe fn register_dtor(key: Key, dtor: Dtor) {
- let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
+static DTORS: AtomicPtr<StaticKey> = AtomicPtr::new(ptr::null_mut());
- let mut head = DTORS.load(SeqCst);
+/// Should only be called once per key, otherwise loops or breaks may occur in
+/// the linked list.
+unsafe fn register_dtor(key: &'static StaticKey) {
+ let this = <*const StaticKey>::cast_mut(key);
+ // Use acquire ordering to pass along the changes done by the previously
+ // registered keys when we store the new head with release ordering.
+ let mut head = DTORS.load(Acquire);
loop {
- node.next = head;
- match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
- Ok(_) => return, // nothing to drop, we successfully added the node to the list
- Err(cur) => head = cur,
+ key.next.store(head, Relaxed);
+ match DTORS.compare_exchange_weak(head, this, Release, Acquire) {
+ Ok(_) => break,
+ Err(new) => head = new,
}
}
}
@@ -214,25 +260,29 @@ unsafe extern "system" fn on_tls_callback(h: c::LPVOID, dwReason: c::DWORD, pv:
unsafe fn reference_tls_used() {}
}
-#[allow(dead_code)] // actually called above
+#[allow(dead_code)] // actually called below
unsafe fn run_dtors() {
- let mut any_run = true;
for _ in 0..5 {
- if !any_run {
- break;
- }
- any_run = false;
- let mut cur = DTORS.load(SeqCst);
+ let mut any_run = false;
+
+ // Use acquire ordering to observe key initialization.
+ let mut cur = DTORS.load(Acquire);
while !cur.is_null() {
- let ptr = c::TlsGetValue((*cur).key);
+ let key = (*cur).key.load(Relaxed) - 1;
+ let dtor = (*cur).dtor.unwrap();
+ let ptr = c::TlsGetValue(key);
if !ptr.is_null() {
- c::TlsSetValue((*cur).key, ptr::null_mut());
- ((*cur).dtor)(ptr as *mut _);
+ c::TlsSetValue(key, ptr::null_mut());
+ dtor(ptr as *mut _);
any_run = true;
}
- cur = (*cur).next;
+ cur = (*cur).next.load(Relaxed);
+ }
+
+ if !any_run {
+ break;
}
}
}
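
The registration above is a lock-free prepend onto an intrusive list: each `StaticKey` carries its own `next` pointer, and a failed `compare_exchange_weak` simply retries with the freshly observed head. As a standalone sketch (not part of this diff) of the same Release/Acquire prepend loop, using a hypothetical `Node` type in place of `StaticKey`:

    use std::ptr;
    use std::sync::atomic::{
        AtomicPtr,
        Ordering::{Acquire, Relaxed, Release},
    };

    struct Node {
        value: u32,
        next: AtomicPtr<Node>,
    }

    static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

    fn push(node: &'static Node) {
        let this = node as *const Node as *mut Node;
        // Acquire pairs with the Release below, so a later Acquire load of HEAD
        // also observes the nodes registered before this one.
        let mut head = HEAD.load(Acquire);
        loop {
            node.next.store(head, Relaxed);
            match HEAD.compare_exchange_weak(head, this, Release, Acquire) {
                Ok(_) => break,
                Err(new) => head = new, // lost the race; retry with the new head
            }
        }
    }

    fn main() {
        static A: Node = Node { value: 1, next: AtomicPtr::new(ptr::null_mut()) };
        static B: Node = Node { value: 2, next: AtomicPtr::new(ptr::null_mut()) };
        push(&A);
        push(&B);

        // Walk the list: the most recently pushed node comes first.
        let mut cur = HEAD.load(Acquire);
        while !cur.is_null() {
            unsafe {
                println!("{}", (*cur).value);
                cur = (*cur).next.load(Relaxed);
            }
        }
    }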
diff --git a/library/std/src/sys/windows/thread_local_key/tests.rs b/library/std/src/sys/windows/thread_local_key/tests.rs
new file mode 100644
index 000000000..c95f383fb
--- /dev/null
+++ b/library/std/src/sys/windows/thread_local_key/tests.rs
@@ -0,0 +1,53 @@
+use super::StaticKey;
+use crate::ptr;
+
+#[test]
+fn smoke() {
+ static K1: StaticKey = StaticKey::new(None);
+ static K2: StaticKey = StaticKey::new(None);
+
+ unsafe {
+ assert!(K1.get().is_null());
+ assert!(K2.get().is_null());
+ K1.set(ptr::invalid_mut(1));
+ K2.set(ptr::invalid_mut(2));
+ assert_eq!(K1.get() as usize, 1);
+ assert_eq!(K2.get() as usize, 2);
+ }
+}
+
+#[test]
+fn destructors() {
+ use crate::mem::ManuallyDrop;
+ use crate::sync::Arc;
+ use crate::thread;
+
+ unsafe extern "C" fn destruct(ptr: *mut u8) {
+ drop(Arc::from_raw(ptr as *const ()));
+ }
+
+ static KEY: StaticKey = StaticKey::new(Some(destruct));
+
+ let shared1 = Arc::new(());
+ let shared2 = Arc::clone(&shared1);
+
+ unsafe {
+ assert!(KEY.get().is_null());
+ KEY.set(Arc::into_raw(shared1) as *mut u8);
+ }
+
+ thread::spawn(move || unsafe {
+ assert!(KEY.get().is_null());
+ KEY.set(Arc::into_raw(shared2) as *mut u8);
+ })
+ .join()
+ .unwrap();
+
+ // Leak the Arc, let the TLS destructor clean it up.
+ let shared1 = unsafe { ManuallyDrop::new(Arc::from_raw(KEY.get() as *const ())) };
+ assert_eq!(
+ Arc::strong_count(&shared1),
+ 1,
+ "destructor should have dropped the other reference on thread exit"
+ );
+}
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
index d876e0f6f..2f7ae863b 100644
--- a/library/std/src/sys/windows/thread_parker.rs
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -197,19 +197,17 @@ impl Parker {
// purpose, to make sure every unpark() has a release-acquire ordering
// with park().
if self.state.swap(NOTIFIED, Release) == PARKED {
- if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
- unsafe {
+ unsafe {
+ if let Some(wake_by_address_single) = c::WakeByAddressSingle::option() {
wake_by_address_single(self.ptr());
- }
- } else {
- // If we run NtReleaseKeyedEvent before the waiting thread runs
- // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
- // If the waiting thread wakes up before we run NtReleaseKeyedEvent
- // (e.g. due to a timeout), this blocks until we do wake up a thread.
- // To prevent this thread from blocking indefinitely in that case,
- // park_impl() will, after seeing the state set to NOTIFIED after
- // waking up, call NtWaitForKeyedEvent again to unblock us.
- unsafe {
+ } else {
+ // If we run NtReleaseKeyedEvent before the waiting thread runs
+ // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+ // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+ // (e.g. due to a timeout), this blocks until we do wake up a thread.
+ // To prevent this thread from blocking indefinitely in that case,
+ // park_impl() will, after seeing the state set to NOTIFIED after
+ // waking up, call NtWaitForKeyedEvent again to unblock us.
c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
}
}
diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs
index 31164afdc..8807077cb 100644
--- a/library/std/src/sys_common/backtrace.rs
+++ b/library/std/src/sys_common/backtrace.rs
@@ -7,15 +7,14 @@ use crate::fmt;
use crate::io;
use crate::io::prelude::*;
use crate::path::{self, Path, PathBuf};
-use crate::sys_common::mutex::StaticMutex;
+use crate::sync::{Mutex, PoisonError};
/// Max number of frames to print.
const MAX_NB_FRAMES: usize = 100;
-// SAFETY: Don't attempt to lock this reentrantly.
-pub unsafe fn lock() -> impl Drop {
- static LOCK: StaticMutex = StaticMutex::new();
- LOCK.lock()
+pub fn lock() -> impl Drop {
+ static LOCK: Mutex<()> = Mutex::new(());
+ LOCK.lock().unwrap_or_else(PoisonError::into_inner)
}
/// Prints the current backtrace.
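
The new `lock()` deliberately ignores poisoning: a panic while printing one backtrace should not block later backtraces. A minimal sketch of the same `unwrap_or_else(PoisonError::into_inner)` idiom, with a hypothetical counter and the const `Mutex::new` this patch series relies on:

    use std::sync::{Mutex, PoisonError};

    // Hypothetical counter; the point is only the poison-recovery idiom.
    static LOCK: Mutex<u32> = Mutex::new(0);

    fn main() {
        // If a previous holder panicked, take the data anyway instead of
        // propagating the poison as an error.
        let mut guard = LOCK.lock().unwrap_or_else(PoisonError::into_inner);
        *guard += 1;
        assert_eq!(*guard, 1);
    }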
diff --git a/library/std/src/sys_common/condvar.rs b/library/std/src/sys_common/condvar.rs
index f3ac1061b..8bc5b2411 100644
--- a/library/std/src/sys_common/condvar.rs
+++ b/library/std/src/sys_common/condvar.rs
@@ -15,6 +15,7 @@ pub struct Condvar {
impl Condvar {
/// Creates a new condition variable for use.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self { inner: imp::MovableCondvar::new(), check: CondvarCheck::new() }
}
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
index ce8f36704..4ac9e62bf 100644
--- a/library/std/src/sys_common/condvar/check.rs
+++ b/library/std/src/sys_common/condvar/check.rs
@@ -50,6 +50,7 @@ pub struct NoCheck;
#[allow(dead_code)]
impl NoCheck {
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self
}
diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs
index 80f56bf75..8c19f9332 100644
--- a/library/std/src/sys_common/mod.rs
+++ b/library/std/src/sys_common/mod.rs
@@ -27,17 +27,25 @@ pub mod io;
pub mod lazy_box;
pub mod memchr;
pub mod mutex;
+pub mod once;
pub mod process;
pub mod remutex;
pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
-pub mod thread_local_key;
pub mod thread_parker;
pub mod wtf8;
cfg_if::cfg_if! {
+ if #[cfg(target_os = "windows")] {
+ pub use crate::sys::thread_local_key;
+ } else {
+ pub mod thread_local_key;
+ }
+}
+
+cfg_if::cfg_if! {
if #[cfg(any(target_os = "l4re",
target_os = "hermit",
feature = "restricted-std",
diff --git a/library/std/src/sys_common/mutex.rs b/library/std/src/sys_common/mutex.rs
index 48479f5bd..98046f20f 100644
--- a/library/std/src/sys_common/mutex.rs
+++ b/library/std/src/sys_common/mutex.rs
@@ -1,49 +1,5 @@
use crate::sys::locks as imp;
-/// An OS-based mutual exclusion lock, meant for use in static variables.
-///
-/// This mutex has a const constructor ([`StaticMutex::new`]), does not
-/// implement `Drop` to cleanup resources, and causes UB when used reentrantly.
-///
-/// This mutex does not implement poisoning.
-///
-/// This is a wrapper around `imp::Mutex` that does *not* call `init()` and
-/// `destroy()`.
-pub struct StaticMutex(imp::Mutex);
-
-unsafe impl Sync for StaticMutex {}
-
-impl StaticMutex {
- /// Creates a new mutex for use.
- #[inline]
- pub const fn new() -> Self {
- Self(imp::Mutex::new())
- }
-
- /// Calls raw_lock() and then returns an RAII guard to guarantee the mutex
- /// will be unlocked.
- ///
- /// It is undefined behaviour to call this function while locked by the
- /// same thread.
- #[inline]
- pub unsafe fn lock(&'static self) -> StaticMutexGuard {
- self.0.lock();
- StaticMutexGuard(&self.0)
- }
-}
-
-#[must_use]
-pub struct StaticMutexGuard(&'static imp::Mutex);
-
-impl Drop for StaticMutexGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.unlock();
- }
- }
-}
-
/// An OS-based mutual exclusion lock.
///
/// This mutex cleans up its resources in its `Drop` implementation, may safely
@@ -61,6 +17,7 @@ unsafe impl Sync for MovableMutex {}
impl MovableMutex {
/// Creates a new mutex.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableMutex::new())
}
diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs
index 33d336c43..fad4a6333 100644
--- a/library/std/src/sys_common/net.rs
+++ b/library/std/src/sys_common/net.rs
@@ -2,15 +2,16 @@
mod tests;
use crate::cmp;
-use crate::ffi::CString;
+use crate::convert::{TryFrom, TryInto};
use crate::fmt;
use crate::io::{self, ErrorKind, IoSlice, IoSliceMut};
use crate::mem;
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
use crate::ptr;
+use crate::sys::common::small_c_string::run_with_cstr;
use crate::sys::net::netc as c;
use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket};
-use crate::sys_common::{FromInner, IntoInner};
+use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::time::Duration;
use libc::{c_int, c_void};
@@ -197,14 +198,15 @@ impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
fn try_from((host, port): (&'a str, u16)) -> io::Result<LookupHost> {
init();
- let c_host = CString::new(host)?;
- let mut hints: c::addrinfo = unsafe { mem::zeroed() };
- hints.ai_socktype = c::SOCK_STREAM;
- let mut res = ptr::null_mut();
- unsafe {
- cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res))
- .map(|_| LookupHost { original: res, cur: res, port })
- }
+ run_with_cstr(host.as_bytes(), |c_host| {
+ let mut hints: c::addrinfo = unsafe { mem::zeroed() };
+ hints.ai_socktype = c::SOCK_STREAM;
+ let mut res = ptr::null_mut();
+ unsafe {
+ cvt_gai(c::getaddrinfo(c_host.as_ptr(), ptr::null(), &hints, &mut res))
+ .map(|_| LookupHost { original: res, cur: res, port })
+ }
+ })
}
}
@@ -345,6 +347,12 @@ impl TcpStream {
}
}
+impl AsInner<Socket> for TcpStream {
+ fn as_inner(&self) -> &Socket {
+ &self.inner
+ }
+}
+
impl FromInner<Socket> for TcpStream {
fn from_inner(socket: Socket) -> TcpStream {
TcpStream { inner: socket }
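
`run_with_cstr` (imported above from the new `sys::common::small_c_string` module) lets short host names be NUL-terminated in a stack buffer instead of allocating a `CString`. A rough sketch of that idea, with a hypothetical buffer size and simplified error handling; the real helper differs in detail:

    use std::ffi::{CStr, CString};
    use std::io;
    use std::mem::MaybeUninit;
    use std::{ptr, slice};

    // Hypothetical cutoff; the real helper picks its own buffer size.
    const MAX_STACK_ALLOCATION: usize = 384;

    fn run_with_cstr<T>(bytes: &[u8], f: impl FnOnce(&CStr) -> io::Result<T>) -> io::Result<T> {
        if bytes.len() >= MAX_STACK_ALLOCATION {
            // Long input: fall back to an ordinary heap-allocated CString.
            let cstr = CString::new(bytes).map_err(|_| io::ErrorKind::InvalidInput)?;
            return f(&cstr);
        }
        // Short input: NUL-terminate it in a stack buffer, no allocation.
        let mut buf = MaybeUninit::<[u8; MAX_STACK_ALLOCATION]>::uninit();
        let buf_ptr = buf.as_mut_ptr() as *mut u8;
        unsafe {
            ptr::copy_nonoverlapping(bytes.as_ptr(), buf_ptr, bytes.len());
            buf_ptr.add(bytes.len()).write(0);
        }
        let with_nul = unsafe { slice::from_raw_parts(buf_ptr, bytes.len() + 1) };
        let cstr =
            CStr::from_bytes_with_nul(with_nul).map_err(|_| io::ErrorKind::InvalidInput)?;
        f(cstr)
    }

    fn main() -> io::Result<()> {
        run_with_cstr(b"localhost", |host| {
            println!("resolving {host:?}");
            Ok(())
        })
    }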
diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs
new file mode 100644
index 000000000..5c7e6c013
--- /dev/null
+++ b/library/std/src/sys_common/once/futex.rs
@@ -0,0 +1,134 @@
+use crate::cell::Cell;
+use crate::sync as public;
+use crate::sync::atomic::{
+ AtomicU32,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::futex::{futex_wait, futex_wake_all};
+
+// On some platforms, the OS is very nice and handles the waiter queue for us.
+// This means we only need one atomic value with 5 states:
+
+/// No initialization has run yet, and no thread is currently using the Once.
+const INCOMPLETE: u32 = 0;
+/// Some thread has previously attempted to initialize the Once, but it panicked,
+/// so the Once is now poisoned. There are no other threads currently accessing
+/// this Once.
+const POISONED: u32 = 1;
+/// Some thread is currently attempting to run initialization. It may succeed,
+/// so all future threads need to wait for it to finish.
+const RUNNING: u32 = 2;
+/// Some thread is currently attempting to run initialization and there are threads
+/// waiting for it to finish.
+const QUEUED: u32 = 3;
+/// Initialization has completed and all future calls should finish immediately.
+const COMPLETE: u32 = 4;
+
+// Threads wait by setting the state to QUEUED and calling `futex_wait` on the state
+// variable. When the running thread finishes, it will wake all waiting threads using
+// `futex_wake_all`.
+
+pub struct OnceState {
+ poisoned: bool,
+ set_state_to: Cell<u32>,
+}
+
+impl OnceState {
+ #[inline]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ #[inline]
+ pub fn poison(&self) {
+ self.set_state_to.set(POISONED);
+ }
+}
+
+struct CompletionGuard<'a> {
+ state: &'a AtomicU32,
+ set_state_on_drop_to: u32,
+}
+
+impl<'a> Drop for CompletionGuard<'a> {
+ fn drop(&mut self) {
+ // Use release ordering to propagate changes to all threads checking
+ // up on the Once. `futex_wake_all` does its own synchronization, hence
+ // we do not need `AcqRel`.
+ if self.state.swap(self.set_state_on_drop_to, Release) == QUEUED {
+ futex_wake_all(&self.state);
+ }
+ }
+}
+
+pub struct Once {
+ state: AtomicU32,
+}
+
+impl Once {
+ #[inline]
+ pub const fn new() -> Once {
+ Once { state: AtomicU32::new(INCOMPLETE) }
+ }
+
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ // Use acquire ordering to make all initialization changes visible to the
+ // current thread.
+ self.state.load(Acquire) == COMPLETE
+ }
+
+ // This uses FnMut to match the API of the generic implementation. As this
+ // implementation is quite light-weight, it is generic over the closure and
+ // so avoids the cost of dynamic dispatch.
+ #[cold]
+ #[track_caller]
+ pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) {
+ let mut state = self.state.load(Acquire);
+ loop {
+ match state {
+ POISONED if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ INCOMPLETE | POISONED => {
+ // Try to register the current thread as the one running.
+ if let Err(new) =
+ self.state.compare_exchange_weak(state, RUNNING, Acquire, Acquire)
+ {
+ state = new;
+ continue;
+ }
+ // `waiter_queue` will manage other waiting threads, and
+ // wake them up on drop.
+ let mut waiter_queue =
+ CompletionGuard { state: &self.state, set_state_on_drop_to: POISONED };
+ // Run the function, letting it know if we're poisoned or not.
+ let f_state = public::OnceState {
+ inner: OnceState {
+ poisoned: state == POISONED,
+ set_state_to: Cell::new(COMPLETE),
+ },
+ };
+ f(&f_state);
+ waiter_queue.set_state_on_drop_to = f_state.inner.set_state_to.get();
+ return;
+ }
+ RUNNING | QUEUED => {
+ // Set the state to QUEUED if it is not already.
+ if state == RUNNING
+ && let Err(new) = self.state.compare_exchange_weak(RUNNING, QUEUED, Relaxed, Acquire)
+ {
+ state = new;
+ continue;
+ }
+
+ futex_wait(&self.state, QUEUED, None);
+ state = self.state.load(Acquire);
+ }
+ COMPLETE => return,
+ _ => unreachable!("state is never set to invalid values"),
+ }
+ }
+ }
+}
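
This backend ultimately powers the public `std::sync::Once`. A small usage sketch of the behaviour the states above implement: exactly one closure runs, and every other caller blocks until `COMPLETE` is published:

    use std::sync::Once;
    use std::thread;

    static INIT: Once = Once::new();

    fn main() {
        let handles: Vec<_> = (0..4)
            .map(|_| {
                thread::spawn(|| {
                    // Exactly one closure runs; the other threads wait on the
                    // futex (the QUEUED state) until initialization finishes.
                    INIT.call_once(|| println!("initialized exactly once"));
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
        assert!(INIT.is_completed());
    }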
diff --git a/library/std/src/sys_common/once/generic.rs b/library/std/src/sys_common/once/generic.rs
new file mode 100644
index 000000000..acf5f2471
--- /dev/null
+++ b/library/std/src/sys_common/once/generic.rs
@@ -0,0 +1,282 @@
+// Each `Once` has one word of atomic state, and this state is CAS'd on to
+// determine what to do. There are four possible states of a `Once`:
+//
+// * Incomplete - no initialization has run yet, and no thread is currently
+// using the Once.
+// * Poisoned - some thread has previously attempted to initialize the Once, but
+// it panicked, so the Once is now poisoned. There are no other
+// threads currently accessing this Once.
+// * Running - some thread is currently attempting to run initialization. It may
+// succeed, so all future threads need to wait for it to finish.
+// Note that this state is accompanied with a payload, described
+// below.
+// * Complete - initialization has completed and all future calls should finish
+// immediately.
+//
+// With 4 states we need 2 bits to encode this, and we use the remaining bits
+// in the word we have allocated as a queue of threads waiting for the thread
+// responsible for entering the RUNNING state. This queue is just a linked list
+// of Waiter nodes which is monotonically increasing in size. Each node is
+// allocated on the stack, and whenever the running closure finishes it will
+// consume the entire queue and notify all waiters they should try again.
+//
+// You'll find a few more details in the implementation, but that's the gist of
+// it!
+//
+// Atomic orderings:
+// When running `Once` we deal with multiple atomics:
+// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
+// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
+// result of the `Once`, and (3) for synchronizing `Waiter` nodes.
+// - At the end of the `call` function we have to make sure the result
+// of the `Once` is acquired. So every load which can be the only one to
+// load COMPLETED must have at least acquire ordering, which means all
+// three of them.
+// - `WaiterQueue::drop` is the only place that may store COMPLETED, and
+// must do so with release ordering to make the result available.
+// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
+// needs to make the nodes available with release ordering. The load in
+// its `compare_exchange` can be relaxed because it only has to compare
+// the atomic, not to read other data.
+// - `WaiterQueue::drop` must see the `Waiter` nodes, so it must load
+// `state_and_queue` with acquire ordering.
+// - There is just one store where `state_and_queue` is used only as a
+// state flag, without having to synchronize data: switching the state
+// from INCOMPLETE to RUNNING in `call`. This store can be Relaxed,
+// but the read has to be Acquire because of the requirements mentioned
+// above.
+// * `Waiter.signaled` is both used as a flag, and to protect a field with
+// interior mutability in `Waiter`. `Waiter.thread` is changed in
+// `WaiterQueue::drop` which then sets `signaled` with release ordering.
+// After `wait` loads `signaled` with acquire ordering and sees it is true,
+// it needs to see the changes to drop the `Waiter` struct correctly.
+// * There is one place where the two atomics `Once.state_and_queue` and
+// `Waiter.signaled` come together, and might be reordered by the compiler or
+// processor. Because both use acquire ordering such a reordering is not
+// allowed, so no need for `SeqCst`.
+
+use crate::cell::Cell;
+use crate::fmt;
+use crate::ptr;
+use crate::sync as public;
+use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use crate::thread::{self, Thread};
+
+type Masked = ();
+
+pub struct Once {
+ state_and_queue: AtomicPtr<Masked>,
+}
+
+pub struct OnceState {
+ poisoned: bool,
+ set_state_on_drop_to: Cell<*mut Masked>,
+}
+
+// Four states that a Once can be in, encoded into the lower bits of
+// `state_and_queue` in the Once structure.
+const INCOMPLETE: usize = 0x0;
+const POISONED: usize = 0x1;
+const RUNNING: usize = 0x2;
+const COMPLETE: usize = 0x3;
+
+// Mask to learn about the state. All other bits are the queue of waiters if
+// this is in the RUNNING state.
+const STATE_MASK: usize = 0x3;
+
+// Representation of a node in the linked list of waiters, used while in the
+// RUNNING state.
+// Note: `Waiter` can't hold a mutable pointer to the next thread, because then
+// `wait` would both hand out a mutable reference to its `Waiter` node, and keep
+// a shared reference to check `signaled`. Instead we hold shared references and
+// use interior mutability.
+#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
+struct Waiter {
+ thread: Cell<Option<Thread>>,
+ signaled: AtomicBool,
+ next: *const Waiter,
+}
+
+// Head of a linked list of waiters.
+// Every node is a struct on the stack of a waiting thread.
+// Will wake up the waiters when it gets dropped, i.e. also on panic.
+struct WaiterQueue<'a> {
+ state_and_queue: &'a AtomicPtr<Masked>,
+ set_state_on_drop_to: *mut Masked,
+}
+
+impl Once {
+ #[inline]
+ pub const fn new() -> Once {
+ Once { state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)) }
+ }
+
+ #[inline]
+ pub fn is_completed(&self) -> bool {
+ // An `Acquire` load is enough because that makes all the initialization
+ // operations visible to us, and, this being a fast path, weaker
+ // ordering helps with performance. This `Acquire` synchronizes with
+ // `Release` operations on the slow path.
+ self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
+ }
+
+ // This is a non-generic function to reduce the monomorphization cost of
+ // using `call_once` (this isn't exactly a trivial or small implementation).
+ //
+ // Additionally, this is tagged with `#[cold]` as it should indeed be cold
+ // and it helps let LLVM know that calls to this function should be off the
+ // fast path. Essentially, this should help generate more straight line code
+ // in LLVM.
+ //
+ // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
+ // currently no way to take an `FnOnce` and call it via virtual dispatch
+ // without some allocation overhead.
+ #[cold]
+ #[track_caller]
+ pub fn call(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&public::OnceState)) {
+ let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ loop {
+ match state_and_queue.addr() {
+ COMPLETE => break,
+ POISONED if !ignore_poisoning => {
+ // Panic to propagate the poison.
+ panic!("Once instance has previously been poisoned");
+ }
+ POISONED | INCOMPLETE => {
+ // Try to register this thread as the one RUNNING.
+ let exchange_result = self.state_and_queue.compare_exchange(
+ state_and_queue,
+ ptr::invalid_mut(RUNNING),
+ Ordering::Acquire,
+ Ordering::Acquire,
+ );
+ if let Err(old) = exchange_result {
+ state_and_queue = old;
+ continue;
+ }
+ // `waiter_queue` will manage other waiting threads, and
+ // wake them up on drop.
+ let mut waiter_queue = WaiterQueue {
+ state_and_queue: &self.state_and_queue,
+ set_state_on_drop_to: ptr::invalid_mut(POISONED),
+ };
+ // Run the initialization function, letting it know if we're
+ // poisoned or not.
+ let init_state = public::OnceState {
+ inner: OnceState {
+ poisoned: state_and_queue.addr() == POISONED,
+ set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
+ },
+ };
+ init(&init_state);
+ waiter_queue.set_state_on_drop_to = init_state.inner.set_state_on_drop_to.get();
+ break;
+ }
+ _ => {
+ // All other values must be RUNNING with possibly a
+ // pointer to the waiter queue in the more significant bits.
+ assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
+ wait(&self.state_and_queue, state_and_queue);
+ state_and_queue = self.state_and_queue.load(Ordering::Acquire);
+ }
+ }
+ }
+ }
+}
+
+fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
+ // Note: the following code was carefully written to avoid creating a
+ // mutable reference to `node` that gets aliased.
+ loop {
+ // Don't queue this thread if the status is no longer running,
+ // otherwise we will not be woken up.
+ if current_state.addr() & STATE_MASK != RUNNING {
+ return;
+ }
+
+ // Create the node for our current thread.
+ let node = Waiter {
+ thread: Cell::new(Some(thread::current())),
+ signaled: AtomicBool::new(false),
+ next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
+ };
+ let me = &node as *const Waiter as *const Masked as *mut Masked;
+
+ // Try to slide in the node at the head of the linked list, making sure
+ // that another thread didn't just replace the head of the linked list.
+ let exchange_result = state_and_queue.compare_exchange(
+ current_state,
+ me.with_addr(me.addr() | RUNNING),
+ Ordering::Release,
+ Ordering::Relaxed,
+ );
+ if let Err(old) = exchange_result {
+ current_state = old;
+ continue;
+ }
+
+ // We have enqueued ourselves, now let's wait.
+ // It is important not to return before being signaled, otherwise we
+ // would drop our `Waiter` node and leave a hole in the linked list
+ // (and a dangling reference). Guard against spurious wakeups by
+ // reparking ourselves until we are signaled.
+ while !node.signaled.load(Ordering::Acquire) {
+ // If the managing thread happens to signal and unpark us before we
+ // can park ourselves, the result could be this thread never gets
+ // unparked. Luckily `park` comes with the guarantee that if it got
+ // an `unpark` just before on an unparked thread it does not park.
+ thread::park();
+ }
+ break;
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for Once {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Once").finish_non_exhaustive()
+ }
+}
+
+impl Drop for WaiterQueue<'_> {
+ fn drop(&mut self) {
+ // Swap out our state with however we finished.
+ let state_and_queue =
+ self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
+
+ // We should only ever see an old state which was RUNNING.
+ assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
+
+ // Walk the entire linked list of waiters and wake them up (in lifo
+ // order, last to register is first to wake up).
+ unsafe {
+ // Right after setting `node.signaled = true` the other thread may
+ // free `node` if there happens to be a spurious wakeup.
+ // So we have to take out the `thread` field and copy the pointer to
+ // `next` first.
+ let mut queue =
+ state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
+ while !queue.is_null() {
+ let next = (*queue).next;
+ let thread = (*queue).thread.take().unwrap();
+ (*queue).signaled.store(true, Ordering::Release);
+ // ^- FIXME (maybe): This is another case of issue #55005
+ // `store()` has a potentially dangling ref to `signaled`.
+ queue = next;
+ thread.unpark();
+ }
+ }
+ }
+}
+
+impl OnceState {
+ #[inline]
+ pub fn is_poisoned(&self) -> bool {
+ self.poisoned
+ }
+
+ #[inline]
+ pub fn poison(&self) {
+ self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
+ }
+}
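
The `state_and_queue` word relies on `#[repr(align(4))]` keeping the two low bits of every `Waiter` address free for the state. A standalone sketch of that packing with plain integer casts (the code above uses `addr`/`with_addr` instead, to keep pointer provenance intact); names here are illustrative only:

    // Hypothetical constants; this only demonstrates the bit-packing arithmetic.
    const STATE_MASK: usize = 0b11;
    const RUNNING: usize = 0b10;

    #[repr(align(4))] // guarantees the two low bits of any Waiter address are zero
    struct Waiter {
        id: u32,
    }

    fn main() {
        let node = Waiter { id: 7 };
        let addr = &node as *const Waiter as usize;
        assert_eq!(addr & STATE_MASK, 0);

        // Pack: pointer in the high bits, state in the low two bits.
        let packed = addr | RUNNING;

        // Unpack.
        assert_eq!(packed & STATE_MASK, RUNNING);
        let queue = (packed & !STATE_MASK) as *const Waiter;
        assert_eq!(unsafe { (*queue).id }, 7);
    }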
diff --git a/library/std/src/sys_common/once/mod.rs b/library/std/src/sys_common/once/mod.rs
new file mode 100644
index 000000000..8742e68cc
--- /dev/null
+++ b/library/std/src/sys_common/once/mod.rs
@@ -0,0 +1,43 @@
+// A "once" is a relatively simple primitive, and it's also typically provided
+// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
+// primitives, however, tend to have surprising restrictions, such as the Unix
+// one not allowing an argument to be passed to the function.
+//
+// As a result, we end up implementing it ourselves in the standard library.
+// This also gives us the opportunity to optimize the implementation a bit which
+// should help the fast path on call sites.
+//
+// So to recap, the guarantees of a Once are that it will call the
+// initialization closure at most once, and it will never return until the one
+// that's running has finished running. This means that we need some form of
+// blocking here while the custom callback is running at the very least.
+// Additionally, we add on the restriction of **poisoning**. Whenever an
+// initialization closure panics, the Once enters a "poisoned" state which means
+// that all future calls will immediately panic as well.
+//
+// So to implement this, one might first reach for a `Mutex`, but those cannot
+// be put into a `static`. It also gets a lot harder with poisoning to figure
+// out when the mutex needs to be deallocated because it's not after the closure
+// finishes, but after the first successful closure finishes.
+//
+// All in all, this is instead implemented with atomics and lock-free
+// operations! Whee!
+
+cfg_if::cfg_if! {
+ if #[cfg(any(
+ target_os = "linux",
+ target_os = "android",
+ all(target_arch = "wasm32", target_feature = "atomics"),
+ target_os = "freebsd",
+ target_os = "openbsd",
+ target_os = "dragonfly",
+ target_os = "fuchsia",
+ target_os = "hermit",
+ ))] {
+ mod futex;
+ pub use futex::{Once, OnceState};
+ } else {
+ mod generic;
+ pub use generic::{Once, OnceState};
+ }
+}
diff --git a/library/std/src/sys_common/remutex.rs b/library/std/src/sys_common/remutex.rs
index 8921af311..b448ae3a9 100644
--- a/library/std/src/sys_common/remutex.rs
+++ b/library/std/src/sys_common/remutex.rs
@@ -1,13 +1,11 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
+use super::mutex as sys;
use crate::cell::UnsafeCell;
-use crate::marker::PhantomPinned;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-use crate::sys::locks as sys;
/// A re-entrant mutual exclusion
///
@@ -41,11 +39,10 @@ use crate::sys::locks as sys;
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
- mutex: sys::Mutex,
+ mutex: sys::MovableMutex,
owner: AtomicUsize,
lock_count: UnsafeCell<u32>,
data: T,
- _pinned: PhantomPinned,
}
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
@@ -68,39 +65,22 @@ impl<T> RefUnwindSafe for ReentrantMutex<T> {}
/// guarded data.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
- lock: Pin<&'a ReentrantMutex<T>>,
+ lock: &'a ReentrantMutex<T>,
}
impl<T> !Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
- ///
- /// # Unsafety
- ///
- /// This function is unsafe because it is required that `init` is called
- /// once this mutex is in its final resting place, and only then are the
- /// lock/unlock methods safe.
- pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+ pub const fn new(t: T) -> ReentrantMutex<T> {
ReentrantMutex {
- mutex: sys::Mutex::new(),
+ mutex: sys::MovableMutex::new(),
owner: AtomicUsize::new(0),
lock_count: UnsafeCell::new(0),
data: t,
- _pinned: PhantomPinned,
}
}
- /// Initializes this mutex so it's ready for use.
- ///
- /// # Unsafety
- ///
- /// Unsafe to call more than once, and must be called after this will no
- /// longer move in memory.
- pub unsafe fn init(self: Pin<&mut Self>) {
- self.get_unchecked_mut().mutex.init()
- }
-
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// This function will block the caller until it is available to acquire the mutex.
@@ -113,15 +93,14 @@ impl<T> ReentrantMutex<T> {
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
+ pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
let this_thread = current_thread_unique_ptr();
- // Safety: We only touch lock_count when we own the lock,
- // and since self is pinned we can safely call the lock() on the mutex.
+ // Safety: We only touch lock_count when we own the lock.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
- self.mutex.lock();
+ self.mutex.raw_lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
@@ -142,10 +121,9 @@ impl<T> ReentrantMutex<T> {
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+ pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
let this_thread = current_thread_unique_ptr();
- // Safety: We only touch lock_count when we own the lock,
- // and since self is pinned we can safely call the try_lock on the mutex.
+ // Safety: We only touch lock_count when we own the lock.
unsafe {
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
@@ -179,12 +157,12 @@ impl<T> Deref for ReentrantMutexGuard<'_, T> {
impl<T> Drop for ReentrantMutexGuard<'_, T> {
#[inline]
fn drop(&mut self) {
- // Safety: We own the lock, and the lock is pinned.
+ // Safety: We own the lock.
unsafe {
*self.lock.lock_count.get() -= 1;
if *self.lock.lock_count.get() == 0 {
self.lock.owner.store(0, Relaxed);
- self.lock.mutex.unlock();
+ self.lock.mutex.raw_unlock();
}
}
}
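
`lock()` detects recursive locking by comparing `current_thread_unique_ptr()` against the stored owner. A sketch of the underlying trick, assuming it is implemented as the address of a thread-local, which is unique among concurrently live threads:

    use std::thread;

    fn current_thread_unique_ptr() -> usize {
        // Every thread gets its own copy of X, so its address is unique among
        // live threads and stable for the lifetime of the thread.
        thread_local! { static X: u8 = 0; }
        X.with(|x| x as *const u8 as usize)
    }

    fn main() {
        let main_id = current_thread_unique_ptr();
        assert_eq!(main_id, current_thread_unique_ptr()); // stable within a thread

        let child_id = thread::spawn(current_thread_unique_ptr).join().unwrap();
        assert_ne!(main_id, child_id); // differs between concurrently live threads
    }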
diff --git a/library/std/src/sys_common/remutex/tests.rs b/library/std/src/sys_common/remutex/tests.rs
index 64873b850..8e97ce11c 100644
--- a/library/std/src/sys_common/remutex/tests.rs
+++ b/library/std/src/sys_common/remutex/tests.rs
@@ -1,18 +1,11 @@
-use crate::boxed::Box;
use crate::cell::RefCell;
-use crate::pin::Pin;
use crate::sync::Arc;
use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use crate::thread;
#[test]
fn smoke() {
- let m = unsafe {
- let mut m = Box::pin(ReentrantMutex::new(()));
- m.as_mut().init();
- m
- };
- let m = m.as_ref();
+ let m = ReentrantMutex::new(());
{
let a = m.lock();
{
@@ -29,20 +22,15 @@ fn smoke() {
#[test]
fn is_mutex() {
- let m = unsafe {
- // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
- let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
- Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
- Pin::new_unchecked(m)
- };
+ let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
let m2 = m.clone();
- let lock = m.as_ref().lock();
+ let lock = m.lock();
let child = thread::spawn(move || {
- let lock = m2.as_ref().lock();
+ let lock = m2.lock();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
- let lock = m.as_ref().lock();
+ let lock = m.lock();
*lock.borrow_mut() += i;
}
drop(lock);
@@ -51,22 +39,17 @@ fn is_mutex() {
#[test]
fn trylock_works() {
- let m = unsafe {
- // FIXME: Simplify this if Arc gets an Arc::get_pin_mut.
- let mut m = Arc::new(ReentrantMutex::new(()));
- Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
- Pin::new_unchecked(m)
- };
+ let m = Arc::new(ReentrantMutex::new(()));
let m2 = m.clone();
- let _lock = m.as_ref().try_lock();
- let _lock2 = m.as_ref().try_lock();
+ let _lock = m.try_lock();
+ let _lock2 = m.try_lock();
thread::spawn(move || {
- let lock = m2.as_ref().try_lock();
+ let lock = m2.try_lock();
assert!(lock.is_none());
})
.join()
.unwrap();
- let _lock3 = m.as_ref().try_lock();
+ let _lock3 = m.try_lock();
}
pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs
index ba56f3a8f..042981dac 100644
--- a/library/std/src/sys_common/rwlock.rs
+++ b/library/std/src/sys_common/rwlock.rs
@@ -1,65 +1,5 @@
use crate::sys::locks as imp;
-/// An OS-based reader-writer lock, meant for use in static variables.
-///
-/// This rwlock does not implement poisoning.
-///
-/// This rwlock has a const constructor ([`StaticRwLock::new`]), does not
-/// implement `Drop` to cleanup resources.
-pub struct StaticRwLock(imp::RwLock);
-
-impl StaticRwLock {
- /// Creates a new rwlock for use.
- #[inline]
- pub const fn new() -> Self {
- Self(imp::RwLock::new())
- }
-
- /// Acquires shared access to the underlying lock, blocking the current
- /// thread to do so.
- ///
- /// The lock is automatically unlocked when the returned guard is dropped.
- #[inline]
- pub fn read(&'static self) -> StaticRwLockReadGuard {
- unsafe { self.0.read() };
- StaticRwLockReadGuard(&self.0)
- }
-
- /// Acquires write access to the underlying lock, blocking the current thread
- /// to do so.
- ///
- /// The lock is automatically unlocked when the returned guard is dropped.
- #[inline]
- pub fn write(&'static self) -> StaticRwLockWriteGuard {
- unsafe { self.0.write() };
- StaticRwLockWriteGuard(&self.0)
- }
-}
-
-#[must_use]
-pub struct StaticRwLockReadGuard(&'static imp::RwLock);
-
-impl Drop for StaticRwLockReadGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.read_unlock();
- }
- }
-}
-
-#[must_use]
-pub struct StaticRwLockWriteGuard(&'static imp::RwLock);
-
-impl Drop for StaticRwLockWriteGuard {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- self.0.write_unlock();
- }
- }
-}
-
/// An OS-based reader-writer lock.
///
/// This rwlock cleans up its resources in its `Drop` implementation and may
@@ -75,6 +15,7 @@ pub struct MovableRwLock(imp::MovableRwLock);
impl MovableRwLock {
/// Creates a new reader-writer lock for use.
#[inline]
+ #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
pub const fn new() -> Self {
Self(imp::MovableRwLock::new())
}
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 70beebe86..747579f17 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -53,7 +53,6 @@ mod tests;
use crate::sync::atomic::{self, AtomicUsize, Ordering};
use crate::sys::thread_local_key as imp;
-use crate::sys_common::mutex::StaticMutex;
/// A type for TLS keys that are statically allocated.
///
@@ -69,8 +68,10 @@ use crate::sys_common::mutex::StaticMutex;
/// ```ignore (cannot-doctest-private-modules)
/// use tls::os::{StaticKey, INIT};
///
+/// // Use a regular global static to store the key.
/// static KEY: StaticKey = INIT;
///
+/// // The state provided via `get` and `set` is thread-local.
/// unsafe {
/// assert!(KEY.get().is_null());
/// KEY.set(1 as *mut u8);
@@ -149,25 +150,6 @@ impl StaticKey {
}
unsafe fn lazy_init(&self) -> usize {
- // Currently the Windows implementation of TLS is pretty hairy, and
- // it greatly simplifies creation if we just synchronize everything.
- //
- // Additionally a 0-index of a tls key hasn't been seen on windows, so
- // we just simplify the whole branch.
- if imp::requires_synchronized_create() {
- // We never call `INIT_LOCK.init()`, so it is UB to attempt to
- // acquire this mutex reentrantly!
- static INIT_LOCK: StaticMutex = StaticMutex::new();
- let _guard = INIT_LOCK.lock();
- let mut key = self.key.load(Ordering::SeqCst);
- if key == 0 {
- key = imp::create(self.dtor) as usize;
- self.key.store(key, Ordering::SeqCst);
- }
- rtassert!(key != 0);
- return key;
- }
-
// POSIX allows the key created here to be 0, but the compare_exchange
// below relies on using 0 as a sentinel value to check who won the
// race to set the shared TLS key. As far as I know, there is no
@@ -230,8 +212,6 @@ impl Key {
impl Drop for Key {
fn drop(&mut self) {
- // Right now Windows doesn't support TLS key destruction, but this also
- // isn't used anywhere other than tests, so just leak the TLS key.
- // unsafe { imp::destroy(self.key) }
+ unsafe { imp::destroy(self.key) }
}
}
diff --git a/library/std/src/sys_common/thread_local_key/tests.rs b/library/std/src/sys_common/thread_local_key/tests.rs
index 968738a41..6f32b858f 100644
--- a/library/std/src/sys_common/thread_local_key/tests.rs
+++ b/library/std/src/sys_common/thread_local_key/tests.rs
@@ -1,4 +1,5 @@
use super::{Key, StaticKey};
+use core::ptr;
fn assert_sync<T: Sync>() {}
fn assert_send<T: Send>() {}
@@ -12,8 +13,8 @@ fn smoke() {
let k2 = Key::new(None);
assert!(k1.get().is_null());
assert!(k2.get().is_null());
- k1.set(1 as *mut _);
- k2.set(2 as *mut _);
+ k1.set(ptr::invalid_mut(1));
+ k2.set(ptr::invalid_mut(2));
assert_eq!(k1.get() as usize, 1);
assert_eq!(k2.get() as usize, 2);
}
@@ -26,8 +27,8 @@ fn statik() {
unsafe {
assert!(K1.get().is_null());
assert!(K2.get().is_null());
- K1.set(1 as *mut _);
- K2.set(2 as *mut _);
+ K1.set(ptr::invalid_mut(1));
+ K2.set(ptr::invalid_mut(2));
assert_eq!(K1.get() as usize, 1);
assert_eq!(K2.get() as usize, 2);
}
diff --git a/library/std/src/sys_common/thread_parker/mod.rs b/library/std/src/sys_common/thread_parker/mod.rs
index cbd7832eb..f86a9a555 100644
--- a/library/std/src/sys_common/thread_parker/mod.rs
+++ b/library/std/src/sys_common/thread_parker/mod.rs
@@ -7,6 +7,7 @@ cfg_if::cfg_if! {
target_os = "openbsd",
target_os = "dragonfly",
target_os = "fuchsia",
+ target_os = "hermit",
))] {
mod futex;
pub use futex::Parker;
diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs
index 57fa49893..dd53767d4 100644
--- a/library/std/src/sys_common/wtf8.rs
+++ b/library/std/src/sys_common/wtf8.rs
@@ -89,6 +89,24 @@ impl CodePoint {
self.value
}
+ /// Returns the numeric value of the code point if it is a leading surrogate.
+ #[inline]
+ pub fn to_lead_surrogate(&self) -> Option<u16> {
+ match self.value {
+ lead @ 0xD800..=0xDBFF => Some(lead as u16),
+ _ => None,
+ }
+ }
+
+ /// Returns the numeric value of the code point if it is a trailing surrogate.
+ #[inline]
+ pub fn to_trail_surrogate(&self) -> Option<u16> {
+ match self.value {
+ trail @ 0xDC00..=0xDFFF => Some(trail as u16),
+ _ => None,
+ }
+ }
+
/// Optionally returns a Unicode scalar value for the code point.
///
/// Returns `None` if the code point is a surrogate (from U+D800 to U+DFFF).
@@ -117,6 +135,14 @@ impl CodePoint {
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone)]
pub struct Wtf8Buf {
bytes: Vec<u8>,
+
+ /// Do we know that `bytes` holds a valid UTF-8 encoding? We can easily
+ /// know this if we're constructed from a `String` or `&str`.
+ ///
+ /// It is possible for `bytes` to have valid UTF-8 without this being
+ /// set, such as when we're concatenating `&Wtf8`'s and surrogates become
+ /// paired, as we don't bother to rescan the entire string.
+ is_known_utf8: bool,
}
impl ops::Deref for Wtf8Buf {
@@ -147,13 +173,13 @@ impl Wtf8Buf {
/// Creates a new, empty WTF-8 string.
#[inline]
pub fn new() -> Wtf8Buf {
- Wtf8Buf { bytes: Vec::new() }
+ Wtf8Buf { bytes: Vec::new(), is_known_utf8: true }
}
/// Creates a new, empty WTF-8 string with pre-allocated capacity for `capacity` bytes.
#[inline]
pub fn with_capacity(capacity: usize) -> Wtf8Buf {
- Wtf8Buf { bytes: Vec::with_capacity(capacity) }
+ Wtf8Buf { bytes: Vec::with_capacity(capacity), is_known_utf8: true }
}
/// Creates a WTF-8 string from a UTF-8 `String`.
@@ -163,7 +189,7 @@ impl Wtf8Buf {
/// Since WTF-8 is a superset of UTF-8, this always succeeds.
#[inline]
pub fn from_string(string: String) -> Wtf8Buf {
- Wtf8Buf { bytes: string.into_bytes() }
+ Wtf8Buf { bytes: string.into_bytes(), is_known_utf8: true }
}
/// Creates a WTF-8 string from a UTF-8 `&str` slice.
@@ -173,11 +199,12 @@ impl Wtf8Buf {
/// Since WTF-8 is a superset of UTF-8, this always succeeds.
#[inline]
pub fn from_str(str: &str) -> Wtf8Buf {
- Wtf8Buf { bytes: <[_]>::to_vec(str.as_bytes()) }
+ Wtf8Buf { bytes: <[_]>::to_vec(str.as_bytes()), is_known_utf8: true }
}
pub fn clear(&mut self) {
- self.bytes.clear()
+ self.bytes.clear();
+ self.is_known_utf8 = true;
}
/// Creates a WTF-8 string from a potentially ill-formed UTF-16 slice of 16-bit code units.
@@ -193,9 +220,11 @@ impl Wtf8Buf {
let surrogate = surrogate.unpaired_surrogate();
// Surrogates are known to be in the code point range.
let code_point = unsafe { CodePoint::from_u32_unchecked(surrogate as u32) };
+ // The string will now contain an unpaired surrogate.
+ string.is_known_utf8 = false;
// Skip the WTF-8 concatenation check,
// surrogate pairs are already decoded by decode_utf16
- string.push_code_point_unchecked(code_point)
+ string.push_code_point_unchecked(code_point);
}
}
}
@@ -203,7 +232,7 @@ impl Wtf8Buf {
}
/// Copied from String::push
- /// This does **not** include the WTF-8 concatenation check.
+ /// This does **not** include the WTF-8 concatenation check or `is_known_utf8` check.
fn push_code_point_unchecked(&mut self, code_point: CodePoint) {
let mut bytes = [0; 4];
let bytes = char::encode_utf8_raw(code_point.value, &mut bytes);
@@ -217,6 +246,9 @@ impl Wtf8Buf {
#[inline]
pub fn as_mut_slice(&mut self) -> &mut Wtf8 {
+ // Safety: `Wtf8` doesn't expose any way to mutate the bytes that would
+ // cause them to change from well-formed UTF-8 to ill-formed UTF-8,
+ // which would break the assumptions of the `is_known_utf8` field.
unsafe { Wtf8::from_mut_bytes_unchecked(&mut self.bytes) }
}
@@ -236,7 +268,8 @@ impl Wtf8Buf {
/// in the given `Wtf8Buf`. The `Wtf8Buf` may reserve more space to avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// capacity is already sufficient. This method preserves the contents even
+ /// if an error occurs.
///
/// # Errors
///
@@ -313,7 +346,15 @@ impl Wtf8Buf {
self.push_char(decode_surrogate_pair(lead, trail));
self.bytes.extend_from_slice(other_without_trail_surrogate);
}
- _ => self.bytes.extend_from_slice(&other.bytes),
+ _ => {
+ // If we'll be pushing a string containing a surrogate, we may
+ // no longer have UTF-8.
+ if other.next_surrogate(0).is_some() {
+ self.is_known_utf8 = false;
+ }
+
+ self.bytes.extend_from_slice(&other.bytes);
+ }
}
}
@@ -330,13 +371,19 @@ impl Wtf8Buf {
/// like concatenating ill-formed UTF-16 strings effectively would.
#[inline]
pub fn push(&mut self, code_point: CodePoint) {
- if let trail @ 0xDC00..=0xDFFF = code_point.to_u32() {
+ if let Some(trail) = code_point.to_trail_surrogate() {
if let Some(lead) = (&*self).final_lead_surrogate() {
let len_without_lead_surrogate = self.len() - 3;
self.bytes.truncate(len_without_lead_surrogate);
- self.push_char(decode_surrogate_pair(lead, trail as u16));
+ self.push_char(decode_surrogate_pair(lead, trail));
return;
}
+
+ // We're pushing a trailing surrogate.
+ self.is_known_utf8 = false;
+ } else if code_point.to_lead_surrogate().is_some() {
+ // We're pushing a leading surrogate.
+ self.is_known_utf8 = false;
}
// No newly paired surrogates at the boundary.
@@ -363,9 +410,10 @@ impl Wtf8Buf {
/// (that is, if the string contains surrogates),
/// the original WTF-8 string is returned instead.
pub fn into_string(self) -> Result<String, Wtf8Buf> {
- match self.next_surrogate(0) {
- None => Ok(unsafe { String::from_utf8_unchecked(self.bytes) }),
- Some(_) => Err(self),
+ if self.is_known_utf8 || self.next_surrogate(0).is_none() {
+ Ok(unsafe { String::from_utf8_unchecked(self.bytes) })
+ } else {
+ Err(self)
}
}
@@ -375,6 +423,11 @@ impl Wtf8Buf {
///
/// Surrogates are replaced with `"\u{FFFD}"` (the replacement character "ļæ½")
pub fn into_string_lossy(mut self) -> String {
+ // Fast path: If we already have UTF-8, we can return it immediately.
+ if self.is_known_utf8 {
+ return unsafe { String::from_utf8_unchecked(self.bytes) };
+ }
+
let mut pos = 0;
loop {
match self.next_surrogate(pos) {
@@ -397,7 +450,7 @@ impl Wtf8Buf {
/// Converts a `Box<Wtf8>` into a `Wtf8Buf`.
pub fn from_box(boxed: Box<Wtf8>) -> Wtf8Buf {
let bytes: Box<[u8]> = unsafe { mem::transmute(boxed) };
- Wtf8Buf { bytes: bytes.into_vec() }
+ Wtf8Buf { bytes: bytes.into_vec(), is_known_utf8: false }
}
}
@@ -575,6 +628,11 @@ impl Wtf8 {
}
}
+ /// Creates an owned `Wtf8Buf` from a borrowed `Wtf8`.
+ pub fn to_owned(&self) -> Wtf8Buf {
+ Wtf8Buf { bytes: self.bytes.to_vec(), is_known_utf8: false }
+ }
+
/// Lossily converts the string to UTF-8.
/// Returns a UTF-8 `&str` slice if the contents are well-formed in UTF-8.
///
@@ -664,7 +722,8 @@ impl Wtf8 {
}
pub fn clone_into(&self, buf: &mut Wtf8Buf) {
- self.bytes.clone_into(&mut buf.bytes)
+ buf.is_known_utf8 = false;
+ self.bytes.clone_into(&mut buf.bytes);
}
/// Boxes this `Wtf8`.
@@ -704,12 +763,12 @@ impl Wtf8 {
#[inline]
pub fn to_ascii_lowercase(&self) -> Wtf8Buf {
- Wtf8Buf { bytes: self.bytes.to_ascii_lowercase() }
+ Wtf8Buf { bytes: self.bytes.to_ascii_lowercase(), is_known_utf8: false }
}
#[inline]
pub fn to_ascii_uppercase(&self) -> Wtf8Buf {
- Wtf8Buf { bytes: self.bytes.to_ascii_uppercase() }
+ Wtf8Buf { bytes: self.bytes.to_ascii_uppercase(), is_known_utf8: false }
}
#[inline]
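
The `is_known_utf8` flag only needs to go false when an unpaired surrogate can end up in the buffer; a lead surrogate followed immediately by a trail surrogate pairs up into an ordinary scalar value. The same pairing rules can be seen with the stable `char::decode_utf16` API, as a small sketch:

    fn main() {
        // A lead surrogate followed immediately by a trail surrogate pairs up
        // into a single scalar value, so the WTF-8 bytes stay valid UTF-8.
        let mut paired = char::decode_utf16([0xD83Du16, 0xDCA9]);
        assert_eq!(paired.next().unwrap().unwrap(), 'šŸ’©');
        assert!(paired.next().is_none());

        // A lone lead surrogate has no scalar value; WTF-8 still encodes it,
        // but the buffer can no longer be assumed to be valid UTF-8.
        let mut unpaired = char::decode_utf16([0xD83Du16, 0x20]);
        assert!(unpaired.next().unwrap().is_err());
        assert_eq!(unpaired.next().unwrap().unwrap(), ' ');
    }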
diff --git a/library/std/src/sys_common/wtf8/tests.rs b/library/std/src/sys_common/wtf8/tests.rs
index 931996791..1a302d646 100644
--- a/library/std/src/sys_common/wtf8/tests.rs
+++ b/library/std/src/sys_common/wtf8/tests.rs
@@ -20,6 +20,36 @@ fn code_point_to_u32() {
}
#[test]
+fn code_point_to_lead_surrogate() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0).to_lead_surrogate(), None);
+ assert_eq!(c(0xE9).to_lead_surrogate(), None);
+ assert_eq!(c(0xD800).to_lead_surrogate(), Some(0xD800));
+ assert_eq!(c(0xDBFF).to_lead_surrogate(), Some(0xDBFF));
+ assert_eq!(c(0xDC00).to_lead_surrogate(), None);
+ assert_eq!(c(0xDFFF).to_lead_surrogate(), None);
+ assert_eq!(c(0x1F4A9).to_lead_surrogate(), None);
+ assert_eq!(c(0x10FFFF).to_lead_surrogate(), None);
+}
+
+#[test]
+fn code_point_to_trail_surrogate() {
+ fn c(value: u32) -> CodePoint {
+ CodePoint::from_u32(value).unwrap()
+ }
+ assert_eq!(c(0).to_trail_surrogate(), None);
+ assert_eq!(c(0xE9).to_trail_surrogate(), None);
+ assert_eq!(c(0xD800).to_trail_surrogate(), None);
+ assert_eq!(c(0xDBFF).to_trail_surrogate(), None);
+ assert_eq!(c(0xDC00).to_trail_surrogate(), Some(0xDC00));
+ assert_eq!(c(0xDFFF).to_trail_surrogate(), Some(0xDFFF));
+ assert_eq!(c(0x1F4A9).to_trail_surrogate(), None);
+ assert_eq!(c(0x10FFFF).to_trail_surrogate(), None);
+}
+
+#[test]
fn code_point_from_char() {
assert_eq!(CodePoint::from_char('a').to_u32(), 0x61);
assert_eq!(CodePoint::from_char('šŸ’©').to_u32(), 0x1F4A9);
@@ -70,35 +100,66 @@ fn wtf8buf_from_string() {
#[test]
fn wtf8buf_from_wide() {
- assert_eq!(Wtf8Buf::from_wide(&[]).bytes, b"");
- assert_eq!(
- Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]).bytes,
- b"a\xC3\xA9 \xED\xA0\xBD\xF0\x9F\x92\xA9"
- );
+ let buf = Wtf8Buf::from_wide(&[]);
+ assert_eq!(buf.bytes, b"");
+ assert!(buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xDCA9]);
+ assert_eq!(buf.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0x61, 0xE9, 0x20, 0xD83D, 0xD83D, 0xDCA9]);
+ assert_eq!(buf.bytes, b"a\xC3\xA9 \xED\xA0\xBD\xF0\x9F\x92\xA9");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xD800]);
+ assert_eq!(buf.bytes, b"\xED\xA0\x80");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDBFF]);
+ assert_eq!(buf.bytes, b"\xED\xAF\xBF");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDC00]);
+ assert_eq!(buf.bytes, b"\xED\xB0\x80");
+ assert!(!buf.is_known_utf8);
+
+ let buf = Wtf8Buf::from_wide(&[0xDFFF]);
+ assert_eq!(buf.bytes, b"\xED\xBF\xBF");
+ assert!(!buf.is_known_utf8);
}
#[test]
fn wtf8buf_push_str() {
let mut string = Wtf8Buf::new();
assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
+
string.push_str("aĆ© šŸ’©");
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
}
#[test]
fn wtf8buf_push_char() {
let mut string = Wtf8Buf::from_str("aƩ ");
assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ assert!(string.is_known_utf8);
+
string.push_char('šŸ’©');
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
}
#[test]
fn wtf8buf_push() {
let mut string = Wtf8Buf::from_str("aƩ ");
assert_eq!(string.bytes, b"a\xC3\xA9 ");
+ assert!(string.is_known_utf8);
+
string.push(CodePoint::from_char('šŸ’©'));
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
fn c(value: u32) -> CodePoint {
CodePoint::from_u32(value).unwrap()
@@ -106,37 +167,46 @@ fn wtf8buf_push() {
let mut string = Wtf8Buf::new();
string.push(c(0xD83D)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xDCA9)); // trail
assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9"); // Magic!
let mut string = Wtf8Buf::new();
string.push(c(0xD83D)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0x20)); // not surrogate
string.push(c(0xDCA9)); // trail
assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
let mut string = Wtf8Buf::new();
string.push(c(0xD800)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xDBFF)); // lead
assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
let mut string = Wtf8Buf::new();
string.push(c(0xD800)); // lead
+ assert!(!string.is_known_utf8);
string.push(c(0xE000)); // not surrogate
assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
let mut string = Wtf8Buf::new();
string.push(c(0xD7FF)); // not surrogate
+ assert!(string.is_known_utf8);
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
let mut string = Wtf8Buf::new();
string.push(c(0x61)); // not surrogate, < 3 bytes
+ assert!(string.is_known_utf8);
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
let mut string = Wtf8Buf::new();
string.push(c(0xDC00)); // trail
+ assert!(!string.is_known_utf8);
assert_eq!(string.bytes, b"\xED\xB0\x80");
}
@@ -146,6 +216,7 @@ fn wtf8buf_push_wtf8() {
assert_eq!(string.bytes, b"a\xC3\xA9");
string.push_wtf8(Wtf8::from_str(" šŸ’©"));
assert_eq!(string.bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
fn w(v: &[u8]) -> &Wtf8 {
unsafe { Wtf8::from_bytes_unchecked(v) }
@@ -161,37 +232,68 @@ fn wtf8buf_push_wtf8() {
string.push_wtf8(w(b" ")); // not surrogate
string.push_wtf8(w(b"\xED\xB2\xA9")); // trail
assert_eq!(string.bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xA0\x80")); // lead
string.push_wtf8(w(b"\xED\xAF\xBF")); // lead
assert_eq!(string.bytes, b"\xED\xA0\x80\xED\xAF\xBF");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xA0\x80")); // lead
string.push_wtf8(w(b"\xEE\x80\x80")); // not surrogate
assert_eq!(string.bytes, b"\xED\xA0\x80\xEE\x80\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\x9F\xBF")); // not surrogate
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\xED\x9F\xBF\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"a")); // not surrogate, < 3 bytes
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\x61\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
let mut string = Wtf8Buf::new();
string.push_wtf8(w(b"\xED\xB0\x80")); // trail
assert_eq!(string.bytes, b"\xED\xB0\x80");
+ assert!(!string.is_known_utf8);
}
#[test]
fn wtf8buf_truncate() {
let mut string = Wtf8Buf::from_str("aƩ");
+ assert!(string.is_known_utf8);
+
+ string.truncate(3);
+ assert_eq!(string.bytes, b"a\xC3\xA9");
+ assert!(string.is_known_utf8);
+
string.truncate(1);
assert_eq!(string.bytes, b"a");
+ assert!(string.is_known_utf8);
+
+ string.truncate(0);
+ assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
+}
+
+#[test]
+fn wtf8buf_truncate_around_non_bmp() {
+ let mut string = Wtf8Buf::from_str("šŸ’©");
+ assert!(string.is_known_utf8);
+
+ string.truncate(4);
+ assert_eq!(string.bytes, b"\xF0\x9F\x92\xA9");
+ assert!(string.is_known_utf8);
+
+ string.truncate(0);
+ assert_eq!(string.bytes, b"");
+ assert!(string.is_known_utf8);
}
#[test]
@@ -209,10 +311,36 @@ fn wtf8buf_truncate_fail_longer() {
}
#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp3() {
+ let mut string = Wtf8Buf::from_str("šŸ’©");
+ assert!(string.is_known_utf8);
+ string.truncate(3);
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp2() {
+ let mut string = Wtf8Buf::from_str("šŸ’©");
+ assert!(string.is_known_utf8);
+ string.truncate(2);
+}
+
+#[test]
+#[should_panic]
+fn wtf8buf_truncate_splitting_non_bmp1() {
+ let mut string = Wtf8Buf::from_str("šŸ’©");
+ assert!(string.is_known_utf8);
+ string.truncate(1);
+}
+
+#[test]
fn wtf8buf_into_string() {
let mut string = Wtf8Buf::from_str("aĆ© šŸ’©");
+ assert!(string.is_known_utf8);
assert_eq!(string.clone().into_string(), Ok(String::from("aĆ© šŸ’©")));
string.push(CodePoint::from_u32(0xD800).unwrap());
+ assert!(!string.is_known_utf8);
assert_eq!(string.clone().into_string(), Err(string));
}
@@ -229,15 +357,33 @@ fn wtf8buf_from_iterator() {
fn f(values: &[u32]) -> Wtf8Buf {
values.iter().map(|&c| CodePoint::from_u32(c).unwrap()).collect::<Wtf8Buf>()
}
- assert_eq!(f(&[0x61, 0xE9, 0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert_eq!(
+ f(&[0x61, 0xE9, 0x20, 0x1F4A9]),
+ Wtf8Buf { bytes: b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec(), is_known_utf8: true }
+ );
assert_eq!(f(&[0xD83D, 0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
- assert_eq!(f(&[0xD83D, 0x20, 0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
- assert_eq!(f(&[0xD800, 0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
- assert_eq!(f(&[0xD800, 0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
- assert_eq!(f(&[0xD7FF, 0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
- assert_eq!(f(&[0x61, 0xDC00]).bytes, b"\x61\xED\xB0\x80");
- assert_eq!(f(&[0xDC00]).bytes, b"\xED\xB0\x80");
+ assert_eq!(
+ f(&[0xD83D, 0x20, 0xDCA9]),
+ Wtf8Buf { bytes: b"\xED\xA0\xBD \xED\xB2\xA9".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD800, 0xDBFF]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xED\xAF\xBF".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD800, 0xE000]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xEE\x80\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0xD7FF, 0xDC00]),
+ Wtf8Buf { bytes: b"\xED\x9F\xBF\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ f(&[0x61, 0xDC00]),
+ Wtf8Buf { bytes: b"\x61\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(f(&[0xDC00]), Wtf8Buf { bytes: b"\xED\xB0\x80".to_vec(), is_known_utf8: false });
}
#[test]
@@ -251,15 +397,36 @@ fn wtf8buf_extend() {
string
}
- assert_eq!(e(&[0x61, 0xE9], &[0x20, 0x1F4A9]).bytes, b"a\xC3\xA9 \xF0\x9F\x92\xA9");
+ assert_eq!(
+ e(&[0x61, 0xE9], &[0x20, 0x1F4A9]),
+ Wtf8Buf { bytes: b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec(), is_known_utf8: true }
+ );
assert_eq!(e(&[0xD83D], &[0xDCA9]).bytes, b"\xF0\x9F\x92\xA9"); // Magic!
- assert_eq!(e(&[0xD83D, 0x20], &[0xDCA9]).bytes, b"\xED\xA0\xBD \xED\xB2\xA9");
- assert_eq!(e(&[0xD800], &[0xDBFF]).bytes, b"\xED\xA0\x80\xED\xAF\xBF");
- assert_eq!(e(&[0xD800], &[0xE000]).bytes, b"\xED\xA0\x80\xEE\x80\x80");
- assert_eq!(e(&[0xD7FF], &[0xDC00]).bytes, b"\xED\x9F\xBF\xED\xB0\x80");
- assert_eq!(e(&[0x61], &[0xDC00]).bytes, b"\x61\xED\xB0\x80");
- assert_eq!(e(&[], &[0xDC00]).bytes, b"\xED\xB0\x80");
+ assert_eq!(
+ e(&[0xD83D, 0x20], &[0xDCA9]),
+ Wtf8Buf { bytes: b"\xED\xA0\xBD \xED\xB2\xA9".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD800], &[0xDBFF]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xED\xAF\xBF".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD800], &[0xE000]),
+ Wtf8Buf { bytes: b"\xED\xA0\x80\xEE\x80\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0xD7FF], &[0xDC00]),
+ Wtf8Buf { bytes: b"\xED\x9F\xBF\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[0x61], &[0xDC00]),
+ Wtf8Buf { bytes: b"\x61\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
+ assert_eq!(
+ e(&[], &[0xDC00]),
+ Wtf8Buf { bytes: b"\xED\xB0\x80".to_vec(), is_known_utf8: false }
+ );
}
#[test]
@@ -407,3 +574,93 @@ fn wtf8_encode_wide_size_hint() {
assert_eq!((0, Some(0)), iter.size_hint());
assert!(iter.next().is_none());
}
+
+#[test]
+fn wtf8_clone_into() {
+ let mut string = Wtf8Buf::new();
+ Wtf8::from_str("green").clone_into(&mut string);
+ assert_eq!(string.bytes, b"green");
+
+ let mut string = Wtf8Buf::from_str("green");
+ Wtf8::from_str("").clone_into(&mut string);
+ assert_eq!(string.bytes, b"");
+
+ let mut string = Wtf8Buf::from_str("red");
+ Wtf8::from_str("green").clone_into(&mut string);
+ assert_eq!(string.bytes, b"green");
+
+ let mut string = Wtf8Buf::from_str("green");
+ Wtf8::from_str("red").clone_into(&mut string);
+ assert_eq!(string.bytes, b"red");
+
+ let mut string = Wtf8Buf::from_str("green");
+ assert!(string.is_known_utf8);
+ unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").clone_into(&mut string) };
+ assert_eq!(string.bytes, b"\xED\xA0\x80");
+ assert!(!string.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_ascii_lowercase() {
+ let lowercase = Wtf8::from_str("").to_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"");
+
+ let lowercase = Wtf8::from_str("GrEeN gRaPeS! šŸ‡").to_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"green grapes! \xf0\x9f\x8d\x87");
+
+ let lowercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_ascii_lowercase() };
+ assert_eq!(lowercase.bytes, b"\xED\xA0\x80");
+ assert!(!lowercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_ascii_uppercase() {
+ let uppercase = Wtf8::from_str("").to_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"");
+
+ let uppercase = Wtf8::from_str("GrEeN gRaPeS! šŸ‡").to_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"GREEN GRAPES! \xf0\x9f\x8d\x87");
+
+ let uppercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_ascii_uppercase() };
+ assert_eq!(uppercase.bytes, b"\xED\xA0\x80");
+ assert!(!uppercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_make_ascii_lowercase() {
+ let mut lowercase = Wtf8Buf::from_str("");
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"");
+
+ let mut lowercase = Wtf8Buf::from_str("GrEeN gRaPeS! šŸ‡");
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"green grapes! \xf0\x9f\x8d\x87");
+
+ let mut lowercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ lowercase.make_ascii_lowercase();
+ assert_eq!(lowercase.bytes, b"\xED\xA0\x80");
+ assert!(!lowercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_make_ascii_uppercase() {
+ let mut uppercase = Wtf8Buf::from_str("");
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"");
+
+ let mut uppercase = Wtf8Buf::from_str("GrEeN gRaPeS! šŸ‡");
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"GREEN GRAPES! \xf0\x9f\x8d\x87");
+
+ let mut uppercase = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ uppercase.make_ascii_uppercase();
+ assert_eq!(uppercase.bytes, b"\xED\xA0\x80");
+ assert!(!uppercase.is_known_utf8);
+}
+
+#[test]
+fn wtf8_to_owned() {
+ let string = unsafe { Wtf8::from_bytes_unchecked(b"\xED\xA0\x80").to_owned() };
+ assert_eq!(string.bytes, b"\xED\xA0\x80");
+ assert!(!string.is_known_utf8);
+}