summaryrefslogtreecommitdiffstats
path: root/vendor/parking_lot_core/src/thread_parker
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/parking_lot_core/src/thread_parker')
-rw-r--r--vendor/parking_lot_core/src/thread_parker/generic.rs79
-rw-r--r--vendor/parking_lot_core/src/thread_parker/linux.rs156
-rw-r--r--vendor/parking_lot_core/src/thread_parker/mod.rs85
-rw-r--r--vendor/parking_lot_core/src/thread_parker/redox.rs139
-rw-r--r--vendor/parking_lot_core/src/thread_parker/sgx.rs94
-rw-r--r--vendor/parking_lot_core/src/thread_parker/unix.rs242
-rw-r--r--vendor/parking_lot_core/src/thread_parker/wasm.rs54
-rw-r--r--vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs97
-rw-r--r--vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs202
-rw-r--r--vendor/parking_lot_core/src/thread_parker/windows/mod.rs188
-rw-r--r--vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs138
11 files changed, 1474 insertions, 0 deletions
diff --git a/vendor/parking_lot_core/src/thread_parker/generic.rs b/vendor/parking_lot_core/src/thread_parker/generic.rs
new file mode 100644
index 000000000..990bcb7fc
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/generic.rs
@@ -0,0 +1,79 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! A simple spin lock based thread parker. Used on platforms without better
+//! parking facilities available.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use core::hint::spin_loop;
+use std::thread;
+use std::time::Instant;
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ parked: AtomicBool,
+}
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ parked: AtomicBool::new(false),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.parked.store(true, Ordering::Relaxed);
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ self.parked.load(Ordering::Relaxed) != false
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ while self.parked.load(Ordering::Acquire) != false {
+ spin_loop();
+ }
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, timeout: Instant) -> bool {
+ while self.parked.load(Ordering::Acquire) != false {
+ if Instant::now() >= timeout {
+ return false;
+ }
+ spin_loop();
+ }
+ true
+ }
+
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ self.parked.store(false, Ordering::Release);
+ UnparkHandle(())
+ }
+}
+
+pub struct UnparkHandle(());
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {}
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/linux.rs b/vendor/parking_lot_core/src/thread_parker/linux.rs
new file mode 100644
index 000000000..5d4e229ad
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/linux.rs
@@ -0,0 +1,156 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ ptr,
+ sync::atomic::{AtomicI32, Ordering},
+};
+use libc;
+use std::thread;
+use std::time::Instant;
+
+// x32 Linux uses a non-standard type for tv_nsec in timespec.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = i64;
+#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = libc::c_long;
+
+fn errno() -> libc::c_int {
+ #[cfg(target_os = "linux")]
+ unsafe {
+ *libc::__errno_location()
+ }
+ #[cfg(target_os = "android")]
+ unsafe {
+ *libc::__errno()
+ }
+}
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ futex: AtomicI32,
+}
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ futex: AtomicI32::new(0),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.futex.store(1, Ordering::Relaxed);
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ self.futex.load(Ordering::Relaxed) != 0
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ while self.futex.load(Ordering::Acquire) != 0 {
+ self.futex_wait(None);
+ }
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, timeout: Instant) -> bool {
+ while self.futex.load(Ordering::Acquire) != 0 {
+ let now = Instant::now();
+ if timeout <= now {
+ return false;
+ }
+ let diff = timeout - now;
+ if diff.as_secs() as libc::time_t as u64 != diff.as_secs() {
+ // Timeout overflowed, just sleep indefinitely
+ self.park();
+ return true;
+ }
+ let ts = libc::timespec {
+ tv_sec: diff.as_secs() as libc::time_t,
+ tv_nsec: diff.subsec_nanos() as tv_nsec_t,
+ };
+ self.futex_wait(Some(ts));
+ }
+ true
+ }
+
+ // Locks the parker to prevent the target thread from exiting. This is
+ // necessary to ensure that thread-local ThreadData objects remain valid.
+ // This should be called while holding the queue lock.
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ self.futex.store(0, Ordering::Release);
+
+ UnparkHandle { futex: &self.futex }
+ }
+}
+
+impl ThreadParker {
+ #[inline]
+ fn futex_wait(&self, ts: Option<libc::timespec>) {
+ let ts_ptr = ts
+ .as_ref()
+ .map(|ts_ref| ts_ref as *const _)
+ .unwrap_or(ptr::null());
+ let r = unsafe {
+ libc::syscall(
+ libc::SYS_futex,
+ &self.futex,
+ libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
+ 1,
+ ts_ptr,
+ )
+ };
+ debug_assert!(r == 0 || r == -1);
+ if r == -1 {
+ debug_assert!(
+ errno() == libc::EINTR
+ || errno() == libc::EAGAIN
+ || (ts.is_some() && errno() == libc::ETIMEDOUT)
+ );
+ }
+ }
+}
+
+pub struct UnparkHandle {
+ futex: *const AtomicI32,
+}
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {
+ // The thread data may have been freed at this point, but it doesn't
+ // matter since the syscall will just return EFAULT in that case.
+ let r = libc::syscall(
+ libc::SYS_futex,
+ self.futex,
+ libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
+ 1,
+ );
+ debug_assert!(r == 0 || r == 1 || r == -1);
+ if r == -1 {
+ debug_assert_eq!(errno(), libc::EFAULT);
+ }
+ }
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/mod.rs b/vendor/parking_lot_core/src/thread_parker/mod.rs
new file mode 100644
index 000000000..fc162f4c5
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/mod.rs
@@ -0,0 +1,85 @@
+use cfg_if::cfg_if;
+use std::time::Instant;
+
+/// Trait for the platform thread parker implementation.
+///
+/// All unsafe methods are unsafe because the Unix thread parker is based on
+/// pthread mutexes and condvars. Those primitives must not be moved and used
+/// from any other memory address than the one they were located at when they
+/// were initialized. As such, it's UB to call any unsafe method on
+/// `ThreadParkerT` if the implementing instance has moved since the last
+/// call to any of the unsafe methods.
+pub trait ThreadParkerT {
+ type UnparkHandle: UnparkHandleT;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool;
+
+ fn new() -> Self;
+
+ /// Prepares the parker. This should be called before adding it to the queue.
+ unsafe fn prepare_park(&self);
+
+ /// Checks if the park timed out. This should be called while holding the
+ /// queue lock after park_until has returned false.
+ unsafe fn timed_out(&self) -> bool;
+
+ /// Parks the thread until it is unparked. This should be called after it has
+ /// been added to the queue, after unlocking the queue.
+ unsafe fn park(&self);
+
+ /// Parks the thread until it is unparked or the timeout is reached. This
+ /// should be called after it has been added to the queue, after unlocking
+ /// the queue. Returns true if we were unparked and false if we timed out.
+ unsafe fn park_until(&self, timeout: Instant) -> bool;
+
+ /// Locks the parker to prevent the target thread from exiting. This is
+ /// necessary to ensure that thread-local ThreadData objects remain valid.
+ /// This should be called while holding the queue lock.
+ unsafe fn unpark_lock(&self) -> Self::UnparkHandle;
+}
+
+/// Handle for a thread that is about to be unparked. We need to mark the thread
+/// as unparked while holding the queue lock, but we delay the actual unparking
+/// until after the queue lock is released.
+pub trait UnparkHandleT {
+ /// Wakes up the parked thread. This should be called after the queue lock is
+ /// released to avoid blocking the queue for too long.
+ ///
+ /// This method is unsafe for the same reason as the unsafe methods in
+ /// `ThreadParkerT`.
+ unsafe fn unpark(self);
+}
+
+cfg_if! {
+ if #[cfg(any(target_os = "linux", target_os = "android"))] {
+ #[path = "linux.rs"]
+ mod imp;
+ } else if #[cfg(unix)] {
+ #[path = "unix.rs"]
+ mod imp;
+ } else if #[cfg(windows)] {
+ #[path = "windows/mod.rs"]
+ mod imp;
+ } else if #[cfg(target_os = "redox")] {
+ #[path = "redox.rs"]
+ mod imp;
+ } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {
+ #[path = "sgx.rs"]
+ mod imp;
+ } else if #[cfg(all(
+ feature = "nightly",
+ target_family = "wasm",
+ target_feature = "atomics"
+ ))] {
+ #[path = "wasm_atomic.rs"]
+ mod imp;
+ } else if #[cfg(target_family = "wasm")] {
+ #[path = "wasm.rs"]
+ mod imp;
+ } else {
+ #[path = "generic.rs"]
+ mod imp;
+ }
+}
+
+pub use self::imp::{thread_yield, ThreadParker, UnparkHandle};
diff --git a/vendor/parking_lot_core/src/thread_parker/redox.rs b/vendor/parking_lot_core/src/thread_parker/redox.rs
new file mode 100644
index 000000000..fdf6bd17d
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/redox.rs
@@ -0,0 +1,139 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ ptr,
+ sync::atomic::{AtomicI32, Ordering},
+};
+use std::thread;
+use std::time::Instant;
+use syscall::{
+ call::futex,
+ data::TimeSpec,
+ error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT},
+ flag::{FUTEX_WAIT, FUTEX_WAKE},
+};
+
+const UNPARKED: i32 = 0;
+const PARKED: i32 = 1;
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ futex: AtomicI32,
+}
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ futex: AtomicI32::new(UNPARKED),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.futex.store(PARKED, Ordering::Relaxed);
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ self.futex.load(Ordering::Relaxed) != UNPARKED
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ while self.futex.load(Ordering::Acquire) != UNPARKED {
+ self.futex_wait(None);
+ }
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, timeout: Instant) -> bool {
+ while self.futex.load(Ordering::Acquire) != UNPARKED {
+ let now = Instant::now();
+ if timeout <= now {
+ return false;
+ }
+ let diff = timeout - now;
+ if diff.as_secs() > i64::max_value() as u64 {
+ // Timeout overflowed, just sleep indefinitely
+ self.park();
+ return true;
+ }
+ let ts = TimeSpec {
+ tv_sec: diff.as_secs() as i64,
+ tv_nsec: diff.subsec_nanos() as i32,
+ };
+ self.futex_wait(Some(ts));
+ }
+ true
+ }
+
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ self.futex.store(UNPARKED, Ordering::Release);
+
+ UnparkHandle { futex: self.ptr() }
+ }
+}
+
+impl ThreadParker {
+ #[inline]
+ fn futex_wait(&self, ts: Option<TimeSpec>) {
+ let ts_ptr = ts
+ .as_ref()
+ .map(|ts_ref| ts_ref as *const _)
+ .unwrap_or(ptr::null());
+ let r = unsafe {
+ futex(
+ self.ptr(),
+ FUTEX_WAIT,
+ PARKED,
+ ts_ptr as usize,
+ ptr::null_mut(),
+ )
+ };
+ match r {
+ Ok(r) => debug_assert_eq!(r, 0),
+ Err(Error { errno }) => {
+ debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT);
+ }
+ }
+ }
+
+ #[inline]
+ fn ptr(&self) -> *mut i32 {
+ &self.futex as *const AtomicI32 as *mut i32
+ }
+}
+
+pub struct UnparkHandle {
+ futex: *mut i32,
+}
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {
+ // The thread data may have been freed at this point, but it doesn't
+ // matter since the syscall will just return EFAULT in that case.
+ let r = futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut());
+ match r {
+ Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1),
+ Err(Error { errno }) => debug_assert_eq!(errno, EFAULT),
+ }
+ }
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/sgx.rs b/vendor/parking_lot_core/src/thread_parker/sgx.rs
new file mode 100644
index 000000000..bc76fe785
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/sgx.rs
@@ -0,0 +1,94 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use std::time::Instant;
+use std::{
+ io,
+ os::fortanix_sgx::{
+ thread::current as current_tcs,
+ usercalls::{
+ self,
+ raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE},
+ },
+ },
+ thread,
+};
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ parked: AtomicBool,
+ tcs: Tcs,
+}
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ parked: AtomicBool::new(false),
+ tcs: current_tcs(),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.parked.store(true, Ordering::Relaxed);
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ self.parked.load(Ordering::Relaxed)
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ while self.parked.load(Ordering::Acquire) {
+ let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE);
+ debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK);
+ }
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, _timeout: Instant) -> bool {
+ // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+ panic!("timeout not supported in SGX");
+ }
+
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ self.parked.store(false, Ordering::Release);
+ UnparkHandle(self.tcs)
+ }
+}
+
+pub struct UnparkHandle(Tcs);
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {
+ let result = usercalls::send(EV_UNPARK, Some(self.0));
+ if cfg!(debug_assertions) {
+ if let Err(error) = result {
+ // `InvalidInput` may be returned if the thread we send to has
+ // already been unparked and exited.
+ if error.kind() != io::ErrorKind::InvalidInput {
+ panic!("send returned an unexpected error: {:?}", error);
+ }
+ }
+ }
+ }
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/unix.rs b/vendor/parking_lot_core/src/thread_parker/unix.rs
new file mode 100644
index 000000000..a75e1768b
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/unix.rs
@@ -0,0 +1,242 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+use core::ptr;
+use core::{
+ cell::{Cell, UnsafeCell},
+ mem::MaybeUninit,
+};
+use libc;
+use std::time::Instant;
+use std::{thread, time::Duration};
+
+// x32 Linux uses a non-standard type for tv_nsec in timespec.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = i64;
+#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = libc::c_long;
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ should_park: Cell<bool>,
+ mutex: UnsafeCell<libc::pthread_mutex_t>,
+ condvar: UnsafeCell<libc::pthread_cond_t>,
+ initialized: Cell<bool>,
+}
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = false;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ should_park: Cell::new(false),
+ mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER),
+ condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER),
+ initialized: Cell::new(false),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.should_park.set(true);
+ if !self.initialized.get() {
+ self.init();
+ self.initialized.set(true);
+ }
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ // We need to grab the mutex here because another thread may be
+ // concurrently executing UnparkHandle::unpark, which is done without
+ // holding the queue lock.
+ let r = libc::pthread_mutex_lock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ let should_park = self.should_park.get();
+ let r = libc::pthread_mutex_unlock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ should_park
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ let r = libc::pthread_mutex_lock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ while self.should_park.get() {
+ let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
+ debug_assert_eq!(r, 0);
+ }
+ let r = libc::pthread_mutex_unlock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, timeout: Instant) -> bool {
+ let r = libc::pthread_mutex_lock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ while self.should_park.get() {
+ let now = Instant::now();
+ if timeout <= now {
+ let r = libc::pthread_mutex_unlock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ return false;
+ }
+
+ if let Some(ts) = timeout_to_timespec(timeout - now) {
+ let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts);
+ if ts.tv_sec < 0 {
+ // On some systems, negative timeouts will return EINVAL. In
+ // that case we won't sleep and will just busy loop instead,
+ // which is the best we can do.
+ debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL);
+ } else {
+ debug_assert!(r == 0 || r == libc::ETIMEDOUT);
+ }
+ } else {
+ // Timeout calculation overflowed, just sleep indefinitely
+ let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
+ debug_assert_eq!(r, 0);
+ }
+ }
+ let r = libc::pthread_mutex_unlock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+ true
+ }
+
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ let r = libc::pthread_mutex_lock(self.mutex.get());
+ debug_assert_eq!(r, 0);
+
+ UnparkHandle {
+ thread_parker: self,
+ }
+ }
+}
+
+impl ThreadParker {
+ /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
+ #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android", target_os = "espidf"))]
+ #[inline]
+ unsafe fn init(&self) {}
+
+ /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
+ #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android", target_os = "espidf")))]
+ #[inline]
+ unsafe fn init(&self) {
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr());
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ debug_assert_eq!(r, 0);
+ }
+}
+
+impl Drop for ThreadParker {
+ #[inline]
+ fn drop(&mut self) {
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs. The same applies to condvars.
+ unsafe {
+ let r = libc::pthread_mutex_destroy(self.mutex.get());
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ let r = libc::pthread_cond_destroy(self.condvar.get());
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ }
+ }
+}
+
+pub struct UnparkHandle {
+ thread_parker: *const ThreadParker,
+}
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {
+ (*self.thread_parker).should_park.set(false);
+
+ // We notify while holding the lock here to avoid races with the target
+ // thread. In particular, the thread could exit after we unlock the
+ // mutex, which would make the condvar access invalid memory.
+ let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get());
+ debug_assert_eq!(r, 0);
+ let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get());
+ debug_assert_eq!(r, 0);
+ }
+}
+
+// Returns the current time on the clock used by pthread_cond_t as a timespec.
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+#[inline]
+fn timespec_now() -> libc::timespec {
+ let mut now = MaybeUninit::<libc::timeval>::uninit();
+ let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) };
+ debug_assert_eq!(r, 0);
+ // SAFETY: We know `libc::gettimeofday` has initialized the value.
+ let now = unsafe { now.assume_init() };
+ libc::timespec {
+ tv_sec: now.tv_sec,
+ tv_nsec: now.tv_usec as tv_nsec_t * 1000,
+ }
+}
+#[cfg(not(any(target_os = "macos", target_os = "ios")))]
+#[inline]
+fn timespec_now() -> libc::timespec {
+ let mut now = MaybeUninit::<libc::timespec>::uninit();
+ let clock = if cfg!(target_os = "android") {
+ // Android doesn't support pthread_condattr_setclock, so we need to
+ // specify the timeout in CLOCK_REALTIME.
+ libc::CLOCK_REALTIME
+ } else {
+ libc::CLOCK_MONOTONIC
+ };
+ let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) };
+ debug_assert_eq!(r, 0);
+ // SAFETY: We know `libc::clock_gettime` has initialized the value.
+ unsafe { now.assume_init() }
+}
+
+// Converts a relative timeout into an absolute timeout in the clock used by
+// pthread_cond_t.
+#[inline]
+fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
+ // Handle overflows early on
+ if timeout.as_secs() > libc::time_t::max_value() as u64 {
+ return None;
+ }
+
+ let now = timespec_now();
+ let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
+ let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
+ if nsec >= 1_000_000_000 {
+ nsec -= 1_000_000_000;
+ sec = sec.and_then(|sec| sec.checked_add(1));
+ }
+
+ sec.map(|sec| libc::timespec {
+ tv_nsec: nsec,
+ tv_sec: sec,
+ })
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/wasm.rs b/vendor/parking_lot_core/src/thread_parker/wasm.rs
new file mode 100644
index 000000000..657425f46
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/wasm.rs
@@ -0,0 +1,54 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! The wasm platform can't park when atomic support is not available.
+//! So this ThreadParker just panics on any attempt to park.
+
+use std::thread;
+use std::time::Instant;
+
+pub struct ThreadParker(());
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ fn new() -> ThreadParker {
+ ThreadParker(())
+ }
+
+ unsafe fn prepare_park(&self) {
+ panic!("Parking not supported on this platform");
+ }
+
+ unsafe fn timed_out(&self) -> bool {
+ panic!("Parking not supported on this platform");
+ }
+
+ unsafe fn park(&self) {
+ panic!("Parking not supported on this platform");
+ }
+
+ unsafe fn park_until(&self, _timeout: Instant) -> bool {
+ panic!("Parking not supported on this platform");
+ }
+
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ panic!("Parking not supported on this platform");
+ }
+}
+
+pub struct UnparkHandle(());
+
+impl super::UnparkHandleT for UnparkHandle {
+ unsafe fn unpark(self) {}
+}
+
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs b/vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs
new file mode 100644
index 000000000..f332affc4
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/wasm_atomic.rs
@@ -0,0 +1,97 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ arch::wasm32,
+ sync::atomic::{AtomicI32, Ordering},
+};
+use std::time::{Duration, Instant};
+use std::{convert::TryFrom, thread};
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ parked: AtomicI32,
+}
+
+const UNPARKED: i32 = 0;
+const PARKED: i32 = 1;
+
+impl super::ThreadParkerT for ThreadParker {
+ type UnparkHandle = UnparkHandle;
+
+ const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ fn new() -> ThreadParker {
+ ThreadParker {
+ parked: AtomicI32::new(UNPARKED),
+ }
+ }
+
+ #[inline]
+ unsafe fn prepare_park(&self) {
+ self.parked.store(PARKED, Ordering::Relaxed);
+ }
+
+ #[inline]
+ unsafe fn timed_out(&self) -> bool {
+ self.parked.load(Ordering::Relaxed) == PARKED
+ }
+
+ #[inline]
+ unsafe fn park(&self) {
+ while self.parked.load(Ordering::Acquire) == PARKED {
+ let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, -1);
+ // we should have either woken up (0) or got a not-equal due to a
+ // race (1). We should never time out (2)
+ debug_assert!(r == 0 || r == 1);
+ }
+ }
+
+ #[inline]
+ unsafe fn park_until(&self, timeout: Instant) -> bool {
+ while self.parked.load(Ordering::Acquire) == PARKED {
+ if let Some(left) = timeout.checked_duration_since(Instant::now()) {
+ let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value());
+ let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, nanos_left);
+ debug_assert!(r == 0 || r == 1 || r == 2);
+ } else {
+ return false;
+ }
+ }
+ true
+ }
+
+ #[inline]
+ unsafe fn unpark_lock(&self) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ self.parked.store(UNPARKED, Ordering::Release);
+ UnparkHandle(self.ptr())
+ }
+}
+
+impl ThreadParker {
+ #[inline]
+ fn ptr(&self) -> *mut i32 {
+ &self.parked as *const AtomicI32 as *mut i32
+ }
+}
+
+pub struct UnparkHandle(*mut i32);
+
+impl super::UnparkHandleT for UnparkHandle {
+ #[inline]
+ unsafe fn unpark(self) {
+ let num_notified = wasm32::memory_atomic_notify(self.0 as *mut i32, 1);
+ debug_assert!(num_notified == 0 || num_notified == 1);
+ }
+}
+
+#[inline]
+pub fn thread_yield() {
+ thread::yield_now();
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs b/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs
new file mode 100644
index 000000000..bbe45a4cc
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/windows/keyed_event.rs
@@ -0,0 +1,202 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ ffi,
+ mem::{self, MaybeUninit},
+ ptr,
+};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::Instant;
+
+use windows_sys::Win32::{
+ Foundation::{CloseHandle, BOOLEAN, HANDLE, NTSTATUS, STATUS_SUCCESS, STATUS_TIMEOUT},
+ System::{
+ LibraryLoader::{GetModuleHandleA, GetProcAddress},
+ SystemServices::{GENERIC_READ, GENERIC_WRITE},
+ },
+};
+
+const STATE_UNPARKED: usize = 0;
+const STATE_PARKED: usize = 1;
+const STATE_TIMED_OUT: usize = 2;
+
+#[allow(non_snake_case)]
+pub struct KeyedEvent {
+ handle: HANDLE,
+ NtReleaseKeyedEvent: extern "system" fn(
+ EventHandle: HANDLE,
+ Key: *mut ffi::c_void,
+ Alertable: BOOLEAN,
+ Timeout: *mut i64,
+ ) -> NTSTATUS,
+ NtWaitForKeyedEvent: extern "system" fn(
+ EventHandle: HANDLE,
+ Key: *mut ffi::c_void,
+ Alertable: BOOLEAN,
+ Timeout: *mut i64,
+ ) -> NTSTATUS,
+}
+
+impl KeyedEvent {
+ #[inline]
+ unsafe fn wait_for(&self, key: *mut ffi::c_void, timeout: *mut i64) -> NTSTATUS {
+ (self.NtWaitForKeyedEvent)(self.handle, key, false.into(), timeout)
+ }
+
+ #[inline]
+ unsafe fn release(&self, key: *mut ffi::c_void) -> NTSTATUS {
+ (self.NtReleaseKeyedEvent)(self.handle, key, false.into(), ptr::null_mut())
+ }
+
+ #[allow(non_snake_case)]
+ pub fn create() -> Option<KeyedEvent> {
+ unsafe {
+ let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr());
+ if ntdll == 0 {
+ return None;
+ }
+
+ let NtCreateKeyedEvent =
+ GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr())?;
+ let NtReleaseKeyedEvent =
+ GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr())?;
+ let NtWaitForKeyedEvent =
+ GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr())?;
+
+ let NtCreateKeyedEvent: extern "system" fn(
+ KeyedEventHandle: *mut HANDLE,
+ DesiredAccess: u32,
+ ObjectAttributes: *mut ffi::c_void,
+ Flags: u32,
+ ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
+ let mut handle = MaybeUninit::uninit();
+ let status = NtCreateKeyedEvent(
+ handle.as_mut_ptr(),
+ GENERIC_READ | GENERIC_WRITE,
+ ptr::null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ return None;
+ }
+
+ Some(KeyedEvent {
+ handle: handle.assume_init(),
+ NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
+ NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
+ })
+ }
+ }
+
+ #[inline]
+ pub fn prepare_park(&'static self, key: &AtomicUsize) {
+ key.store(STATE_PARKED, Ordering::Relaxed);
+ }
+
+ #[inline]
+ pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+ key.load(Ordering::Relaxed) == STATE_TIMED_OUT
+ }
+
+ #[inline]
+ pub unsafe fn park(&'static self, key: &AtomicUsize) {
+ let status = self.wait_for(key as *const _ as *mut ffi::c_void, ptr::null_mut());
+ debug_assert_eq!(status, STATUS_SUCCESS);
+ }
+
+ #[inline]
+ pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
+ let now = Instant::now();
+ if timeout <= now {
+ // If another thread unparked us, we need to call
+ // NtWaitForKeyedEvent otherwise that thread will stay stuck at
+ // NtReleaseKeyedEvent.
+ if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
+ self.park(key);
+ return true;
+ }
+ return false;
+ }
+
+ // NT uses a timeout in units of 100ns. We use a negative value to
+ // indicate a relative timeout based on a monotonic clock.
+ let diff = timeout - now;
+ let value = (diff.as_secs() as i64)
+ .checked_mul(-10000000)
+ .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100));
+
+ let mut nt_timeout = match value {
+ Some(x) => x,
+ None => {
+ // Timeout overflowed, just sleep indefinitely
+ self.park(key);
+ return true;
+ }
+ };
+
+ let status = self.wait_for(key as *const _ as *mut ffi::c_void, &mut nt_timeout);
+ if status == STATUS_SUCCESS {
+ return true;
+ }
+ debug_assert_eq!(status, STATUS_TIMEOUT);
+
+ // If another thread unparked us, we need to call NtWaitForKeyedEvent
+ // otherwise that thread will stay stuck at NtReleaseKeyedEvent.
+ if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
+ self.park(key);
+ return true;
+ }
+ false
+ }
+
+ #[inline]
+ pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
+ // If the state was STATE_PARKED then we need to wake up the thread
+ if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
+ UnparkHandle {
+ key: key,
+ keyed_event: self,
+ }
+ } else {
+ UnparkHandle {
+ key: ptr::null(),
+ keyed_event: self,
+ }
+ }
+ }
+}
+
+impl Drop for KeyedEvent {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ let ok = CloseHandle(self.handle);
+ debug_assert_eq!(ok, true.into());
+ }
+ }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle {
+ key: *const AtomicUsize,
+ keyed_event: &'static KeyedEvent,
+}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub unsafe fn unpark(self) {
+ if !self.key.is_null() {
+ let status = self.keyed_event.release(self.key as *mut ffi::c_void);
+ debug_assert_eq!(status, STATUS_SUCCESS);
+ }
+ }
+}
diff --git a/vendor/parking_lot_core/src/thread_parker/windows/mod.rs b/vendor/parking_lot_core/src/thread_parker/windows/mod.rs
new file mode 100644
index 000000000..1f5ed2378
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/windows/mod.rs
@@ -0,0 +1,188 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ ptr,
+ sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
+use std::time::Instant;
+
+mod keyed_event;
+mod waitaddress;
+
// The two Windows parking backends, chosen at runtime: WaitOnAddress/
// WakeByAddress (Windows 8+) with a fallback to NT keyed events (WinXP+).
enum Backend {
    KeyedEvent(keyed_event::KeyedEvent),
    WaitAddress(waitaddress::WaitAddress),
}

// Lazily-initialized process-wide backend; null until the first call to
// Backend::get() publishes a heap-allocated Backend that is never freed.
static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
+
+impl Backend {
+ #[inline]
+ fn get() -> &'static Backend {
+ // Fast path: use the existing object
+ let backend_ptr = BACKEND.load(Ordering::Acquire);
+ if !backend_ptr.is_null() {
+ return unsafe { &*backend_ptr };
+ };
+
+ Backend::create()
+ }
+
+ #[cold]
+ fn create() -> &'static Backend {
+ // Try to create a new Backend
+ let backend;
+ if let Some(waitaddress) = waitaddress::WaitAddress::create() {
+ backend = Backend::WaitAddress(waitaddress);
+ } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() {
+ backend = Backend::KeyedEvent(keyed_event);
+ } else {
+ panic!(
+ "parking_lot requires either NT Keyed Events (WinXP+) or \
+ WaitOnAddress/WakeByAddress (Win8+)"
+ );
+ }
+
+ // Try to set our new Backend as the global one
+ let backend_ptr = Box::into_raw(Box::new(backend));
+ match BACKEND.compare_exchange(
+ ptr::null_mut(),
+ backend_ptr,
+ Ordering::Release,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => unsafe { &*backend_ptr },
+ Err(global_backend_ptr) => {
+ unsafe {
+ // We lost the race, free our object and return the global one
+ Box::from_raw(backend_ptr);
+ &*global_backend_ptr
+ }
+ }
+ }
+ }
+}
+
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    // Per-parker state word; its values are interpreted by whichever backend
    // is in use.
    key: AtomicUsize,
    // The process-wide backend implementation (WaitOnAddress or keyed events).
    backend: &'static Backend,
}
+
+impl ThreadParker {
+ pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ pub fn new() -> ThreadParker {
+ // Initialize the backend here to ensure we don't get any panics
+ // later on, which could leave synchronization primitives in a broken
+ // state.
+ ThreadParker {
+ key: AtomicUsize::new(0),
+ backend: Backend::get(),
+ }
+ }
+
+ // Prepares the parker. This should be called before adding it to the queue.
+ #[inline]
+ pub fn prepare_park(&self) {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
+ Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
+ }
+ }
+
+ // Checks if the park timed out. This should be called while holding the
+ // queue lock after park_until has returned false.
+ #[inline]
+ pub fn timed_out(&self) -> bool {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
+ Backend::WaitAddress(ref x) => x.timed_out(&self.key),
+ }
+ }
+
+ // Parks the thread until it is unparked. This should be called after it has
+ // been added to the queue, after unlocking the queue.
+ #[inline]
+ pub unsafe fn park(&self) {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.park(&self.key),
+ Backend::WaitAddress(ref x) => x.park(&self.key),
+ }
+ }
+
+ // Parks the thread until it is unparked or the timeout is reached. This
+ // should be called after it has been added to the queue, after unlocking
+ // the queue. Returns true if we were unparked and false if we timed out.
+ #[inline]
+ pub unsafe fn park_until(&self, timeout: Instant) -> bool {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
+ Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout),
+ }
+ }
+
+ // Locks the parker to prevent the target thread from exiting. This is
+ // necessary to ensure that thread-local ThreadData objects remain valid.
+ // This should be called while holding the queue lock.
+ #[inline]
+ pub unsafe fn unpark_lock(&self) -> UnparkHandle {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
+ Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)),
+ }
+ }
+}
+
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub enum UnparkHandle {
    // Wakeup deferred to the keyed-event backend.
    KeyedEvent(keyed_event::UnparkHandle),
    // Wakeup deferred to the WaitOnAddress backend.
    WaitAddress(waitaddress::UnparkHandle),
}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub unsafe fn unpark(self) {
+ match self {
+ UnparkHandle::KeyedEvent(x) => x.unpark(),
+ UnparkHandle::WaitAddress(x) => x.unpark(),
+ }
+ }
+}
+
// Yields the rest of the current timeslice to the OS
#[inline]
pub fn thread_yield() {
    // `Sleep` is declared by hand here instead of going through `winapi`: the
    // `winapi` definition comes from the `synchapi` header, which enables the
    // "synchronization.lib" library. That activation is unnecessary because
    // `Sleep` actually comes from `kernel32.dll`, and on MinGW the
    // libsynchronization.a import library pulls in a dependency on a newer
    // DLL that older Windows versions do not ship. This was originally
    // identified in rust-lang/rust (see rust-lang/rust#49438).
    //
    // This is a bit of a hack for now; ideally MinGW's own import libraries
    // would be fixed, but that will take far longer than patching this here
    // and avoiding the `synchapi` feature entirely.
    extern "system" {
        fn Sleep(a: u32);
    }
    // Sleep(0) rather than SwitchToThread: SwitchToThread does not consider
    // all threads in the system, so the thread we are waiting for may never
    // get selected.
    unsafe {
        Sleep(0);
    }
}
diff --git a/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs b/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs
new file mode 100644
index 000000000..dde0db7b6
--- /dev/null
+++ b/vendor/parking_lot_core/src/thread_parker/windows/waitaddress.rs
@@ -0,0 +1,138 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ mem,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+use std::{ffi, time::Instant};
+use windows_sys::Win32::{
+ Foundation::{GetLastError, BOOL, ERROR_TIMEOUT},
+ System::{
+ LibraryLoader::{GetModuleHandleA, GetProcAddress},
+ WindowsProgramming::INFINITE,
+ },
+};
+
// Dynamically-loaded WaitOnAddress/WakeByAddressSingle entry points
// (available on Windows 8+; see `create`).
#[allow(non_snake_case)]
pub struct WaitAddress {
    // Blocks the caller while the value at Address still matches the value at
    // CompareAddress, or until dwMilliseconds elapses.
    WaitOnAddress: extern "system" fn(
        Address: *mut ffi::c_void,
        CompareAddress: *mut ffi::c_void,
        AddressSize: usize,
        dwMilliseconds: u32,
    ) -> BOOL,
    // Wakes a single thread waiting on the given address.
    WakeByAddressSingle: extern "system" fn(Address: *mut ffi::c_void),
}
+
+impl WaitAddress {
+ #[allow(non_snake_case)]
+ pub fn create() -> Option<WaitAddress> {
+ unsafe {
+ // MSDN claims that that WaitOnAddress and WakeByAddressSingle are
+ // located in kernel32.dll, but they are lying...
+ let synch_dll =
+ GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr());
+ if synch_dll == 0 {
+ return None;
+ }
+
+ let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr())?;
+ let WakeByAddressSingle =
+ GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr())?;
+
+ Some(WaitAddress {
+ WaitOnAddress: mem::transmute(WaitOnAddress),
+ WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
+ })
+ }
+ }
+
+ #[inline]
+ pub fn prepare_park(&'static self, key: &AtomicUsize) {
+ key.store(1, Ordering::Relaxed);
+ }
+
+ #[inline]
+ pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+ key.load(Ordering::Relaxed) != 0
+ }
+
+ #[inline]
+ pub fn park(&'static self, key: &AtomicUsize) {
+ while key.load(Ordering::Acquire) != 0 {
+ let r = self.wait_on_address(key, INFINITE);
+ debug_assert!(r == true.into());
+ }
+ }
+
+ #[inline]
+ pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
+ while key.load(Ordering::Acquire) != 0 {
+ let now = Instant::now();
+ if timeout <= now {
+ return false;
+ }
+ let diff = timeout - now;
+ let timeout = diff
+ .as_secs()
+ .checked_mul(1000)
+ .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
+ .map(|ms| {
+ if ms > std::u32::MAX as u64 {
+ INFINITE
+ } else {
+ ms as u32
+ }
+ })
+ .unwrap_or(INFINITE);
+ if self.wait_on_address(key, timeout) == false.into() {
+ debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
+ }
+ }
+ true
+ }
+
+ #[inline]
+ pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ key.store(0, Ordering::Release);
+
+ UnparkHandle {
+ key: key,
+ waitaddress: self,
+ }
+ }
+
+ #[inline]
+ fn wait_on_address(&'static self, key: &AtomicUsize, timeout: u32) -> BOOL {
+ let cmp = 1usize;
+ (self.WaitOnAddress)(
+ key as *const _ as *mut ffi::c_void,
+ &cmp as *const _ as *mut ffi::c_void,
+ mem::size_of::<usize>(),
+ timeout,
+ )
+ }
+}
+
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
    // Address of the key the parked thread is waiting on.
    key: *const AtomicUsize,
    // Backend whose WakeByAddressSingle performs the deferred wakeup.
    waitaddress: &'static WaitAddress,
}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub fn unpark(self) {
+ (self.waitaddress.WakeByAddressSingle)(self.key as *mut ffi::c_void);
+ }
+}