Diffstat (limited to 'vendor/parking_lot_core-0.8.6/src/thread_parker/windows')
-rw-r--r--  vendor/parking_lot_core-0.8.6/src/thread_parker/windows/keyed_event.rs | 218
-rw-r--r--  vendor/parking_lot_core-0.8.6/src/thread_parker/windows/mod.rs         | 188
-rw-r--r--  vendor/parking_lot_core-0.8.6/src/thread_parker/windows/waitaddress.rs | 149
3 files changed, 555 insertions, 0 deletions
diff --git a/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/keyed_event.rs b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/keyed_event.rs
new file mode 100644
index 000000000..7c371537c
--- /dev/null
+++ b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/keyed_event.rs
@@ -0,0 +1,218 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ mem::{self, MaybeUninit},
+ ptr,
+};
+use instant::Instant;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use winapi::{
+ shared::{
+ minwindef::{TRUE, ULONG},
+ ntdef::NTSTATUS,
+ ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT},
+ },
+ um::{
+ handleapi::CloseHandle,
+ libloaderapi::{GetModuleHandleA, GetProcAddress},
+ winnt::{
+ ACCESS_MASK, BOOLEAN, GENERIC_READ, GENERIC_WRITE, HANDLE, LARGE_INTEGER, LPCSTR,
+ PHANDLE, PLARGE_INTEGER, PVOID,
+ },
+ },
+};
+
+const STATE_UNPARKED: usize = 0;
+const STATE_PARKED: usize = 1;
+const STATE_TIMED_OUT: usize = 2;
+
+#[allow(non_snake_case)]
+pub struct KeyedEvent {
+ handle: HANDLE,
+ NtReleaseKeyedEvent: extern "system" fn(
+ EventHandle: HANDLE,
+ Key: PVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER,
+ ) -> NTSTATUS,
+ NtWaitForKeyedEvent: extern "system" fn(
+ EventHandle: HANDLE,
+ Key: PVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER,
+ ) -> NTSTATUS,
+}
+
+impl KeyedEvent {
+ #[inline]
+ unsafe fn wait_for(&self, key: PVOID, timeout: PLARGE_INTEGER) -> NTSTATUS {
+ (self.NtWaitForKeyedEvent)(self.handle, key, 0, timeout)
+ }
+
+ #[inline]
+ unsafe fn release(&self, key: PVOID) -> NTSTATUS {
+ (self.NtReleaseKeyedEvent)(self.handle, key, 0, ptr::null_mut())
+ }
+
+ #[allow(non_snake_case)]
+ pub fn create() -> Option<KeyedEvent> {
+ unsafe {
+ let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR);
+ if ntdll.is_null() {
+ return None;
+ }
+
+ let NtCreateKeyedEvent =
+ GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR);
+ if NtCreateKeyedEvent.is_null() {
+ return None;
+ }
+ let NtReleaseKeyedEvent =
+ GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR);
+ if NtReleaseKeyedEvent.is_null() {
+ return None;
+ }
+ let NtWaitForKeyedEvent =
+ GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR);
+ if NtWaitForKeyedEvent.is_null() {
+ return None;
+ }
+
+ let NtCreateKeyedEvent: extern "system" fn(
+ KeyedEventHandle: PHANDLE,
+ DesiredAccess: ACCESS_MASK,
+ ObjectAttributes: PVOID,
+ Flags: ULONG,
+ ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
+ let mut handle = MaybeUninit::uninit();
+ let status = NtCreateKeyedEvent(
+ handle.as_mut_ptr(),
+ GENERIC_READ | GENERIC_WRITE,
+ ptr::null_mut(),
+ 0,
+ );
+ if status != STATUS_SUCCESS {
+ return None;
+ }
+
+ Some(KeyedEvent {
+ handle: handle.assume_init(),
+ NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
+ NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
+ })
+ }
+ }
+
+ #[inline]
+ pub fn prepare_park(&'static self, key: &AtomicUsize) {
+ key.store(STATE_PARKED, Ordering::Relaxed);
+ }
+
+ #[inline]
+ pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+ key.load(Ordering::Relaxed) == STATE_TIMED_OUT
+ }
+
+ #[inline]
+ pub unsafe fn park(&'static self, key: &AtomicUsize) {
+ let status = self.wait_for(key as *const _ as PVOID, ptr::null_mut());
+ debug_assert_eq!(status, STATUS_SUCCESS);
+ }
+
+ #[inline]
+ pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
+ let now = Instant::now();
+ if timeout <= now {
+ // If another thread unparked us, we need to call
+ // NtWaitForKeyedEvent otherwise that thread will stay stuck at
+ // NtReleaseKeyedEvent.
+ if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
+ self.park(key);
+ return true;
+ }
+ return false;
+ }
+
+ // NT uses a timeout in units of 100ns. We use a negative value to
+ // indicate a relative timeout based on a monotonic clock.
+ let mut nt_timeout: LARGE_INTEGER = mem::zeroed();
+ let diff = timeout - now;
+ let value = (diff.as_secs() as i64)
+ .checked_mul(-10000000)
+ .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100));
+
+ match value {
+ Some(x) => *nt_timeout.QuadPart_mut() = x,
+ None => {
+ // Timeout overflowed, just sleep indefinitely
+ self.park(key);
+ return true;
+ }
+ };
+
+ let status = self.wait_for(key as *const _ as PVOID, &mut nt_timeout);
+ if status == STATUS_SUCCESS {
+ return true;
+ }
+ debug_assert_eq!(status, STATUS_TIMEOUT);
+
+ // If another thread unparked us, we need to call NtWaitForKeyedEvent
+ // otherwise that thread will stay stuck at NtReleaseKeyedEvent.
+ if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
+ self.park(key);
+ return true;
+ }
+ false
+ }
+
+ #[inline]
+ pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
+ // If the state was STATE_PARKED then we need to wake up the thread
+ if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
+ UnparkHandle {
+ key: key,
+ keyed_event: self,
+ }
+ } else {
+ UnparkHandle {
+ key: ptr::null(),
+ keyed_event: self,
+ }
+ }
+ }
+}
+
+impl Drop for KeyedEvent {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ let ok = CloseHandle(self.handle);
+ debug_assert_eq!(ok, TRUE);
+ }
+ }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle {
+ key: *const AtomicUsize,
+ keyed_event: &'static KeyedEvent,
+}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub unsafe fn unpark(self) {
+ if !self.key.is_null() {
+ let status = self.keyed_event.release(self.key as PVOID);
+ debug_assert_eq!(status, STATUS_SUCCESS);
+ }
+ }
+}
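A note on the timeout handling in park_until above: NT kernel waits take a LARGE_INTEGER in units of 100 ns, and a negative value means a timeout relative to now on a monotonic clock. The standalone sketch below (a hypothetical helper, not part of the crate) reproduces that conversion, rounding the sub-second part up so the wait can never expire early and reporting overflow as None, which park_until treats as "sleep indefinitely".

use std::time::Duration;

/// Convert a remaining Duration into the negative 100-ns tick count that a
/// relative NtWaitForKeyedEvent timeout expects.
fn to_nt_relative_timeout(diff: Duration) -> Option<i64> {
    (diff.as_secs() as i64)
        .checked_mul(-10_000_000) // one second = 10_000_000 ticks of 100 ns
        .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100)) // round up
}

fn main() {
    // 1.5 s becomes -15_000_000 ticks of 100 ns.
    assert_eq!(to_nt_relative_timeout(Duration::from_millis(1500)), Some(-15_000_000));
    // A timeout too large for the representation overflows; park_until then
    // falls back to an indefinite wait.
    assert_eq!(to_nt_relative_timeout(Duration::from_secs(i64::MAX as u64)), None);
}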
diff --git a/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/mod.rs b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/mod.rs
new file mode 100644
index 000000000..76dbb5d49
--- /dev/null
+++ b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/mod.rs
@@ -0,0 +1,188 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ ptr,
+ sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
+use instant::Instant;
+
+mod keyed_event;
+mod waitaddress;
+
+enum Backend {
+ KeyedEvent(keyed_event::KeyedEvent),
+ WaitAddress(waitaddress::WaitAddress),
+}
+
+static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
+
+impl Backend {
+ #[inline]
+ fn get() -> &'static Backend {
+ // Fast path: use the existing object
+ let backend_ptr = BACKEND.load(Ordering::Acquire);
+ if !backend_ptr.is_null() {
+ return unsafe { &*backend_ptr };
+ };
+
+ Backend::create()
+ }
+
+ #[cold]
+ fn create() -> &'static Backend {
+ // Try to create a new Backend
+ let backend;
+ if let Some(waitaddress) = waitaddress::WaitAddress::create() {
+ backend = Backend::WaitAddress(waitaddress);
+ } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() {
+ backend = Backend::KeyedEvent(keyed_event);
+ } else {
+ panic!(
+ "parking_lot requires either NT Keyed Events (WinXP+) or \
+ WaitOnAddress/WakeByAddress (Win8+)"
+ );
+ }
+
+ // Try to set our new Backend as the global one
+ let backend_ptr = Box::into_raw(Box::new(backend));
+ match BACKEND.compare_exchange(
+ ptr::null_mut(),
+ backend_ptr,
+ Ordering::Release,
+ Ordering::Relaxed,
+ ) {
+ Ok(_) => unsafe { &*backend_ptr },
+ Err(global_backend_ptr) => {
+ unsafe {
+ // We lost the race, free our object and return the global one
+ Box::from_raw(backend_ptr);
+ &*global_backend_ptr
+ }
+ }
+ }
+ }
+}
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+ key: AtomicUsize,
+ backend: &'static Backend,
+}
+
+impl ThreadParker {
+ pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+ #[inline]
+ pub fn new() -> ThreadParker {
+ // Initialize the backend here to ensure we don't get any panics
+ // later on, which could leave synchronization primitives in a broken
+ // state.
+ ThreadParker {
+ key: AtomicUsize::new(0),
+ backend: Backend::get(),
+ }
+ }
+
+ // Prepares the parker. This should be called before adding it to the queue.
+ #[inline]
+ pub fn prepare_park(&self) {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
+ Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
+ }
+ }
+
+ // Checks if the park timed out. This should be called while holding the
+ // queue lock after park_until has returned false.
+ #[inline]
+ pub fn timed_out(&self) -> bool {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
+ Backend::WaitAddress(ref x) => x.timed_out(&self.key),
+ }
+ }
+
+ // Parks the thread until it is unparked. This should be called after it has
+ // been added to the queue, after unlocking the queue.
+ #[inline]
+ pub unsafe fn park(&self) {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.park(&self.key),
+ Backend::WaitAddress(ref x) => x.park(&self.key),
+ }
+ }
+
+ // Parks the thread until it is unparked or the timeout is reached. This
+ // should be called after it has been added to the queue, after unlocking
+ // the queue. Returns true if we were unparked and false if we timed out.
+ #[inline]
+ pub unsafe fn park_until(&self, timeout: Instant) -> bool {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
+ Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout),
+ }
+ }
+
+ // Locks the parker to prevent the target thread from exiting. This is
+ // necessary to ensure that thread-local ThreadData objects remain valid.
+ // This should be called while holding the queue lock.
+ #[inline]
+ pub unsafe fn unpark_lock(&self) -> UnparkHandle {
+ match *self.backend {
+ Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
+ Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)),
+ }
+ }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub enum UnparkHandle {
+ KeyedEvent(keyed_event::UnparkHandle),
+ WaitAddress(waitaddress::UnparkHandle),
+}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub unsafe fn unpark(self) {
+ match self {
+ UnparkHandle::KeyedEvent(x) => x.unpark(),
+ UnparkHandle::WaitAddress(x) => x.unpark(),
+ }
+ }
+}
+
+// Yields the rest of the current timeslice to the OS
+#[inline]
+pub fn thread_yield() {
+ // Note that this is manually defined here rather than using the definition
+ // through `winapi`. The `winapi` definition comes from the `synchapi`
+ // header which enables the "synchronization.lib" library. It turns out,
+ // however, that `Sleep` comes from `kernel32.dll`, so this activation isn't
+ // necessary.
+ //
+ // This was originally identified in rust-lang/rust where on MinGW the
+ // libsynchronization.a library pulls in a dependency on a newer DLL not
+ // present in older versions of Windows. (see rust-lang/rust#49438)
+ //
+ // This is a bit of a hack for now and ideally we'd fix MinGW's own import
+ // libraries, but that'll probably take a lot longer than patching this here
+ // and avoiding the `synchapi` feature entirely.
+ extern "system" {
+ fn Sleep(a: winapi::shared::minwindef::DWORD);
+ }
+ unsafe {
+ // We don't use SwitchToThread here because it doesn't consider all
+ // threads in the system and the thread we are waiting for may not get
+ // selected.
+ Sleep(0);
+ }
+}
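The backend selection in mod.rs above is a small lock-free, racy one-time initialization: any thread that finds BACKEND null builds its own Backend, races to publish it with compare_exchange, and the loser frees its copy and uses the winner's. The sketch below shows the same pattern in isolation; the names (Config, CONFIG, get_config) are illustrative only, and it uses the AcqRel/Acquire ordering pair, a slightly more conservative choice than the Release/Relaxed pair used above.

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct Config {
    value: u32,
}

static CONFIG: AtomicPtr<Config> = AtomicPtr::new(ptr::null_mut());

fn get_config() -> &'static Config {
    // Fast path: another thread already published a Config.
    let existing = CONFIG.load(Ordering::Acquire);
    if !existing.is_null() {
        return unsafe { &*existing };
    }

    // Slow path: build a candidate and race to publish it.
    let candidate = Box::into_raw(Box::new(Config { value: 42 }));
    match CONFIG.compare_exchange(
        ptr::null_mut(),
        candidate,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        Ok(_) => unsafe { &*candidate },
        Err(winner) => {
            // Lost the race: free our candidate and use the published one.
            unsafe { drop(Box::from_raw(candidate)) };
            unsafe { &*winner }
        }
    }
}

fn main() {
    assert_eq!(get_config().value, 42);
}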
diff --git a/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/waitaddress.rs b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/waitaddress.rs
new file mode 100644
index 000000000..862c5c652
--- /dev/null
+++ b/vendor/parking_lot_core-0.8.6/src/thread_parker/windows/waitaddress.rs
@@ -0,0 +1,149 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+ mem,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+use instant::Instant;
+use winapi::{
+ shared::{
+ basetsd::SIZE_T,
+ minwindef::{BOOL, DWORD, FALSE, TRUE},
+ winerror::ERROR_TIMEOUT,
+ },
+ um::{
+ errhandlingapi::GetLastError,
+ libloaderapi::{GetModuleHandleA, GetProcAddress},
+ winbase::INFINITE,
+ winnt::{LPCSTR, PVOID},
+ },
+};
+
+#[allow(non_snake_case)]
+pub struct WaitAddress {
+ WaitOnAddress: extern "system" fn(
+ Address: PVOID,
+ CompareAddress: PVOID,
+ AddressSize: SIZE_T,
+ dwMilliseconds: DWORD,
+ ) -> BOOL,
+ WakeByAddressSingle: extern "system" fn(Address: PVOID),
+}
+
+impl WaitAddress {
+ #[allow(non_snake_case)]
+ pub fn create() -> Option<WaitAddress> {
+ unsafe {
+ // MSDN claims that WaitOnAddress and WakeByAddressSingle are
+ // located in kernel32.dll, but they are lying...
+ let synch_dll =
+ GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR);
+ if synch_dll.is_null() {
+ return None;
+ }
+
+ let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR);
+ if WaitOnAddress.is_null() {
+ return None;
+ }
+ let WakeByAddressSingle =
+ GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR);
+ if WakeByAddressSingle.is_null() {
+ return None;
+ }
+ Some(WaitAddress {
+ WaitOnAddress: mem::transmute(WaitOnAddress),
+ WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
+ })
+ }
+ }
+
+ #[inline]
+ pub fn prepare_park(&'static self, key: &AtomicUsize) {
+ key.store(1, Ordering::Relaxed);
+ }
+
+ #[inline]
+ pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+ key.load(Ordering::Relaxed) != 0
+ }
+
+ #[inline]
+ pub fn park(&'static self, key: &AtomicUsize) {
+ while key.load(Ordering::Acquire) != 0 {
+ let r = self.wait_on_address(key, INFINITE);
+ debug_assert!(r == TRUE);
+ }
+ }
+
+ #[inline]
+ pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
+ while key.load(Ordering::Acquire) != 0 {
+ let now = Instant::now();
+ if timeout <= now {
+ return false;
+ }
+ let diff = timeout - now;
+ let timeout = diff
+ .as_secs()
+ .checked_mul(1000)
+ .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
+ .map(|ms| {
+ if ms > <DWORD>::max_value() as u64 {
+ INFINITE
+ } else {
+ ms as DWORD
+ }
+ })
+ .unwrap_or(INFINITE);
+ if self.wait_on_address(key, timeout) == FALSE {
+ debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
+ }
+ }
+ true
+ }
+
+ #[inline]
+ pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
+ // We don't need to lock anything, just clear the state
+ key.store(0, Ordering::Release);
+
+ UnparkHandle {
+ key: key,
+ waitaddress: self,
+ }
+ }
+
+ #[inline]
+ fn wait_on_address(&'static self, key: &AtomicUsize, timeout: DWORD) -> BOOL {
+ let cmp = 1usize;
+ (self.WaitOnAddress)(
+ key as *const _ as PVOID,
+ &cmp as *const _ as PVOID,
+ mem::size_of::<usize>() as SIZE_T,
+ timeout,
+ )
+ }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle {
+ key: *const AtomicUsize,
+ waitaddress: &'static WaitAddress,
+}
+
+impl UnparkHandle {
+ // Wakes up the parked thread. This should be called after the queue lock is
+ // released to avoid blocking the queue for too long.
+ #[inline]
+ pub fn unpark(self) {
+ (self.waitaddress.WakeByAddressSingle)(self.key as PVOID);
+ }
+}
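waitaddress.rs performs the analogous timeout conversion in milliseconds, since WaitOnAddress takes a DWORD millisecond timeout rather than 100-ns ticks: park_until rounds the remaining Duration up to whole milliseconds and degrades to INFINITE if the value overflows a DWORD. A standalone sketch of that rounding, with a hypothetical helper name that is not part of the crate:

use std::time::Duration;

/// WinBase's INFINITE sentinel: an all-ones DWORD means "no timeout".
const INFINITE: u32 = u32::MAX;

/// Convert a remaining Duration into the millisecond DWORD passed to
/// WaitOnAddress, rounding up and saturating to INFINITE on overflow.
fn to_wait_on_address_timeout(diff: Duration) -> u32 {
    diff.as_secs()
        .checked_mul(1000)
        .and_then(|ms| ms.checked_add((diff.subsec_nanos() as u64 + 999_999) / 1_000_000))
        .map(|ms| if ms > u32::MAX as u64 { INFINITE } else { ms as u32 })
        .unwrap_or(INFINITE)
}

fn main() {
    assert_eq!(to_wait_on_address_timeout(Duration::from_micros(1)), 1); // rounds up
    assert_eq!(to_wait_on_address_timeout(Duration::from_millis(250)), 250);
    // Anything that overflows 32-bit milliseconds becomes an infinite wait.
    assert_eq!(to_wait_on_address_timeout(Duration::from_secs(u64::MAX / 100)), INFINITE);
}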