Diffstat (limited to '')
-rw-r--r--  vendor/tracing-core/src/spin/LICENSE     21
-rw-r--r--  vendor/tracing-core/src/spin/mod.rs       7
-rw-r--r--  vendor/tracing-core/src/spin/mutex.rs   118
-rw-r--r--  vendor/tracing-core/src/spin/once.rs    158
4 files changed, 304 insertions, 0 deletions
diff --git a/vendor/tracing-core/src/spin/LICENSE b/vendor/tracing-core/src/spin/LICENSE
new file mode 100644
index 000000000..84d5f4d7a
--- /dev/null
+++ b/vendor/tracing-core/src/spin/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathijs van de Nes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/tracing-core/src/spin/mod.rs b/vendor/tracing-core/src/spin/mod.rs
new file mode 100644
index 000000000..148b192b3
--- /dev/null
+++ b/vendor/tracing-core/src/spin/mod.rs
@@ -0,0 +1,7 @@
+//! Synchronization primitives based on spinning
+
+pub(crate) use mutex::*;
+pub(crate) use once::Once;
+
+mod mutex;
+mod once;
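
This mod.rs is the crate-facing surface of the vendored code: the rest of tracing-core reaches these types as crate::spin::Mutex and crate::spin::Once, both re-exported pub(crate) so nothing leaks into the public API. (The LICENSE above is the MIT license of the spin crate this code originates from.) As a rough sketch of how a crate-internal consumer might look — the static names and values here are illustrative, not tracing-core's actual code:

    use crate::spin::{Mutex, Once};

    // Both constructors are `const fn`, so the primitives can back statics
    // without std::sync or any lazy-initialization machinery.
    static BUFFER: Mutex<[u8; 4]> = Mutex::new([0; 4]);
    static MAGIC: Once<u32> = Once::new();

    fn touch() -> u32 {
        BUFFER.lock()[0] = 1; // the guard derefs to the protected data
        *MAGIC.call_once(|| 0xC0FFEE) // runs the closure at most once, ever
    }
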
diff --git a/vendor/tracing-core/src/spin/mutex.rs b/vendor/tracing-core/src/spin/mutex.rs
new file mode 100644
index 000000000..c261a6191
--- /dev/null
+++ b/vendor/tracing-core/src/spin/mutex.rs
@@ -0,0 +1,118 @@
+use core::cell::UnsafeCell;
+use core::default::Default;
+use core::fmt;
+use core::hint;
+use core::marker::Sync;
+use core::ops::{Deref, DerefMut, Drop};
+use core::option::Option::{self, None, Some};
+use core::sync::atomic::{AtomicBool, Ordering};
+
+/// This type provides MUTual EXclusion based on spinning.
+pub(crate) struct Mutex<T: ?Sized> {
+    lock: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// A guard through which the protected data can be accessed.
+///
+/// When the guard falls out of scope it will release the lock.
+#[derive(Debug)]
+pub(crate) struct MutexGuard<'a, T: ?Sized> {
+    lock: &'a AtomicBool,
+    data: &'a mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+impl<T> Mutex<T> {
+    /// Creates a new spinlock wrapping the supplied data.
+    pub(crate) const fn new(user_data: T) -> Mutex<T> {
+        Mutex {
+            lock: AtomicBool::new(false),
+            data: UnsafeCell::new(user_data),
+        }
+    }
+}
+
+impl<T: ?Sized> Mutex<T> {
+    fn obtain_lock(&self) {
+        while self
+            .lock
+            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            // Wait until the lock looks unlocked before retrying (test-and-test-and-set)
+            while self.lock.load(Ordering::Relaxed) {
+                hint::spin_loop();
+            }
+        }
+    }
+
+    /// Locks the spinlock and returns a guard.
+    ///
+    /// The returned guard may be dereferenced for data access,
+    /// and the lock will be released when the guard falls out of scope.
+    pub(crate) fn lock(&self) -> MutexGuard<'_, T> {
+        self.obtain_lock();
+        MutexGuard {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+
+    /// Tries to lock the mutex. Returns `None` if it is already locked;
+    /// otherwise returns a guard wrapped in `Some`.
+    pub(crate) fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+        if self
+            .lock
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            Some(MutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Mutex<T> {
+    fn default() -> Mutex<T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
+    type Target = T;
+    fn deref<'b>(&'b self) -> &'b T {
+        &*self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut<'b>(&'b mut self) -> &'b mut T {
+        &mut *self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
+    /// Dropping the `MutexGuard` releases the lock it was created from.
+    fn drop(&mut self) {
+        self.lock.store(false, Ordering::Release);
+    }
+}
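
To make the locking protocol above concrete: obtain_lock retries a compare_exchange_weak until it flips the flag from false to true, and dropping the guard stores false with Release ordering. A minimal sketch of the semantics, assuming crate-internal access (the static and function are illustrative):

    use crate::spin::Mutex;

    static COUNTER: Mutex<u64> = Mutex::new(0);

    fn demo() {
        let mut guard = COUNTER.lock(); // spins until the CAS succeeds
        *guard += 1;

        // While `guard` is alive the flag stays `true`, so try_lock()
        // fails fast instead of spinning.
        assert!(COUNTER.try_lock().is_none());

        drop(guard); // releases the lock (store `false`, Release)
        assert!(COUNTER.try_lock().is_some());
    }

The inner Relaxed-load loop in obtain_lock is the classic test-and-test-and-set refinement: threads that lose the race spin on a read-only load and only reattempt the write-heavy compare-exchange once the lock looks free.
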
diff --git a/vendor/tracing-core/src/spin/once.rs b/vendor/tracing-core/src/spin/once.rs
new file mode 100644
index 000000000..27c99e56e
--- /dev/null
+++ b/vendor/tracing-core/src/spin/once.rs
@@ -0,0 +1,158 @@
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::hint::spin_loop;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// A synchronization primitive which can be used to run a one-time global
+/// initialization. Unlike its std equivalent, this is generalized so that the
+/// initialization closure returns a value, which is then stored. A `Once`
+/// therefore also acts something like a future.
+pub struct Once<T> {
+    state: AtomicUsize,
+    data: UnsafeCell<Option<T>>, // TODO: remove the Option and use MaybeUninit
+}
+
+impl<T: fmt::Debug> fmt::Debug for Once<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.r#try() {
+            Some(s) => write!(f, "Once {{ data: ")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ <uninitialized> }}"),
+        }
+    }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Send + Sync> Sync for Once<T> {}
+unsafe impl<T: Send> Send for Once<T> {}
+
+// Four states that a Once can be in, encoded into the lower bits of `state` in
+// the Once structure.
+const INCOMPLETE: usize = 0x0;
+const RUNNING: usize = 0x1;
+const COMPLETE: usize = 0x2;
+const PANICKED: usize = 0x3;
+
+use core::hint::unreachable_unchecked as unreachable;
+
+impl<T> Once<T> {
+    /// Initialization constant of `Once`.
+    pub const INIT: Self = Once {
+        state: AtomicUsize::new(INCOMPLETE),
+        data: UnsafeCell::new(None),
+    };
+
+    /// Creates a new `Once` value.
+    pub const fn new() -> Once<T> {
+        Self::INIT
+    }
+
+    fn force_get<'a>(&'a self) -> &'a T {
+        match unsafe { &*self.data.get() }.as_ref() {
+            None => unsafe { unreachable() },
+            Some(p) => p,
+        }
+    }
+
+    /// Performs an initialization routine once and only once. The given closure
+    /// will be executed if this is the first time `call_once` has been called;
+    /// otherwise the routine will *not* be invoked.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns, it is guaranteed that some initialization
+    /// has run and completed (though it may not be the closure specified). The
+    /// returned reference points to the result of the closure that was
+    /// actually run.
+    pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
+    where
+        F: FnOnce() -> T,
+    {
+        let mut status = self.state.load(Ordering::SeqCst);
+
+        if status == INCOMPLETE {
+            status = match self.state.compare_exchange(
+                INCOMPLETE,
+                RUNNING,
+                Ordering::SeqCst,
+                Ordering::SeqCst,
+            ) {
+                Ok(status) => {
+                    debug_assert_eq!(
+                        status, INCOMPLETE,
+                        "if compare_exchange succeeded, previous status must be incomplete",
+                    );
+                    // We won the race and run the initializer. The `Finish`
+                    // drop guard records a panic raised by `builder`.
+                    let mut finish = Finish {
+                        state: &self.state,
+                        panicked: true,
+                    };
+                    unsafe { *self.data.get() = Some(builder()) };
+                    finish.panicked = false;
+
+                    self.state.store(COMPLETE, Ordering::SeqCst);
+
+                    // Returning here instead of looping below is strictly an optimization
+                    return self.force_get();
+                }
+                Err(status) => status,
+            };
+        }
+
+        loop {
+            match status {
+                INCOMPLETE => unreachable!(),
+                RUNNING => {
+                    // Initialization is running on another thread; spin
+                    spin_loop();
+                    status = self.state.load(Ordering::SeqCst)
+                }
+                PANICKED => panic!("Once has panicked"),
+                COMPLETE => return self.force_get(),
+                _ => unsafe { unreachable() },
+            }
+        }
+    }
+
+    /// Returns a reference to the value iff the `Once` was previously initialized
+    pub fn r#try<'a>(&'a self) -> Option<&'a T> {
+        match self.state.load(Ordering::SeqCst) {
+            COMPLETE => Some(self.force_get()),
+            _ => None,
+        }
+    }
+
+    /// Like `try`, but will spin if the `Once` is in the process of being
+    /// initialized.
+    pub fn wait<'a>(&'a self) -> Option<&'a T> {
+        loop {
+            match self.state.load(Ordering::SeqCst) {
+                INCOMPLETE => return None,
+
+                RUNNING => {
+                    spin_loop() // Initialization in progress; spin
+                }
+                COMPLETE => return Some(self.force_get()),
+                PANICKED => panic!("Once has panicked"),
+                _ => unsafe { unreachable() },
+            }
+        }
+    }
+}
+
+struct Finish<'a> {
+    state: &'a AtomicUsize,
+    panicked: bool,
+}
+
+impl<'a> Drop for Finish<'a> {
+    fn drop(&mut self) {
+        if self.panicked {
+            self.state.store(PANICKED, Ordering::SeqCst);
+        }
+    }
+}
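
The four-state machine above is what makes call_once safe to race: exactly one thread wins the INCOMPLETE -> RUNNING compare-exchange and runs the closure, while every other caller spins until the state settles at COMPLETE. If the closure unwinds, the Finish drop guard stores PANICKED, so later call_once or wait callers panic with "Once has panicked" instead of spinning forever. A minimal usage sketch, again with illustrative names and crate-internal access assumed:

    use crate::spin::Once;

    static TABLE: Once<[u16; 3]> = Once::new();

    fn table() -> &'static [u16; 3] {
        // The first caller runs the closure; concurrent callers spin in
        // RUNNING, and afterwards every caller gets the same reference.
        TABLE.call_once(|| [1, 10, 100])
    }

    fn peek() -> Option<&'static [u16; 3]> {
        TABLE.r#try() // never blocks: Some(..) only after COMPLETE
    }
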