author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/parking_lot/src/mutex.rs
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/parking_lot/src/mutex.rs')
-rw-r--r--  third_party/rust/parking_lot/src/mutex.rs  312
1 file changed, 312 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/parking_lot/src/mutex.rs b/third_party/rust/parking_lot/src/mutex.rs
new file mode 100644
index 0000000000..9f63cb9434
--- /dev/null
+++ b/third_party/rust/parking_lot/src/mutex.rs
@@ -0,0 +1,312 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::raw_mutex::RawMutex;
+use lock_api;
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a `new`
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII guards
+/// returned from `lock` and `try_lock`, which guarantees that the data is only
+/// ever accessed when the mutex is locked.
+///
+/// # Fairness
+///
+/// A typical unfair lock can often end up in a situation where a single thread
+/// quickly acquires and releases the same mutex in succession. While this
+/// improves throughput because it doesn't force a context switch when a thread
+/// tries to re-acquire a mutex it has just released, it can starve other
+/// threads waiting to acquire the mutex.
+///
+/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350)
+/// to ensure that the lock will be fair on average without sacrificing
+/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
+/// which will force the lock to go to the next thread waiting for the mutex.
+///
+/// Additionally, any critical section longer than 1ms will always use a fair
+/// unlock, which has a negligible impact on throughput considering the length
+/// of the critical section.
+///
+/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
+/// unlocking a mutex instead of simply dropping the `MutexGuard`.
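+///
+/// A minimal sketch of a fair unlock (note that `unlock_fair` is an associated
+/// function that consumes the guard):
+///
+/// ```
+/// use parking_lot::{Mutex, MutexGuard};
+///
+/// let mutex = Mutex::new(0);
+/// let mut guard = mutex.lock();
+/// *guard += 1;
+/// // Hand the lock directly to the next waiting thread (if any) instead of
+/// // letting the default, possibly unfair, unlock policy decide.
+/// MutexGuard::unlock_fair(guard);
+/// ```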
+///
+/// # Differences from the standard library `Mutex`
+///
+/// - No poisoning, the lock is released normally on panic.
+/// - Only requires 1 byte of space, whereas the standard library boxes the
+/// `Mutex` due to platform limitations.
+/// - Can be statically constructed (see `const_mutex` for a `const`
+///   constructor usable on stable Rust).
+/// - Does not require any drop glue when dropped.
+/// - Inline fast path for the uncontended case.
+/// - Efficient handling of micro-contention using adaptive spinning.
+/// - Allows raw locking & unlocking without a guard (see the sketch below).
+/// - Supports eventual fairness so that the mutex is fair on average.
+/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
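+///
+/// As an example of raw unlocking, a sketch of releasing a leaked lock
+/// (`force_unlock` is `unsafe` because the mutex must actually be locked and
+/// no guard for it may still be live):
+///
+/// ```
+/// use parking_lot::Mutex;
+///
+/// let mutex = Mutex::new(1);
+/// // Acquire the lock, then leak the guard so the mutex stays locked.
+/// std::mem::forget(mutex.lock());
+/// // Later, release the lock without holding a guard.
+/// unsafe { mutex.force_unlock(); }
+/// assert_eq!(*mutex.lock(), 1);
+/// ```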
+///
+/// # Examples
+///
+/// ```
+/// use parking_lot::Mutex;
+/// use std::sync::{Arc, mpsc::channel};
+/// use std::thread;
+///
+/// const N: usize = 10;
+///
+/// // Spawn a few threads to increment a shared variable (non-atomically), and
+/// // let the main thread know once all increments are done.
+/// //
+/// // Here we're using an Arc to share memory among threads, and the data inside
+/// // the Arc is protected with a mutex.
+/// let data = Arc::new(Mutex::new(0));
+///
+/// let (tx, rx) = channel();
+/// for _ in 0..N {
+///     let (data, tx) = (Arc::clone(&data), tx.clone());
+///     thread::spawn(move || {
+///         // The shared state can only be accessed once the lock is held.
+///         // Our non-atomic increment is safe because we're the only thread
+///         // which can access the shared state when the lock is held.
+///         let mut data = data.lock();
+///         *data += 1;
+///         if *data == N {
+///             tx.send(()).unwrap();
+///         }
+///         // the lock is unlocked here when `data` goes out of scope.
+///     });
+/// }
+///
+/// rx.recv().unwrap();
+/// ```
+pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
+
+/// Creates a new mutex in an unlocked state ready for use.
+///
+/// This allows creating a mutex in a constant context on stable Rust.
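+///
+/// A minimal usage sketch (the `COUNT` static is illustrative, not part of
+/// the crate):
+///
+/// ```
+/// use parking_lot::{const_mutex, Mutex};
+///
+/// // A mutex in a `static`, initialized at compile time.
+/// static COUNT: Mutex<u32> = const_mutex(0);
+///
+/// *COUNT.lock() += 1;
+/// assert_eq!(*COUNT.lock(), 1);
+/// ```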
+pub const fn const_mutex<T>(val: T) -> Mutex<T> {
+    Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val)
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` and `DerefMut` implementations.
+pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
+
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
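+///
+/// A brief sketch of obtaining one via `MutexGuard::map` (the `Point` struct
+/// is illustrative):
+///
+/// ```
+/// use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};
+///
+/// struct Point { x: i32, y: i32 }
+///
+/// let mutex = Mutex::new(Point { x: 1, y: 2 });
+/// // Narrow the guard to the `x` field; the mutex itself stays locked.
+/// let mut x: MappedMutexGuard<i32> = MutexGuard::map(mutex.lock(), |p| &mut p.x);
+/// *x += 1;
+/// drop(x);
+/// assert_eq!(mutex.lock().x, 2);
+/// ```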
+pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
+
+#[cfg(test)]
+mod tests {
+    use crate::{Condvar, Mutex};
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    #[cfg(feature = "serde")]
+    use bincode::{deserialize, serialize};
+
+    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    unsafe impl<T: Send> Send for Packet<T> {}
+    unsafe impl<T> Sync for Packet<T> {}
+
+    #[test]
+    fn smoke() {
+        let m = Mutex::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        let m = Arc::new(Mutex::new(0));
+
+        fn inc(m: &Mutex<u32>) {
+            for _ in 0..J {
+                *m.lock() += 1;
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            let m2 = m.clone();
+            thread::spawn(move || {
+                inc(&m2);
+                tx2.send(()).unwrap();
+            });
+            let tx2 = tx.clone();
+            let m2 = m.clone();
+            thread::spawn(move || {
+                inc(&m2);
+                tx2.send(()).unwrap();
+            });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(*m.lock(), J * K * 2);
+    }
+
+    #[test]
+    fn try_lock() {
+        let m = Mutex::new(());
+        *m.try_lock().unwrap() = ();
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = Mutex::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = Mutex::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_get_mut() {
+        let mut m = Mutex::new(NonCopy(10));
+        *m.get_mut() = NonCopy(20);
+        assert_eq!(m.into_inner(), NonCopy(20));
+    }
+
+    #[test]
+    fn test_mutex_arc_condvar() {
+        let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
+        let packet2 = Packet(packet.0.clone());
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            // wait until parent gets in
+            rx.recv().unwrap();
+            let &(ref lock, ref cvar) = &*packet2.0;
+            let mut lock = lock.lock();
+            *lock = true;
+            cvar.notify_one();
+        });
+
+        let &(ref lock, ref cvar) = &*packet.0;
+        let mut lock = lock.lock();
+        tx.send(()).unwrap();
+        assert!(!*lock);
+        while !*lock {
+            cvar.wait(&mut lock);
+        }
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = Arc::new(Mutex::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+
+    #[test]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(Mutex::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || {
+            struct Unwinder {
+                i: Arc<Mutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn test_mutexguard_sync() {
+        fn sync<T: Sync>(_: T) {}
+
+        let mutex = Mutex::new(());
+        sync(mutex.lock());
+    }
+
+    #[test]
+    fn test_mutex_debug() {
+        let mutex = Mutex::new(vec![0u8, 10]);
+
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
+        let _lock = mutex.lock();
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
+    }
+
+    #[cfg(feature = "serde")]
+    #[test]
+    fn test_serde() {
+        let contents: Vec<u8> = vec![0, 1, 2];
+        let mutex = Mutex::new(contents.clone());
+
+        let serialized = serialize(&mutex).unwrap();
+        let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();
+
+        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
+        assert_eq!(contents, *(deserialized.lock()));
+    }
+}