author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /vendor/futures-util/src/lock
parent    Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/futures-util/src/lock')
-rw-r--r--  vendor/futures-util/src/lock/bilock.rs | 276
-rw-r--r--  vendor/futures-util/src/lock/mod.rs    |  25
-rw-r--r--  vendor/futures-util/src/lock/mutex.rs  | 406
3 files changed, 707 insertions(+), 0 deletions(-)
diff --git a/vendor/futures-util/src/lock/bilock.rs b/vendor/futures-util/src/lock/bilock.rs
new file mode 100644
index 000000000..2f51ae7c9
--- /dev/null
+++ b/vendor/futures-util/src/lock/bilock.rs
@@ -0,0 +1,276 @@
+//! Futures-powered synchronization primitives.
+
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+use core::pin::Pin;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering::SeqCst;
+#[cfg(feature = "bilock")]
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll, Waker};
+
+/// A type of futures-powered synchronization primitive which is a mutex between
+/// two possible owners.
+///
+/// This primitive is not as general-purpose as a full-blown mutex, but it is
+/// sufficient for the many use cases where a resource has exactly two owners,
+/// and the implementation can be optimized for that case.
+///
+/// Note that it's possible to use this lock through a poll-style interface
+/// with the `poll_lock` method, but you can also use it as a future with the
+/// `lock` method, which returns a future that resolves to a guard once the
+/// lock has been acquired.
+///
+/// A `BiLock` is typically used for "split" operations, where data that serves
+/// two purposes needs to be separated so each half can be worked with
+/// independently. For example, a TCP stream could be both a reader and a
+/// writer, or a framing layer could be both a stream and a sink for messages.
+/// A `BiLock` splits these roles apart so each can be used independently in a
+/// futures-powered fashion.
+///
+/// This type is only available when the `bilock` feature of this
+/// library is activated.
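+///
+/// A minimal usage sketch (illustrative; it assumes the `bilock` feature is
+/// enabled and uses the `futures` facade's executor):
+///
+/// ```ignore
+/// # futures::executor::block_on(async {
+/// use futures_util::lock::BiLock;
+///
+/// let (a, b) = BiLock::new(0u32);
+/// *a.lock().await += 1; // lock through one handle...
+/// *b.lock().await += 1; // ...then through the other
+/// assert_eq!(a.reunite(b).unwrap(), 2);
+/// # });
+/// ```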
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct BiLock<T> {
+ arc: Arc<Inner<T>>,
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+ state: AtomicUsize,
+ value: Option<UnsafeCell<T>>,
+}
+
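+// Sharing `Inner` between threads only requires `T: Send`: the protected
+// value is reachable only while holding the lock, which grants exclusive
+// access (the same reasoning as for a standard mutex).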
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl<T> BiLock<T> {
+ /// Creates a new `BiLock` protecting the provided data.
+ ///
+ /// Two handles to the lock are returned, and these are the only two handles
+ /// that will ever be available to the lock. These can then be sent to separate
+ /// tasks to be managed there.
+ ///
+ /// The data behind the bilock is considered to be pinned, which allows `Pin`
+ /// references to locked data. However, this means that the locked value
+ /// will only be available through `Pin<&mut T>` (not `&mut T`) unless `T` is `Unpin`.
+ /// Similarly, reuniting the lock and extracting the inner value is only
+ /// possible when `T` is `Unpin`.
+ pub fn new(t: T) -> (Self, Self) {
+ let arc = Arc::new(Inner { state: AtomicUsize::new(0), value: Some(UnsafeCell::new(t)) });
+
+ (Self { arc: arc.clone() }, Self { arc })
+ }
+
+    /// Attempt to acquire this lock, returning `Poll::Pending` if it can't be
+    /// acquired.
+ ///
+ /// This function will acquire the lock in a nonblocking fashion, returning
+ /// immediately if the lock is already held. If the lock is successfully
+ /// acquired then `Poll::Ready` is returned with a value that represents
+ /// the locked value (and can be used to access the protected data). The
+ /// lock is unlocked when the returned `BiLockGuard` is dropped.
+ ///
+ /// If the lock is already held then this function will return
+ /// `Poll::Pending`. In this case the current task will also be scheduled
+ /// to receive a notification when the lock would otherwise become
+ /// available.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called outside the context of a future's
+ /// task.
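+    ///
+    /// A poll-style usage sketch (illustrative; it assumes the `bilock`
+    /// feature and the `futures` facade's `poll_fn` and executor):
+    ///
+    /// ```ignore
+    /// # futures::executor::block_on(async {
+    /// use futures::future::poll_fn;
+    /// use futures_util::lock::BiLock;
+    ///
+    /// let (a, _b) = BiLock::new(1);
+    /// let guard = poll_fn(|cx| a.poll_lock(cx)).await;
+    /// assert_eq!(*guard, 1);
+    /// # });
+    /// ```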
+ pub fn poll_lock(&self, cx: &mut Context<'_>) -> Poll<BiLockGuard<'_, T>> {
+ let mut waker = None;
+ loop {
+ match self.arc.state.swap(1, SeqCst) {
+ // Woohoo, we grabbed the lock!
+ 0 => return Poll::Ready(BiLockGuard { bilock: self }),
+
+ // Oops, someone else has locked the lock
+ 1 => {}
+
+ // A task was previously blocked on this lock, likely our task,
+ // so we need to update that task.
+ n => unsafe {
+ let mut prev = Box::from_raw(n as *mut Waker);
+ *prev = cx.waker().clone();
+ waker = Some(prev);
+ },
+ }
+
+        // Explicit type annotation, so we are certain to allocate a
+        // `Box<Waker>` (the raw pointer is later cast back to `*mut Waker`).
+ let me: Box<Waker> = waker.take().unwrap_or_else(|| Box::new(cx.waker().clone()));
+ let me = Box::into_raw(me) as usize;
+
+ match self.arc.state.compare_exchange(1, me, SeqCst, SeqCst) {
+ // The lock is still locked, but we've now parked ourselves, so
+ // just report that we're scheduled to receive a notification.
+ Ok(_) => return Poll::Pending,
+
+ // Oops, looks like the lock was unlocked after our swap above
+ // and before the compare_exchange. Deallocate what we just
+ // allocated and go through the loop again.
+ Err(0) => unsafe {
+ waker = Some(Box::from_raw(me as *mut Waker));
+ },
+
+ // The top of this loop set the previous state to 1, so if we
+ // failed the CAS above then it's because the previous value was
+ // *not* zero or one. This indicates that a task was blocked,
+ // but we're trying to acquire the lock and there's only one
+                // other reference to the lock, so it should be impossible for
+ // that task to ever block itself.
+ Err(n) => panic!("invalid state: {}", n),
+ }
+ }
+ }
+
+    /// Perform a "blocking lock" of this lock, returning a future that
+    /// resolves once the lock has been acquired.
+    ///
+    /// This function returns a sentinel future, `BiLockAcquire<'_, T>`, which
+    /// resolves to a `BiLockGuard<'_, T>` representing the locked lock.
+    ///
+    /// Note that the returned future will never resolve to an error.
+ #[cfg(feature = "bilock")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+ pub fn lock(&self) -> BiLockAcquire<'_, T> {
+ BiLockAcquire { bilock: self }
+ }
+
+ /// Attempts to put the two "halves" of a `BiLock<T>` back together and
+ /// recover the original value. Succeeds only if the two `BiLock<T>`s
+ /// originated from the same call to `BiLock::new`.
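+    ///
+    /// A sketch of both outcomes (illustrative; assumes the `bilock` feature):
+    ///
+    /// ```ignore
+    /// use futures_util::lock::BiLock;
+    ///
+    /// let (a, b) = BiLock::new(0);
+    /// let (c, _d) = BiLock::new(0);
+    /// // Halves of different locks fail; the originals are handed back.
+    /// let a = a.reunite(c).unwrap_err().0;
+    /// // Matching halves succeed and return the inner value.
+    /// assert_eq!(a.reunite(b).unwrap(), 0);
+    /// ```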
+ pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>>
+ where
+ T: Unpin,
+ {
+ if Arc::ptr_eq(&self.arc, &other.arc) {
+ drop(other);
+ let inner = Arc::try_unwrap(self.arc)
+ .ok()
+ .expect("futures: try_unwrap failed in BiLock<T>::reunite");
+ Ok(unsafe { inner.into_value() })
+ } else {
+ Err(ReuniteError(self, other))
+ }
+ }
+
+ fn unlock(&self) {
+ match self.arc.state.swap(0, SeqCst) {
+            // We hold the lock, so it shouldn't be possible to observe an
+            // unlocked state here.
+ 0 => panic!("invalid unlocked state"),
+
+ // Ok, no one else tried to get the lock, we're done.
+ 1 => {}
+
+            // Another task has parked itself on this lock; wake it up, as
+            // it's now its turn.
+ n => unsafe {
+ Box::from_raw(n as *mut Waker).wake();
+ },
+ }
+ }
+}
+
+impl<T: Unpin> Inner<T> {
+ unsafe fn into_value(mut self) -> T {
+ self.value.take().unwrap().into_inner()
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.state.load(SeqCst), 0);
+ }
+}
+
+/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
+/// thus could not be `reunite`d.
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("ReuniteError").field(&"...").finish()
+ }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "tried to reunite two BiLocks that don't form a pair")
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: core::any::Any> std::error::Error for ReuniteError<T> {}
+
+/// Returned RAII guard from the `poll_lock` method.
+///
+/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
+/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
+/// unlocked.
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct BiLockGuard<'a, T> {
+ bilock: &'a BiLock<T>,
+}
+
+impl<T> Deref for BiLockGuard<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.bilock.arc.value.as_ref().unwrap().get() }
+ }
+}
+
+impl<T: Unpin> DerefMut for BiLockGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.bilock.arc.value.as_ref().unwrap().get() }
+ }
+}
+
+impl<T> BiLockGuard<'_, T> {
+ /// Get a mutable pinned reference to the locked value.
+ pub fn as_pin_mut(&mut self) -> Pin<&mut T> {
+ // Safety: we never allow moving a !Unpin value out of a bilock, nor
+ // allow mutable access to it
+ unsafe { Pin::new_unchecked(&mut *self.bilock.arc.value.as_ref().unwrap().get()) }
+ }
+}
+
+impl<T> Drop for BiLockGuard<'_, T> {
+ fn drop(&mut self) {
+ self.bilock.unlock();
+ }
+}
+
+/// Future returned by `BiLock::lock` which will resolve when the lock is
+/// acquired.
+#[cfg(feature = "bilock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct BiLockAcquire<'a, T> {
+ bilock: &'a BiLock<T>,
+}
+
+// Pinning is never projected to fields
+#[cfg(feature = "bilock")]
+impl<T> Unpin for BiLockAcquire<'_, T> {}
+
+#[cfg(feature = "bilock")]
+impl<'a, T> Future for BiLockAcquire<'a, T> {
+ type Output = BiLockGuard<'a, T>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.bilock.poll_lock(cx)
+ }
+}
diff --git a/vendor/futures-util/src/lock/mod.rs b/vendor/futures-util/src/lock/mod.rs
new file mode 100644
index 000000000..cf374c016
--- /dev/null
+++ b/vendor/futures-util/src/lock/mod.rs
@@ -0,0 +1,25 @@
+//! Futures-powered synchronization primitives.
+//!
+//! This module is only available when the `std` or `alloc` feature of this
+//! library is activated, and it is activated by default.
+
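+// How the cfg gates below fit together: `bilock` is compiled whenever the
+// `sink` or `io` adapters need it internally, but its types are re-exported
+// publicly only when the `bilock` feature itself is enabled.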
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "std")]
+mod mutex;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "std")]
+pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard, MutexLockFuture};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(any(feature = "bilock", feature = "sink", feature = "io"))]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+#[cfg_attr(not(feature = "bilock"), allow(unreachable_pub))]
+mod bilock;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(any(feature = "sink", feature = "io"))]
+#[cfg(not(feature = "bilock"))]
+pub(crate) use self::bilock::BiLock;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "bilock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub use self::bilock::{BiLock, BiLockAcquire, BiLockGuard, ReuniteError};
diff --git a/vendor/futures-util/src/lock/mutex.rs b/vendor/futures-util/src/lock/mutex.rs
new file mode 100644
index 000000000..85dcb1537
--- /dev/null
+++ b/vendor/futures-util/src/lock/mutex.rs
@@ -0,0 +1,406 @@
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll, Waker};
+use slab::Slab;
+use std::cell::UnsafeCell;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Mutex as StdMutex;
+use std::{fmt, mem};
+
+/// A futures-aware mutex.
+///
+/// # Fairness
+///
+/// This mutex provides no fairness guarantees. Tasks may not acquire the mutex
+/// in the order that they requested the lock, and it's possible for a single task
+/// which repeatedly takes the lock to starve other tasks, which may be left waiting
+/// indefinitely.
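+///
+/// # Example
+///
+/// A minimal usage sketch (illustrative):
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::lock::Mutex;
+///
+/// let mutex = Mutex::new(0);
+/// {
+///     let mut guard = mutex.lock().await;
+///     *guard += 1;
+/// } // guard dropped here, unlocking the mutex
+/// assert_eq!(*mutex.lock().await, 1);
+/// # });
+/// ```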
+pub struct Mutex<T: ?Sized> {
+ state: AtomicUsize,
+ waiters: StdMutex<Slab<Waiter>>,
+ value: UnsafeCell<T>,
+}
+
+impl<T: ?Sized> fmt::Debug for Mutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let state = self.state.load(Ordering::SeqCst);
+ f.debug_struct("Mutex")
+ .field("is_locked", &((state & IS_LOCKED) != 0))
+ .field("has_waiters", &((state & HAS_WAITERS) != 0))
+ .finish()
+ }
+}
+
+impl<T> From<T> for Mutex<T> {
+ fn from(t: T) -> Self {
+ Self::new(t)
+ }
+}
+
+impl<T: Default> Default for Mutex<T> {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
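+// One entry in the `waiters` slab. `Woken` marks a waiter whose waker has
+// already been consumed, so waking it again is a no-op until a fresh waker
+// is registered.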
+enum Waiter {
+ Waiting(Waker),
+ Woken,
+}
+
+impl Waiter {
+ fn register(&mut self, waker: &Waker) {
+ match self {
+ Self::Waiting(w) if waker.will_wake(w) => {}
+ _ => *self = Self::Waiting(waker.clone()),
+ }
+ }
+
+ fn wake(&mut self) {
+ match mem::replace(self, Self::Woken) {
+ Self::Waiting(waker) => waker.wake(),
+ Self::Woken => {}
+ }
+ }
+}
+
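+// Bit flags packed into `Mutex::state`: bit 0 records whether the mutex is
+// currently held, bit 1 whether the `waiters` slab may contain entries.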
+const IS_LOCKED: usize = 1 << 0;
+const HAS_WAITERS: usize = 1 << 1;
+
+impl<T> Mutex<T> {
+ /// Creates a new futures-aware mutex.
+ pub fn new(t: T) -> Self {
+ Self {
+ state: AtomicUsize::new(0),
+ waiters: StdMutex::new(Slab::new()),
+ value: UnsafeCell::new(t),
+ }
+ }
+
+ /// Consumes this mutex, returning the underlying data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::lock::Mutex;
+ ///
+ /// let mutex = Mutex::new(0);
+ /// assert_eq!(mutex.into_inner(), 0);
+ /// ```
+ pub fn into_inner(self) -> T {
+ self.value.into_inner()
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Attempt to acquire the lock immediately.
+ ///
+ /// If the lock is currently held, this will return `None`.
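+    ///
+    /// A minimal sketch (illustrative):
+    ///
+    /// ```
+    /// use futures::lock::Mutex;
+    ///
+    /// let mutex = Mutex::new(0);
+    /// let guard = mutex.try_lock().unwrap();
+    /// assert!(mutex.try_lock().is_none()); // already held
+    /// drop(guard);
+    /// assert!(mutex.try_lock().is_some()); // available again
+    /// ```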
+ pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+ let old_state = self.state.fetch_or(IS_LOCKED, Ordering::Acquire);
+ if (old_state & IS_LOCKED) == 0 {
+ Some(MutexGuard { mutex: self })
+ } else {
+ None
+ }
+ }
+
+ /// Acquire the lock asynchronously.
+ ///
+ /// This method returns a future that will resolve once the lock has been
+ /// successfully acquired.
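+    ///
+    /// A minimal sketch (illustrative):
+    ///
+    /// ```
+    /// # futures::executor::block_on(async {
+    /// use futures::lock::Mutex;
+    ///
+    /// let mutex = Mutex::new(0);
+    /// *mutex.lock().await += 1;
+    /// assert_eq!(*mutex.lock().await, 1);
+    /// # });
+    /// ```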
+ pub fn lock(&self) -> MutexLockFuture<'_, T> {
+ MutexLockFuture { mutex: Some(self), wait_key: WAIT_KEY_NONE }
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no locks exist.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::Mutex;
+ ///
+ /// let mut mutex = Mutex::new(0);
+ /// *mutex.get_mut() = 10;
+ /// assert_eq!(*mutex.lock().await, 10);
+ /// # });
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ // We know statically that there are no other references to `self`, so
+ // there's no need to lock the inner mutex.
+ unsafe { &mut *self.value.get() }
+ }
+
+ fn remove_waker(&self, wait_key: usize, wake_another: bool) {
+ if wait_key != WAIT_KEY_NONE {
+ let mut waiters = self.waiters.lock().unwrap();
+ match waiters.remove(wait_key) {
+ Waiter::Waiting(_) => {}
+ Waiter::Woken => {
+ // We were awoken, but then dropped before we could
+ // wake up to acquire the lock. Wake up another
+ // waiter.
+ if wake_another {
+ if let Some((_i, waiter)) = waiters.iter_mut().next() {
+ waiter.wake();
+ }
+ }
+ }
+ }
+ if waiters.is_empty() {
+ self.state.fetch_and(!HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock
+ }
+ }
+ }
+
+ // Unlocks the mutex. Called by MutexGuard and MappedMutexGuard when they are
+ // dropped.
+ fn unlock(&self) {
+ let old_state = self.state.fetch_and(!IS_LOCKED, Ordering::AcqRel);
+ if (old_state & HAS_WAITERS) != 0 {
+ let mut waiters = self.waiters.lock().unwrap();
+ if let Some((_i, waiter)) = waiters.iter_mut().next() {
+ waiter.wake();
+ }
+ }
+ }
+}
+
+// Sentinel for when no slot in the `Slab` has been dedicated to this object.
+const WAIT_KEY_NONE: usize = usize::max_value();
+
+/// A future which resolves when the target mutex has been successfully acquired.
+pub struct MutexLockFuture<'a, T: ?Sized> {
+ // `None` indicates that the mutex was successfully acquired.
+ mutex: Option<&'a Mutex<T>>,
+ wait_key: usize,
+}
+
+impl<T: ?Sized> fmt::Debug for MutexLockFuture<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MutexLockFuture")
+ .field("was_acquired", &self.mutex.is_none())
+ .field("mutex", &self.mutex)
+ .field(
+ "wait_key",
+ &(if self.wait_key == WAIT_KEY_NONE { None } else { Some(self.wait_key) }),
+ )
+ .finish()
+ }
+}
+
+impl<T: ?Sized> FusedFuture for MutexLockFuture<'_, T> {
+ fn is_terminated(&self) -> bool {
+ self.mutex.is_none()
+ }
+}
+
+impl<'a, T: ?Sized> Future for MutexLockFuture<'a, T> {
+ type Output = MutexGuard<'a, T>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mutex = self.mutex.expect("polled MutexLockFuture after completion");
+
+ if let Some(lock) = mutex.try_lock() {
+ mutex.remove_waker(self.wait_key, false);
+ self.mutex = None;
+ return Poll::Ready(lock);
+ }
+
+ {
+ let mut waiters = mutex.waiters.lock().unwrap();
+ if self.wait_key == WAIT_KEY_NONE {
+ self.wait_key = waiters.insert(Waiter::Waiting(cx.waker().clone()));
+ if waiters.len() == 1 {
+ mutex.state.fetch_or(HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock
+ }
+ } else {
+ waiters[self.wait_key].register(cx.waker());
+ }
+ }
+
+ // Ensure that we haven't raced `MutexGuard::drop`'s unlock path by
+ // attempting to acquire the lock again.
+ if let Some(lock) = mutex.try_lock() {
+ mutex.remove_waker(self.wait_key, false);
+ self.mutex = None;
+ return Poll::Ready(lock);
+ }
+
+ Poll::Pending
+ }
+}
+
+impl<T: ?Sized> Drop for MutexLockFuture<'_, T> {
+ fn drop(&mut self) {
+ if let Some(mutex) = self.mutex {
+ // This future was dropped before it acquired the mutex.
+ //
+ // Remove ourselves from the map, waking up another waiter if we
+ // had been awoken to acquire the lock.
+ mutex.remove_waker(self.wait_key, true);
+ }
+ }
+}
+
+/// An RAII guard returned by the `lock` and `try_lock` methods.
+/// When this structure is dropped (falls out of scope), the lock will be
+/// unlocked.
+pub struct MutexGuard<'a, T: ?Sized> {
+ mutex: &'a Mutex<T>,
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+ /// Returns a locked view over a portion of the locked data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::{Mutex, MutexGuard};
+ ///
+ /// let data = Mutex::new(Some("value".to_string()));
+ /// {
+ /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap());
+ /// assert_eq!(&*locked_str, "value");
+ /// }
+ /// # });
+ /// ```
+ #[inline]
+ pub fn map<U: ?Sized, F>(this: Self, f: F) -> MappedMutexGuard<'a, T, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let mutex = this.mutex;
+ let value = f(unsafe { &mut *this.mutex.value.get() });
+ // Don't run the `drop` method for MutexGuard. The ownership of the underlying
+ // locked state is being moved to the returned MappedMutexGuard.
+ mem::forget(this);
+ MappedMutexGuard { mutex, value, _marker: PhantomData }
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MutexGuard").field("value", &&**self).field("mutex", &self.mutex).finish()
+ }
+}
+
+impl<T: ?Sized> Drop for MutexGuard<'_, T> {
+ fn drop(&mut self) {
+ self.mutex.unlock()
+ }
+}
+
+impl<T: ?Sized> Deref for MutexGuard<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.mutex.value.get() }
+ }
+}
+
+impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.mutex.value.get() }
+ }
+}
+
+/// An RAII guard returned by the `MutexGuard::map` and `MappedMutexGuard::map` methods.
+/// When this structure is dropped (falls out of scope), the lock will be unlocked.
+pub struct MappedMutexGuard<'a, T: ?Sized, U: ?Sized> {
+ mutex: &'a Mutex<T>,
+ value: *mut U,
+ _marker: PhantomData<&'a mut U>,
+}
+
+impl<'a, T: ?Sized, U: ?Sized> MappedMutexGuard<'a, T, U> {
+ /// Returns a locked view over a portion of the locked data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::{MappedMutexGuard, Mutex, MutexGuard};
+ ///
+ /// let data = Mutex::new(Some("value".to_string()));
+ /// {
+ /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap());
+ /// let locked_char = MappedMutexGuard::map(locked_str, |s| s.get_mut(0..1).unwrap());
+ /// assert_eq!(&*locked_char, "v");
+ /// }
+ /// # });
+ /// ```
+ #[inline]
+ pub fn map<V: ?Sized, F>(this: Self, f: F) -> MappedMutexGuard<'a, T, V>
+ where
+ F: FnOnce(&mut U) -> &mut V,
+ {
+ let mutex = this.mutex;
+ let value = f(unsafe { &mut *this.value });
+ // Don't run the `drop` method for MappedMutexGuard. The ownership of the underlying
+ // locked state is being moved to the returned MappedMutexGuard.
+ mem::forget(this);
+ MappedMutexGuard { mutex, value, _marker: PhantomData }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T, U> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MappedMutexGuard")
+ .field("value", &&**self)
+ .field("mutex", &self.mutex)
+ .finish()
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Drop for MappedMutexGuard<'_, T, U> {
+ fn drop(&mut self) {
+ self.mutex.unlock()
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Deref for MappedMutexGuard<'_, T, U> {
+ type Target = U;
+ fn deref(&self) -> &U {
+ unsafe { &*self.value }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> DerefMut for MappedMutexGuard<'_, T, U> {
+ fn deref_mut(&mut self) -> &mut U {
+ unsafe { &mut *self.value }
+ }
+}
+
+// Mutexes can be moved freely between threads and acquired on any thread so long
+// as the inner value can be safely sent between threads.
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+// It's safe to switch which thread the acquire is being attempted on so long as
+// `T` can be accessed on that thread.
+unsafe impl<T: ?Sized + Send> Send for MutexLockFuture<'_, T> {}
+// doesn't have any interesting `&self` methods (only Debug)
+unsafe impl<T: ?Sized> Sync for MutexLockFuture<'_, T> {}
+
+// Safe to send since we don't track any thread-specific details; the inner
+// lock is essentially spinlock-equivalent (attempt to flip an atomic bool).
+unsafe impl<T: ?Sized + Send> Send for MutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Send, U: ?Sized + Send> Send for MappedMutexGuard<'_, T, U> {}
+unsafe impl<T: ?Sized + Sync, U: ?Sized + Sync> Sync for MappedMutexGuard<'_, T, U> {}
+
+#[test]
+fn test_mutex_guard_debug_not_recurse() {
+ let mutex = Mutex::new(42);
+ let guard = mutex.try_lock().unwrap();
+ let _ = format!("{:?}", guard);
+ let guard = MutexGuard::map(guard, |n| n);
+ let _ = format!("{:?}", guard);
+}