Diffstat (limited to 'third_party/rust/atomic_refcell')
-rw-r--r--  third_party/rust/atomic_refcell/.cargo-checksum.json |   1
-rw-r--r--  third_party/rust/atomic_refcell/Cargo.toml            |  10
-rw-r--r--  third_party/rust/atomic_refcell/README.md             |   2
-rw-r--r--  third_party/rust/atomic_refcell/benches/basic.rs      |  37
-rw-r--r--  third_party/rust/atomic_refcell/src/lib.rs            | 347
-rw-r--r--  third_party/rust/atomic_refcell/tests/basic.rs        |  97
6 files changed, 494 insertions, 0 deletions
diff --git a/third_party/rust/atomic_refcell/.cargo-checksum.json b/third_party/rust/atomic_refcell/.cargo-checksum.json
new file mode 100644
index 0000000000..b1249114ea
--- /dev/null
+++ b/third_party/rust/atomic_refcell/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"2246eb912d5b6592d7eafb7bcb171b602533d2f4522cf7f963c1cf10457dda40","README.md":"365e853efc3662c9750a93c8b67de69054bf585342abd2fc11a214ce0a408419","benches/basic.rs":"aba00ab044b37a7fb4e5c855cf88a323a04d6143b651717e227fdd12735602f3","src/lib.rs":"8fadbfe41328feeb9671c0408aaf6d18c2ca2b48b5740768e549e75ca2192ce6","tests/basic.rs":"1422080709fd5ccc45af363875926d140a3d85f3497f17e858576f1214a3b638"},"package":"fb2dcb6e6d35f20276943cc04bb98e538b348d525a04ac79c10021561d202f21"} \ No newline at end of file
diff --git a/third_party/rust/atomic_refcell/Cargo.toml b/third_party/rust/atomic_refcell/Cargo.toml
new file mode 100644
index 0000000000..e9819246e6
--- /dev/null
+++ b/third_party/rust/atomic_refcell/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "atomic_refcell"
+version = "0.1.0"
+authors = ["Bobby Holley <bobbyholley@gmail.com>"]
+description = "Threadsafe RefCell"
+license = "Apache-2.0/MIT"
+repository = "https://github.com/bholley/atomic_refcell"
+documentation = "https://docs.rs/atomic_refcell/"
+
+[dependencies]
diff --git a/third_party/rust/atomic_refcell/README.md b/third_party/rust/atomic_refcell/README.md
new file mode 100644
index 0000000000..d671a845b6
--- /dev/null
+++ b/third_party/rust/atomic_refcell/README.md
@@ -0,0 +1,2 @@
+# atomic_refcell
+Threadsafe RefCell for Rust
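
The README is terse, so here is a minimal usage sketch of the API this patch vendors (illustrative only, not part of the patch; it assumes the crate is available under the name atomic_refcell as added here):

extern crate atomic_refcell;

use atomic_refcell::AtomicRefCell;

fn main() {
    let cell = AtomicRefCell::new(vec![1, 2, 3]);

    {
        // Any number of overlapping immutable borrows is allowed.
        let a = cell.borrow();
        let b = cell.borrow();
        assert_eq!(a.len() + b.len(), 6);
    } // Both guards drop here, releasing the immutable borrows.

    // A mutable borrow is exclusive. Taking one while an immutable guard
    // is still alive panics rather than blocking, mirroring RefCell.
    cell.borrow_mut().push(4);
    assert_eq!(cell.borrow().len(), 4);
}
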
diff --git a/third_party/rust/atomic_refcell/benches/basic.rs b/third_party/rust/atomic_refcell/benches/basic.rs
new file mode 100644
index 0000000000..3902a98a37
--- /dev/null
+++ b/third_party/rust/atomic_refcell/benches/basic.rs
@@ -0,0 +1,37 @@
+#![feature(test)]
+
+extern crate atomic_refcell;
+extern crate test;
+
+use atomic_refcell::AtomicRefCell;
+use test::Bencher;
+
+#[derive(Default)]
+struct Bar(u32);
+
+#[bench]
+fn immutable_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn immutable_second_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn immutable_third_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow();
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn mutable_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ b.iter(|| a.borrow_mut());
+}
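
Note that benches/basic.rs uses #![feature(test)], so it only builds on a nightly toolchain (cargo +nightly bench). A rough stable-toolchain equivalent, sketched here with the external criterion crate (an assumption, not a dependency of this patch), would look like:

#[macro_use]
extern crate criterion;
extern crate atomic_refcell;

use atomic_refcell::AtomicRefCell;
use criterion::Criterion;

#[derive(Default)]
struct Bar(u32);

fn borrows(c: &mut Criterion) {
    let a = AtomicRefCell::new(Bar::default());
    // Immutable borrow: one fetch_add to acquire, one fetch_sub on drop.
    c.bench_function("immutable_borrow", |b| b.iter(|| a.borrow()));
    // Mutable borrow: one compare_exchange to acquire, one store on drop.
    c.bench_function("mutable_borrow", |b| b.iter(|| a.borrow_mut()));
}

criterion_group!(benches, borrows);
criterion_main!(benches);
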
diff --git a/third_party/rust/atomic_refcell/src/lib.rs b/third_party/rust/atomic_refcell/src/lib.rs
new file mode 100644
index 0000000000..21ffe49746
--- /dev/null
+++ b/third_party/rust/atomic_refcell/src/lib.rs
@@ -0,0 +1,347 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! Implements a container type providing RefCell-like semantics for objects
+//! shared across threads.
+//!
+//! RwLock is traditionally considered to be the |Sync| analogue of RefCell.
+//! However, for consumers that can guarantee that they will never mutably
+//! borrow the contents concurrently with immutable borrows, an RwLock is
+//! overkill, and has key disadvantages:
+//! * Performance: Even the fastest existing implementation of RwLock (that of
+//! parking_lot) performs at least two atomic operations during immutable
+//! borrows. This makes mutable borrows significantly cheaper than immutable
+//! borrows, leading to weird incentives when writing performance-critical
+//! code.
+//! * Features: Implementing AtomicRefCell on top of RwLock makes it impossible
+//! to implement useful things like AtomicRef{,Mut}::map.
+//!
+//! As such, we re-implement RefCell semantics from scratch with a single atomic
+//! reference count. The primary complication of this scheme relates to keeping
+//! things in a consistent state when one thread performs an illegal borrow and
+//! panics. Since an AtomicRefCell can be accessed by multiple threads, and since
+//! panics are recoverable, we need to ensure that an illegal (panicking) access by
+//! one thread does not lead to undefined behavior on other, still-running threads.
+//!
+//! So we represent things as follows:
+//! * Any value with the high bit set (so half the total refcount space) indicates
+//! a mutable borrow.
+//! * Mutable borrows perform an atomic compare-and-swap, swapping in the high bit
+//! if the current value is zero. If the current value is non-zero, the thread
+//! panics and the value is left undisturbed.
+//! * Immutable borrows perform an atomic increment. If the new value has the high
+//! bit set, the thread panics. The incremented refcount is left as-is, since it
+//! still represents a valid mutable borrow. When the mutable borrow is released,
+//! the refcount is set unconditionally to zero, clearing any stray increments by
+//! panicked threads.
+//!
+//! There are a few additional purely-academic complications to handle overflow,
+//! which are documented in the implementation.
+//!
+//! The rest of this module is mostly derived by copy-pasting the implementation of
+//! RefCell and fixing things up as appropriate. Certain non-threadsafe methods
+//! have been removed. We segment the concurrency logic from the rest of the code to
+//! keep the tricky parts small and easy to audit.
+
+#![allow(unsafe_code)]
+#![deny(missing_docs)]
+
+use std::cell::UnsafeCell;
+use std::cmp;
+use std::fmt;
+use std::fmt::Debug;
+use std::ops::{Deref, DerefMut};
+use std::sync::atomic;
+use std::sync::atomic::AtomicUsize;
+
+/// A threadsafe analogue to RefCell.
+pub struct AtomicRefCell<T: ?Sized> {
+ borrow: AtomicUsize,
+ value: UnsafeCell<T>,
+}
+
+impl<T> AtomicRefCell<T> {
+ /// Creates a new `AtomicRefCell` containing `value`.
+ #[inline]
+ pub fn new(value: T) -> AtomicRefCell<T> {
+ AtomicRefCell {
+ borrow: AtomicUsize::new(0),
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Consumes the `AtomicRefCell`, returning the wrapped value.
+ #[inline]
+ pub fn into_inner(self) -> T {
+ debug_assert!(self.borrow.load(atomic::Ordering::Acquire) == 0);
+ unsafe { self.value.into_inner() }
+ }
+}
+
+impl<T: ?Sized> AtomicRefCell<T> {
+ /// Immutably borrows the wrapped value.
+ #[inline]
+ pub fn borrow(&self) -> AtomicRef<T> {
+ AtomicRef {
+ value: unsafe { &*self.value.get() },
+ borrow: AtomicBorrowRef::new(&self.borrow),
+ }
+ }
+
+ /// Mutably borrows the wrapped value.
+ #[inline]
+ pub fn borrow_mut(&self) -> AtomicRefMut<T> {
+ AtomicRefMut {
+ value: unsafe { &mut *self.value.get() },
+ borrow: AtomicBorrowRefMut::new(&self.borrow),
+ }
+ }
+
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// External synchronization is needed to avoid data races when dereferencing
+ /// the pointer.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+}
+
+//
+// Core synchronization logic. Keep this section small and easy to audit.
+//
+
+const HIGH_BIT: usize = !(::std::usize::MAX >> 1);
+const MAX_FAILED_BORROWS: usize = HIGH_BIT + (HIGH_BIT >> 1);
+
+struct AtomicBorrowRef<'b> {
+ borrow: &'b AtomicUsize,
+}
+
+impl<'b> AtomicBorrowRef<'b> {
+ #[inline]
+ fn new(borrow: &'b AtomicUsize) -> Self {
+ let new = borrow.fetch_add(1, atomic::Ordering::Acquire) + 1;
+
+ // If the new count has the high bit set, panic. The specifics of how
+ // we panic are interesting for soundness, but irrelevant for real programs.
+ if new & HIGH_BIT != 0 {
+ Self::do_panic(borrow, new);
+ }
+
+ AtomicBorrowRef { borrow: borrow }
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn do_panic(borrow: &'b AtomicUsize, new: usize) {
+ if new == HIGH_BIT {
+ // We overflowed into the reserved upper half of the refcount
+ // space. Before panicking, decrement the refcount to leave things
+ // in a consistent immutable-borrow state.
+ //
+ // This can basically only happen if somebody forget()s AtomicRefs
+ // in a tight loop.
+ borrow.fetch_sub(1, atomic::Ordering::Release);
+ panic!("too many immutable borrows");
+ } else if new >= MAX_FAILED_BORROWS {
+ // During the mutable borrow, an absurd number of threads have
+ // incremented the refcount and panicked. To avoid hypothetically
+ // wrapping the refcount, we abort the process once a certain
+ // threshold is reached.
+ //
+ // This requires billions of threads to have panicked already, and
+ // so will never happen in a real program.
+ println!("Too many failed borrows");
+ ::std::process::exit(1);
+ } else {
+ // This is the normal case, and the only one which should happen
+ // in a real program.
+ panic!("already mutably borrowed");
+ }
+ }
+}
+
+impl<'b> Drop for AtomicBorrowRef<'b> {
+ #[inline]
+ fn drop(&mut self) {
+ let old = self.borrow.fetch_sub(1, atomic::Ordering::Release);
+ // This assertion is technically incorrect in the case where another
+ // thread hits the hypothetical overflow case, since we might observe
+ // the refcount before it fixes it up (and panics). But that will never
+ // happen in a real program, and this is a debug_assert! anyway.
+ debug_assert!(old & HIGH_BIT == 0);
+ }
+}
+
+struct AtomicBorrowRefMut<'b> {
+ borrow: &'b AtomicUsize,
+}
+
+impl<'b> Drop for AtomicBorrowRefMut<'b> {
+ #[inline]
+ fn drop(&mut self) {
+ self.borrow.store(0, atomic::Ordering::Release);
+ }
+}
+
+impl<'b> AtomicBorrowRefMut<'b> {
+ #[inline]
+ fn new(borrow: &'b AtomicUsize) -> AtomicBorrowRefMut<'b> {
+ // Use compare-and-swap to avoid corrupting the immutable borrow count
+ // on illegal mutable borrows.
+ let old = match borrow.compare_exchange(0, HIGH_BIT, atomic::Ordering::Acquire, atomic::Ordering::Relaxed) {
+ Ok(x) => x,
+ Err(x) => x,
+ };
+ assert!(old == 0, "already {} borrowed", if old & HIGH_BIT == 0 { "immutably" } else { "mutably" });
+ AtomicBorrowRefMut {
+ borrow: borrow
+ }
+ }
+}
+
+unsafe impl<T: ?Sized + Send + Sync> Send for AtomicRefCell<T> {}
+unsafe impl<T: ?Sized + Send + Sync> Sync for AtomicRefCell<T> {}
+
+//
+// End of core synchronization logic. No tricky thread stuff allowed below
+// this point.
+//
+
+impl<T: Clone> Clone for AtomicRefCell<T> {
+ #[inline]
+ fn clone(&self) -> AtomicRefCell<T> {
+ AtomicRefCell::new(self.borrow().clone())
+ }
+}
+
+impl<T: Default> Default for AtomicRefCell<T> {
+ #[inline]
+ fn default() -> AtomicRefCell<T> {
+ AtomicRefCell::new(Default::default())
+ }
+}
+
+impl<T: ?Sized + PartialEq> PartialEq for AtomicRefCell<T> {
+ #[inline]
+ fn eq(&self, other: &AtomicRefCell<T>) -> bool {
+ *self.borrow() == *other.borrow()
+ }
+}
+
+impl<T: ?Sized + Eq> Eq for AtomicRefCell<T> {}
+
+impl<T: ?Sized + PartialOrd> PartialOrd for AtomicRefCell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &AtomicRefCell<T>) -> Option<cmp::Ordering> {
+ self.borrow().partial_cmp(&*other.borrow())
+ }
+}
+
+impl<T: ?Sized + Ord> Ord for AtomicRefCell<T> {
+ #[inline]
+ fn cmp(&self, other: &AtomicRefCell<T>) -> cmp::Ordering {
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
+impl<T> From<T> for AtomicRefCell<T> {
+ fn from(t: T) -> AtomicRefCell<T> {
+ AtomicRefCell::new(t)
+ }
+}
+
+impl<'b> Clone for AtomicBorrowRef<'b> {
+ #[inline]
+ fn clone(&self) -> AtomicBorrowRef<'b> {
+ AtomicBorrowRef::new(self.borrow)
+ }
+}
+
+/// A wrapper type for an immutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRef<'b, T: ?Sized + 'b> {
+ value: &'b T,
+ borrow: AtomicBorrowRef<'b>,
+}
+
+
+impl<'b, T: ?Sized> Deref for AtomicRef<'b, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized> AtomicRef<'b, T> {
+ /// Copies an `AtomicRef`.
+ #[inline]
+ pub fn clone(orig: &AtomicRef<'b, T>) -> AtomicRef<'b, T> {
+ AtomicRef {
+ value: orig.value,
+ borrow: orig.borrow.clone(),
+ }
+ }
+
+ /// Make a new `AtomicRef` for a component of the borrowed data.
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: AtomicRef<'b, T>, f: F) -> AtomicRef<'b, U>
+ where F: FnOnce(&T) -> &U
+ {
+ AtomicRef {
+ value: f(orig.value),
+ borrow: orig.borrow,
+ }
+ }
+}
+
+impl<'b, T: ?Sized> AtomicRefMut<'b, T> {
+ /// Make a new `AtomicRefMut` for a component of the borrowed data, e.g. an enum
+ /// variant.
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: AtomicRefMut<'b, T>, f: F) -> AtomicRefMut<'b, U>
+ where F: FnOnce(&mut T) -> &mut U
+ {
+ AtomicRefMut {
+ value: f(orig.value),
+ borrow: orig.borrow,
+ }
+ }
+}
+
+/// A wrapper type for a mutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRefMut<'b, T: ?Sized + 'b> {
+ value: &'b mut T,
+ borrow: AtomicBorrowRefMut<'b>,
+}
+
+impl<'b, T: ?Sized> Deref for AtomicRefMut<'b, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized> DerefMut for AtomicRefMut<'b, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRef<'b, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.value.fmt(f)
+ }
+}
+
+impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRefMut<'b, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.value.fmt(f)
+ }
+}
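
The module docs above promise that a panicking illegal borrow leaves the cell in a consistent state for code that keeps running: the failed immutable borrow's increment is deliberately left in place, and releasing the mutable borrow stores zero, wiping any such stray increments. Below is a minimal sketch of that recovery path, using std::panic::catch_unwind to play the role of a recovered thread (illustrative only, not part of the patch):

extern crate atomic_refcell;

use atomic_refcell::AtomicRefCell;
use std::panic;

fn main() {
    let cell = AtomicRefCell::new(0u32);

    {
        let _guard = cell.borrow_mut();
        // This borrow increments the refcount, sees the high bit set, and
        // panics with "already mutably borrowed". The increment is left
        // behind, exactly as the docs describe.
        let result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
            let _illegal = cell.borrow();
        }));
        assert!(result.is_err());
    } // Dropping the mutable guard stores 0, clearing the stray increment.

    // The cell is fully usable again despite the earlier panic.
    assert_eq!(*cell.borrow(), 0);
    *cell.borrow_mut() += 1;
    assert_eq!(*cell.borrow(), 1);
}
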
diff --git a/third_party/rust/atomic_refcell/tests/basic.rs b/third_party/rust/atomic_refcell/tests/basic.rs
new file mode 100644
index 0000000000..5d859bb268
--- /dev/null
+++ b/third_party/rust/atomic_refcell/tests/basic.rs
@@ -0,0 +1,97 @@
+extern crate atomic_refcell;
+
+use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
+
+struct Foo {
+ u: u32,
+}
+
+struct Bar {
+ f: Foo,
+}
+
+impl Default for Bar {
+ fn default() -> Self {
+ Bar { f: Foo { u: 42 } }
+ }
+}
+
+// FIXME(bholley): Add tests to exercise this in concurrent scenarios.
+
+#[test]
+fn immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow();
+}
+
+#[test]
+fn mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _ = a.borrow_mut();
+}
+
+#[test]
+fn interleaved() {
+ let a = AtomicRefCell::new(Bar::default());
+ {
+ let _ = a.borrow_mut();
+ }
+ {
+ let _first = a.borrow();
+ let _second = a.borrow();
+ }
+ {
+ let _ = a.borrow_mut();
+ }
+}
+
+#[test]
+#[should_panic(expected = "already immutably borrowed")]
+fn immutable_then_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow_mut();
+}
+
+#[test]
+#[should_panic(expected = "already mutably borrowed")]
+fn mutable_then_immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.borrow();
+}
+
+#[test]
+#[should_panic(expected = "already mutably borrowed")]
+fn double_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.borrow_mut();
+}
+
+#[test]
+fn map() {
+ let a = AtomicRefCell::new(Bar::default());
+ let b = a.borrow();
+ assert_eq!(b.f.u, 42);
+ let c = AtomicRef::map(b, |x| &x.f);
+ assert_eq!(c.u, 42);
+ let d = AtomicRef::map(c, |x| &x.u);
+ assert_eq!(*d, 42);
+}
+
+#[test]
+fn map_mut() {
+ let a = AtomicRefCell::new(Bar::default());
+ let mut b = a.borrow_mut();
+ assert_eq!(b.f.u, 42);
+ b.f.u = 43;
+ let mut c = AtomicRefMut::map(b, |x| &mut x.f);
+ assert_eq!(c.u, 43);
+ c.u = 44;
+ let mut d = AtomicRefMut::map(c, |x| &mut x.u);
+ assert_eq!(*d, 44);
+ *d = 45;
+ assert_eq!(*d, 45);
+}
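
tests/basic.rs carries a FIXME about exercising the cell from multiple threads. A hedged sketch of what such a test might look like (illustrative only; the test name, thread count, and iteration count are arbitrary and not part of this patch):

extern crate atomic_refcell;

use atomic_refcell::AtomicRefCell;
use std::sync::Arc;
use std::thread;

#[test]
fn concurrent_immutable_borrows() {
    let cell = Arc::new(AtomicRefCell::new(vec![1u32, 2, 3]));
    let mut handles = Vec::new();

    // Several threads take overlapping immutable borrows; none should
    // panic, since only mutable borrows are exclusive.
    for _ in 0..8 {
        let cell = Arc::clone(&cell);
        handles.push(thread::spawn(move || {
            for _ in 0..1000 {
                let guard = cell.borrow();
                assert_eq!(guard.len(), 3);
            }
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }

    // With every reader finished, a mutable borrow succeeds again.
    cell.borrow_mut().push(4);
    assert_eq!(cell.borrow().len(), 4);
}
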