Diffstat
-rw-r--r--  third_party/rust/atomic/.cargo-checksum.json            1
-rw-r--r--  third_party/rust/atomic/Cargo.toml                     26
-rw-r--r--  third_party/rust/atomic/LICENSE-APACHE                201
-rw-r--r--  third_party/rust/atomic/LICENSE-MIT                    25
-rw-r--r--  third_party/rust/atomic/README.md                      51
-rw-r--r--  third_party/rust/atomic/src/fallback.rs               196
-rw-r--r--  third_party/rust/atomic/src/lib.rs                    735
-rw-r--r--  third_party/rust/atomic/src/ops.rs                    666
-rw-r--r--  third_party/rust/atomic_refcell/.cargo-checksum.json    1
-rw-r--r--  third_party/rust/atomic_refcell/Cargo.toml             21
-rw-r--r--  third_party/rust/atomic_refcell/README.md               2
-rw-r--r--  third_party/rust/atomic_refcell/benches/basic.rs       37
-rw-r--r--  third_party/rust/atomic_refcell/src/lib.rs            486
-rw-r--r--  third_party/rust/atomic_refcell/tests/basic.rs        156
14 files changed, 2604 insertions, 0 deletions
diff --git a/third_party/rust/atomic/.cargo-checksum.json b/third_party/rust/atomic/.cargo-checksum.json
new file mode 100644
index 0000000000..9410ca648a
--- /dev/null
+++ b/third_party/rust/atomic/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"811d9a1badcf5b916dfb9be29473760b368750e94790dec636665ff4f07f43a7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"c45110524352e139b2d7de30ae225d098d3e016cabb66fcc4a90c790ebcb1c3c","src/fallback.rs":"ef3172236b690c8bb88d307c7a33c898420ea34c8a5ad5025f0e3eb3cef2b4f5","src/lib.rs":"bbbf586ec7e2e5e54863fd3f2bdfbf5b2a2a6c179cf6df228635163a2592c89f","src/ops.rs":"884549e2c4163fc9e3e383fa007db5aad872d8e4c99680e6c6bf48806cd26390"},"package":"64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e"} \ No newline at end of file
diff --git a/third_party/rust/atomic/Cargo.toml b/third_party/rust/atomic/Cargo.toml
new file mode 100644
index 0000000000..cc7531f90e
--- /dev/null
+++ b/third_party/rust/atomic/Cargo.toml
@@ -0,0 +1,26 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "atomic"
+version = "0.4.6"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Generic Atomic<T> wrapper type"
+documentation = "https://amanieu.github.io/atomic-rs/atomic/index.html"
+readme = "README.md"
+keywords = ["atomic", "no_std"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/Amanieu/atomic-rs"
+
+[features]
+nightly = []
+std = []
diff --git a/third_party/rust/atomic/LICENSE-APACHE b/third_party/rust/atomic/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/third_party/rust/atomic/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/atomic/LICENSE-MIT b/third_party/rust/atomic/LICENSE-MIT
new file mode 100644
index 0000000000..40b8817a47
--- /dev/null
+++ b/third_party/rust/atomic/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/atomic/README.md b/third_party/rust/atomic/README.md
new file mode 100644
index 0000000000..7b122167bf
--- /dev/null
+++ b/third_party/rust/atomic/README.md
@@ -0,0 +1,51 @@
+Generic `Atomic<T>` for Rust
+============================
+
+[![Build Status](https://travis-ci.org/Amanieu/atomic-rs.svg?branch=master)](https://travis-ci.org/Amanieu/atomic-rs) [![Crates.io](https://img.shields.io/crates/v/atomic.svg)](https://crates.io/crates/atomic)
+
+A Rust library which provides a generic `Atomic<T>` type for all `T: Copy` types, unlike the standard library which only provides a few fixed atomic types (`AtomicBool`, `AtomicIsize`, `AtomicUsize`, `AtomicPtr`).
+
+This library will use native atomic instructions if possible, and will otherwise fall back to a lock-based mechanism. You can use the `Atomic::<T>::is_lock_free()` function to check whether native atomic operations are supported for a given type. Note that a type must have a power-of-2 size and alignment in order to be used by native atomic instructions.
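+
+For example, a minimal sketch of checking lock-freedom and performing basic
+operations:
+
+```rust
+extern crate atomic;
+
+use atomic::{Atomic, Ordering};
+
+fn main() {
+    // `usize` always maps onto a native atomic (`AtomicUsize`).
+    assert!(Atomic::<usize>::is_lock_free());
+
+    let a = Atomic::new(0usize);
+    a.store(7, Ordering::SeqCst);
+    assert_eq!(a.load(Ordering::SeqCst), 7);
+
+    // A 3-byte type does not have a power-of-2 size, so it falls back to a
+    // spinlock-based implementation internally.
+    #[derive(Copy, Clone)]
+    struct Rgb(u8, u8, u8);
+    assert!(!Atomic::<Rgb>::is_lock_free());
+}
+```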
+
+Only a subset of native atomic operations are supported on stable Rust (types which are the same size as `AtomicUsize`), but you can use the `nightly` Cargo feature on a nightly compiler to enable the full range of native atomic instructions. The `nightly` feature also enables `const fn` constructors which allow you to initialize static atomic variables.
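+
+With the `nightly` feature enabled, for example, a static atomic can be
+initialized directly (a sketch assuming a nightly compiler):
+
+```rust
+use atomic::{Atomic, Ordering};
+
+// `Atomic::new` is a `const fn` under the `nightly` feature.
+static COUNTER: Atomic<usize> = Atomic::new(0);
+
+fn next_id() -> usize {
+    COUNTER.fetch_add(1, Ordering::Relaxed)
+}
+```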
+
+This crate uses `#![no_std]` and only depends on libcore.
+
+[Documentation](https://amanieu.github.io/atomic-rs/atomic/index.html)
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+atomic = "0.4"
+```
+
+and this to your crate root:
+
+```rust
+extern crate atomic;
+```
+
+To enable nightly-only features, add this to your `Cargo.toml` instead:
+
+```toml
+[dependencies]
+atomic = {version = "0.4", features = ["nightly"]}
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/third_party/rust/atomic/src/fallback.rs b/third_party/rust/atomic/src/fallback.rs
new file mode 100644
index 0000000000..8b7e86119b
--- /dev/null
+++ b/third_party/rust/atomic/src/fallback.rs
@@ -0,0 +1,196 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::ptr;
+use core::slice;
+use core::sync::atomic::{self, AtomicUsize, Ordering};
+
+// We use an AtomicUsize instead of an AtomicBool because it performs better
+// on architectures that don't have byte-sized atomics.
+//
+// We give each spinlock its own cache line to avoid false sharing.
+#[repr(align(64))]
+struct SpinLock(AtomicUsize);
+
+impl SpinLock {
+ fn lock(&self) {
+ while self
+ .0
+ .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
+ .is_err()
+ {
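+            // Spin with plain relaxed loads until the lock looks free before
+            // retrying the CAS: this test-and-test-and-set pattern avoids
+            // bouncing the cache line between cores while the lock is held.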
+ while self.0.load(Ordering::Relaxed) != 0 {
+ atomic::spin_loop_hint();
+ }
+ }
+ }
+
+ fn unlock(&self) {
+ self.0.store(0, Ordering::Release);
+ }
+}
+
+// A big array of spinlocks which we use to guard atomic accesses. A spinlock is
+// chosen based on a hash of the address of the atomic object, which helps to
+// reduce contention compared to a single global lock.
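+//
+// A plain repeat expression `[SpinLock(AtomicUsize::new(0)); 64]` can't be
+// used to build the array because `SpinLock` is not `Copy`, so the macro
+// below expands the initializer by recursive doubling instead.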
+macro_rules! array {
+ (@accum (0, $($_es:expr),*) -> ($($body:tt)*))
+ => {array!(@as_expr [$($body)*])};
+ (@accum (1, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};
+ (@accum (2, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};
+ (@accum (4, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (8, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (16, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (32, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (64, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};
+
+ (@as_expr $e:expr) => {$e};
+
+ [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };
+}
+static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];
+
+// Spinlock pointer hashing function from compiler-rt
+#[inline]
+fn lock_for_addr(addr: usize) -> &'static SpinLock {
+ // Disregard the lowest 4 bits. We want all values that may be part of the
+ // same memory operation to hash to the same value and therefore use the same
+ // lock.
+ let mut hash = addr >> 4;
+ // Use the next bits as the basis for the hash
+ let low = hash & (SPINLOCKS.len() - 1);
+ // Now use the high(er) set of bits to perturb the hash, so that we don't
+ // get collisions from atomic fields in a single object
+ hash >>= 16;
+ hash ^= low;
+ // Return a pointer to the lock to use
+ &SPINLOCKS[hash & (SPINLOCKS.len() - 1)]
+}
+
+#[inline]
+fn lock(addr: usize) -> LockGuard {
+ let lock = lock_for_addr(addr);
+ lock.lock();
+ LockGuard(lock)
+}
+
+struct LockGuard(&'static SpinLock);
+impl Drop for LockGuard {
+ #[inline]
+ fn drop(&mut self) {
+ self.0.unlock();
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T) -> T {
+ let _l = lock(dst as usize);
+ ptr::read(dst)
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T) {
+ let _l = lock(dst as usize);
+ ptr::write(dst, val);
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ ptr::replace(dst, val)
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(dst: *mut T, current: T, new: T) -> Result<T, T> {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ // compare_exchange compares with memcmp instead of Eq
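+    // (`T` is only required to be `Copy`, so there is no `Eq` bound to call;
+    // note that byte-wise comparison can report inequality for types with
+    // padding bytes even when all fields compare equal)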
+ let a = slice::from_raw_parts(&result as *const _ as *const u8, mem::size_of_val(&result));
+ let b = slice::from_raw_parts(
+ &current as *const _ as *const u8,
+ mem::size_of_val(&current),
+ );
+ if a == b {
+ ptr::write(dst, new);
+ Ok(result)
+ } else {
+ Err(result)
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) + Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) - Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result & val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result | val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result ^ val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::min(result, val));
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::max(result, val));
+ result
+}
diff --git a/third_party/rust/atomic/src/lib.rs b/third_party/rust/atomic/src/lib.rs
new file mode 100644
index 0000000000..d6b19a651b
--- /dev/null
+++ b/third_party/rust/atomic/src/lib.rs
@@ -0,0 +1,735 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Generic `Atomic<T>` wrapper type
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent types.
+//!
+//! This library defines a generic atomic wrapper type `Atomic<T>` for all
+//! `T: Copy` types.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [LLVM atomic orderings][1].
+//!
+//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
+//!
+//! Atomic variables are safe to share between threads (they implement `Sync`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the `const fn` constructors (only available on nightly). Atomic statics
+//! are often used for lazy global initialization.
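+//!
+//! A minimal sketch of sharing an `Atomic` counter between threads through
+//! an `Arc`:
+//!
+//! ```rust
+//! extern crate atomic;
+//!
+//! use std::sync::Arc;
+//! use std::thread;
+//!
+//! use atomic::{Atomic, Ordering};
+//!
+//! fn main() {
+//!     let shared = Arc::new(Atomic::new(0usize));
+//!     let handles: Vec<_> = (0..4)
+//!         .map(|_| {
+//!             let shared = Arc::clone(&shared);
+//!             thread::spawn(move || {
+//!                 shared.fetch_add(1, Ordering::SeqCst);
+//!             })
+//!         })
+//!         .collect();
+//!     for handle in handles {
+//!         handle.join().unwrap();
+//!     }
+//!     assert_eq!(shared.load(Ordering::SeqCst), 4);
+//! }
+//! ```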
+
+#![warn(missing_docs)]
+#![no_std]
+#![cfg_attr(
+ feature = "nightly", feature(const_fn, cfg_target_has_atomic, atomic_min_max)
+)]
+
+#[cfg(any(test, feature = "std"))]
+#[macro_use]
+extern crate std;
+
+// Re-export some useful definitions from libcore
+pub use core::sync::atomic::{fence, Ordering};
+
+use core::cell::UnsafeCell;
+use core::fmt;
+
+#[cfg(feature = "std")]
+use std::panic::RefUnwindSafe;
+
+mod fallback;
+mod ops;
+
+/// A generic atomic wrapper type which allows an object to be safely shared
+/// between threads.
+pub struct Atomic<T: Copy> {
+ v: UnsafeCell<T>,
+}
+
+// Atomic<T> is only Sync if T is Send
+unsafe impl<T: Copy + Send> Sync for Atomic<T> {}
+
+// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
+// RefUnwindSafe.
+//
+// This is trivially correct for native lock-free atomic types. For those whose
+// atomicity is emulated using a spinlock, it is still correct because the
+// `Atomic` API does not allow doing any panic-inducing operation after writing
+// to the target object.
+#[cfg(feature = "std")]
+impl<T: Copy + RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
+
+impl<T: Copy + Default> Default for Atomic<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+impl<T: Copy + fmt::Debug> fmt::Debug for Atomic<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple("Atomic")
+ .field(&self.load(Ordering::SeqCst))
+ .finish()
+ }
+}
+
+impl<T: Copy> Atomic<T> {
+ /// Creates a new `Atomic`.
+ #[inline]
+ #[cfg(feature = "nightly")]
+ pub const fn new(v: T) -> Atomic<T> {
+ Atomic {
+ v: UnsafeCell::new(v),
+ }
+ }
+
+ /// Creates a new `Atomic`.
+ #[inline]
+ #[cfg(not(feature = "nightly"))]
+ pub fn new(v: T) -> Atomic<T> {
+ Atomic {
+ v: UnsafeCell::new(v),
+ }
+ }
+
+ /// Checks if `Atomic` objects of this type are lock-free.
+ ///
+ /// If an `Atomic` is not lock-free then it may be implemented using locks
+ /// internally, which makes it unsuitable for some situations (such as
+ /// communicating with a signal handler).
+ #[inline]
+ #[cfg(feature = "nightly")]
+ pub const fn is_lock_free() -> bool {
+ ops::atomic_is_lock_free::<T>()
+ }
+
+ /// Checks if `Atomic` objects of this type are lock-free.
+ ///
+ /// If an `Atomic` is not lock-free then it may be implemented using locks
+ /// internally, which makes it unsuitable for some situations (such as
+ /// communicating with a signal handler).
+ #[inline]
+ #[cfg(not(feature = "nightly"))]
+ pub fn is_lock_free() -> bool {
+ ops::atomic_is_lock_free::<T>()
+ }
+
+ /// Returns a mutable reference to the underlying type.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.v.get() }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn into_inner(self) -> T {
+ self.v.into_inner()
+ }
+
+ /// Loads a value from the `Atomic`.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ #[inline]
+ pub fn load(&self, order: Ordering) -> T {
+ unsafe { ops::atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the `Atomic`.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ pub fn store(&self, val: T, order: Ordering) {
+ unsafe {
+ ops::atomic_store(self.v.get(), val, order);
+ }
+ }
+
+ /// Stores a value into the `Atomic`, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ #[inline]
+ pub fn swap(&self, val: T, order: Ordering) -> T {
+ unsafe { ops::atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+    /// The return value is a result indicating whether the new value was
+    /// written and containing the previous value. On success this value is
+    /// guaranteed to be equal to `current`.
+    ///
+    /// `compare_exchange` takes two `Ordering` arguments to describe the memory
+    /// ordering of this operation. The first describes the required ordering if
+    /// the operation succeeds while the second describes the required ordering
+    /// when the operation fails. The failure ordering can't be `Release` or
+    /// `AcqRel` and must be equivalent or weaker than the success ordering.
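+    ///
+    /// A minimal sketch of the usual compare-and-swap loop built on this
+    /// method:
+    ///
+    /// ```rust
+    /// use atomic::{Atomic, Ordering};
+    ///
+    /// fn saturating_increment(a: &Atomic<u8>) -> u8 {
+    ///     let mut cur = a.load(Ordering::Relaxed);
+    ///     loop {
+    ///         let next = cur.saturating_add(1);
+    ///         match a.compare_exchange(cur, next, Ordering::SeqCst, Ordering::Relaxed) {
+    ///             Ok(prev) => return prev,
+    ///             // Another thread won the race; retry with the value it wrote.
+    ///             Err(actual) => cur = actual,
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// let a = Atomic::new(0u8);
+    /// assert_eq!(saturating_increment(&a), 0);
+    /// assert_eq!(a.load(Ordering::Relaxed), 1);
+    /// ```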
+ #[inline]
+ pub fn compare_exchange(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail
+ /// even when the comparison succeeds, which can result in more efficient
+ /// code on some platforms. The return value is a result indicating whether
+ /// the new value was written and containing the previous value.
+ ///
+    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the
+    /// memory ordering of this operation. The first describes the required
+    /// ordering if the operation succeeds while the second describes the
+    /// required ordering when the operation fails. The failure ordering can't
+    /// be `Release` or `AcqRel` and must be equivalent or weaker than the
+    /// success ordering.
+ #[inline]
+ pub fn compare_exchange_weak(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
+ }
+}
+
+impl Atomic<bool> {
+ /// Logical "and" with a boolean value.
+ ///
+ /// Performs a logical "and" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Performs a logical "or" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Performs a logical "xor" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+}
+
+macro_rules! atomic_ops_common {
+ ($($t:ty)*) => ($(
+ impl Atomic<$t> {
+ /// Add to the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract from the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_sub(self.v.get(), val, order) }
+ }
+
+ /// Bitwise and with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Bitwise or with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Bitwise xor with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+ }
+ )*);
+}
+macro_rules! atomic_ops_signed {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_min(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_max(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+macro_rules! atomic_ops_unsigned {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umin(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umax(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+atomic_ops_signed!{ i8 i16 i32 i64 isize i128 }
+atomic_ops_unsigned!{ u8 u16 u32 u64 usize u128 }
+
+#[cfg(test)]
+mod tests {
+ use core::mem;
+ use Atomic;
+ use Ordering::*;
+
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Foo(u8, u8);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Bar(u64, u64);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Quux(u32);
+
+ #[test]
+ fn atomic_bool() {
+ let a = Atomic::new(false);
+ assert_eq!(Atomic::<bool>::is_lock_free(), cfg!(feature = "nightly"));
+ assert_eq!(format!("{:?}", a), "Atomic(false)");
+ assert_eq!(a.load(SeqCst), false);
+ a.store(true, SeqCst);
+ assert_eq!(a.swap(false, SeqCst), true);
+ assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
+ assert_eq!(a.fetch_and(false, SeqCst), true);
+ assert_eq!(a.fetch_or(true, SeqCst), false);
+ assert_eq!(a.fetch_xor(false, SeqCst), true);
+ assert_eq!(a.load(SeqCst), true);
+ }
+
+ #[test]
+ fn atomic_i8() {
+ let a = Atomic::new(0i8);
+ assert_eq!(
+ Atomic::<i8>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "8",
+ all(feature = "nightly", target_has_atomic = "8")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ // Make sure overflows are handled correctly
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), -74);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i16() {
+ let a = Atomic::new(0i16);
+ assert_eq!(
+ Atomic::<i16>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "16",
+ all(feature = "nightly", target_has_atomic = "16")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i32() {
+ let a = Atomic::new(0i32);
+ assert_eq!(
+ Atomic::<i32>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "32",
+ all(feature = "nightly", target_has_atomic = "32")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i64() {
+ let a = Atomic::new(0i64);
+ assert_eq!(
+ Atomic::<i64>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "64",
+ all(feature = "nightly", target_has_atomic = "64")
+ )) && mem::align_of::<i64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i128() {
+ let a = Atomic::new(0i128);
+ assert_eq!(
+ Atomic::<i128>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "128",
+ all(feature = "nightly", target_has_atomic = "128")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_isize() {
+ let a = Atomic::new(0isize);
+ assert!(Atomic::<isize>::is_lock_free());
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u8() {
+ let a = Atomic::new(0u8);
+ assert_eq!(
+ Atomic::<u8>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "8",
+ all(feature = "nightly", target_has_atomic = "8")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u16() {
+ let a = Atomic::new(0u16);
+ assert_eq!(
+ Atomic::<u16>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "16",
+ all(feature = "nightly", target_has_atomic = "16")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u32() {
+ let a = Atomic::new(0u32);
+ assert_eq!(
+ Atomic::<u32>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "32",
+ all(feature = "nightly", target_has_atomic = "32")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u64() {
+ let a = Atomic::new(0u64);
+ assert_eq!(
+ Atomic::<u64>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "64",
+ all(feature = "nightly", target_has_atomic = "64")
+ )) && mem::align_of::<u64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u128() {
+ let a = Atomic::new(0u128);
+ assert_eq!(
+ Atomic::<u128>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "128",
+ all(feature = "nightly", target_has_atomic = "128")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_usize() {
+ let a = Atomic::new(0usize);
+ assert!(Atomic::<usize>::is_lock_free());
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_foo() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Foo>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
+ assert_eq!(a.load(SeqCst), Foo(0, 0));
+ a.store(Foo(1, 1), SeqCst);
+ assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
+ assert_eq!(
+ a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
+ Err(Foo(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
+ Ok(Foo(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Foo(3, 3));
+ }
+
+ #[test]
+ fn atomic_bar() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Bar>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
+ assert_eq!(a.load(SeqCst), Bar(0, 0));
+ a.store(Bar(1, 1), SeqCst);
+ assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
+ assert_eq!(
+ a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
+ Err(Bar(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
+ Ok(Bar(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Bar(3, 3));
+ }
+
+ #[test]
+    fn atomic_quux() {
+ let a = Atomic::default();
+ assert_eq!(
+ Atomic::<Quux>::is_lock_free(),
+ cfg!(any(feature = "nightly", target_pointer_width = "32"))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
+ assert_eq!(a.load(SeqCst), Quux(0));
+ a.store(Quux(1), SeqCst);
+ assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
+ assert_eq!(
+ a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
+ Err(Quux(2))
+ );
+ assert_eq!(
+ a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
+ Ok(Quux(2))
+ );
+ assert_eq!(a.load(SeqCst), Quux(3));
+ }
+}
diff --git a/third_party/rust/atomic/src/ops.rs b/third_party/rust/atomic/src/ops.rs
new file mode 100644
index 0000000000..3626c9ed9a
--- /dev/null
+++ b/third_party/rust/atomic/src/ops.rs
@@ -0,0 +1,666 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::sync::atomic::Ordering;
+use fallback;
+
+#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+use core::sync::atomic::{ AtomicI8, AtomicU8 };
+#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+use core::sync::atomic::{ AtomicI16, AtomicU16 };
+#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+use core::sync::atomic::{ AtomicI32, AtomicU32 };
+#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+use core::sync::atomic::{ AtomicI64, AtomicU64 };
+
+#[cfg(not(feature = "nightly"))]
+use core::sync::atomic::AtomicUsize;
+#[cfg(not(feature = "nightly"))]
+const SIZEOF_USIZE: usize = mem::size_of::<usize>();
+#[cfg(not(feature = "nightly"))]
+const ALIGNOF_USIZE: usize = mem::align_of::<usize>();
+
+#[cfg(feature = "nightly")]
+#[inline]
+pub const fn atomic_is_lock_free<T>() -> bool {
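+    // T can use native atomics iff its size is a power of two, no larger than
+    // the widest native atomic handled here (8 bytes), and its alignment is
+    // at least as large as its size.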
+ let size = mem::size_of::<T>();
+ // FIXME: switch to … && … && … once that operator is supported in const functions
+ (1 == size.count_ones()) & (8 >= size) & (mem::align_of::<T>() >= size)
+}
+
+#[cfg(not(feature = "nightly"))]
+#[inline]
+pub fn atomic_is_lock_free<T>() -> bool {
+ let size = mem::size_of::<T>();
+ 1 == size.count_ones() && SIZEOF_USIZE >= size && mem::align_of::<T>() >= ALIGNOF_USIZE
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T, order: Ordering) -> T {
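+    // Dispatch on the size of T: if a native atomic type of the same size
+    // exists and the alignment is sufficient, reinterpret the location as
+    // that type; otherwise take the spinlock-based fallback. Every other
+    // operation in this module follows the same size-match pattern.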
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU8)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU16)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU32)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU64)).load(order))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicUsize)).load(order))
+ }
+ _ => fallback::atomic_load(dst),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ (*(dst as *const AtomicU8)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ (*(dst as *const AtomicU16)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ (*(dst as *const AtomicU32)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ (*(dst as *const AtomicU64)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ (*(dst as *const AtomicUsize)).store(mem::transmute_copy(&val), order)
+ }
+ _ => fallback::atomic_store(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU8)).swap(mem::transmute_copy(&val), order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_swap(dst, val),
+ }
+}
+
+#[inline]
+unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
+ match r {
+ Ok(x) => Ok(mem::transmute_copy(&x)),
+ Err(x) => Err(mem::transmute_copy(&x)),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ map_result((*(dst as *const AtomicU8)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ map_result((*(dst as *const AtomicU16)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ map_result((*(dst as *const AtomicU32)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ map_result((*(dst as *const AtomicU64)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ map_result((*(dst as *const AtomicUsize)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ _ => fallback::atomic_compare_exchange(dst, current, new),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange_weak<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ map_result((*(dst as *const AtomicU8)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ map_result((*(dst as *const AtomicU16)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ map_result((*(dst as *const AtomicU32)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ map_result((*(dst as *const AtomicU64)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ map_result((*(dst as *const AtomicUsize)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ _ => fallback::atomic_compare_exchange(dst, current, new),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_add(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_sub(dst, val),
+ }
+}
+
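+// The bitwise operations need no `Wrapping` bound: `and`/`or`/`xor` cannot
+// overflow, so plain `BitAnd`/`BitOr`/`BitXor` bounds suffice.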
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_and(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_or(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_xor(dst, val),
+ }
+}
+
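+// `atomic_min`/`atomic_max` dispatch to the signed `AtomicI*` types, while
+// `atomic_umin`/`atomic_umax` below use the unsigned `AtomicU*` types, so the
+// comparison always matches the signedness of `T`. Note there is no
+// `AtomicUsize` arm for non-nightly builds here: `fetch_min`/`fetch_max` are
+// nightly-only, so stable builds always take the fallback.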
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_min is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI8)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI16)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI32)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI64)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_min(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+    // Silence warning, fetch_max is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI8)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI16)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI32)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI64)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_max(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_umin<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_min is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_min(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_umax<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+    // Silence warning, fetch_max is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_max(dst, val),
+ }
+}
diff --git a/third_party/rust/atomic_refcell/.cargo-checksum.json b/third_party/rust/atomic_refcell/.cargo-checksum.json
new file mode 100644
index 0000000000..0dc5177384
--- /dev/null
+++ b/third_party/rust/atomic_refcell/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"1191be2168fb6a9914afd20cd5a1c7ac5e612596b834a60ac495b45629805a51","README.md":"6b468f17809063c5dcee5758d1daca147d74e2297a9dc78a5e50ae13d9ea6e42","benches/basic.rs":"aba00ab044b37a7fb4e5c855cf88a323a04d6143b651717e227fdd12735602f3","src/lib.rs":"631e765b36b20ab47c7cd35fb0bf2e9b6f9c2bf2a0ab72bd67d59759ff53a71f","tests/basic.rs":"2cefa20188254d8357a595975733ad1bffb4de90933d5d9e9caa17232c1d646b"},"package":"73b5e5f48b927f04e952dedc932f31995a65a0bf65ec971c74436e51bf6e970d"} \ No newline at end of file
diff --git a/third_party/rust/atomic_refcell/Cargo.toml b/third_party/rust/atomic_refcell/Cargo.toml
new file mode 100644
index 0000000000..d71703f230
--- /dev/null
+++ b/third_party/rust/atomic_refcell/Cargo.toml
@@ -0,0 +1,21 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "atomic_refcell"
+version = "0.1.8"
+authors = ["Bobby Holley <bobbyholley@gmail.com>"]
+description = "Threadsafe RefCell"
+documentation = "https://docs.rs/atomic_refcell/"
+license = "Apache-2.0/MIT"
+repository = "https://github.com/bholley/atomic_refcell"
+
+[dependencies]
diff --git a/third_party/rust/atomic_refcell/README.md b/third_party/rust/atomic_refcell/README.md
new file mode 100644
index 0000000000..de72c55daa
--- /dev/null
+++ b/third_party/rust/atomic_refcell/README.md
@@ -0,0 +1,2 @@
+# atomic_refcell
+Threadsafe RefCell for Rust.
diff --git a/third_party/rust/atomic_refcell/benches/basic.rs b/third_party/rust/atomic_refcell/benches/basic.rs
new file mode 100644
index 0000000000..3902a98a37
--- /dev/null
+++ b/third_party/rust/atomic_refcell/benches/basic.rs
@@ -0,0 +1,37 @@
+#![feature(test)]
+
+extern crate atomic_refcell;
+extern crate test;
+
+use atomic_refcell::AtomicRefCell;
+use test::Bencher;
+
+#[derive(Default)]
+struct Bar(u32);
+
+#[bench]
+fn immutable_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn immutable_second_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn immutable_third_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow();
+ b.iter(|| a.borrow());
+}
+
+#[bench]
+fn mutable_borrow(b: &mut Bencher) {
+ let a = AtomicRefCell::new(Bar::default());
+ b.iter(|| a.borrow_mut());
+}
diff --git a/third_party/rust/atomic_refcell/src/lib.rs b/third_party/rust/atomic_refcell/src/lib.rs
new file mode 100644
index 0000000000..dbf599aa52
--- /dev/null
+++ b/third_party/rust/atomic_refcell/src/lib.rs
@@ -0,0 +1,486 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+//! Implements a container type providing RefCell-like semantics for objects
+//! shared across threads.
+//!
+//! RwLock is traditionally considered to be the |Sync| analogue of RefCell.
+//! However, for consumers that can guarantee that they will never mutably
+//! borrow the contents concurrently with immutable borrows, an RwLock is
+//! overkill, and has key disadvantages:
+//! * Performance: Even the fastest existing implementation of RwLock (that of
+//! parking_lot) performs at least two atomic operations during immutable
+//! borrows. This makes mutable borrows significantly cheaper than immutable
+//! borrows, leading to weird incentives when writing performance-critical
+//! code.
+//! * Features: Implementing AtomicRefCell on top of RwLock makes it impossible
+//! to implement useful things like AtomicRef{,Mut}::map.
+//!
+//! As such, we re-implement RefCell semantics from scratch with a single atomic
+//! reference count. The primary complication of this scheme relates to keeping
+//! things in a consistent state when one thread performs an illegal borrow and
+//! panics. Since an AtomicRefCell can be accessed by multiple threads, and since
+//! panics are recoverable, we need to ensure that an illegal (panicking) access by
+//! one thread does not lead to undefined behavior on other, still-running threads.
+//!
+//! So we represent things as follows:
+//! * Any value with the high bit set (so half the total refcount space) indicates
+//! a mutable borrow.
+//! * Mutable borrows perform an atomic compare-and-swap, swapping in the high bit
+//! if the current value is zero. If the current value is non-zero, the thread
+//! panics and the value is left undisturbed.
+//! * Immutable borrows perform an atomic increment. If the new value has the high
+//! bit set, the thread panics. The incremented refcount is left as-is, since it
+//! still represents a valid mutable borrow. When the mutable borrow is released,
+//! the refcount is set unconditionally to zero, clearing any stray increments by
+//! panicked threads.
+//!
+//! There are a few additional purely-academic complications to handle overflow,
+//! which are documented in the implementation.
+//!
+//! The rest of this module is mostly derived by copy-pasting the implementation of
+//! RefCell and fixing things up as appropriate. Certain non-threadsafe methods
+//! have been removed. We segment the concurrency logic from the rest of the code to
+//! keep the tricky parts small and easy to audit.
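+//!
+//! A brief usage sketch (illustrative only; it exercises the `borrow` and
+//! `borrow_mut` APIs defined below):
+//!
+//! ```
+//! use atomic_refcell::AtomicRefCell;
+//!
+//! let cell = AtomicRefCell::new(5);
+//! {
+//!     // Any number of shared borrows may coexist.
+//!     let a = cell.borrow();
+//!     let b = cell.borrow();
+//!     assert_eq!(*a + *b, 10);
+//! }
+//! // Once all shared borrows are released, an exclusive borrow is allowed.
+//! *cell.borrow_mut() += 1;
+//! assert_eq!(*cell.borrow(), 6);
+//! ```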
+
+#![no_std]
+#![allow(unsafe_code)]
+#![deny(missing_docs)]
+
+use core::cell::UnsafeCell;
+use core::cmp;
+use core::fmt;
+use core::fmt::{Debug, Display};
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic;
+use core::sync::atomic::AtomicUsize;
+
+/// A threadsafe analogue to RefCell.
+pub struct AtomicRefCell<T: ?Sized> {
+ borrow: AtomicUsize,
+ value: UnsafeCell<T>,
+}
+
+/// An error returned by [`AtomicRefCell::try_borrow`](struct.AtomicRefCell.html#method.try_borrow).
+pub struct BorrowError {
+ _private: (),
+}
+
+impl Debug for BorrowError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowError").finish()
+ }
+}
+
+impl Display for BorrowError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt("already mutably borrowed", f)
+ }
+}
+
+/// An error returned by [`AtomicRefCell::try_borrow_mut`](struct.AtomicRefCell.html#method.try_borrow_mut).
+pub struct BorrowMutError {
+ _private: (),
+}
+
+impl Debug for BorrowMutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BorrowMutError").finish()
+ }
+}
+
+impl Display for BorrowMutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt("already borrowed", f)
+ }
+}
+
+impl<T> AtomicRefCell<T> {
+ /// Creates a new `AtomicRefCell` containing `value`.
+ #[inline]
+ pub const fn new(value: T) -> AtomicRefCell<T> {
+ AtomicRefCell {
+ borrow: AtomicUsize::new(0),
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Consumes the `AtomicRefCell`, returning the wrapped value.
+ #[inline]
+ pub fn into_inner(self) -> T {
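+        // `self` is consumed by value, so no borrows can be outstanding.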
+ debug_assert!(self.borrow.load(atomic::Ordering::Acquire) == 0);
+ self.value.into_inner()
+ }
+}
+
+impl<T: ?Sized> AtomicRefCell<T> {
+ /// Immutably borrows the wrapped value.
+ #[inline]
+ pub fn borrow(&self) -> AtomicRef<T> {
+ match AtomicBorrowRef::try_new(&self.borrow) {
+ Ok(borrow) => AtomicRef {
+ value: unsafe { &*self.value.get() },
+ borrow,
+ },
+ Err(s) => panic!("{}", s),
+ }
+ }
+
+ /// Attempts to immutably borrow the wrapped value, but instead of panicking
+ /// on a failed borrow, returns `Err`.
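+    ///
+    /// A minimal sketch (illustrative):
+    ///
+    /// ```
+    /// use atomic_refcell::AtomicRefCell;
+    ///
+    /// let cell = AtomicRefCell::new(1);
+    /// let guard = cell.borrow_mut();
+    /// assert!(cell.try_borrow().is_err()); // already mutably borrowed
+    /// drop(guard);
+    /// assert!(cell.try_borrow().is_ok());
+    /// ```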
+ #[inline]
+ pub fn try_borrow(&self) -> Result<AtomicRef<T>, BorrowError> {
+ match AtomicBorrowRef::try_new(&self.borrow) {
+ Ok(borrow) => Ok(AtomicRef {
+ value: unsafe { &*self.value.get() },
+ borrow,
+ }),
+ Err(_) => Err(BorrowError { _private: () }),
+ }
+ }
+
+ /// Mutably borrows the wrapped value.
+ #[inline]
+ pub fn borrow_mut(&self) -> AtomicRefMut<T> {
+ match AtomicBorrowRefMut::try_new(&self.borrow) {
+ Ok(borrow) => AtomicRefMut {
+ value: unsafe { &mut *self.value.get() },
+ borrow,
+ },
+ Err(s) => panic!("{}", s),
+ }
+ }
+
+ /// Attempts to mutably borrow the wrapped value, but instead of panicking
+ /// on a failed borrow, returns `Err`.
+ #[inline]
+ pub fn try_borrow_mut(&self) -> Result<AtomicRefMut<T>, BorrowMutError> {
+ match AtomicBorrowRefMut::try_new(&self.borrow) {
+ Ok(borrow) => Ok(AtomicRefMut {
+ value: unsafe { &mut *self.value.get() },
+ borrow,
+ }),
+ Err(_) => Err(BorrowMutError { _private: () }),
+ }
+ }
+
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// External synchronization is needed to avoid data races when dereferencing
+ /// the pointer.
+ #[inline]
+ pub fn as_ptr(&self) -> *mut T {
+ self.value.get()
+ }
+
+ /// Returns a mutable reference to the wrapped value.
+ ///
+ /// No runtime checks take place (unless debug assertions are enabled)
+ /// because this call borrows `AtomicRefCell` mutably at compile-time.
+ #[inline]
+ pub fn get_mut(&mut self) -> &mut T {
+ debug_assert!(self.borrow.load(atomic::Ordering::Acquire) == 0);
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+//
+// Core synchronization logic. Keep this section small and easy to audit.
+//
+
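+// HIGH_BIT is the most significant bit of usize; MAX_FAILED_BORROWS sits
+// halfway between HIGH_BIT and usize::MAX, so the refcount can absorb
+// HIGH_BIT >> 1 failed shared borrows during one mutable borrow before the
+// process aborts.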
+const HIGH_BIT: usize = !(::core::usize::MAX >> 1);
+const MAX_FAILED_BORROWS: usize = HIGH_BIT + (HIGH_BIT >> 1);
+
+struct AtomicBorrowRef<'b> {
+ borrow: &'b AtomicUsize,
+}
+
+impl<'b> AtomicBorrowRef<'b> {
+ #[inline]
+ fn try_new(borrow: &'b AtomicUsize) -> Result<Self, &'static str> {
+ let new = borrow.fetch_add(1, atomic::Ordering::Acquire) + 1;
+ if new & HIGH_BIT != 0 {
+ // If the new count has the high bit set, that almost certainly
+            // means there's a pre-existing mutable borrow. In that case,
+ // we simply leave the increment as a benign side-effect and
+ // return `Err`. Once the mutable borrow is released, the
+ // count will be reset to zero unconditionally.
+ //
+ // The overflow check here ensures that an unbounded number of
+ // immutable borrows during the scope of one mutable borrow
+ // will soundly trigger a panic (or abort) rather than UB.
+ Self::check_overflow(borrow, new);
+ Err("already mutably borrowed")
+ } else {
+            Ok(AtomicBorrowRef { borrow })
+ }
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn check_overflow(borrow: &'b AtomicUsize, new: usize) {
+ if new == HIGH_BIT {
+ // We overflowed into the reserved upper half of the refcount
+ // space. Before panicking, decrement the refcount to leave things
+ // in a consistent immutable-borrow state.
+ //
+ // This can basically only happen if somebody forget()s AtomicRefs
+ // in a tight loop.
+ borrow.fetch_sub(1, atomic::Ordering::Release);
+ panic!("too many immutable borrows");
+ } else if new >= MAX_FAILED_BORROWS {
+ // During the mutable borrow, an absurd number of threads have
+ // attempted to increment the refcount with immutable borrows.
+ // To avoid hypothetically wrapping the refcount, we abort the
+ // process once a certain threshold is reached.
+ //
+ // This requires billions of borrows to fail during the scope of
+ // one mutable borrow, and so is very unlikely to happen in a real
+ // program.
+ //
+ // To avoid a potential unsound state after overflowing, we make
+ // sure the entire process aborts.
+ //
+ // Right now, there's no stable way to do that without `std`:
+ // https://github.com/rust-lang/rust/issues/67952
+ // As a workaround, we cause an abort by making this thread panic
+ // during the unwinding of another panic.
+ //
+ // On platforms where the panic strategy is already 'abort', the
+            // ForceAbort object here has no effect, as the first panic already
+            // aborts the program before the object is dropped.
+ struct ForceAbort;
+ impl Drop for ForceAbort {
+ fn drop(&mut self) {
+ panic!("Aborting to avoid unsound state of AtomicRefCell");
+ }
+ }
+ let _abort = ForceAbort;
+ panic!("Too many failed borrows");
+ }
+ }
+}
+
+impl<'b> Drop for AtomicBorrowRef<'b> {
+ #[inline]
+ fn drop(&mut self) {
+ let old = self.borrow.fetch_sub(1, atomic::Ordering::Release);
+ // This assertion is technically incorrect in the case where another
+ // thread hits the hypothetical overflow case, since we might observe
+        // the refcount before it fixes it up (and panics). But that will never
+        // happen in a real program, and this is a debug_assert! anyway.
+ debug_assert!(old & HIGH_BIT == 0);
+ }
+}
+
+struct AtomicBorrowRefMut<'b> {
+ borrow: &'b AtomicUsize,
+}
+
+impl<'b> Drop for AtomicBorrowRefMut<'b> {
+ #[inline]
+ fn drop(&mut self) {
+ self.borrow.store(0, atomic::Ordering::Release);
+ }
+}
+
+impl<'b> AtomicBorrowRefMut<'b> {
+ #[inline]
+ fn try_new(borrow: &'b AtomicUsize) -> Result<AtomicBorrowRefMut<'b>, &'static str> {
+ // Use compare-and-swap to avoid corrupting the immutable borrow count
+ // on illegal mutable borrows.
+ let old = match borrow.compare_exchange(
+ 0,
+ HIGH_BIT,
+ atomic::Ordering::Acquire,
+ atomic::Ordering::Relaxed,
+ ) {
+ Ok(x) => x,
+ Err(x) => x,
+ };
+
+ if old == 0 {
+ Ok(AtomicBorrowRefMut { borrow })
+ } else if old & HIGH_BIT == 0 {
+ Err("already immutably borrowed")
+ } else {
+ Err("already mutably borrowed")
+ }
+ }
+}
+
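+// As with `RwLock`, `Sync` requires `T: Send + Sync`: any thread holding an
+// `&AtomicRefCell<T>` can obtain either `&T` (hence `T: Sync`) or, through
+// `borrow_mut`, `&mut T` (hence `T: Send`).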
+unsafe impl<T: ?Sized + Send> Send for AtomicRefCell<T> {}
+unsafe impl<T: ?Sized + Send + Sync> Sync for AtomicRefCell<T> {}
+
+//
+// End of core synchronization logic. No tricky thread stuff allowed below
+// this point.
+//
+
+impl<T: Clone> Clone for AtomicRefCell<T> {
+ #[inline]
+ fn clone(&self) -> AtomicRefCell<T> {
+ AtomicRefCell::new(self.borrow().clone())
+ }
+}
+
+impl<T: Default> Default for AtomicRefCell<T> {
+ #[inline]
+ fn default() -> AtomicRefCell<T> {
+ AtomicRefCell::new(Default::default())
+ }
+}
+
+impl<T: ?Sized + PartialEq> PartialEq for AtomicRefCell<T> {
+ #[inline]
+ fn eq(&self, other: &AtomicRefCell<T>) -> bool {
+ *self.borrow() == *other.borrow()
+ }
+}
+
+impl<T: ?Sized + Eq> Eq for AtomicRefCell<T> {}
+
+impl<T: ?Sized + PartialOrd> PartialOrd for AtomicRefCell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &AtomicRefCell<T>) -> Option<cmp::Ordering> {
+ self.borrow().partial_cmp(&*other.borrow())
+ }
+}
+
+impl<T: ?Sized + Ord> Ord for AtomicRefCell<T> {
+ #[inline]
+ fn cmp(&self, other: &AtomicRefCell<T>) -> cmp::Ordering {
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
+impl<T> From<T> for AtomicRefCell<T> {
+ fn from(t: T) -> AtomicRefCell<T> {
+ AtomicRefCell::new(t)
+ }
+}
+
+impl<'b> Clone for AtomicBorrowRef<'b> {
+ #[inline]
+ fn clone(&self) -> AtomicBorrowRef<'b> {
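+        // A shared borrow is already held, so the high bit cannot be newly
+        // set by a mutable borrow; in practice `try_new` can only fail here
+        // via the overflow paths, which panic or abort rather than return.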
+ AtomicBorrowRef::try_new(self.borrow).unwrap()
+ }
+}
+
+/// A wrapper type for an immutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRef<'b, T: ?Sized + 'b> {
+ value: &'b T,
+ borrow: AtomicBorrowRef<'b>,
+}
+
+impl<'b, T: ?Sized> Deref for AtomicRef<'b, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized> AtomicRef<'b, T> {
+ /// Copies an `AtomicRef`.
+ #[inline]
+ pub fn clone(orig: &AtomicRef<'b, T>) -> AtomicRef<'b, T> {
+ AtomicRef {
+ value: orig.value,
+ borrow: orig.borrow.clone(),
+ }
+ }
+
+ /// Make a new `AtomicRef` for a component of the borrowed data.
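+    ///
+    /// A small sketch (illustrative):
+    ///
+    /// ```
+    /// use atomic_refcell::{AtomicRef, AtomicRefCell};
+    ///
+    /// let cell = AtomicRefCell::new((5, 'b'));
+    /// let b1: AtomicRef<(u32, char)> = cell.borrow();
+    /// let b2: AtomicRef<u32> = AtomicRef::map(b1, |t| &t.0);
+    /// assert_eq!(*b2, 5);
+    /// ```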
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: AtomicRef<'b, T>, f: F) -> AtomicRef<'b, U>
+ where
+ F: FnOnce(&T) -> &U,
+ {
+ AtomicRef {
+ value: f(orig.value),
+ borrow: orig.borrow,
+ }
+ }
+
+ /// Make a new `AtomicRef` for an optional component of the borrowed data.
+ #[inline]
+ pub fn filter_map<U: ?Sized, F>(orig: AtomicRef<'b, T>, f: F) -> Option<AtomicRef<'b, U>>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ {
+ Some(AtomicRef {
+ value: f(orig.value)?,
+ borrow: orig.borrow,
+ })
+ }
+}
+
+impl<'b, T: ?Sized> AtomicRefMut<'b, T> {
+ /// Make a new `AtomicRefMut` for a component of the borrowed data, e.g. an enum
+ /// variant.
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: AtomicRefMut<'b, T>, f: F) -> AtomicRefMut<'b, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ AtomicRefMut {
+ value: f(orig.value),
+ borrow: orig.borrow,
+ }
+ }
+
+ /// Make a new `AtomicRefMut` for an optional component of the borrowed data.
+ #[inline]
+ pub fn filter_map<U: ?Sized, F>(orig: AtomicRefMut<'b, T>, f: F) -> Option<AtomicRefMut<'b, U>>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ {
+ Some(AtomicRefMut {
+ value: f(orig.value)?,
+ borrow: orig.borrow,
+ })
+ }
+}
+
+/// A wrapper type for a mutably borrowed value from an `AtomicRefCell<T>`.
+pub struct AtomicRefMut<'b, T: ?Sized + 'b> {
+ value: &'b mut T,
+ borrow: AtomicBorrowRefMut<'b>,
+}
+
+impl<'b, T: ?Sized> Deref for AtomicRefMut<'b, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized> DerefMut for AtomicRefMut<'b, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRef<'b, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.value.fmt(f)
+ }
+}
+
+impl<'b, T: ?Sized + Debug + 'b> Debug for AtomicRefMut<'b, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.value.fmt(f)
+ }
+}
+
+impl<T: ?Sized + Debug> Debug for AtomicRefCell<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
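+        // Print a placeholder rather than borrowing the value, which could
+        // panic if the cell is currently mutably borrowed.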
+ write!(f, "AtomicRefCell {{ ... }}")
+ }
+}
diff --git a/third_party/rust/atomic_refcell/tests/basic.rs b/third_party/rust/atomic_refcell/tests/basic.rs
new file mode 100644
index 0000000000..a37833085d
--- /dev/null
+++ b/third_party/rust/atomic_refcell/tests/basic.rs
@@ -0,0 +1,156 @@
+extern crate atomic_refcell;
+
+use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
+
+#[derive(Debug)]
+struct Foo {
+ u: u32,
+}
+
+#[derive(Debug)]
+struct Bar {
+ f: Foo,
+}
+
+impl Default for Bar {
+ fn default() -> Self {
+ Bar { f: Foo { u: 42 } }
+ }
+}
+
+// FIXME(bholley): Add tests to exercise this in concurrent scenarios.
+
+#[test]
+fn immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow();
+}
+
+#[test]
+fn try_immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.try_borrow().unwrap();
+ let _second = a.try_borrow().unwrap();
+}
+
+#[test]
+fn mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _ = a.borrow_mut();
+}
+
+#[test]
+fn try_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _ = a.try_borrow_mut().unwrap();
+}
+
+#[test]
+fn get_mut() {
+ let mut a = AtomicRefCell::new(Bar::default());
+ let _ = a.get_mut();
+}
+
+#[test]
+fn interleaved() {
+ let a = AtomicRefCell::new(Bar::default());
+ {
+ let _ = a.borrow_mut();
+ }
+ {
+ let _first = a.borrow();
+ let _second = a.borrow();
+ }
+ {
+ let _ = a.borrow_mut();
+ }
+}
+
+#[test]
+fn try_interleaved() {
+ let a = AtomicRefCell::new(Bar::default());
+ {
+ let _ = a.try_borrow_mut().unwrap();
+ }
+ {
+ let _first = a.try_borrow().unwrap();
+ let _second = a.try_borrow().unwrap();
+ let _ = a.try_borrow_mut().unwrap_err();
+ }
+ {
+ let _first = a.try_borrow_mut().unwrap();
+ let _ = a.try_borrow().unwrap_err();
+ }
+}
+
+#[test]
+#[should_panic(expected = "already immutably borrowed")]
+fn immutable_then_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.borrow_mut();
+}
+
+#[test]
+fn immutable_then_try_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow();
+ let _second = a.try_borrow_mut().unwrap_err();
+}
+
+#[test]
+#[should_panic(expected = "already mutably borrowed")]
+fn mutable_then_immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.borrow();
+}
+
+#[test]
+fn mutable_then_try_immutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.try_borrow().unwrap_err();
+}
+
+#[test]
+#[should_panic(expected = "already mutably borrowed")]
+fn double_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.borrow_mut();
+}
+
+#[test]
+fn mutable_then_try_mutable() {
+ let a = AtomicRefCell::new(Bar::default());
+ let _first = a.borrow_mut();
+ let _second = a.try_borrow_mut().unwrap_err();
+}
+
+#[test]
+fn map() {
+ let a = AtomicRefCell::new(Bar::default());
+ let b = a.borrow();
+ assert_eq!(b.f.u, 42);
+ let c = AtomicRef::map(b, |x| &x.f);
+ assert_eq!(c.u, 42);
+ let d = AtomicRef::map(c, |x| &x.u);
+ assert_eq!(*d, 42);
+}
+
+#[test]
+fn map_mut() {
+ let a = AtomicRefCell::new(Bar::default());
+ let mut b = a.borrow_mut();
+ assert_eq!(b.f.u, 42);
+ b.f.u = 43;
+ let mut c = AtomicRefMut::map(b, |x| &mut x.f);
+ assert_eq!(c.u, 43);
+ c.u = 44;
+ let mut d = AtomicRefMut::map(c, |x| &mut x.u);
+ assert_eq!(*d, 44);
+ *d = 45;
+ assert_eq!(*d, 45);
+}