Diffstat (limited to 'third_party/rust/crossbeam-epoch-0.8.0')
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/.cargo-checksum.json | 1
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/CHANGELOG.md | 88
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/Cargo.lock | 248
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/Cargo.toml | 52
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/LICENSE-APACHE | 201
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/LICENSE-MIT | 27
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/README.md | 57
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/benches/defer.rs | 71
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/benches/flush.rs | 53
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/benches/pin.rs | 32
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/build.rs | 8
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/examples/sanitize.rs | 69
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/examples/treiber_stack.rs | 110
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/atomic.rs | 1201
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/collector.rs | 434
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/default.rs | 75
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/deferred.rs | 140
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/epoch.rs | 114
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/guard.rs | 529
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/internal.rs | 595
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/lib.rs | 106
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/sync/list.rs | 478
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/sync/mod.rs | 4
-rw-r--r--  third_party/rust/crossbeam-epoch-0.8.0/src/sync/queue.rs | 444
24 files changed, 5137 insertions, 0 deletions
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/.cargo-checksum.json b/third_party/rust/crossbeam-epoch-0.8.0/.cargo-checksum.json
new file mode 100644
index 0000000000..d42d88919d
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"1d5defc0cddbd56dab9aad0c52a6101e2ae38a04703622b70822af4162b422b6","Cargo.lock":"dd8ea5cc92174990a2726564d8ea7d4cf1dcbc0e5775a5a8f07b854bcb25e27d","Cargo.toml":"802dbfef2329ab8d39ce1602e93f41403e2c297521e702e4278cf88df106f3a4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"30bed3b95612935f87903eeb37dfd09e133e4c064a66eb8b5681eaf97128c8e9","benches/defer.rs":"a4b17e7b96bef2f268c76925cfa58744b81504146426fab6f781ff0b2f666f2b","benches/flush.rs":"1d93ac40cb78bf2c55332869d4f7d1b283571bff4a0c6dbe245f02dd65e8e1d8","benches/pin.rs":"dd937ecfc8d7353995d438a7f4be383e91c002e9ee6acd4caa703bb340c97383","build.rs":"825c47ae19028dc4b28101ec71c04e7e41b8b185f6ecbeacee223596524c86ad","examples/sanitize.rs":"90f80709cd620e994f699d84ab9ce88f8552500d08bac64d60def5792460495a","examples/treiber_stack.rs":"b5d3bafa6d57af435b00d04145572a51ea871f64ff2c23ba544054c4c4145d65","src/atomic.rs":"53c29c8df6a90e2cd3d7747f7ebfe604087966c8ea6473ff2593bdd495f43951","src/collector.rs":"a783049d3cb22989ec978366fd865f31562b0acbfbd0bc9ff9b600ceaa5ffa87","src/default.rs":"6afda8fd141ad594bed62baeb73f2e97c5ef33b051969a542bb908946fe39dd1","src/deferred.rs":"883067dd60b108baacaafc9024833c0ec08c6b5649b60c030ab9a59e952ccd06","src/epoch.rs":"76dd63356d5bc52e741883d39abb636e4ccb04d20499fb2a0ce797bb81aa4e91","src/guard.rs":"486efbc061b7f402f4c8a96abd1889aff4b28eb10347b65e538c39d539d919ad","src/internal.rs":"5b09661b12d93412140b2ee186ebb304891374e4c512b5bbc97cd015ec34c493","src/lib.rs":"e55b6c418970e3bbd265a21b9beaf2d8254f7f7799e9954edf4486666869e2aa","src/sync/list.rs":"43e09e9cf79d7cf95b5110051de9dd47ab7761e3bc90ea3be16c4cbabce6d4be","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"266036a201222fe46b6674fa789b96dd8c385bf2c37135799fb9ba1fba53049a"},"package":"5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"} \ No newline at end of file
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/CHANGELOG.md b/third_party/rust/crossbeam-epoch-0.8.0/CHANGELOG.md
new file mode 100644
index 0000000000..e4aa1fdb29
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/CHANGELOG.md
@@ -0,0 +1,88 @@
+# Version 0.8.0
+
+- Bump the minimum required version to 1.28.
+- Fix breakage with nightly feature due to rust-lang/rust#65214.
+- Make `Atomic::null()` const function at 1.31+.
+- Bump `crossbeam-utils` to `0.7`.
+
+# Version 0.7.2
+
+- Add `Atomic::into_owned()`.
+- Update `memoffset` dependency.
+
+# Version 0.7.1
+
+- Add `Shared::deref_mut()`.
+- Add a Treiber stack to examples.
+
+# Version 0.7.0
+
+- Remove `Guard::clone()`.
+- Bump dependencies.
+
+# Version 0.6.1
+
+- Update `crossbeam-utils` to `0.6`.
+
+# Version 0.6.0
+
+- `defer` now requires `F: Send + 'static`.
+- Bump the minimum Rust version to 1.26.
+- Pinning while TLS is tearing down does not fail anymore.
+- Rename `Handle` to `LocalHandle`.
+- Add `defer_unchecked` and `defer_destroy`.
+- Remove `Clone` impl for `LocalHandle`.
+
+# Version 0.5.2
+
+- Update `crossbeam-utils` to `0.5`.
+
+# Version 0.5.1
+
+- Fix compatibility with the latest Rust nightly.
+
+# Version 0.5.0
+
+- Update `crossbeam-utils` to `0.4`.
+- Specify the minimum Rust version to `1.25.0`.
+
+# Version 0.4.3
+
+- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change.
+
+# Version 0.4.2
+
+- Expose the `Pointer` trait.
+- Warn missing docs and missing debug impls.
+- Update `crossbeam-utils` to `0.4`.
+
+# Version 0.4.1
+
+- Add `Debug` impls for `Collector`, `Handle`, and `Guard`.
+- Add `load_consume` to `Atomic`.
+- Rename `Collector::handle` to `Collector::register`.
+- Remove the `Send` implementation for `Handle` (this was a bug). Only
+ `Collector`s can be shared among multiple threads, while `Handle`s and
+ `Guard`s must stay within the thread in which they were created.
+
+# Version 0.4.0
+
+- Update dependencies.
+- Remove support for Rust 1.13.
+
+# Version 0.3.0
+
+- Add support for Rust 1.13.
+- Improve documentation for CAS.
+
+# Version 0.2.0
+
+- Add method `Owned::into_box`.
+- Fix a use-after-free bug in `Local::finalize`.
+- Fix an ordering bug in `Global::push_bag`.
+- Fix a bug in calculating distance between epochs.
+- Remove `impl<T> Into<Box<T>> for Owned<T>`.
+
+# Version 0.1.0
+
+- First version of the new epoch-based GC.
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/Cargo.lock b/third_party/rust/crossbeam-epoch-0.8.0/Cargo.lock
new file mode 100644
index 0000000000..9f19f55e35
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/Cargo.lock
@@ -0,0 +1,248 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "autocfg"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.8.0"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.65"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memoffset"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand_hc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_isaac"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_jitter"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_os"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
+"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
+"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"
+"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
+"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+"checksum libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)" = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8"
+"checksum memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a85c1a8c329f11437034d7313dca647c79096523533a1c79e86f1d0f657c7cc"
+"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
+"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
+"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
+"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
+"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
+"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
+"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
+"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
+"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
+"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/Cargo.toml b/third_party/rust/crossbeam-epoch-0.8.0/Cargo.toml
new file mode 100644
index 0000000000..0cdcbbbcd0
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/Cargo.toml
@@ -0,0 +1,52 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "crossbeam-epoch"
+version = "0.8.0"
+authors = ["The Crossbeam Project Developers"]
+description = "Epoch-based garbage collection"
+homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch"
+documentation = "https://docs.rs/crossbeam-epoch"
+readme = "README.md"
+keywords = ["lock-free", "rcu", "atomic", "garbage"]
+categories = ["concurrency", "memory-management", "no-std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/crossbeam-rs/crossbeam"
+[dependencies.cfg-if]
+version = "0.1.2"
+
+[dependencies.crossbeam-utils]
+version = "0.7"
+default-features = false
+
+[dependencies.lazy_static]
+version = "1"
+optional = true
+
+[dependencies.memoffset]
+version = "0.5"
+
+[dependencies.scopeguard]
+version = "1"
+default-features = false
+[dev-dependencies.rand]
+version = "0.6"
+[build-dependencies.autocfg]
+version = "0.1.6"
+
+[features]
+alloc = ["crossbeam-utils/alloc"]
+default = ["std"]
+nightly = ["crossbeam-utils/nightly"]
+sanitize = []
+std = ["crossbeam-utils/std", "lazy_static"]
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-APACHE b/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-MIT b/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-MIT
new file mode 100644
index 0000000000..068d491fd5
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/LICENSE-MIT
@@ -0,0 +1,27 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 The Crossbeam Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/README.md b/third_party/rust/crossbeam-epoch-0.8.0/README.md
new file mode 100644
index 0000000000..79b4c773e1
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/README.md
@@ -0,0 +1,57 @@
+# Crossbeam Epoch
+
+[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)](
+https://travis-ci.org/crossbeam-rs/crossbeam)
+[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
+https://github.com/crossbeam-rs/crossbeam-epoch)
+[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](
+https://crates.io/crates/crossbeam-epoch)
+[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](
+https://docs.rs/crossbeam-epoch)
+[![Rust 1.28+](https://img.shields.io/badge/rust-1.28+-lightgray.svg)](
+https://www.rust-lang.org)
+[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.gg/BBYwKq)
+
+This crate provides epoch-based garbage collection for building concurrent data structures.
+
+When a thread removes an object from a concurrent data structure, other threads
+may still be using pointers to it at the same time, so it cannot be destroyed
+immediately. Epoch-based GC is an efficient mechanism for deferring destruction of
+shared objects until no pointers to them can exist.
+
+Everything in this crate except the global GC can be used in `no_std` environments, provided that
+features `alloc` and `nightly` are enabled.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+crossbeam-epoch = "0.8"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate crossbeam_epoch as epoch;
+```
+
+## Compatibility
+
+The minimum supported Rust version is 1.28. Any change to this is considered a breaking change.
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+#### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
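
The README's usage snippet stops at importing the crate, so here is a minimal, self-contained sketch of the deferred-destruction workflow it describes. It is illustrative only: it uses nothing beyond items that appear in the vendored sources in this diff (`Atomic`, `Owned`, `Shared`, `epoch::pin`, `Guard::defer_destroy`, `epoch::unprotected`), and the concrete values and single-threaded setting are arbitrary.

```rust
extern crate crossbeam_epoch as epoch;

use std::sync::atomic::Ordering::SeqCst;

use epoch::{Atomic, Owned, Shared};

fn main() {
    // An atomic pointer to a heap-allocated value, shareable between threads.
    let a = Atomic::new(1234);

    // Pin the current thread. While the guard is alive, objects that other
    // threads have scheduled for destruction are not freed out from under us.
    let guard = &epoch::pin();

    // Publish a new value and defer destruction of the old one until every
    // thread that could still see it has been unpinned.
    let old = a.swap(Owned::new(5678), SeqCst, guard);
    unsafe {
        guard.defer_destroy(old);
    }

    // Final cleanup: once no other thread can reach the pointer, take
    // ownership of it and drop it directly.
    unsafe {
        drop(a.swap(Shared::null(), SeqCst, epoch::unprotected()).into_owned());
    }
}
```

The final `unprotected()` swap is the same cleanup idiom used in `examples/sanitize.rs` below; it is sound only because nothing else can reach the pointer at that point.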
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/benches/defer.rs b/third_party/rust/crossbeam-epoch-0.8.0/benches/defer.rs
new file mode 100644
index 0000000000..e3693e74ad
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/benches/defer.rs
@@ -0,0 +1,71 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use epoch::Owned;
+use test::Bencher;
+use utils::thread::scope;
+
+#[bench]
+fn single_alloc_defer_free(b: &mut Bencher) {
+ b.iter(|| {
+ let guard = &epoch::pin();
+ let p = Owned::new(1).into_shared(guard);
+ unsafe {
+ guard.defer_destroy(p);
+ }
+ });
+}
+
+#[bench]
+fn single_defer(b: &mut Bencher) {
+ b.iter(|| {
+ let guard = &epoch::pin();
+ guard.defer(move || ());
+ });
+}
+
+#[bench]
+fn multi_alloc_defer_free(b: &mut Bencher) {
+ const THREADS: usize = 16;
+ const STEPS: usize = 10_000;
+
+ b.iter(|| {
+ scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ for _ in 0..STEPS {
+ let guard = &epoch::pin();
+ let p = Owned::new(1).into_shared(guard);
+ unsafe {
+ guard.defer_destroy(p);
+ }
+ }
+ });
+ }
+ })
+ .unwrap();
+ });
+}
+
+#[bench]
+fn multi_defer(b: &mut Bencher) {
+ const THREADS: usize = 16;
+ const STEPS: usize = 10_000;
+
+ b.iter(|| {
+ scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ for _ in 0..STEPS {
+ let guard = &epoch::pin();
+ guard.defer(move || ());
+ }
+ });
+ }
+ })
+ .unwrap();
+ });
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/benches/flush.rs b/third_party/rust/crossbeam-epoch-0.8.0/benches/flush.rs
new file mode 100644
index 0000000000..156d4b091c
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/benches/flush.rs
@@ -0,0 +1,53 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use std::sync::Barrier;
+
+use test::Bencher;
+use utils::thread::scope;
+
+#[bench]
+fn single_flush(b: &mut Bencher) {
+ const THREADS: usize = 16;
+
+ let start = Barrier::new(THREADS + 1);
+ let end = Barrier::new(THREADS + 1);
+
+ scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ epoch::pin();
+ start.wait();
+ end.wait();
+ });
+ }
+
+ start.wait();
+ b.iter(|| epoch::pin().flush());
+ end.wait();
+ })
+ .unwrap();
+}
+
+#[bench]
+fn multi_flush(b: &mut Bencher) {
+ const THREADS: usize = 16;
+ const STEPS: usize = 10_000;
+
+ b.iter(|| {
+ scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ for _ in 0..STEPS {
+ let guard = &epoch::pin();
+ guard.flush();
+ }
+ });
+ }
+ })
+ .unwrap();
+ });
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/benches/pin.rs b/third_party/rust/crossbeam-epoch-0.8.0/benches/pin.rs
new file mode 100644
index 0000000000..4b26f22397
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/benches/pin.rs
@@ -0,0 +1,32 @@
+#![feature(test)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate test;
+
+use test::Bencher;
+use utils::thread::scope;
+
+#[bench]
+fn single_pin(b: &mut Bencher) {
+ b.iter(|| epoch::pin());
+}
+
+#[bench]
+fn multi_pin(b: &mut Bencher) {
+ const THREADS: usize = 16;
+ const STEPS: usize = 100_000;
+
+ b.iter(|| {
+ scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ for _ in 0..STEPS {
+ epoch::pin();
+ }
+ });
+ }
+ })
+ .unwrap();
+ });
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/build.rs b/third_party/rust/crossbeam-epoch-0.8.0/build.rs
new file mode 100644
index 0000000000..d451c24b2f
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/build.rs
@@ -0,0 +1,8 @@
+extern crate autocfg;
+
+fn main() {
+ let cfg = autocfg::new();
+ if cfg.probe_rustc_version(1, 31) {
+ println!("cargo:rustc-cfg=has_min_const_fn");
+ }
+}
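
For context on the build script above: it probes the toolchain with `autocfg` and emits `cargo:rustc-cfg=has_min_const_fn` on Rust 1.31 or newer. The sketch below shows how such a flag is typically consumed; `make_null` is a hypothetical stand-in, but the pattern mirrors the two `Atomic::null` definitions gated on `has_min_const_fn` in `src/atomic.rs` further down.

```rust
use std::sync::atomic::AtomicUsize;

// Hypothetical example: the same constructor is compiled as a plain `fn` on
// older toolchains and as a `const fn` once build.rs has detected Rust 1.31+.
#[cfg(not(has_min_const_fn))]
pub fn make_null() -> AtomicUsize {
    AtomicUsize::new(0)
}

#[cfg(has_min_const_fn)]
pub const fn make_null() -> AtomicUsize {
    AtomicUsize::new(0)
}

fn main() {
    let _x = make_null();
}
```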
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/examples/sanitize.rs b/third_party/rust/crossbeam-epoch-0.8.0/examples/sanitize.rs
new file mode 100644
index 0000000000..12f1263122
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/examples/sanitize.rs
@@ -0,0 +1,69 @@
+extern crate crossbeam_epoch as epoch;
+extern crate rand;
+
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
+use std::sync::Arc;
+use std::thread;
+use std::time::{Duration, Instant};
+
+use epoch::{Atomic, Collector, LocalHandle, Owned, Shared};
+use rand::Rng;
+
+fn worker(a: Arc<Atomic<AtomicUsize>>, handle: LocalHandle) -> usize {
+ let mut rng = rand::thread_rng();
+ let mut sum = 0;
+
+ if rng.gen() {
+ thread::sleep(Duration::from_millis(1));
+ }
+ let timeout = Duration::from_millis(rng.gen_range(0, 10));
+ let now = Instant::now();
+
+ while now.elapsed() < timeout {
+ for _ in 0..100 {
+ let guard = &handle.pin();
+ guard.flush();
+
+ let val = if rng.gen() {
+ let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard);
+ unsafe {
+ guard.defer_destroy(p);
+ guard.flush();
+ p.deref().load(Relaxed)
+ }
+ } else {
+ let p = a.load(Acquire, guard);
+ unsafe { p.deref().fetch_add(sum, Relaxed) }
+ };
+
+ sum = sum.wrapping_add(val);
+ }
+ }
+
+ sum
+}
+
+fn main() {
+ for _ in 0..100 {
+ let collector = Collector::new();
+ let a = Arc::new(Atomic::new(AtomicUsize::new(777)));
+
+ let threads = (0..16)
+ .map(|_| {
+ let a = a.clone();
+ let c = collector.clone();
+ thread::spawn(move || worker(a, c.register()))
+ })
+ .collect::<Vec<_>>();
+
+ for t in threads {
+ t.join().unwrap();
+ }
+
+ unsafe {
+ a.swap(Shared::null(), AcqRel, epoch::unprotected())
+ .into_owned();
+ }
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/examples/treiber_stack.rs b/third_party/rust/crossbeam-epoch-0.8.0/examples/treiber_stack.rs
new file mode 100644
index 0000000000..cc15c0dbd6
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/examples/treiber_stack.rs
@@ -0,0 +1,110 @@
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+
+use std::mem::ManuallyDrop;
+use std::ptr;
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use epoch::{Atomic, Owned};
+use utils::thread::scope;
+
+/// Treiber's lock-free stack.
+///
+/// Usable with any number of producers and consumers.
+#[derive(Debug)]
+pub struct TreiberStack<T> {
+ head: Atomic<Node<T>>,
+}
+
+#[derive(Debug)]
+struct Node<T> {
+ data: ManuallyDrop<T>,
+ next: Atomic<Node<T>>,
+}
+
+impl<T> TreiberStack<T> {
+ /// Creates a new, empty stack.
+ pub fn new() -> TreiberStack<T> {
+ TreiberStack {
+ head: Atomic::null(),
+ }
+ }
+
+ /// Pushes a value on top of the stack.
+ pub fn push(&self, t: T) {
+ let mut n = Owned::new(Node {
+ data: ManuallyDrop::new(t),
+ next: Atomic::null(),
+ });
+
+ let guard = epoch::pin();
+
+ loop {
+ let head = self.head.load(Relaxed, &guard);
+ n.next.store(head, Relaxed);
+
+ match self.head.compare_and_set(head, n, Release, &guard) {
+ Ok(_) => break,
+ Err(e) => n = e.new,
+ }
+ }
+ }
+
+ /// Attempts to pop the top element from the stack.
+ ///
+ /// Returns `None` if the stack is empty.
+ pub fn pop(&self) -> Option<T> {
+ let guard = epoch::pin();
+ loop {
+ let head = self.head.load(Acquire, &guard);
+
+ match unsafe { head.as_ref() } {
+ Some(h) => {
+ let next = h.next.load(Relaxed, &guard);
+
+ if self
+ .head
+ .compare_and_set(head, next, Release, &guard)
+ .is_ok()
+ {
+ unsafe {
+ guard.defer_destroy(head);
+ return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
+ }
+ }
+ }
+ None => return None,
+ }
+ }
+ }
+
+ /// Returns `true` if the stack is empty.
+ pub fn is_empty(&self) -> bool {
+ let guard = epoch::pin();
+ self.head.load(Acquire, &guard).is_null()
+ }
+}
+
+impl<T> Drop for TreiberStack<T> {
+ fn drop(&mut self) {
+ while self.pop().is_some() {}
+ }
+}
+
+fn main() {
+ let stack = TreiberStack::new();
+
+ scope(|scope| {
+ for _ in 0..10 {
+ scope.spawn(|_| {
+ for i in 0..10_000 {
+ stack.push(i);
+ assert!(stack.pop().is_some());
+ }
+ });
+ }
+ })
+ .unwrap();
+
+ assert!(stack.pop().is_none());
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/atomic.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/atomic.rs
new file mode 100644
index 0000000000..a2044740bc
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/atomic.rs
@@ -0,0 +1,1201 @@
+use alloc::boxed::Box;
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+use crossbeam_utils::atomic::AtomicConsume;
+use guard::Guard;
+
+/// Given ordering for the success case in a compare-exchange operation, returns the strongest
+/// appropriate ordering for the failure case.
+#[inline]
+fn strongest_failure_ordering(ord: Ordering) -> Ordering {
+ use self::Ordering::*;
+ match ord {
+ Relaxed | Release => Relaxed,
+ Acquire | AcqRel => Acquire,
+ _ => SeqCst,
+ }
+}
+
+/// The error returned on failed compare-and-set operation.
+pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
+ /// The value in the atomic pointer at the time of the failed operation.
+ pub current: Shared<'g, T>,
+
+ /// The new value, which the operation failed to store.
+ pub new: P,
+}
+
+impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("CompareAndSetError")
+ .field("current", &self.current)
+ .field("new", &self.new)
+ .finish()
+ }
+}
+
+/// Memory orderings for compare-and-set operations.
+///
+/// A compare-and-set operation can have different memory orderings depending on whether it
+/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
+///
+/// The two ways of specifying orderings for compare-and-set are:
+///
+/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
+/// ordering is chosen.
+/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
+/// for the failure case.
+pub trait CompareAndSetOrdering {
+ /// The ordering of the operation when it succeeds.
+ fn success(&self) -> Ordering;
+
+ /// The ordering of the operation when it fails.
+ ///
+ /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
+ /// the success ordering.
+ fn failure(&self) -> Ordering;
+}
+
+impl CompareAndSetOrdering for Ordering {
+ #[inline]
+ fn success(&self) -> Ordering {
+ *self
+ }
+
+ #[inline]
+ fn failure(&self) -> Ordering {
+ strongest_failure_ordering(*self)
+ }
+}
+
+impl CompareAndSetOrdering for (Ordering, Ordering) {
+ #[inline]
+ fn success(&self) -> Ordering {
+ self.0
+ }
+
+ #[inline]
+ fn failure(&self) -> Ordering {
+ self.1
+ }
+}
+
+/// Panics if the pointer is not properly aligned.
+#[inline]
+fn ensure_aligned<T>(raw: *const T) {
+ assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
+}
+
+/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
+#[inline]
+fn low_bits<T>() -> usize {
+ (1 << mem::align_of::<T>().trailing_zeros()) - 1
+}
+
+/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
+///
+/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
+#[inline]
+fn data_with_tag<T>(data: usize, tag: usize) -> usize {
+ (data & !low_bits::<T>()) | (tag & low_bits::<T>())
+}
+
+/// Decomposes a tagged pointer `data` into the pointer and the tag.
+#[inline]
+fn decompose_data<T>(data: usize) -> (*mut T, usize) {
+ let raw = (data & !low_bits::<T>()) as *mut T;
+ let tag = data & low_bits::<T>();
+ (raw, tag)
+}
+
+/// An atomic pointer that can be safely shared between threads.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
+/// mem::align_of::<T>().trailing_zeros())`.
+///
+/// Any method that loads the pointer must be passed a reference to a [`Guard`].
+///
+/// [`Guard`]: struct.Guard.html
+pub struct Atomic<T> {
+ data: AtomicUsize,
+ _marker: PhantomData<*mut T>,
+}
+
+unsafe impl<T: Send + Sync> Send for Atomic<T> {}
+unsafe impl<T: Send + Sync> Sync for Atomic<T> {}
+
+impl<T> Atomic<T> {
+ /// Returns a new atomic pointer pointing to the tagged pointer `data`.
+ fn from_usize(data: usize) -> Self {
+ Self {
+ data: AtomicUsize::new(data),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Returns a new null atomic pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Atomic;
+ ///
+ /// let a = Atomic::<i32>::null();
+ /// ```
+ #[cfg(not(has_min_const_fn))]
+ pub fn null() -> Atomic<T> {
+ Self {
+ data: AtomicUsize::new(0),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Returns a new null atomic pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Atomic;
+ ///
+ /// let a = Atomic::<i32>::null();
+ /// ```
+ #[cfg(has_min_const_fn)]
+ pub const fn null() -> Atomic<T> {
+ Self {
+ data: AtomicUsize::new(0),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Atomic;
+ ///
+ /// let a = Atomic::new(1234);
+ /// ```
+ pub fn new(value: T) -> Atomic<T> {
+ Self::from(Owned::new(value))
+ }
+
+ /// Loads a `Shared` from the atomic pointer.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = a.load(SeqCst, guard);
+ /// ```
+ pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.load(ord)) }
+ }
+
+ /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
+ ///
+ /// This is similar to the "acquire" ordering, except that an ordering is
+ /// only guaranteed with operations that "depend on" the result of the load.
+ /// However, consume loads are usually much faster than acquire loads on
+ /// architectures with a weak memory model since they don't require memory
+ /// fence instructions.
+ ///
+ /// The exact definition of "depend on" is a bit vague, but it works as you
+ /// would expect in practice since a lot of software, especially the Linux
+ /// kernel, relies on this behavior.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = a.load_consume(guard);
+ /// ```
+ pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.load_consume()) }
+ }
+
+ /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// a.store(Shared::null(), SeqCst);
+ /// a.store(Owned::new(1234), SeqCst);
+ /// ```
+ pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
+ self.data.store(new.into_usize(), ord);
+ }
+
+ /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
+ /// `Shared`.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = a.swap(Shared::null(), SeqCst, guard);
+ /// ```
+ pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
+ }
+
+ /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+ /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+ /// same object, but with different tags, will not be considered equal.
+ ///
+ /// The return value is a result indicating whether the new pointer was written. On success the
+ /// pointer that was written is returned. On failure the actual current value and `new` are
+ /// returned.
+ ///
+ /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
+ /// ordering of this operation.
+ ///
+ /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ ///
+ /// let guard = &epoch::pin();
+ /// let mut curr = a.load(SeqCst, guard);
+ /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
+ /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
+ /// ```
+ pub fn compare_and_set<'g, O, P>(
+ &self,
+ current: Shared<T>,
+ new: P,
+ ord: O,
+ _: &'g Guard,
+ ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
+ where
+ O: CompareAndSetOrdering,
+ P: Pointer<T>,
+ {
+ let new = new.into_usize();
+ self.data
+ .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
+ .map(|_| unsafe { Shared::from_usize(new) })
+ .map_err(|current| unsafe {
+ CompareAndSetError {
+ current: Shared::from_usize(current),
+ new: P::from_usize(new),
+ }
+ })
+ }
+
+ /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+ /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+ /// same object, but with different tags, will not be considered equal.
+ ///
+ /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
+ /// succeeds, which can result in more efficient code on some platforms. The return value is a
+ /// result indicating whether the new pointer was written. On success the pointer that was
+ /// written is returned. On failure the actual current value and `new` are returned.
+ ///
+ /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
+ /// ordering of this operation.
+ ///
+ /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
+ /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ ///
+ /// let mut new = Owned::new(5678);
+ /// let mut ptr = a.load(SeqCst, guard);
+ /// loop {
+ /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
+ /// Ok(p) => {
+ /// ptr = p;
+ /// break;
+ /// }
+ /// Err(err) => {
+ /// ptr = err.current;
+ /// new = err.new;
+ /// }
+ /// }
+ /// }
+ ///
+ /// let mut curr = a.load(SeqCst, guard);
+ /// loop {
+ /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
+ /// Ok(_) => break,
+ /// Err(err) => curr = err.current,
+ /// }
+ /// }
+ /// ```
+ pub fn compare_and_set_weak<'g, O, P>(
+ &self,
+ current: Shared<T>,
+ new: P,
+ ord: O,
+ _: &'g Guard,
+ ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
+ where
+ O: CompareAndSetOrdering,
+ P: Pointer<T>,
+ {
+ let new = new.into_usize();
+ self.data
+ .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
+ .map(|_| unsafe { Shared::from_usize(new) })
+ .map_err(|current| unsafe {
+ CompareAndSetError {
+ current: Shared::from_usize(current),
+ new: P::from_usize(new),
+ }
+ })
+ }
+
+ /// Bitwise "and" with the current tag.
+ ///
+ /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
+ /// new tag to the result. Returns the previous pointer.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
+ /// let guard = &epoch::pin();
+ /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
+ /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
+ /// ```
+ pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
+ }
+
+ /// Bitwise "or" with the current tag.
+ ///
+ /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
+ /// new tag to the result. Returns the previous pointer.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
+ /// let guard = &epoch::pin();
+ /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
+ /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
+ /// ```
+ pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
+ }
+
+ /// Bitwise "xor" with the current tag.
+ ///
+ /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
+ /// new tag to the result. Returns the previous pointer.
+ ///
+ /// This method takes an [`Ordering`] argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
+ /// let guard = &epoch::pin();
+ /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
+ /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
+ /// ```
+ pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
+ }
+
+ /// Takes ownership of the pointee.
+ ///
+    /// This consumes the atomic and converts it into [`Owned`]. Because [`Atomic`] doesn't have a
+    /// destructor and doesn't drop the pointee, while [`Owned`] does, this is suitable for use in
+    /// destructors of data structures.
+ ///
+ /// # Panics
+ ///
+ /// Panics if this pointer is null, but only in debug mode.
+ ///
+ /// # Safety
+ ///
+ /// This method may be called only if the pointer is valid and nobody else is holding a
+ /// reference to the same object.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use std::mem;
+ /// # use crossbeam_epoch::Atomic;
+ /// struct DataStructure {
+ /// ptr: Atomic<usize>,
+ /// }
+ ///
+ /// impl Drop for DataStructure {
+ /// fn drop(&mut self) {
+    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
+    ///         // any `Shared` or `&` references to it ourselves.
+ /// unsafe {
+ /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub unsafe fn into_owned(self) -> Owned<T> {
+ Owned::from_usize(self.data.into_inner())
+ }
+}
+
+impl<T> fmt::Debug for Atomic<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let data = self.data.load(Ordering::SeqCst);
+ let (raw, tag) = decompose_data::<T>(data);
+
+ f.debug_struct("Atomic")
+ .field("raw", &raw)
+ .field("tag", &tag)
+ .finish()
+ }
+}
+
+impl<T> fmt::Pointer for Atomic<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let data = self.data.load(Ordering::SeqCst);
+ let (raw, _) = decompose_data::<T>(data);
+ fmt::Pointer::fmt(&raw, f)
+ }
+}
+
+impl<T> Clone for Atomic<T> {
+ /// Returns a copy of the atomic value.
+ ///
+ /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
+ /// atomics or fences.
+ fn clone(&self) -> Self {
+ let data = self.data.load(Ordering::Relaxed);
+ Atomic::from_usize(data)
+ }
+}
+
+impl<T> Default for Atomic<T> {
+ fn default() -> Self {
+ Atomic::null()
+ }
+}
+
+impl<T> From<Owned<T>> for Atomic<T> {
+ /// Returns a new atomic pointer pointing to `owned`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{Atomic, Owned};
+ ///
+ /// let a = Atomic::<i32>::from(Owned::new(1234));
+ /// ```
+ fn from(owned: Owned<T>) -> Self {
+ let data = owned.data;
+ mem::forget(owned);
+ Self::from_usize(data)
+ }
+}
+
+impl<T> From<Box<T>> for Atomic<T> {
+ fn from(b: Box<T>) -> Self {
+ Self::from(Owned::from(b))
+ }
+}
+
+impl<T> From<T> for Atomic<T> {
+ fn from(t: T) -> Self {
+ Self::new(t)
+ }
+}
+
+impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
+ /// Returns a new atomic pointer pointing to `ptr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{Atomic, Shared};
+ ///
+ /// let a = Atomic::<i32>::from(Shared::<i32>::null());
+ /// ```
+ fn from(ptr: Shared<'g, T>) -> Self {
+ Self::from_usize(ptr.data)
+ }
+}
+
+impl<T> From<*const T> for Atomic<T> {
+ /// Returns a new atomic pointer pointing to `raw`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ /// use crossbeam_epoch::Atomic;
+ ///
+ /// let a = Atomic::<i32>::from(ptr::null::<i32>());
+ /// ```
+ fn from(raw: *const T) -> Self {
+ Self::from_usize(raw as usize)
+ }
+}
+
+/// A trait for either `Owned` or `Shared` pointers.
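+///
+/// # Examples
+///
+/// A round-trip sketch (illustrative; this assumes `Pointer` is re-exported at the crate root):
+///
+/// ```
+/// use crossbeam_epoch::{Owned, Pointer};
+///
+/// let o = Owned::new(1234);
+/// let data = o.into_usize();
+/// // Safety: `data` was produced by a valid `Owned` and is converted back exactly once.
+/// let o = unsafe { Owned::<i32>::from_usize(data) };
+/// assert_eq!(*o, 1234);
+/// ```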
+pub trait Pointer<T> {
+ /// Returns the machine representation of the pointer.
+ fn into_usize(self) -> usize;
+
+ /// Returns a new pointer pointing to the tagged pointer `data`.
+ unsafe fn from_usize(data: usize) -> Self;
+}
+
+/// An owned heap-allocated object.
+///
+/// This type is very similar to `Box<T>`.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address.
+pub struct Owned<T> {
+ data: usize,
+ _marker: PhantomData<Box<T>>,
+}
+
+impl<T> Pointer<T> for Owned<T> {
+ #[inline]
+ fn into_usize(self) -> usize {
+ let data = self.data;
+ mem::forget(self);
+ data
+ }
+
+ /// Returns a new pointer pointing to the tagged pointer `data`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the data is zero in debug mode.
+ #[inline]
+ unsafe fn from_usize(data: usize) -> Self {
+ debug_assert!(data != 0, "converting zero into `Owned`");
+ Owned {
+ data: data,
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl<T> Owned<T> {
+ /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Owned;
+ ///
+ /// let o = Owned::new(1234);
+ /// ```
+ pub fn new(value: T) -> Owned<T> {
+ Self::from(Box::new(value))
+ }
+
+ /// Returns a new owned pointer pointing to `raw`.
+ ///
+    /// # Safety
+    ///
+    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
+    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
+    /// the same raw pointer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `raw` is not properly aligned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Owned;
+ ///
+ /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
+ /// ```
+ pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
+ ensure_aligned(raw);
+ Self::from_usize(raw as usize)
+ }
+
+ /// Converts the owned pointer into a [`Shared`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Owned};
+ ///
+ /// let o = Owned::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = o.into_shared(guard);
+ /// ```
+ ///
+ /// [`Shared`]: struct.Shared.html
+ pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
+ unsafe { Shared::from_usize(self.into_usize()) }
+ }
+
+ /// Converts the owned pointer into a `Box`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Owned};
+ ///
+ /// let o = Owned::new(1234);
+ /// let b: Box<i32> = o.into_box();
+ /// assert_eq!(*b, 1234);
+ /// ```
+ pub fn into_box(self) -> Box<T> {
+ let (raw, _) = decompose_data::<T>(self.data);
+ mem::forget(self);
+ unsafe { Box::from_raw(raw) }
+ }
+
+ /// Returns the tag stored within the pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Owned;
+ ///
+ /// assert_eq!(Owned::new(1234).tag(), 0);
+ /// ```
+ pub fn tag(&self) -> usize {
+ let (_, tag) = decompose_data::<T>(self.data);
+ tag
+ }
+
+    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
+    /// unused bits of the pointer to `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Owned;
+ ///
+ /// let o = Owned::new(0u64);
+ /// assert_eq!(o.tag(), 0);
+ /// let o = o.with_tag(2);
+ /// assert_eq!(o.tag(), 2);
+ /// ```
+ pub fn with_tag(self, tag: usize) -> Owned<T> {
+ let data = self.into_usize();
+ unsafe { Self::from_usize(data_with_tag::<T>(data, tag)) }
+ }
+}
+
+impl<T> Drop for Owned<T> {
+ fn drop(&mut self) {
+ let (raw, _) = decompose_data::<T>(self.data);
+ unsafe {
+ drop(Box::from_raw(raw));
+ }
+ }
+}
+
+impl<T> fmt::Debug for Owned<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (raw, tag) = decompose_data::<T>(self.data);
+
+ f.debug_struct("Owned")
+ .field("raw", &raw)
+ .field("tag", &tag)
+ .finish()
+ }
+}
+
+impl<T: Clone> Clone for Owned<T> {
+ fn clone(&self) -> Self {
+ Owned::new((**self).clone()).with_tag(self.tag())
+ }
+}
+
+impl<T> Deref for Owned<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ let (raw, _) = decompose_data::<T>(self.data);
+ unsafe { &*raw }
+ }
+}
+
+impl<T> DerefMut for Owned<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ let (raw, _) = decompose_data::<T>(self.data);
+ unsafe { &mut *raw }
+ }
+}
+
+impl<T> From<T> for Owned<T> {
+ fn from(t: T) -> Self {
+ Owned::new(t)
+ }
+}
+
+impl<T> From<Box<T>> for Owned<T> {
+ /// Returns a new owned pointer pointing to `b`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the pointer (the `Box`) is not properly aligned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Owned;
+ ///
+    /// let o = Owned::<i32>::from(Box::new(1234));
+    /// assert_eq!(*o, 1234);
+ /// ```
+ fn from(b: Box<T>) -> Self {
+ unsafe { Self::from_raw(Box::into_raw(b)) }
+ }
+}
+
+impl<T> Borrow<T> for Owned<T> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+impl<T> BorrowMut<T> for Owned<T> {
+ fn borrow_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+impl<T> AsRef<T> for Owned<T> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+impl<T> AsMut<T> for Owned<T> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+/// A pointer to an object protected by the epoch GC.
+///
+/// The pointer is valid for use only during the lifetime `'g`.
+///
+/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
+/// least significant bits of the address.
+pub struct Shared<'g, T: 'g> {
+ data: usize,
+ _marker: PhantomData<(&'g (), *const T)>,
+}
+
+impl<'g, T> Clone for Shared<'g, T> {
+ fn clone(&self) -> Self {
+ Shared {
+ data: self.data,
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl<'g, T> Copy for Shared<'g, T> {}
+
+impl<'g, T> Pointer<T> for Shared<'g, T> {
+ #[inline]
+ fn into_usize(self) -> usize {
+ self.data
+ }
+
+ #[inline]
+ unsafe fn from_usize(data: usize) -> Self {
+ Shared {
+ data: data,
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl<'g, T> Shared<'g, T> {
+ /// Returns a new null pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Shared;
+ ///
+ /// let p = Shared::<i32>::null();
+ /// assert!(p.is_null());
+ /// ```
+ pub fn null() -> Shared<'g, T> {
+ Shared {
+ data: 0,
+ _marker: PhantomData,
+ }
+ }
+
+ /// Returns `true` if the pointer is null.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::null();
+ /// let guard = &epoch::pin();
+ /// assert!(a.load(SeqCst, guard).is_null());
+ /// a.store(Owned::new(1234), SeqCst);
+ /// assert!(!a.load(SeqCst, guard).is_null());
+ /// ```
+ pub fn is_null(&self) -> bool {
+ self.as_raw().is_null()
+ }
+
+ /// Converts the pointer to a raw pointer (without the tag).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let o = Owned::new(1234);
+ /// let raw = &*o as *const _;
+ /// let a = Atomic::from(o);
+ ///
+ /// let guard = &epoch::pin();
+ /// let p = a.load(SeqCst, guard);
+ /// assert_eq!(p.as_raw(), raw);
+ /// ```
+ pub fn as_raw(&self) -> *const T {
+ let (raw, _) = decompose_data::<T>(self.data);
+ raw
+ }
+
+ /// Dereferences the pointer.
+ ///
+ /// Returns a reference to the pointee that is valid during the lifetime `'g`.
+ ///
+ /// # Safety
+ ///
+ /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
+ ///
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
+ /// For example, consider the following scenario:
+ ///
+ /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
+ /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
+ ///
+ /// The problem is that relaxed orderings don't synchronize initialization of the object with
+ /// the read from the second thread. This is a data race. A possible solution would be to use
+ /// `Release` and `Acquire` orderings.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = a.load(SeqCst, guard);
+ /// unsafe {
+ /// assert_eq!(p.deref(), &1234);
+ /// }
+ /// ```
+ pub unsafe fn deref(&self) -> &'g T {
+ &*self.as_raw()
+ }
+
+ /// Dereferences the pointer.
+ ///
+ /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
+ ///
+ /// # Safety
+ ///
+    /// * There is no guarantee that there are no other threads attempting to read from or write to
+    ///   the actual object at the same time.
+    ///
+    ///   The user must know that there are no concurrent accesses to the object itself.
+    ///
+    /// * Other than the above, all safety concerns of `deref()` apply here.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(vec![1, 2, 3, 4]);
+ /// let guard = &epoch::pin();
+ ///
+ /// let mut p = a.load(SeqCst, guard);
+ /// unsafe {
+ /// assert!(!p.is_null());
+ /// let b = p.deref_mut();
+ /// assert_eq!(b, &vec![1, 2, 3, 4]);
+ /// b.push(5);
+ /// assert_eq!(b, &vec![1, 2, 3, 4, 5]);
+ /// }
+ ///
+ /// let p = a.load(SeqCst, guard);
+ /// unsafe {
+ /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
+ /// }
+ /// ```
+ pub unsafe fn deref_mut(&mut self) -> &'g mut T {
+ &mut *(self.as_raw() as *mut T)
+ }
+
+ /// Converts the pointer to a reference.
+ ///
+ /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
+ ///
+ /// # Safety
+ ///
+ /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
+ ///
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
+ /// For example, consider the following scenario:
+ ///
+ /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
+ /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
+ ///
+ /// The problem is that relaxed orderings don't synchronize initialization of the object with
+ /// the read from the second thread. This is a data race. A possible solution would be to use
+ /// `Release` and `Acquire` orderings.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// let guard = &epoch::pin();
+ /// let p = a.load(SeqCst, guard);
+ /// unsafe {
+ /// assert_eq!(p.as_ref(), Some(&1234));
+ /// }
+ /// ```
+ pub unsafe fn as_ref(&self) -> Option<&'g T> {
+ self.as_raw().as_ref()
+ }
+
+ /// Takes ownership of the pointee.
+ ///
+ /// # Panics
+ ///
+ /// Panics if this pointer is null, but only in debug mode.
+ ///
+ /// # Safety
+ ///
+ /// This method may be called only if the pointer is valid and nobody else is holding a
+ /// reference to the same object.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// unsafe {
+ /// let guard = &epoch::unprotected();
+ /// let p = a.load(SeqCst, guard);
+ /// drop(p.into_owned());
+ /// }
+ /// ```
+ pub unsafe fn into_owned(self) -> Owned<T> {
+ debug_assert!(
+ self.as_raw() != ptr::null(),
+ "converting a null `Shared` into `Owned`"
+ );
+ Owned::from_usize(self.data)
+ }
+
+ /// Returns the tag stored within the pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
+ /// let guard = &epoch::pin();
+ /// let p = a.load(SeqCst, guard);
+ /// assert_eq!(p.tag(), 2);
+ /// ```
+ pub fn tag(&self) -> usize {
+ let (_, tag) = decompose_data::<T>(self.data);
+ tag
+ }
+
+    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
+    /// unused bits of the pointer to `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(0u64);
+ /// let guard = &epoch::pin();
+ /// let p1 = a.load(SeqCst, guard);
+ /// let p2 = p1.with_tag(2);
+ ///
+ /// assert_eq!(p1.tag(), 0);
+ /// assert_eq!(p2.tag(), 2);
+ /// assert_eq!(p1.as_raw(), p2.as_raw());
+ /// ```
+ pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
+ unsafe { Self::from_usize(data_with_tag::<T>(self.data, tag)) }
+ }
+}
+
+impl<'g, T> From<*const T> for Shared<'g, T> {
+ /// Returns a new pointer pointing to `raw`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `raw` is not properly aligned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::Shared;
+ ///
+ /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
+ /// assert!(!p.is_null());
+ /// ```
+ fn from(raw: *const T) -> Self {
+ ensure_aligned(raw);
+ unsafe { Self::from_usize(raw as usize) }
+ }
+}
+
+impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.data == other.data
+ }
+}
+
+impl<'g, T> Eq for Shared<'g, T> {}
+
+impl<'g, T> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ self.data.partial_cmp(&other.data)
+ }
+}
+
+impl<'g, T> Ord for Shared<'g, T> {
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.data.cmp(&other.data)
+ }
+}
+
+impl<'g, T> fmt::Debug for Shared<'g, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let (raw, tag) = decompose_data::<T>(self.data);
+
+ f.debug_struct("Shared")
+ .field("raw", &raw)
+ .field("tag", &tag)
+ .finish()
+ }
+}
+
+impl<'g, T> fmt::Pointer for Shared<'g, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_raw(), f)
+ }
+}
+
+impl<'g, T> Default for Shared<'g, T> {
+ fn default() -> Self {
+ Shared::null()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Shared;
+
+ #[test]
+ fn valid_tag_i8() {
+ Shared::<i8>::null().with_tag(0);
+ }
+
+ #[test]
+ fn valid_tag_i64() {
+ Shared::<i64>::null().with_tag(7);
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/collector.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/collector.rs
new file mode 100644
index 0000000000..1817d9adaf
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/collector.rs
@@ -0,0 +1,434 @@
+/// Epoch-based garbage collector.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_epoch::Collector;
+///
+/// let collector = Collector::new();
+///
+/// let handle = collector.register();
+/// drop(collector); // `handle` still works after dropping `collector`
+///
+/// handle.pin().flush();
+/// ```
+use alloc::sync::Arc;
+use core::fmt;
+
+use guard::Guard;
+use internal::{Global, Local};
+
+/// An epoch-based garbage collector.
+pub struct Collector {
+ pub(crate) global: Arc<Global>,
+}
+
+unsafe impl Send for Collector {}
+unsafe impl Sync for Collector {}
+
+impl Collector {
+ /// Creates a new collector.
+ pub fn new() -> Self {
+ Collector {
+ global: Arc::new(Global::new()),
+ }
+ }
+
+ /// Registers a new handle for the collector.
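+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; each handle pins independently of the others:
+    ///
+    /// ```
+    /// use crossbeam_epoch::Collector;
+    ///
+    /// let collector = Collector::new();
+    /// let h1 = collector.register();
+    /// let h2 = collector.register();
+    ///
+    /// let _guard = h1.pin();
+    /// assert!(h1.is_pinned());
+    /// assert!(!h2.is_pinned());
+    /// ```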
+ pub fn register(&self) -> LocalHandle {
+ Local::register(self)
+ }
+}
+
+impl Clone for Collector {
+ /// Creates another reference to the same garbage collector.
+ fn clone(&self) -> Self {
+ Collector {
+ global: self.global.clone(),
+ }
+ }
+}
+
+impl fmt::Debug for Collector {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("Collector { .. }")
+ }
+}
+
+impl PartialEq for Collector {
+ /// Checks if both handles point to the same collector.
+ fn eq(&self, rhs: &Collector) -> bool {
+ Arc::ptr_eq(&self.global, &rhs.global)
+ }
+}
+impl Eq for Collector {}
+
+/// A handle to a garbage collector.
+pub struct LocalHandle {
+ pub(crate) local: *const Local,
+}
+
+impl LocalHandle {
+ /// Pins the handle.
+ #[inline]
+ pub fn pin(&self) -> Guard {
+ unsafe { (*self.local).pin() }
+ }
+
+ /// Returns `true` if the handle is pinned.
+ #[inline]
+ pub fn is_pinned(&self) -> bool {
+ unsafe { (*self.local).is_pinned() }
+ }
+
+ /// Returns the `Collector` associated with this handle.
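+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the returned reference compares equal to the originating collector:
+    ///
+    /// ```
+    /// use crossbeam_epoch::Collector;
+    ///
+    /// let collector = Collector::new();
+    /// let handle = collector.register();
+    /// assert!(handle.collector() == &collector);
+    /// ```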
+ #[inline]
+ pub fn collector(&self) -> &Collector {
+ unsafe { (*self.local).collector() }
+ }
+}
+
+impl Drop for LocalHandle {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ Local::release_handle(&*self.local);
+ }
+ }
+}
+
+impl fmt::Debug for LocalHandle {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("LocalHandle { .. }")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::mem;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use crossbeam_utils::thread;
+
+ use {Collector, Owned};
+
+ const NUM_THREADS: usize = 8;
+
+ #[test]
+ fn pin_reentrant() {
+ let collector = Collector::new();
+ let handle = collector.register();
+ drop(collector);
+
+ assert!(!handle.is_pinned());
+ {
+ let _guard = &handle.pin();
+ assert!(handle.is_pinned());
+ {
+ let _guard = &handle.pin();
+ assert!(handle.is_pinned());
+ }
+ assert!(handle.is_pinned());
+ }
+ assert!(!handle.is_pinned());
+ }
+
+ #[test]
+ fn flush_local_bag() {
+ let collector = Collector::new();
+ let handle = collector.register();
+ drop(collector);
+
+ for _ in 0..100 {
+ let guard = &handle.pin();
+ unsafe {
+ let a = Owned::new(7).into_shared(guard);
+ guard.defer_destroy(a);
+
+ assert!(!(*(*guard.local).bag.get()).is_empty());
+
+ while !(*(*guard.local).bag.get()).is_empty() {
+ guard.flush();
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn garbage_buffering() {
+ let collector = Collector::new();
+ let handle = collector.register();
+ drop(collector);
+
+ let guard = &handle.pin();
+ unsafe {
+ for _ in 0..10 {
+ let a = Owned::new(7).into_shared(guard);
+ guard.defer_destroy(a);
+ }
+ assert!(!(*(*guard.local).bag.get()).is_empty());
+ }
+ }
+
+ #[test]
+ fn pin_holds_advance() {
+ let collector = Collector::new();
+
+ thread::scope(|scope| {
+ for _ in 0..NUM_THREADS {
+ scope.spawn(|_| {
+ let handle = collector.register();
+ for _ in 0..500_000 {
+ let guard = &handle.pin();
+
+ let before = collector.global.epoch.load(Ordering::Relaxed);
+ collector.global.collect(guard);
+ let after = collector.global.epoch.load(Ordering::Relaxed);
+
+ assert!(after.wrapping_sub(before) <= 2);
+ }
+ });
+ }
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn incremental() {
+ const COUNT: usize = 100_000;
+ static DESTROYS: AtomicUsize = AtomicUsize::new(0);
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ unsafe {
+ let guard = &handle.pin();
+ for _ in 0..COUNT {
+ let a = Owned::new(7i32).into_shared(guard);
+ guard.defer_unchecked(move || {
+ drop(a.into_owned());
+ DESTROYS.fetch_add(1, Ordering::Relaxed);
+ });
+ }
+ guard.flush();
+ }
+
+ let mut last = 0;
+
+ while last < COUNT {
+ let curr = DESTROYS.load(Ordering::Relaxed);
+ assert!(curr - last <= 1024);
+ last = curr;
+
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert!(DESTROYS.load(Ordering::Relaxed) == 100_000);
+ }
+
+ #[test]
+ fn buffering() {
+ const COUNT: usize = 10;
+ static DESTROYS: AtomicUsize = AtomicUsize::new(0);
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ unsafe {
+ let guard = &handle.pin();
+ for _ in 0..COUNT {
+ let a = Owned::new(7i32).into_shared(guard);
+ guard.defer_unchecked(move || {
+ drop(a.into_owned());
+ DESTROYS.fetch_add(1, Ordering::Relaxed);
+ });
+ }
+ }
+
+ for _ in 0..100_000 {
+ collector.global.collect(&handle.pin());
+ }
+ assert!(DESTROYS.load(Ordering::Relaxed) < COUNT);
+
+ handle.pin().flush();
+
+ while DESTROYS.load(Ordering::Relaxed) < COUNT {
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+ }
+
+ #[test]
+ fn count_drops() {
+ const COUNT: usize = 100_000;
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct Elem(i32);
+
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::Relaxed);
+ }
+ }
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ unsafe {
+ let guard = &handle.pin();
+
+ for _ in 0..COUNT {
+ let a = Owned::new(Elem(7i32)).into_shared(guard);
+ guard.defer_destroy(a);
+ }
+ guard.flush();
+ }
+
+ while DROPS.load(Ordering::Relaxed) < COUNT {
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
+ }
+
+ #[test]
+ fn count_destroy() {
+ const COUNT: usize = 100_000;
+ static DESTROYS: AtomicUsize = AtomicUsize::new(0);
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ unsafe {
+ let guard = &handle.pin();
+
+ for _ in 0..COUNT {
+ let a = Owned::new(7i32).into_shared(guard);
+ guard.defer_unchecked(move || {
+ drop(a.into_owned());
+ DESTROYS.fetch_add(1, Ordering::Relaxed);
+ });
+ }
+ guard.flush();
+ }
+
+ while DESTROYS.load(Ordering::Relaxed) < COUNT {
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+ }
+
+ #[test]
+ fn drop_array() {
+ const COUNT: usize = 700;
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct Elem(i32);
+
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::Relaxed);
+ }
+ }
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ let mut guard = handle.pin();
+
+ let mut v = Vec::with_capacity(COUNT);
+ for i in 0..COUNT {
+ v.push(Elem(i as i32));
+ }
+
+ {
+ let a = Owned::new(v).into_shared(&guard);
+ unsafe {
+ guard.defer_destroy(a);
+ }
+ guard.flush();
+ }
+
+ while DROPS.load(Ordering::Relaxed) < COUNT {
+ guard.repin();
+ collector.global.collect(&guard);
+ }
+ assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
+ }
+
+ #[test]
+ fn destroy_array() {
+ const COUNT: usize = 100_000;
+ static DESTROYS: AtomicUsize = AtomicUsize::new(0);
+
+ let collector = Collector::new();
+ let handle = collector.register();
+
+ unsafe {
+ let guard = &handle.pin();
+
+ let mut v = Vec::with_capacity(COUNT);
+ for i in 0..COUNT {
+ v.push(i as i32);
+ }
+
+ let ptr = v.as_mut_ptr() as usize;
+ let len = v.len();
+ guard.defer_unchecked(move || {
+ drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len));
+ DESTROYS.fetch_add(len, Ordering::Relaxed);
+ });
+ guard.flush();
+
+ mem::forget(v);
+ }
+
+ while DESTROYS.load(Ordering::Relaxed) < COUNT {
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
+ }
+
+ #[test]
+ fn stress() {
+ const THREADS: usize = 8;
+ const COUNT: usize = 100_000;
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct Elem(i32);
+
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::Relaxed);
+ }
+ }
+
+ let collector = Collector::new();
+
+ thread::scope(|scope| {
+ for _ in 0..THREADS {
+ scope.spawn(|_| {
+ let handle = collector.register();
+ for _ in 0..COUNT {
+ let guard = &handle.pin();
+ unsafe {
+ let a = Owned::new(Elem(7i32)).into_shared(guard);
+ guard.defer_destroy(a);
+ }
+ }
+ });
+ }
+ })
+ .unwrap();
+
+ let handle = collector.register();
+ while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {
+ let guard = &handle.pin();
+ collector.global.collect(guard);
+ }
+ assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS);
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/default.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/default.rs
new file mode 100644
index 0000000000..734a62428f
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/default.rs
@@ -0,0 +1,75 @@
+//! The default garbage collector.
+//!
+//! For each thread, a participant is lazily initialized on its first use, when the current thread
+//! is registered in the default collector. If initialized, the thread's participant will get
+//! destructed on thread exit, which in turn unregisters the thread.
+
+use collector::{Collector, LocalHandle};
+use guard::Guard;
+
+lazy_static! {
+ /// The global data for the default garbage collector.
+ static ref COLLECTOR: Collector = Collector::new();
+}
+
+thread_local! {
+ /// The per-thread participant for the default garbage collector.
+ static HANDLE: LocalHandle = COLLECTOR.register();
+}
+
+/// Pins the current thread.
+#[inline]
+pub fn pin() -> Guard {
+ with_handle(|handle| handle.pin())
+}
+
+/// Returns `true` if the current thread is pinned.
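+///
+/// # Examples
+///
+/// A minimal sketch, assuming no other guard is currently held on this thread:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// assert!(!epoch::is_pinned());
+/// let guard = epoch::pin();
+/// assert!(epoch::is_pinned());
+/// drop(guard);
+/// assert!(!epoch::is_pinned());
+/// ```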
+#[inline]
+pub fn is_pinned() -> bool {
+ with_handle(|handle| handle.is_pinned())
+}
+
+/// Returns the default global collector.
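+///
+/// # Examples
+///
+/// A minimal sketch, assuming `default_collector` is re-exported at the crate root alongside
+/// `pin`; it registers an extra handle on the default collector:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let handle = epoch::default_collector().register();
+/// handle.pin().flush();
+/// ```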
+pub fn default_collector() -> &'static Collector {
+ &COLLECTOR
+}
+
+#[inline]
+fn with_handle<F, R>(mut f: F) -> R
+where
+ F: FnMut(&LocalHandle) -> R,
+{
+ HANDLE
+ .try_with(|h| f(h))
+ .unwrap_or_else(|_| f(&COLLECTOR.register()))
+}
+
+#[cfg(test)]
+mod tests {
+ use crossbeam_utils::thread;
+
+ #[test]
+ fn pin_while_exiting() {
+ struct Foo;
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ // Pin after `HANDLE` has been dropped. This must not panic.
+ super::pin();
+ }
+ }
+
+ thread_local! {
+ static FOO: Foo = Foo;
+ }
+
+ thread::scope(|scope| {
+ scope.spawn(|_| {
+ // Initialize `FOO` and then `HANDLE`.
+ FOO.with(|_| ());
+ super::pin();
+ // At thread exit, `HANDLE` gets dropped first and `FOO` second.
+ });
+ }).unwrap();
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/deferred.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/deferred.rs
new file mode 100644
index 0000000000..3d22ee633e
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/deferred.rs
@@ -0,0 +1,140 @@
+use alloc::boxed::Box;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr;
+
+/// Number of words a piece of `Data` can hold.
+///
+/// Three words should be enough for the majority of cases. For example, you can fit inside it the
+/// function pointer together with a fat pointer representing an object that needs to be destroyed.
+const DATA_WORDS: usize = 3;
+
+/// Some space to keep a `FnOnce()` object on the stack.
+type Data = [usize; DATA_WORDS];
+
+/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
+///
+/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
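+///
+/// A minimal usage sketch (crate-internal API, hence the `ignore` block):
+///
+/// ```ignore
+/// let d = Deferred::new(move || println!("deferred work runs here"));
+/// d.call();
+/// ```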
+pub struct Deferred {
+ call: unsafe fn(*mut u8),
+ data: Data,
+ _marker: PhantomData<*mut ()>, // !Send + !Sync
+}
+
+impl fmt::Debug for Deferred {
+ fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ f.pad("Deferred { .. }")
+ }
+}
+
+impl Deferred {
+ /// Constructs a new `Deferred` from a `FnOnce()`.
+ pub fn new<F: FnOnce()>(f: F) -> Self {
+ let size = mem::size_of::<F>();
+ let align = mem::align_of::<F>();
+
+ unsafe {
+ if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
+ // TODO(taiki-e): when the minimum supported Rust version is bumped to 1.36+,
+ // replace this with `mem::MaybeUninit`.
+ #[allow(deprecated)]
+ let mut data: Data = mem::uninitialized();
+ ptr::write(&mut data as *mut Data as *mut F, f);
+
+ unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+ let f: F = ptr::read(raw as *mut F);
+ f();
+ }
+
+ Deferred {
+ call: call::<F>,
+ data,
+ _marker: PhantomData,
+ }
+ } else {
+ let b: Box<F> = Box::new(f);
+ // TODO(taiki-e): when the minimum supported Rust version is bumped to 1.36+,
+ // replace this with `mem::MaybeUninit`.
+ #[allow(deprecated)]
+ let mut data: Data = mem::uninitialized();
+ ptr::write(&mut data as *mut Data as *mut Box<F>, b);
+
+ unsafe fn call<F: FnOnce()>(raw: *mut u8) {
+ let b: Box<F> = ptr::read(raw as *mut Box<F>);
+ (*b)();
+ }
+
+ Deferred {
+ call: call::<F>,
+ data,
+ _marker: PhantomData,
+ }
+ }
+ }
+ }
+
+ /// Calls the function.
+ #[inline]
+ pub fn call(mut self) {
+ let call = self.call;
+ unsafe { call(&mut self.data as *mut Data as *mut u8) };
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Deferred;
+ use std::cell::Cell;
+
+ #[test]
+ fn on_stack() {
+ let fired = &Cell::new(false);
+ let a = [0usize; 1];
+
+ let d = Deferred::new(move || {
+ drop(a);
+ fired.set(true);
+ });
+
+ assert!(!fired.get());
+ d.call();
+ assert!(fired.get());
+ }
+
+ #[test]
+ fn on_heap() {
+ let fired = &Cell::new(false);
+ let a = [0usize; 10];
+
+ let d = Deferred::new(move || {
+ drop(a);
+ fired.set(true);
+ });
+
+ assert!(!fired.get());
+ d.call();
+ assert!(fired.get());
+ }
+
+ #[test]
+ fn string() {
+ let a = "hello".to_string();
+ let d = Deferred::new(move || assert_eq!(a, "hello"));
+ d.call();
+ }
+
+ #[test]
+ fn boxed_slice_i32() {
+ let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
+ let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
+ d.call();
+ }
+
+ #[test]
+ fn long_slice_usize() {
+ let a: [usize; 5] = [2, 3, 5, 7, 11];
+ let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
+ d.call();
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/epoch.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/epoch.rs
new file mode 100644
index 0000000000..e7759d9355
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/epoch.rs
@@ -0,0 +1,114 @@
+//! The global epoch
+//!
+//! The least significant bit of this number is unused and is always zero. Every so often the
+//! global epoch is incremented, i.e. we say it "advances". A pinned participant may advance the
+//! global epoch only if all currently pinned participants have been pinned in the current epoch.
+//!
+//! If an object became garbage in some epoch, then we can be sure that after two advancements no
+//! participant will hold a reference to it. That is the crux of safe memory reclamation.
+
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// An epoch that can be marked as pinned or unpinned.
+///
+/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
+/// and a flag that represents whether it is pinned or unpinned.
+#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
+pub struct Epoch {
+ /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
+ data: usize,
+}
+
+impl Epoch {
+ /// Returns the starting epoch in unpinned state.
+ #[inline]
+ pub fn starting() -> Self {
+ Self::default()
+ }
+
+ /// Returns the number of epochs `self` is ahead of `rhs`.
+ ///
+ /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
+ /// / 2)`, so the returned distance will be in the same interval.
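+    ///
+    /// For example, `Epoch::starting().successor().wrapping_sub(Epoch::starting())` is `1`, and
+    /// the pinned flag on either epoch does not affect the result.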
+ pub fn wrapping_sub(self, rhs: Self) -> isize {
+ // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`,
+ // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)`
+ // will be ignored in the shift operation.
+ self.data.wrapping_sub(rhs.data & !1) as isize >> 1
+ }
+
+ /// Returns `true` if the epoch is marked as pinned.
+ #[inline]
+ pub fn is_pinned(self) -> bool {
+ (self.data & 1) == 1
+ }
+
+ /// Returns the same epoch, but marked as pinned.
+ #[inline]
+ pub fn pinned(self) -> Epoch {
+ Epoch {
+ data: self.data | 1,
+ }
+ }
+
+ /// Returns the same epoch, but marked as unpinned.
+ #[inline]
+ pub fn unpinned(self) -> Epoch {
+ Epoch {
+ data: self.data & !1,
+ }
+ }
+
+ /// Returns the successor epoch.
+ ///
+ /// The returned epoch will be marked as pinned only if the previous one was as well.
+ #[inline]
+ pub fn successor(self) -> Epoch {
+ Epoch {
+ data: self.data.wrapping_add(2),
+ }
+ }
+}
+
+/// An atomic value that holds an `Epoch`.
+#[derive(Default, Debug)]
+pub struct AtomicEpoch {
+ /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
+ /// using an `AtomicUsize`.
+ data: AtomicUsize,
+}
+
+impl AtomicEpoch {
+ /// Creates a new atomic epoch.
+ #[inline]
+ pub fn new(epoch: Epoch) -> Self {
+ let data = AtomicUsize::new(epoch.data);
+ AtomicEpoch { data }
+ }
+
+ /// Loads a value from the atomic epoch.
+ #[inline]
+ pub fn load(&self, ord: Ordering) -> Epoch {
+ Epoch {
+ data: self.data.load(ord),
+ }
+ }
+
+ /// Stores a value into the atomic epoch.
+ #[inline]
+ pub fn store(&self, epoch: Epoch, ord: Ordering) {
+ self.data.store(epoch.data, ord);
+ }
+
+ /// Stores a value into the atomic epoch if the current value is the same as `current`.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the value
+ /// is updated.
+ ///
+ /// The `Ordering` argument describes the memory ordering of this operation.
+ #[inline]
+ pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
+ let data = self.data.compare_and_swap(current.data, new.data, ord);
+ Epoch { data }
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/guard.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/guard.rs
new file mode 100644
index 0000000000..df18cb118c
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/guard.rs
@@ -0,0 +1,529 @@
+use core::fmt;
+use core::mem;
+
+use atomic::Shared;
+use collector::Collector;
+use deferred::Deferred;
+use internal::Local;
+
+/// A guard that keeps the current thread pinned.
+///
+/// # Pinning
+///
+/// The current thread is pinned by calling [`pin`], which returns a new guard:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
+/// // This is not really necessary, but makes passing references to the guard a bit easier.
+/// let guard = &epoch::pin();
+/// ```
+///
+/// When a guard gets dropped, the current thread is automatically unpinned.
+///
+/// # Pointers on the stack
+///
+/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
+/// For example:
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+/// use std::sync::atomic::Ordering::SeqCst;
+///
+/// // Create a heap-allocated number.
+/// let a = Atomic::new(777);
+///
+/// // Pin the current thread.
+/// let guard = &epoch::pin();
+///
+/// // Load the heap-allocated object and create pointer `p` on the stack.
+/// let p = a.load(SeqCst, guard);
+///
+/// // Dereference the pointer and print the value:
+/// if let Some(num) = unsafe { p.as_ref() } {
+/// println!("The number is {}.", num);
+/// }
+/// ```
+///
+/// # Multiple guards
+///
+/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
+/// thread will actually be pinned only when the first guard is created and unpinned when the last
+/// one is dropped:
+///
+/// ```
+/// use crossbeam_epoch as epoch;
+///
+/// let guard1 = epoch::pin();
+/// let guard2 = epoch::pin();
+/// assert!(epoch::is_pinned());
+/// drop(guard1);
+/// assert!(epoch::is_pinned());
+/// drop(guard2);
+/// assert!(!epoch::is_pinned());
+/// ```
+///
+/// [`pin`]: fn.pin.html
+pub struct Guard {
+ pub(crate) local: *const Local,
+}
+
+impl Guard {
+ /// Stores a function so that it can be executed at some point after all currently pinned
+ /// threads get unpinned.
+ ///
+ /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
+ /// becomes full, some functions are moved into the global cache. At the same time, some
+ /// functions from both local and global caches may get executed in order to incrementally
+ /// clean up the caches as they fill up.
+ ///
+ /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
+ /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
+ /// never run, but the epoch-based garbage collection will make an effort to execute it
+ /// reasonably soon.
+ ///
+ /// If this method is called from an [`unprotected`] guard, the function will simply be
+ /// executed immediately.
+ ///
+ /// [`unprotected`]: fn.unprotected.html
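+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the closure must be `Send + 'static`:
+    ///
+    /// ```
+    /// use crossbeam_epoch as epoch;
+    ///
+    /// let guard = &epoch::pin();
+    /// guard.defer(move || {
+    ///     println!("This runs at some point after all currently pinned threads unpin.");
+    /// });
+    /// guard.flush();
+    /// ```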
+ pub fn defer<F, R>(&self, f: F)
+ where
+ F: FnOnce() -> R,
+ F: Send + 'static,
+ {
+ unsafe {
+ self.defer_unchecked(f);
+ }
+ }
+
+ /// Stores a function so that it can be executed at some point after all currently pinned
+ /// threads get unpinned.
+ ///
+ /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
+ /// becomes full, some functions are moved into the global cache. At the same time, some
+ /// functions from both local and global caches may get executed in order to incrementally
+ /// clean up the caches as they fill up.
+ ///
+ /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
+ /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
+ /// never run, but the epoch-based garbage collection will make an effort to execute it
+ /// reasonably soon.
+ ///
+ /// If this method is called from an [`unprotected`] guard, the function will simply be
+ /// executed immediately.
+ ///
+ /// # Safety
+ ///
+    /// The given function must not hold any reference to data on the stack. It is highly
+    /// recommended that the passed function is **always** marked with `move` in order to prevent
+    /// accidental borrows.
+ ///
+ /// ```
+ /// use crossbeam_epoch as epoch;
+ ///
+ /// let guard = &epoch::pin();
+ /// let message = "Hello!";
+ /// unsafe {
+ /// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
+ /// guard.defer_unchecked(move || {
+ /// println!("{}", message);
+ /// });
+ /// }
+ /// ```
+ ///
+ /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
+ /// the closure must be `Send`.
+ ///
+    /// We intentionally didn't require `F: Send`, because Rust's type system usually cannot prove
+ /// `F: Send` for typical use cases. For example, consider the following code snippet, which
+ /// exemplifies the typical use case of deferring the deallocation of a shared reference:
+ ///
+ /// ```ignore
+ /// let shared = Owned::new(7i32).into_shared(guard);
+ /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
+ /// ```
+ ///
+ /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
+ /// because it's called only after the grace period and `shared` is no longer shared with other
+ /// threads. But we don't expect type systems to prove this.
+ ///
+ /// # Examples
+ ///
+ /// When a heap-allocated object in a data structure becomes unreachable, it has to be
+    /// deallocated. However, the current thread and other threads may still be holding references
+ /// on the stack to that same object. Therefore it cannot be deallocated before those references
+ /// get dropped. This method can defer deallocation until all those threads get unpinned and
+ /// consequently drop all their references on the stack.
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new("foo");
+ ///
+ /// // Now suppose that `a` is shared among multiple threads and concurrently
+ /// // accessed and modified...
+ ///
+ /// // Pin the current thread.
+ /// let guard = &epoch::pin();
+ ///
+ /// // Steal the object currently stored in `a` and swap it with another one.
+ /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+ ///
+ /// if !p.is_null() {
+ /// // The object `p` is pointing to is now unreachable.
+ /// // Defer its deallocation until all currently pinned threads get unpinned.
+ /// unsafe {
+ /// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
+ /// guard.defer_unchecked(move || {
+ /// println!("{} is now being deallocated.", p.deref());
+ /// // Now we have unique access to the object pointed to by `p` and can turn it
+ /// // into an `Owned`. Dropping the `Owned` will deallocate the object.
+ /// drop(p.into_owned());
+ /// });
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub unsafe fn defer_unchecked<F, R>(&self, f: F)
+ where
+ F: FnOnce() -> R,
+ {
+ if let Some(local) = self.local.as_ref() {
+ local.defer(Deferred::new(move || drop(f())), self);
+ } else {
+ drop(f());
+ }
+ }
+
+ /// Stores a destructor for an object so that it can be deallocated and dropped at some point
+ /// after all currently pinned threads get unpinned.
+ ///
+ /// This method first stores the destructor into the thread-local (or handle-local) cache. If
+ /// this cache becomes full, some destructors are moved into the global cache. At the same
+ /// time, some destructors from both local and global caches may get executed in order to
+ /// incrementally clean up the caches as they fill up.
+ ///
+ /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
+ /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
+ /// destructor might never run, but the epoch-based garbage collection will make an effort to
+ /// execute it reasonably soon.
+ ///
+ /// If this method is called from an [`unprotected`] guard, the destructor will simply be
+ /// executed immediately.
+ ///
+ /// # Safety
+ ///
+ /// The object must not be reachable by other threads anymore, otherwise it might be still in
+ /// use when the destructor runs.
+ ///
+ /// Apart from that, keep in mind that another thread may execute the destructor, so the object
+ /// must be sendable to other threads.
+ ///
+    /// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
+ /// `T: Send` for typical use cases. For example, consider the following code snippet, which
+ /// exemplifies the typical use case of deferring the deallocation of a shared reference:
+ ///
+ /// ```ignore
+ /// let shared = Owned::new(7i32).into_shared(guard);
+ /// guard.defer_destroy(shared); // `Shared` is not `Send`!
+ /// ```
+ ///
+ /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
+ /// it's called only after the grace period and `shared` is no longer shared with other
+ /// threads. But we don't expect type systems to prove this.
+ ///
+ /// # Examples
+ ///
+ /// When a heap-allocated object in a data structure becomes unreachable, it has to be
+    /// deallocated. However, the current thread and other threads may still be holding references
+ /// on the stack to that same object. Therefore it cannot be deallocated before those references
+ /// get dropped. This method can defer deallocation until all those threads get unpinned and
+ /// consequently drop all their references on the stack.
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new("foo");
+ ///
+ /// // Now suppose that `a` is shared among multiple threads and concurrently
+ /// // accessed and modified...
+ ///
+ /// // Pin the current thread.
+ /// let guard = &epoch::pin();
+ ///
+ /// // Steal the object currently stored in `a` and swap it with another one.
+ /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
+ ///
+ /// if !p.is_null() {
+ /// // The object `p` is pointing to is now unreachable.
+ /// // Defer its deallocation until all currently pinned threads get unpinned.
+ /// unsafe {
+ /// guard.defer_destroy(p);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub unsafe fn defer_destroy<T>(&self, ptr: Shared<T>) {
+ self.defer_unchecked(move || ptr.into_owned());
+ }
+
+ /// Clears up the thread-local cache of deferred functions by executing them or moving into the
+ /// global cache.
+ ///
+ /// Call this method after deferring execution of a function if you want to get it executed as
+    /// soon as possible. Flushing will make sure it is residing in the global cache, so that
+ /// any thread has a chance of taking the function and executing it.
+ ///
+ /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch as epoch;
+ ///
+ /// let guard = &epoch::pin();
+ /// unsafe {
+ /// guard.defer(move || {
+ /// println!("This better be printed as soon as possible!");
+ /// });
+ /// }
+ /// guard.flush();
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub fn flush(&self) {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ local.flush(self);
+ }
+ }
+
+ /// Unpins and then immediately re-pins the thread.
+ ///
+    /// This method is useful when you don't want to delay the advancement of the global epoch by
+    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
+    /// the call (this is enforced by `&mut self`). The thread will only be repinned if this
+ /// is the only active guard for the current thread.
+ ///
+    /// If this method is called from an [`unprotected`] guard, then the call is simply a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let a = Atomic::new(777);
+ /// let mut guard = epoch::pin();
+ /// {
+ /// let p = a.load(SeqCst, &guard);
+ /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
+ /// }
+ /// guard.repin();
+ /// {
+ /// let p = a.load(SeqCst, &guard);
+ /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
+ /// }
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub fn repin(&mut self) {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ local.repin();
+ }
+ }
+
+ /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
+ ///
+ /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
+ /// and don't need to maintain any guard-based reference across the call (the latter is enforced
+ /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
+ /// current thread.
+ ///
+ /// If this method is called from an [`unprotected`] guard, then the passed function is called
+ /// directly without unpinning the thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// let a = Atomic::new(777);
+ /// let mut guard = epoch::pin();
+ /// {
+ /// let p = a.load(SeqCst, &guard);
+ /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
+ /// }
+ /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
+ /// {
+ /// let p = a.load(SeqCst, &guard);
+ /// assert_eq!(unsafe { p.as_ref() }, Some(&777));
+ /// }
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub fn repin_after<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ // We need to acquire a handle here to ensure the Local doesn't
+ // disappear from under us.
+ local.acquire_handle();
+ local.unpin();
+ }
+
+ // Ensure the Guard is re-pinned even if the function panics
+ defer! {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ mem::forget(local.pin());
+ local.release_handle();
+ }
+ }
+
+ f()
+ }
+
+ /// Returns the `Collector` associated with this guard.
+ ///
+ /// This method is useful when you need to ensure that all guards used with
+ /// a data structure come from the same collector.
+ ///
+ /// If this method is called from an [`unprotected`] guard, then `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch as epoch;
+ ///
+ /// let mut guard1 = epoch::pin();
+ /// let mut guard2 = epoch::pin();
+ /// assert!(guard1.collector() == guard2.collector());
+ /// ```
+ ///
+ /// [`unprotected`]: fn.unprotected.html
+ pub fn collector(&self) -> Option<&Collector> {
+ unsafe { self.local.as_ref().map(|local| local.collector()) }
+ }
+}
+
+impl Drop for Guard {
+ #[inline]
+ fn drop(&mut self) {
+ if let Some(local) = unsafe { self.local.as_ref() } {
+ local.unpin();
+ }
+ }
+}
+
+impl fmt::Debug for Guard {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("Guard { .. }")
+ }
+}
+
+/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
+///
+/// This guard should be used only in special cases. Note that it doesn't actually keep any
+/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
+///
+/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
+/// execute the function immediately.
+///
+/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
+///
+/// # Safety
+///
+/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
+/// [`Atomic`] is not being concurrently modified by other threads.
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// let a = Atomic::new(7);
+///
+/// unsafe {
+/// // Load `a` without pinning the current thread.
+/// a.load(Relaxed, epoch::unprotected());
+///
+/// // It's possible to create more dummy guards by calling `clone()`.
+/// let dummy = &epoch::unprotected().clone();
+///
+/// dummy.defer(move || {
+/// println!("This gets executed immediately.");
+/// });
+///
+///     // Dropping `dummy` doesn't affect the current thread - it's just a no-op.
+/// }
+/// ```
+///
+/// The most common use of this function is when constructing or destructing a data structure.
+///
+/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
+/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
+///
+/// If we were to actually pin the current thread during destruction, that would just unnecessarily
+/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
+/// is very helpful.
+///
+/// ```
+/// use crossbeam_epoch::{self as epoch, Atomic};
+/// use std::mem::ManuallyDrop;
+/// use std::sync::atomic::Ordering::Relaxed;
+///
+/// struct Stack<T> {
+/// head: Atomic<Node<T>>,
+/// }
+///
+/// struct Node<T> {
+/// data: ManuallyDrop<T>,
+/// next: Atomic<Node<T>>,
+/// }
+///
+/// impl<T> Drop for Stack<T> {
+/// fn drop(&mut self) {
+/// unsafe {
+/// // Unprotected load.
+/// let mut node = self.head.load(Relaxed, epoch::unprotected());
+///
+/// while let Some(n) = node.as_ref() {
+/// // Unprotected load.
+/// let next = n.next.load(Relaxed, epoch::unprotected());
+///
+/// // Take ownership of the node, then drop its data and deallocate it.
+/// let mut o = node.into_owned();
+/// ManuallyDrop::drop(&mut o.data);
+/// drop(o);
+///
+/// node = next;
+/// }
+/// }
+/// }
+/// }
+/// ```
+///
+/// [`Atomic`]: struct.Atomic.html
+/// [`defer`]: struct.Guard.html#method.defer
+#[inline]
+pub unsafe fn unprotected() -> &'static Guard {
+ // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null.
+ // Since this function returns a `'static` reference to a `Guard`, we must return a reference
+ // to a global guard. However, it's not possible to create a `static` `Guard` because it does
+ // not implement `Sync`. To get around the problem, we create a static `usize` initialized to
+ // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard`
+ // (consisting of a single pointer) have the same representation in memory.
+ static UNPROTECTED: usize = 0;
+ &*(&UNPROTECTED as *const _ as *const Guard)
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/internal.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/internal.rs
new file mode 100644
index 0000000000..22038f30da
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/internal.rs
@@ -0,0 +1,595 @@
+//! The global data and participant for garbage collection.
+//!
+//! # Registration
+//!
+//! In order to track all participants in one place, we need some form of participant
+//! registration. When a participant is created, it is registered in a global lock-free
+//! singly-linked list of registries; and when a participant leaves, it is unregistered from the
+//! list.
+//!
+//! # Pinning
+//!
+//! Every participant contains an integer that tells whether the participant is pinned and if so,
+//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that
+//! aids in periodic global epoch advancement.
+//!
+//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned.
+//! Guards are necessary for performing atomic operations, and for freeing/dropping locations.
+//!
+//! # Thread-local bag
+//!
+//! Objects that get unlinked from concurrent data structures must be stashed away until the global
+//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects
+//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current
+//! global epoch and pushed into the global queue of bags. We store objects in thread-local storage
+//! to amortize the synchronization cost of pushing garbage to the global queue.
+//!
+//! # Global queue
+//!
+//! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and
+//! destroyed along the way. This design reduces contention on data structures. The global queue
+//! cannot be explicitly accessed: the only way to interact with it is by calling `defer()`, which
+//! adds an object to the thread-local bag, or `collect()`, which manually triggers garbage
+//! collection.
+//!
+//! Ideally, each instance of a concurrent data structure would have its own queue that gets fully
+//! destroyed as soon as the data structure is dropped.
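+//!
+//! In terms of the crate's public API, this flow looks roughly like the sketch below (a minimal,
+//! illustrative example, not a complete program):
+//!
+//! ```ignore
+//! use crossbeam_epoch as epoch;
+//!
+//! let guard = epoch::pin();   // pin (and, if needed, register) the current participant
+//! guard.defer(|| {
+//!     // Runs once the global epoch has advanced far enough; until then the closure sits in
+//!     // the thread-local bag and, after the bag fills up, in the global queue.
+//! });
+//! guard.flush();              // push the local bag into the global queue and collect
+//! ```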
+
+use core::cell::{Cell, UnsafeCell};
+use core::mem::{self, ManuallyDrop};
+use core::num::Wrapping;
+use core::{ptr, fmt};
+use core::sync::atomic;
+use core::sync::atomic::Ordering;
+
+use crossbeam_utils::CachePadded;
+
+use atomic::{Shared, Owned};
+use collector::{Collector, LocalHandle};
+use deferred::Deferred;
+use epoch::{AtomicEpoch, Epoch};
+use guard::{unprotected, Guard};
+use sync::list::{Entry, IsElement, IterError, List};
+use sync::queue::Queue;
+
+/// Maximum number of objects a bag can contain.
+#[cfg(not(feature = "sanitize"))]
+const MAX_OBJECTS: usize = 64;
+#[cfg(feature = "sanitize")]
+const MAX_OBJECTS: usize = 4;
+
+/// A bag of deferred functions.
+pub struct Bag {
+ /// Stashed objects.
+ deferreds: [Deferred; MAX_OBJECTS],
+ len: usize
+}
+
+/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions.
+unsafe impl Send for Bag {}
+
+impl Bag {
+ /// Returns a new, empty bag.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Returns `true` if the bag is empty.
+ pub fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Attempts to insert a deferred function into the bag.
+ ///
+ /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is
+ /// full.
+ ///
+ /// # Safety
+ ///
+ /// It should be safe for another thread to execute the given function.
+ pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
+ if self.len < MAX_OBJECTS {
+ self.deferreds[self.len] = deferred;
+ self.len += 1;
+ Ok(())
+ } else {
+ Err(deferred)
+ }
+ }
+
+ /// Seals the bag with the given epoch.
+ fn seal(self, epoch: Epoch) -> SealedBag {
+ SealedBag { epoch, bag: self }
+ }
+}
+
+impl Default for Bag {
+ fn default() -> Self {
+ // TODO: [no_op; MAX_OBJECTS] syntax blocked by https://github.com/rust-lang/rust/issues/49147
+ #[cfg(not(feature = "sanitize"))]
+ return Bag { len: 0, deferreds:
+ [Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func),
+ Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func)]
+ };
+ #[cfg(feature = "sanitize")]
+ return Bag { len: 0, deferreds: [Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func), Deferred::new(no_op_func)] };
+ }
+}
+
+impl Drop for Bag {
+ fn drop(&mut self) {
+ // Call all deferred functions.
+ for deferred in &mut self.deferreds[..self.len] {
+ let no_op = Deferred::new(no_op_func);
+ let owned_deferred = mem::replace(deferred, no_op);
+ owned_deferred.call();
+ }
+ }
+}
+
+// can't #[derive(Debug)] because `Debug` is only implemented for arrays of up to 32 elements
+impl fmt::Debug for Bag {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Bag").field("deferreds", &&self.deferreds[..self.len]).finish()
+ }
+}
+
+fn no_op_func() {}
+
+/// A pair of an epoch and a bag.
+#[derive(Default, Debug)]
+struct SealedBag {
+ epoch: Epoch,
+ bag: Bag,
+}
+
+/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch.
+unsafe impl Sync for SealedBag {}
+
+impl SealedBag {
+ /// Checks if it is safe to drop the bag w.r.t. the given global epoch.
+ fn is_expired(&self, global_epoch: Epoch) -> bool {
+ // A pinned participant can witness at most one epoch advancement. Therefore, any bag that
+ // is within one epoch of the current one cannot be destroyed yet.
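+        // For example, a bag sealed at epoch 4 may only be destroyed once the global epoch
+        // reaches 6 or later; epochs 4 and 5 are still too close.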
+ global_epoch.wrapping_sub(self.epoch) >= 2
+ }
+}
+
+/// The global data for a garbage collector.
+pub struct Global {
+ /// The intrusive linked list of `Local`s.
+ locals: List<Local>,
+
+ /// The global queue of bags of deferred functions.
+ queue: Queue<SealedBag>,
+
+ /// The global epoch.
+ pub(crate) epoch: CachePadded<AtomicEpoch>,
+}
+
+impl Global {
+ /// Number of bags to destroy.
+ const COLLECT_STEPS: usize = 8;
+
+ /// Creates a new global data for garbage collection.
+ #[inline]
+ pub fn new() -> Self {
+ Self {
+ locals: List::new(),
+ queue: Queue::new(),
+ epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
+ }
+ }
+
+ /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
+ pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
+ let bag = mem::replace(bag, Bag::new());
+
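+        // Full fence so that the relaxed epoch load below is not ordered before the operations
+        // that unlinked the objects now deferred in this bag; the bag is then sealed with that
+        // epoch.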
+ atomic::fence(Ordering::SeqCst);
+
+ let epoch = self.epoch.load(Ordering::Relaxed);
+ self.queue.push(bag.seal(epoch), guard);
+ }
+
+ /// Collects several bags from the global queue and executes deferred functions in them.
+ ///
+ /// Note: This may itself produce garbage and in turn allocate new bags.
+ ///
+ /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold
+ /// path. In other words, we want the compiler to optimize branching for the case when
+ /// `collect()` is not called.
+ #[cold]
+ pub fn collect(&self, guard: &Guard) {
+ let global_epoch = self.try_advance(guard);
+
+ let steps = if cfg!(feature = "sanitize") {
+ usize::max_value()
+ } else {
+ Self::COLLECT_STEPS
+ };
+
+ for _ in 0..steps {
+ match self.queue.try_pop_if(
+ &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch),
+ guard,
+ ) {
+ None => break,
+ Some(sealed_bag) => drop(sealed_bag),
+ }
+ }
+ }
+
+ /// Attempts to advance the global epoch.
+ ///
+ /// The global epoch can advance only if all currently pinned participants have been pinned in
+ /// the current epoch.
+ ///
+ /// Returns the current global epoch.
+ ///
+ /// `try_advance()` is annotated `#[cold]` because it is rarely called.
+ #[cold]
+ pub fn try_advance(&self, guard: &Guard) -> Epoch {
+ let global_epoch = self.epoch.load(Ordering::Relaxed);
+ atomic::fence(Ordering::SeqCst);
+
+ // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly
+ // easy to implement in a lock-free manner. However, traversal can be slow due to cache
+ // misses and data dependencies. We should experiment with other data structures as well.
+ for local in self.locals.iter(&guard) {
+ match local {
+ Err(IterError::Stalled) => {
+ // A concurrent thread stalled this iteration. That thread might also try to
+ // advance the epoch, in which case we leave the job to it. Otherwise, the
+ // epoch will not be advanced.
+ return global_epoch;
+ }
+ Ok(local) => {
+ let local_epoch = local.epoch.load(Ordering::Relaxed);
+
+ // If the participant was pinned in a different epoch, we cannot advance the
+ // global epoch just yet.
+ if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
+ return global_epoch;
+ }
+ }
+ }
+ }
+ atomic::fence(Ordering::Acquire);
+
+ // All pinned participants were pinned in the current global epoch.
+ // Now let's advance the global epoch...
+ //
+ // Note that if another thread already advanced it before us, this store will simply
+ // overwrite the global epoch with the same value. This is true because `try_advance` was
+ // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
+ // advanced two steps ahead of it.
+ let new_epoch = global_epoch.successor();
+ self.epoch.store(new_epoch, Ordering::Release);
+ new_epoch
+ }
+}
+
+/// Participant for garbage collection.
+pub struct Local {
+ /// A node in the intrusive linked list of `Local`s.
+ entry: Entry,
+
+ /// The local epoch.
+ epoch: AtomicEpoch,
+
+ /// A reference to the global data.
+ ///
+ /// When all guards and handles get dropped, this reference is destroyed.
+ collector: UnsafeCell<ManuallyDrop<Collector>>,
+
+ /// The local bag of deferred functions.
+ pub(crate) bag: UnsafeCell<Bag>,
+
+ /// The number of guards keeping this participant pinned.
+ guard_count: Cell<usize>,
+
+ /// The number of active handles.
+ handle_count: Cell<usize>,
+
+ /// Total number of pinnings performed.
+ ///
+    /// This is just an auxiliary counter that sometimes kicks off collection.
+ pin_count: Cell<Wrapping<usize>>,
+}
+
+impl Local {
+ /// Number of pinnings after which a participant will execute some deferred functions from the
+ /// global queue.
+ const PINNINGS_BETWEEN_COLLECT: usize = 128;
+
+ /// Registers a new `Local` in the provided `Global`.
+ pub fn register(collector: &Collector) -> LocalHandle {
+ unsafe {
+ // Since we dereference no pointers in this block, it is safe to use `unprotected`.
+
+ let local = Owned::new(Local {
+ entry: Entry::default(),
+ epoch: AtomicEpoch::new(Epoch::starting()),
+ collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())),
+ bag: UnsafeCell::new(Bag::new()),
+ guard_count: Cell::new(0),
+ handle_count: Cell::new(1),
+ pin_count: Cell::new(Wrapping(0)),
+ })
+ .into_shared(&unprotected());
+ collector.global.locals.insert(local, &unprotected());
+ LocalHandle {
+ local: local.as_raw(),
+ }
+ }
+ }
+
+ /// Returns a reference to the `Global` in which this `Local` resides.
+ #[inline]
+ pub fn global(&self) -> &Global {
+ &self.collector().global
+ }
+
+ /// Returns a reference to the `Collector` in which this `Local` resides.
+ #[inline]
+ pub fn collector(&self) -> &Collector {
+ unsafe { &**self.collector.get() }
+ }
+
+ /// Returns `true` if the current participant is pinned.
+ #[inline]
+ pub fn is_pinned(&self) -> bool {
+ self.guard_count.get() > 0
+ }
+
+ /// Adds `deferred` to the thread-local bag.
+ ///
+ /// # Safety
+ ///
+ /// It should be safe for another thread to execute the given function.
+ pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
+ let bag = &mut *self.bag.get();
+
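+        // If the local bag is full, push it to the global queue and retry with the same
+        // deferred function.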
+ while let Err(d) = bag.try_push(deferred) {
+ self.global().push_bag(bag, guard);
+ deferred = d;
+ }
+ }
+
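+    /// Pushes the local bag into the global queue (unless it is empty), then collects some
+    /// garbage from the global queue.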
+ pub fn flush(&self, guard: &Guard) {
+ let bag = unsafe { &mut *self.bag.get() };
+
+ if !bag.is_empty() {
+ self.global().push_bag(bag, guard);
+ }
+
+ self.global().collect(guard);
+ }
+
+ /// Pins the `Local`.
+ #[inline]
+ pub fn pin(&self) -> Guard {
+ let guard = Guard { local: self };
+
+ let guard_count = self.guard_count.get();
+ self.guard_count.set(guard_count.checked_add(1).unwrap());
+
+ if guard_count == 0 {
+ let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+ let new_epoch = global_epoch.pinned();
+
+ // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
+ // The fence makes sure that any future loads from `Atomic`s will not happen before
+ // this store.
+ if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+ // HACK(stjepang): On x86 architectures there are two different ways of executing
+ // a `SeqCst` fence.
+ //
+ // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+ // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+ // instruction.
+ //
+ // Both instructions have the effect of a full barrier, but benchmarks have shown
+ // that the second one makes pinning faster in this particular case. It is not
+ // clear that this is permitted by the C++ memory model (SC fences work very
+ // differently from SC accesses), but experimental evidence suggests that this
+ // works fine. Using inline assembly would be a viable (and correct) alternative,
+ // but alas, that is not possible on stable Rust.
+ let current = Epoch::starting();
+ let previous = self
+ .epoch
+ .compare_and_swap(current, new_epoch, Ordering::SeqCst);
+ debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+ // We add a compiler fence to make it less likely for LLVM to do something wrong
+ // here. Formally, this is not enough to get rid of data races; practically,
+ // it should go a long way.
+ atomic::compiler_fence(Ordering::SeqCst);
+ } else {
+ self.epoch.store(new_epoch, Ordering::Relaxed);
+ atomic::fence(Ordering::SeqCst);
+ }
+
+ // Increment the pin counter.
+ let count = self.pin_count.get();
+ self.pin_count.set(count + Wrapping(1));
+
+            // After every `PINNINGS_BETWEEN_COLLECT` pinnings, try advancing the epoch and
+            // collecting some garbage.
+ if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 {
+ self.global().collect(&guard);
+ }
+ }
+
+ guard
+ }
+
+ /// Unpins the `Local`.
+ #[inline]
+ pub fn unpin(&self) {
+ let guard_count = self.guard_count.get();
+ self.guard_count.set(guard_count - 1);
+
+ if guard_count == 1 {
+ self.epoch.store(Epoch::starting(), Ordering::Release);
+
+ if self.handle_count.get() == 0 {
+ self.finalize();
+ }
+ }
+ }
+
+ /// Unpins and then pins the `Local`.
+ #[inline]
+ pub fn repin(&self) {
+ let guard_count = self.guard_count.get();
+
+ // Update the local epoch only if there's only one guard.
+ if guard_count == 1 {
+ let epoch = self.epoch.load(Ordering::Relaxed);
+ let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned();
+
+ // Update the local epoch only if the global epoch is greater than the local epoch.
+ if epoch != global_epoch {
+ // We store the new epoch with `Release` because we need to ensure any memory
+ // accesses from the previous epoch do not leak into the new one.
+ self.epoch.store(global_epoch, Ordering::Release);
+
+ // However, we don't need a following `SeqCst` fence, because it is safe for memory
+ // accesses from the new epoch to be executed before updating the local epoch. At
+                // worst, other threads will see the new epoch late and delay GC slightly.
+ }
+ }
+ }
+
+ /// Increments the handle count.
+ #[inline]
+ pub fn acquire_handle(&self) {
+ let handle_count = self.handle_count.get();
+ debug_assert!(handle_count >= 1);
+ self.handle_count.set(handle_count + 1);
+ }
+
+ /// Decrements the handle count.
+ #[inline]
+ pub fn release_handle(&self) {
+ let guard_count = self.guard_count.get();
+ let handle_count = self.handle_count.get();
+ debug_assert!(handle_count >= 1);
+ self.handle_count.set(handle_count - 1);
+
+ if guard_count == 0 && handle_count == 1 {
+ self.finalize();
+ }
+ }
+
+ /// Removes the `Local` from the global linked list.
+ #[cold]
+ fn finalize(&self) {
+ debug_assert_eq!(self.guard_count.get(), 0);
+ debug_assert_eq!(self.handle_count.get(), 0);
+
+ // Temporarily increment handle count. This is required so that the following call to `pin`
+ // doesn't call `finalize` again.
+ self.handle_count.set(1);
+ unsafe {
+ // Pin and move the local bag into the global queue. It's important that `push_bag`
+ // doesn't defer destruction on any new garbage.
+ let guard = &self.pin();
+ self.global().push_bag(&mut *self.bag.get(), guard);
+ }
+ // Revert the handle count back to zero.
+ self.handle_count.set(0);
+
+ unsafe {
+ // Take the reference to the `Global` out of this `Local`. Since we're not protected
+ // by a guard at this time, it's crucial that the reference is read before marking the
+ // `Local` as deleted.
+ let collector: Collector = ptr::read(&*(*self.collector.get()));
+
+ // Mark this node in the linked list as deleted.
+ self.entry.delete(&unprotected());
+
+ // Finally, drop the reference to the global. Note that this might be the last reference
+ // to the `Global`. If so, the global data will be destroyed and all deferred functions
+ // in its queue will be executed.
+ drop(collector);
+ }
+ }
+}
+
+impl IsElement<Local> for Local {
+ fn entry_of(local: &Local) -> &Entry {
+ let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry;
+ unsafe { &*entry_ptr }
+ }
+
+ unsafe fn element_of(entry: &Entry) -> &Local {
+ // offset_of! macro uses unsafe, but it's unnecessary in this context.
+ #[allow(unused_unsafe)]
+ let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local;
+ &*local_ptr
+ }
+
+ unsafe fn finalize(entry: &Entry, guard: &Guard) {
+ guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ use super::*;
+
+ #[test]
+ fn check_defer() {
+ static FLAG: AtomicUsize = AtomicUsize::new(0);
+ fn set() {
+ FLAG.store(42, Ordering::Relaxed);
+ }
+
+ let d = Deferred::new(set);
+ assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+ d.call();
+ assert_eq!(FLAG.load(Ordering::Relaxed), 42);
+ }
+
+ #[test]
+ fn check_bag() {
+ static FLAG: AtomicUsize = AtomicUsize::new(0);
+ fn incr() {
+ FLAG.fetch_add(1, Ordering::Relaxed);
+ }
+
+ let mut bag = Bag::new();
+ assert!(bag.is_empty());
+
+ for _ in 0..MAX_OBJECTS {
+ assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() });
+ assert!(!bag.is_empty());
+ assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+ }
+
+ let result = unsafe { bag.try_push(Deferred::new(incr)) };
+ assert!(result.is_err());
+ assert!(!bag.is_empty());
+ assert_eq!(FLAG.load(Ordering::Relaxed), 0);
+
+ drop(bag);
+ assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS);
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/lib.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/lib.rs
new file mode 100644
index 0000000000..c5b504e50c
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/lib.rs
@@ -0,0 +1,106 @@
+//! Epoch-based memory reclamation.
+//!
+//! An interesting problem concurrent collections have to deal with comes from the remove
+//! operation. Suppose that a thread removes an element from a lock-free map while another thread
+//! is reading that same element at the same time. The first thread must wait until the second
+//! thread stops reading the element; only then is it safe to destroy it.
+//!
+//! Programming languages that come with garbage collectors solve this problem trivially. The
+//! garbage collector will destruct the removed element when no thread can hold a reference to it
+//! anymore.
+//!
+//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
+//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
+//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
+//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
+//! can be referencing it anymore.
+//!
+//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
+//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
+//! users of concurrent collections don't have to worry much about.
+//!
+//! # Pointers
+//!
+//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
+//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
+//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
+//! read.
+//!
+//! # Pinning
+//!
+//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
+//! we declare that any object that gets removed from now on must not be destructed just
+//! yet. Garbage collection of newly removed objects is suspended until the participant gets
+//! unpinned.
+//!
+//! # Garbage
+//!
+//! Objects that get removed from concurrent collections must be stashed away until all currently
+//! pinned participants get unpinned. Such objects can be stored into a thread-local or global
+//! storage, where they are kept until the right time for their destruction comes.
+//!
+//! There is a global shared instance of the garbage queue. You can [`defer`] the execution of an
+//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data
+//! structures may defer the deallocation of an object.
+//!
+//! # APIs
+//!
+//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If you
+//! want to create your own garbage collector, use the [`Collector`] API.
+//!
+//! [`Atomic`]: struct.Atomic.html
+//! [`Collector`]: struct.Collector.html
+//! [`Shared`]: struct.Shared.html
+//! [`pin`]: fn.pin.html
+//! [`defer`]: fn.defer.html
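+//!
+//! As a brief, illustrative sketch of how these pieces fit together, using only the types listed
+//! above:
+//!
+//! ```
+//! use crossbeam_epoch::{self as epoch, Atomic, Owned};
+//! use std::sync::atomic::Ordering::SeqCst;
+//!
+//! let a = Atomic::new(1234);
+//!
+//! // Pin the current participant.
+//! let guard = &epoch::pin();
+//!
+//! // Replace the value and get back a `Shared` pointer to the old one.
+//! let p = a.swap(Owned::new(5678), SeqCst, guard);
+//!
+//! unsafe {
+//!     assert_eq!(*p.deref(), 1234);
+//!     // Defer destruction of the old value until no pinned participant can still read it.
+//!     guard.defer_destroy(p);
+//! }
+//! ```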
+
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
+
+#[macro_use]
+extern crate cfg_if;
+#[cfg(feature = "std")]
+extern crate core;
+
+cfg_if! {
+ if #[cfg(feature = "alloc")] {
+ extern crate alloc;
+ } else if #[cfg(feature = "std")] {
+ extern crate std as alloc;
+ }
+}
+
+#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
+cfg_if! {
+ if #[cfg(any(feature = "alloc", feature = "std"))] {
+ extern crate crossbeam_utils;
+ #[macro_use]
+ extern crate memoffset;
+ #[macro_use]
+ extern crate scopeguard;
+
+ mod atomic;
+ mod collector;
+ mod deferred;
+ mod epoch;
+ mod guard;
+ mod internal;
+ mod sync;
+
+ pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Pointer, Shared};
+ pub use self::collector::{Collector, LocalHandle};
+ pub use self::guard::{unprotected, Guard};
+ }
+}
+
+cfg_if! {
+ if #[cfg(feature = "std")] {
+ #[macro_use]
+ extern crate lazy_static;
+
+ mod default;
+ pub use self::default::{default_collector, is_pinned, pin};
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/sync/list.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/list.rs
new file mode 100644
index 0000000000..623fbd2a8e
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/list.rs
@@ -0,0 +1,478 @@
+//! Lock-free intrusive linked list.
+//!
+//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA
+//! 2002. http://dl.acm.org/citation.cfm?id=564870.564881
+
+use core::marker::PhantomData;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use {unprotected, Atomic, Guard, Shared};
+
+/// An entry in a linked list.
+///
+/// An `Entry` is accessed from multiple threads, so for performance it is beneficial to place it
+/// on a different cache line than thread-local data.
+#[derive(Debug)]
+pub struct Entry {
+ /// The next entry in the linked list.
+ /// If the tag is 1, this entry is marked as deleted.
+ next: Atomic<Entry>,
+}
+
+/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive
+/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance
+/// of `Entry`.
+///
+/// # Example
+///
+/// ```ignore
+/// struct A {
+/// entry: Entry,
+/// data: usize,
+/// }
+///
+/// impl IsElement<A> for A {
+/// fn entry_of(a: &A) -> &Entry {
+///         let entry_ptr = ((a as *const A as usize) + offset_of!(A, entry)) as *const Entry;
+/// unsafe { &*entry_ptr }
+/// }
+///
+///     unsafe fn element_of(entry: &Entry) -> &A {
+///         let elem_ptr = ((entry as *const Entry as usize) - offset_of!(A, entry)) as *const A;
+/// &*elem_ptr
+/// }
+///
+/// unsafe fn finalize(entry: &Entry, guard: &Guard) {
+/// guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
+/// }
+/// }
+/// ```
+///
+/// This trait is implemented on a type separate from `T` (although it can be just `T`), because
+/// one type might be placeable into multiple lists, in which case it would require multiple
+/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>`
+/// represents a distinct `Entry` in `T`.
+///
+/// For example, we can insert the following struct into two lists using `entry1` for one
+/// and `entry2` for the other:
+///
+/// ```ignore
+/// struct B {
+/// entry1: Entry,
+/// entry2: Entry,
+/// data: usize,
+/// }
+/// ```
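+///
+/// A possible sketch of the two corresponding helpers (hypothetical `ByEntry1`/`ByEntry2` marker
+/// types, mirroring the `A` example above):
+///
+/// ```ignore
+/// struct ByEntry1;
+///
+/// impl IsElement<B> for ByEntry1 {
+///     fn entry_of(b: &B) -> &Entry {
+///         let entry_ptr = ((b as *const B as usize) + offset_of!(B, entry1)) as *const Entry;
+///         unsafe { &*entry_ptr }
+///     }
+///
+///     unsafe fn element_of(entry: &Entry) -> &B {
+///         let elem_ptr = ((entry as *const Entry as usize) - offset_of!(B, entry1)) as *const B;
+///         &*elem_ptr
+///     }
+///
+///     unsafe fn finalize(entry: &Entry, guard: &Guard) {
+///         guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
+///     }
+/// }
+///
+/// // `ByEntry2` would be identical, except that it uses `offset_of!(B, entry2)`.
+/// ```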
+///
+pub trait IsElement<T> {
+ /// Returns a reference to this element's `Entry`.
+ fn entry_of(&T) -> &Entry;
+
+ /// Given a reference to an element's entry, returns that element.
+ ///
+ /// ```ignore
+ /// let elem = ListElement::new();
+ /// assert_eq!(elem.entry_of(),
+ /// unsafe { ListElement::element_of(elem.entry_of()) } );
+ /// ```
+ ///
+ /// # Safety
+ ///
+    /// The caller has to guarantee that the `Entry` this function is called with was retrieved
+    /// from an instance of the element type (`T`).
+ unsafe fn element_of(&Entry) -> &T;
+
+    /// The function that is called when an entry is unlinked from the list.
+ ///
+ /// # Safety
+ ///
+    /// The caller has to guarantee that the `Entry` this function is called with was retrieved
+    /// from an instance of the element type (`T`).
+ unsafe fn finalize(&Entry, &Guard);
+}
+
+/// A lock-free, intrusive linked list of type `T`.
+#[derive(Debug)]
+pub struct List<T, C: IsElement<T> = T> {
+ /// The head of the linked list.
+ head: Atomic<Entry>,
+
+ /// The phantom data for using `T` and `C`.
+ _marker: PhantomData<(T, C)>,
+}
+
+/// An iterator used for retrieving values from the list.
+pub struct Iter<'g, T: 'g, C: IsElement<T>> {
+ /// The guard that protects the iteration.
+ guard: &'g Guard,
+
+ /// Pointer from the predecessor to the current entry.
+ pred: &'g Atomic<Entry>,
+
+ /// The current entry.
+ curr: Shared<'g, Entry>,
+
+ /// The list head, needed for restarting iteration.
+ head: &'g Atomic<Entry>,
+
+ /// Logically, we store a borrow of an instance of `T` and
+ /// use the type information from `C`.
+ _marker: PhantomData<(&'g T, C)>,
+}
+
+/// An error that occurs during iteration over the list.
+#[derive(PartialEq, Debug)]
+pub enum IterError {
+ /// A concurrent thread modified the state of the list at the same place that this iterator
+ /// was inspecting. Subsequent iteration will restart from the beginning of the list.
+ Stalled,
+}
+
+impl Default for Entry {
+ /// Returns the empty entry.
+ fn default() -> Self {
+ Self {
+ next: Atomic::null(),
+ }
+ }
+}
+
+impl Entry {
+ /// Marks this entry as deleted, deferring the actual deallocation to a later iteration.
+ ///
+ /// # Safety
+ ///
+ /// The entry should be a member of a linked list, and it should not have been deleted.
+ /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
+ /// is the associated helper for the linked list.
+ pub unsafe fn delete(&self, guard: &Guard) {
+ self.next.fetch_or(1, Release, guard);
+ }
+}
+
+impl<T, C: IsElement<T>> List<T, C> {
+ /// Returns a new, empty linked list.
+ pub fn new() -> Self {
+ Self {
+ head: Atomic::null(),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Inserts `entry` into the head of the list.
+ ///
+ /// # Safety
+ ///
+ /// You should guarantee that:
+ ///
+ /// - `container` is not null
+ /// - `container` is immovable, e.g. inside an `Owned`
+ /// - the same `Entry` is not inserted more than once
+ /// - the inserted object will be removed before the list is dropped
+ pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
+ // Insert right after head, i.e. at the beginning of the list.
+ let to = &self.head;
+ // Get the intrusively stored Entry of the new element to insert.
+ let entry: &Entry = C::entry_of(container.deref());
+ // Make a Shared ptr to that Entry.
+ let entry_ptr = Shared::from(entry as *const _);
+ // Read the current successor of where we want to insert.
+ let mut next = to.load(Relaxed, guard);
+
+ loop {
+ // Set the Entry of the to-be-inserted element to point to the previous successor of
+ // `to`.
+ entry.next.store(next, Relaxed);
+ match to.compare_and_set_weak(next, entry_ptr, Release, guard) {
+ Ok(_) => break,
+ // We lost the race or weak CAS failed spuriously. Update the successor and try
+ // again.
+ Err(err) => next = err.current,
+ }
+ }
+ }
+
+ /// Returns an iterator over all objects.
+ ///
+ /// # Caveat
+ ///
+ /// Every object that is inserted at the moment this function is called and persists at least
+ /// until the end of iteration will be returned. Since this iterator traverses a lock-free
+ /// linked list that may be concurrently modified, some additional caveats apply:
+ ///
+ /// 1. If a new object is inserted during iteration, it may or may not be returned.
+ /// 2. If an object is deleted during iteration, it may or may not be returned.
+    /// 3. The iteration may be aborted when it loses a race with a concurrent thread. In this
+    ///    case, the winning thread will continue to iterate over the same list.
+ pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
+ Iter {
+ guard,
+ pred: &self.head,
+ curr: self.head.load(Acquire, guard),
+ head: &self.head,
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl<T, C: IsElement<T>> Drop for List<T, C> {
+ fn drop(&mut self) {
+ unsafe {
+ let guard = &unprotected();
+ let mut curr = self.head.load(Relaxed, guard);
+ while let Some(c) = curr.as_ref() {
+ let succ = c.next.load(Relaxed, guard);
+ // Verify that all elements have been removed from the list.
+ assert_eq!(succ.tag(), 1);
+
+ C::finalize(curr.deref(), guard);
+ curr = succ;
+ }
+ }
+ }
+}
+
+impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
+ type Item = Result<&'g T, IterError>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(c) = unsafe { self.curr.as_ref() } {
+ let succ = c.next.load(Acquire, self.guard);
+
+ if succ.tag() == 1 {
+ // This entry was removed. Try unlinking it from the list.
+ let succ = succ.with_tag(0);
+
+                // The tag should always be zero, because removing a node after a logically
+                // deleted node leaves the list in an invalid state.
+ debug_assert!(self.curr.tag() == 0);
+
+ match self
+ .pred
+ .compare_and_set(self.curr, succ, Acquire, self.guard)
+ {
+ Ok(_) => {
+ // We succeeded in unlinking this element from the list, so we have to
+ // schedule deallocation. Deferred drop is okay, because `list.delete()`
+ // can only be called if `T: 'static`.
+ unsafe {
+ C::finalize(self.curr.deref(), self.guard);
+ }
+
+                        // Move over the removed node by advancing only `curr`, not `pred`.
+ self.curr = succ;
+ continue;
+ }
+ Err(_) => {
+ // A concurrent thread modified the predecessor node. Since it might've
+ // been deleted, we need to restart from `head`.
+ self.pred = self.head;
+ self.curr = self.head.load(Acquire, self.guard);
+
+ return Some(Err(IterError::Stalled));
+ }
+ }
+ }
+
+ // Move one step forward.
+ self.pred = &c.next;
+ self.curr = succ;
+
+ return Some(Ok(unsafe { C::element_of(c) }));
+ }
+
+ // We reached the end of the list.
+ None
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crossbeam_utils::thread;
+ use std::sync::Barrier;
+ use {Collector, Owned};
+
+ impl IsElement<Entry> for Entry {
+ fn entry_of(entry: &Entry) -> &Entry {
+ entry
+ }
+
+ unsafe fn element_of(entry: &Entry) -> &Entry {
+ entry
+ }
+
+ unsafe fn finalize(entry: &Entry, guard: &Guard) {
+ guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
+ }
+ }
+
+ /// Checks whether the list retains inserted elements
+ /// and returns them in the correct order.
+ #[test]
+ fn insert() {
+ let collector = Collector::new();
+ let handle = collector.register();
+ let guard = handle.pin();
+
+ let l: List<Entry> = List::new();
+
+ let e1 = Owned::new(Entry::default()).into_shared(&guard);
+ let e2 = Owned::new(Entry::default()).into_shared(&guard);
+ let e3 = Owned::new(Entry::default()).into_shared(&guard);
+
+ unsafe {
+ l.insert(e1, &guard);
+ l.insert(e2, &guard);
+ l.insert(e3, &guard);
+ }
+
+ let mut iter = l.iter(&guard);
+ let maybe_e3 = iter.next();
+ assert!(maybe_e3.is_some());
+ assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+ let maybe_e2 = iter.next();
+ assert!(maybe_e2.is_some());
+ assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw());
+ let maybe_e1 = iter.next();
+ assert!(maybe_e1.is_some());
+ assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+ assert!(iter.next().is_none());
+
+ unsafe {
+ e1.as_ref().unwrap().delete(&guard);
+ e2.as_ref().unwrap().delete(&guard);
+ e3.as_ref().unwrap().delete(&guard);
+ }
+ }
+
+ /// Checks whether elements can be removed from the list and whether
+ /// the correct elements are removed.
+ #[test]
+ fn delete() {
+ let collector = Collector::new();
+ let handle = collector.register();
+ let guard = handle.pin();
+
+ let l: List<Entry> = List::new();
+
+ let e1 = Owned::new(Entry::default()).into_shared(&guard);
+ let e2 = Owned::new(Entry::default()).into_shared(&guard);
+ let e3 = Owned::new(Entry::default()).into_shared(&guard);
+ unsafe {
+ l.insert(e1, &guard);
+ l.insert(e2, &guard);
+ l.insert(e3, &guard);
+ e2.as_ref().unwrap().delete(&guard);
+ }
+
+ let mut iter = l.iter(&guard);
+ let maybe_e3 = iter.next();
+ assert!(maybe_e3.is_some());
+ assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
+ let maybe_e1 = iter.next();
+ assert!(maybe_e1.is_some());
+ assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
+ assert!(iter.next().is_none());
+
+ unsafe {
+ e1.as_ref().unwrap().delete(&guard);
+ e3.as_ref().unwrap().delete(&guard);
+ }
+
+ let mut iter = l.iter(&guard);
+ assert!(iter.next().is_none());
+ }
+
+ const THREADS: usize = 8;
+ const ITERS: usize = 512;
+
+ /// Contends the list on insert and delete operations to make sure they can run concurrently.
+ #[test]
+ fn insert_delete_multi() {
+ let collector = Collector::new();
+
+ let l: List<Entry> = List::new();
+ let b = Barrier::new(THREADS);
+
+ thread::scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ b.wait();
+
+ let handle = collector.register();
+ let guard: Guard = handle.pin();
+ let mut v = Vec::with_capacity(ITERS);
+
+ for _ in 0..ITERS {
+ let e = Owned::new(Entry::default()).into_shared(&guard);
+ v.push(e);
+ unsafe {
+ l.insert(e, &guard);
+ }
+ }
+
+ for e in v {
+ unsafe {
+ e.as_ref().unwrap().delete(&guard);
+ }
+ }
+ });
+ }
+ })
+ .unwrap();
+
+ let handle = collector.register();
+ let guard = handle.pin();
+
+ let mut iter = l.iter(&guard);
+ assert!(iter.next().is_none());
+ }
+
+ /// Contends the list on iteration to make sure that it can be iterated over concurrently.
+ #[test]
+ fn iter_multi() {
+ let collector = Collector::new();
+
+ let l: List<Entry> = List::new();
+ let b = Barrier::new(THREADS);
+
+ thread::scope(|s| {
+ for _ in 0..THREADS {
+ s.spawn(|_| {
+ b.wait();
+
+ let handle = collector.register();
+ let guard: Guard = handle.pin();
+ let mut v = Vec::with_capacity(ITERS);
+
+ for _ in 0..ITERS {
+ let e = Owned::new(Entry::default()).into_shared(&guard);
+ v.push(e);
+ unsafe {
+ l.insert(e, &guard);
+ }
+ }
+
+ let mut iter = l.iter(&guard);
+ for _ in 0..ITERS {
+ assert!(iter.next().is_some());
+ }
+
+ for e in v {
+ unsafe {
+ e.as_ref().unwrap().delete(&guard);
+ }
+ }
+ });
+ }
+ })
+ .unwrap();
+
+ let handle = collector.register();
+ let guard = handle.pin();
+
+ let mut iter = l.iter(&guard);
+ assert!(iter.next().is_none());
+ }
+}
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/sync/mod.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/mod.rs
new file mode 100644
index 0000000000..f8eb259600
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/mod.rs
@@ -0,0 +1,4 @@
+//! Synchronization primitives.
+
+pub mod list;
+pub mod queue;
diff --git a/third_party/rust/crossbeam-epoch-0.8.0/src/sync/queue.rs b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/queue.rs
new file mode 100644
index 0000000000..6fe0bef37e
--- /dev/null
+++ b/third_party/rust/crossbeam-epoch-0.8.0/src/sync/queue.rs
@@ -0,0 +1,444 @@
+//! Michael-Scott lock-free queue.
+//!
+//! Usable with any number of producers and consumers.
+//!
+//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
+//! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106
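+//!
+//! A minimal, illustrative sketch of how this queue is driven inside the crate (it is an internal
+//! type, so every operation takes an epoch `Guard`):
+//!
+//! ```ignore
+//! let queue: Queue<i64> = Queue::new();
+//! let guard = &pin();
+//!
+//! queue.push(1, guard);
+//! queue.push(2, guard);
+//! assert_eq!(queue.try_pop(guard), Some(1));
+//! assert_eq!(queue.try_pop(guard), Some(2));
+//! assert_eq!(queue.try_pop(guard), None);
+//! ```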
+
+use core::mem::{self, ManuallyDrop};
+use core::ptr;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+use crossbeam_utils::CachePadded;
+
+use {unprotected, Atomic, Guard, Owned, Shared};
+
+// The representation here is a singly-linked list, with a sentinel node at the front. In general
+// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
+// all `Blocked` (requests for data from blocked threads).
+#[derive(Debug)]
+pub struct Queue<T> {
+ head: CachePadded<Atomic<Node<T>>>,
+ tail: CachePadded<Atomic<Node<T>>>,
+}
+
+#[derive(Debug)]
+struct Node<T> {
+ /// The slot in which a value of type `T` can be stored.
+ ///
+ /// The type of `data` is `ManuallyDrop<T>` because a `Node<T>` doesn't always contain a `T`.
+ /// For example, the sentinel node in a queue never contains a value: its slot is always empty.
+ /// Other nodes start their life with a push operation and contain a value until it gets popped
+ /// out. After that such empty nodes get added to the collector for destruction.
+ data: ManuallyDrop<T>,
+
+ next: Atomic<Node<T>>,
+}
+
+// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
+unsafe impl<T: Send> Sync for Queue<T> {}
+unsafe impl<T: Send> Send for Queue<T> {}
+
+impl<T> Queue<T> {
+ /// Create a new, empty queue.
+ pub fn new() -> Queue<T> {
+ let q = Queue {
+ head: CachePadded::new(Atomic::null()),
+ tail: CachePadded::new(Atomic::null()),
+ };
+ // TODO(taiki-e): when the minimum supported Rust version is bumped to 1.36+,
+ // replace this with `mem::MaybeUninit`.
+ #[allow(deprecated)]
+ let sentinel = Owned::new(Node {
+ data: unsafe { mem::uninitialized() },
+ next: Atomic::null(),
+ });
+ unsafe {
+ let guard = &unprotected();
+ let sentinel = sentinel.into_shared(guard);
+ q.head.store(sentinel, Relaxed);
+ q.tail.store(sentinel, Relaxed);
+ q
+ }
+ }
+
+    /// Attempts to atomically place `new` into the `next` pointer of `onto`, and returns `true` on
+    /// success. The queue's `tail` pointer may be updated.
+ #[inline(always)]
+ fn push_internal(&self, onto: Shared<Node<T>>, new: Shared<Node<T>>, guard: &Guard) -> bool {
+ // is `onto` the actual tail?
+ let o = unsafe { onto.deref() };
+ let next = o.next.load(Acquire, guard);
+ if unsafe { next.as_ref().is_some() } {
+ // if not, try to "help" by moving the tail pointer forward
+ let _ = self.tail.compare_and_set(onto, next, Release, guard);
+ false
+ } else {
+            // looks like the actual tail; attempt to link in `new`
+ let result = o
+ .next
+ .compare_and_set(Shared::null(), new, Release, guard)
+ .is_ok();
+ if result {
+ // try to move the tail pointer forward
+ let _ = self.tail.compare_and_set(onto, new, Release, guard);
+ }
+ result
+ }
+ }
+
+    /// Adds `t` to the back of the queue.
+ pub fn push(&self, t: T, guard: &Guard) {
+ let new = Owned::new(Node {
+ data: ManuallyDrop::new(t),
+ next: Atomic::null(),
+ });
+ let new = Owned::into_shared(new, guard);
+
+ loop {
+ // We push onto the tail, so we'll start optimistically by looking there first.
+ let tail = self.tail.load(Acquire, guard);
+
+ // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
+ if self.push_internal(tail, new, guard) {
+ break;
+ }
+ }
+ }
+
+    /// Attempts to pop a data node. Returns `Ok(None)` if the queue is empty, and `Err(())` if it
+    /// lost a race to pop.
+ #[inline(always)]
+ fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> {
+ let head = self.head.load(Acquire, guard);
+ let h = unsafe { head.deref() };
+ let next = h.next.load(Acquire, guard);
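+        // `head` always points at the sentinel node; the first real element, if any, is its
+        // successor. On a successful pop the old sentinel is deferred for destruction and the
+        // popped node becomes the new sentinel (its data is moved out with `ptr::read`).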
+ match unsafe { next.as_ref() } {
+ Some(n) => unsafe {
+ self.head
+ .compare_and_set(head, next, Release, guard)
+ .map(|_| {
+ guard.defer_destroy(head);
+ Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
+ })
+ .map_err(|_| ())
+ },
+ None => Ok(None),
+ }
+ }
+
+    /// Attempts to pop a data node, if the data satisfies the given condition. Returns `Ok(None)`
+    /// if the queue is empty or the data does not satisfy the condition, and `Err(())` if it lost
+    /// a race to pop.
+ #[inline(always)]
+ fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()>
+ where
+ T: Sync,
+ F: Fn(&T) -> bool,
+ {
+ let head = self.head.load(Acquire, guard);
+ let h = unsafe { head.deref() };
+ let next = h.next.load(Acquire, guard);
+ match unsafe { next.as_ref() } {
+ Some(n) if condition(&n.data) => unsafe {
+ self.head
+ .compare_and_set(head, next, Release, guard)
+ .map(|_| {
+ guard.defer_destroy(head);
+ Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
+ })
+ .map_err(|_| ())
+ },
+ None | Some(_) => Ok(None),
+ }
+ }
+
+ /// Attempts to dequeue from the front.
+ ///
+ /// Returns `None` if the queue is observed to be empty.
+ pub fn try_pop(&self, guard: &Guard) -> Option<T> {
+ loop {
+ if let Ok(head) = self.pop_internal(guard) {
+ return head;
+ }
+ }
+ }
+
+ /// Attempts to dequeue from the front, if the item satisfies the given condition.
+ ///
+ /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given
+ /// condition.
+ pub fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
+ where
+ T: Sync,
+ F: Fn(&T) -> bool,
+ {
+ loop {
+ if let Ok(head) = self.pop_if_internal(&condition, guard) {
+ return head;
+ }
+ }
+ }
+}
+
+impl<T> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let guard = &unprotected();
+
+ while let Some(_) = self.try_pop(guard) {}
+
+ // Destroy the remaining sentinel node.
+ let sentinel = self.head.load(Relaxed, guard);
+ drop(sentinel.into_owned());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crossbeam_utils::thread;
+ use pin;
+
+ struct Queue<T> {
+ queue: super::Queue<T>,
+ }
+
+ impl<T> Queue<T> {
+ pub fn new() -> Queue<T> {
+ Queue {
+ queue: super::Queue::new(),
+ }
+ }
+
+ pub fn push(&self, t: T) {
+ let guard = &pin();
+ self.queue.push(t, guard);
+ }
+
+ pub fn is_empty(&self) -> bool {
+ let guard = &pin();
+ let head = self.queue.head.load(Acquire, guard);
+ let h = unsafe { head.deref() };
+ h.next.load(Acquire, guard).is_null()
+ }
+
+ pub fn try_pop(&self) -> Option<T> {
+ let guard = &pin();
+ self.queue.try_pop(guard)
+ }
+
+ pub fn pop(&self) -> T {
+ loop {
+ match self.try_pop() {
+ None => continue,
+ Some(t) => return t,
+ }
+ }
+ }
+ }
+
+ const CONC_COUNT: i64 = 1000000;
+
+ #[test]
+ fn push_try_pop_1() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ q.push(37);
+ assert!(!q.is_empty());
+ assert_eq!(q.try_pop(), Some(37));
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn push_try_pop_2() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ q.push(37);
+ q.push(48);
+ assert_eq!(q.try_pop(), Some(37));
+ assert!(!q.is_empty());
+ assert_eq!(q.try_pop(), Some(48));
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn push_try_pop_many_seq() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ for i in 0..200 {
+ q.push(i)
+ }
+ assert!(!q.is_empty());
+ for i in 0..200 {
+ assert_eq!(q.try_pop(), Some(i));
+ }
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn push_pop_1() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ q.push(37);
+ assert!(!q.is_empty());
+ assert_eq!(q.pop(), 37);
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn push_pop_2() {
+ let q: Queue<i64> = Queue::new();
+ q.push(37);
+ q.push(48);
+ assert_eq!(q.pop(), 37);
+ assert_eq!(q.pop(), 48);
+ }
+
+ #[test]
+ fn push_pop_many_seq() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ for i in 0..200 {
+ q.push(i)
+ }
+ assert!(!q.is_empty());
+ for i in 0..200 {
+ assert_eq!(q.pop(), i);
+ }
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn push_try_pop_many_spsc() {
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+
+ thread::scope(|scope| {
+ scope.spawn(|_| {
+ let mut next = 0;
+
+ while next < CONC_COUNT {
+ if let Some(elem) = q.try_pop() {
+ assert_eq!(elem, next);
+ next += 1;
+ }
+ }
+ });
+
+ for i in 0..CONC_COUNT {
+ q.push(i)
+ }
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn push_try_pop_many_spmc() {
+ fn recv(_t: i32, q: &Queue<i64>) {
+ let mut cur = -1;
+ for _i in 0..CONC_COUNT {
+ if let Some(elem) = q.try_pop() {
+ assert!(elem > cur);
+ cur = elem;
+
+ if cur == CONC_COUNT - 1 {
+ break;
+ }
+ }
+ }
+ }
+
+ let q: Queue<i64> = Queue::new();
+ assert!(q.is_empty());
+ thread::scope(|scope| {
+ for i in 0..3 {
+ let q = &q;
+ scope.spawn(move |_| recv(i, q));
+ }
+
+ scope.spawn(|_| {
+ for i in 0..CONC_COUNT {
+ q.push(i);
+ }
+ });
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn push_try_pop_many_mpmc() {
+ enum LR {
+ Left(i64),
+ Right(i64),
+ }
+
+ let q: Queue<LR> = Queue::new();
+ assert!(q.is_empty());
+
+ thread::scope(|scope| {
+ for _t in 0..2 {
+ scope.spawn(|_| {
+ for i in CONC_COUNT - 1..CONC_COUNT {
+ q.push(LR::Left(i))
+ }
+ });
+ scope.spawn(|_| {
+ for i in CONC_COUNT - 1..CONC_COUNT {
+ q.push(LR::Right(i))
+ }
+ });
+ scope.spawn(|_| {
+ let mut vl = vec![];
+ let mut vr = vec![];
+ for _i in 0..CONC_COUNT {
+ match q.try_pop() {
+ Some(LR::Left(x)) => vl.push(x),
+ Some(LR::Right(x)) => vr.push(x),
+ _ => {}
+ }
+ }
+
+ let mut vl2 = vl.clone();
+ let mut vr2 = vr.clone();
+ vl2.sort();
+ vr2.sort();
+
+ assert_eq!(vl, vl2);
+ assert_eq!(vr, vr2);
+ });
+ }
+ })
+ .unwrap();
+ }
+
+ #[test]
+ fn push_pop_many_spsc() {
+ let q: Queue<i64> = Queue::new();
+
+ thread::scope(|scope| {
+ scope.spawn(|_| {
+ let mut next = 0;
+ while next < CONC_COUNT {
+ assert_eq!(q.pop(), next);
+ next += 1;
+ }
+ });
+
+ for i in 0..CONC_COUNT {
+ q.push(i)
+ }
+ })
+ .unwrap();
+ assert!(q.is_empty());
+ }
+
+ #[test]
+ fn is_empty_dont_pop() {
+ let q: Queue<i64> = Queue::new();
+ q.push(20);
+ q.push(20);
+ assert!(!q.is_empty());
+ assert!(!q.is_empty());
+ assert!(q.try_pop().is_some());
+ }
+}