author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit:    36d22d82aa202bb199967e9512281e9a53db42c9
tree:      105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/crossbeam-epoch
parent:    Initial commit.
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/crossbeam-epoch')
27 files changed, 6425 insertions, 0 deletions
diff --git a/third_party/rust/crossbeam-epoch/.cargo-checksum.json b/third_party/rust/crossbeam-epoch/.cargo-checksum.json new file mode 100644 index 0000000000..fd1a1f9e76 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"41aadd10f28a79ac9a8f8960ed1d9b01b2c637a9c5d16c44632794ea154b195b","Cargo.lock":"6347500366837bb0270a958edbeaf955d148ffe9eaad4a4903cbb078e3d50852","Cargo.toml":"e829a9a24e8c67f588cc33360c98ecee410bb33204eab021d5a29e1c9370c342","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"d67d0cf57751a019707dd95785345ee181a10ea80237789bc3c19bf28c0d45ca","benches/defer.rs":"c330b704d96b2ad1aed29f72c37a99da534adef8cb06a3976d5f93bf567abb20","benches/flush.rs":"0389ac6c473632f0e93c962f223404cc360257f6699b4ec90b9b3be16bb6d74f","benches/pin.rs":"2f649a5153745c7930efdb32a52f9dc522f7b8cf548a251c5e2c82ee25dc3fff","build-common.rs":"502cb7494549bed6fa10ac7bea36e880eeb60290dc69b679ac5c92b376469562","build.rs":"901be3c21843440be5c456ff049f57f72ee5ec365918a772ad2a4751e52f69c5","examples/sanitize.rs":"a39d1635fa61e643e59192d7a63becc97ff81f03c1f4e03d38cedefb1525026a","no_atomic.rs":"c291992d273abba165579ada7873c34ce4f3674363a7c1ec092be856beac0355","src/atomic.rs":"1bd4275c1411852024533e8a70959dfedf72029e3253544d1fbb0cc18b6fd519","src/collector.rs":"29e5911f61510247659b0090517bd1a38d11e1ed86e35811603cb599962d9a58","src/default.rs":"62edf5e1f934eb82d8d7f010d6f25366e6851145a6f0a162372202bb63da1f3a","src/deferred.rs":"0c87df5797212778edd3c2d5fcf0cc04e8b9ed100261ecf9522f74a90804a3d5","src/epoch.rs":"d31e66d8fe62299928e25867336d96391b26a4fe890a1cae0885dfcf36d6835b","src/guard.rs":"f4439909152d38c03b6dfb6eeba6c9f07c39962187d461c92a492c27c258670b","src/internal.rs":"ac40ce276f0ed3dfd561926b78f775592eabb90790e177edde41fe50c13b8256","src/lib.rs":"a036d73230d0574011e67be11d275fe46439a1b5fc3295cb242d9179e5e0a220","src/sync/list.rs":"10aa4c59845ab9ff1d8bcb6f594b70bbe23c320fa7a2b125fdf85df88b9d61e2","src/sync/mod.rs":"326e32489d467e974c441120640a8338aa55da55c24b20276075ce9053997326","src/sync/once_lock.rs":"c03dc9c05a817e087dccf8b682f7307501542805533551da3c2bab442bc40743","src/sync/queue.rs":"06173b2255677d0d39178ceb49876fda2878f491e907c595eb65643dbb43c9ba","tests/loom.rs":"db772f4478966de6ec98774ca4093171dc942da635822a0d2d3257d31188cb9b"},"package":"46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"}
\ No newline at end of file diff --git a/third_party/rust/crossbeam-epoch/CHANGELOG.md b/third_party/rust/crossbeam-epoch/CHANGELOG.md new file mode 100644 index 0000000000..f7508aea77 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/CHANGELOG.md @@ -0,0 +1,183 @@ +# Version 0.9.14 + +- Update `memoffset` to 0.8. (#955) + +# Version 0.9.13 + +- Fix build script bug introduced in 0.9.12. (#932) + +# Version 0.9.12 + +**Note:** This release has been yanked due to regression fixed in 0.9.13. + +- Update `memoffset` to 0.7. (#926) +- Improve support for custom targets. (#922) + +# Version 0.9.11 + +- Removes the dependency on the `once_cell` crate to restore the MSRV. (#913) +- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913) + +# Version 0.9.10 + +- Bump the minimum supported Rust version to 1.38. (#877) +- Mitigate the risk of segmentation faults in buggy downstream implementations. (#879) +- Add `{Atomic, Shared}::try_into_owned` (#701) + +# Version 0.9.9 + +- Replace lazy_static with once_cell. (#817) + +# Version 0.9.8 + +- Make `Atomic::null()` const function at 1.61+. (#797) + +# Version 0.9.7 + +- Fix Miri error when `-Zmiri-check-number-validity` is enabled. (#779) + +# Version 0.9.6 + +- Add `Atomic::fetch_update`. (#706) + +# Version 0.9.5 + +- Fix UB in `Pointable` impl of `[MaybeUninit<T>]`. (#694) +- Support targets that do not have atomic CAS on stable Rust. (#698) +- Fix breakage with nightly feature due to rust-lang/rust#84510. (#692) + +# Version 0.9.4 + +**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. + +- Fix UB in `<[MaybeUninit<T>] as Pointable>::init` when global allocator failed allocation. (#690) +- Bump `loom` dependency to version 0.5. (#686) + +# Version 0.9.3 + +**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. + +- Make `loom` dependency optional. (#666) + +# Version 0.9.2 + +**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. + +- Add `Atomic::compare_exchange` and `Atomic::compare_exchange_weak`. (#628) +- Deprecate `Atomic::compare_and_set` and `Atomic::compare_and_set_weak`. Use `Atomic::compare_exchange` or `Atomic::compare_exchange_weak` instead. (#628) +- Make `const_fn` dependency optional. (#611) +- Add unstable support for `loom`. (#487) + +# Version 0.9.1 + +**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. + +- Bump `memoffset` dependency to version 0.6. (#592) + +# Version 0.9.0 + +**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. + +- Bump the minimum supported Rust version to 1.36. +- Support dynamically sized types. + +# Version 0.8.2 + +- Fix bug in release (yanking 0.8.1) + +# Version 0.8.1 + +- Bump `autocfg` dependency to version 1.0. (#460) +- Reduce stall in list iteration. (#376) +- Stop stealing from the same deque. (#448) +- Fix unsoundness issues by adopting `MaybeUninit`. (#458) +- Fix use-after-free in lock-free queue. (#466) + +# Version 0.8.0 + +- Bump the minimum required version to 1.28. +- Fix breakage with nightly feature due to rust-lang/rust#65214. +- Make `Atomic::null()` const function at 1.31+. +- Bump `crossbeam-utils` to `0.7`. + +# Version 0.7.2 + +- Add `Atomic::into_owned()`. 
+- Update `memoffset` dependency. + +# Version 0.7.1 + +- Add `Shared::deref_mut()`. +- Add a Treiber stack to examples. + +# Version 0.7.0 + +- Remove `Guard::clone()`. +- Bump dependencies. + +# Version 0.6.1 + +- Update `crossbeam-utils` to `0.6`. + +# Version 0.6.0 + +- `defer` now requires `F: Send + 'static`. +- Bump the minimum Rust version to 1.26. +- Pinning while TLS is tearing down does not fail anymore. +- Rename `Handle` to `LocalHandle`. +- Add `defer_unchecked` and `defer_destroy`. +- Remove `Clone` impl for `LocalHandle`. + +# Version 0.5.2 + +- Update `crossbeam-utils` to `0.5`. + +# Version 0.5.1 + +- Fix compatibility with the latest Rust nightly. + +# Version 0.5.0 + +- Update `crossbeam-utils` to `0.4`. +- Specify the minimum Rust version to `1.25.0`. + +# Version 0.4.3 + +- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change. + +# Version 0.4.2 + +- Expose the `Pointer` trait. +- Warn missing docs and missing debug impls. +- Update `crossbeam-utils` to `0.4`. + +# Version 0.4.1 + +- Add `Debug` impls for `Collector`, `Handle`, and `Guard`. +- Add `load_consume` to `Atomic`. +- Rename `Collector::handle` to `Collector::register`. +- Remove the `Send` implementation for `Handle` (this was a bug). Only + `Collector`s can be shared among multiple threads, while `Handle`s and + `Guard`s must stay within the thread in which they were created. + +# Version 0.4.0 + +- Update dependencies. +- Remove support for Rust 1.13. + +# Version 0.3.0 + +- Add support for Rust 1.13. +- Improve documentation for CAS. + +# Version 0.2.0 + +- Add method `Owned::into_box`. +- Fix a use-after-free bug in `Local::finalize`. +- Fix an ordering bug in `Global::push_bag`. +- Fix a bug in calculating distance between epochs. +- Remove `impl<T> Into<Box<T>> for Owned<T>`. + +# Version 0.1.0 + +- First version of the new epoch-based GC. diff --git a/third_party/rust/crossbeam-epoch/Cargo.lock b/third_party/rust/crossbeam-epoch/Cargo.lock new file mode 100644 index 0000000000..dab6b89c82 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/Cargo.lock @@ -0,0 +1,449 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "cc" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "crossbeam-epoch" +version = "0.9.14" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "loom", + "memoffset", + "rand", + "rustversion", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +dependencies = [ + "cfg-if", + "loom", +] + +[[package]] +name = "generator" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33a20a288a94683f5f4da0adecdbe095c94a77c295e514cc6484e9394dd8376e" +dependencies = [ + "cc", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.139" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "loom" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff50ecb28bb86013e935fb6683ab1f6d3a20016f123c76fd4c27470076ac30f5" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "once_cell" +version = "1.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "regex" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "rustversion" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "unicode-ident" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e745dab35a0c4c77aa3ce42d595e13d2003d6902d6b08c9ef5fc326d08da12b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" diff --git a/third_party/rust/crossbeam-epoch/Cargo.toml b/third_party/rust/crossbeam-epoch/Cargo.toml new file mode 100644 index 0000000000..0ba798595b --- /dev/null +++ b/third_party/rust/crossbeam-epoch/Cargo.toml @@ -0,0 +1,73 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.38" +name = "crossbeam-epoch" +version = "0.9.14" +description = "Epoch-based garbage collection" +homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch" +readme = "README.md" +keywords = [ + "lock-free", + "rcu", + "atomic", + "garbage", +] +categories = [ + "concurrency", + "memory-management", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/crossbeam-rs/crossbeam" + +[dependencies.cfg-if] +version = "1" + +[dependencies.crossbeam-utils] +version = "0.8.5" +default-features = false + +[dependencies.memoffset] +version = "0.8" + +[dependencies.scopeguard] +version = "1.1" +default-features = false + +[dev-dependencies.rand] +version = "0.8" + +[dev-dependencies.rustversion] +version = "1" + +[build-dependencies.autocfg] +version = "1" + +[features] +alloc = [] +default = ["std"] +loom = [ + "loom-crate", + "crossbeam-utils/loom", +] +nightly = ["crossbeam-utils/nightly"] +std = [ + "alloc", + "crossbeam-utils/std", +] + +[target."cfg(crossbeam_loom)".dependencies.loom-crate] +version = "0.5" +optional = true +package = "loom" diff --git a/third_party/rust/crossbeam-epoch/LICENSE-APACHE b/third_party/rust/crossbeam-epoch/LICENSE-APACHE new file mode 100644 index 0000000000..16fe87b06e --- /dev/null +++ b/third_party/rust/crossbeam-epoch/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/third_party/rust/crossbeam-epoch/LICENSE-MIT b/third_party/rust/crossbeam-epoch/LICENSE-MIT new file mode 100644 index 0000000000..068d491fd5 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/LICENSE-MIT @@ -0,0 +1,27 @@ +The MIT License (MIT) + +Copyright (c) 2019 The Crossbeam Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/crossbeam-epoch/README.md b/third_party/rust/crossbeam-epoch/README.md new file mode 100644 index 0000000000..2840ea7925 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/README.md @@ -0,0 +1,53 @@ +# Crossbeam Epoch + +[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( +https://github.com/crossbeam-rs/crossbeam/actions) +[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( +https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch#license) +[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)]( +https://crates.io/crates/crossbeam-epoch) +[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)]( +https://docs.rs/crossbeam-epoch) +[![Rust 1.38+](https://img.shields.io/badge/rust-1.38+-lightgray.svg)]( +https://www.rust-lang.org) +[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) + +This crate provides epoch-based garbage collection for building concurrent data structures. + +When a thread removes an object from a concurrent data structure, other threads +may be still using pointers to it at the same time, so it cannot be destroyed +immediately. Epoch-based GC is an efficient mechanism for deferring destruction of +shared objects until no pointers to them can exist. + +Everything in this crate except the global GC can be used in `no_std` environments, provided that +`alloc` feature is enabled. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +crossbeam-epoch = "0.9" +``` + +## Compatibility + +Crossbeam Epoch supports stable Rust releases going back at least six months, +and every time the minimum supported Rust version is increased, a new minor +version is released. Currently, the minimum supported Rust version is 1.38. + +## License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +#### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
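
The README above stops at adding the dependency to `Cargo.toml`. As an illustrative aside (not part of the vendored files), the sketch below shows the pin/defer pattern that the benches and `examples/sanitize.rs` in this commit exercise, assuming the 0.9 `Atomic`/`Guard` API documented in `src/atomic.rs` further down.

```rust
// Illustrative only: a minimal pin/defer round trip with crossbeam-epoch 0.9,
// the crate vendored in this commit. Not part of the vendored sources.
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    // An atomic pointer to a heap-allocated value.
    let a = Atomic::new(1234);

    {
        // Pin the current thread; the returned Guard keeps the current epoch alive.
        let guard = &epoch::pin();

        // Swap in a new value and defer destruction of the old one until no
        // pinned thread can still be holding a reference to it.
        let old = a.swap(Owned::new(5678), SeqCst, guard);
        unsafe {
            guard.defer_destroy(old);
        }
    }

    // Single-threaded teardown, mirroring the "avoid leak" lines in the doc
    // examples: take ownership of the remaining value and drop it.
    unsafe {
        drop(a.into_owned());
    }
}
```

The `Guard` returned by `epoch::pin()` is what makes the deferred destruction safe: the old value is only reclaimed once every thread that could still observe it has unpinned.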
diff --git a/third_party/rust/crossbeam-epoch/benches/defer.rs b/third_party/rust/crossbeam-epoch/benches/defer.rs new file mode 100644 index 0000000000..246f907988 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/benches/defer.rs @@ -0,0 +1,69 @@ +#![feature(test)] + +extern crate test; + +use crossbeam_epoch::{self as epoch, Owned}; +use crossbeam_utils::thread::scope; +use test::Bencher; + +#[bench] +fn single_alloc_defer_free(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer_destroy(p); + } + }); +} + +#[bench] +fn single_defer(b: &mut Bencher) { + b.iter(|| { + let guard = &epoch::pin(); + guard.defer(move || ()); + }); +} + +#[bench] +fn multi_alloc_defer_free(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + let p = Owned::new(1).into_shared(guard); + unsafe { + guard.defer_destroy(p); + } + } + }); + } + }) + .unwrap(); + }); +} + +#[bench] +fn multi_defer(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + guard.defer(move || ()); + } + }); + } + }) + .unwrap(); + }); +} diff --git a/third_party/rust/crossbeam-epoch/benches/flush.rs b/third_party/rust/crossbeam-epoch/benches/flush.rs new file mode 100644 index 0000000000..99aab19e1e --- /dev/null +++ b/third_party/rust/crossbeam-epoch/benches/flush.rs @@ -0,0 +1,52 @@ +#![feature(test)] + +extern crate test; + +use std::sync::Barrier; + +use crossbeam_epoch as epoch; +use crossbeam_utils::thread::scope; +use test::Bencher; + +#[bench] +fn single_flush(b: &mut Bencher) { + const THREADS: usize = 16; + + let start = Barrier::new(THREADS + 1); + let end = Barrier::new(THREADS + 1); + + scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + epoch::pin(); + start.wait(); + end.wait(); + }); + } + + start.wait(); + b.iter(|| epoch::pin().flush()); + end.wait(); + }) + .unwrap(); +} + +#[bench] +fn multi_flush(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 10_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + for _ in 0..STEPS { + let guard = &epoch::pin(); + guard.flush(); + } + }); + } + }) + .unwrap(); + }); +} diff --git a/third_party/rust/crossbeam-epoch/benches/pin.rs b/third_party/rust/crossbeam-epoch/benches/pin.rs new file mode 100644 index 0000000000..8bf87e9b73 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/benches/pin.rs @@ -0,0 +1,31 @@ +#![feature(test)] + +extern crate test; + +use crossbeam_epoch as epoch; +use crossbeam_utils::thread::scope; +use test::Bencher; + +#[bench] +fn single_pin(b: &mut Bencher) { + b.iter(epoch::pin); +} + +#[bench] +fn multi_pin(b: &mut Bencher) { + const THREADS: usize = 16; + const STEPS: usize = 100_000; + + b.iter(|| { + scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + for _ in 0..STEPS { + epoch::pin(); + } + }); + } + }) + .unwrap(); + }); +} diff --git a/third_party/rust/crossbeam-epoch/build-common.rs b/third_party/rust/crossbeam-epoch/build-common.rs new file mode 100644 index 0000000000..e91bb4d471 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/build-common.rs @@ -0,0 +1,13 @@ +// The target triplets have the form of 'arch-vendor-system'. +// +// When building for Linux (e.g. 
the 'system' part is +// 'linux-something'), replace the vendor with 'unknown' +// so that mapping to rust standard targets happens correctly. +fn convert_custom_linux_target(target: String) -> String { + let mut parts: Vec<&str> = target.split('-').collect(); + let system = parts.get(2); + if system == Some(&"linux") { + parts[1] = "unknown"; + }; + parts.join("-") +} diff --git a/third_party/rust/crossbeam-epoch/build.rs b/third_party/rust/crossbeam-epoch/build.rs new file mode 100644 index 0000000000..978141aa52 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/build.rs @@ -0,0 +1,57 @@ +// The rustc-cfg listed below are considered public API, but it is *unstable* +// and outside of the normal semver guarantees: +// +// - `crossbeam_no_atomic_cas` +// Assume the target does *not* support atomic CAS operations. +// This is usually detected automatically by the build script, but you may +// need to enable it manually when building for custom targets or using +// non-cargo build systems that don't run the build script. +// +// With the exceptions mentioned above, the rustc-cfg emitted by the build +// script are *not* public API. + +#![warn(rust_2018_idioms)] + +use std::env; + +include!("no_atomic.rs"); +include!("build-common.rs"); + +fn main() { + let target = match env::var("TARGET") { + Ok(target) => convert_custom_linux_target(target), + Err(e) => { + println!( + "cargo:warning={}: unable to get TARGET environment variable: {}", + env!("CARGO_PKG_NAME"), + e + ); + return; + } + }; + + let cfg = match autocfg::AutoCfg::new() { + Ok(cfg) => cfg, + Err(e) => { + println!( + "cargo:warning={}: unable to determine rustc version: {}", + env!("CARGO_PKG_NAME"), + e + ); + return; + } + }; + + // Note that this is `no_`*, not `has_*`. This allows treating as the latest + // stable rustc is used when the build script doesn't run. This is useful + // for non-cargo build systems that don't run the build script. 
+ if NO_ATOMIC_CAS.contains(&&*target) { + println!("cargo:rustc-cfg=crossbeam_no_atomic_cas"); + } + + if !cfg.probe_rustc_version(1, 61) { + println!("cargo:rustc-cfg=crossbeam_no_const_fn_trait_bound"); + } + + println!("cargo:rerun-if-changed=no_atomic.rs"); +} diff --git a/third_party/rust/crossbeam-epoch/examples/sanitize.rs b/third_party/rust/crossbeam-epoch/examples/sanitize.rs new file mode 100644 index 0000000000..4109c34a8c --- /dev/null +++ b/third_party/rust/crossbeam-epoch/examples/sanitize.rs @@ -0,0 +1,66 @@ +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; +use std::sync::Arc; +use std::thread; +use std::time::{Duration, Instant}; + +use crossbeam_epoch::{self as epoch, Atomic, Collector, LocalHandle, Owned, Shared}; +use rand::Rng; + +fn worker(a: Arc<Atomic<AtomicUsize>>, handle: LocalHandle) -> usize { + let mut rng = rand::thread_rng(); + let mut sum = 0; + + if rng.gen() { + thread::sleep(Duration::from_millis(1)); + } + let timeout = Duration::from_millis(rng.gen_range(0..10)); + let now = Instant::now(); + + while now.elapsed() < timeout { + for _ in 0..100 { + let guard = &handle.pin(); + guard.flush(); + + let val = if rng.gen() { + let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard); + unsafe { + guard.defer_destroy(p); + guard.flush(); + p.deref().load(Relaxed) + } + } else { + let p = a.load(Acquire, guard); + unsafe { p.deref().fetch_add(sum, Relaxed) } + }; + + sum = sum.wrapping_add(val); + } + } + + sum +} + +fn main() { + for _ in 0..100 { + let collector = Collector::new(); + let a = Arc::new(Atomic::new(AtomicUsize::new(777))); + + let threads = (0..16) + .map(|_| { + let a = a.clone(); + let c = collector.clone(); + thread::spawn(move || worker(a, c.register())) + }) + .collect::<Vec<_>>(); + + for t in threads { + t.join().unwrap(); + } + + unsafe { + a.swap(Shared::null(), AcqRel, epoch::unprotected()) + .into_owned(); + } + } +} diff --git a/third_party/rust/crossbeam-epoch/no_atomic.rs b/third_party/rust/crossbeam-epoch/no_atomic.rs new file mode 100644 index 0000000000..beb11b0c29 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/no_atomic.rs @@ -0,0 +1,85 @@ +// This file is @generated by no_atomic.sh. +// It is not intended for manual editing. + +const NO_ATOMIC_CAS: &[&str] = &[ + "armv4t-none-eabi", + "armv5te-none-eabi", + "avr-unknown-gnu-atmega328", + "bpfeb-unknown-none", + "bpfel-unknown-none", + "msp430-none-elf", + "riscv32i-unknown-none-elf", + "riscv32im-unknown-none-elf", + "riscv32imc-unknown-none-elf", + "thumbv4t-none-eabi", + "thumbv5te-none-eabi", + "thumbv6m-none-eabi", +]; + +#[allow(dead_code)] // Only crossbeam-utils uses this. 
+const NO_ATOMIC_64: &[&str] = &[ + "arm-linux-androideabi", + "armebv7r-none-eabi", + "armebv7r-none-eabihf", + "armv4t-none-eabi", + "armv4t-unknown-linux-gnueabi", + "armv5te-none-eabi", + "armv5te-unknown-linux-gnueabi", + "armv5te-unknown-linux-musleabi", + "armv5te-unknown-linux-uclibceabi", + "armv6k-nintendo-3ds", + "armv7-sony-vita-newlibeabihf", + "armv7r-none-eabi", + "armv7r-none-eabihf", + "avr-unknown-gnu-atmega328", + "hexagon-unknown-linux-musl", + "m68k-unknown-linux-gnu", + "mips-unknown-linux-gnu", + "mips-unknown-linux-musl", + "mips-unknown-linux-uclibc", + "mipsel-sony-psp", + "mipsel-sony-psx", + "mipsel-unknown-linux-gnu", + "mipsel-unknown-linux-musl", + "mipsel-unknown-linux-uclibc", + "mipsel-unknown-none", + "mipsisa32r6-unknown-linux-gnu", + "mipsisa32r6el-unknown-linux-gnu", + "msp430-none-elf", + "powerpc-unknown-freebsd", + "powerpc-unknown-linux-gnu", + "powerpc-unknown-linux-gnuspe", + "powerpc-unknown-linux-musl", + "powerpc-unknown-netbsd", + "powerpc-unknown-openbsd", + "powerpc-wrs-vxworks", + "powerpc-wrs-vxworks-spe", + "riscv32gc-unknown-linux-gnu", + "riscv32gc-unknown-linux-musl", + "riscv32i-unknown-none-elf", + "riscv32im-unknown-none-elf", + "riscv32imac-unknown-none-elf", + "riscv32imac-unknown-xous-elf", + "riscv32imc-unknown-none-elf", + "thumbv4t-none-eabi", + "thumbv5te-none-eabi", + "thumbv6m-none-eabi", + "thumbv7em-none-eabi", + "thumbv7em-none-eabihf", + "thumbv7m-none-eabi", + "thumbv8m.base-none-eabi", + "thumbv8m.main-none-eabi", + "thumbv8m.main-none-eabihf", +]; + +#[allow(dead_code)] // Only crossbeam-utils uses this. +const NO_ATOMIC: &[&str] = &[ + "avr-unknown-gnu-atmega328", + "bpfeb-unknown-none", + "bpfel-unknown-none", + "mipsel-sony-psx", + "msp430-none-elf", + "riscv32i-unknown-none-elf", + "riscv32im-unknown-none-elf", + "riscv32imc-unknown-none-elf", +]; diff --git a/third_party/rust/crossbeam-epoch/src/atomic.rs b/third_party/rust/crossbeam-epoch/src/atomic.rs new file mode 100644 index 0000000000..19bab47296 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/atomic.rs @@ -0,0 +1,1718 @@ +use core::borrow::{Borrow, BorrowMut}; +use core::cmp; +use core::fmt; +use core::marker::PhantomData; +use core::mem::{self, MaybeUninit}; +use core::ops::{Deref, DerefMut}; +use core::slice; +use core::sync::atomic::Ordering; + +use crate::alloc::alloc; +use crate::alloc::boxed::Box; +use crate::guard::Guard; +use crate::primitive::sync::atomic::AtomicUsize; +use crossbeam_utils::atomic::AtomicConsume; + +/// Given ordering for the success case in a compare-exchange operation, returns the strongest +/// appropriate ordering for the failure case. +#[inline] +fn strongest_failure_ordering(ord: Ordering) -> Ordering { + use self::Ordering::*; + match ord { + Relaxed | Release => Relaxed, + Acquire | AcqRel => Acquire, + _ => SeqCst, + } +} + +/// The error returned on failed compare-and-set operation. +// TODO: remove in the next major version. +#[deprecated(note = "Use `CompareExchangeError` instead")] +pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>; + +/// The error returned on failed compare-and-swap operation. +pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> { + /// The value in the atomic pointer at the time of the failed operation. + pub current: Shared<'g, T>, + + /// The new value, which the operation failed to store. 
+ pub new: P, +} + +impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CompareExchangeError") + .field("current", &self.current) + .field("new", &self.new) + .finish() + } +} + +/// Memory orderings for compare-and-set operations. +/// +/// A compare-and-set operation can have different memory orderings depending on whether it +/// succeeds or fails. This trait generalizes different ways of specifying memory orderings. +/// +/// The two ways of specifying orderings for compare-and-set are: +/// +/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate +/// ordering is chosen. +/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is +/// for the failure case. +// TODO: remove in the next major version. +#[deprecated( + note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \ + use `compare_exchange` or `compare_exchange_weak instead`" +)] +pub trait CompareAndSetOrdering { + /// The ordering of the operation when it succeeds. + fn success(&self) -> Ordering; + + /// The ordering of the operation when it fails. + /// + /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than + /// the success ordering. + fn failure(&self) -> Ordering; +} + +#[allow(deprecated)] +impl CompareAndSetOrdering for Ordering { + #[inline] + fn success(&self) -> Ordering { + *self + } + + #[inline] + fn failure(&self) -> Ordering { + strongest_failure_ordering(*self) + } +} + +#[allow(deprecated)] +impl CompareAndSetOrdering for (Ordering, Ordering) { + #[inline] + fn success(&self) -> Ordering { + self.0 + } + + #[inline] + fn failure(&self) -> Ordering { + self.1 + } +} + +/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. +#[inline] +fn low_bits<T: ?Sized + Pointable>() -> usize { + (1 << T::ALIGN.trailing_zeros()) - 1 +} + +/// Panics if the pointer is not properly unaligned. +#[inline] +fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) { + assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer"); +} + +/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. +/// +/// `tag` is truncated to fit into the unused bits of the pointer to `T`. +#[inline] +fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize { + (data & !low_bits::<T>()) | (tag & low_bits::<T>()) +} + +/// Decomposes a tagged pointer `data` into the pointer and the tag. +#[inline] +fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) { + (data & !low_bits::<T>(), data & low_bits::<T>()) +} + +/// Types that are pointed to by a single word. +/// +/// In concurrent programming, it is necessary to represent an object within a word because atomic +/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait +/// qualifies such types that are pointed to by a single word. +/// +/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is +/// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for +/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array +/// size and elements. +/// +/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In +/// particular, Crossbeam supports dynamically sized slices as follows. 
+/// +/// ``` +/// use std::mem::MaybeUninit; +/// use crossbeam_epoch::Owned; +/// +/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10] +/// ``` +pub trait Pointable { + /// The alignment of pointer. + const ALIGN: usize; + + /// The type for initializers. + type Init; + + /// Initializes a with the given initializer. + /// + /// # Safety + /// + /// The result should be a multiple of `ALIGN`. + unsafe fn init(init: Self::Init) -> usize; + + /// Dereferences the given pointer. + /// + /// # Safety + /// + /// - The given `ptr` should have been initialized with [`Pointable::init`]. + /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. + /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently. + unsafe fn deref<'a>(ptr: usize) -> &'a Self; + + /// Mutably dereferences the given pointer. + /// + /// # Safety + /// + /// - The given `ptr` should have been initialized with [`Pointable::init`]. + /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. + /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] + /// concurrently. + unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self; + + /// Drops the object pointed to by the given pointer. + /// + /// # Safety + /// + /// - The given `ptr` should have been initialized with [`Pointable::init`]. + /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. + /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] + /// concurrently. + unsafe fn drop(ptr: usize); +} + +impl<T> Pointable for T { + const ALIGN: usize = mem::align_of::<T>(); + + type Init = T; + + unsafe fn init(init: Self::Init) -> usize { + Box::into_raw(Box::new(init)) as usize + } + + unsafe fn deref<'a>(ptr: usize) -> &'a Self { + &*(ptr as *const T) + } + + unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self { + &mut *(ptr as *mut T) + } + + unsafe fn drop(ptr: usize) { + drop(Box::from_raw(ptr as *mut T)); + } +} + +/// Array with size. +/// +/// # Memory layout +/// +/// An array consisting of size and elements: +/// +/// ```text +/// elements +/// | +/// | +/// ------------------------------------ +/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 | +/// ------------------------------------ +/// ``` +/// +/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not +/// along with pointer as in `Box<[T]>`). +/// +/// Elements are not present in the type, but they will be in the allocation. +/// ``` +/// +// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use +// [`alloc::alloc::Layout::extend`] instead. +#[repr(C)] +struct Array<T> { + /// The number of elements (not the number of bytes). 
+ len: usize, + elements: [MaybeUninit<T>; 0], +} + +impl<T> Pointable for [MaybeUninit<T>] { + const ALIGN: usize = mem::align_of::<Array<T>>(); + + type Init = usize; + + unsafe fn init(len: Self::Init) -> usize { + let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len; + let align = mem::align_of::<Array<T>>(); + let layout = alloc::Layout::from_size_align(size, align).unwrap(); + let ptr = alloc::alloc(layout).cast::<Array<T>>(); + if ptr.is_null() { + alloc::handle_alloc_error(layout); + } + (*ptr).len = len; + ptr as usize + } + + unsafe fn deref<'a>(ptr: usize) -> &'a Self { + let array = &*(ptr as *const Array<T>); + slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len) + } + + unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self { + let array = &*(ptr as *mut Array<T>); + slice::from_raw_parts_mut(array.elements.as_ptr() as *mut _, array.len) + } + + unsafe fn drop(ptr: usize) { + let array = &*(ptr as *mut Array<T>); + let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.len; + let align = mem::align_of::<Array<T>>(); + let layout = alloc::Layout::from_size_align(size, align).unwrap(); + alloc::dealloc(ptr as *mut u8, layout); + } +} + +/// An atomic pointer that can be safely shared between threads. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. For example, the tag for a pointer to a sized type `T` +/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`. +/// +/// Any method that loads the pointer must be passed a reference to a [`Guard`]. +/// +/// Crossbeam supports dynamically sized types. See [`Pointable`] for details. +pub struct Atomic<T: ?Sized + Pointable> { + data: AtomicUsize, + _marker: PhantomData<*mut T>, +} + +unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {} +unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {} + +impl<T> Atomic<T> { + /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::new(1234); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn new(init: T) -> Atomic<T> { + Self::init(init) + } +} + +impl<T: ?Sized + Pointable> Atomic<T> { + /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::<i32>::init(1234); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn init(init: T::Init) -> Atomic<T> { + Self::from(Owned::init(init)) + } + + /// Returns a new atomic pointer pointing to the tagged pointer `data`. + fn from_usize(data: usize) -> Self { + Self { + data: AtomicUsize::new(data), + _marker: PhantomData, + } + } + + /// Returns a new null atomic pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::<i32>::null(); + /// ``` + #[cfg(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom)))] + pub const fn null() -> Atomic<T> { + Self { + data: AtomicUsize::new(0), + _marker: PhantomData, + } + } + + /// Returns a new null atomic pointer. + #[cfg(not(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom))))] + pub fn null() -> Atomic<T> { + Self { + data: AtomicUsize::new(0), + _marker: PhantomData, + } + } + + /// Loads a `Shared` from the atomic pointer. 
+ /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.load(ord)) } + } + + /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. + /// + /// This is similar to the "acquire" ordering, except that an ordering is + /// only guaranteed with operations that "depend on" the result of the load. + /// However consume loads are usually much faster than acquire loads on + /// architectures with a weak memory model since they don't require memory + /// fence instructions. + /// + /// The exact definition of "depend on" is a bit vague, but it works as you + /// would expect in practice since a lot of software, especially the Linux + /// kernel, rely on this behavior. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load_consume(guard); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.load_consume()) } + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak + /// a.store(Shared::null(), SeqCst); + /// a.store(Owned::new(1234), SeqCst); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) { + self.data.store(new.into_usize(), ord); + } + + /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous + /// `Shared`. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.swap(Shared::null(), SeqCst, guard); + /// # unsafe { drop(p.into_owned()); } // avoid leak + /// ``` + pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// The return value is a result indicating whether the new pointer was written. On success the + /// pointer that was written is returned. On failure the actual current value and `new` are + /// returned. 
+ /// + /// This method takes two `Ordering` arguments to describe the memory + /// ordering of this operation. `success` describes the required ordering for the + /// read-modify-write operation that takes place if the comparison with `current` succeeds. + /// `failure` describes the required ordering for the load operation that takes place when + /// the comparison fails. Using `Acquire` as success ordering makes the store part + /// of this operation `Relaxed`, and using `Release` makes the successful load + /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` + /// and must be equivalent to or weaker than the success ordering. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// + /// let guard = &epoch::pin(); + /// let curr = a.load(SeqCst, guard); + /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard); + /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard); + /// # unsafe { drop(curr.into_owned()); } // avoid leak + /// ``` + pub fn compare_exchange<'g, P>( + &self, + current: Shared<'_, T>, + new: P, + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> + where + P: Pointer<T>, + { + let new = new.into_usize(); + self.data + .compare_exchange(current.into_usize(), new, success, failure) + .map(|_| unsafe { Shared::from_usize(new) }) + .map_err(|current| unsafe { + CompareExchangeError { + current: Shared::from_usize(current), + new: P::from_usize(new), + } + }) + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison + /// succeeds, which can result in more efficient code on some platforms. The return value is a + /// result indicating whether the new pointer was written. On success the pointer that was + /// written is returned. On failure the actual current value and `new` are returned. + /// + /// This method takes two `Ordering` arguments to describe the memory + /// ordering of this operation. `success` describes the required ordering for the + /// read-modify-write operation that takes place if the comparison with `current` succeeds. + /// `failure` describes the required ordering for the load operation that takes place when + /// the comparison fails. Using `Acquire` as success ordering makes the store part + /// of this operation `Relaxed`, and using `Release` makes the successful load + /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` + /// and must be equivalent to or weaker than the success ordering. 
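In practice a successful exchange is usually paired with deferred reclamation of the displaced pointer. A minimal sketch, using only APIs defined in this file plus `Guard::defer_destroy`; the helper itself is hypothetical:

```
use crossbeam_epoch::{Atomic, Guard, Owned, Shared};
use std::sync::atomic::Ordering::{AcqRel, Acquire};

// Install `new` only if `slot` still holds `expected`; on success, retire the
// displaced value so it is freed only after all currently pinned threads unpin.
fn replace_if_current<'g>(
    slot: &Atomic<u64>,
    expected: Shared<'g, u64>,
    new: Owned<u64>,
    guard: &'g Guard,
) -> bool {
    match slot.compare_exchange(expected, new, AcqRel, Acquire, guard) {
        Ok(_) => {
            if !expected.is_null() {
                // Assumes `expected` was loaded under `guard` and is now
                // unreachable through `slot`.
                unsafe { guard.defer_destroy(expected) };
            }
            true
        }
        // On failure the rejected `Owned` comes back in the error and is dropped here.
        Err(_) => false,
    }
}
```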
+ /// + /// [`compare_exchange`]: Atomic::compare_exchange + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// + /// let mut new = Owned::new(5678); + /// let mut ptr = a.load(SeqCst, guard); + /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak + /// loop { + /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) { + /// Ok(p) => { + /// ptr = p; + /// break; + /// } + /// Err(err) => { + /// ptr = err.current; + /// new = err.new; + /// } + /// } + /// } + /// + /// let mut curr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) { + /// Ok(_) => break, + /// Err(err) => curr = err.current, + /// } + /// } + /// # unsafe { drop(curr.into_owned()); } // avoid leak + /// ``` + pub fn compare_exchange_weak<'g, P>( + &self, + current: Shared<'_, T>, + new: P, + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> + where + P: Pointer<T>, + { + let new = new.into_usize(); + self.data + .compare_exchange_weak(current.into_usize(), new, success, failure) + .map(|_| unsafe { Shared::from_usize(new) }) + .map_err(|current| unsafe { + CompareExchangeError { + current: Shared::from_usize(current), + new: P::from_usize(new), + } + }) + } + + /// Fetches the pointer, and then applies a function to it that returns a new value. + /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`. + /// + /// Note that the given function may be called multiple times if the value has been changed by + /// other threads in the meantime, as long as the function returns `Some(_)`, but the function + /// will have been applied only once to the stored value. + /// + /// `fetch_update` takes two [`Ordering`] arguments to describe the memory + /// ordering of this operation. The first describes the required ordering for + /// when the operation finally succeeds while the second describes the + /// required ordering for loads. These correspond to the success and failure + /// orderings of [`Atomic::compare_exchange`] respectively. + /// + /// Using [`Acquire`] as success ordering makes the store part of this + /// operation [`Relaxed`], and using [`Release`] makes the final successful + /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], + /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the + /// success ordering. 
+ /// + /// [`Relaxed`]: Ordering::Relaxed + /// [`Acquire`]: Ordering::Acquire + /// [`Release`]: Ordering::Release + /// [`SeqCst`]: Ordering::SeqCst + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// + /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1))); + /// assert!(res1.is_ok()); + /// + /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None); + /// assert!(res2.is_err()); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn fetch_update<'g, F>( + &self, + set_order: Ordering, + fail_order: Ordering, + guard: &'g Guard, + mut func: F, + ) -> Result<Shared<'g, T>, Shared<'g, T>> + where + F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>, + { + let mut prev = self.load(fail_order, guard); + while let Some(next) = func(prev) { + match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) { + Ok(shared) => return Ok(shared), + Err(next_prev) => prev = next_prev.current, + } + } + Err(prev) + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. + /// + /// The return value is a result indicating whether the new pointer was written. On success the + /// pointer that was written is returned. On failure the actual current value and `new` are + /// returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. + /// + /// # Migrating to `compare_exchange` + /// + /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for + /// memory orderings: + /// + /// Original | Success | Failure + /// -------- | ------- | ------- + /// Relaxed | Relaxed | Relaxed + /// Acquire | Acquire | Acquire + /// Release | Release | Relaxed + /// AcqRel | AcqRel | Acquire + /// SeqCst | SeqCst | SeqCst + /// + /// # Examples + /// + /// ``` + /// # #![allow(deprecated)] + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// + /// let guard = &epoch::pin(); + /// let curr = a.load(SeqCst, guard); + /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); + /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); + /// # unsafe { drop(curr.into_owned()); } // avoid leak + /// ``` + // TODO: remove in the next major version. + #[allow(deprecated)] + #[deprecated(note = "Use `compare_exchange` instead")] + pub fn compare_and_set<'g, O, P>( + &self, + current: Shared<'_, T>, + new: P, + ord: O, + guard: &'g Guard, + ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer<T>, + { + self.compare_exchange(current, new, ord.success(), ord.failure(), guard) + } + + /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current + /// value is the same as `current`. The tag is also taken into account, so two pointers to the + /// same object, but with different tags, will not be considered equal. 
+ /// + /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison + /// succeeds, which can result in more efficient code on some platforms. The return value is a + /// result indicating whether the new pointer was written. On success the pointer that was + /// written is returned. On failure the actual current value and `new` are returned. + /// + /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory + /// ordering of this operation. + /// + /// [`compare_and_set`]: Atomic::compare_and_set + /// + /// # Migrating to `compare_exchange_weak` + /// + /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for + /// memory orderings: + /// + /// Original | Success | Failure + /// -------- | ------- | ------- + /// Relaxed | Relaxed | Relaxed + /// Acquire | Acquire | Acquire + /// Release | Release | Relaxed + /// AcqRel | AcqRel | Acquire + /// SeqCst | SeqCst | SeqCst + /// + /// # Examples + /// + /// ``` + /// # #![allow(deprecated)] + /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// + /// let mut new = Owned::new(5678); + /// let mut ptr = a.load(SeqCst, guard); + /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak + /// loop { + /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { + /// Ok(p) => { + /// ptr = p; + /// break; + /// } + /// Err(err) => { + /// ptr = err.current; + /// new = err.new; + /// } + /// } + /// } + /// + /// let mut curr = a.load(SeqCst, guard); + /// loop { + /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { + /// Ok(_) => break, + /// Err(err) => curr = err.current, + /// } + /// } + /// # unsafe { drop(curr.into_owned()); } // avoid leak + /// ``` + // TODO: remove in the next major version. + #[allow(deprecated)] + #[deprecated(note = "Use `compare_exchange_weak` instead")] + pub fn compare_and_set_weak<'g, O, P>( + &self, + current: Shared<'_, T>, + new: P, + ord: O, + guard: &'g Guard, + ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> + where + O: CompareAndSetOrdering, + P: Pointer<T>, + { + self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard) + } + + /// Bitwise "and" with the current tag. + /// + /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::<i32>::from(Shared::null().with_tag(3)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) } + } + + /// Bitwise "or" with the current tag. + /// + /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 3); + /// ``` + pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) } + } + + /// Bitwise "xor" with the current tag. + /// + /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the + /// new tag to the result. Returns the previous pointer. + /// + /// This method takes an [`Ordering`] argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); + /// let guard = &epoch::pin(); + /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); + /// assert_eq!(a.load(SeqCst, guard).tag(), 2); + /// ``` + pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) } + } + + /// Takes ownership of the pointee. + /// + /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a + /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for + /// destructors of data structures. + /// + /// # Panics + /// + /// Panics if this pointer is null, but only in debug mode. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object. + /// + /// # Examples + /// + /// ```rust + /// # use std::mem; + /// # use crossbeam_epoch::Atomic; + /// struct DataStructure { + /// ptr: Atomic<usize>, + /// } + /// + /// impl Drop for DataStructure { + /// fn drop(&mut self) { + /// // By now the DataStructure lives only in our thread and we are sure we don't hold + /// // any Shared or & to it ourselves. + /// unsafe { + /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned()); + /// } + /// } + /// } + /// ``` + pub unsafe fn into_owned(self) -> Owned<T> { + #[cfg(crossbeam_loom)] + { + // FIXME: loom does not yet support into_inner, so we use unsync_load for now, + // which should have the same synchronization properties: + // https://github.com/tokio-rs/loom/issues/117 + Owned::from_usize(self.data.unsync_load()) + } + #[cfg(not(crossbeam_loom))] + { + Owned::from_usize(self.data.into_inner()) + } + } + + /// Takes ownership of the pointee if it is non-null. + /// + /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a + /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for + /// destructors of data structures. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object, or the pointer is null. 
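The tag bits manipulated by `fetch_and`/`fetch_or`/`fetch_xor` are commonly used as mark flags. A minimal sketch, assuming an 8-byte-aligned pointee (`u64`) so the low bits are free; the helper names are hypothetical:

```
use crossbeam_epoch::{self as epoch, Atomic, Shared};
use std::sync::atomic::Ordering::SeqCst;

// Set the lowest tag bit of a link, e.g. as a "logically deleted" mark.
fn mark<'g>(link: &Atomic<u64>, guard: &'g epoch::Guard) -> Shared<'g, u64> {
    link.fetch_or(1, SeqCst, guard)
}

// Check the mark on a previously loaded pointer.
fn is_marked(p: Shared<'_, u64>) -> bool {
    p.tag() & 1 == 1
}
```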
+ /// + /// # Examples + /// + /// ```rust + /// # use std::mem; + /// # use crossbeam_epoch::Atomic; + /// struct DataStructure { + /// ptr: Atomic<usize>, + /// } + /// + /// impl Drop for DataStructure { + /// fn drop(&mut self) { + /// // By now the DataStructure lives only in our thread and we are sure we don't hold + /// // any Shared or & to it ourselves, but it may be null, so we have to be careful. + /// let old = mem::replace(&mut self.ptr, Atomic::null()); + /// unsafe { + /// if let Some(x) = old.try_into_owned() { + /// drop(x) + /// } + /// } + /// } + /// } + /// ``` + pub unsafe fn try_into_owned(self) -> Option<Owned<T>> { + // FIXME: See self.into_owned() + #[cfg(crossbeam_loom)] + let data = self.data.unsync_load(); + #[cfg(not(crossbeam_loom))] + let data = self.data.into_inner(); + if decompose_tag::<T>(data).0 == 0 { + None + } else { + Some(Owned::from_usize(data)) + } + } +} + +impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, tag) = decompose_tag::<T>(data); + + f.debug_struct("Atomic") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let data = self.data.load(Ordering::SeqCst); + let (raw, _) = decompose_tag::<T>(data); + fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f) + } +} + +impl<T: ?Sized + Pointable> Clone for Atomic<T> { + /// Returns a copy of the atomic value. + /// + /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other + /// atomics or fences. + fn clone(&self) -> Self { + let data = self.data.load(Ordering::Relaxed); + Atomic::from_usize(data) + } +} + +impl<T: ?Sized + Pointable> Default for Atomic<T> { + fn default() -> Self { + Atomic::null() + } +} + +impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> { + /// Returns a new atomic pointer pointing to `owned`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Owned}; + /// + /// let a = Atomic::<i32>::from(Owned::new(1234)); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + fn from(owned: Owned<T>) -> Self { + let data = owned.data; + mem::forget(owned); + Self::from_usize(data) + } +} + +impl<T> From<Box<T>> for Atomic<T> { + fn from(b: Box<T>) -> Self { + Self::from(Owned::from(b)) + } +} + +impl<T> From<T> for Atomic<T> { + fn from(t: T) -> Self { + Self::new(t) + } +} + +impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> { + /// Returns a new atomic pointer pointing to `ptr`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{Atomic, Shared}; + /// + /// let a = Atomic::<i32>::from(Shared::<i32>::null()); + /// ``` + fn from(ptr: Shared<'g, T>) -> Self { + Self::from_usize(ptr.data) + } +} + +impl<T> From<*const T> for Atomic<T> { + /// Returns a new atomic pointer pointing to `raw`. + /// + /// # Examples + /// + /// ``` + /// use std::ptr; + /// use crossbeam_epoch::Atomic; + /// + /// let a = Atomic::<i32>::from(ptr::null::<i32>()); + /// ``` + fn from(raw: *const T) -> Self { + Self::from_usize(raw as usize) + } +} + +/// A trait for either `Owned` or `Shared` pointers. +pub trait Pointer<T: ?Sized + Pointable> { + /// Returns the machine representation of the pointer. + fn into_usize(self) -> usize; + + /// Returns a new pointer pointing to the tagged pointer `data`. 
+ /// + /// # Safety + /// + /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should + /// not be converted back by `Pointer::from_usize()` multiple times. + unsafe fn from_usize(data: usize) -> Self; +} + +/// An owned heap-allocated object. +/// +/// This type is very similar to `Box<T>`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Owned<T: ?Sized + Pointable> { + data: usize, + _marker: PhantomData<Box<T>>, +} + +impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> { + #[inline] + fn into_usize(self) -> usize { + let data = self.data; + mem::forget(self); + data + } + + /// Returns a new pointer pointing to the tagged pointer `data`. + /// + /// # Panics + /// + /// Panics if the data is zero in debug mode. + #[inline] + unsafe fn from_usize(data: usize) -> Self { + debug_assert!(data != 0, "converting zero into `Owned`"); + Owned { + data, + _marker: PhantomData, + } + } +} + +impl<T> Owned<T> { + /// Returns a new owned pointer pointing to `raw`. + /// + /// This function is unsafe because improper use may lead to memory problems. Argument `raw` + /// must be a valid pointer. Also, a double-free may occur if the function is called twice on + /// the same raw pointer. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. + /// + /// # Safety + /// + /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted + /// back by `Owned::from_raw()` multiple times. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + pub unsafe fn from_raw(raw: *mut T) -> Owned<T> { + let raw = raw as usize; + ensure_aligned::<T>(raw); + Self::from_usize(raw) + } + + /// Converts the owned pointer into a `Box`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(1234); + /// let b: Box<i32> = o.into_box(); + /// assert_eq!(*b, 1234); + /// ``` + pub fn into_box(self) -> Box<T> { + let (raw, _) = decompose_tag::<T>(self.data); + mem::forget(self); + unsafe { Box::from_raw(raw as *mut _) } + } + + /// Allocates `value` on the heap and returns a new owned pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(1234); + /// ``` + pub fn new(init: T) -> Owned<T> { + Self::init(init) + } +} + +impl<T: ?Sized + Pointable> Owned<T> { + /// Allocates `value` on the heap and returns a new owned pointer pointing to it. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::<i32>::init(1234); + /// ``` + pub fn init(init: T::Init) -> Owned<T> { + unsafe { Self::from_usize(T::init(init)) } + } + + /// Converts the owned pointer into a [`Shared`]. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Owned}; + /// + /// let o = Owned::new(1234); + /// let guard = &epoch::pin(); + /// let p = o.into_shared(guard); + /// # unsafe { drop(p.into_owned()); } // avoid leak + /// ``` + #[allow(clippy::needless_lifetimes)] + pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { + unsafe { Shared::from_usize(self.into_usize()) } + } + + /// Returns the tag stored within the pointer. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// assert_eq!(Owned::new(1234).tag(), 0); + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_tag::<T>(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = Owned::new(0u64); + /// assert_eq!(o.tag(), 0); + /// let o = o.with_tag(2); + /// assert_eq!(o.tag(), 2); + /// ``` + pub fn with_tag(self, tag: usize) -> Owned<T> { + let data = self.into_usize(); + unsafe { Self::from_usize(compose_tag::<T>(data, tag)) } + } +} + +impl<T: ?Sized + Pointable> Drop for Owned<T> { + fn drop(&mut self) { + let (raw, _) = decompose_tag::<T>(self.data); + unsafe { + T::drop(raw); + } + } +} + +impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (raw, tag) = decompose_tag::<T>(self.data); + + f.debug_struct("Owned") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl<T: Clone> Clone for Owned<T> { + fn clone(&self) -> Self { + Owned::new((**self).clone()).with_tag(self.tag()) + } +} + +impl<T: ?Sized + Pointable> Deref for Owned<T> { + type Target = T; + + fn deref(&self) -> &T { + let (raw, _) = decompose_tag::<T>(self.data); + unsafe { T::deref(raw) } + } +} + +impl<T: ?Sized + Pointable> DerefMut for Owned<T> { + fn deref_mut(&mut self) -> &mut T { + let (raw, _) = decompose_tag::<T>(self.data); + unsafe { T::deref_mut(raw) } + } +} + +impl<T> From<T> for Owned<T> { + fn from(t: T) -> Self { + Owned::new(t) + } +} + +impl<T> From<Box<T>> for Owned<T> { + /// Returns a new owned pointer pointing to `b`. + /// + /// # Panics + /// + /// Panics if the pointer (the `Box`) is not properly aligned. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Owned; + /// + /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; + /// ``` + fn from(b: Box<T>) -> Self { + unsafe { Self::from_raw(Box::into_raw(b)) } + } +} + +impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> { + fn borrow(&self) -> &T { + self.deref() + } +} + +impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> { + fn borrow_mut(&mut self) -> &mut T { + self.deref_mut() + } +} + +impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> { + fn as_ref(&self) -> &T { + self.deref() + } +} + +impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> { + fn as_mut(&mut self) -> &mut T { + self.deref_mut() + } +} + +/// A pointer to an object protected by the epoch GC. +/// +/// The pointer is valid for use only during the lifetime `'g`. +/// +/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused +/// least significant bits of the address. +pub struct Shared<'g, T: 'g + ?Sized + Pointable> { + data: usize, + _marker: PhantomData<(&'g (), *const T)>, +} + +impl<T: ?Sized + Pointable> Clone for Shared<'_, T> { + fn clone(&self) -> Self { + Self { + data: self.data, + _marker: PhantomData, + } + } +} + +impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {} + +impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> { + #[inline] + fn into_usize(self) -> usize { + self.data + } + + #[inline] + unsafe fn from_usize(data: usize) -> Self { + Shared { + data, + _marker: PhantomData, + } + } +} + +impl<'g, T> Shared<'g, T> { + /// Converts the pointer to a raw pointer (without the tag). 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let o = Owned::new(1234); + /// let raw = &*o as *const _; + /// let a = Atomic::from(o); + /// + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.as_raw(), raw); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn as_raw(&self) -> *const T { + let (raw, _) = decompose_tag::<T>(self.data); + raw as *const _ + } +} + +impl<'g, T: ?Sized + Pointable> Shared<'g, T> { + /// Returns a new null pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = Shared::<i32>::null(); + /// assert!(p.is_null()); + /// ``` + pub fn null() -> Shared<'g, T> { + Shared { + data: 0, + _marker: PhantomData, + } + } + + /// Returns `true` if the pointer is null. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::null(); + /// let guard = &epoch::pin(); + /// assert!(a.load(SeqCst, guard).is_null()); + /// a.store(Owned::new(1234), SeqCst); + /// assert!(!a.load(SeqCst, guard).is_null()); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn is_null(&self) -> bool { + let (raw, _) = decompose_tag::<T>(self.data); + raw == 0 + } + + /// Dereferences the pointer. + /// + /// Returns a reference to the pointee that is valid during the lifetime `'g`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possibility of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.deref(), &1234); + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub unsafe fn deref(&self) -> &'g T { + let (raw, _) = decompose_tag::<T>(self.data); + T::deref(raw) + } + + /// Dereferences the pointer. + /// + /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`. + /// + /// # Safety + /// + /// * There is no guarantee that there are no more threads attempting to read/write from/to the + /// actual object at the same time. + /// + /// The user must know that there are no concurrent accesses towards the object itself. + /// + /// * Other than the above, all safety concerns of `deref()` applies here. 
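As the safety note above suggests, pairing a `Release` store with an `Acquire` load is enough to make the dereference on the reader side sound. A minimal sketch with hypothetical helpers, assuming the slot starts out null so the store leaks nothing:

```
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::{Acquire, Release};

// Writer: release-store a fully initialized object.
fn publish(slot: &Atomic<String>, value: String) {
    slot.store(Owned::new(value), Release);
}

// Reader: acquire-load, so the dereference observes the writer's initialization.
fn read_len(slot: &Atomic<String>) -> Option<usize> {
    let guard = &epoch::pin();
    let p = slot.load(Acquire, guard);
    unsafe { p.as_ref() }.map(|s| s.len())
}
```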
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(vec![1, 2, 3, 4]); + /// let guard = &epoch::pin(); + /// + /// let mut p = a.load(SeqCst, guard); + /// unsafe { + /// assert!(!p.is_null()); + /// let b = p.deref_mut(); + /// assert_eq!(b, &vec![1, 2, 3, 4]); + /// b.push(5); + /// assert_eq!(b, &vec![1, 2, 3, 4, 5]); + /// } + /// + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]); + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub unsafe fn deref_mut(&mut self) -> &'g mut T { + let (raw, _) = decompose_tag::<T>(self.data); + T::deref_mut(raw) + } + + /// Converts the pointer to a reference. + /// + /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. + /// + /// # Safety + /// + /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. + /// + /// Another concern is the possibility of data races due to lack of proper synchronization. + /// For example, consider the following scenario: + /// + /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` + /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` + /// + /// The problem is that relaxed orderings don't synchronize initialization of the object with + /// the read from the second thread. This is a data race. A possible solution would be to use + /// `Release` and `Acquire` orderings. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// unsafe { + /// assert_eq!(p.as_ref(), Some(&1234)); + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub unsafe fn as_ref(&self) -> Option<&'g T> { + let (raw, _) = decompose_tag::<T>(self.data); + if raw == 0 { + None + } else { + Some(T::deref(raw)) + } + } + + /// Takes ownership of the pointee. + /// + /// # Panics + /// + /// Panics if this pointer is null, but only in debug mode. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// unsafe { + /// let guard = &epoch::unprotected(); + /// let p = a.load(SeqCst, guard); + /// drop(p.into_owned()); + /// } + /// ``` + pub unsafe fn into_owned(self) -> Owned<T> { + debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`"); + Owned::from_usize(self.data) + } + + /// Takes ownership of the pointee if it is not null. + /// + /// # Safety + /// + /// This method may be called only if the pointer is valid and nobody else is holding a + /// reference to the same object, or if the pointer is null. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(1234); + /// unsafe { + /// let guard = &epoch::unprotected(); + /// let p = a.load(SeqCst, guard); + /// if let Some(x) = p.try_into_owned() { + /// drop(x); + /// } + /// } + /// ``` + pub unsafe fn try_into_owned(self) -> Option<Owned<T>> { + if self.is_null() { + None + } else { + Some(Owned::from_usize(self.data)) + } + } + + /// Returns the tag stored within the pointer. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2)); + /// let guard = &epoch::pin(); + /// let p = a.load(SeqCst, guard); + /// assert_eq!(p.tag(), 2); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn tag(&self) -> usize { + let (_, tag) = decompose_tag::<T>(self.data); + tag + } + + /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the + /// unused bits of the pointer to `T`. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(0u64); + /// let guard = &epoch::pin(); + /// let p1 = a.load(SeqCst, guard); + /// let p2 = p1.with_tag(2); + /// + /// assert_eq!(p1.tag(), 0); + /// assert_eq!(p2.tag(), 2); + /// assert_eq!(p1.as_raw(), p2.as_raw()); + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { + unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) } + } +} + +impl<T> From<*const T> for Shared<'_, T> { + /// Returns a new pointer pointing to `raw`. + /// + /// # Panics + /// + /// Panics if `raw` is not properly aligned. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::Shared; + /// + /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _); + /// assert!(!p.is_null()); + /// # unsafe { drop(p.into_owned()); } // avoid leak + /// ``` + fn from(raw: *const T) -> Self { + let raw = raw as usize; + ensure_aligned::<T>(raw); + unsafe { Self::from_usize(raw) } + } +} + +impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> { + fn eq(&self, other: &Self) -> bool { + self.data == other.data + } +} + +impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {} + +impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> { + fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { + self.data.partial_cmp(&other.data) + } +} + +impl<T: ?Sized + Pointable> Ord for Shared<'_, T> { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.data.cmp(&other.data) + } +} + +impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (raw, tag) = decompose_tag::<T>(self.data); + + f.debug_struct("Shared") + .field("raw", &raw) + .field("tag", &tag) + .finish() + } +} + +impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f) + } +} + +impl<T: ?Sized + Pointable> Default for Shared<'_, T> { + fn default() -> Self { + Shared::null() + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + use super::{Owned, Shared}; + use std::mem::MaybeUninit; + + #[test] + fn valid_tag_i8() { + Shared::<i8>::null().with_tag(0); + } + + #[test] + fn valid_tag_i64() { + Shared::<i64>::null().with_tag(7); + } + + #[rustversion::since(1.61)] + #[test] + fn const_atomic_null() { + use super::Atomic; + static _U: Atomic<u8> = Atomic::<u8>::null(); + } + + #[test] + fn array_init() { + let owned = Owned::<[MaybeUninit<usize>]>::init(10); + let arr: &[MaybeUninit<usize>] = &owned; + assert_eq!(arr.len(), 10); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/collector.rs b/third_party/rust/crossbeam-epoch/src/collector.rs new file mode 100644 index 0000000000..5b08511845 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/collector.rs @@ -0,0 +1,463 @@ +/// Epoch-based garbage collector. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_epoch::Collector; +/// +/// let collector = Collector::new(); +/// +/// let handle = collector.register(); +/// drop(collector); // `handle` still works after dropping `collector` +/// +/// handle.pin().flush(); +/// ``` +use core::fmt; + +use crate::guard::Guard; +use crate::internal::{Global, Local}; +use crate::primitive::sync::Arc; + +/// An epoch-based garbage collector. +pub struct Collector { + pub(crate) global: Arc<Global>, +} + +unsafe impl Send for Collector {} +unsafe impl Sync for Collector {} + +impl Default for Collector { + fn default() -> Self { + Self { + global: Arc::new(Global::new()), + } + } +} + +impl Collector { + /// Creates a new collector. + pub fn new() -> Self { + Self::default() + } + + /// Registers a new handle for the collector. + pub fn register(&self) -> LocalHandle { + Local::register(self) + } +} + +impl Clone for Collector { + /// Creates another reference to the same garbage collector. + fn clone(&self) -> Self { + Collector { + global: self.global.clone(), + } + } +} + +impl fmt::Debug for Collector { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Collector { .. 
}") + } +} + +impl PartialEq for Collector { + /// Checks if both handles point to the same collector. + fn eq(&self, rhs: &Collector) -> bool { + Arc::ptr_eq(&self.global, &rhs.global) + } +} +impl Eq for Collector {} + +/// A handle to a garbage collector. +pub struct LocalHandle { + pub(crate) local: *const Local, +} + +impl LocalHandle { + /// Pins the handle. + #[inline] + pub fn pin(&self) -> Guard { + unsafe { (*self.local).pin() } + } + + /// Returns `true` if the handle is pinned. + #[inline] + pub fn is_pinned(&self) -> bool { + unsafe { (*self.local).is_pinned() } + } + + /// Returns the `Collector` associated with this handle. + #[inline] + pub fn collector(&self) -> &Collector { + unsafe { (*self.local).collector() } + } +} + +impl Drop for LocalHandle { + #[inline] + fn drop(&mut self) { + unsafe { + Local::release_handle(&*self.local); + } + } +} + +impl fmt::Debug for LocalHandle { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("LocalHandle { .. }") + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + use std::mem::ManuallyDrop; + use std::sync::atomic::{AtomicUsize, Ordering}; + + use crossbeam_utils::thread; + + use crate::{Collector, Owned}; + + const NUM_THREADS: usize = 8; + + #[test] + fn pin_reentrant() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + assert!(!handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + { + let _guard = &handle.pin(); + assert!(handle.is_pinned()); + } + assert!(handle.is_pinned()); + } + assert!(!handle.is_pinned()); + } + + #[test] + fn flush_local_bag() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + for _ in 0..100 { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(7).into_shared(guard); + guard.defer_destroy(a); + + assert!(!(*guard.local).bag.with(|b| (*b).is_empty())); + + while !(*guard.local).bag.with(|b| (*b).is_empty()) { + guard.flush(); + } + } + } + } + + #[test] + fn garbage_buffering() { + let collector = Collector::new(); + let handle = collector.register(); + drop(collector); + + let guard = &handle.pin(); + unsafe { + for _ in 0..10 { + let a = Owned::new(7).into_shared(guard); + guard.defer_destroy(a); + } + assert!(!(*guard.local).bag.with(|b| (*b).is_empty())); + } + } + + #[test] + fn pin_holds_advance() { + #[cfg(miri)] + const N: usize = 500; + #[cfg(not(miri))] + const N: usize = 500_000; + + let collector = Collector::new(); + + thread::scope(|scope| { + for _ in 0..NUM_THREADS { + scope.spawn(|_| { + let handle = collector.register(); + for _ in 0..N { + let guard = &handle.pin(); + + let before = collector.global.epoch.load(Ordering::Relaxed); + collector.global.collect(guard); + let after = collector.global.epoch.load(Ordering::Relaxed); + + assert!(after.wrapping_sub(before) <= 2); + } + }); + } + }) + .unwrap(); + } + + #[cfg(not(crossbeam_sanitize))] // TODO: assertions failed due to `cfg(crossbeam_sanitize)` reduce `internal::MAX_OBJECTS` + #[test] + fn incremental() { + #[cfg(miri)] + const COUNT: usize = 500; + #[cfg(not(miri))] + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = AtomicUsize::new(0); + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + 
let mut last = 0; + + while last < COUNT { + let curr = DESTROYS.load(Ordering::Relaxed); + assert!(curr - last <= 1024); + last = curr; + + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert!(DESTROYS.load(Ordering::Relaxed) == COUNT); + } + + #[test] + fn buffering() { + const COUNT: usize = 10; + #[cfg(miri)] + const N: usize = 500; + #[cfg(not(miri))] + const N: usize = 100_000; + static DESTROYS: AtomicUsize = AtomicUsize::new(0); + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + } + + for _ in 0..N { + collector.global.collect(&handle.pin()); + } + assert!(DESTROYS.load(Ordering::Relaxed) < COUNT); + + handle.pin().flush(); + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_drops() { + #[cfg(miri)] + const COUNT: usize = 500; + #[cfg(not(miri))] + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = AtomicUsize::new(0); + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer_destroy(a); + } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn count_destroy() { + #[cfg(miri)] + const COUNT: usize = 500; + #[cfg(not(miri))] + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = AtomicUsize::new(0); + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + for _ in 0..COUNT { + let a = Owned::new(7i32).into_shared(guard); + guard.defer_unchecked(move || { + drop(a.into_owned()); + DESTROYS.fetch_add(1, Ordering::Relaxed); + }); + } + guard.flush(); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn drop_array() { + const COUNT: usize = 700; + static DROPS: AtomicUsize = AtomicUsize::new(0); + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + let handle = collector.register(); + + let mut guard = handle.pin(); + + let mut v = Vec::with_capacity(COUNT); + for i in 0..COUNT { + v.push(Elem(i as i32)); + } + + { + let a = Owned::new(v).into_shared(&guard); + unsafe { + guard.defer_destroy(a); + } + guard.flush(); + } + + while DROPS.load(Ordering::Relaxed) < COUNT { + guard.repin(); + collector.global.collect(&guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn destroy_array() { + #[cfg(miri)] + const COUNT: usize = 500; + #[cfg(not(miri))] + const COUNT: usize = 100_000; + static DESTROYS: AtomicUsize = AtomicUsize::new(0); + + let collector = Collector::new(); + let handle = collector.register(); + + unsafe { + let guard = &handle.pin(); + + let mut v = Vec::with_capacity(COUNT); + for 
i in 0..COUNT { + v.push(i as i32); + } + + let len = v.len(); + let ptr = ManuallyDrop::new(v).as_mut_ptr() as usize; + guard.defer_unchecked(move || { + drop(Vec::from_raw_parts(ptr as *const i32 as *mut i32, len, len)); + DESTROYS.fetch_add(len, Ordering::Relaxed); + }); + guard.flush(); + } + + while DESTROYS.load(Ordering::Relaxed) < COUNT { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); + } + + #[test] + fn stress() { + const THREADS: usize = 8; + #[cfg(miri)] + const COUNT: usize = 500; + #[cfg(not(miri))] + const COUNT: usize = 100_000; + static DROPS: AtomicUsize = AtomicUsize::new(0); + + struct Elem(i32); + + impl Drop for Elem { + fn drop(&mut self) { + DROPS.fetch_add(1, Ordering::Relaxed); + } + } + + let collector = Collector::new(); + + thread::scope(|scope| { + for _ in 0..THREADS { + scope.spawn(|_| { + let handle = collector.register(); + for _ in 0..COUNT { + let guard = &handle.pin(); + unsafe { + let a = Owned::new(Elem(7i32)).into_shared(guard); + guard.defer_destroy(a); + } + } + }); + } + }) + .unwrap(); + + let handle = collector.register(); + while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { + let guard = &handle.pin(); + collector.global.collect(guard); + } + assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/default.rs b/third_party/rust/crossbeam-epoch/src/default.rs new file mode 100644 index 0000000000..b42c1c7a7f --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/default.rs @@ -0,0 +1,93 @@ +//! The default garbage collector. +//! +//! For each thread, a participant is lazily initialized on its first use, when the current thread +//! is registered in the default collector. If initialized, the thread's participant will get +//! destructed on thread exit, which in turn unregisters the thread. + +use crate::collector::{Collector, LocalHandle}; +use crate::guard::Guard; +use crate::primitive::thread_local; +#[cfg(not(crossbeam_loom))] +use crate::sync::once_lock::OnceLock; + +fn collector() -> &'static Collector { + #[cfg(not(crossbeam_loom))] + { + /// The global data for the default garbage collector. + static COLLECTOR: OnceLock<Collector> = OnceLock::new(); + COLLECTOR.get_or_init(Collector::new) + } + // FIXME: loom does not currently provide the equivalent of Lazy: + // https://github.com/tokio-rs/loom/issues/263 + #[cfg(crossbeam_loom)] + { + loom::lazy_static! { + /// The global data for the default garbage collector. + static ref COLLECTOR: Collector = Collector::new(); + } + &COLLECTOR + } +} + +thread_local! { + /// The per-thread participant for the default garbage collector. + static HANDLE: LocalHandle = collector().register(); +} + +/// Pins the current thread. +#[inline] +pub fn pin() -> Guard { + with_handle(|handle| handle.pin()) +} + +/// Returns `true` if the current thread is pinned. +#[inline] +pub fn is_pinned() -> bool { + with_handle(|handle| handle.is_pinned()) +} + +/// Returns the default global collector. +pub fn default_collector() -> &'static Collector { + collector() +} + +#[inline] +fn with_handle<F, R>(mut f: F) -> R +where + F: FnMut(&LocalHandle) -> R, +{ + HANDLE + .try_with(|h| f(h)) + .unwrap_or_else(|_| f(&collector().register())) +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + use crossbeam_utils::thread; + + #[test] + fn pin_while_exiting() { + struct Foo; + + impl Drop for Foo { + fn drop(&mut self) { + // Pin after `HANDLE` has been dropped. 
This must not panic. + super::pin(); + } + } + + thread_local! { + static FOO: Foo = Foo; + } + + thread::scope(|scope| { + scope.spawn(|_| { + // Initialize `FOO` and then `HANDLE`. + FOO.with(|_| ()); + super::pin(); + // At thread exit, `HANDLE` gets dropped first and `FOO` second. + }); + }) + .unwrap(); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/deferred.rs b/third_party/rust/crossbeam-epoch/src/deferred.rs new file mode 100644 index 0000000000..2f3d79fdf1 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/deferred.rs @@ -0,0 +1,147 @@ +use alloc::boxed::Box; +use core::fmt; +use core::marker::PhantomData; +use core::mem::{self, MaybeUninit}; +use core::ptr; + +/// Number of words a piece of `Data` can hold. +/// +/// Three words should be enough for the majority of cases. For example, you can fit inside it the +/// function pointer together with a fat pointer representing an object that needs to be destroyed. +const DATA_WORDS: usize = 3; + +/// Some space to keep a `FnOnce()` object on the stack. +type Data = [usize; DATA_WORDS]; + +/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. +/// +/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. +pub(crate) struct Deferred { + call: unsafe fn(*mut u8), + data: MaybeUninit<Data>, + _marker: PhantomData<*mut ()>, // !Send + !Sync +} + +impl fmt::Debug for Deferred { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + f.pad("Deferred { .. }") + } +} + +impl Deferred { + pub(crate) const NO_OP: Self = { + fn no_op_call(_raw: *mut u8) {} + Self { + call: no_op_call, + data: MaybeUninit::uninit(), + _marker: PhantomData, + } + }; + + /// Constructs a new `Deferred` from a `FnOnce()`. + pub(crate) fn new<F: FnOnce()>(f: F) -> Self { + let size = mem::size_of::<F>(); + let align = mem::align_of::<F>(); + + unsafe { + if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() { + let mut data = MaybeUninit::<Data>::uninit(); + ptr::write(data.as_mut_ptr().cast::<F>(), f); + + unsafe fn call<F: FnOnce()>(raw: *mut u8) { + let f: F = ptr::read(raw.cast::<F>()); + f(); + } + + Deferred { + call: call::<F>, + data, + _marker: PhantomData, + } + } else { + let b: Box<F> = Box::new(f); + let mut data = MaybeUninit::<Data>::uninit(); + ptr::write(data.as_mut_ptr().cast::<Box<F>>(), b); + + unsafe fn call<F: FnOnce()>(raw: *mut u8) { + // It's safe to cast `raw` from `*mut u8` to `*mut Box<F>`, because `raw` is + // originally derived from `*mut Box<F>`. + let b: Box<F> = ptr::read(raw.cast::<Box<F>>()); + (*b)(); + } + + Deferred { + call: call::<F>, + data, + _marker: PhantomData, + } + } + } + } + + /// Calls the function. 
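A quick way to see why three words suffice for the common case described above (illustrative only):

```
use std::mem::{size_of, size_of_val};

let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
// The capture is just the fat pointer (data pointer + length): two words,
// which fits in the three-word inline buffer without a heap allocation.
let f = move || assert_eq!(*a, [2, 3, 5, 7]);
assert!(size_of_val(&f) <= 3 * size_of::<usize>());
```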
+ #[inline] + pub(crate) fn call(mut self) { + let call = self.call; + unsafe { call(self.data.as_mut_ptr().cast::<u8>()) }; + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + #![allow(clippy::drop_copy)] + + use super::Deferred; + use std::cell::Cell; + + #[test] + fn on_stack() { + let fired = &Cell::new(false); + let a = [0usize; 1]; + + let d = Deferred::new(move || { + drop(a); + fired.set(true); + }); + + assert!(!fired.get()); + d.call(); + assert!(fired.get()); + } + + #[test] + fn on_heap() { + let fired = &Cell::new(false); + let a = [0usize; 10]; + + let d = Deferred::new(move || { + drop(a); + fired.set(true); + }); + + assert!(!fired.get()); + d.call(); + assert!(fired.get()); + } + + #[test] + fn string() { + let a = "hello".to_string(); + let d = Deferred::new(move || assert_eq!(a, "hello")); + d.call(); + } + + #[test] + fn boxed_slice_i32() { + let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); + let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); + d.call(); + } + + #[test] + fn long_slice_usize() { + let a: [usize; 5] = [2, 3, 5, 7, 11]; + let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); + d.call(); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/epoch.rs b/third_party/rust/crossbeam-epoch/src/epoch.rs new file mode 100644 index 0000000000..663508bd7b --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/epoch.rs @@ -0,0 +1,133 @@ +//! The global epoch +//! +//! The last bit in this number is unused and is always zero. Every so often the global epoch is +//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only +//! if all currently pinned participants have been pinned in the current epoch. +//! +//! If an object became garbage in some epoch, then we can be sure that after two advancements no +//! participant will hold a reference to it. That is the crux of safe memory reclamation. + +use crate::primitive::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering; + +/// An epoch that can be marked as pinned or unpinned. +/// +/// Internally, the epoch is represented as an integer that wraps around at some unspecified point +/// and a flag that represents whether it is pinned or unpinned. +#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)] +pub(crate) struct Epoch { + /// The least significant bit is set if pinned. The rest of the bits hold the epoch. + data: usize, +} + +impl Epoch { + /// Returns the starting epoch in unpinned state. + #[inline] + pub(crate) fn starting() -> Self { + Self::default() + } + + /// Returns the number of epochs `self` is ahead of `rhs`. + /// + /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX + /// / 2)`, so the returned distance will be in the same interval. + pub(crate) fn wrapping_sub(self, rhs: Self) -> isize { + // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`, + // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)` + // will be ignored in the shift operation. + self.data.wrapping_sub(rhs.data & !1) as isize >> 1 + } + + /// Returns `true` if the epoch is marked as pinned. + #[inline] + pub(crate) fn is_pinned(self) -> bool { + (self.data & 1) == 1 + } + + /// Returns the same epoch, but marked as pinned. + #[inline] + pub(crate) fn pinned(self) -> Epoch { + Epoch { + data: self.data | 1, + } + } + + /// Returns the same epoch, but marked as unpinned. 
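The bit layout described above can be mirrored in a small standalone sketch (this is not the crate-private `Epoch` type, just the same encoding):

```
// Epoch number in the upper bits, "pinned" flag in the least significant bit.
fn encode(epoch: usize, pinned: bool) -> usize {
    (epoch << 1) | (pinned as usize)
}

fn decode(data: usize) -> (usize, bool) {
    (data >> 1, data & 1 == 1)
}

// Adding 2 advances the epoch by one while preserving the pinned flag,
// matching `Epoch::successor`.
assert_eq!(decode(encode(5, true).wrapping_add(2)), (6, true));
```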
+ #[inline] + pub(crate) fn unpinned(self) -> Epoch { + Epoch { + data: self.data & !1, + } + } + + /// Returns the successor epoch. + /// + /// The returned epoch will be marked as pinned only if the previous one was as well. + #[inline] + pub(crate) fn successor(self) -> Epoch { + Epoch { + data: self.data.wrapping_add(2), + } + } +} + +/// An atomic value that holds an `Epoch`. +#[derive(Default, Debug)] +pub(crate) struct AtomicEpoch { + /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented + /// using an `AtomicUsize`. + data: AtomicUsize, +} + +impl AtomicEpoch { + /// Creates a new atomic epoch. + #[inline] + pub(crate) fn new(epoch: Epoch) -> Self { + let data = AtomicUsize::new(epoch.data); + AtomicEpoch { data } + } + + /// Loads a value from the atomic epoch. + #[inline] + pub(crate) fn load(&self, ord: Ordering) -> Epoch { + Epoch { + data: self.data.load(ord), + } + } + + /// Stores a value into the atomic epoch. + #[inline] + pub(crate) fn store(&self, epoch: Epoch, ord: Ordering) { + self.data.store(epoch.data, ord); + } + + /// Stores a value into the atomic epoch if the current value is the same as `current`. + /// + /// The return value is a result indicating whether the new value was written and containing + /// the previous value. On success this value is guaranteed to be equal to `current`. + /// + /// This method takes two `Ordering` arguments to describe the memory + /// ordering of this operation. `success` describes the required ordering for the + /// read-modify-write operation that takes place if the comparison with `current` succeeds. + /// `failure` describes the required ordering for the load operation that takes place when + /// the comparison fails. Using `Acquire` as success ordering makes the store part + /// of this operation `Relaxed`, and using `Release` makes the successful load + /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` + /// and must be equivalent to or weaker than the success ordering. + #[inline] + pub(crate) fn compare_exchange( + &self, + current: Epoch, + new: Epoch, + success: Ordering, + failure: Ordering, + ) -> Result<Epoch, Epoch> { + match self + .data + .compare_exchange(current.data, new.data, success, failure) + { + Ok(data) => Ok(Epoch { data }), + Err(data) => Err(Epoch { data }), + } + } +} diff --git a/third_party/rust/crossbeam-epoch/src/guard.rs b/third_party/rust/crossbeam-epoch/src/guard.rs new file mode 100644 index 0000000000..ba7fe1b119 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/guard.rs @@ -0,0 +1,520 @@ +use core::fmt; +use core::mem; + +use scopeguard::defer; + +use crate::atomic::Shared; +use crate::collector::Collector; +use crate::deferred::Deferred; +use crate::internal::Local; + +/// A guard that keeps the current thread pinned. +/// +/// # Pinning +/// +/// The current thread is pinned by calling [`pin`], which returns a new guard: +/// +/// ``` +/// use crossbeam_epoch as epoch; +/// +/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference. +/// // This is not really necessary, but makes passing references to the guard a bit easier. +/// let guard = &epoch::pin(); +/// ``` +/// +/// When a guard gets dropped, the current thread is automatically unpinned. +/// +/// # Pointers on the stack +/// +/// Having a guard allows us to create pointers on the stack to heap-allocated objects. 
+/// For example: +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic}; +/// use std::sync::atomic::Ordering::SeqCst; +/// +/// // Create a heap-allocated number. +/// let a = Atomic::new(777); +/// +/// // Pin the current thread. +/// let guard = &epoch::pin(); +/// +/// // Load the heap-allocated object and create pointer `p` on the stack. +/// let p = a.load(SeqCst, guard); +/// +/// // Dereference the pointer and print the value: +/// if let Some(num) = unsafe { p.as_ref() } { +/// println!("The number is {}.", num); +/// } +/// # unsafe { drop(a.into_owned()); } // avoid leak +/// ``` +/// +/// # Multiple guards +/// +/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the +/// thread will actually be pinned only when the first guard is created and unpinned when the last +/// one is dropped: +/// +/// ``` +/// use crossbeam_epoch as epoch; +/// +/// let guard1 = epoch::pin(); +/// let guard2 = epoch::pin(); +/// assert!(epoch::is_pinned()); +/// drop(guard1); +/// assert!(epoch::is_pinned()); +/// drop(guard2); +/// assert!(!epoch::is_pinned()); +/// ``` +/// +/// [`pin`]: super::pin +pub struct Guard { + pub(crate) local: *const Local, +} + +impl Guard { + /// Stores a function so that it can be executed at some point after all currently pinned + /// threads get unpinned. + /// + /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache + /// becomes full, some functions are moved into the global cache. At the same time, some + /// functions from both local and global caches may get executed in order to incrementally + /// clean up the caches as they fill up. + /// + /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it + /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might + /// never run, but the epoch-based garbage collection will make an effort to execute it + /// reasonably soon. + /// + /// If this method is called from an [`unprotected`] guard, the function will simply be + /// executed immediately. + pub fn defer<F, R>(&self, f: F) + where + F: FnOnce() -> R, + F: Send + 'static, + { + unsafe { + self.defer_unchecked(f); + } + } + + /// Stores a function so that it can be executed at some point after all currently pinned + /// threads get unpinned. + /// + /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache + /// becomes full, some functions are moved into the global cache. At the same time, some + /// functions from both local and global caches may get executed in order to incrementally + /// clean up the caches as they fill up. + /// + /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it + /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might + /// never run, but the epoch-based garbage collection will make an effort to execute it + /// reasonably soon. + /// + /// If this method is called from an [`unprotected`] guard, the function will simply be + /// executed immediately. + /// + /// # Safety + /// + /// The given function must not hold reference onto the stack. It is highly recommended that + /// the passed function is **always** marked with `move` in order to prevent accidental + /// borrows. 
+ /// + /// ``` + /// use crossbeam_epoch as epoch; + /// + /// let guard = &epoch::pin(); + /// let message = "Hello!"; + /// unsafe { + /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. + /// guard.defer_unchecked(move || { + /// println!("{}", message); + /// }); + /// } + /// ``` + /// + /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by + /// the closure must be `Send`. + /// + /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove + /// `F: Send` for typical use cases. For example, consider the following code snippet, which + /// exemplifies the typical use case of deferring the deallocation of a shared reference: + /// + /// ```ignore + /// let shared = Owned::new(7i32).into_shared(guard); + /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`! + /// ``` + /// + /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function, + /// because it's called only after the grace period and `shared` is no longer shared with other + /// threads. But we don't expect type systems to prove this. + /// + /// # Examples + /// + /// When a heap-allocated object in a data structure becomes unreachable, it has to be + /// deallocated. However, the current thread and other threads may be still holding references + /// on the stack to that same object. Therefore it cannot be deallocated before those references + /// get dropped. This method can defer deallocation until all those threads get unpinned and + /// consequently drop all their references on the stack. + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new("foo"); + /// + /// // Now suppose that `a` is shared among multiple threads and concurrently + /// // accessed and modified... + /// + /// // Pin the current thread. + /// let guard = &epoch::pin(); + /// + /// // Steal the object currently stored in `a` and swap it with another one. + /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); + /// + /// if !p.is_null() { + /// // The object `p` is pointing to is now unreachable. + /// // Defer its deallocation until all currently pinned threads get unpinned. + /// unsafe { + /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. + /// guard.defer_unchecked(move || { + /// println!("{} is now being deallocated.", p.deref()); + /// // Now we have unique access to the object pointed to by `p` and can turn it + /// // into an `Owned`. Dropping the `Owned` will deallocate the object. + /// drop(p.into_owned()); + /// }); + /// } + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub unsafe fn defer_unchecked<F, R>(&self, f: F) + where + F: FnOnce() -> R, + { + if let Some(local) = self.local.as_ref() { + local.defer(Deferred::new(move || drop(f())), self); + } else { + drop(f()); + } + } + + /// Stores a destructor for an object so that it can be deallocated and dropped at some point + /// after all currently pinned threads get unpinned. + /// + /// This method first stores the destructor into the thread-local (or handle-local) cache. If + /// this cache becomes full, some destructors are moved into the global cache. At the same + /// time, some destructors from both local and global caches may get executed in order to + /// incrementally clean up the caches as they fill up. 
+ /// + /// There is no guarantee when exactly the destructor will be executed. The only guarantee is + /// that it won't be executed until all currently pinned threads get unpinned. In theory, the + /// destructor might never run, but the epoch-based garbage collection will make an effort to + /// execute it reasonably soon. + /// + /// If this method is called from an [`unprotected`] guard, the destructor will simply be + /// executed immediately. + /// + /// # Safety + /// + /// The object must not be reachable by other threads anymore, otherwise it might be still in + /// use when the destructor runs. + /// + /// Apart from that, keep in mind that another thread may execute the destructor, so the object + /// must be sendable to other threads. + /// + /// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove + /// `T: Send` for typical use cases. For example, consider the following code snippet, which + /// exemplifies the typical use case of deferring the deallocation of a shared reference: + /// + /// ```ignore + /// let shared = Owned::new(7i32).into_shared(guard); + /// guard.defer_destroy(shared); // `Shared` is not `Send`! + /// ``` + /// + /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because + /// it's called only after the grace period and `shared` is no longer shared with other + /// threads. But we don't expect type systems to prove this. + /// + /// # Examples + /// + /// When a heap-allocated object in a data structure becomes unreachable, it has to be + /// deallocated. However, the current thread and other threads may be still holding references + /// on the stack to that same object. Therefore it cannot be deallocated before those references + /// get dropped. This method can defer deallocation until all those threads get unpinned and + /// consequently drop all their references on the stack. + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new("foo"); + /// + /// // Now suppose that `a` is shared among multiple threads and concurrently + /// // accessed and modified... + /// + /// // Pin the current thread. + /// let guard = &epoch::pin(); + /// + /// // Steal the object currently stored in `a` and swap it with another one. + /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); + /// + /// if !p.is_null() { + /// // The object `p` is pointing to is now unreachable. + /// // Defer its deallocation until all currently pinned threads get unpinned. + /// unsafe { + /// guard.defer_destroy(p); + /// } + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) { + self.defer_unchecked(move || ptr.into_owned()); + } + + /// Clears up the thread-local cache of deferred functions by executing them or moving into the + /// global cache. + /// + /// Call this method after deferring execution of a function if you want to get it executed as + /// soon as possible. Flushing will make sure it is residing in in the global cache, so that + /// any thread has a chance of taking the function and executing it. + /// + /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens). 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch as epoch; + /// + /// let guard = &epoch::pin(); + /// guard.defer(move || { + /// println!("This better be printed as soon as possible!"); + /// }); + /// guard.flush(); + /// ``` + pub fn flush(&self) { + if let Some(local) = unsafe { self.local.as_ref() } { + local.flush(self); + } + } + + /// Unpins and then immediately re-pins the thread. + /// + /// This method is useful when you don't want delay the advancement of the global epoch by + /// holding an old epoch. For safety, you should not maintain any guard-based reference across + /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this + /// is the only active guard for the current thread. + /// + /// If this method is called from an [`unprotected`] guard, then the call will be just no-op. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// + /// let a = Atomic::new(777); + /// let mut guard = epoch::pin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// guard.repin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn repin(&mut self) { + if let Some(local) = unsafe { self.local.as_ref() } { + local.repin(); + } + } + + /// Temporarily unpins the thread, executes the given function and then re-pins the thread. + /// + /// This method is useful when you need to perform a long-running operation (e.g. sleeping) + /// and don't need to maintain any guard-based reference across the call (the latter is enforced + /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the + /// current thread. + /// + /// If this method is called from an [`unprotected`] guard, then the passed function is called + /// directly without unpinning the thread. + /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch::{self as epoch, Atomic}; + /// use std::sync::atomic::Ordering::SeqCst; + /// use std::thread; + /// use std::time::Duration; + /// + /// let a = Atomic::new(777); + /// let mut guard = epoch::pin(); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// guard.repin_after(|| thread::sleep(Duration::from_millis(50))); + /// { + /// let p = a.load(SeqCst, &guard); + /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); + /// } + /// # unsafe { drop(a.into_owned()); } // avoid leak + /// ``` + pub fn repin_after<F, R>(&mut self, f: F) -> R + where + F: FnOnce() -> R, + { + if let Some(local) = unsafe { self.local.as_ref() } { + // We need to acquire a handle here to ensure the Local doesn't + // disappear from under us. + local.acquire_handle(); + local.unpin(); + } + + // Ensure the Guard is re-pinned even if the function panics + defer! { + if let Some(local) = unsafe { self.local.as_ref() } { + mem::forget(local.pin()); + local.release_handle(); + } + } + + f() + } + + /// Returns the `Collector` associated with this guard. + /// + /// This method is useful when you need to ensure that all guards used with + /// a data structure come from the same collector. + /// + /// If this method is called from an [`unprotected`] guard, then `None` is returned. 
+ /// + /// # Examples + /// + /// ``` + /// use crossbeam_epoch as epoch; + /// + /// let guard1 = epoch::pin(); + /// let guard2 = epoch::pin(); + /// assert!(guard1.collector() == guard2.collector()); + /// ``` + pub fn collector(&self) -> Option<&Collector> { + unsafe { self.local.as_ref().map(|local| local.collector()) } + } +} + +impl Drop for Guard { + #[inline] + fn drop(&mut self) { + if let Some(local) = unsafe { self.local.as_ref() } { + local.unpin(); + } + } +} + +impl fmt::Debug for Guard { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Guard { .. }") + } +} + +/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s. +/// +/// This guard should be used in special occasions only. Note that it doesn't actually keep any +/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely. +/// +/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just +/// execute the function immediately. +/// +/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`. +/// +/// # Safety +/// +/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the +/// [`Atomic`] is not being concurrently modified by other threads. +/// +/// # Examples +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic}; +/// use std::sync::atomic::Ordering::Relaxed; +/// +/// let a = Atomic::new(7); +/// +/// unsafe { +/// // Load `a` without pinning the current thread. +/// a.load(Relaxed, epoch::unprotected()); +/// +/// // It's possible to create more dummy guards by calling `clone()`. +/// let dummy = &epoch::unprotected().clone(); +/// +/// dummy.defer(move || { +/// println!("This gets executed immediately."); +/// }); +/// +/// // Dropping `dummy` doesn't affect the current thread - it's just a noop. +/// } +/// # unsafe { drop(a.into_owned()); } // avoid leak +/// ``` +/// +/// The most common use of this function is when constructing or destructing a data structure. +/// +/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that +/// point no other thread could concurrently modify the [`Atomic`]s we are accessing. +/// +/// If we were to actually pin the current thread during destruction, that would just unnecessarily +/// delay garbage collection and incur some performance cost, so in cases like these `unprotected` +/// is very helpful. +/// +/// ``` +/// use crossbeam_epoch::{self as epoch, Atomic}; +/// use std::mem::ManuallyDrop; +/// use std::sync::atomic::Ordering::Relaxed; +/// +/// struct Stack<T> { +/// head: Atomic<Node<T>>, +/// } +/// +/// struct Node<T> { +/// data: ManuallyDrop<T>, +/// next: Atomic<Node<T>>, +/// } +/// +/// impl<T> Drop for Stack<T> { +/// fn drop(&mut self) { +/// unsafe { +/// // Unprotected load. +/// let mut node = self.head.load(Relaxed, epoch::unprotected()); +/// +/// while let Some(n) = node.as_ref() { +/// // Unprotected load. +/// let next = n.next.load(Relaxed, epoch::unprotected()); +/// +/// // Take ownership of the node, then drop its data and deallocate it. +/// let mut o = node.into_owned(); +/// ManuallyDrop::drop(&mut o.data); +/// drop(o); +/// +/// node = next; +/// } +/// } +/// } +/// } +/// ``` +/// +/// [`Atomic`]: super::Atomic +/// [`defer`]: Guard::defer +#[inline] +pub unsafe fn unprotected() -> &'static Guard { + // An unprotected guard is just a `Guard` with its field `local` set to null. 
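+ // Editorial note (not part of the upstream source): because `local` is null here, every
+ // `Guard` method that checks `unsafe { self.local.as_ref() }` sees `None` for this guard and
+ // takes its "unprotected" branch: `defer_unchecked` runs the closure immediately, `flush` and
+ // `repin` do nothing, and dropping the guard does not unpin anything.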
+ // We make a newtype over `Guard` because `Guard` isn't `Sync`, so can't be directly stored in + // a `static` + struct GuardWrapper(Guard); + unsafe impl Sync for GuardWrapper {} + static UNPROTECTED: GuardWrapper = GuardWrapper(Guard { + local: core::ptr::null(), + }); + &UNPROTECTED.0 +} diff --git a/third_party/rust/crossbeam-epoch/src/internal.rs b/third_party/rust/crossbeam-epoch/src/internal.rs new file mode 100644 index 0000000000..00c66a40a7 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/internal.rs @@ -0,0 +1,599 @@ +//! The global data and participant for garbage collection. +//! +//! # Registration +//! +//! In order to track all participants in one place, we need some form of participant +//! registration. When a participant is created, it is registered to a global lock-free +//! singly-linked list of registries; and when a participant is leaving, it is unregistered from the +//! list. +//! +//! # Pinning +//! +//! Every participant contains an integer that tells whether the participant is pinned and if so, +//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that +//! aids in periodic global epoch advancement. +//! +//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned. +//! Guards are necessary for performing atomic operations, and for freeing/dropping locations. +//! +//! # Thread-local bag +//! +//! Objects that get unlinked from concurrent data structures must be stashed away until the global +//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects +//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current +//! global epoch and pushed into the global queue of bags. We store objects in thread-local storages +//! for amortizing the synchronization cost of pushing the garbages to a global queue. +//! +//! # Global queue +//! +//! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and +//! destroyed along the way. This design reduces contention on data structures. The global queue +//! cannot be explicitly accessed: the only way to interact with it is by calling functions +//! `defer()` that adds an object to the thread-local bag, or `collect()` that manually triggers +//! garbage collection. +//! +//! Ideally each instance of concurrent data structure may have its own queue that gets fully +//! destroyed as soon as the data structure gets dropped. + +use crate::primitive::cell::UnsafeCell; +use crate::primitive::sync::atomic; +use core::cell::Cell; +use core::mem::{self, ManuallyDrop}; +use core::num::Wrapping; +use core::sync::atomic::Ordering; +use core::{fmt, ptr}; + +use crossbeam_utils::CachePadded; +use memoffset::offset_of; + +use crate::atomic::{Owned, Shared}; +use crate::collector::{Collector, LocalHandle}; +use crate::deferred::Deferred; +use crate::epoch::{AtomicEpoch, Epoch}; +use crate::guard::{unprotected, Guard}; +use crate::sync::list::{Entry, IsElement, IterError, List}; +use crate::sync::queue::Queue; + +/// Maximum number of objects a bag can contain. +#[cfg(not(any(crossbeam_sanitize, miri)))] +const MAX_OBJECTS: usize = 64; +// Makes it more likely to trigger any potential data races. +#[cfg(any(crossbeam_sanitize, miri))] +const MAX_OBJECTS: usize = 4; + +/// A bag of deferred functions. +pub(crate) struct Bag { + /// Stashed objects. 
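+ // Editorial note (not part of the upstream source): the bag acts as a fixed-capacity inline
+ // vector of deferred calls. Only the first `len` entries are live; the array is initialized
+ // with `Deferred::NO_OP` placeholders (see the `Default` impl below), and `try_push` reports
+ // failure once all `MAX_OBJECTS` slots are occupied.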
+ deferreds: [Deferred; MAX_OBJECTS], + len: usize, +} + +/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions. +unsafe impl Send for Bag {} + +impl Bag { + /// Returns a new, empty bag. + pub(crate) fn new() -> Self { + Self::default() + } + + /// Returns `true` if the bag is empty. + pub(crate) fn is_empty(&self) -> bool { + self.len == 0 + } + + /// Attempts to insert a deferred function into the bag. + /// + /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is + /// full. + /// + /// # Safety + /// + /// It should be safe for another thread to execute the given function. + pub(crate) unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> { + if self.len < MAX_OBJECTS { + self.deferreds[self.len] = deferred; + self.len += 1; + Ok(()) + } else { + Err(deferred) + } + } + + /// Seals the bag with the given epoch. + fn seal(self, epoch: Epoch) -> SealedBag { + SealedBag { epoch, _bag: self } + } +} + +impl Default for Bag { + fn default() -> Self { + Bag { + len: 0, + deferreds: [Deferred::NO_OP; MAX_OBJECTS], + } + } +} + +impl Drop for Bag { + fn drop(&mut self) { + // Call all deferred functions. + for deferred in &mut self.deferreds[..self.len] { + let no_op = Deferred::NO_OP; + let owned_deferred = mem::replace(deferred, no_op); + owned_deferred.call(); + } + } +} + +// can't #[derive(Debug)] because Debug is not implemented for arrays 64 items long +impl fmt::Debug for Bag { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Bag") + .field("deferreds", &&self.deferreds[..self.len]) + .finish() + } +} + +/// A pair of an epoch and a bag. +#[derive(Default, Debug)] +struct SealedBag { + epoch: Epoch, + _bag: Bag, +} + +/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch. +unsafe impl Sync for SealedBag {} + +impl SealedBag { + /// Checks if it is safe to drop the bag w.r.t. the given global epoch. + fn is_expired(&self, global_epoch: Epoch) -> bool { + // A pinned participant can witness at most one epoch advancement. Therefore, any bag that + // is within one epoch of the current one cannot be destroyed yet. + global_epoch.wrapping_sub(self.epoch) >= 2 + } +} + +/// The global data for a garbage collector. +pub(crate) struct Global { + /// The intrusive linked list of `Local`s. + locals: List<Local>, + + /// The global queue of bags of deferred functions. + queue: Queue<SealedBag>, + + /// The global epoch. + pub(crate) epoch: CachePadded<AtomicEpoch>, +} + +impl Global { + /// Number of bags to destroy. + const COLLECT_STEPS: usize = 8; + + /// Creates a new global data for garbage collection. + #[inline] + pub(crate) fn new() -> Self { + Self { + locals: List::new(), + queue: Queue::new(), + epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), + } + } + + /// Pushes the bag into the global queue and replaces the bag with a new empty bag. + pub(crate) fn push_bag(&self, bag: &mut Bag, guard: &Guard) { + let bag = mem::replace(bag, Bag::new()); + + atomic::fence(Ordering::SeqCst); + + let epoch = self.epoch.load(Ordering::Relaxed); + self.queue.push(bag.seal(epoch), guard); + } + + /// Collects several bags from the global queue and executes deferred functions in them. + /// + /// Note: This may itself produce garbage and in turn allocate new bags. + /// + /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold + /// path. 
In other words, we want the compiler to optimize branching for the case when + /// `collect()` is not called. + #[cold] + pub(crate) fn collect(&self, guard: &Guard) { + let global_epoch = self.try_advance(guard); + + let steps = if cfg!(crossbeam_sanitize) { + usize::max_value() + } else { + Self::COLLECT_STEPS + }; + + for _ in 0..steps { + match self.queue.try_pop_if( + &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch), + guard, + ) { + None => break, + Some(sealed_bag) => drop(sealed_bag), + } + } + } + + /// Attempts to advance the global epoch. + /// + /// The global epoch can advance only if all currently pinned participants have been pinned in + /// the current epoch. + /// + /// Returns the current global epoch. + /// + /// `try_advance()` is annotated `#[cold]` because it is rarely called. + #[cold] + pub(crate) fn try_advance(&self, guard: &Guard) -> Epoch { + let global_epoch = self.epoch.load(Ordering::Relaxed); + atomic::fence(Ordering::SeqCst); + + // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly + // easy to implement in a lock-free manner. However, traversal can be slow due to cache + // misses and data dependencies. We should experiment with other data structures as well. + for local in self.locals.iter(guard) { + match local { + Err(IterError::Stalled) => { + // A concurrent thread stalled this iteration. That thread might also try to + // advance the epoch, in which case we leave the job to it. Otherwise, the + // epoch will not be advanced. + return global_epoch; + } + Ok(local) => { + let local_epoch = local.epoch.load(Ordering::Relaxed); + + // If the participant was pinned in a different epoch, we cannot advance the + // global epoch just yet. + if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch { + return global_epoch; + } + } + } + } + atomic::fence(Ordering::Acquire); + + // All pinned participants were pinned in the current global epoch. + // Now let's advance the global epoch... + // + // Note that if another thread already advanced it before us, this store will simply + // overwrite the global epoch with the same value. This is true because `try_advance` was + // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be + // advanced two steps ahead of it. + let new_epoch = global_epoch.successor(); + self.epoch.store(new_epoch, Ordering::Release); + new_epoch + } +} + +/// Participant for garbage collection. +pub(crate) struct Local { + /// A node in the intrusive linked list of `Local`s. + entry: Entry, + + /// The local epoch. + epoch: AtomicEpoch, + + /// A reference to the global data. + /// + /// When all guards and handles get dropped, this reference is destroyed. + collector: UnsafeCell<ManuallyDrop<Collector>>, + + /// The local bag of deferred functions. + pub(crate) bag: UnsafeCell<Bag>, + + /// The number of guards keeping this participant pinned. + guard_count: Cell<usize>, + + /// The number of active handles. + handle_count: Cell<usize>, + + /// Total number of pinnings performed. + /// + /// This is just an auxiliary counter that sometimes kicks off collection. + pin_count: Cell<Wrapping<usize>>, +} + +// Make sure `Local` is less than or equal to 2048 bytes. 
+// https://github.com/crossbeam-rs/crossbeam/issues/551 +#[cfg(not(any(crossbeam_sanitize, miri)))] // `crossbeam_sanitize` and `miri` reduce the size of `Local` +#[test] +fn local_size() { + // TODO: https://github.com/crossbeam-rs/crossbeam/issues/869 + // assert!( + // core::mem::size_of::<Local>() <= 2048, + // "An allocation of `Local` should be <= 2048 bytes." + // ); +} + +impl Local { + /// Number of pinnings after which a participant will execute some deferred functions from the + /// global queue. + const PINNINGS_BETWEEN_COLLECT: usize = 128; + + /// Registers a new `Local` in the provided `Global`. + pub(crate) fn register(collector: &Collector) -> LocalHandle { + unsafe { + // Since we dereference no pointers in this block, it is safe to use `unprotected`. + + let local = Owned::new(Local { + entry: Entry::default(), + epoch: AtomicEpoch::new(Epoch::starting()), + collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())), + bag: UnsafeCell::new(Bag::new()), + guard_count: Cell::new(0), + handle_count: Cell::new(1), + pin_count: Cell::new(Wrapping(0)), + }) + .into_shared(unprotected()); + collector.global.locals.insert(local, unprotected()); + LocalHandle { + local: local.as_raw(), + } + } + } + + /// Returns a reference to the `Global` in which this `Local` resides. + #[inline] + pub(crate) fn global(&self) -> &Global { + &self.collector().global + } + + /// Returns a reference to the `Collector` in which this `Local` resides. + #[inline] + pub(crate) fn collector(&self) -> &Collector { + self.collector.with(|c| unsafe { &**c }) + } + + /// Returns `true` if the current participant is pinned. + #[inline] + pub(crate) fn is_pinned(&self) -> bool { + self.guard_count.get() > 0 + } + + /// Adds `deferred` to the thread-local bag. + /// + /// # Safety + /// + /// It should be safe for another thread to execute the given function. + pub(crate) unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) { + let bag = self.bag.with_mut(|b| &mut *b); + + while let Err(d) = bag.try_push(deferred) { + self.global().push_bag(bag, guard); + deferred = d; + } + } + + pub(crate) fn flush(&self, guard: &Guard) { + let bag = self.bag.with_mut(|b| unsafe { &mut *b }); + + if !bag.is_empty() { + self.global().push_bag(bag, guard); + } + + self.global().collect(guard); + } + + /// Pins the `Local`. + #[inline] + pub(crate) fn pin(&self) -> Guard { + let guard = Guard { local: self }; + + let guard_count = self.guard_count.get(); + self.guard_count.set(guard_count.checked_add(1).unwrap()); + + if guard_count == 0 { + let global_epoch = self.global().epoch.load(Ordering::Relaxed); + let new_epoch = global_epoch.pinned(); + + // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence. + // The fence makes sure that any future loads from `Atomic`s will not happen before + // this store. + if cfg!(all( + any(target_arch = "x86", target_arch = "x86_64"), + not(miri) + )) { + // HACK(stjepang): On x86 architectures there are two different ways of executing + // a `SeqCst` fence. + // + // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. + // 2. `_.compare_exchange(_, _, SeqCst, SeqCst)`, which compiles into a `lock cmpxchg` + // instruction. + // + // Both instructions have the effect of a full barrier, but benchmarks have shown + // that the second one makes pinning faster in this particular case. 
It is not + // clear that this is permitted by the C++ memory model (SC fences work very + // differently from SC accesses), but experimental evidence suggests that this + // works fine. Using inline assembly would be a viable (and correct) alternative, + // but alas, that is not possible on stable Rust. + let current = Epoch::starting(); + let res = self.epoch.compare_exchange( + current, + new_epoch, + Ordering::SeqCst, + Ordering::SeqCst, + ); + debug_assert!(res.is_ok(), "participant was expected to be unpinned"); + // We add a compiler fence to make it less likely for LLVM to do something wrong + // here. Formally, this is not enough to get rid of data races; practically, + // it should go a long way. + atomic::compiler_fence(Ordering::SeqCst); + } else { + self.epoch.store(new_epoch, Ordering::Relaxed); + atomic::fence(Ordering::SeqCst); + } + + // Increment the pin counter. + let count = self.pin_count.get(); + self.pin_count.set(count + Wrapping(1)); + + // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting + // some garbage. + if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 { + self.global().collect(&guard); + } + } + + guard + } + + /// Unpins the `Local`. + #[inline] + pub(crate) fn unpin(&self) { + let guard_count = self.guard_count.get(); + self.guard_count.set(guard_count - 1); + + if guard_count == 1 { + self.epoch.store(Epoch::starting(), Ordering::Release); + + if self.handle_count.get() == 0 { + self.finalize(); + } + } + } + + /// Unpins and then pins the `Local`. + #[inline] + pub(crate) fn repin(&self) { + let guard_count = self.guard_count.get(); + + // Update the local epoch only if there's only one guard. + if guard_count == 1 { + let epoch = self.epoch.load(Ordering::Relaxed); + let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned(); + + // Update the local epoch only if the global epoch is greater than the local epoch. + if epoch != global_epoch { + // We store the new epoch with `Release` because we need to ensure any memory + // accesses from the previous epoch do not leak into the new one. + self.epoch.store(global_epoch, Ordering::Release); + + // However, we don't need a following `SeqCst` fence, because it is safe for memory + // accesses from the new epoch to be executed before updating the local epoch. At + // worse, other threads will see the new epoch late and delay GC slightly. + } + } + } + + /// Increments the handle count. + #[inline] + pub(crate) fn acquire_handle(&self) { + let handle_count = self.handle_count.get(); + debug_assert!(handle_count >= 1); + self.handle_count.set(handle_count + 1); + } + + /// Decrements the handle count. + #[inline] + pub(crate) fn release_handle(&self) { + let guard_count = self.guard_count.get(); + let handle_count = self.handle_count.get(); + debug_assert!(handle_count >= 1); + self.handle_count.set(handle_count - 1); + + if guard_count == 0 && handle_count == 1 { + self.finalize(); + } + } + + /// Removes the `Local` from the global linked list. + #[cold] + fn finalize(&self) { + debug_assert_eq!(self.guard_count.get(), 0); + debug_assert_eq!(self.handle_count.get(), 0); + + // Temporarily increment handle count. This is required so that the following call to `pin` + // doesn't call `finalize` again. + self.handle_count.set(1); + unsafe { + // Pin and move the local bag into the global queue. It's important that `push_bag` + // doesn't defer destruction on any new garbage. 
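+ // Editorial note (not part of the upstream source): the guard taken on the next line is
+ // backed by this same `Local` and keeps the queue push epoch-protected; when it is dropped
+ // at the end of this block, the temporary handle count of 1 set above prevents `unpin` from
+ // re-entering `finalize`.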
+ let guard = &self.pin(); + self.global() + .push_bag(self.bag.with_mut(|b| &mut *b), guard); + } + // Revert the handle count back to zero. + self.handle_count.set(0); + + unsafe { + // Take the reference to the `Global` out of this `Local`. Since we're not protected + // by a guard at this time, it's crucial that the reference is read before marking the + // `Local` as deleted. + let collector: Collector = ptr::read(self.collector.with(|c| &*(*c))); + + // Mark this node in the linked list as deleted. + self.entry.delete(unprotected()); + + // Finally, drop the reference to the global. Note that this might be the last reference + // to the `Global`. If so, the global data will be destroyed and all deferred functions + // in its queue will be executed. + drop(collector); + } + } +} + +impl IsElement<Local> for Local { + fn entry_of(local: &Local) -> &Entry { + let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry; + unsafe { &*entry_ptr } + } + + unsafe fn element_of(entry: &Entry) -> &Local { + // offset_of! macro uses unsafe, but it's unnecessary in this context. + #[allow(unused_unsafe)] + let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local; + &*local_ptr + } + + unsafe fn finalize(entry: &Entry, guard: &Guard) { + guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)); + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + + use super::*; + + #[test] + fn check_defer() { + static FLAG: AtomicUsize = AtomicUsize::new(0); + fn set() { + FLAG.store(42, Ordering::Relaxed); + } + + let d = Deferred::new(set); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + d.call(); + assert_eq!(FLAG.load(Ordering::Relaxed), 42); + } + + #[test] + fn check_bag() { + static FLAG: AtomicUsize = AtomicUsize::new(0); + fn incr() { + FLAG.fetch_add(1, Ordering::Relaxed); + } + + let mut bag = Bag::new(); + assert!(bag.is_empty()); + + for _ in 0..MAX_OBJECTS { + assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() }); + assert!(!bag.is_empty()); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + } + + let result = unsafe { bag.try_push(Deferred::new(incr)) }; + assert!(result.is_err()); + assert!(!bag.is_empty()); + assert_eq!(FLAG.load(Ordering::Relaxed), 0); + + drop(bag); + assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/lib.rs b/third_party/rust/crossbeam-epoch/src/lib.rs new file mode 100644 index 0000000000..b432c1f401 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/lib.rs @@ -0,0 +1,169 @@ +//! Epoch-based memory reclamation. +//! +//! An interesting problem concurrent collections deal with comes from the remove operation. +//! Suppose that a thread removes an element from a lock-free map, while another thread is reading +//! that same element at the same time. The first thread must wait until the second thread stops +//! reading the element. Only then it is safe to destruct it. +//! +//! Programming languages that come with garbage collectors solve this problem trivially. The +//! garbage collector will destruct the removed element when no thread can hold a reference to it +//! anymore. +//! +//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an +//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and +//! marked with the current epoch. 
Every time a thread accesses a collection, it checks the current +//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread +//! can be referencing it anymore. +//! +//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit +//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something +//! users of concurrent collections don't have to worry much about. +//! +//! # Pointers +//! +//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which +//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a +//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely +//! read. +//! +//! # Pinning +//! +//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant +//! we declare that any object that gets removed from now on must not be destructed just +//! yet. Garbage collection of newly removed objects is suspended until the participant gets +//! unpinned. +//! +//! # Garbage +//! +//! Objects that get removed from concurrent collections must be stashed away until all currently +//! pinned participants get unpinned. Such objects can be stored into a thread-local or global +//! storage, where they are kept until the right time for their destruction comes. +//! +//! There is a global shared instance of garbage queue. You can [`defer`](Guard::defer) the execution of an +//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data +//! structures may defer the deallocation of an object. +//! +//! # APIs +//! +//! For majority of use cases, just use the default garbage collector by invoking [`pin`]. If you +//! want to create your own garbage collector, use the [`Collector`] API. + +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![warn( + missing_docs, + missing_debug_implementations, + rust_2018_idioms, + unreachable_pub +)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(crossbeam_loom)] +extern crate loom_crate as loom; + +use cfg_if::cfg_if; + +#[cfg(crossbeam_loom)] +#[allow(unused_imports, dead_code)] +mod primitive { + pub(crate) mod cell { + pub(crate) use loom::cell::UnsafeCell; + } + pub(crate) mod sync { + pub(crate) mod atomic { + use core::sync::atomic::Ordering; + pub(crate) use loom::sync::atomic::{fence, AtomicUsize}; + + // FIXME: loom does not support compiler_fence at the moment. + // https://github.com/tokio-rs/loom/issues/117 + // we use fence as a stand-in for compiler_fence for the time being. + // this may miss some races since fence is stronger than compiler_fence, + // but it's the best we can do for the time being. + pub(crate) use self::fence as compiler_fence; + } + pub(crate) use loom::sync::Arc; + } + pub(crate) use loom::thread_local; +} +#[cfg(not(crossbeam_no_atomic_cas))] +#[cfg(not(crossbeam_loom))] +#[allow(unused_imports, dead_code)] +mod primitive { + #[cfg(feature = "alloc")] + pub(crate) mod cell { + #[derive(Debug)] + #[repr(transparent)] + pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>); + + // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell. 
+ // Since we want the rest of the code to be agnostic to whether it's running under loom or + // not, we write this small wrapper that provides the loom-supported API for the standard + // library UnsafeCell. This is also what the loom documentation recommends: + // https://github.com/tokio-rs/loom#handling-loom-api-differences + impl<T> UnsafeCell<T> { + #[inline] + pub(crate) const fn new(data: T) -> UnsafeCell<T> { + UnsafeCell(::core::cell::UnsafeCell::new(data)) + } + + #[inline] + pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R { + f(self.0.get()) + } + + #[inline] + pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R { + f(self.0.get()) + } + } + } + #[cfg(feature = "alloc")] + pub(crate) mod sync { + pub(crate) mod atomic { + pub(crate) use core::sync::atomic::compiler_fence; + pub(crate) use core::sync::atomic::fence; + pub(crate) use core::sync::atomic::AtomicUsize; + } + pub(crate) use alloc::sync::Arc; + } + + #[cfg(feature = "std")] + pub(crate) use std::thread_local; +} + +#[cfg(not(crossbeam_no_atomic_cas))] +cfg_if! { + if #[cfg(feature = "alloc")] { + extern crate alloc; + + mod atomic; + mod collector; + mod deferred; + mod epoch; + mod guard; + mod internal; + mod sync; + + pub use self::atomic::{ + Pointable, Atomic, CompareExchangeError, + Owned, Pointer, Shared, + }; + pub use self::collector::{Collector, LocalHandle}; + pub use self::guard::{unprotected, Guard}; + + #[allow(deprecated)] + pub use self::atomic::{CompareAndSetError, CompareAndSetOrdering}; + } +} + +cfg_if! { + if #[cfg(feature = "std")] { + mod default; + pub use self::default::{default_collector, is_pinned, pin}; + } +} diff --git a/third_party/rust/crossbeam-epoch/src/sync/list.rs b/third_party/rust/crossbeam-epoch/src/sync/list.rs new file mode 100644 index 0000000000..52ffd6fca4 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/sync/list.rs @@ -0,0 +1,487 @@ +//! Lock-free intrusive linked list. +//! +//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA +//! 2002. <http://dl.acm.org/citation.cfm?id=564870.564881> + +use core::marker::PhantomData; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +use crate::{unprotected, Atomic, Guard, Shared}; + +/// An entry in a linked list. +/// +/// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different +/// cache-line than thread-local data in terms of performance. +#[derive(Debug)] +pub(crate) struct Entry { + /// The next entry in the linked list. + /// If the tag is 1, this entry is marked as deleted. + next: Atomic<Entry>, +} + +/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive +/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance +/// of `Entry`. 
+/// +/// # Example +/// +/// ```ignore +/// struct A { +/// entry: Entry, +/// data: usize, +/// } +/// +/// impl IsElement<A> for A { +/// fn entry_of(a: &A) -> &Entry { +/// let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry; +/// unsafe { &*entry_ptr } +/// } +/// +/// unsafe fn element_of(entry: &Entry) -> &T { +/// let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const T; +/// &*elem_ptr +/// } +/// +/// unsafe fn finalize(entry: &Entry, guard: &Guard) { +/// guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)); +/// } +/// } +/// ``` +/// +/// This trait is implemented on a type separate from `T` (although it can be just `T`), because +/// one type might be placeable into multiple lists, in which case it would require multiple +/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>` +/// represents a distinct `Entry` in `T`. +/// +/// For example, we can insert the following struct into two lists using `entry1` for one +/// and `entry2` for the other: +/// +/// ```ignore +/// struct B { +/// entry1: Entry, +/// entry2: Entry, +/// data: usize, +/// } +/// ``` +/// +pub(crate) trait IsElement<T> { + /// Returns a reference to this element's `Entry`. + fn entry_of(_: &T) -> &Entry; + + /// Given a reference to an element's entry, returns that element. + /// + /// ```ignore + /// let elem = ListElement::new(); + /// assert_eq!(elem.entry_of(), + /// unsafe { ListElement::element_of(elem.entry_of()) } ); + /// ``` + /// + /// # Safety + /// + /// The caller has to guarantee that the `Entry` is called with was retrieved from an instance + /// of the element type (`T`). + unsafe fn element_of(_: &Entry) -> &T; + + /// The function that is called when an entry is unlinked from list. + /// + /// # Safety + /// + /// The caller has to guarantee that the `Entry` is called with was retrieved from an instance + /// of the element type (`T`). + unsafe fn finalize(_: &Entry, _: &Guard); +} + +/// A lock-free, intrusive linked list of type `T`. +#[derive(Debug)] +pub(crate) struct List<T, C: IsElement<T> = T> { + /// The head of the linked list. + head: Atomic<Entry>, + + /// The phantom data for using `T` and `C`. + _marker: PhantomData<(T, C)>, +} + +/// An iterator used for retrieving values from the list. +pub(crate) struct Iter<'g, T, C: IsElement<T>> { + /// The guard that protects the iteration. + guard: &'g Guard, + + /// Pointer from the predecessor to the current entry. + pred: &'g Atomic<Entry>, + + /// The current entry. + curr: Shared<'g, Entry>, + + /// The list head, needed for restarting iteration. + head: &'g Atomic<Entry>, + + /// Logically, we store a borrow of an instance of `T` and + /// use the type information from `C`. + _marker: PhantomData<(&'g T, C)>, +} + +/// An error that occurs during iteration over the list. +#[derive(PartialEq, Debug)] +pub(crate) enum IterError { + /// A concurrent thread modified the state of the list at the same place that this iterator + /// was inspecting. Subsequent iteration will restart from the beginning of the list. + Stalled, +} + +impl Default for Entry { + /// Returns the empty entry. + fn default() -> Self { + Self { + next: Atomic::null(), + } + } +} + +impl Entry { + /// Marks this entry as deleted, deferring the actual deallocation to a later iteration. + /// + /// # Safety + /// + /// The entry should be a member of a linked list, and it should not have been deleted. 
+ /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C` + /// is the associated helper for the linked list. + pub(crate) unsafe fn delete(&self, guard: &Guard) { + self.next.fetch_or(1, Release, guard); + } +} + +impl<T, C: IsElement<T>> List<T, C> { + /// Returns a new, empty linked list. + pub(crate) fn new() -> Self { + Self { + head: Atomic::null(), + _marker: PhantomData, + } + } + + /// Inserts `entry` into the head of the list. + /// + /// # Safety + /// + /// You should guarantee that: + /// + /// - `container` is not null + /// - `container` is immovable, e.g. inside an `Owned` + /// - the same `Entry` is not inserted more than once + /// - the inserted object will be removed before the list is dropped + pub(crate) unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) { + // Insert right after head, i.e. at the beginning of the list. + let to = &self.head; + // Get the intrusively stored Entry of the new element to insert. + let entry: &Entry = C::entry_of(container.deref()); + // Make a Shared ptr to that Entry. + let entry_ptr = Shared::from(entry as *const _); + // Read the current successor of where we want to insert. + let mut next = to.load(Relaxed, guard); + + loop { + // Set the Entry of the to-be-inserted element to point to the previous successor of + // `to`. + entry.next.store(next, Relaxed); + match to.compare_exchange_weak(next, entry_ptr, Release, Relaxed, guard) { + Ok(_) => break, + // We lost the race or weak CAS failed spuriously. Update the successor and try + // again. + Err(err) => next = err.current, + } + } + } + + /// Returns an iterator over all objects. + /// + /// # Caveat + /// + /// Every object that is inserted at the moment this function is called and persists at least + /// until the end of iteration will be returned. Since this iterator traverses a lock-free + /// linked list that may be concurrently modified, some additional caveats apply: + /// + /// 1. If a new object is inserted during iteration, it may or may not be returned. + /// 2. If an object is deleted during iteration, it may or may not be returned. + /// 3. The iteration may be aborted when it lost in a race condition. In this case, the winning + /// thread will continue to iterate over the same list. + pub(crate) fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> { + Iter { + guard, + pred: &self.head, + curr: self.head.load(Acquire, guard), + head: &self.head, + _marker: PhantomData, + } + } +} + +impl<T, C: IsElement<T>> Drop for List<T, C> { + fn drop(&mut self) { + unsafe { + let guard = unprotected(); + let mut curr = self.head.load(Relaxed, guard); + while let Some(c) = curr.as_ref() { + let succ = c.next.load(Relaxed, guard); + // Verify that all elements have been removed from the list. + assert_eq!(succ.tag(), 1); + + C::finalize(curr.deref(), guard); + curr = succ; + } + } + } +} + +impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> { + type Item = Result<&'g T, IterError>; + + fn next(&mut self) -> Option<Self::Item> { + while let Some(c) = unsafe { self.curr.as_ref() } { + let succ = c.next.load(Acquire, self.guard); + + if succ.tag() == 1 { + // This entry was removed. Try unlinking it from the list. + let succ = succ.with_tag(0); + + // The tag should always be zero, because removing a node after a logically deleted + // node leaves the list in an invalid state. + debug_assert!(self.curr.tag() == 0); + + // Try to unlink `curr` from the list, and get the new value of `self.pred`. 
+ let succ = match self + .pred + .compare_exchange(self.curr, succ, Acquire, Acquire, self.guard) + { + Ok(_) => { + // We succeeded in unlinking `curr`, so we have to schedule + // deallocation. Deferred drop is okay, because `list.delete()` can only be + // called if `T: 'static`. + unsafe { + C::finalize(self.curr.deref(), self.guard); + } + + // `succ` is the new value of `self.pred`. + succ + } + Err(e) => { + // `e.current` is the current value of `self.pred`. + e.current + } + }; + + // If the predecessor node is already marked as deleted, we need to restart from + // `head`. + if succ.tag() != 0 { + self.pred = self.head; + self.curr = self.head.load(Acquire, self.guard); + + return Some(Err(IterError::Stalled)); + } + + // Move over the removed by only advancing `curr`, not `pred`. + self.curr = succ; + continue; + } + + // Move one step forward. + self.pred = &c.next; + self.curr = succ; + + return Some(Ok(unsafe { C::element_of(c) })); + } + + // We reached the end of the list. + None + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod tests { + use super::*; + use crate::{Collector, Owned}; + use crossbeam_utils::thread; + use std::sync::Barrier; + + impl IsElement<Entry> for Entry { + fn entry_of(entry: &Entry) -> &Entry { + entry + } + + unsafe fn element_of(entry: &Entry) -> &Entry { + entry + } + + unsafe fn finalize(entry: &Entry, guard: &Guard) { + guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)); + } + } + + /// Checks whether the list retains inserted elements + /// and returns them in the correct order. + #[test] + fn insert() { + let collector = Collector::new(); + let handle = collector.register(); + let guard = handle.pin(); + + let l: List<Entry> = List::new(); + + let e1 = Owned::new(Entry::default()).into_shared(&guard); + let e2 = Owned::new(Entry::default()).into_shared(&guard); + let e3 = Owned::new(Entry::default()).into_shared(&guard); + + unsafe { + l.insert(e1, &guard); + l.insert(e2, &guard); + l.insert(e3, &guard); + } + + let mut iter = l.iter(&guard); + let maybe_e3 = iter.next(); + assert!(maybe_e3.is_some()); + assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); + let maybe_e2 = iter.next(); + assert!(maybe_e2.is_some()); + assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw()); + let maybe_e1 = iter.next(); + assert!(maybe_e1.is_some()); + assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); + assert!(iter.next().is_none()); + + unsafe { + e1.as_ref().unwrap().delete(&guard); + e2.as_ref().unwrap().delete(&guard); + e3.as_ref().unwrap().delete(&guard); + } + } + + /// Checks whether elements can be removed from the list and whether + /// the correct elements are removed. 
+ #[test] + fn delete() { + let collector = Collector::new(); + let handle = collector.register(); + let guard = handle.pin(); + + let l: List<Entry> = List::new(); + + let e1 = Owned::new(Entry::default()).into_shared(&guard); + let e2 = Owned::new(Entry::default()).into_shared(&guard); + let e3 = Owned::new(Entry::default()).into_shared(&guard); + unsafe { + l.insert(e1, &guard); + l.insert(e2, &guard); + l.insert(e3, &guard); + e2.as_ref().unwrap().delete(&guard); + } + + let mut iter = l.iter(&guard); + let maybe_e3 = iter.next(); + assert!(maybe_e3.is_some()); + assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); + let maybe_e1 = iter.next(); + assert!(maybe_e1.is_some()); + assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); + assert!(iter.next().is_none()); + + unsafe { + e1.as_ref().unwrap().delete(&guard); + e3.as_ref().unwrap().delete(&guard); + } + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } + + const THREADS: usize = 8; + const ITERS: usize = 512; + + /// Contends the list on insert and delete operations to make sure they can run concurrently. + #[test] + fn insert_delete_multi() { + let collector = Collector::new(); + + let l: List<Entry> = List::new(); + let b = Barrier::new(THREADS); + + thread::scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + b.wait(); + + let handle = collector.register(); + let guard: Guard = handle.pin(); + let mut v = Vec::with_capacity(ITERS); + + for _ in 0..ITERS { + let e = Owned::new(Entry::default()).into_shared(&guard); + v.push(e); + unsafe { + l.insert(e, &guard); + } + } + + for e in v { + unsafe { + e.as_ref().unwrap().delete(&guard); + } + } + }); + } + }) + .unwrap(); + + let handle = collector.register(); + let guard = handle.pin(); + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } + + /// Contends the list on iteration to make sure that it can be iterated over concurrently. + #[test] + fn iter_multi() { + let collector = Collector::new(); + + let l: List<Entry> = List::new(); + let b = Barrier::new(THREADS); + + thread::scope(|s| { + for _ in 0..THREADS { + s.spawn(|_| { + b.wait(); + + let handle = collector.register(); + let guard: Guard = handle.pin(); + let mut v = Vec::with_capacity(ITERS); + + for _ in 0..ITERS { + let e = Owned::new(Entry::default()).into_shared(&guard); + v.push(e); + unsafe { + l.insert(e, &guard); + } + } + + let mut iter = l.iter(&guard); + for _ in 0..ITERS { + assert!(iter.next().is_some()); + } + + for e in v { + unsafe { + e.as_ref().unwrap().delete(&guard); + } + } + }); + } + }) + .unwrap(); + + let handle = collector.register(); + let guard = handle.pin(); + + let mut iter = l.iter(&guard); + assert!(iter.next().is_none()); + } +} diff --git a/third_party/rust/crossbeam-epoch/src/sync/mod.rs b/third_party/rust/crossbeam-epoch/src/sync/mod.rs new file mode 100644 index 0000000000..08981be257 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/sync/mod.rs @@ -0,0 +1,7 @@ +//! Synchronization primitives. + +pub(crate) mod list; +#[cfg(feature = "std")] +#[cfg(not(crossbeam_loom))] +pub(crate) mod once_lock; +pub(crate) mod queue; diff --git a/third_party/rust/crossbeam-epoch/src/sync/once_lock.rs b/third_party/rust/crossbeam-epoch/src/sync/once_lock.rs new file mode 100644 index 0000000000..c1fefc96cc --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/sync/once_lock.rs @@ -0,0 +1,103 @@ +// Based on unstable std::sync::OnceLock. 
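+// This copy pairs a `std::sync::Once` with an `AtomicBool` fast path and keeps the value
+// in an `UnsafeCell<MaybeUninit<T>>`, which is written at most once.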
+// +// Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs + +use core::cell::UnsafeCell; +use core::mem::MaybeUninit; +use core::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Once; + +pub(crate) struct OnceLock<T> { + once: Once, + // Once::is_completed requires Rust 1.43, so use this to track of whether they have been initialized. + is_initialized: AtomicBool, + value: UnsafeCell<MaybeUninit<T>>, + // Unlike std::sync::OnceLock, we don't need PhantomData here because + // we don't use #[may_dangle]. +} + +unsafe impl<T: Sync + Send> Sync for OnceLock<T> {} +unsafe impl<T: Send> Send for OnceLock<T> {} + +impl<T> OnceLock<T> { + /// Creates a new empty cell. + #[must_use] + pub(crate) const fn new() -> Self { + Self { + once: Once::new(), + is_initialized: AtomicBool::new(false), + value: UnsafeCell::new(MaybeUninit::uninit()), + } + } + + /// Gets the contents of the cell, initializing it with `f` if the cell + /// was empty. + /// + /// Many threads may call `get_or_init` concurrently with different + /// initializing functions, but it is guaranteed that only one function + /// will be executed. + /// + /// # Panics + /// + /// If `f` panics, the panic is propagated to the caller, and the cell + /// remains uninitialized. + /// + /// It is an error to reentrantly initialize the cell from `f`. The + /// exact outcome is unspecified. Current implementation deadlocks, but + /// this may be changed to a panic in the future. + pub(crate) fn get_or_init<F>(&self, f: F) -> &T + where + F: FnOnce() -> T, + { + // Fast path check + if self.is_initialized() { + // SAFETY: The inner value has been initialized + return unsafe { self.get_unchecked() }; + } + self.initialize(f); + + debug_assert!(self.is_initialized()); + + // SAFETY: The inner value has been initialized + unsafe { self.get_unchecked() } + } + + #[inline] + fn is_initialized(&self) -> bool { + self.is_initialized.load(Ordering::Acquire) + } + + #[cold] + fn initialize<F>(&self, f: F) + where + F: FnOnce() -> T, + { + let slot = self.value.get().cast::<T>(); + let is_initialized = &self.is_initialized; + + self.once.call_once(|| { + let value = f(); + unsafe { + slot.write(value); + } + is_initialized.store(true, Ordering::Release); + }); + } + + /// # Safety + /// + /// The value must be initialized + unsafe fn get_unchecked(&self) -> &T { + debug_assert!(self.is_initialized()); + &*self.value.get().cast::<T>() + } +} + +impl<T> Drop for OnceLock<T> { + fn drop(&mut self) { + if self.is_initialized() { + // SAFETY: The inner value has been initialized + unsafe { self.value.get().cast::<T>().drop_in_place() }; + } + } +} diff --git a/third_party/rust/crossbeam-epoch/src/sync/queue.rs b/third_party/rust/crossbeam-epoch/src/sync/queue.rs new file mode 100644 index 0000000000..9500438819 --- /dev/null +++ b/third_party/rust/crossbeam-epoch/src/sync/queue.rs @@ -0,0 +1,469 @@ +//! Michael-Scott lock-free queue. +//! +//! Usable with any number of producers and consumers. +//! +//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue +//! Algorithms. PODC 1996. <http://dl.acm.org/citation.cfm?id=248106> +//! +//! Simon Doherty, Lindsay Groves, Victor Luchangco, and Mark Moir. 2004b. Formal Verification of a +//! Practical Lock-Free Queue Algorithm. 
<https://doi.org/10.1007/978-3-540-30232-2_7> + +use core::mem::MaybeUninit; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +use crossbeam_utils::CachePadded; + +use crate::{unprotected, Atomic, Guard, Owned, Shared}; + +// The representation here is a singly-linked list, with a sentinel node at the front. In general +// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or +// all `Blocked` (requests for data from blocked threads). +#[derive(Debug)] +pub(crate) struct Queue<T> { + head: CachePadded<Atomic<Node<T>>>, + tail: CachePadded<Atomic<Node<T>>>, +} + +struct Node<T> { + /// The slot in which a value of type `T` can be stored. + /// + /// The type of `data` is `MaybeUninit<T>` because a `Node<T>` doesn't always contain a `T`. + /// For example, the sentinel node in a queue never contains a value: its slot is always empty. + /// Other nodes start their life with a push operation and contain a value until it gets popped + /// out. After that such empty nodes get added to the collector for destruction. + data: MaybeUninit<T>, + + next: Atomic<Node<T>>, +} + +// Any particular `T` should never be accessed concurrently, so no need for `Sync`. +unsafe impl<T: Send> Sync for Queue<T> {} +unsafe impl<T: Send> Send for Queue<T> {} + +impl<T> Queue<T> { + /// Create a new, empty queue. + pub(crate) fn new() -> Queue<T> { + let q = Queue { + head: CachePadded::new(Atomic::null()), + tail: CachePadded::new(Atomic::null()), + }; + let sentinel = Owned::new(Node { + data: MaybeUninit::uninit(), + next: Atomic::null(), + }); + unsafe { + let guard = unprotected(); + let sentinel = sentinel.into_shared(guard); + q.head.store(sentinel, Relaxed); + q.tail.store(sentinel, Relaxed); + q + } + } + + /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on + /// success. The queue's `tail` pointer may be updated. + #[inline(always)] + fn push_internal( + &self, + onto: Shared<'_, Node<T>>, + new: Shared<'_, Node<T>>, + guard: &Guard, + ) -> bool { + // is `onto` the actual tail? + let o = unsafe { onto.deref() }; + let next = o.next.load(Acquire, guard); + if unsafe { next.as_ref().is_some() } { + // if not, try to "help" by moving the tail pointer forward + let _ = self + .tail + .compare_exchange(onto, next, Release, Relaxed, guard); + false + } else { + // looks like the actual tail; attempt to link in `n` + let result = o + .next + .compare_exchange(Shared::null(), new, Release, Relaxed, guard) + .is_ok(); + if result { + // try to move the tail pointer forward + let _ = self + .tail + .compare_exchange(onto, new, Release, Relaxed, guard); + } + result + } + } + + /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`. + pub(crate) fn push(&self, t: T, guard: &Guard) { + let new = Owned::new(Node { + data: MaybeUninit::new(t), + next: Atomic::null(), + }); + let new = Owned::into_shared(new, guard); + + loop { + // We push onto the tail, so we'll start optimistically by looking there first. + let tail = self.tail.load(Acquire, guard); + + // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed. + if self.push_internal(tail, new, guard) { + break; + } + } + } + + /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop. 
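+    ///
+    /// This is a single attempt of the Michael-Scott dequeue: read `head` and `head.next`;
+    /// if `next` is null the queue is empty, otherwise CAS `head` forward to `next`, help
+    /// advance a lagging `tail`, defer destruction of the old sentinel, and read the value
+    /// out of the node that became the new sentinel.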
+ #[inline(always)] + fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> { + let head = self.head.load(Acquire, guard); + let h = unsafe { head.deref() }; + let next = h.next.load(Acquire, guard); + match unsafe { next.as_ref() } { + Some(n) => unsafe { + self.head + .compare_exchange(head, next, Release, Relaxed, guard) + .map(|_| { + let tail = self.tail.load(Relaxed, guard); + // Advance the tail so that we don't retire a pointer to a reachable node. + if head == tail { + let _ = self + .tail + .compare_exchange(tail, next, Release, Relaxed, guard); + } + guard.defer_destroy(head); + // TODO: Replace with MaybeUninit::read when api is stable + Some(n.data.as_ptr().read()) + }) + .map_err(|_| ()) + }, + None => Ok(None), + } + } + + /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue + /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop. + #[inline(always)] + fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()> + where + T: Sync, + F: Fn(&T) -> bool, + { + let head = self.head.load(Acquire, guard); + let h = unsafe { head.deref() }; + let next = h.next.load(Acquire, guard); + match unsafe { next.as_ref() } { + Some(n) if condition(unsafe { &*n.data.as_ptr() }) => unsafe { + self.head + .compare_exchange(head, next, Release, Relaxed, guard) + .map(|_| { + let tail = self.tail.load(Relaxed, guard); + // Advance the tail so that we don't retire a pointer to a reachable node. + if head == tail { + let _ = self + .tail + .compare_exchange(tail, next, Release, Relaxed, guard); + } + guard.defer_destroy(head); + Some(n.data.as_ptr().read()) + }) + .map_err(|_| ()) + }, + None | Some(_) => Ok(None), + } + } + + /// Attempts to dequeue from the front. + /// + /// Returns `None` if the queue is observed to be empty. + pub(crate) fn try_pop(&self, guard: &Guard) -> Option<T> { + loop { + if let Ok(head) = self.pop_internal(guard) { + return head; + } + } + } + + /// Attempts to dequeue from the front, if the item satisfies the given condition. + /// + /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given + /// condition. + pub(crate) fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T> + where + T: Sync, + F: Fn(&T) -> bool, + { + loop { + if let Ok(head) = self.pop_if_internal(&condition, guard) { + return head; + } + } + } +} + +impl<T> Drop for Queue<T> { + fn drop(&mut self) { + unsafe { + let guard = unprotected(); + + while self.try_pop(guard).is_some() {} + + // Destroy the remaining sentinel node. 
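+            // `&mut self` gives exclusive access and the loop above has already popped
+            // every data node, so `head` now points only at the sentinel, which can be
+            // reclaimed immediately through `into_owned` under the unprotected guard.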
+ let sentinel = self.head.load(Relaxed, guard); + drop(sentinel.into_owned()); + } + } +} + +#[cfg(all(test, not(crossbeam_loom)))] +mod test { + use super::*; + use crate::pin; + use crossbeam_utils::thread; + + struct Queue<T> { + queue: super::Queue<T>, + } + + impl<T> Queue<T> { + pub(crate) fn new() -> Queue<T> { + Queue { + queue: super::Queue::new(), + } + } + + pub(crate) fn push(&self, t: T) { + let guard = &pin(); + self.queue.push(t, guard); + } + + pub(crate) fn is_empty(&self) -> bool { + let guard = &pin(); + let head = self.queue.head.load(Acquire, guard); + let h = unsafe { head.deref() }; + h.next.load(Acquire, guard).is_null() + } + + pub(crate) fn try_pop(&self) -> Option<T> { + let guard = &pin(); + self.queue.try_pop(guard) + } + + pub(crate) fn pop(&self) -> T { + loop { + match self.try_pop() { + None => continue, + Some(t) => return t, + } + } + } + } + + #[cfg(miri)] + const CONC_COUNT: i64 = 1000; + #[cfg(not(miri))] + const CONC_COUNT: i64 = 1000000; + + #[test] + fn push_try_pop_1() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + q.push(37); + assert!(!q.is_empty()); + assert_eq!(q.try_pop(), Some(37)); + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_2() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + q.push(37); + q.push(48); + assert_eq!(q.try_pop(), Some(37)); + assert!(!q.is_empty()); + assert_eq!(q.try_pop(), Some(48)); + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_many_seq() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + for i in 0..200 { + q.push(i) + } + assert!(!q.is_empty()); + for i in 0..200 { + assert_eq!(q.try_pop(), Some(i)); + } + assert!(q.is_empty()); + } + + #[test] + fn push_pop_1() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + q.push(37); + assert!(!q.is_empty()); + assert_eq!(q.pop(), 37); + assert!(q.is_empty()); + } + + #[test] + fn push_pop_2() { + let q: Queue<i64> = Queue::new(); + q.push(37); + q.push(48); + assert_eq!(q.pop(), 37); + assert_eq!(q.pop(), 48); + } + + #[test] + fn push_pop_many_seq() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + for i in 0..200 { + q.push(i) + } + assert!(!q.is_empty()); + for i in 0..200 { + assert_eq!(q.pop(), i); + } + assert!(q.is_empty()); + } + + #[test] + fn push_try_pop_many_spsc() { + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + + thread::scope(|scope| { + scope.spawn(|_| { + let mut next = 0; + + while next < CONC_COUNT { + if let Some(elem) = q.try_pop() { + assert_eq!(elem, next); + next += 1; + } + } + }); + + for i in 0..CONC_COUNT { + q.push(i) + } + }) + .unwrap(); + } + + #[test] + fn push_try_pop_many_spmc() { + fn recv(_t: i32, q: &Queue<i64>) { + let mut cur = -1; + for _i in 0..CONC_COUNT { + if let Some(elem) = q.try_pop() { + assert!(elem > cur); + cur = elem; + + if cur == CONC_COUNT - 1 { + break; + } + } + } + } + + let q: Queue<i64> = Queue::new(); + assert!(q.is_empty()); + thread::scope(|scope| { + for i in 0..3 { + let q = &q; + scope.spawn(move |_| recv(i, q)); + } + + scope.spawn(|_| { + for i in 0..CONC_COUNT { + q.push(i); + } + }); + }) + .unwrap(); + } + + #[test] + fn push_try_pop_many_mpmc() { + enum LR { + Left(i64), + Right(i64), + } + + let q: Queue<LR> = Queue::new(); + assert!(q.is_empty()); + + thread::scope(|scope| { + for _t in 0..2 { + scope.spawn(|_| { + for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Left(i)) + } + }); + scope.spawn(|_| { + for i in CONC_COUNT - 1..CONC_COUNT { + q.push(LR::Right(i)) + } + }); + 
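+                // Consumer: drain whatever the two producers above pushed, keeping Left
+                // and Right values in separate vectors so that per-producer FIFO order can
+                // be checked against the sorted copies below.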
scope.spawn(|_| { + let mut vl = vec![]; + let mut vr = vec![]; + for _i in 0..CONC_COUNT { + match q.try_pop() { + Some(LR::Left(x)) => vl.push(x), + Some(LR::Right(x)) => vr.push(x), + _ => {} + } + } + + let mut vl2 = vl.clone(); + let mut vr2 = vr.clone(); + vl2.sort_unstable(); + vr2.sort_unstable(); + + assert_eq!(vl, vl2); + assert_eq!(vr, vr2); + }); + } + }) + .unwrap(); + } + + #[test] + fn push_pop_many_spsc() { + let q: Queue<i64> = Queue::new(); + + thread::scope(|scope| { + scope.spawn(|_| { + let mut next = 0; + while next < CONC_COUNT { + assert_eq!(q.pop(), next); + next += 1; + } + }); + + for i in 0..CONC_COUNT { + q.push(i) + } + }) + .unwrap(); + assert!(q.is_empty()); + } + + #[test] + fn is_empty_dont_pop() { + let q: Queue<i64> = Queue::new(); + q.push(20); + q.push(20); + assert!(!q.is_empty()); + assert!(!q.is_empty()); + assert!(q.try_pop().is_some()); + } +} diff --git a/third_party/rust/crossbeam-epoch/tests/loom.rs b/third_party/rust/crossbeam-epoch/tests/loom.rs new file mode 100644 index 0000000000..4e56acdbca --- /dev/null +++ b/third_party/rust/crossbeam-epoch/tests/loom.rs @@ -0,0 +1,157 @@ +#![cfg(crossbeam_loom)] + +use crossbeam_epoch as epoch; +use loom_crate as loom; + +use epoch::*; +use epoch::{Atomic, Owned}; +use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release}; +use loom::sync::Arc; +use loom::thread::spawn; +use std::mem::ManuallyDrop; +use std::ptr; + +#[test] +fn it_works() { + loom::model(|| { + let collector = Collector::new(); + let item: Atomic<String> = Atomic::from(Owned::new(String::from("boom"))); + let item2 = item.clone(); + let collector2 = collector.clone(); + let guard = collector.register().pin(); + + let jh = loom::thread::spawn(move || { + let guard = collector2.register().pin(); + guard.defer(move || { + // this isn't really safe, since other threads may still have pointers to the + // value, but in this limited test scenario it's okay, since we know the test won't + // access item after all the pins are released. + let mut item = unsafe { item2.into_owned() }; + // mutate it as a second measure to make sure the assert_eq below would fail + item.retain(|c| c == 'o'); + drop(item); + }); + }); + + let item = item.load(Ordering::SeqCst, &guard); + // we pinned strictly before the call to defer_destroy, + // so item cannot have been dropped yet + assert_eq!(*unsafe { item.deref() }, "boom"); + drop(guard); + + jh.join().unwrap(); + + drop(collector); + }) +} + +#[test] +fn treiber_stack() { + /// Treiber's lock-free stack. + /// + /// Usable with any number of producers and consumers. + #[derive(Debug)] + pub struct TreiberStack<T> { + head: Atomic<Node<T>>, + } + + #[derive(Debug)] + struct Node<T> { + data: ManuallyDrop<T>, + next: Atomic<Node<T>>, + } + + impl<T> TreiberStack<T> { + /// Creates a new, empty stack. + pub fn new() -> TreiberStack<T> { + TreiberStack { + head: Atomic::null(), + } + } + + /// Pushes a value on top of the stack. + pub fn push(&self, t: T) { + let mut n = Owned::new(Node { + data: ManuallyDrop::new(t), + next: Atomic::null(), + }); + + let guard = epoch::pin(); + + loop { + let head = self.head.load(Relaxed, &guard); + n.next.store(head, Relaxed); + + match self + .head + .compare_exchange(head, n, Release, Relaxed, &guard) + { + Ok(_) => break, + Err(e) => n = e.new, + } + } + } + + /// Attempts to pop the top element from the stack. + /// + /// Returns `None` if the stack is empty. 
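+        ///
+        /// Pops by loading `head` and trying to CAS it to `head.next`; on success the old
+        /// head node is handed to `defer_destroy` and its value is moved out with
+        /// `ptr::read`.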
+ pub fn pop(&self) -> Option<T> { + let guard = epoch::pin(); + loop { + let head = self.head.load(Acquire, &guard); + + match unsafe { head.as_ref() } { + Some(h) => { + let next = h.next.load(Relaxed, &guard); + + if self + .head + .compare_exchange(head, next, Relaxed, Relaxed, &guard) + .is_ok() + { + unsafe { + guard.defer_destroy(head); + return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data))); + } + } + } + None => return None, + } + } + } + + /// Returns `true` if the stack is empty. + pub fn is_empty(&self) -> bool { + let guard = epoch::pin(); + self.head.load(Acquire, &guard).is_null() + } + } + + impl<T> Drop for TreiberStack<T> { + fn drop(&mut self) { + while self.pop().is_some() {} + } + } + + loom::model(|| { + let stack1 = Arc::new(TreiberStack::new()); + let stack2 = Arc::clone(&stack1); + + // use 5 since it's greater than the 4 used for the sanitize feature + let jh = spawn(move || { + for i in 0..5 { + stack2.push(i); + assert!(stack2.pop().is_some()); + } + }); + + for i in 0..5 { + stack1.push(i); + assert!(stack1.pop().is_some()); + } + + jh.join().unwrap(); + assert!(stack1.pop().is_none()); + assert!(stack1.is_empty()); + }); +} |