diff options
Diffstat (limited to 'third_party/rust/triple_buffer')
-rw-r--r-- | third_party/rust/triple_buffer/.cargo-checksum.json | 1 | ||||
-rw-r--r-- | third_party/rust/triple_buffer/CHANGELOG.md | 254 | ||||
-rw-r--r-- | third_party/rust/triple_buffer/Cargo.toml | 35 | ||||
-rw-r--r-- | third_party/rust/triple_buffer/LICENSE | 373 | ||||
-rw-r--r-- | third_party/rust/triple_buffer/README.md | 199 | ||||
-rw-r--r-- | third_party/rust/triple_buffer/src/lib.rs | 1092 |
6 files changed, 1954 insertions, 0 deletions
diff --git a/third_party/rust/triple_buffer/.cargo-checksum.json b/third_party/rust/triple_buffer/.cargo-checksum.json new file mode 100644 index 0000000000..1772a37d88 --- /dev/null +++ b/third_party/rust/triple_buffer/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"878a0261b1281e00769cbec862fc6bd37a72280099ec1dd5504fdef12745b0b6","Cargo.toml":"0620bdb8ee9a324fa6c49c56802a2ee1a2d5a59b325b635ac314243d82507959","LICENSE":"4b89d4518bd135ab4ee154a7bce722246b57a98c3d7efc1a09409898160c2bd1","README.md":"662e345ee3319bb82a79afb6dd136c5c956007d35b9a2962cd1d743ee321c7c5","src/lib.rs":"eccdd71723a03ffb8a59e9f9740d408512f44f83d839724bfcb0c630149f140d"},"package":"06577fa2229f6eff69f06ba2e08a27458f32f87a7985abb5047d7bd2e0006512"}
\ No newline at end of file diff --git a/third_party/rust/triple_buffer/CHANGELOG.md b/third_party/rust/triple_buffer/CHANGELOG.md new file mode 100644 index 0000000000..42f0538e24 --- /dev/null +++ b/third_party/rust/triple_buffer/CHANGELOG.md @@ -0,0 +1,254 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + + +## [Unreleased] + +_No unreleased changes in the pipeline at the moment._ + + +## [5.0.5] - 2020-07-05 + +### Changed + +- Use only cache-padded instead of the full crossbeam-utils crate +- Clean up CI config and cache Rust toolchain there + + +## [5.0.4] - 2020-02-10 + +### Added + +- Add a changelog to the repository. + +### Changed + +- Deduplicate CI configuration some more. + +### Fixed + +- Drop now-unnecessary manual `rustfmt` configuration. +- Avoid false sharing of back-buffer information. + + +## [5.0.3] - 2020-02-07 + +### Changed + +- Clean up and deduplicate GitHub Actions configuration. +- Tune down concurrent test speed to reduce CI false positives. + + +## [5.0.2] - 2020-01-29 + +### Changed + +- Move continuous integration to GitHub Actions. + + +## [5.0.1] - 2019-11-07 + +### Fixed + +- Update to current version of dependencies. + + +## [5.0.0] - 2019-04-12 + +### Changed + +- Bump travis CI configuration to Ubuntu Xenial. +- Bump minimal supported Rust version to 1.34.0. + +### Fixed + +- Don't use an `usize` for buffer indices where an `u8` will suffice. +- Improve Rust API guidelines compliance. + + +## [4.0.1] - 2018-12-31 + +### Fixed + +- Display `raw` feature documentation on docs.rs. + + +## [4.0.0] - 2018-12-18 + +### Changed + +- Migrate to Rust 2018. +- Bump minimal supported Rust version to 1.31.0. + +### Fixed + +- Update to current version of dependencies. 
+- Start using Clippy and integrate it into continuous integration. +- Re-apply `rustfmt` coding style (was not in CI at the time...). + + +## [3.0.1] - 2018-08-27 + +### Fixed + +- Make `testbench` a dev-dependency, as it's only used for tests and benchmarks. + + +## [3.0.0] - 2018-08-27 + +### Changed + +- Buffers are now padded to the size of a cache line to reduce false sharing. +- Bump minimal supported Rust version to 1.26.0. + +### Fixed + +- Make `testbench` version requirement more explicit. + + +## [2.0.0] - 2018-02-11 + +### Changed + +- Switch license to MPLv2, which is a better match to Rust's static linking + philosophy than LGPL. + + +## [1.1.1] - 2017-11-19 + +### Fixed + +- Fix my understanding of Cargo features & make the `raw` feature actually work. + + +## [1.1.0] - 2017-11-18 + +### Added + +- Allow in-place writes on the input and output side, at the cost of stronger + synchronization barriers, through use of the `raw` Cargo feature. + +### Fixed + +- Do not require a `Clone` bound on the inner data. + + +## [1.0.0] - 2017-11-10 + +### Changed + +- Simplify component naming convention, e.g. `TripleBufferInput` -> `Input`. + + +## [0.3.4] - 2017-06-25 + +### Changed + +- Use `testbench::RaceCell` as an improved form of data race detection in tests. + +### Fixed + +- Do not require a `PartialEq` bound on the inner data. + + +## [0.3.3] - 2017-06-15 + +### Changed + +- Tune down concurrent test speed to reduce CI false positives. + + +## [0.3.2] - 2017-06-15 + +### Changed + +- Tune down concurrent test speed to reduce CI false positives. + + +## [0.3.1] - 2017-06-15 + +### Changed + +- Tune down concurrent test speed to reduce CI false positives. + + +## [0.3.0] - 2017-06-14 + +### Added + +- Introduce Travis CI continuous integration. + +### Fixed + +- Use CI to clarify minimal supported Rust version (currently 1.12.0). + + +## [0.2.4] - 2017-04-04 + +### Changed + +- Use `testbench` crate for concurrent testing and benchmarking. 
+ + +## [0.2.3] - 2017-03-24 + +### Changed + +- More detailed comparison with other synchronization primitives in README. + +### Fixed + +- Adopt `rustfmt` coding style. + + +## [0.2.2] - 2017-03-20 + +### Changed + +- Reduce reliance on Acquire-Release synchronization. + + +## [0.2.1] - 2017-03-11 + +### Changed + +- Make README a bit more spambot-proof. + + +## [0.2.0] - 2017-03-11 + +### Added + +- First tagged release of triple-buffer. + + + +[Unreleased]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.5...HEAD +[5.0.5]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.4...v5.0.5 +[5.0.4]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.3...v5.0.4 +[5.0.3]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.2...v5.0.3 +[5.0.2]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.1...v5.0.2 +[5.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.0...v5.0.1 +[5.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.1...v5.0.0 +[4.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.0...v4.0.1 +[4.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.1...v4.0.0 +[3.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.0...v3.0.1 +[3.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v2.0.0...v3.0.0 +[2.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.1...v2.0.0 +[1.1.1]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.0...v1.1.1 +[1.1.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.4...v1.0.0 +[0.3.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.3...v0.3.4 +[0.3.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.2...v0.3.3 +[0.3.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.1...v0.3.2 +[0.3.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.4...v0.3.0 
+[0.2.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.3...v0.2.4 +[0.2.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.2...v0.2.3 +[0.2.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.1...v0.2.2 +[0.2.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/HadrienG2/triple-buffer/releases/tag/v0.2.0 diff --git a/third_party/rust/triple_buffer/Cargo.toml b/third_party/rust/triple_buffer/Cargo.toml new file mode 100644 index 0000000000..1436dbb354 --- /dev/null +++ b/third_party/rust/triple_buffer/Cargo.toml @@ -0,0 +1,35 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "triple_buffer" +version = "5.0.5" +authors = ["Hadrien G. 
<knights_of_ni@gmx.com>"] +description = "An implementation of triple buffering, useful for sharing frequently updated data between threads" +documentation = "https://docs.rs/triple_buffer/" +readme = "README.md" +keywords = ["synchronization", "spsc", "multithreading", "non-blocking", "wait-free"] +categories = ["algorithms", "asynchronous", "concurrency", "data-structures"] +license = "MPL-2.0" +repository = "https://github.com/HadrienG2/triple-buffer" +[package.metadata.docs.rs] +all-features = true +[dependencies.cache-padded] +version = "1.1" +[dev-dependencies.testbench] +version = "0.7" + +[features] +raw = [] +[badges.maintenance] +status = "passively-maintained" diff --git a/third_party/rust/triple_buffer/LICENSE b/third_party/rust/triple_buffer/LICENSE new file mode 100644 index 0000000000..fa0086a952 --- /dev/null +++ b/third_party/rust/triple_buffer/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. 
"Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. 
+ +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0.
\ No newline at end of file diff --git a/third_party/rust/triple_buffer/README.md b/third_party/rust/triple_buffer/README.md new file mode 100644 index 0000000000..c03e052693 --- /dev/null +++ b/third_party/rust/triple_buffer/README.md @@ -0,0 +1,199 @@ +# Triple buffering in Rust + +[![On crates.io](https://img.shields.io/crates/v/triple_buffer.svg)](https://crates.io/crates/triple_buffer) +[![On docs.rs](https://docs.rs/triple_buffer/badge.svg)](https://docs.rs/triple_buffer/) +[![Continuous Integration](https://github.com/HadrienG2/triple-buffer/workflows/Continuous%20Integration/badge.svg)](https://github.com/HadrienG2/triple-buffer/actions?query=workflow%3A%22Continuous+Integration%22) +![Requires rustc 1.34+](https://img.shields.io/badge/rustc-1.34+-red.svg) + + +## What is this? + +This is an implementation of triple buffering written in Rust. You may find it +useful for the following class of thread synchronization problems: + +- There is one producer thread and one consumer thread +- The producer wants to update a shared memory value periodically +- The consumer wants to access the latest update from the producer at any time + +The simplest way to use it is as follows: + +```rust +// Create a triple buffer: +let buf = TripleBuffer::new(0); + +// Split it into an input and output interface, to be respectively sent to +// the producer thread and the consumer thread: +let (mut buf_input, mut buf_output) = buf.split(); + +// The producer can move a value into the buffer at any time +buf_input.write(42); + +// The consumer can access the latest value from the producer at any time +let latest_value_ref = buf_output.read(); +assert_eq!(*latest_value_ref, 42); +``` + +In situations where moving the original value away and being unable to +modify it after the fact is too costly, such as if creating a new value +involves dynamic memory allocation, you can opt into the lower-level "raw" +interface, which allows you to access the buffer's data in place and +precisely 
control when updates are propagated. + +This data access method is more error-prone and comes at a small performance +cost, which is why you will need to enable it explicitly using the "raw" +[cargo feature](http://doc.crates.io/manifest.html#usage-in-end-products). + +```rust +// Create and split a triple buffer +use triple_buffer::TripleBuffer; +let buf = TripleBuffer::new(String::with_capacity(42)); +let (mut buf_input, mut buf_output) = buf.split(); + +// Mutate the input buffer in place +{ + // Acquire a reference to the input buffer + let raw_input = buf_input.raw_input_buffer(); + + // In general, you don't know what's inside of the buffer, so you should + // always reset the value before use (this is a type-specific process). + raw_input.clear(); + + // Perform an in-place update + raw_input.push_str("Hello, "); +} + +// Publish the input buffer update +buf_input.raw_publish(); + +// Manually fetch the buffer update from the consumer interface +buf_output.raw_update(); + +// Acquire a mutable reference to the output buffer +let raw_output = buf_output.raw_output_buffer(); + +// Post-process the output value before use +raw_output.push_str("world!"); +``` + + +## Give me details! How does it compare to alternatives? + +Compared to a mutex: + +- Only works in single-producer, single-consumer scenarios +- Is nonblocking, and more precisely bounded wait-free. Concurrent accesses will + be slowed down by cache contention, but no deadlock, livelock, or thread + scheduling induced slowdown is possible. +- Allows the producer and consumer to work simultaneously +- Uses a lot more memory (3x payload + 3x bytes vs 1x payload + 1 bool) +- Does not allow in-place updates, as the producer and consumer do not access + the same memory location +- Should be slower if updates are rare and in-place updates are much more + efficient than moves, faster otherwise. 
+ +Compared to the read-copy-update (RCU) primitive from the Linux kernel: + +- Only works in single-producer, single-consumer scenarios +- Has higher dirty read overhead on relaxed-memory architectures (ARM, POWER...) +- Does not require accounting for reader "grace periods": once the reader has + gotten access to the latest value, the synchronization transaction is over +- Does not use the inefficient compare-and-swap hardware primitive on update +- Does not suffer from the ABA problem, allowing much simpler code +- Allocates memory on initialization only, rather than on every update +- May use more memory (3x payload + 3x bytes vs 1x pointer + amount of + payloads and refcounts that depends on the readout and update pattern) +- Should be slower if updates are rare, faster if updates are frequent + +Compared to sending the updates on a message queue: + +- Only works in single-producer, single-consumer scenarios (queues can work in + other scenarios, although the implementations are much less efficient) +- Consumer only has access to the latest state, not the previous ones +- Consumer does not *need* to get through every previous state +- Is nonblocking AND uses bounded amounts of memory (with queues, it's a choice) +- Can transmit information in a single move, rather than two +- Should be faster for any compatible use case + +In short, triple buffering is what you're after in scenarios where a shared +memory location is updated frequently by a single writer, read by a single +reader who only wants the latest version, and you can spare some RAM. + +- If you need multiple producers, look somewhere else +- If you need multiple consumers, you may be interested in my related "SPMC + buffer" work, which basically extends triple buffering to multiple consumers +- If you can't tolerate the RAM overhead or want to update the data in place, + try a Mutex instead (or possibly an RWLock) +- If the shared value is updated very rarely (e.g. 
every second), try an RCU +- If the consumer must get every update, try a message queue + + +## How do I know your unsafe lock-free code is working? + +By running the tests, of course! Which is unfortunately currently harder than +I'd like it to be. + +First of all, we have sequential tests, which are very thorough but obviously +do not check the lock-free/synchronization part. You run them as follows: + + $ cargo test && cargo test --features raw + +Then we have a concurrent test where a reader thread continuously observes the +values from a rate-limited writer thread, and makes sure that he can see every +single update without any incorrect value slipping in the middle. + +This test is more important, but it is also harder to run because one must first +check some assumptions: + +- The testing host must have at least 2 physical CPU cores to test all possible + race conditions +- No other code should be eating CPU in the background. Including other tests. +- As the proper writing rate is system-dependent, what is configured in this + test may not be appropriate for your machine. + +Taking this and the relatively long run time (~10-20 s) into account, this test +is ignored by default. + +Finally, we have benchmarks, which allow you to test how well the code is +performing on your machine. Because cargo bench has not yet landed in Stable +Rust, these benchmarks masquerade as tests, which make them a bit unpleasant to +run. I apologize for the inconvenience. + +To run the concurrent test and the benchmarks, make sure no one is eating CPU in +the background and do: + + $ cargo test --release -- --ignored --nocapture --test-threads=1 + +(As before, you can also test with `--features raw`) + +Here is a guide to interpreting the benchmark results: + +* `clean_read` measures the triple buffer readout time when the data has not + changed. It should be extremely fast (a couple of CPU clock cycles). 
+* `write` measures the amount of time it takes to write data in the triple
+  buffer when no one is reading.
+* `write_and_dirty_read` performs a write as before, immediately followed by a
+  sequential read. To get the dirty read performance, subtract the write time
+  from that result. Writes and dirty read should take comparable time.
+* `concurrent_write` measures the write performance when a reader is
+  continuously reading. Expect significantly worse performance: lock-free
+  techniques can help against contention, but are not a panacea.
+* `concurrent_read` measures the read performance when a writer is continuously
+  writing. Again, a significant hit is to be expected.
+
+On an Intel Xeon E5-1620 v3 @ 3.50GHz, typical results are as follows:
+
+* Write: 7.8 ns
+* Clean read: 1.8 ns
+* Dirty read: 9.3 ns
+* Concurrent write: 45 ns
+* Concurrent read: 126 ns
+
+
+## License
+
+This crate is distributed under the terms of the MPLv2 license. See the LICENSE
+file for details.
+
+More relaxed licensing (Apache, MIT, BSD...) may also be negotiated, in
+exchange for a financial contribution. Contact me for details at
+knights_of_ni AT gmx DOTCOM.
diff --git a/third_party/rust/triple_buffer/src/lib.rs b/third_party/rust/triple_buffer/src/lib.rs
new file mode 100644
index 0000000000..4d4bbf8c38
--- /dev/null
+++ b/third_party/rust/triple_buffer/src/lib.rs
@@ -0,0 +1,1092 @@
+//! Triple buffering in Rust
+//!
+//! In this crate, we propose a Rust implementation of triple buffering. This is
+//! a non-blocking thread synchronization mechanism that can be used when a
+//! single producer thread is frequently updating a shared data block, and a
+//! single consumer thread wants to be able to read the latest available version
+//! of the shared data whenever it feels like it.
+//!
+//! # Examples
+//!
+//! For many use cases, you can use the default interface, designed for maximal
+//! ergonomics and synchronization performance, which is based on moving values
+//! 
into the buffer and subsequently accessing them via shared references: +//! +//! ``` +//! // Create a triple buffer +//! use triple_buffer::TripleBuffer; +//! let buf = TripleBuffer::new(0); +//! +//! // Split it into an input and output interface, to be respectively sent to +//! // the producer thread and the consumer thread +//! let (mut buf_input, mut buf_output) = buf.split(); +//! +//! // The producer can move a value into the buffer at any time +//! buf_input.write(42); +//! +//! // The consumer can access the latest value from the producer at any time +//! let latest_value_ref = buf_output.read(); +//! assert_eq!(*latest_value_ref, 42); +//! ``` +//! +//! In situations where moving the original value away and being unable to +//! modify it after the fact is too costly, such as if creating a new value +//! involves dynamic memory allocation, you can opt into the lower-level "raw" +//! interface, which allows you to access the buffer's data in place and +//! precisely control when updates are propagated. +//! +//! This data access method is more error-prone and comes at a small performance +//! cost, which is why you will need to enable it explicitly using the "raw" +//! [cargo feature](http://doc.crates.io/manifest.html#usage-in-end-products). +//! +//! ``` +//! # #[cfg(feature = "raw")] +//! # { +//! // Create and split a triple buffer +//! use triple_buffer::TripleBuffer; +//! let buf = TripleBuffer::new(String::with_capacity(42)); +//! let (mut buf_input, mut buf_output) = buf.split(); +//! +//! // Mutate the input buffer in place +//! { +//! // Acquire a reference to the input buffer +//! let raw_input = buf_input.raw_input_buffer(); +//! +//! // In general, you don't know what's inside of the buffer, so you should +//! // always reset the value before use (this is a type-specific process). +//! raw_input.clear(); +//! +//! // Perform an in-place update +//! raw_input.push_str("Hello, "); +//! } +//! +//! // Publish the input buffer update +//! 
buf_input.raw_publish(); +//! +//! // Manually fetch the buffer update from the consumer interface +//! buf_output.raw_update(); +//! +//! // Acquire a mutable reference to the output buffer +//! let raw_output = buf_output.raw_output_buffer(); +//! +//! // Post-process the output value before use +//! raw_output.push_str("world!"); +//! # } +//! ``` + +#![deny(missing_debug_implementations, missing_docs)] + +use cache_padded::CachePadded; + +use std::{ + cell::UnsafeCell, + sync::{ + atomic::{AtomicU8, Ordering}, + Arc, + }, +}; + +/// A triple buffer, useful for nonblocking and thread-safe data sharing +/// +/// A triple buffer is a single-producer single-consumer nonblocking +/// communication channel which behaves like a shared variable: the producer +/// submits regular updates, and the consumer accesses the latest available +/// value whenever it feels like it. +/// +/// The input and output fields of this struct are what producers and consumers +/// actually use in practice. They can safely be moved away from the +/// TripleBuffer struct after construction, and are further documented below. +/// +#[derive(Debug)] +pub struct TripleBuffer<T: Send> { + /// Input object used by producers to send updates + input: Input<T>, + + /// Output object used by consumers to read the current value + output: Output<T>, +} +// +impl<T: Clone + Send> TripleBuffer<T> { + /// Construct a triple buffer with a certain initial value + #[allow(clippy::needless_pass_by_value)] + pub fn new(initial: T) -> Self { + Self::new_impl(|| initial.clone()) + } +} +// +impl<T: Default + Send> Default for TripleBuffer<T> { + /// Construct a triple buffer with a default-constructed value + fn default() -> Self { + Self::new_impl(T::default) + } +} +// +impl<T: Send> TripleBuffer<T> { + /// Construct a triple buffer, using a functor to generate initial values + fn new_impl(mut generator: impl FnMut() -> T) -> Self { + // Start with the shared state... 
+ let shared_state = Arc::new(SharedState::new(|_i| generator(), 0)); + + // ...then construct the input and output structs + TripleBuffer { + input: Input { + shared: shared_state.clone(), + input_idx: 1, + }, + output: Output { + shared: shared_state, + output_idx: 2, + }, + } + } + + /// Extract input and output of the triple buffer + pub fn split(self) -> (Input<T>, Output<T>) { + (self.input, self.output) + } +} +// +// The Clone and PartialEq traits are used internally for testing. +// +#[doc(hidden)] +impl<T: Clone + Send> Clone for TripleBuffer<T> { + fn clone(&self) -> Self { + // Clone the shared state. This is safe because at this layer of the + // interface, one needs an Input/Output &mut to mutate the shared state. + let shared_state = Arc::new(unsafe { (*self.input.shared).clone() }); + + // ...then the input and output structs + TripleBuffer { + input: Input { + shared: shared_state.clone(), + input_idx: self.input.input_idx, + }, + output: Output { + shared: shared_state, + output_idx: self.output.output_idx, + }, + } + } +} +// +#[doc(hidden)] +impl<T: PartialEq + Send> PartialEq for TripleBuffer<T> { + fn eq(&self, other: &Self) -> bool { + // Compare the shared states. This is safe because at this layer of the + // interface, one needs an Input/Output &mut to mutate the shared state. + let shared_states_equal = unsafe { (*self.input.shared).eq(&*other.input.shared) }; + + // Compare the rest of the triple buffer states + shared_states_equal + && (self.input.input_idx == other.input.input_idx) + && (self.output.output_idx == other.output.output_idx) + } +} + +/// Producer interface to the triple buffer +/// +/// The producer of data can use this struct to submit updates to the triple +/// buffer whenever he likes. These updates are nonblocking: a collision between +/// the producer and the consumer will result in cache contention, but deadlocks +/// and scheduling-induced slowdowns cannot happen. 
+///
+#[derive(Debug)]
+pub struct Input<T: Send> {
+    /// Reference-counted shared state
+    shared: Arc<SharedState<T>>,
+
+    /// Index of the input buffer (which is private to the producer)
+    input_idx: BufferIndex,
+}
+//
+// Public interface
+impl<T: Send> Input<T> {
+    /// Write a new value into the triple buffer
+    pub fn write(&mut self, value: T) {
+        // Update the input buffer
+        *self.input_buffer() = value;
+
+        // Publish our update to the consumer
+        self.publish();
+    }
+
+    /// Check if the consumer has fetched our last submission yet
+    ///
+    /// This method is only intended for diagnostics purposes. Please do not let
+    /// it inform your decision of sending or not sending a value, as that would
+    /// effectively be building a very poor spinlock-based double buffer
+    /// implementation. If what you truly need is a double buffer, build
+    /// yourself a proper blocking one instead of wasting CPU time.
+    ///
+    pub fn consumed(&self) -> bool {
+        let back_info = self.shared.back_info.load(Ordering::Relaxed);
+        back_info & BACK_DIRTY_BIT == 0
+    }
+
+    /// Get raw access to the input buffer
+    ///
+    /// This advanced interface allows you to update the input buffer in place,
+    /// which can in some cases improve performance by avoiding the need to
+    /// create values of type T repeatedly when this is an expensive process.
+    ///
+    /// However, by opting into it, you force yourself to take into account
+    /// subtle implementation details which you could normally ignore.
+    ///
+    /// First, the buffer does not contain the last value that you sent (which
+    /// is now into the hands of the consumer). In fact, the consumer is allowed
+    /// to write complete garbage into it if it feels so inclined. All you can
+    /// safely assume is that it contains a valid value of type T.
+    ///
+    /// Second, we do not send updates automatically. You need to call
+    /// raw_publish() in order to propagate a buffer update to the consumer.
+ /// Alternative designs based on Drop were considered, but ultimately deemed + /// too magical for the target audience of this method. + /// + /// To use this method, you have to enable the crate's `raw` feature + #[cfg(any(feature = "raw", test))] + pub fn raw_input_buffer(&mut self) -> &mut T { + self.input_buffer() + } + + /// Unconditionally publish an update, checking for overwrites + /// + /// After updating the input buffer using raw_input_buffer(), you can use + /// this method to publish your updates to the consumer. It will send back + /// an output flag which tells you whether an unread value was overwritten. + /// + /// To use this method, you have to enable the crate's `raw` feature + #[cfg(any(feature = "raw", test))] + pub fn raw_publish(&mut self) -> bool { + self.publish() + } +} +// +// Internal interface +impl<T: Send> Input<T> { + /// Access the input buffer + /// + /// This is safe because the synchronization protocol ensures that we have + /// exclusive access to this buffer. + /// + fn input_buffer(&mut self) -> &mut T { + let input_ptr = self.shared.buffers[self.input_idx as usize].get(); + unsafe { &mut *input_ptr } + } + + /// Tell which memory ordering should be used for buffer swaps + /// + /// The right answer depends on whether the consumer is allowed to write + /// into the output buffer or not. If it can, then we must synchronize with + /// its writes. If not, we only need to propagate our own writes. 
+ /// + fn swap_ordering() -> Ordering { + if cfg!(feature = "raw") { + Ordering::AcqRel + } else { + Ordering::Release + } + } + + /// Publish an update, checking for overwrites (internal version) + fn publish(&mut self) -> bool { + // Swap the input buffer and the back buffer, setting the dirty bit + let former_back_info = self.shared.back_info.swap( + self.input_idx | BACK_DIRTY_BIT, + Self::swap_ordering(), // Propagate buffer updates as well + ); + + // The old back buffer becomes our new input buffer + self.input_idx = former_back_info & BACK_INDEX_MASK; + + // Tell whether we have overwritten unread data + former_back_info & BACK_DIRTY_BIT != 0 + } +} + +/// Consumer interface to the triple buffer +/// +/// The consumer of data can use this struct to access the latest published +/// update from the producer whenever he likes. Readout is nonblocking: a +/// collision between the producer and consumer will result in cache contention, +/// but deadlocks and scheduling-induced slowdowns cannot happen. +/// +#[derive(Debug)] +pub struct Output<T: Send> { + /// Reference-counted shared state + shared: Arc<SharedState<T>>, + + /// Index of the output buffer (which is private to the consumer) + output_idx: BufferIndex, +} +// +// Public interface +impl<T: Send> Output<T> { + /// Access the latest value from the triple buffer + pub fn read(&mut self) -> &T { + // Fetch updates from the producer + self.update(); + + // Give access to the output buffer + self.output_buffer() + } + + /// Tell whether a buffer update is incoming from the producer + /// + /// This method is only intended for diagnostics purposes. Please do not let + /// it inform your decision of reading a value or not, as that would + /// effectively be building a very poor spinlock-based double buffer + /// implementation. If what you truly need is a double buffer, build + /// yourself a proper blocking one instead of wasting CPU time. 
+    ///
+    pub fn updated(&self) -> bool {
+        let back_info = self.shared.back_info.load(Ordering::Relaxed);
+        back_info & BACK_DIRTY_BIT != 0
+    }
+
+    /// Get raw access to the output buffer
+    ///
+    /// This advanced interface allows you to modify the contents of the output
+    /// buffer, which can in some cases improve performance by avoiding the
+    /// need to create values of type T when this is an expensive process. One
+    /// possible application, for example, is to post-process values from the
+    /// producer.
+    ///
+    /// However, by opting into it, you force yourself to take into account
+    /// subtle implementation details which you could normally ignore.
+    ///
+    /// First, keep in mind that you can lose access to the current output
+    /// buffer any time read() or raw_update() is called, as it will be replaced
+    /// by an updated buffer from the producer automatically.
+    ///
+    /// Second, to reduce the potential for the aforementioned usage error, this
+    /// method does not update the output buffer automatically. You need to call
+    /// raw_update() in order to fetch buffer updates from the producer.
+    ///
+    /// To use this method, you have to enable the crate's `raw` feature
+    #[cfg(any(feature = "raw", test))]
+    pub fn raw_output_buffer(&mut self) -> &mut T {
+        self.output_buffer()
+    }
+
+    /// Update the output buffer
+    ///
+    /// Check for incoming updates from the producer, and if so, update our
+    /// output buffer to the latest data version. This operation will overwrite
+    /// any changes which you may have committed into the output buffer.
+ /// + /// Return a flag telling whether an update was carried out + /// + /// To use this method, you have to enable the crate's `raw` feature + #[cfg(any(feature = "raw", test))] + pub fn raw_update(&mut self) -> bool { + self.update() + } +} +// +// Internal interface +impl<T: Send> Output<T> { + /// Access the output buffer (internal version) + /// + /// This is safe because the synchronization protocol ensures that we have + /// exclusive access to this buffer. + /// + fn output_buffer(&mut self) -> &mut T { + let output_ptr = self.shared.buffers[self.output_idx as usize].get(); + unsafe { &mut *output_ptr } + } + + /// Tell which memory ordering should be used for buffer swaps + /// + /// The right answer depends on whether the client is allowed to write into + /// the output buffer or not. If it can, then we must propagate these writes + /// back to the producer. Otherwise, we only need to fetch producer updates. + /// + fn swap_ordering() -> Ordering { + if cfg!(feature = "raw") { + Ordering::AcqRel + } else { + Ordering::Acquire + } + } + + /// Check out incoming output buffer updates (internal version) + fn update(&mut self) -> bool { + // Access the shared state + let shared_state = &(*self.shared); + + // Check if an update is present in the back-buffer + let updated = self.updated(); + if updated { + // If so, exchange our output buffer with the back-buffer, thusly + // acquiring exclusive access to the old back buffer while giving + // the producer a new back-buffer to write to. 
+ let former_back_info = shared_state.back_info.swap( + self.output_idx, + Self::swap_ordering(), // Synchronize with buffer updates + ); + + // Make the old back-buffer our new output buffer + self.output_idx = former_back_info & BACK_INDEX_MASK; + } + + // Tell whether an update was carried out + updated + } +} + +/// Triple buffer shared state +/// +/// In a triple buffering communication protocol, the producer and consumer +/// share the following storage: +/// +/// - Three memory buffers suitable for storing the data at hand +/// - Information about the back-buffer: which buffer is the current back-buffer +/// and whether an update was published since the last readout. +/// +#[derive(Debug)] +struct SharedState<T: Send> { + /// Data storage buffers + buffers: [CachePadded<UnsafeCell<T>>; 3], + + /// Information about the current back-buffer state + back_info: CachePadded<AtomicBackBufferInfo>, +} +// +#[doc(hidden)] +impl<T: Send> SharedState<T> { + /// Given (a way to generate) buffer contents and the back info, build the shared state + fn new(mut gen_buf_data: impl FnMut(usize) -> T, back_info: BackBufferInfo) -> Self { + let mut make_buf = |i| -> CachePadded<UnsafeCell<T>> { + CachePadded::new(UnsafeCell::new(gen_buf_data(i))) + }; + Self { + buffers: [make_buf(0), make_buf(1), make_buf(2)], + back_info: CachePadded::new(AtomicBackBufferInfo::new(back_info)), + } + } +} +// +#[doc(hidden)] +impl<T: Clone + Send> SharedState<T> { + /// Cloning the shared state is unsafe because you must ensure that no one + /// is concurrently accessing it, since &self is enough for writing. + unsafe fn clone(&self) -> Self { + Self::new( + |i| (*self.buffers[i].get()).clone(), + self.back_info.load(Ordering::Relaxed), + ) + } +} +// +#[doc(hidden)] +impl<T: PartialEq + Send> SharedState<T> { + /// Equality is unsafe for the same reason as cloning: you must ensure that + /// no one is concurrently accessing the triple buffer to avoid data races. 
+ unsafe fn eq(&self, other: &Self) -> bool { + // Check whether the contents of all buffers are equal... + let buffers_equal = self + .buffers + .iter() + .zip(other.buffers.iter()) + .all(|tuple| -> bool { + let (cell1, cell2) = tuple; + *cell1.get() == *cell2.get() + }); + + // ...then check whether the rest of the shared state is equal + buffers_equal + && (self.back_info.load(Ordering::Relaxed) == other.back_info.load(Ordering::Relaxed)) + } +} +// +unsafe impl<T: Send> Sync for SharedState<T> {} + +/// Index types used for triple buffering +/// +/// These types are used to index into triple buffers. In addition, the +/// BackBufferInfo type is actually a bitfield, whose third bit (numerical +/// value: 4) is set to 1 to indicate that the producer published an update into +/// the back-buffer, and reset to 0 when the consumer fetches the update. +/// +type BufferIndex = u8; +type BackBufferInfo = BufferIndex; +// +type AtomicBackBufferInfo = AtomicU8; +const BACK_INDEX_MASK: u8 = 0b11; // Mask used to extract back-buffer index +const BACK_DIRTY_BIT: u8 = 0b100; // Bit set by producer to signal updates + +/// Unit tests +#[cfg(test)] +mod tests { + use super::{BufferIndex, SharedState, TripleBuffer, BACK_DIRTY_BIT, BACK_INDEX_MASK}; + + use std::{fmt::Debug, ops::Deref, sync::atomic::Ordering, thread, time::Duration}; + + use testbench::{ + self, + race_cell::{RaceCell, Racey}, + }; + + /// Check that triple buffers are properly initialized + #[test] + fn initial_state() { + // Let's create a triple buffer + let mut buf = TripleBuffer::new(42); + check_buf_state(&mut buf, false); + assert_eq!(*buf.output.read(), 42); + } + + /// Check that the shared state's unsafe equality operator works + #[test] + fn partial_eq_shared() { + // Let's create some dummy shared state + let dummy_state = SharedState::<u16>::new(|i| [111, 222, 333][i], 0b10); + + // Check that the dummy state is equal to itself + assert!(unsafe { dummy_state.eq(&dummy_state) }); + + // Check that 
it's not equal to a state where buffer contents differ + assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [114, 222, 333][i], 0b10)) }); + assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 225, 333][i], 0b10)) }); + assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 222, 336][i], 0b10)) }); + + // Check that it's not equal to a state where the back info differs + assert!(unsafe { + !dummy_state.eq(&SharedState::<u16>::new( + |i| [111, 222, 333][i], + BACK_DIRTY_BIT & 0b10, + )) + }); + assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 222, 333][i], 0b01)) }); + } + + /// Check that TripleBuffer's PartialEq impl works + #[test] + fn partial_eq() { + // Create a triple buffer + let buf = TripleBuffer::new("test"); + + // Check that it is equal to itself + assert_eq!(buf, buf); + + // Make another buffer with different contents. As buffer creation is + // deterministic, this should only have an impact on the shared state, + // but the buffers should nevertheless be considered different. 
+ let buf2 = TripleBuffer::new("taste"); + assert_eq!(buf.input.input_idx, buf2.input.input_idx); + assert_eq!(buf.output.output_idx, buf2.output.output_idx); + assert!(buf != buf2); + + // Check that changing either the input or output buffer index will + // also lead two TripleBuffers to be considered different (this test + // technically creates an invalid TripleBuffer state, but it's the only + // way to check that the PartialEq impl is exhaustive) + let mut buf3 = TripleBuffer::new("test"); + assert_eq!(buf, buf3); + let old_input_idx = buf3.input.input_idx; + buf3.input.input_idx = buf3.output.output_idx; + assert!(buf != buf3); + buf3.input.input_idx = old_input_idx; + buf3.output.output_idx = old_input_idx; + assert!(buf != buf3); + } + + /// Check that the shared state's unsafe clone operator works + #[test] + fn clone_shared() { + // Let's create some dummy shared state + let dummy_state = SharedState::<u8>::new(|i| [123, 231, 132][i], BACK_DIRTY_BIT & 0b01); + + // Now, try to clone it + let dummy_state_copy = unsafe { dummy_state.clone() }; + + // Check that the contents of the original state did not change + assert!(unsafe { + dummy_state.eq(&SharedState::<u8>::new( + |i| [123, 231, 132][i], + BACK_DIRTY_BIT & 0b01, + )) + }); + + // Check that the contents of the original and final state are identical + assert!(unsafe { dummy_state.eq(&dummy_state_copy) }); + } + + /// Check that TripleBuffer's Clone impl works + #[test] + fn clone() { + // Create a triple buffer + let mut buf = TripleBuffer::new(4.2); + + // Put it in a nontrivial state + unsafe { + *buf.input.shared.buffers[0].get() = 1.2; + *buf.input.shared.buffers[1].get() = 3.4; + *buf.input.shared.buffers[2].get() = 5.6; + } + buf.input + .shared + .back_info + .store(BACK_DIRTY_BIT & 0b01, Ordering::Relaxed); + buf.input.input_idx = 0b10; + buf.output.output_idx = 0b00; + + // Now clone it + let buf_clone = buf.clone(); + + // Check that the clone uses its own, separate shared data storage + 
assert_eq!( + as_ptr(&buf_clone.output.shared), + as_ptr(&buf_clone.output.shared) + ); + assert!(as_ptr(&buf_clone.input.shared) != as_ptr(&buf.input.shared)); + + // Check that it is identical from PartialEq's point of view + assert_eq!(buf, buf_clone); + + // Check that the contents of the original buffer did not change + unsafe { + assert_eq!(*buf.input.shared.buffers[0].get(), 1.2); + assert_eq!(*buf.input.shared.buffers[1].get(), 3.4); + assert_eq!(*buf.input.shared.buffers[2].get(), 5.6); + } + assert_eq!( + buf.input.shared.back_info.load(Ordering::Relaxed), + BACK_DIRTY_BIT & 0b01 + ); + assert_eq!(buf.input.input_idx, 0b10); + assert_eq!(buf.output.output_idx, 0b00); + } + + /// Check that the low-level publish/update primitives work + #[test] + fn swaps() { + // Create a new buffer, and a way to track any changes to it + let mut buf = TripleBuffer::new([123, 456]); + let old_buf = buf.clone(); + let old_input_idx = old_buf.input.input_idx; + let old_shared = &old_buf.input.shared; + let old_back_info = old_shared.back_info.load(Ordering::Relaxed); + let old_back_idx = old_back_info & BACK_INDEX_MASK; + let old_output_idx = old_buf.output.output_idx; + + // Check that updating from a clean state works + assert!(!buf.output.raw_update()); + assert_eq!(buf, old_buf); + check_buf_state(&mut buf, false); + + // Check that publishing from a clean state works + assert!(!buf.input.raw_publish()); + let mut expected_buf = old_buf.clone(); + expected_buf.input.input_idx = old_back_idx; + expected_buf + .input + .shared + .back_info + .store(old_input_idx | BACK_DIRTY_BIT, Ordering::Relaxed); + assert_eq!(buf, expected_buf); + check_buf_state(&mut buf, true); + + // Check that overwriting a dirty state works + assert!(buf.input.raw_publish()); + let mut expected_buf = old_buf.clone(); + expected_buf.input.input_idx = old_input_idx; + expected_buf + .input + .shared + .back_info + .store(old_back_idx | BACK_DIRTY_BIT, Ordering::Relaxed); + assert_eq!(buf, 
expected_buf); + check_buf_state(&mut buf, true); + + // Check that updating from a dirty state works + assert!(buf.output.raw_update()); + expected_buf.output.output_idx = old_back_idx; + expected_buf + .output + .shared + .back_info + .store(old_output_idx, Ordering::Relaxed); + assert_eq!(buf, expected_buf); + check_buf_state(&mut buf, false); + } + + /// Check that (sequentially) writing to a triple buffer works + #[test] + fn sequential_write() { + // Let's create a triple buffer + let mut buf = TripleBuffer::new(false); + + // Back up the initial buffer state + let old_buf = buf.clone(); + + // Perform a write + buf.input.write(true); + + // Check new implementation state + { + // Starting from the old buffer state... + let mut expected_buf = old_buf.clone(); + + // ...write the new value in and swap... + *expected_buf.input.raw_input_buffer() = true; + expected_buf.input.raw_publish(); + + // Nothing else should have changed + assert_eq!(buf, expected_buf); + check_buf_state(&mut buf, true); + } + } + + /// Check that (sequentially) reading from a triple buffer works + #[test] + fn sequential_read() { + // Let's create a triple buffer and write into it + let mut buf = TripleBuffer::new(1.0); + buf.input.write(4.2); + + // Test readout from dirty (freshly written) triple buffer + { + // Back up the initial buffer state + let old_buf = buf.clone(); + + // Read from the buffer + let result = *buf.output.read(); + + // Output value should be correct + assert_eq!(result, 4.2); + + // Result should be equivalent to carrying out an update + let mut expected_buf = old_buf.clone(); + assert!(expected_buf.output.raw_update()); + assert_eq!(buf, expected_buf); + check_buf_state(&mut buf, false); + } + + // Test readout from clean (unchanged) triple buffer + { + // Back up the initial buffer state + let old_buf = buf.clone(); + + // Read from the buffer + let result = *buf.output.read(); + + // Output value should be correct + assert_eq!(result, 4.2); + + // Buffer 
state should be unchanged + assert_eq!(buf, old_buf); + check_buf_state(&mut buf, false); + } + } + + /// Check that contended concurrent reads and writes work + #[test] + #[ignore] + fn contended_concurrent_read_write() { + // We will stress the infrastructure by performing this many writes + // as a reader continuously reads the latest value + const TEST_WRITE_COUNT: usize = 100_000_000; + + // This is the buffer that our reader and writer will share + let buf = TripleBuffer::new(RaceCell::new(0)); + let (mut buf_input, mut buf_output) = buf.split(); + + // Concurrently run a writer which increments a shared value in a loop, + // and a reader which makes sure that no unexpected value slips in. + let mut last_value = 0usize; + testbench::concurrent_test_2( + move || { + for value in 1..=TEST_WRITE_COUNT { + buf_input.write(RaceCell::new(value)); + } + }, + move || { + while last_value < TEST_WRITE_COUNT { + let new_racey_value = buf_output.read().get(); + match new_racey_value { + Racey::Consistent(new_value) => { + assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT)); + last_value = new_value; + } + Racey::Inconsistent => { + panic!("Inconsistent state exposed by the buffer!"); + } + } + } + }, + ); + } + + /// Check that uncontended concurrent reads and writes work + /// + /// **WARNING:** This test unfortunately needs to have timing-dependent + /// behaviour to do its job. 
If it fails for you, try the following: + /// + /// - Close running applications in the background + /// - Re-run the tests with only one OS thread (--test-threads=1) + /// - Increase the writer sleep period + /// + #[test] + #[ignore] + fn uncontended_concurrent_read_write() { + // We will stress the infrastructure by performing this many writes + // as a reader continuously reads the latest value + const TEST_WRITE_COUNT: usize = 625; + + // This is the buffer that our reader and writer will share + let buf = TripleBuffer::new(RaceCell::new(0)); + let (mut buf_input, mut buf_output) = buf.split(); + + // Concurrently run a writer which slowly increments a shared value, + // and a reader which checks that it can receive every update + let mut last_value = 0usize; + testbench::concurrent_test_2( + move || { + for value in 1..=TEST_WRITE_COUNT { + buf_input.write(RaceCell::new(value)); + thread::yield_now(); + thread::sleep(Duration::from_millis(32)); + } + }, + move || { + while last_value < TEST_WRITE_COUNT { + let new_racey_value = buf_output.read().get(); + match new_racey_value { + Racey::Consistent(new_value) => { + assert!((new_value >= last_value) && (new_value - last_value <= 1)); + last_value = new_value; + } + Racey::Inconsistent => { + panic!("Inconsistent state exposed by the buffer!"); + } + } + } + }, + ); + } + + /// When raw mode is enabled, the consumer is allowed to modify its bufffer, + /// which means that it will unknowingly send back data to the producer. + /// This creates new correctness requirements for the synchronization + /// protocol, which must be checked as well. 
+ #[test] + #[ignore] + #[cfg(feature = "raw")] + fn concurrent_bidirectional_exchange() { + // We will stress the infrastructure by performing this many writes + // as a reader continuously reads the latest value + const TEST_WRITE_COUNT: usize = 100_000_000; + + // This is the buffer that our reader and writer will share + let buf = TripleBuffer::new(RaceCell::new(0)); + let (mut buf_input, mut buf_output) = buf.split(); + + // Concurrently run a writer which increments a shared value in a loop, + // and a reader which makes sure that no unexpected value slips in. + testbench::concurrent_test_2( + move || { + for new_value in 1..=TEST_WRITE_COUNT { + match buf_input.raw_input_buffer().get() { + Racey::Consistent(curr_value) => { + assert!(curr_value <= TEST_WRITE_COUNT); + } + Racey::Inconsistent => { + panic!("Inconsistent state exposed by the buffer!"); + } + } + buf_input.write(RaceCell::new(new_value)); + } + }, + move || { + let mut last_value = 0usize; + while last_value < TEST_WRITE_COUNT { + match buf_output.raw_output_buffer().get() { + Racey::Consistent(new_value) => { + assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT)); + last_value = new_value; + } + Racey::Inconsistent => { + panic!("Inconsistent state exposed by the buffer!"); + } + } + if buf_output.updated() { + buf_output.raw_output_buffer().set(last_value / 2); + buf_output.raw_update(); + } + } + }, + ); + } + + /// Range check for triple buffer indexes + #[allow(unused_comparisons)] + fn index_in_range(idx: BufferIndex) -> bool { + (idx >= 0) & (idx <= 2) + } + + /// Get a pointer to the target of some reference (e.g. an &, an Arc...) 
+ fn as_ptr<P: Deref>(ref_like: &P) -> *const P::Target { + &(**ref_like) as *const _ + } + + /// Check the state of a buffer, and the effect of queries on it + fn check_buf_state<T>(buf: &mut TripleBuffer<T>, expected_dirty_bit: bool) + where + T: Clone + Debug + PartialEq + Send, + { + // Make a backup of the buffer's initial state + let initial_buf = buf.clone(); + + // Check that the input and output point to the same shared state + assert_eq!(as_ptr(&buf.input.shared), as_ptr(&buf.output.shared)); + + // Access the shared state and decode back-buffer information + let back_info = buf.input.shared.back_info.load(Ordering::Relaxed); + let back_idx = back_info & BACK_INDEX_MASK; + let back_buffer_dirty = back_info & BACK_DIRTY_BIT != 0; + + // Input-/output-/back-buffer indexes must be in range + assert!(index_in_range(buf.input.input_idx)); + assert!(index_in_range(buf.output.output_idx)); + assert!(index_in_range(back_idx)); + + // Input-/output-/back-buffer indexes must be distinct + assert!(buf.input.input_idx != buf.output.output_idx); + assert!(buf.input.input_idx != back_idx); + assert!(buf.output.output_idx != back_idx); + + // Back-buffer must have the expected dirty bit + assert_eq!(back_buffer_dirty, expected_dirty_bit); + + // Check that the "input buffer" query behaves as expected + assert_eq!( + as_ptr(&buf.input.raw_input_buffer()), + buf.input.shared.buffers[buf.input.input_idx as usize].get() + ); + assert_eq!(*buf, initial_buf); + + // Check that the "consumed" query behaves as expected + assert_eq!(!buf.input.consumed(), expected_dirty_bit); + assert_eq!(*buf, initial_buf); + + // Check that the output_buffer query works in the initial state + assert_eq!( + as_ptr(&buf.output.raw_output_buffer()), + buf.output.shared.buffers[buf.output.output_idx as usize].get() + ); + assert_eq!(*buf, initial_buf); + + // Check that the output buffer query works in the initial state + assert_eq!(buf.output.updated(), expected_dirty_bit); + assert_eq!(*buf, 
initial_buf); + } +} + +/// Performance benchmarks +/// +/// These benchmarks masquerading as tests are a stopgap solution until +/// benchmarking lands in Stable Rust. They should be compiled in release mode, +/// and run with only one OS thread. In addition, the default behaviour of +/// swallowing test output should obviously be suppressed. +/// +/// TL;DR: cargo test --release -- --ignored --nocapture --test-threads=1 +/// +/// TODO: Switch to standard Rust benchmarks once they are stable +/// +#[cfg(test)] +mod benchmarks { + use super::TripleBuffer; + use testbench; + + /// Benchmark for clean read performance + #[test] + #[ignore] + fn clean_read() { + // Create a buffer + let mut buf = TripleBuffer::new(0u32); + + // Benchmark clean reads + testbench::benchmark(2_500_000_000, || { + let read = *buf.output.read(); + assert!(read < u32::max_value()); + }); + } + + /// Benchmark for write performance + #[test] + #[ignore] + fn write() { + // Create a buffer + let mut buf = TripleBuffer::new(0u32); + + // Benchmark writes + let mut iter = 1u32; + testbench::benchmark(640_000_000, || { + buf.input.write(iter); + iter += 1; + }); + } + + /// Benchmark for write + dirty read performance + #[test] + #[ignore] + fn write_and_dirty_read() { + // Create a buffer + let mut buf = TripleBuffer::new(0u32); + + // Benchmark writes + dirty reads + let mut iter = 1u32; + testbench::benchmark(290_000_000u32, || { + buf.input.write(iter); + iter += 1; + let read = *buf.output.read(); + assert!(read < u32::max_value()); + }); + } + + /// Benchmark read performance under concurrent write pressure + #[test] + #[ignore] + fn concurrent_read() { + // Create a buffer + let buf = TripleBuffer::new(0u32); + let (mut buf_input, mut buf_output) = buf.split(); + + // Benchmark reads under concurrent write pressure + let mut counter = 0u32; + testbench::concurrent_benchmark( + 56_000_000u32, + move || { + let read = *buf_output.read(); + assert!(read < u32::max_value()); + }, + move || { 
+ buf_input.write(counter); + counter = (counter + 1) % u32::max_value(); + }, + ); + } + + /// Benchmark write performance under concurrent read pressure + #[test] + #[ignore] + fn concurrent_write() { + // Create a buffer + let buf = TripleBuffer::new(0u32); + let (mut buf_input, mut buf_output) = buf.split(); + + // Benchmark writes under concurrent read pressure + let mut iter = 1u32; + testbench::concurrent_benchmark( + 88_000_000u32, + move || { + buf_input.write(iter); + iter += 1; + }, + move || { + let read = *buf_output.read(); + assert!(read < u32::max_value()); + }, + ); + } +} |