summaryrefslogtreecommitdiffstats
path: root/third_party/rust/triple_buffer
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/rust/triple_buffer')
-rw-r--r--third_party/rust/triple_buffer/.cargo-checksum.json1
-rw-r--r--third_party/rust/triple_buffer/CHANGELOG.md280
-rw-r--r--third_party/rust/triple_buffer/Cargo.toml43
-rw-r--r--third_party/rust/triple_buffer/LICENSE373
-rw-r--r--third_party/rust/triple_buffer/README.md222
-rw-r--r--third_party/rust/triple_buffer/benches/benchmarks.rs80
-rw-r--r--third_party/rust/triple_buffer/src/lib.rs1008
7 files changed, 2007 insertions, 0 deletions
diff --git a/third_party/rust/triple_buffer/.cargo-checksum.json b/third_party/rust/triple_buffer/.cargo-checksum.json
new file mode 100644
index 0000000000..6909e12e29
--- /dev/null
+++ b/third_party/rust/triple_buffer/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"f5b061b67b5b4b4e2dc6ce129049c9fd5975700ed25a9e91905bedc8374301b8","Cargo.toml":"b0546f2f8310daf982602646f128dca5a695e248cfd30b610fed7dd25efdcd54","LICENSE":"4b89d4518bd135ab4ee154a7bce722246b57a98c3d7efc1a09409898160c2bd1","README.md":"a8a254f626f8903fb7c1446fc95c7b0d6bddfe535e089550f73e43fd7d30d026","benches/benchmarks.rs":"c7592c9d442ac61c34585f0e99e3dde941fe0a707ad1590d4ec4f16ec338f90b","src/lib.rs":"95d7a6c1e0033525dee8d1a0b4431b2a9c023debb18bc78132c5cb37feff19f1"},"package":"803966e5a8397a70d3d8111afa1597ba8381346d7de4720e9f539471d371a1a3"} \ No newline at end of file
diff --git a/third_party/rust/triple_buffer/CHANGELOG.md b/third_party/rust/triple_buffer/CHANGELOG.md
new file mode 100644
index 0000000000..c1ce42c13d
--- /dev/null
+++ b/third_party/rust/triple_buffer/CHANGELOG.md
@@ -0,0 +1,280 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+
+## [Unreleased]
+
+_No unreleased changes in the pipeline at the moment._
+
+
+## [5.0.6] - 2021-01-16
+
+### Added
+
+- As a result of the bugfix mentioned below, there is no performance motivation
+ to gate `raw` features behind a feature flag, so those features are now
+ available by default without a `raw_` prefix. Usage of the `raw_` prefix and
+ the `raw` feature flag is deprecated and these may be removed in a future
+  major release, but it does no harm to keep them indefinitely for now.
+
+### Changed
+
+- Benchmarks now use `criterion`, and have been significantly cleaned up along
+ the way. They are now more extensive and more reliable.
+- Moved MSRV to Rust 1.36 because we now use crossbeam for testing, which
+ requires that much. The crate itself should still support Rust 1.34 for now,
+ but we cannot test that it continues doing so...
+
+### Fixed
+
+- Removed a possibility of data race that was not observed on current hardware,
+ but could be triggered by future hardware or compiler evolutions. See
+ https://github.com/HadrienG2/triple-buffer/issues/14 .
+
+
+## [5.0.5] - 2020-07-05
+
+### Changed
+
+- Use only cache-padded instead of the full crossbeam-utils crate
+- Clean up CI config and cache Rust toolchain there
+
+
+## [5.0.4] - 2020-02-10
+
+### Added
+
+- Add a changelog to the repository.
+
+### Changed
+
+- Deduplicate CI configuration some more.
+
+### Fixed
+
+- Drop now-unnecessary manual `rustfmt` configuration.
+- Avoid false sharing of back-buffer information.
+
+
+## [5.0.3] - 2020-02-07
+
+### Changed
+
+- Clean up and deduplicate GitHub Actions configuration.
+- Tune down concurrent test speed to reduce CI false positives.
+
+
+## [5.0.2] - 2020-01-29
+
+### Changed
+
+- Move continuous integration to GitHub Actions.
+
+
+## [5.0.1] - 2019-11-07
+
+### Fixed
+
+- Update to current version of dependencies.
+
+
+## [5.0.0] - 2019-04-12
+
+### Changed
+
+- Bump travis CI configuration to Ubuntu Xenial.
+- Bump minimal supported Rust version to 1.34.0.
+
+### Fixed
+
+- Don't use an `usize` for buffer indices where an `u8` will suffice.
+- Improve Rust API guidelines compliance.
+
+
+## [4.0.1] - 2018-12-31
+
+### Fixed
+
+- Display `raw` feature documentation on docs.rs.
+
+
+## [4.0.0] - 2018-12-18
+
+### Changed
+
+- Migrate to Rust 2018.
+- Bump minimal supported Rust version to 1.31.0.
+
+### Fixed
+
+- Update to current version of dependencies.
+- Start using Clippy and integrate it into continuous integration.
+- Re-apply `rustfmt` coding style (was not in CI at the time...).
+
+
+## [3.0.1] - 2018-08-27
+
+### Fixed
+
+- Make `testbench` a dev-dependency, as it's only used for tests and benchmarks.
+
+
+## [3.0.0] - 2018-08-27
+
+### Changed
+
+- Buffers are now padded to the size of a cache line to reduce false sharing.
+- Bump minimal supported Rust version to 1.26.0.
+
+### Fixed
+
+- Make `testbench` version requirement more explicit.
+
+
+## [2.0.0] - 2018-02-11
+
+### Changed
+
+- Switch license to MPLv2, which is a better match to Rust's static linking
+ philosophy than LGPL.
+
+
+## [1.1.1] - 2017-11-19
+
+### Fixed
+
+- Fix my understanding of Cargo features & make the `raw` feature actually work.
+
+
+## [1.1.0] - 2017-11-18
+
+### Added
+
+- Allow in-place writes on the input and output side, at the cost of stronger
+ synchronization barriers, through use of the `raw` Cargo feature.
+
+### Fixed
+
+- Do not require a `Clone` bound on the inner data.
+
+
+## [1.0.0] - 2017-11-10
+
+### Changed
+
+- Simplify component naming convention, e.g. `TripleBufferInput` -> `Input`.
+
+
+## [0.3.4] - 2017-06-25
+
+### Changed
+
+- Use `testbench::RaceCell` as an improved form of data race detection in tests.
+
+### Fixed
+
+- Do not require a `PartialEq` bound on the inner data.
+
+
+## [0.3.3] - 2017-06-15
+
+### Changed
+
+- Tune down concurrent test speed to reduce CI false positives.
+
+
+## [0.3.2] - 2017-06-15
+
+### Changed
+
+- Tune down concurrent test speed to reduce CI false positives.
+
+
+## [0.3.1] - 2017-06-15
+
+### Changed
+
+- Tune down concurrent test speed to reduce CI false positives.
+
+
+## [0.3.0] - 2017-06-14
+
+### Added
+
+- Introduce Travis CI continuous integration.
+
+### Fixed
+
+- Use CI to clarify minimal supported Rust version (currently 1.12.0).
+
+
+## [0.2.4] - 2017-04-04
+
+### Changed
+
+- Use `testbench` crate for concurrent testing and benchmarking.
+
+
+## [0.2.3] - 2017-03-24
+
+### Changed
+
+- More detailed comparison with other synchronization primitives in README.
+
+### Fixed
+
+- Adopt `rustfmt` coding style.
+
+
+## [0.2.2] - 2017-03-20
+
+### Changed
+
+- Reduce reliance on Acquire-Release synchronization.
+
+
+## [0.2.1] - 2017-03-11
+
+### Changed
+
+- Make README a bit more spambot-proof.
+
+
+## [0.2.0] - 2017-03-11
+
+### Added
+
+- First tagged release of triple-buffer.
+
+
+
+[Unreleased]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.6...HEAD
+[5.0.6]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.5...v5.0.6
+[5.0.5]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.4...v5.0.5
+[5.0.4]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.3...v5.0.4
+[5.0.3]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.2...v5.0.3
+[5.0.2]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.1...v5.0.2
+[5.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v5.0.0...v5.0.1
+[5.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.1...v5.0.0
+[4.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v4.0.0...v4.0.1
+[4.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.1...v4.0.0
+[3.0.1]: https://github.com/HadrienG2/triple-buffer/compare/v3.0.0...v3.0.1
+[3.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v2.0.0...v3.0.0
+[2.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.1...v2.0.0
+[1.1.1]: https://github.com/HadrienG2/triple-buffer/compare/v1.1.0...v1.1.1
+[1.1.0]: https://github.com/HadrienG2/triple-buffer/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.4...v1.0.0
+[0.3.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.3...v0.3.4
+[0.3.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.2...v0.3.3
+[0.3.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.1...v0.3.2
+[0.3.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.3.0...v0.3.1
+[0.3.0]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.4...v0.3.0
+[0.2.4]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.3...v0.2.4
+[0.2.3]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.2...v0.2.3
+[0.2.2]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.1...v0.2.2
+[0.2.1]: https://github.com/HadrienG2/triple-buffer/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/HadrienG2/triple-buffer/releases/tag/v0.2.0
diff --git a/third_party/rust/triple_buffer/Cargo.toml b/third_party/rust/triple_buffer/Cargo.toml
new file mode 100644
index 0000000000..a019221a03
--- /dev/null
+++ b/third_party/rust/triple_buffer/Cargo.toml
@@ -0,0 +1,43 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "triple_buffer"
+version = "5.0.6"
+authors = ["Hadrien G. <knights_of_ni@gmx.com>"]
+description = "An implementation of triple buffering, useful for sharing frequently updated data between threads"
+documentation = "https://docs.rs/triple_buffer/"
+readme = "README.md"
+keywords = ["synchronization", "spsc", "multithreading", "non-blocking", "wait-free"]
+categories = ["algorithms", "asynchronous", "concurrency", "data-structures"]
+license = "MPL-2.0"
+repository = "https://github.com/HadrienG2/triple-buffer"
+
+[lib]
+bench = false
+
+[[bench]]
+name = "benchmarks"
+harness = false
+[dependencies.cache-padded]
+version = "1.1"
+[dev-dependencies.criterion]
+version = "0.3"
+
+[dev-dependencies.testbench]
+version = "0.8"
+
+[features]
+raw = []
+[badges.maintenance]
+status = "passively-maintained"
diff --git a/third_party/rust/triple_buffer/LICENSE b/third_party/rust/triple_buffer/LICENSE
new file mode 100644
index 0000000000..fa0086a952
--- /dev/null
+++ b/third_party/rust/triple_buffer/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0. \ No newline at end of file
diff --git a/third_party/rust/triple_buffer/README.md b/third_party/rust/triple_buffer/README.md
new file mode 100644
index 0000000000..512bd1d574
--- /dev/null
+++ b/third_party/rust/triple_buffer/README.md
@@ -0,0 +1,222 @@
+# Triple buffering in Rust
+
+[![On crates.io](https://img.shields.io/crates/v/triple_buffer.svg)](https://crates.io/crates/triple_buffer)
+[![On docs.rs](https://docs.rs/triple_buffer/badge.svg)](https://docs.rs/triple_buffer/)
+[![Continuous Integration](https://github.com/HadrienG2/triple-buffer/workflows/Continuous%20Integration/badge.svg)](https://github.com/HadrienG2/triple-buffer/actions?query=workflow%3A%22Continuous+Integration%22)
+![Requires rustc 1.36+](https://img.shields.io/badge/rustc-1.36+-red.svg)
+
+
+## What is this?
+
+This is an implementation of triple buffering written in Rust. You may find it
+useful for the following class of thread synchronization problems:
+
+- There is one producer thread and one consumer thread
+- The producer wants to update a shared memory value periodically
+- The consumer wants to access the latest update from the producer at any time
+
+The simplest way to use it is as follows:
+
+```rust
+// Create a triple buffer:
+let buf = TripleBuffer::new(0);
+
+// Split it into an input and output interface, to be respectively sent to
+// the producer thread and the consumer thread:
+let (mut buf_input, mut buf_output) = buf.split();
+
+// The producer can move a value into the buffer at any time
+buf_input.write(42);
+
+// The consumer can access the latest value from the producer at any time
+let latest_value_ref = buf_output.read();
+assert_eq!(*latest_value_ref, 42);
+```
+
+In situations where moving the original value away and being unable to
+modify it on the consumer's side is too costly, such as if creating a new
+value involves dynamic memory allocation, you can use a lower-level API
+which allows you to access the producer and consumer's buffers in place
+and to precisely control when updates are propagated:
+
+```rust
+// Create and split a triple buffer
+use triple_buffer::TripleBuffer;
+let buf = TripleBuffer::new(String::with_capacity(42));
+let (mut buf_input, mut buf_output) = buf.split();
+
+// Mutate the input buffer in place
+{
+ // Acquire a reference to the input buffer
+ let input = buf_input.input_buffer();
+
+ // In general, you don't know what's inside of the buffer, so you should
+ // always reset the value before use (this is a type-specific process).
+ input.clear();
+
+ // Perform an in-place update
+ input.push_str("Hello, ");
+}
+
+// Publish the above input buffer update
+buf_input.publish();
+
+// Manually fetch the buffer update from the consumer interface
+buf_output.update();
+
+// Acquire a mutable reference to the output buffer
+let output = buf_output.output_buffer();
+
+// Post-process the output value before use
+output.push_str("world!");
+```
+
+
+## Give me details! How does it compare to alternatives?
+
+Compared to a mutex:
+
+- Only works in single-producer, single-consumer scenarios
+- Is nonblocking, and more precisely bounded wait-free. Concurrent accesses will
+ be slowed down by cache contention, but no deadlock, livelock, or thread
+ scheduling induced slowdown is possible.
+- Allows the producer and consumer to work simultaneously
+- Uses a lot more memory (3x payload + 3x bytes vs 1x payload + 1 bool)
+- Does not allow in-place updates, as the producer and consumer do not access
+ the same memory location
+- Should be slower if updates are rare and in-place updates are much more
+ efficient than moves, comparable or faster otherwise.
+ * Mutexes and triple buffering have comparably low overhead on the happy path
+ (checking a flag), which is systematically taken when updates are rare. In
+ this scenario, in-place updates can give mutexes a performance advantage.
+ Where triple buffering shines is when a reader often collides with a writer,
+ which is handled very poorly by mutexes.
+
+Compared to the read-copy-update (RCU) primitive from the Linux kernel:
+
+- Only works in single-producer, single-consumer scenarios
+- Has higher dirty read overhead on relaxed-memory architectures (ARM, POWER...)
+- Does not require accounting for reader "grace periods": once the reader has
+ gotten access to the latest value, the synchronization transaction is over
+- Does not use the compare-and-swap hardware primitive on update, which is
+ inefficient by design as it forces its users to retry transactions in a loop.
+- Does not suffer from the ABA problem, allowing much simpler code
+- Allocates memory on initialization only, rather than on every update
+- May use more memory (3x payload + 3x bytes vs 1x pointer + amount of
+ payloads and refcounts that depends on the readout and update pattern)
+- Should be slower if updates are rare, faster if updates are frequent
+ * The RCU's happy reader path is slightly faster (no flag to check), but its
+ update procedure is much more involved and costly.
+
+Compared to sending the updates on a message queue:
+
+- Only works in single-producer, single-consumer scenarios (queues can work in
+ other scenarios, although the implementations are much less efficient)
+- Consumer only has access to the latest state, not the previous ones
+- Consumer does not *need* to get through every previous state
+- Is nonblocking AND uses bounded amounts of memory (with queues, it's a choice,
+ unless you use one of those evil queues that silently drop data when full)
+- Can transmit information in a single move, rather than two
+- Should be faster for any compatible use case.
+ * Queues force you to move data twice, once in, once out, which will incur a
+ significant cost for any nontrivial data. If the inner data requires
+ allocation, they force you to allocate for every transaction. By design,
+ they force you to store and go through every update, which is not useful
+ when you're only interested in the latest version of the data.
+
+In short, triple buffering is what you're after in scenarios where a shared
+memory location is updated frequently by a single writer, read by a single
+reader who only wants the latest version, and you can spare some RAM.
+
+- If you need multiple producers, look somewhere else
+- If you need multiple consumers, you may be interested in my related "SPMC
+ buffer" work, which basically extends triple buffering to multiple consumers
+- If you can't tolerate the RAM overhead or want to update the data in place,
+ try a Mutex instead (or possibly an RWLock)
+- If the shared value is updated very rarely (e.g. every second), try an RCU
+- If the consumer must get every update, try a message queue
+
+
+## How do I know your unsafe lock-free code is working?
+
+By running the tests, of course! Which is unfortunately currently harder than
+I'd like it to be.
+
+First of all, we have sequential tests, which are very thorough but obviously
+do not check the lock-free/synchronization part. You run them as follows:
+
+ $ cargo test
+
+Then we have concurrent tests where, for example, a reader thread continuously
+observes the values from a rate-limited writer thread, and makes sure that it
+can see every single update without any incorrect value slipping in the middle.
+
+These tests are more important, but also harder to run because one must first
+check some assumptions:
+
+- The testing host must have at least 2 physical CPU cores to test all possible
+ race conditions
+- No other code should be eating CPU in the background. Including other tests.
+- As the proper writing rate is system-dependent, what is configured in this
+ test may not be appropriate for your machine.
+- You must test in release mode, as compiler optimizations tend to create more
+ opportunities for race conditions.
+
+Taking this and the relatively long run time (~10-20 s) into account, the
+concurrent tests are ignored by default. To run them, make sure nothing is
+eating CPU in the background and do:
+
+ $ cargo test --release -- --ignored --nocapture --test-threads=1
+
+Finally, we have benchmarks, which allow you to test how well the code is
+performing on your machine. We are now using `criterion` for said benchmarks,
+which means that to run them, you can simply do:
+
+ $ cargo bench
+
+These benchmarks exercise the worst-case scenario of `u8` payloads, where
+synchronization overhead dominates as the cost of reading and writing the
+actual data is only 1 cycle. In real-world use cases, you will spend more time
+updating buffers and less time synchronizing them.
+
+However, due to the artificial nature of microbenchmarking, the benchmarks must
+exercise two scenarios which are respectively overly optimistic and overly
+pessimistic:
+
+1. In uncontended mode, the buffer input and output reside on the same CPU core,
+ which underestimates the overhead of transferring modified cache lines from
+ the L1 cache of the source CPU to that of the destination CPU.
+ * This is not as bad as it sounds, because you will pay this overhead no
+ matter what kind of thread synchronization primitive you use, so we're not
+ hiding `triple-buffer` specific overhead here. All you need to do is to
+ ensure that when comparing against another synchronization primitive, that
+ primitive is benchmarked in a similar way.
+2. In contended mode, the benchmarked half of the triple buffer is operating
+ under maximal load from the other half, which is much more busy than what is
+ actually going to be observed in real-world workloads.
+ * In this configuration, what you're essentially measuring is the performance
+ of your CPU's cache line locking protocol and inter-CPU core data
+ transfers under the shared data access pattern of `triple-buffer`.
+
+Therefore, consider these benchmarks' timings as orders of magnitude of the best
+and the worst that you can expect from `triple-buffer`, where actual performance
+will be somewhere in between these two numbers depending on your workload.
+
+On an Intel Core i3-3220 CPU @ 3.30GHz, typical results are as follows:
+
+* Clean read: 0.9 ns
+* Write: 6.9 ns
+* Write + dirty read: 19.6 ns
+* Dirty read (estimated): 12.7 ns
+* Contended write: 60.8 ns
+* Contended read: 59.2 ns
+
+
+## License
+
+This crate is distributed under the terms of the MPLv2 license. See the LICENSE
+file for details.
+
+More relaxed licensing (Apache, MIT, BSD...) may also be negotiated, in
+exchange for a financial contribution. Contact me for details at
+knights_of_ni AT gmx DOTCOM.
diff --git a/third_party/rust/triple_buffer/benches/benchmarks.rs b/third_party/rust/triple_buffer/benches/benchmarks.rs
new file mode 100644
index 0000000000..c97d1db19e
--- /dev/null
+++ b/third_party/rust/triple_buffer/benches/benchmarks.rs
@@ -0,0 +1,80 @@
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use triple_buffer::TripleBuffer;
+
+/// Criterion entry point: benchmarks triple buffer operations, both without
+/// contention and while the other half of the buffer is under continuous load.
+pub fn benchmark(c: &mut Criterion) {
+    // u8 payloads make the actual data reads/writes nearly free, so these
+    // benchmarks measure synchronization overhead (the worst-case scenario
+    // for this primitive, as discussed in the README).
+    let (mut input, mut output) = TripleBuffer::<u8>::default().split();
+
+    {
+        // Producer and consumer both driven from this thread, no concurrency
+        let mut uncontended = c.benchmark_group("uncontended");
+        uncontended.bench_function("read output", |b| b.iter(|| *output.output_buffer()));
+        uncontended.bench_function("clean update", |b| {
+            b.iter(|| {
+                output.update();
+            })
+        });
+        uncontended.bench_function("clean receive", |b| b.iter(|| *output.read()));
+        uncontended.bench_function("write input", |b| {
+            b.iter(|| {
+                *input.input_buffer() = black_box(0);
+            })
+        });
+        uncontended.bench_function("publish", |b| {
+            b.iter(|| {
+                input.publish();
+            })
+        });
+        uncontended.bench_function("send", |b| b.iter(|| input.write(black_box(0))));
+        uncontended.bench_function("publish + dirty update", |b| {
+            b.iter(|| {
+                input.publish();
+                output.update();
+            })
+        });
+        uncontended.bench_function("transmit", |b| {
+            b.iter(|| {
+                input.write(black_box(0));
+                *output.read()
+            })
+        });
+    }
+
+    {
+        // Producer-side operations while another thread continuously reads
+        let mut read_contended = c.benchmark_group("read contention");
+        testbench::run_under_contention(
+            || black_box(*output.read()),
+            || {
+                read_contended.bench_function("write input", |b| {
+                    b.iter(|| {
+                        *input.input_buffer() = black_box(0);
+                    })
+                });
+                read_contended.bench_function("publish", |b| {
+                    b.iter(|| {
+                        input.publish();
+                    })
+                });
+                read_contended.bench_function("send", |b| b.iter(|| input.write(black_box(0))));
+            },
+        );
+    }
+
+    {
+        // Consumer-side operations while another thread continuously writes
+        let mut write_contended = c.benchmark_group("write contention");
+        testbench::run_under_contention(
+            || input.write(black_box(0)),
+            || {
+                write_contended
+                    .bench_function("read output", |b| b.iter(|| *output.output_buffer()));
+                write_contended.bench_function("update", |b| {
+                    b.iter(|| {
+                        output.update();
+                    })
+                });
+                write_contended.bench_function("receive", |b| b.iter(|| *output.read()));
+            },
+        );
+    }
+}
+
+criterion_group!(benches, benchmark);
+criterion_main!(benches);
diff --git a/third_party/rust/triple_buffer/src/lib.rs b/third_party/rust/triple_buffer/src/lib.rs
new file mode 100644
index 0000000000..f7b9dc73ae
--- /dev/null
+++ b/third_party/rust/triple_buffer/src/lib.rs
@@ -0,0 +1,1008 @@
+//! Triple buffering in Rust
+//!
+//! In this crate, we propose a Rust implementation of triple buffering. This is
+//! a non-blocking thread synchronization mechanism that can be used when a
+//! single producer thread is frequently updating a shared data block, and a
+//! single consumer thread wants to be able to read the latest available version
+//! of the shared data whenever it feels like it.
+//!
+//! # Examples
+//!
+//! For many use cases, you can use the ergonomic write/read interface, where
+//! the producer moves values into the buffer and the consumer accesses the
+//! latest buffer by shared reference:
+//!
+//! ```
+//! // Create a triple buffer
+//! use triple_buffer::TripleBuffer;
+//! let buf = TripleBuffer::new(0);
+//!
+//! // Split it into an input and output interface, to be respectively sent to
+//! // the producer thread and the consumer thread
+//! let (mut buf_input, mut buf_output) = buf.split();
+//!
+//! // The producer can move a value into the buffer at any time
+//! buf_input.write(42);
+//!
+//! // The consumer can access the latest value from the producer at any time
+//! let latest_value_ref = buf_output.read();
+//! assert_eq!(*latest_value_ref, 42);
+//! ```
+//!
+//! In situations where moving the original value away and being unable to
+//! modify it on the consumer's side is too costly, such as if creating a new
+//! value involves dynamic memory allocation, you can use a lower-level API
+//! which allows you to access the producer and consumer's buffers in place
+//! and to precisely control when updates are propagated:
+//!
+//! ```
+//! // Create and split a triple buffer
+//! use triple_buffer::TripleBuffer;
+//! let buf = TripleBuffer::new(String::with_capacity(42));
+//! let (mut buf_input, mut buf_output) = buf.split();
+//!
+//! // Mutate the input buffer in place
+//! {
+//! // Acquire a reference to the input buffer
+//! let input = buf_input.input_buffer();
+//!
+//! // In general, you don't know what's inside of the buffer, so you should
+//! // always reset the value before use (this is a type-specific process).
+//! input.clear();
+//!
+//! // Perform an in-place update
+//! input.push_str("Hello, ");
+//! }
+//!
+//! // Publish the above input buffer update
+//! buf_input.publish();
+//!
+//! // Manually fetch the buffer update from the consumer interface
+//! buf_output.update();
+//!
+//! // Acquire a mutable reference to the output buffer
+//! let output = buf_output.output_buffer();
+//!
+//! // Post-process the output value before use
+//! output.push_str("world!");
+//! ```
+
+#![deny(missing_debug_implementations, missing_docs)]
+
+use cache_padded::CachePadded;
+
+use std::{
+ cell::UnsafeCell,
+ sync::{
+ atomic::{AtomicU8, Ordering},
+ Arc,
+ },
+};
+
+/// A triple buffer, useful for nonblocking and thread-safe data sharing
+///
+/// A triple buffer is a single-producer single-consumer nonblocking
+/// communication channel which behaves like a shared variable: the producer
+/// submits regular updates, and the consumer accesses the latest available
+/// value whenever it feels like it.
+///
+#[derive(Debug)]
+pub struct TripleBuffer<T: Send> {
+    /// Input object used by producers to send updates
+    input: Input<T>,
+
+    /// Output object used by consumers to read the current value
+    output: Output<T>,
+}
+//
+impl<T: Clone + Send> TripleBuffer<T> {
+    /// Construct a triple buffer with a certain initial value
+    ///
+    /// All three internal buffers are initialized to clones of that value.
+    //
+    // FIXME: After spending some time thinking about this further, I reached
+    //        the conclusion that clippy was right after all. But since this is
+    //        a breaking change, I'm keeping that for the next major release.
+    //
+    #[allow(clippy::needless_pass_by_value)]
+    pub fn new(initial: T) -> Self {
+        Self::new_impl(|| initial.clone())
+    }
+}
+//
+impl<T: Default + Send> Default for TripleBuffer<T> {
+    /// Construct a triple buffer with a default-constructed value
+    fn default() -> Self {
+        Self::new_impl(T::default)
+    }
+}
+//
+impl<T: Send> TripleBuffer<T> {
+    /// Construct a triple buffer, using a functor to generate initial values
+    fn new_impl(mut generator: impl FnMut() -> T) -> Self {
+        // Start with the shared state...
+        //
+        // Buffer 0 starts out as the back-buffer (back_info is 0, i.e. index
+        // 0 with the dirty bit cleared), buffer 1 as the producer's input
+        // buffer, and buffer 2 as the consumer's output buffer.
+        let shared_state = Arc::new(SharedState::new(|_i| generator(), 0));
+
+        // ...then construct the input and output structs
+        TripleBuffer {
+            input: Input {
+                shared: shared_state.clone(),
+                input_idx: 1,
+            },
+            output: Output {
+                shared: shared_state,
+                output_idx: 2,
+            },
+        }
+    }
+
+    /// Extract input and output of the triple buffer
+    //
+    // NOTE: Although it would be nicer to directly return `Input` and `Output`
+    //       from `new()`, the `split()` design gives some API evolution
+    //       headroom towards future allocation-free modes of operation where
+    //       the SharedState is a static variable, or a stack-allocated variable
+    //       used through scoped threads or other unsafe thread synchronization.
+    //
+    // See https://github.com/HadrienG2/triple-buffer/issues/8 .
+    //
+    pub fn split(self) -> (Input<T>, Output<T>) {
+        (self.input, self.output)
+    }
+}
+//
+// The Clone and PartialEq traits are used internally for testing and I don't
+// want to commit to supporting them publicly for now.
+//
+#[doc(hidden)]
+impl<T: Clone + Send> TripleBuffer<T> {
+    fn clone(&self) -> Self {
+        // SAFETY: cloning the shared state is safe because at this layer of
+        //         the interface, one needs an Input/Output &mut to mutate the
+        //         shared state, and we hold shared references to both halves.
+        let shared_state = Arc::new(unsafe { (*self.input.shared).clone() });
+
+        // ...then the input and output structs
+        TripleBuffer {
+            input: Input {
+                shared: shared_state.clone(),
+                input_idx: self.input.input_idx,
+            },
+            output: Output {
+                shared: shared_state,
+                output_idx: self.output.output_idx,
+            },
+        }
+    }
+}
+//
+#[doc(hidden)]
+impl<T: PartialEq + Send> PartialEq for TripleBuffer<T> {
+    fn eq(&self, other: &Self) -> bool {
+        // SAFETY: comparing the shared states is safe because at this layer of
+        //         the interface, one needs an Input/Output &mut to mutate the
+        //         shared state.
+        let shared_states_equal = unsafe { (*self.input.shared).eq(&*other.input.shared) };
+
+        // Compare the rest of the triple buffer states
+        shared_states_equal
+            && (self.input.input_idx == other.input.input_idx)
+            && (self.output.output_idx == other.output.output_idx)
+    }
+}
+
+/// Producer interface to the triple buffer
+///
+/// The producer of data can use this struct to submit updates to the triple
+/// buffer whenever he likes. These updates are nonblocking: a collision between
+/// the producer and the consumer will result in cache contention, but deadlocks
+/// and scheduling-induced slowdowns cannot happen.
+///
+#[derive(Debug)]
+pub struct Input<T: Send> {
+    /// Reference-counted shared state
+    shared: Arc<SharedState<T>>,
+
+    /// Index of the input buffer (which is private to the producer)
+    input_idx: BufferIndex,
+}
+//
+// Public interface
+impl<T: Send> Input<T> {
+    /// Write a new value into the triple buffer
+    ///
+    /// This is `input_buffer()` followed by `publish()` in one step; note
+    /// that the overwrite indication returned by `publish()` is discarded.
+    pub fn write(&mut self, value: T) {
+        // Update the input buffer
+        *self.input_buffer() = value;
+
+        // Publish our update to the consumer
+        self.publish();
+    }
+
+    /// Check if the consumer has fetched our last submission yet
+    ///
+    /// This method is only intended for diagnostics purposes. Please do not let
+    /// it inform your decision of sending or not sending a value, as that would
+    /// effectively be building a very poor spinlock-based double buffer
+    /// implementation. If what you truly need is a double buffer, build
+    /// yourself a proper blocking one instead of wasting CPU time.
+    ///
+    pub fn consumed(&self) -> bool {
+        // A cleared dirty bit means the consumer has taken our last update
+        let back_info = self.shared.back_info.load(Ordering::Relaxed);
+        back_info & BACK_DIRTY_BIT == 0
+    }
+
+    /// Access the input buffer directly
+    ///
+    /// This advanced interface allows you to update the input buffer in place,
+    /// so that you can avoid creating values of type T repeatedy just to push
+    /// them into the triple buffer when doing so is expensive.
+    ///
+    /// However, by using it, you force yourself to take into account some
+    /// implementation subtleties that you could normally ignore.
+    ///
+    /// First, the buffer does not contain the last value that you published
+    /// (which is now available to the consumer thread). In fact, what you get
+    /// may not match _any_ value that you sent in the past, but rather be a new
+    /// value that was written in there by the consumer thread. All you can
+    /// safely assume is that the buffer contains a valid value of type T, which
+    /// you may need to "clean up" before use using a type-specific process.
+    ///
+    /// Second, we do not send updates automatically. You need to call
+    /// `publish()` in order to propagate a buffer update to the consumer.
+    /// Alternative designs based on Drop were considered, but considered too
+    /// magical for the target audience of this interface.
+    ///
+    pub fn input_buffer(&mut self) -> &mut T {
+        // SAFETY: the synchronization protocol ensures that we have exclusive
+        //         access to this buffer until the next `publish()`.
+        let input_ptr = self.shared.buffers[self.input_idx as usize].get();
+        unsafe { &mut *input_ptr }
+    }
+
+    /// Publish the current input buffer, checking for overwrites
+    ///
+    /// After updating the input buffer using `input_buffer()`, you can use this
+    /// method to publish your updates to the consumer.
+    ///
+    /// This will replace the current input buffer with another one, as you
+    /// cannot continue using the old one while the consumer is accessing it.
+    ///
+    /// It will also tell you whether you overwrote a value which was not read
+    /// by the consumer thread.
+    ///
+    pub fn publish(&mut self) -> bool {
+        // Swap the input buffer and the back buffer, setting the dirty bit
+        //
+        // The ordering must be AcqRel, because...
+        //
+        // - Our accesses to the old buffer must not be reordered after this
+        //   operation (which mandates Release ordering), otherwise they could
+        //   race with the consumer accessing the freshly published buffer.
+        // - Our accesses from the buffer must not be reordered before this
+        //   operation (which mandates Consume ordering, that is best
+        //   approximated by Acquire in Rust), otherwise they would race with
+        //   the consumer accessing the buffer as well before switching to
+        //   another buffer.
+        //   * This reordering may seem paradoxical, but could happen if the
+        //     compiler or CPU correctly speculated the new buffer's index
+        //     before that index is actually read, as well as on weird hardware
+        //     with incoherent caches like GPUs or old DEC Alpha where keeping
+        //     data in sync across cores requires manual action.
+        //
+        let former_back_info = self
+            .shared
+            .back_info
+            .swap(self.input_idx | BACK_DIRTY_BIT, Ordering::AcqRel);
+
+        // The old back buffer becomes our new input buffer
+        self.input_idx = former_back_info & BACK_INDEX_MASK;
+
+        // Tell whether we have overwritten unread data
+        former_back_info & BACK_DIRTY_BIT != 0
+    }
+
+    /// Deprecated alias to `input_buffer()`, please use that method instead
+    #[cfg(any(feature = "raw", test))]
+    #[deprecated(
+        since = "5.0.5",
+        note = "The \"raw\" feature is deprecated as the performance \
+                optimization that motivated it turned out to be incorrect. \
+                All functionality is now available without using feature flags."
+    )]
+    pub fn raw_input_buffer(&mut self) -> &mut T {
+        self.input_buffer()
+    }
+
+    /// Deprecated alias to `publish()`, please use that method instead
+    #[cfg(any(feature = "raw", test))]
+    #[deprecated(
+        since = "5.0.5",
+        note = "The \"raw\" feature is deprecated as the performance \
+                optimization that motivated it turned out to be incorrect. \
+                All functionality is now available without using feature flags."
+    )]
+    pub fn raw_publish(&mut self) -> bool {
+        self.publish()
+    }
+}
+
+/// Consumer interface to the triple buffer
+///
+/// The consumer of data can use this struct to access the latest published
+/// update from the producer whenever he likes. Readout is nonblocking: a
+/// collision between the producer and consumer will result in cache contention,
+/// but deadlocks and scheduling-induced slowdowns cannot happen.
+///
+#[derive(Debug)]
+pub struct Output<T: Send> {
+    /// Reference-counted shared state
+    shared: Arc<SharedState<T>>,
+
+    /// Index of the output buffer (which is private to the consumer)
+    output_idx: BufferIndex,
+}
+//
+// Public interface
+impl<T: Send> Output<T> {
+    /// Access the latest value from the triple buffer
+    ///
+    /// This is `update()` followed by `output_buffer()` in one step.
+    pub fn read(&mut self) -> &T {
+        // Fetch updates from the producer
+        self.update();
+
+        // Give access to the output buffer
+        self.output_buffer()
+    }
+
+    /// Tell whether a buffer update is incoming from the producer
+    ///
+    /// This method is only intended for diagnostics purposes. Please do not let
+    /// it inform your decision of reading a value or not, as that would
+    /// effectively be building a very poor spinlock-based double buffer
+    /// implementation. If what you truly need is a double buffer, build
+    /// yourself a proper blocking one instead of wasting CPU time.
+    ///
+    pub fn updated(&self) -> bool {
+        // A set dirty bit means the producer published an unread update
+        let back_info = self.shared.back_info.load(Ordering::Relaxed);
+        back_info & BACK_DIRTY_BIT != 0
+    }
+
+    /// Access the output buffer directly
+    ///
+    /// This advanced interface allows you to modify the contents of the output
+    /// buffer, so that you can avoid copying the output value when this is an
+    /// expensive process. One possible application, for example, is to
+    /// post-process values from the producer before use.
+    ///
+    /// However, by using it, you force yourself to take into account some
+    /// implementation subtleties that you could normally ignore.
+    ///
+    /// First, keep in mind that you can lose access to the current output
+    /// buffer any time `read()` or `update()` is called, as it may be replaced
+    /// by an updated buffer from the producer automatically.
+    ///
+    /// Second, to reduce the potential for the aforementioned usage error, this
+    /// method does not update the output buffer automatically. You need to call
+    /// `update()` in order to fetch buffer updates from the producer.
+    ///
+    pub fn output_buffer(&mut self) -> &mut T {
+        // SAFETY: the synchronization protocol ensures that we have exclusive
+        //         access to this buffer until the next `update()`.
+        let output_ptr = self.shared.buffers[self.output_idx as usize].get();
+        unsafe { &mut *output_ptr }
+    }
+
+    /// Update the output buffer
+    ///
+    /// Check if the producer submitted a new data version, and if one is
+    /// available, update our output buffer to use it. Return a flag that tells
+    /// you whether such an update was carried out.
+    ///
+    /// Bear in mind that when this happens, you will lose any change that you
+    /// performed to the output buffer via the `output_buffer()` interface.
+    ///
+    pub fn update(&mut self) -> bool {
+        // Access the shared state
+        let shared_state = &(*self.shared);
+
+        // Check if an update is present in the back-buffer
+        let updated = self.updated();
+        if updated {
+            // If so, exchange our output buffer with the back-buffer, thusly
+            // acquiring exclusive access to the old back buffer while giving
+            // the producer a new back-buffer to write to.
+            //
+            // The ordering must be AcqRel, because...
+            //
+            // - Our accesses to the previous buffer must not be reordered after
+            //   this operation (which mandates Release ordering), otherwise
+            //   they could race with the producer accessing the freshly
+            //   liberated buffer.
+            // - Our accesses from the buffer must not be reordered before this
+            //   operation (which mandates Consume ordering, that is best
+            //   approximated by Acquire in Rust), otherwise they would race
+            //   with the producer writing into the buffer before publishing it.
+            //   * This reordering may seem paradoxical, but could happen if the
+            //     compiler or CPU correctly speculated the new buffer's index
+            //     before that index is actually read, as well as on weird
+            //     hardware like GPUs where CPU caches require manual
+            //     synchronization.
+            //
+            let former_back_info = shared_state
+                .back_info
+                .swap(self.output_idx, Ordering::AcqRel);
+
+            // Make the old back-buffer our new output buffer
+            self.output_idx = former_back_info & BACK_INDEX_MASK;
+        }
+
+        // Tell whether an update was carried out
+        updated
+    }
+
+    /// Deprecated alias to `output_buffer()`, please use that method instead
+    #[cfg(any(feature = "raw", test))]
+    #[deprecated(
+        since = "5.0.5",
+        note = "The \"raw\" feature is deprecated as the performance \
+                optimization that motivated it turned out to be incorrect. \
+                All functionality is now available without using feature flags."
+    )]
+    pub fn raw_output_buffer(&mut self) -> &mut T {
+        self.output_buffer()
+    }
+
+    /// Deprecated alias to `update()`, please use that method instead
+    //
+    // NOTE(review): the original carried this `#[cfg]` attribute twice (once
+    //               before and once after `#[deprecated]`); the duplicate was
+    //               redundant and has been removed. A blank line was also
+    //               added above, for consistency with the `Input` impl block.
+    #[cfg(any(feature = "raw", test))]
+    #[deprecated(
+        since = "5.0.5",
+        note = "The \"raw\" feature is deprecated as the performance \
+                optimization that motivated it turned out to be incorrect. \
+                All functionality is now available without using feature flags."
+    )]
+    pub fn raw_update(&mut self) -> bool {
+        self.update()
+    }
+}
+
+/// Triple buffer shared state
+///
+/// In a triple buffering communication protocol, the producer and consumer
+/// share the following storage:
+///
+/// - Three memory buffers suitable for storing the data at hand
+/// - Information about the back-buffer: which buffer is the current back-buffer
+/// and whether an update was published since the last readout.
+///
+#[derive(Debug)]
+struct SharedState<T: Send> {
+    /// Data storage buffers
+    ///
+    /// Each buffer gets its own cache line (CachePadded) so that the producer
+    /// and consumer, which work on different buffers, do not false-share.
+    buffers: [CachePadded<UnsafeCell<T>>; 3],
+
+    /// Information about the current back-buffer state
+    ///
+    /// Bitfield: the two low-order bits hold the back-buffer's index, and the
+    /// third bit (BACK_DIRTY_BIT) flags an unread update from the producer.
+    back_info: CachePadded<AtomicBackBufferInfo>,
+}
+//
+#[doc(hidden)]
+impl<T: Send> SharedState<T> {
+    /// Given (a way to generate) buffer contents and the back info, build the shared state
+    fn new(mut gen_buf_data: impl FnMut(usize) -> T, back_info: BackBufferInfo) -> Self {
+        // The generator receives the buffer's index (0, 1 or 2)
+        let mut make_buf = |i| -> CachePadded<UnsafeCell<T>> {
+            CachePadded::new(UnsafeCell::new(gen_buf_data(i)))
+        };
+        Self {
+            buffers: [make_buf(0), make_buf(1), make_buf(2)],
+            back_info: CachePadded::new(AtomicBackBufferInfo::new(back_info)),
+        }
+    }
+}
+//
+#[doc(hidden)]
+impl<T: Clone + Send> SharedState<T> {
+    /// Cloning the shared state is unsafe because you must ensure that no one
+    /// is concurrently accessing it, since &self is enough for writing.
+    unsafe fn clone(&self) -> Self {
+        // Relaxed ordering suffices here: the caller guarantees that no other
+        // thread is concurrently touching this shared state.
+        Self::new(
+            |i| (*self.buffers[i].get()).clone(),
+            self.back_info.load(Ordering::Relaxed),
+        )
+    }
+}
+//
+#[doc(hidden)]
+impl<T: PartialEq + Send> SharedState<T> {
+    /// Equality is unsafe for the same reason as cloning: you must ensure that
+    /// no one is concurrently accessing the triple buffer to avoid data races.
+    unsafe fn eq(&self, other: &Self) -> bool {
+        // Check whether the contents of all buffers are equal...
+        let buffers_equal = self
+            .buffers
+            .iter()
+            .zip(other.buffers.iter())
+            .all(|tuple| -> bool {
+                let (cell1, cell2) = tuple;
+                *cell1.get() == *cell2.get()
+            });
+
+        // ...then check whether the rest of the shared state is equal
+        buffers_equal
+            && (self.back_info.load(Ordering::Relaxed) == other.back_info.load(Ordering::Relaxed))
+    }
+}
+//
+// SAFETY(review): sharing &SharedState across threads appears sound because
+//                 the inner UnsafeCells are only reached through Input/Output,
+//                 whose swap-based protocol hands each side exclusive access
+//                 to its own buffer (see `input_buffer()`/`output_buffer()`).
+unsafe impl<T: Send> Sync for SharedState<T> {}
+
+// Index types used for triple buffering
+//
+// These types are used to index into triple buffers. In addition, the
+// BackBufferInfo type is actually a bitfield, whose third bit (numerical
+// value: 4) is set to 1 to indicate that the producer published an update into
+// the back-buffer, and reset to 0 when the consumer fetches the update.
+//
+type BufferIndex = u8;
+type BackBufferInfo = BufferIndex;
+//
+type AtomicBackBufferInfo = AtomicU8;
+const BACK_INDEX_MASK: u8 = 0b11; // Mask used to extract back-buffer index
+const BACK_DIRTY_BIT: u8 = 0b100; // Bit set by producer to signal updates
+
+/// Unit tests
+#[cfg(test)]
+mod tests {
+ use super::{BufferIndex, SharedState, TripleBuffer, BACK_DIRTY_BIT, BACK_INDEX_MASK};
+
+ use std::{fmt::Debug, ops::Deref, sync::atomic::Ordering, thread, time::Duration};
+
+ use testbench::{
+ self,
+ race_cell::{RaceCell, Racey},
+ };
+
+    /// Check that triple buffers are properly initialized
+    #[test]
+    fn initial_state() {
+        // Let's create a triple buffer
+        let mut buf = TripleBuffer::new(42);
+        check_buf_state(&mut buf, false);
+        // The initial value must be visible from the consumer side
+        assert_eq!(*buf.output.read(), 42);
+    }
+
+    /// Check that the shared state's unsafe equality operator works
+    #[test]
+    fn partial_eq_shared() {
+        // Let's create some dummy shared state
+        let dummy_state = SharedState::<u16>::new(|i| [111, 222, 333][i], 0b10);
+
+        // Check that the dummy state is equal to itself
+        assert!(unsafe { dummy_state.eq(&dummy_state) });
+
+        // Check that it's not equal to a state where buffer contents differ
+        assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [114, 222, 333][i], 0b10)) });
+        assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 225, 333][i], 0b10)) });
+        assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 222, 336][i], 0b10)) });
+
+        // Check that it's not equal to a state where the back info differs
+        //
+        // NOTE(review): `BACK_DIRTY_BIT & 0b10` evaluates to 0 since
+        //               BACK_DIRTY_BIT is 0b100, so this compares against back
+        //               info 0 rather than a dirty state; `|` may have been
+        //               intended — TODO confirm against upstream. The
+        //               assertion still holds either way (0 != 0b10).
+        assert!(unsafe {
+            !dummy_state.eq(&SharedState::<u16>::new(
+                |i| [111, 222, 333][i],
+                BACK_DIRTY_BIT & 0b10,
+            ))
+        });
+        assert!(unsafe { !dummy_state.eq(&SharedState::<u16>::new(|i| [111, 222, 333][i], 0b01)) });
+    }
+
+    /// Check that TripleBuffer's PartialEq impl works
+    #[test]
+    fn partial_eq() {
+        // Create a triple buffer
+        let buf = TripleBuffer::new("test");
+
+        // Check that it is equal to itself
+        assert_eq!(buf, buf);
+
+        // Make another buffer with different contents. As buffer creation is
+        // deterministic, this should only have an impact on the shared state,
+        // but the buffers should nevertheless be considered different.
+        let buf2 = TripleBuffer::new("taste");
+        assert_eq!(buf.input.input_idx, buf2.input.input_idx);
+        assert_eq!(buf.output.output_idx, buf2.output.output_idx);
+        assert!(buf != buf2);
+
+        // Check that changing either the input or output buffer index will
+        // also lead two TripleBuffers to be considered different (this test
+        // technically creates an invalid TripleBuffer state, but it's the only
+        // way to check that the PartialEq impl is exhaustive)
+        let mut buf3 = TripleBuffer::new("test");
+        assert_eq!(buf, buf3);
+        let old_input_idx = buf3.input.input_idx;
+        // Tamper with the input index only...
+        buf3.input.input_idx = buf3.output.output_idx;
+        assert!(buf != buf3);
+        // ...then restore it and tamper with the output index only
+        buf3.input.input_idx = old_input_idx;
+        buf3.output.output_idx = old_input_idx;
+        assert!(buf != buf3);
+    }
+
+    /// Check that the shared state's unsafe clone operator works
+    #[test]
+    fn clone_shared() {
+        // Let's create some dummy shared state
+        //
+        // NOTE(review): `BACK_DIRTY_BIT & 0b01` evaluates to 0 since
+        //               BACK_DIRTY_BIT is 0b100; `|` may have been intended —
+        //               TODO confirm against upstream. The test remains
+        //               self-consistent, as the same expression is reused in
+        //               the comparison below.
+        let dummy_state = SharedState::<u8>::new(|i| [123, 231, 132][i], BACK_DIRTY_BIT & 0b01);
+
+        // Now, try to clone it
+        let dummy_state_copy = unsafe { dummy_state.clone() };
+
+        // Check that the contents of the original state did not change
+        assert!(unsafe {
+            dummy_state.eq(&SharedState::<u8>::new(
+                |i| [123, 231, 132][i],
+                BACK_DIRTY_BIT & 0b01,
+            ))
+        });
+
+        // Check that the contents of the original and final state are identical
+        assert!(unsafe { dummy_state.eq(&dummy_state_copy) });
+    }
+
+    /// Check that TripleBuffer's Clone impl works
+    #[test]
+    fn clone() {
+        // Create a triple buffer
+        let mut buf = TripleBuffer::new(4.2);
+
+        // Put it in a nontrivial state
+        unsafe {
+            *buf.input.shared.buffers[0].get() = 1.2;
+            *buf.input.shared.buffers[1].get() = 3.4;
+            *buf.input.shared.buffers[2].get() = 5.6;
+        }
+        // NOTE(review): `BACK_DIRTY_BIT & 0b01` evaluates to 0 since
+        //               BACK_DIRTY_BIT is 0b100; `|` may have been intended —
+        //               TODO confirm against upstream. The test remains
+        //               self-consistent, as the same expression is reused in
+        //               the final back_info assertion below.
+        buf.input
+            .shared
+            .back_info
+            .store(BACK_DIRTY_BIT & 0b01, Ordering::Relaxed);
+        buf.input.input_idx = 0b10;
+        buf.output.output_idx = 0b00;
+
+        // Now clone it
+        let buf_clone = buf.clone();
+
+        // Check that the clone uses its own, separate shared data storage,
+        // which is shared between its own input and output interfaces.
+        //
+        // (The original first assertion compared `buf_clone.output.shared`
+        // against itself, which is vacuously true and tested nothing; it now
+        // compares the clone's input and output shared-state pointers.)
+        assert_eq!(
+            as_ptr(&buf_clone.input.shared),
+            as_ptr(&buf_clone.output.shared)
+        );
+        assert!(as_ptr(&buf_clone.input.shared) != as_ptr(&buf.input.shared));
+
+        // Check that it is identical from PartialEq's point of view
+        assert_eq!(buf, buf_clone);
+
+        // Check that the contents of the original buffer did not change
+        unsafe {
+            assert_eq!(*buf.input.shared.buffers[0].get(), 1.2);
+            assert_eq!(*buf.input.shared.buffers[1].get(), 3.4);
+            assert_eq!(*buf.input.shared.buffers[2].get(), 5.6);
+        }
+        assert_eq!(
+            buf.input.shared.back_info.load(Ordering::Relaxed),
+            BACK_DIRTY_BIT & 0b01
+        );
+        assert_eq!(buf.input.input_idx, 0b10);
+        assert_eq!(buf.output.output_idx, 0b00);
+    }
+
/// Check that the low-level publish/update primitives work
///
/// Walks the buffer through the four interesting transitions (update on
/// clean, publish on clean, publish on dirty, update on dirty) and checks
/// the exact index rotation and dirty-bit change after each one.
#[test]
fn swaps() {
    // Create a new buffer, and record every piece of its initial state so
    // the expected post-transition states can be built from it
    let mut buf = TripleBuffer::new([123, 456]);
    let old_buf = buf.clone();
    let old_input_idx = old_buf.input.input_idx;
    let old_shared = &old_buf.input.shared;
    let old_back_info = old_shared.back_info.load(Ordering::Relaxed);
    let old_back_idx = old_back_info & BACK_INDEX_MASK;
    let old_output_idx = old_buf.output.output_idx;

    // Check that updating from a clean state works: update() reports that
    // no new data was available and leaves the buffer untouched
    assert!(!buf.output.update());
    assert_eq!(buf, old_buf);
    check_buf_state(&mut buf, false);

    // Check that publishing from a clean state works: publish() reports no
    // overwrite, the input and back buffers swap, and the dirty bit is set
    assert!(!buf.input.publish());
    let mut expected_buf = old_buf.clone();
    expected_buf.input.input_idx = old_back_idx;
    expected_buf
        .input
        .shared
        .back_info
        .store(old_input_idx | BACK_DIRTY_BIT, Ordering::Relaxed);
    assert_eq!(buf, expected_buf);
    check_buf_state(&mut buf, true);

    // Check that overwriting a dirty state works: publish() reports an
    // overwrite, and input/back swap again, back to their original indexes
    assert!(buf.input.publish());
    let mut expected_buf = old_buf.clone();
    expected_buf.input.input_idx = old_input_idx;
    expected_buf
        .input
        .shared
        .back_info
        .store(old_back_idx | BACK_DIRTY_BIT, Ordering::Relaxed);
    assert_eq!(buf, expected_buf);
    check_buf_state(&mut buf, true);

    // Check that updating from a dirty state works: update() reports new
    // data, the output and back buffers swap, and the dirty bit is cleared
    assert!(buf.output.update());
    expected_buf.output.output_idx = old_back_idx;
    expected_buf
        .output
        .shared
        .back_info
        .store(old_output_idx, Ordering::Relaxed);
    assert_eq!(buf, expected_buf);
    check_buf_state(&mut buf, false);
}
+
/// Check that (sequentially) writing to a triple buffer works
#[test]
fn sequential_write() {
    // Construct a fresh buffer and keep a pristine copy of it around
    let mut buf = TripleBuffer::new(false);
    let old_buf = buf.clone();

    // Send one value through the high-level input interface
    buf.input.write(true);

    // The high-level write() must be equivalent to manually filling the
    // input buffer and publishing it, starting from the same initial state;
    // nothing else about the buffer should have changed.
    let mut expected_buf = old_buf.clone();
    *expected_buf.input.input_buffer() = true;
    expected_buf.input.publish();
    assert_eq!(buf, expected_buf);
    check_buf_state(&mut buf, true);
}
+
/// Check that (sequentially) reading from a triple buffer works
#[test]
fn sequential_read() {
    // Prepare a triple buffer holding a freshly written (dirty) value
    let mut buf = TripleBuffer::new(1.0);
    buf.input.write(4.2);

    // First readout: the buffer is dirty, so read() should fetch the new
    // value and have exactly the same effect as an explicit update()
    let old_buf = buf.clone();
    let result = *buf.output.read();
    assert_eq!(result, 4.2);
    let mut expected_buf = old_buf.clone();
    assert!(expected_buf.output.update());
    assert_eq!(buf, expected_buf);
    check_buf_state(&mut buf, false);

    // Second readout: the buffer is now clean, so read() should return the
    // same value and leave the buffer state completely untouched
    let old_buf = buf.clone();
    let result = *buf.output.read();
    assert_eq!(result, 4.2);
    assert_eq!(buf, old_buf);
    check_buf_state(&mut buf, false);
}
+
/// Check that contended concurrent reads and writes work
///
/// Marked #[ignore] because it performs 100M writes; run explicitly with
/// `cargo test -- --ignored` (ideally with --test-threads=1).
#[test]
#[ignore]
fn contended_concurrent_read_write() {
    // We will stress the infrastructure by performing this many writes
    // as a reader continuously reads the latest value
    const TEST_WRITE_COUNT: usize = 100_000_000;

    // This is the buffer that our reader and writer will share; RaceCell's
    // get() reports whether a read raced with a concurrent write, which is
    // how torn (inconsistent) values would be detected below.
    let buf = TripleBuffer::new(RaceCell::new(0));
    let (mut buf_input, mut buf_output) = buf.split();

    // Concurrently run a writer which increments a shared value in a loop,
    // and a reader which makes sure that no unexpected value slips in.
    let mut last_value = 0usize;
    testbench::concurrent_test_2(
        move || {
            for value in 1..=TEST_WRITE_COUNT {
                buf_input.write(RaceCell::new(value));
            }
        },
        move || {
            while last_value < TEST_WRITE_COUNT {
                let new_racey_value = buf_output.read().get();
                match new_racey_value {
                    Racey::Consistent(new_value) => {
                        // Values must be observed in nondecreasing order,
                        // and never exceed what the writer could have sent
                        assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT));
                        last_value = new_value;
                    }
                    Racey::Inconsistent => {
                        panic!("Inconsistent state exposed by the buffer!");
                    }
                }
            }
        },
    );
}
+
/// Check that uncontended concurrent reads and writes work
///
/// **WARNING:** This test unfortunately needs to have timing-dependent
/// behaviour to do its job. If it fails for you, try the following:
///
/// - Close running applications in the background
/// - Re-run the tests with only one OS thread (--test-threads=1)
/// - Increase the writer sleep period
///
#[test]
#[ignore]
fn uncontended_concurrent_read_write() {
    // We will stress the infrastructure by performing this many writes
    // as a reader continuously reads the latest value
    const TEST_WRITE_COUNT: usize = 625;

    // This is the buffer that our reader and writer will share
    let buf = TripleBuffer::new(RaceCell::new(0));
    let (mut buf_input, mut buf_output) = buf.split();

    // Concurrently run a writer which slowly increments a shared value,
    // and a reader which checks that it can receive every update
    let mut last_value = 0usize;
    testbench::concurrent_test_2(
        move || {
            for value in 1..=TEST_WRITE_COUNT {
                buf_input.write(RaceCell::new(value));
                // The long sleep between writes is what makes the
                // "no update may be skipped" assertion below plausible
                thread::yield_now();
                thread::sleep(Duration::from_millis(32));
            }
        },
        move || {
            while last_value < TEST_WRITE_COUNT {
                let new_racey_value = buf_output.read().get();
                match new_racey_value {
                    Racey::Consistent(new_value) => {
                        // The reader polls much faster than the writer
                        // updates, so successive observed values may differ
                        // by at most one
                        assert!((new_value >= last_value) && (new_value - last_value <= 1));
                        last_value = new_value;
                    }
                    Racey::Inconsistent => {
                        panic!("Inconsistent state exposed by the buffer!");
                    }
                }
            }
        },
    );
}
+
/// Through the low-level API, the consumer is allowed to modify its
/// buffer, which means that it will unknowingly send back data to the
/// producer. This creates new correctness requirements for the
/// synchronization protocol, which must be checked as well.
///
/// Marked #[ignore] because it performs 100M writes; run explicitly with
/// `cargo test -- --ignored`.
#[test]
#[ignore]
fn concurrent_bidirectional_exchange() {
    // We will stress the infrastructure by performing this many writes
    // as a reader continuously reads the latest value
    const TEST_WRITE_COUNT: usize = 100_000_000;

    // This is the buffer that our reader and writer will share
    let buf = TripleBuffer::new(RaceCell::new(0));
    let (mut buf_input, mut buf_output) = buf.split();

    // Concurrently run a writer which increments a shared value in a loop,
    // and a reader which makes sure that no unexpected value slips in.
    testbench::concurrent_test_2(
        move || {
            for new_value in 1..=TEST_WRITE_COUNT {
                // Inspect the current input buffer before overwriting it:
                // it may hold a value the reader wrote back (last_value / 2
                // below), which must never exceed what we have published
                match buf_input.input_buffer().get() {
                    Racey::Consistent(curr_value) => {
                        assert!(curr_value <= new_value);
                    }
                    Racey::Inconsistent => {
                        panic!("Inconsistent state exposed by the buffer!");
                    }
                }
                buf_input.write(RaceCell::new(new_value));
            }
        },
        move || {
            let mut last_value = 0usize;
            while last_value < TEST_WRITE_COUNT {
                match buf_output.output_buffer().get() {
                    Racey::Consistent(new_value) => {
                        assert!((new_value >= last_value) && (new_value <= TEST_WRITE_COUNT));
                        last_value = new_value;
                    }
                    Racey::Inconsistent => {
                        panic!("Inconsistent state exposed by the buffer!");
                    }
                }
                // Deliberately scribble into the output buffer before
                // releasing it, so the next update() sends data back to the
                // producer — the scenario this test exists to exercise
                if buf_output.updated() {
                    buf_output.output_buffer().set(last_value / 2);
                    buf_output.update();
                }
            }
        },
    );
}
+
+ /// Range check for triple buffer indexes
+ #[allow(unused_comparisons)]
+ fn index_in_range(idx: BufferIndex) -> bool {
+ (idx >= 0) & (idx <= 2)
+ }
+
/// Get a pointer to the target of some reference (e.g. an &, an Arc...)
fn as_ptr<P: Deref>(ref_like: &P) -> *const P::Target {
    // Deref-coerce down to the pointee, then turn the resulting shared
    // reference into a raw pointer without taking ownership
    let target: &P::Target = ref_like;
    target as *const P::Target
}
+
/// Check the state of a buffer, and the effect of queries on it
///
/// Verifies the structural invariants of a triple buffer (shared state,
/// index ranges and distinctness, dirty bit), then checks that each
/// read-only query (input_buffer, consumed, output_buffer, updated)
/// reports the expected result without mutating the buffer.
fn check_buf_state<T>(buf: &mut TripleBuffer<T>, expected_dirty_bit: bool)
where
    T: Clone + Debug + PartialEq + Send,
{
    // Make a backup of the buffer's initial state, so that every query
    // below can be checked for absence of side effects
    let initial_buf = buf.clone();

    // Check that the input and output point to the same shared state
    assert_eq!(as_ptr(&buf.input.shared), as_ptr(&buf.output.shared));

    // Access the shared state and decode back-buffer information
    let back_info = buf.input.shared.back_info.load(Ordering::Relaxed);
    let back_idx = back_info & BACK_INDEX_MASK;
    let back_buffer_dirty = back_info & BACK_DIRTY_BIT != 0;

    // Input-/output-/back-buffer indexes must be in range
    assert!(index_in_range(buf.input.input_idx));
    assert!(index_in_range(buf.output.output_idx));
    assert!(index_in_range(back_idx));

    // Input-/output-/back-buffer indexes must be distinct (each of the
    // three roles owns exactly one of the three buffers)
    assert!(buf.input.input_idx != buf.output.output_idx);
    assert!(buf.input.input_idx != back_idx);
    assert!(buf.output.output_idx != back_idx);

    // Back-buffer must have the expected dirty bit
    assert_eq!(back_buffer_dirty, expected_dirty_bit);

    // Check that the "input buffer" query behaves as expected
    assert_eq!(
        as_ptr(&buf.input.input_buffer()),
        buf.input.shared.buffers[buf.input.input_idx as usize].get()
    );
    assert_eq!(*buf, initial_buf);

    // Check that the "consumed" query behaves as expected: the last write
    // has been consumed exactly when the back buffer is not dirty
    assert_eq!(!buf.input.consumed(), expected_dirty_bit);
    assert_eq!(*buf, initial_buf);

    // Check that the output_buffer query works in the initial state
    assert_eq!(
        as_ptr(&buf.output.output_buffer()),
        buf.output.shared.buffers[buf.output.output_idx as usize].get()
    );
    assert_eq!(*buf, initial_buf);

    // Check that the "updated" query works in the initial state: an update
    // is pending exactly when the back buffer is dirty
    assert_eq!(buf.output.updated(), expected_dirty_bit);
    assert_eq!(*buf, initial_buf);
}
+}