Diffstat (limited to 'vendor/sized-chunks')
-rw-r--r--  vendor/sized-chunks/.cargo-checksum.json           1
-rw-r--r--  vendor/sized-chunks/CHANGELOG.md                 218
-rw-r--r--  vendor/sized-chunks/CODE_OF_CONDUCT.md            73
-rw-r--r--  vendor/sized-chunks/Cargo.toml                    49
-rw-r--r--  vendor/sized-chunks/LICENCE.md                   355
-rw-r--r--  vendor/sized-chunks/README.md                     35
-rw-r--r--  vendor/sized-chunks/src/arbitrary.rs             152
-rw-r--r--  vendor/sized-chunks/src/inline_array/iter.rs      63
-rw-r--r--  vendor/sized-chunks/src/inline_array/mod.rs      710
-rw-r--r--  vendor/sized-chunks/src/lib.rs                   126
-rw-r--r--  vendor/sized-chunks/src/ring_buffer/index.rs     178
-rw-r--r--  vendor/sized-chunks/src/ring_buffer/iter.rs      218
-rw-r--r--  vendor/sized-chunks/src/ring_buffer/mod.rs      1156
-rw-r--r--  vendor/sized-chunks/src/ring_buffer/refpool.rs    69
-rw-r--r--  vendor/sized-chunks/src/ring_buffer/slice.rs     556
-rw-r--r--  vendor/sized-chunks/src/sized_chunk/iter.rs      109
-rw-r--r--  vendor/sized-chunks/src/sized_chunk/mod.rs      1411
-rw-r--r--  vendor/sized-chunks/src/sized_chunk/refpool.rs    66
-rw-r--r--  vendor/sized-chunks/src/sparse_chunk/iter.rs     245
-rw-r--r--  vendor/sized-chunks/src/sparse_chunk/mod.rs      514
-rw-r--r--  vendor/sized-chunks/src/sparse_chunk/refpool.rs   57
-rw-r--r--  vendor/sized-chunks/src/tests.rs                  18
-rw-r--r--  vendor/sized-chunks/src/types.rs                  54
23 files changed, 6433 insertions, 0 deletions
diff --git a/vendor/sized-chunks/.cargo-checksum.json b/vendor/sized-chunks/.cargo-checksum.json
new file mode 100644
index 000000000..e7ce2c405
--- /dev/null
+++ b/vendor/sized-chunks/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"936cc3874700abd0397c2f2513b416364fe10f14bf9832f1963ab7ad5c915edc","CODE_OF_CONDUCT.md":"3db9f112c815ffef9a6f51eef2f5f6a3f3c2900510c243f26ad890321b888473","Cargo.toml":"9b4be1fb143f1c1027b482d81d0245cee27d0f861e7f78c1903096a88bbe2bd4","LICENCE.md":"e6b6566f085df5746515c6d7e2edcaec0d2b77d527ac40d91409d783fb6c8508","README.md":"569f22320ffa66ba6b234b574c0e1a3fbe3d824c016ede342c1aaf11ca782ae7","src/arbitrary.rs":"1bcdb05e3e6308346f70ad628578d4f4ca162fe77dbbe59ede489f1e44840636","src/inline_array/iter.rs":"f1aa5c101091e47388b2e658a1f7bc32832d7e750910e5164f037ec59edfb920","src/inline_array/mod.rs":"75bcbf2f543322149d7cb92ca3432eeb3578c11690bd7847edb4a4496e2b226d","src/lib.rs":"aecdb94750fdd174dbee8647511624b1078c07616b274e5592df9d2d57c3bd33","src/ring_buffer/index.rs":"0ed3668bbb219d17c16d9cbe4999bd3a252fff8d6bd799bdf70463a0c5ce642f","src/ring_buffer/iter.rs":"b6a0773e3edda8a9ccf8d8cf0a57495733ab64fb9875cf17f5db7a57f42c300b","src/ring_buffer/mod.rs":"7e5879de26ccc4db8d3b967c2c01603b3dba691063dd5c79fe4c828ce9c92172","src/ring_buffer/refpool.rs":"47fae09cf8af709d0594347b185f6ad23227f25ccbaf750dfe9854b7e423ab9a","src/ring_buffer/slice.rs":"6495750e9c77bf017977bfcac06123190b3d060cf29afaa75d577d3341de05c9","src/sized_chunk/iter.rs":"e3db2698d0c2553e50f5a4dbeb61e5eeef201750bd3e5d08db84abb1282eb7ce","src/sized_chunk/mod.rs":"656767d1dd5fb43eb7ebadb03359fcd454dea23a59429856a8bedc3d742a7571","src/sized_chunk/refpool.rs":"e035a13a0b7418fbfd1282d0cede848a3196923db3fdd55c91793c2f9b6db52f","src/sparse_chunk/iter.rs":"415d033db0991577c32f9d0e018c23fdceec9d54004f9171fdaac63272cb10a7","src/sparse_chunk/mod.rs":"b8f6e6c19c7d80ed3b129e4459b58a89124321f4ff7b5f0381d4d056b563e378","src/sparse_chunk/refpool.rs":"729ec32bb616f540400c6ff10711cfc315d30ddcb97efdc6e5fc4308cec94f36","src/tests.rs":"f79cdc24865f06cb56949259e995f000422a4911a2dc83adcad8a8571de4ee67","src/types.rs":"d0af7b6fe6de3b026e7a527fdea40835acbae80b7ea0bd903c5c67208461fda9"},"package":"65e65d6a9f13cd78f361ea5a2cf53a45d67cdda421ba0316b9be101560f3d207"} \ No newline at end of file
diff --git a/vendor/sized-chunks/CHANGELOG.md b/vendor/sized-chunks/CHANGELOG.md
new file mode 100644
index 000000000..b58c9e1e2
--- /dev/null
+++ b/vendor/sized-chunks/CHANGELOG.md
@@ -0,0 +1,218 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project
+adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [0.6.4] - 2021-02-17
+
+### FIXED
+
+- `InlineArray` can be used in recursive types again.
+
+### CHANGED
+
+- `InlineArray::new()` now panics when it can't store elements with large alignment
+ (this was UB prior to 0.6.3). Alignments of `usize` and smaller are always supported.
+ Larger alignments are supported if the capacity-providing type has sufficient alignment.
+
+## [0.6.3] - 2021-02-14
+
+### FIXED
+
+- Multiple soundness fixes: `InlineArray` handles large alignment, panic safety in `Chunk`'s
+ `clone` and `from_iter`, capacity checks in `unit()`, `pair()` and `from()`.
+- `InlineArray` can now handle zero sized values. This relies on conditionals in const functions,
+ a feature which was introduced in Rust 1.46.0, which means this is now the minimum Rust version
+ this crate will work on.
+
+## [0.6.2] - 2020-05-15
+
+### FIXED
+
+- This release exists for no other purpose than to bump the `refpool` optional dependency.
+
+## [0.6.1] - 2020-03-26
+
+### ADDED
+
+- The crate now has a `std` feature flag, which is on by default, and will make the crate `no_std`
+ if disabled.
+
+### FIXED
+
+- Fixed a compilation error if you had the `arbitrary` feature flag enabled without the
+ `ringbuffer` flag.
+
+## [0.6.0] - 2020-03-24
+
+### CHANGED
+
+- `RingBuffer` and its accompanying slice types `Slice` and `SliceMut` now implement `Array` and
+ `ArrayMut` from [`array-ops`](http://docs.rs/array-ops), giving them most of the methods that
+ would be available on primitive slice types and cutting down on code duplication in the
+ implementation, but at the price of having to pull `Array` et al into scope when you need them.
+ Because this means adding a dependency to `array-ops`, `RingBuffer` has now been moved behind
+ the `ringbuffer` feature flag. `Chunk` and `InlineArray` don't and won't implement `Array`,
+ because they are both able to implement `Deref<[A]>`, which provides the same functionality more
+ efficiently.
+
+### ADDED
+
+- The `insert_from` and `insert_ordered` methods recently added to `Chunk` have now also been
+ added to `RingBuffer`.
+- `RingBuffer`'s `Slice` and `SliceMut` now also have the three `binary_search` methods regular
+ slices have.
+- `SparseChunk`, `RingBuffer`, `Slice` and `SliceMut` now have unsafe `get_unchecked` and
+ `get_unchecked_mut` methods.
+- `PartialEq` implementations allowing you to compare `RingBuffer`s, `Slice`s and `SliceMut`s
+ interchangeably have been added.
+
+### FIXED
+
+- Fixed an aliasing issue in `RingBuffer`'s mutable iterator, as uncovered by Miri. Behind the
+ scenes, the full non-fuzzing unit test suite is now able to run on Miri without crashing it
+ (after migrating the last Proptest tests away from the test suite into the fuzz targets), and
+ this has been included in its CI build. (#6)
+
+## [0.5.3] - 2020-03-11
+
+### FIXED
+
+- Debug only assertions made it into the previous release by accident, and this change has been
+ reverted. (#7)
+
+## [0.5.2] - 2020-03-10
+
+### ADDED
+
+- `Chunk` now has an `insert_from` method for inserting multiple values at an index in one go.
+- `Chunk` now also has an `insert_ordered` method for inserting values into a sorted chunk.
+- `SparseChunk` now has the methods `option_iter()`, `option_iter_mut()` and `option_drain()` with
+ their corresponding iterators to iterate over a chunk as if it were an array of `Option`s.
+- [`Arbitrary`](https://docs.rs/arbitrary/latest/arbitrary/trait.Arbitrary.html) implementations
+ for all data types have been added behind the `arbitrary` feature flag.
+
+### FIXED
+
+- Internal consistency assertions are now only performed in debug mode (like with
+  `debug_assert!`). This means `sized_chunks` will no longer panic in release mode when you do
+  things like push to a full chunk; instead, the behaviour is undefined. It also means a very
+  slight performance gain.
+
+## [0.5.1] - 2019-12-12
+
+### ADDED
+
+- `PoolDefault` and `PoolClone` implementations, from the
+ [`refpool`](https://crates.io/crates/refpool) crate, are available for `Chunk`, `SparseChunk`
+ and `RingBuffer`, behind the `refpool` feature flag.
+
+## [0.5.0] - 2019-09-09
+
+### CHANGED
+
+- The `Bitmap` type (and its helper type, `Bits`) has been split off into a separate crate, named
+ `bitmaps`. If you need it, it's in that crate now. `sized-chunks` does not re-export it. Of
+ course, this means `sized-chunks` has gained `bitmaps` as its second hard dependency.
+
+## [0.4.0] - 2019-09-02
+
+### CHANGED
+
+- The 0.3.2 release increased the minimum rustc version required, which should have been a major
+ version bump, so 0.3.2 is being yanked and re-tagged as 0.4.0.
+
+## [0.3.2] - 2019-08-29
+
+### ADDED
+
+- Chunk/bitmap sizes up to 1024 are now supported.
+
+### FIXED
+
+- Replaced `ManuallyDrop` in implementations with `MaybeUninit`, along with a general unsafe code
+ cleanup. (#3)
+
+## [0.3.1] - 2019-08-03
+
+### ADDED
+
+- Chunk sizes up to 256 are now supported.
+
+## [0.3.0] - 2019-05-18
+
+### ADDED
+
+- A new data structure, `InlineArray`, which is a stack allocated array matching the size of a
+ given type, intended for optimising for the case of very small vectors.
+- `Chunk` has an implementation of `From<InlineArray>` which is considerably faster than going via
+ iterators.
+
+## [0.2.2] - 2019-05-10
+
+### ADDED
+
+- `Slice::get` methods now return references with the lifetime of the underlying `RingBuffer`
+ rather than the lifetime of the slice.
+
+## [0.2.1] - 2019-04-15
+
+### ADDED
+
+- A lot of documentation.
+- `std::io::Read` implementations for `Chunk<u8>` and `RingBuffer<u8>` to match their `Write`
+ implementations.
+
+## [0.2.0] - 2019-04-14
+
+### CHANGED
+
+- The `capacity()` method has been replaced with a `CAPACITY` const on each type.
+
+### ADDED
+
+- There is now a `RingBuffer` implementation, which should be nearly a drop-in replacement for
+ `SizedChunk` but is always O(1) on push and cannot be dereferenced to slices (but it has a set
+ of custom slice-like implementations to make that less of a drawback).
+- The `Drain` iterator for `SizedChunk` now implements `DoubleEndedIterator`.
+
+### FIXED
+
+- `SizedChunk::drain_from_front/back` will now always panic if the iterator underflows, instead of
+ only doing it in debug mode.
+
+## [0.1.3] - 2019-04-12
+
+### ADDED
+
+- `SparseChunk` now has a default length of `U64`.
+- `Chunk` now has `PartialEq` defined for anything that can be borrowed as a slice.
+- `SparseChunk<A>` likewise has `PartialEq` defined for `BTreeMap<usize, A>` and
+  `HashMap<usize, A>`. These are intended for debugging and aren't optimally efficient.
+- `Chunk` and `SparseChunk` now have a new method `capacity()` which returns its maximum capacity
+ (the number in the type) as a usize.
+- Added an `entries()` method to `SparseChunk`.
+- `SparseChunk` now has a `Debug` implementation.
+
+### FIXED
+
+- Extensive integration tests were added for `Chunk` and `SparseChunk`.
+- `Chunk::clear` is now very slightly faster.
+
+## [0.1.2] - 2019-03-11
+
+### FIXED
+
+- Fixed an alignment issue in `Chunk::drain_from_back`. (#1)
+
+## [0.1.1] - 2019-02-19
+
+### FIXED
+
+- Some 2018 edition issues.
+
+## [0.1.0] - 2019-02-19
+
+Initial release.
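
As a quick illustration of the API surface this changelog describes, here is a minimal
editorial sketch (not part of the vendored crate) using the 0.6.x `Chunk` type. The
`CAPACITY` const comes from the 0.2.0 entry and `insert_ordered` from the 0.5.2 entry
above; the element type and chunk size are illustrative only.

```rust
// Sketch only: assumes the sized-chunks 0.6.x API as described in the changelog.
use sized_chunks::Chunk;
use typenum::U8; // type-level 8, fixing the capacity at compile time

fn main() {
    // 0.2.0: the `capacity()` method became the `CAPACITY` const.
    assert_eq!(8, Chunk::<i32, U8>::CAPACITY);

    let mut chunk: Chunk<i32, U8> = Chunk::new();
    chunk.push_back(1);
    chunk.push_back(3);
    // 0.5.2: `insert_ordered` keeps an already-sorted chunk sorted.
    chunk.insert_ordered(2);

    // `Chunk` derefs to a slice, so slice comparisons work directly.
    assert_eq!(&[1, 2, 3][..], &chunk[..]);
}
```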
diff --git a/vendor/sized-chunks/CODE_OF_CONDUCT.md b/vendor/sized-chunks/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..02a086970
--- /dev/null
+++ b/vendor/sized-chunks/CODE_OF_CONDUCT.md
@@ -0,0 +1,73 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+education, socio-economic status, nationality, personal appearance, race,
+religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at admin@immutable.rs. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
diff --git a/vendor/sized-chunks/Cargo.toml b/vendor/sized-chunks/Cargo.toml
new file mode 100644
index 000000000..401b52836
--- /dev/null
+++ b/vendor/sized-chunks/Cargo.toml
@@ -0,0 +1,49 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "sized-chunks"
+version = "0.6.4"
+authors = ["Bodil Stokke <bodil@bodil.org>"]
+exclude = ["release.toml", "proptest-regressions/**"]
+description = "Efficient sized chunk datatypes"
+documentation = "http://docs.rs/sized-chunks"
+readme = "./README.md"
+keywords = ["sparse-array"]
+categories = ["data-structures"]
+license = "MPL-2.0+"
+repository = "https://github.com/bodil/sized-chunks"
+[package.metadata.docs.rs]
+all-features = true
+[dependencies.arbitrary]
+version = "0.4.7"
+optional = true
+
+[dependencies.array-ops]
+version = "0.1.0"
+optional = true
+
+[dependencies.bitmaps]
+version = "2.1.0"
+
+[dependencies.refpool]
+version = "0.4.3"
+optional = true
+
+[dependencies.typenum]
+version = "1.12.0"
+
+[features]
+default = ["std"]
+ringbuffer = ["array-ops"]
+std = []
diff --git a/vendor/sized-chunks/LICENCE.md b/vendor/sized-chunks/LICENCE.md
new file mode 100644
index 000000000..cd44203cd
--- /dev/null
+++ b/vendor/sized-chunks/LICENCE.md
@@ -0,0 +1,355 @@
+Mozilla Public License Version 2.0
+==================================
+
+### 1. Definitions
+
+**1.1. “Contributor”**
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+**1.2. “Contributor Version”**
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+**1.3. “Contribution”**
+ means Covered Software of a particular Contributor.
+
+**1.4. “Covered Software”**
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+**1.5. “Incompatible With Secondary Licenses”**
+ means
+
+* **(a)** that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+* **(b)** that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+**1.6. “Executable Form”**
+ means any form of the work other than Source Code Form.
+
+**1.7. “Larger Work”**
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+**1.8. “License”**
+ means this document.
+
+**1.9. “Licensable”**
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+**1.10. “Modifications”**
+ means any of the following:
+
+* **(a)** any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+* **(b)** any new file in Source Code Form that contains any Covered
+ Software.
+
+**1.11. “Patent Claims” of a Contributor**
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+**1.12. “Secondary License”**
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+**1.13. “Source Code Form”**
+ means the form of the work preferred for making modifications.
+
+**1.14. “You” (or “Your”)**
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, “control” means **(a)** the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or **(b)** ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+
+### 2. License Grants and Conditions
+
+#### 2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+* **(a)** under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+* **(b)** under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+#### 2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+#### 2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+* **(a)** for any code that a Contributor has removed from Covered Software;
+ or
+* **(b)** for infringements caused by: **(i)** Your and any other third party's
+ modifications of Covered Software, or **(ii)** the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+* **(c)** under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+#### 2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+#### 2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+#### 2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+#### 2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+
+### 3. Responsibilities
+
+#### 3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+#### 3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+* **(a)** such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+* **(b)** You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+#### 3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+#### 3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+#### 3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+
+### 4. Inability to Comply Due to Statute or Regulation
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: **(a)** comply with
+the terms of this License to the maximum extent possible; and **(b)**
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+
+### 5. Termination
+
+**5.1.** The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated **(a)** provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and **(b)** on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+**5.2.** If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+**5.3.** In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+
+### 6. Disclaimer of Warranty
+
+> Covered Software is provided under this License on an “as is”
+> basis, without warranty of any kind, either expressed, implied, or
+> statutory, including, without limitation, warranties that the
+> Covered Software is free of defects, merchantable, fit for a
+> particular purpose or non-infringing. The entire risk as to the
+> quality and performance of the Covered Software is with You.
+> Should any Covered Software prove defective in any respect, You
+> (not any Contributor) assume the cost of any necessary servicing,
+> repair, or correction. This disclaimer of warranty constitutes an
+> essential part of this License. No use of any Covered Software is
+> authorized under this License except under this disclaimer.
+
+### 7. Limitation of Liability
+
+> Under no circumstances and under no legal theory, whether tort
+> (including negligence), contract, or otherwise, shall any
+> Contributor, or anyone who distributes Covered Software as
+> permitted above, be liable to You for any direct, indirect,
+> special, incidental, or consequential damages of any character
+> including, without limitation, damages for lost profits, loss of
+> goodwill, work stoppage, computer failure or malfunction, or any
+> and all other commercial damages or losses, even if such party
+> shall have been informed of the possibility of such damages. This
+> limitation of liability shall not apply to liability for death or
+> personal injury resulting from such party's negligence to the
+> extent applicable law prohibits such limitation. Some
+> jurisdictions do not allow the exclusion or limitation of
+> incidental or consequential damages, so this exclusion and
+> limitation may not apply to You.
+
+
+### 8. Litigation
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+
+### 9. Miscellaneous
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+
+### 10. Versions of the License
+
+#### 10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+#### 10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+#### 10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+#### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+## Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+## Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/sized-chunks/README.md b/vendor/sized-chunks/README.md
new file mode 100644
index 000000000..0bb3c31a7
--- /dev/null
+++ b/vendor/sized-chunks/README.md
@@ -0,0 +1,35 @@
+# sized-chunks
+
+Various fixed length array data types, designed for [immutable.rs].
+
+## Overview
+
+This crate provides the core building blocks for the immutable data structures
+in [immutable.rs]: a sized array with O(1) amortised double ended push/pop and
+smarter insert/remove performance (used by `im::Vector` and `im::OrdMap`), and a
+fixed size sparse array (used by `im::HashMap`).
+
+In a nutshell, this crate contains the unsafe bits from [immutable.rs], which
+may or may not be useful to anyone else, and have been split out for ease of
+auditing.
+
+## Documentation
+
+* [API docs](https://docs.rs/sized-chunks)
+
+## Licence
+
+Copyright 2019 Bodil Stokke
+
+This software is subject to the terms of the Mozilla Public
+License, v. 2.0. If a copy of the MPL was not distributed with this
+file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+## Code of Conduct
+
+Please note that this project is released with a [Contributor Code of
+Conduct][coc]. By participating in this project you agree to abide by its
+terms.
+
+[immutable.rs]: https://immutable.rs/
+[coc]: https://github.com/bodil/sized-chunks/blob/master/CODE_OF_CONDUCT.md
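
To make the overview above concrete, here is a hedged usage sketch of the sized array the
README mentions, with O(1) amortised double-ended push/pop. The API is assumed from the
docs.rs documentation for the 0.6 series; the element type and values are illustrative.

```rust
// Sketch only: assumes the documented 0.6.x Chunk API.
use sized_chunks::Chunk;

fn main() {
    // The size parameter defaults to 64, so this chunk holds up to 64 elements.
    let mut chunk: Chunk<u64> = Chunk::new();
    chunk.push_back(2);
    chunk.push_back(3);
    chunk.push_front(1);
    assert_eq!(&[1, 2, 3][..], &chunk[..]);

    // Popping from either end is also O(1).
    let _front = chunk.pop_front();
    let _back = chunk.pop_back();
    assert_eq!(1, chunk.len());
}
```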
diff --git a/vendor/sized-chunks/src/arbitrary.rs b/vendor/sized-chunks/src/arbitrary.rs
new file mode 100644
index 000000000..fc49984ff
--- /dev/null
+++ b/vendor/sized-chunks/src/arbitrary.rs
@@ -0,0 +1,152 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+use bitmaps::Bits;
+use core::iter;
+
+use ::arbitrary::{size_hint, Arbitrary, Result, Unstructured};
+
+use crate::{types::ChunkLength, Chunk, InlineArray, SparseChunk};
+
+#[cfg(feature = "ringbuffer")]
+use crate::RingBuffer;
+
+fn empty<T: 'static>() -> Box<dyn Iterator<Item = T>> {
+ Box::new(iter::empty())
+}
+
+fn shrink_collection<T: Clone, A: Arbitrary>(
+ entries: impl Iterator<Item = T>,
+ f: impl Fn(&T) -> Box<dyn Iterator<Item = A>>,
+) -> Box<dyn Iterator<Item = Vec<A>>> {
+ let entries: Vec<_> = entries.collect();
+ if entries.is_empty() {
+ return empty();
+ }
+
+ let mut shrinkers: Vec<Vec<_>> = vec![];
+ let mut i = entries.len();
+ loop {
+ shrinkers.push(entries.iter().take(i).map(&f).collect());
+ i /= 2;
+ if i == 0 {
+ break;
+ }
+ }
+ Box::new(iter::once(Vec::new()).chain(iter::from_fn(move || loop {
+ let mut shrinker = shrinkers.pop()?;
+ let x: Option<Vec<A>> = shrinker.iter_mut().map(|s| s.next()).collect();
+ if x.is_none() {
+ continue;
+ }
+ shrinkers.push(shrinker);
+ return x;
+ })))
+}
+
+impl<A, N> Arbitrary for Chunk<A, N>
+where
+ A: Arbitrary,
+ N: ChunkLength<A> + 'static,
+{
+ fn arbitrary(u: &mut Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ size_hint::recursion_guard(depth, |depth| {
+ let (_, upper) = A::size_hint(depth);
+ (0, upper.map(|upper| upper * Self::CAPACITY))
+ })
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let collections = shrink_collection(self.iter(), |x| x.shrink());
+ Box::new(collections.map(|entries| entries.into_iter().collect()))
+ }
+}
+
+#[cfg(feature = "ringbuffer")]
+impl<A, N> Arbitrary for RingBuffer<A, N>
+where
+ A: Arbitrary,
+ N: ChunkLength<A> + 'static,
+{
+ fn arbitrary(u: &mut Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ size_hint::recursion_guard(depth, |depth| {
+ let (_, upper) = A::size_hint(depth);
+ (0, upper.map(|upper| upper * Self::CAPACITY))
+ })
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let collections = shrink_collection(self.iter(), |x| x.shrink());
+ Box::new(collections.map(|entries| entries.into_iter().collect()))
+ }
+}
+
+impl<A, N> Arbitrary for SparseChunk<A, N>
+where
+ A: Clone,
+ Option<A>: Arbitrary,
+ N: ChunkLength<A> + Bits + 'static,
+{
+ fn arbitrary(u: &mut Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ size_hint::recursion_guard(depth, |depth| {
+ let (_, upper) = Option::<A>::size_hint(depth);
+ (0, upper.map(|upper| upper * Self::CAPACITY))
+ })
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let collections = shrink_collection(self.clone().option_drain(), |x| x.shrink());
+ Box::new(collections.map(|entries| entries.into_iter().collect()))
+ }
+}
+
+impl<A, T> Arbitrary for InlineArray<A, T>
+where
+ A: Arbitrary,
+ T: 'static,
+{
+ fn arbitrary(u: &mut Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'_>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.take(Self::CAPACITY).collect()
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ size_hint::recursion_guard(depth, |depth| {
+ let (_, upper) = A::size_hint(depth);
+ (0, upper.map(|upper| upper * Self::CAPACITY))
+ })
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let collections = shrink_collection(self.iter(), |x| x.shrink());
+ Box::new(collections.map(|entries| entries.into_iter().collect()))
+ }
+}
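
The `Arbitrary` implementations above exist to support fuzzing. Below is a hedged sketch
of how a fuzz target might drive them, assuming the arbitrary 0.4 API pinned in Cargo.toml
and the crate's `arbitrary` feature enabled; the function name and assertion are
illustrative, not taken from the crate's own fuzz targets.

```rust
// Sketch only: assumes the `arbitrary` feature is enabled and the arbitrary 0.4 API.
use arbitrary::{Arbitrary, Unstructured};
use sized_chunks::Chunk;

// A fuzz harness would call this with raw fuzzer-provided bytes.
fn fuzz_one(data: &[u8]) {
    let mut u = Unstructured::new(data);
    if let Ok(chunk) = Chunk::<u8>::arbitrary(&mut u) {
        // Whatever the impl produced, it must fit within the fixed capacity.
        assert!(chunk.len() <= Chunk::<u8>::CAPACITY);
    }
}
```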
diff --git a/vendor/sized-chunks/src/inline_array/iter.rs b/vendor/sized-chunks/src/inline_array/iter.rs
new file mode 100644
index 000000000..eec6ad816
--- /dev/null
+++ b/vendor/sized-chunks/src/inline_array/iter.rs
@@ -0,0 +1,63 @@
+use core::iter::FusedIterator;
+
+use crate::InlineArray;
+
+/// A consuming iterator over the elements of an `InlineArray`.
+pub struct Iter<A, T> {
+ pub(crate) array: InlineArray<A, T>,
+}
+
+impl<A, T> Iterator for Iter<A, T> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.array.remove(0)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.array.len(), Some(self.array.len()))
+ }
+}
+
+impl<A, T> DoubleEndedIterator for Iter<A, T> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.array.pop()
+ }
+}
+
+impl<A, T> ExactSizeIterator for Iter<A, T> {}
+
+impl<A, T> FusedIterator for Iter<A, T> {}
+
+/// A draining iterator over the elements of an `InlineArray`.
+///
+/// "Draining" means that as the iterator yields each element, it's removed from
+/// the `InlineArray`. When the iterator terminates, the array will be empty.
+/// This is different from the consuming iterator `Iter` in that `Iter` will
+/// take ownership of the `InlineArray` and discard it when you're done
+/// iterating, while `Drain` leaves you still owning the drained `InlineArray`.
+pub struct Drain<'a, A, T> {
+ pub(crate) array: &'a mut InlineArray<A, T>,
+}
+
+impl<'a, A, T> Iterator for Drain<'a, A, T> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.array.remove(0)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.array.len(), Some(self.array.len()))
+ }
+}
+
+impl<'a, A, T> DoubleEndedIterator for Drain<'a, A, T> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.array.pop()
+ }
+}
+
+impl<'a, A, T> ExactSizeIterator for Drain<'a, A, T> {}
+
+impl<'a, A, T> FusedIterator for Drain<'a, A, T> {}
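
The doc comment above distinguishes the consuming `Iter` from the borrowing `Drain`. The
sketch below illustrates that difference using methods visible in this diff (`extend`,
`drain`, `into_iter`); the host type `[u64; 8]` is only there to provide capacity.

```rust
// Sketch only: contrasts Drain with the consuming iterator.
use sized_chunks::InlineArray;

fn main() {
    let mut array: InlineArray<u8, [u64; 8]> = InlineArray::new();
    array.extend(1..=3);

    // drain() removes the elements but leaves the (now empty) array usable.
    let drained: Vec<u8> = array.drain().collect();
    assert_eq!(vec![1, 2, 3], drained);
    assert!(array.is_empty());

    // into_iter() consumes the array outright.
    array.extend(4..=5);
    let consumed: Vec<u8> = array.into_iter().collect();
    assert_eq!(vec![4, 5], consumed);
}
```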
diff --git a/vendor/sized-chunks/src/inline_array/mod.rs b/vendor/sized-chunks/src/inline_array/mod.rs
new file mode 100644
index 000000000..43cd7d935
--- /dev/null
+++ b/vendor/sized-chunks/src/inline_array/mod.rs
@@ -0,0 +1,710 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! A fixed capacity array sized to match some other type `T`.
+//!
+//! See [`InlineArray`](struct.InlineArray.html)
+
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp::Ordering;
+use core::fmt::{Debug, Error, Formatter};
+use core::hash::{Hash, Hasher};
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+use core::slice::{from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut};
+
+mod iter;
+pub use self::iter::{Drain, Iter};
+
+/// A fixed capacity array sized to match some other type `T`.
+///
+/// This works like a vector, but allocated on the stack (and thus marginally
+/// faster than `Vec`), with the allocated space exactly matching the size of
+/// the given type `T`. The vector consists of a `usize` tracking its current
+/// length and zero or more elements of type `A`. The capacity is thus
+/// `( size_of::<T>() - size_of::<usize>() ) / size_of::<A>()`. This could lead
+/// to situations where the capacity is zero, if `size_of::<A>()` is greater
+/// than `size_of::<T>() - size_of::<usize>()`, which is not an error and
+/// handled properly by the data structure.
+///
+/// If `size_of::<T>()` is less than `size_of::<usize>()`, meaning the vector
+/// has no space to store its length, `InlineArray::new()` will panic.
+///
+/// This is meant to facilitate optimisations where a list data structure
+/// allocates a fairly large struct for itself, allowing you to replace it with
+/// an `InlineArray` until it grows beyond its capacity. This not only gives you
+/// a performance boost at very small sizes, it also saves you from having to
+/// allocate anything on the heap until absolutely necessary.
+///
+/// For instance, `im::Vector<A>` in its final form currently looks like this
+/// (approximately):
+///
+/// ```rust, ignore
+/// struct RRB<A> {
+/// length: usize,
+/// tree_height: usize,
+/// outer_head: Rc<Chunk<A>>,
+/// inner_head: Rc<Chunk<A>>,
+/// tree: Rc<TreeNode<A>>,
+/// inner_tail: Rc<Chunk<A>>,
+/// outer_tail: Rc<Chunk<A>>,
+/// }
+/// ```
+///
+/// That's two `usize`s and five `Rc`s, which comes in at 56 bytes on x86_64
+/// architectures. With `InlineArray`, that leaves us with 56 -
+/// `size_of::<usize>()` = 48 bytes we can use before having to expand into the
+/// full data structure. If `A` is `u8`, that's 48 elements, and even if `A` is a
+/// pointer we can still keep 6 of them inline before we run out of capacity.
+///
+/// We can declare an enum like this:
+///
+/// ```rust, ignore
+/// enum VectorWrapper<A> {
+/// Inline(InlineArray<A, RRB<A>>),
+/// Full(RRB<A>),
+/// }
+/// ```
+///
+/// Both of these will have the same size, and we can swap the `Inline` case out
+/// with the `Full` case once the `InlineArray` runs out of capacity.
+#[repr(C)]
+pub struct InlineArray<A, T> {
+ // Alignment tricks
+ //
+ // We need both the usize header and data to be properly aligned in memory. We do a few tricks
+ // to handle that.
+ //
+ // * An alignment is always power of 2. Therefore, with a pair of alignments, one is always
+ // a multiple of the other (one way or the other).
+ // * A struct is aligned to at least the max alignment of each of its fields.
+ // * A repr(C) struct follows the order of fields and pushes each as close to the previous one
+ // as allowed by alignment.
+ //
+ // By placing two "fake" fields that have 0 size, but an alignment first, we make sure that all
+ // 3 start at the beginning of the struct and that all of them are aligned to their maximum
+ // alignment.
+ //
+ // Unfortunately, we can't use `[A; 0]` to align to actual alignment of the type A, because
+ // it prevents use of InlineArray in recursive types.
+ // We rely on alignment of usize or T to be sufficient, and panic otherwise.
+ //
+ // Furthermore, because we don't know if usize or A has bigger alignment, we decide on case by
+ // case basis if the header or the elements go first. By placing the one with higher alignment
+ // requirements first, we align that one and the other one will be aligned "automatically" when
+ // placed just after it.
+ //
+ // To the best of our knowledge, this is all guaranteed by the compiler. But just to make sure,
+ // we have bunch of asserts in the constructor to check; as these are invariants enforced by
+ // the compiler, it should be trivial for it to remove the checks so they are for free (if we
+ // are correct) or will save us (if we are not).
+ _header_align: [usize; 0],
+ _phantom: PhantomData<A>,
+ data: MaybeUninit<T>,
+}
+
+const fn capacity(
+ host_size: usize,
+ header_size: usize,
+ element_size: usize,
+ element_align: usize,
+ container_align: usize,
+) -> usize {
+ if element_size == 0 {
+ usize::MAX
+ } else if element_align <= container_align && host_size > header_size {
+ (host_size - header_size) / element_size
+ } else {
+ 0 // larger alignment can't be guaranteed, so it'd be unsafe to store any elements
+ }
+}
+
+impl<A, T> InlineArray<A, T> {
+ const HOST_SIZE: usize = mem::size_of::<T>();
+ const ELEMENT_SIZE: usize = mem::size_of::<A>();
+ const HEADER_SIZE: usize = mem::size_of::<usize>();
+ // Do we place the header before the elements or the other way around?
+ const HEADER_FIRST: bool = mem::align_of::<usize>() >= mem::align_of::<A>();
+ // Note: one of the following is always 0
+ // How many usizes to skip before the first element?
+ const ELEMENT_SKIP: usize = Self::HEADER_FIRST as usize;
+ // How many elements to skip before the header
+ const HEADER_SKIP: usize = Self::CAPACITY * (1 - Self::ELEMENT_SKIP);
+
+ /// The maximum number of elements the `InlineArray` can hold.
+ pub const CAPACITY: usize = capacity(
+ Self::HOST_SIZE,
+ Self::HEADER_SIZE,
+ Self::ELEMENT_SIZE,
+ mem::align_of::<A>(),
+ mem::align_of::<Self>(),
+ );
+
+ #[inline]
+ #[must_use]
+ unsafe fn len_const(&self) -> *const usize {
+ let ptr = self
+ .data
+ .as_ptr()
+ .cast::<A>()
+ .add(Self::HEADER_SKIP)
+ .cast::<usize>();
+ debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
+ ptr
+ }
+
+ #[inline]
+ #[must_use]
+ pub(crate) unsafe fn len_mut(&mut self) -> *mut usize {
+ let ptr = self
+ .data
+ .as_mut_ptr()
+ .cast::<A>()
+ .add(Self::HEADER_SKIP)
+ .cast::<usize>();
+ debug_assert!(ptr as usize % mem::align_of::<usize>() == 0);
+ ptr
+ }
+
+ #[inline]
+ #[must_use]
+ pub(crate) unsafe fn data(&self) -> *const A {
+ let ptr = self
+ .data
+ .as_ptr()
+ .cast::<usize>()
+ .add(Self::ELEMENT_SKIP)
+ .cast::<A>();
+ debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
+ ptr
+ }
+
+ #[inline]
+ #[must_use]
+ unsafe fn data_mut(&mut self) -> *mut A {
+ let ptr = self
+ .data
+ .as_mut_ptr()
+ .cast::<usize>()
+ .add(Self::ELEMENT_SKIP)
+ .cast::<A>();
+ debug_assert!(ptr as usize % mem::align_of::<A>() == 0);
+ ptr
+ }
+
+ #[inline]
+ #[must_use]
+ unsafe fn ptr_at(&self, index: usize) -> *const A {
+ self.data().add(index)
+ }
+
+ #[inline]
+ #[must_use]
+ unsafe fn ptr_at_mut(&mut self, index: usize) -> *mut A {
+ self.data_mut().add(index)
+ }
+
+ #[inline]
+ unsafe fn read_at(&self, index: usize) -> A {
+ ptr::read(self.ptr_at(index))
+ }
+
+ #[inline]
+ unsafe fn write_at(&mut self, index: usize, value: A) {
+ ptr::write(self.ptr_at_mut(index), value);
+ }
+
+ /// Get the length of the array.
+ #[inline]
+ #[must_use]
+ pub fn len(&self) -> usize {
+ unsafe { *self.len_const() }
+ }
+
+ /// Test if the array is empty.
+ #[inline]
+ #[must_use]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Test if the array is at capacity.
+ #[inline]
+ #[must_use]
+ pub fn is_full(&self) -> bool {
+ self.len() >= Self::CAPACITY
+ }
+
+ /// Construct a new empty array.
+ ///
+ /// # Panics
+ ///
+ /// If the element type requires large alignment, which is larger than
+ /// both alignment of `usize` and alignment of the type that provides the capacity.
+ #[inline]
+ #[must_use]
+ pub fn new() -> Self {
+ assert!(Self::HOST_SIZE > Self::HEADER_SIZE);
+ assert!(
+ mem::align_of::<Self>() % mem::align_of::<A>() == 0,
+ "InlineArray can't satisfy alignment of {}",
+ core::any::type_name::<A>()
+ );
+
+ let mut self_ = Self {
+ _header_align: [],
+ _phantom: PhantomData,
+ data: MaybeUninit::uninit(),
+ };
+ // Sanity check our assumptions about what is guaranteed by the compiler. If we are right,
+ // these should completely optimize out of the resulting binary.
+ assert_eq!(
+ &self_ as *const _ as usize,
+ self_.data.as_ptr() as usize,
+ "Padding at the start of struct",
+ );
+ assert_eq!(mem::size_of::<Self>(), mem::size_of::<T>());
+ assert_eq!(
+ self_.data.as_ptr() as usize % mem::align_of::<usize>(),
+ 0,
+ "Unaligned header"
+ );
+ assert_eq!(
+ self_.data.as_ptr() as usize % mem::align_of::<A>(),
+ 0,
+ "Unaligned elements"
+ );
+ assert!(Self::ELEMENT_SKIP == 0 || Self::HEADER_SKIP == 0);
+ unsafe { ptr::write(self_.len_mut(), 0usize) };
+ self_
+ }
+
+ /// Push an item to the back of the array.
+ ///
+ /// Panics if the capacity of the array is exceeded.
+ ///
+ /// Time: O(1)
+ pub fn push(&mut self, value: A) {
+ if self.is_full() {
+ panic!("InlineArray::push: chunk size overflow");
+ }
+ unsafe {
+ self.write_at(self.len(), value);
+ *self.len_mut() += 1;
+ }
+ }
+
+ /// Pop an item from the back of the array.
+ ///
+ /// Returns `None` if the array is empty.
+ ///
+ /// Time: O(1)
+ pub fn pop(&mut self) -> Option<A> {
+ if self.is_empty() {
+ None
+ } else {
+ unsafe {
+ *self.len_mut() -= 1;
+ }
+ Some(unsafe { self.read_at(self.len()) })
+ }
+ }
+
+ /// Insert a new value at index `index`, shifting all the following values
+ /// to the right.
+ ///
+ /// Panics if the index is out of bounds or the array is at capacity.
+ ///
+ /// Time: O(n) for the number of items shifted
+ pub fn insert(&mut self, index: usize, value: A) {
+ if self.is_full() {
+            panic!("InlineArray::insert: chunk size overflow");
+ }
+ if index > self.len() {
+ panic!("InlineArray::insert: index out of bounds");
+ }
+ unsafe {
+ let src = self.ptr_at_mut(index);
+ ptr::copy(src, src.add(1), self.len() - index);
+ ptr::write(src, value);
+ *self.len_mut() += 1;
+ }
+ }
+
+ /// Remove the value at index `index`, shifting all the following values to
+ /// the left.
+ ///
+ /// Returns the removed value, or `None` if the array is empty or the index
+ /// is out of bounds.
+ ///
+ /// Time: O(n) for the number of items shifted
+ pub fn remove(&mut self, index: usize) -> Option<A> {
+ if index >= self.len() {
+ None
+ } else {
+ unsafe {
+ let src = self.ptr_at_mut(index);
+ let value = ptr::read(src);
+ *self.len_mut() -= 1;
+ ptr::copy(src.add(1), src, self.len() - index);
+ Some(value)
+ }
+ }
+ }
+
+ /// Split an array into two, the original array containing
+ /// everything up to `index` and the returned array containing
+ /// everything from `index` onwards.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items in the new chunk
+ pub fn split_off(&mut self, index: usize) -> Self {
+ if index > self.len() {
+ panic!("InlineArray::split_off: index out of bounds");
+ }
+ let mut out = Self::new();
+ if index < self.len() {
+ unsafe {
+ ptr::copy(self.ptr_at(index), out.data_mut(), self.len() - index);
+ *out.len_mut() = self.len() - index;
+ *self.len_mut() = index;
+ }
+ }
+ out
+ }
+
+ #[inline]
+ unsafe fn drop_contents(&mut self) {
+ ptr::drop_in_place::<[A]>(&mut **self) // uses DerefMut
+ }
+
+ /// Discard the contents of the array.
+ ///
+ /// Time: O(n)
+ pub fn clear(&mut self) {
+ unsafe {
+ self.drop_contents();
+ *self.len_mut() = 0;
+ }
+ }
+
+ /// Construct an iterator that drains values from the front of the array.
+ pub fn drain(&mut self) -> Drain<'_, A, T> {
+ Drain { array: self }
+ }
+}
+
+impl<A, T> Drop for InlineArray<A, T> {
+ fn drop(&mut self) {
+ unsafe { self.drop_contents() }
+ }
+}
+
+impl<A, T> Default for InlineArray<A, T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// WANT:
+// impl<A, T> Copy for InlineArray<A, T> where A: Copy {}
+
+impl<A, T> Clone for InlineArray<A, T>
+where
+ A: Clone,
+{
+ fn clone(&self) -> Self {
+ let mut copy = Self::new();
+ for i in 0..self.len() {
+ unsafe {
+ copy.write_at(i, self.get_unchecked(i).clone());
+ }
+ }
+ unsafe {
+ *copy.len_mut() = self.len();
+ }
+ copy
+ }
+}
+
+impl<A, T> Deref for InlineArray<A, T> {
+ type Target = [A];
+ fn deref(&self) -> &Self::Target {
+ unsafe { from_raw_parts(self.data(), self.len()) }
+ }
+}
+
+impl<A, T> DerefMut for InlineArray<A, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { from_raw_parts_mut(self.data_mut(), self.len()) }
+ }
+}
+
+impl<A, T> Borrow<[A]> for InlineArray<A, T> {
+ fn borrow(&self) -> &[A] {
+ self.deref()
+ }
+}
+
+impl<A, T> BorrowMut<[A]> for InlineArray<A, T> {
+ fn borrow_mut(&mut self) -> &mut [A] {
+ self.deref_mut()
+ }
+}
+
+impl<A, T> AsRef<[A]> for InlineArray<A, T> {
+ fn as_ref(&self) -> &[A] {
+ self.deref()
+ }
+}
+
+impl<A, T> AsMut<[A]> for InlineArray<A, T> {
+ fn as_mut(&mut self) -> &mut [A] {
+ self.deref_mut()
+ }
+}
+impl<A, T, Slice> PartialEq<Slice> for InlineArray<A, T>
+where
+ Slice: Borrow<[A]>,
+ A: PartialEq,
+{
+ fn eq(&self, other: &Slice) -> bool {
+ self.deref() == other.borrow()
+ }
+}
+
+impl<A, T> Eq for InlineArray<A, T> where A: Eq {}
+
+impl<A, T> PartialOrd for InlineArray<A, T>
+where
+ A: PartialOrd,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+impl<A, T> Ord for InlineArray<A, T>
+where
+ A: Ord,
+{
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+impl<A, T> Debug for InlineArray<A, T>
+where
+ A: Debug,
+{
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+        f.write_str("InlineArray")?;
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<A, T> Hash for InlineArray<A, T>
+where
+ A: Hash,
+{
+ fn hash<H>(&self, hasher: &mut H)
+ where
+ H: Hasher,
+ {
+ for item in self {
+ item.hash(hasher)
+ }
+ }
+}
+
+impl<A, T> IntoIterator for InlineArray<A, T> {
+ type Item = A;
+ type IntoIter = Iter<A, T>;
+ fn into_iter(self) -> Self::IntoIter {
+ Iter { array: self }
+ }
+}
+
+impl<A, T> FromIterator<A> for InlineArray<A, T> {
+ fn from_iter<I>(it: I) -> Self
+ where
+ I: IntoIterator<Item = A>,
+ {
+ let mut chunk = Self::new();
+ for item in it {
+ chunk.push(item);
+ }
+ chunk
+ }
+}
+
+impl<'a, A, T> IntoIterator for &'a InlineArray<A, T> {
+ type Item = &'a A;
+ type IntoIter = SliceIter<'a, A>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, A, T> IntoIterator for &'a mut InlineArray<A, T> {
+ type Item = &'a mut A;
+ type IntoIter = SliceIterMut<'a, A>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<A, T> Extend<A> for InlineArray<A, T> {
+ /// Append the contents of the iterator to the back of the array.
+ ///
+ /// Panics if the array exceeds its capacity.
+ ///
+ /// Time: O(n) for the length of the iterator
+ fn extend<I>(&mut self, it: I)
+ where
+ I: IntoIterator<Item = A>,
+ {
+ for item in it {
+ self.push(item);
+ }
+ }
+}
+
+impl<'a, A, T> Extend<&'a A> for InlineArray<A, T>
+where
+ A: 'a + Copy,
+{
+ /// Append the contents of the iterator to the back of the array.
+ ///
+ /// Panics if the array exceeds its capacity.
+ ///
+ /// Time: O(n) for the length of the iterator
+ fn extend<I>(&mut self, it: I)
+ where
+ I: IntoIterator<Item = &'a A>,
+ {
+ for item in it {
+ self.push(*item);
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::tests::DropTest;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[test]
+ fn dropping() {
+ let counter = AtomicUsize::new(0);
+ {
+ let mut chunk: InlineArray<DropTest<'_>, [usize; 32]> = InlineArray::new();
+ for _i in 0..16 {
+ chunk.push(DropTest::new(&counter));
+ }
+ assert_eq!(16, counter.load(Ordering::Relaxed));
+ for _i in 0..8 {
+ chunk.pop();
+ }
+ assert_eq!(8, counter.load(Ordering::Relaxed));
+ }
+ assert_eq!(0, counter.load(Ordering::Relaxed));
+ }
+
+ #[test]
+ fn zero_sized_values() {
+ let mut chunk: InlineArray<(), [usize; 32]> = InlineArray::new();
+ for _i in 0..65536 {
+ chunk.push(());
+ }
+ assert_eq!(65536, chunk.len());
+ assert_eq!(Some(()), chunk.pop());
+ }
+
+ #[test]
+ fn low_align_base() {
+ let mut chunk: InlineArray<String, [u8; 512]> = InlineArray::new();
+ chunk.push("Hello".to_owned());
+ assert_eq!(chunk[0], "Hello");
+
+ let mut chunk: InlineArray<String, [u16; 512]> = InlineArray::new();
+ chunk.push("Hello".to_owned());
+ assert_eq!(chunk[0], "Hello");
+ }
+
+ #[test]
+ fn recursive_types_compile() {
+ #[allow(dead_code)]
+ enum Recursive {
+ A(InlineArray<Recursive, u64>),
+ B,
+ }
+ }
+
+ #[test]
+ fn insufficient_alignment1() {
+ #[repr(align(256))]
+ struct BigAlign(u8);
+ #[repr(align(32))]
+ struct MediumAlign(u8);
+
+ assert_eq!(0, InlineArray::<BigAlign, [usize; 256]>::CAPACITY);
+ assert_eq!(0, InlineArray::<BigAlign, [u64; 256]>::CAPACITY);
+ assert_eq!(0, InlineArray::<BigAlign, [f64; 256]>::CAPACITY);
+ assert_eq!(0, InlineArray::<BigAlign, [MediumAlign; 256]>::CAPACITY);
+ }
+
+ #[test]
+ #[should_panic(
+ expected = "InlineArray can't satisfy alignment of sized_chunks::inline_array::test::insufficient_alignment2::BigAlign"
+ )]
+ fn insufficient_alignment2() {
+ #[repr(align(256))]
+ struct BigAlign(usize);
+
+ let _: InlineArray<BigAlign, [usize; 256]> = InlineArray::new();
+ }
+
+ #[test]
+ fn sufficient_alignment1() {
+ #[repr(align(256))]
+ struct BigAlign(u8);
+
+ assert_eq!(13, InlineArray::<BigAlign, [BigAlign; 14]>::CAPACITY);
+ assert_eq!(1, InlineArray::<BigAlign, [BigAlign; 2]>::CAPACITY);
+ assert_eq!(0, InlineArray::<BigAlign, [BigAlign; 1]>::CAPACITY);
+
+ let mut chunk: InlineArray<BigAlign, [BigAlign; 2]> = InlineArray::new();
+ chunk.push(BigAlign(42));
+ assert_eq!(
+ chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
+ 0
+ );
+ }
+
+ #[test]
+ fn sufficient_alignment2() {
+ #[repr(align(128))]
+ struct BigAlign([u8; 64]);
+ #[repr(align(256))]
+ struct BiggerAlign(u8);
+
+ assert_eq!(199, InlineArray::<BigAlign, [BiggerAlign; 100]>::CAPACITY);
+ assert_eq!(3, InlineArray::<BigAlign, [BiggerAlign; 2]>::CAPACITY);
+ assert_eq!(1, InlineArray::<BigAlign, [BiggerAlign; 1]>::CAPACITY);
+ assert_eq!(0, InlineArray::<BigAlign, [BiggerAlign; 0]>::CAPACITY);
+
+ let mut chunk: InlineArray<BigAlign, [BiggerAlign; 1]> = InlineArray::new();
+ chunk.push(BigAlign([0; 64]));
+ assert_eq!(
+ chunk.get(0).unwrap() as *const _ as usize % mem::align_of::<BigAlign>(),
+ 0
+ );
+ }
+}
diff --git a/vendor/sized-chunks/src/lib.rs b/vendor/sized-chunks/src/lib.rs
new file mode 100644
index 000000000..c011bea7a
--- /dev/null
+++ b/vendor/sized-chunks/src/lib.rs
@@ -0,0 +1,126 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! # Sized Chunks
+//!
+//! This crate contains three fixed size low level array like data structures,
+//! primarily intended for use in [immutable.rs], but fully supported as a
+//! standalone crate.
+//!
+//! Their sizing information is encoded in the type using the
+//! [`typenum`][typenum] crate, which you may want to take a look at before
+//! reading on. Usually, all you need to know about it is that it provides
+//! types `U1` to `U128` to represent numbers, which the data types take as type
+//! parameters, eg. `SparseChunk<A, U32>` would give you a sparse array with
+//! room for 32 elements of type `A`. You can also omit the size parameter, as
+//! it defaults to 64, so `SparseChunk<A>` would be a sparse array with a
+//! capacity of 64.
+//!
+//! All data structures always allocate the same amount of space, as determined
+//! by their capacity, regardless of how many elements they contain, and when
+//! they run out of space, they will panic.
+//!
+//! ## Data Structures
+//!
+//! | Type | Description | Push | Pop | Deref to `&[A]` |
+//! | ---- | ----------- | ---- | --- | --------------- |
+//! | [`Chunk`][Chunk] | Contiguous array | O(1)/O(n) | O(1) | Yes |
+//! | [`RingBuffer`][RingBuffer] | Non-contiguous array | O(1) | O(1) | No |
+//! | [`SparseChunk`][SparseChunk] | Sparse array | N/A | N/A | No |
+//!
+//! The [`Chunk`][Chunk] and [`RingBuffer`][RingBuffer] are very similar in
+//! practice, in that they both work like a plain array, except that you can
+//! push to either end with some expectation of performance. The difference is
+//! that [`RingBuffer`][RingBuffer] always allows you to do this in constant
+//! time, but in order to give that guarantee, it doesn't lay out its elements
+//! contiguously in memory, which means that you can't dereference it to a slice
+//! `&[A]`.
+//!
+//! [`Chunk`][Chunk], on the other hand, will shift its contents around when
+//! necessary to accommodate a push to a full side, but is able to guarantee a
+//! contiguous memory layout in this way, so it can always be dereferenced into
+//! a slice. Performance wise, repeated pushes to the same side will always run
+//! in constant time, but a push to one side followed by a push to the other
+//! side will cause the latter to run in linear time if there's no room (which
+//! there would only be if you've popped from that side).
+//!
+//! To choose between them, you can use the following rules:
+//! - I only ever want to push to the back: you don't need this crate, try
+//! [`ArrayVec`][ArrayVec].
+//! - I need to push to either side but probably not both on the same array: use
+//! [`Chunk`][Chunk].
+//! - I need to push to both sides and I don't need slices: use
+//! [`RingBuffer`][RingBuffer].
+//! - I need to push to both sides but I do need slices: use [`Chunk`][Chunk].
+//!
+//! Finally, [`SparseChunk`][SparseChunk] is a more efficient version of
+//! `Vec<Option<A>>`: each index is either inhabited or not, but instead of
+//! using the `Option` discriminant to decide which is which, it uses a compact
+//! bitmap. You can also think of `SparseChunk<A, N>` as a `BTreeMap<usize, A>`
+//! where the `usize` must be less than `N`, but without the performance
+//! overhead. Its API is also more consistent with a map than an array - there's
+//! no push, pop, append, etc, just insert, remove and lookup.
+//!
+//! ## [`InlineArray`][InlineArray]
+//!
+//! Finally, there's [`InlineArray`][InlineArray], which is a simple vector that's
+//! sized to fit inside any `Sized` type that's big enough to hold a size counter
+//! and at least one instance of the array element type. This can be a useful
+//! optimisation when implementing a list like data structure with a nontrivial
+//! set of pointers in its full form, where you could plausibly fit several
+//! elements inside the space allocated for the pointers. `im::Vector` is a
+//! good example of that, and the use case for which [`InlineArray`][InlineArray]
+//! was implemented.
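+//!
+//! A minimal sketch of how it's used on its own:
+//!
+//! ```rust
+//! use sized_chunks::InlineArray;
+//!
+//! // The backing type `[usize; 2]` only determines how much space is
+//! // available; it is never actually constructed.
+//! let mut array: InlineArray<u8, [usize; 2]> = InlineArray::new();
+//! array.push(1);
+//! array.push(2);
+//! assert_eq!(2, array.len());
+//! assert_eq!(Some(2), array.pop());
+//! ```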
+//!
+//! ## Feature Flags
+//!
+//! The following feature flags are available:
+//!
+//! | Feature | Description |
+//! | ------- | ----------- |
+//! | `arbitrary` | Provides [`Arbitrary`][Arbitrary] implementations from the [`arbitrary`][arbitrary_crate] crate. Requires the `std` flag. |
+//! | `refpool` | Provides [`PoolDefault`][PoolDefault] and [`PoolClone`][PoolClone] implementations from the [`refpool`][refpool] crate. |
+//! | `ringbuffer` | Enables the [`RingBuffer`][RingBuffer] data structure. |
+//! | `std` | Without this flag (enabled by default), the crate will be `no_std`, omitting trait implementations that depend on `std::collections` and `std::io`. |
+//!
+//! [immutable.rs]: https://immutable.rs/
+//! [typenum]: https://docs.rs/typenum/
+//! [Chunk]: struct.Chunk.html
+//! [RingBuffer]: struct.RingBuffer.html
+//! [SparseChunk]: struct.SparseChunk.html
+//! [InlineArray]: struct.InlineArray.html
+//! [ArrayVec]: https://docs.rs/arrayvec/
+//! [Arbitrary]: https://docs.rs/arbitrary/latest/arbitrary/trait.Arbitrary.html
+//! [arbitrary_crate]: https://docs.rs/arbitrary
+//! [refpool]: https://docs.rs/refpool
+//! [PoolDefault]: https://docs.rs/refpool/latest/refpool/trait.PoolDefault.html
+//! [PoolClone]: https://docs.rs/refpool/latest/refpool/trait.PoolClone.html
+
+#![forbid(rust_2018_idioms)]
+#![deny(nonstandard_style)]
+#![warn(unreachable_pub, missing_docs)]
+#![cfg_attr(test, deny(warnings))]
+#![cfg_attr(not(any(feature = "std", test)), no_std)]
+// Jeremy Francis Corbyn, clippy devs need to calm down 🤦‍♀️
+#![allow(clippy::suspicious_op_assign_impl, clippy::suspicious_arithmetic_impl)]
+
+pub mod inline_array;
+pub mod sized_chunk;
+pub mod sparse_chunk;
+pub mod types;
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(feature = "arbitrary")]
+mod arbitrary;
+
+pub use crate::inline_array::InlineArray;
+pub use crate::sized_chunk::Chunk;
+pub use crate::sparse_chunk::SparseChunk;
+
+#[cfg(feature = "ringbuffer")]
+pub mod ring_buffer;
+#[cfg(feature = "ringbuffer")]
+pub use crate::ring_buffer::RingBuffer;
diff --git a/vendor/sized-chunks/src/ring_buffer/index.rs b/vendor/sized-chunks/src/ring_buffer/index.rs
new file mode 100644
index 000000000..78009a214
--- /dev/null
+++ b/vendor/sized-chunks/src/ring_buffer/index.rs
@@ -0,0 +1,178 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::ops::{Add, AddAssign, Sub, SubAssign};
+
+use typenum::Unsigned;
+
+pub(crate) struct RawIndex<N: Unsigned>(usize, PhantomData<N>);
+
+impl<N: Unsigned> Clone for RawIndex<N> {
+ #[inline]
+ #[must_use]
+ fn clone(&self) -> Self {
+ self.0.into()
+ }
+}
+
+impl<N> Copy for RawIndex<N> where N: Unsigned {}
+
+impl<N: Unsigned> RawIndex<N> {
+ #[inline]
+ #[must_use]
+ pub(crate) fn to_usize(self) -> usize {
+ self.0
+ }
+
+ /// Increments the index and returns a copy of the index /before/ incrementing.
+ #[inline]
+ #[must_use]
+ pub(crate) fn inc(&mut self) -> Self {
+ let old = *self;
+ self.0 = if self.0 == N::USIZE - 1 {
+ 0
+ } else {
+ self.0 + 1
+ };
+ old
+ }
+
+ /// Decrements the index and returns a copy of the new value.
+ #[inline]
+ #[must_use]
+ pub(crate) fn dec(&mut self) -> Self {
+ self.0 = if self.0 == 0 {
+ N::USIZE - 1
+ } else {
+ self.0 - 1
+ };
+ *self
+ }
+}
+
+impl<N: Unsigned> From<usize> for RawIndex<N> {
+ #[inline]
+ #[must_use]
+ fn from(index: usize) -> Self {
+ debug_assert!(index < N::USIZE);
+ RawIndex(index, PhantomData)
+ }
+}
+
+impl<N: Unsigned> PartialEq for RawIndex<N> {
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+impl<N: Unsigned> Eq for RawIndex<N> {}
+
+impl<N: Unsigned> Add for RawIndex<N> {
+ type Output = RawIndex<N>;
+ #[inline]
+ #[must_use]
+ fn add(self, other: Self) -> Self::Output {
+ self + other.0
+ }
+}
+
+impl<N: Unsigned> Add<usize> for RawIndex<N> {
+ type Output = RawIndex<N>;
+ #[inline]
+ #[must_use]
+ fn add(self, other: usize) -> Self::Output {
+ let mut result = self.0 + other;
+ while result >= N::USIZE {
+ result -= N::USIZE;
+ }
+ result.into()
+ }
+}
+
+impl<N: Unsigned> AddAssign<usize> for RawIndex<N> {
+ #[inline]
+ fn add_assign(&mut self, other: usize) {
+ self.0 += other;
+ while self.0 >= N::USIZE {
+ self.0 -= N::USIZE;
+ }
+ }
+}
+
+impl<N: Unsigned> Sub for RawIndex<N> {
+ type Output = RawIndex<N>;
+ #[inline]
+ #[must_use]
+ fn sub(self, other: Self) -> Self::Output {
+ self - other.0
+ }
+}
+
+impl<N: Unsigned> Sub<usize> for RawIndex<N> {
+ type Output = RawIndex<N>;
+ #[inline]
+ #[must_use]
+ fn sub(self, other: usize) -> Self::Output {
+ let mut start = self.0;
+ while other > start {
+ start += N::USIZE;
+ }
+ (start - other).into()
+ }
+}
+
+impl<N: Unsigned> SubAssign<usize> for RawIndex<N> {
+ #[inline]
+ fn sub_assign(&mut self, other: usize) {
+ while other > self.0 {
+ self.0 += N::USIZE;
+ }
+ self.0 -= other;
+ }
+}
+
+pub(crate) struct IndexIter<N: Unsigned> {
+ pub(crate) remaining: usize,
+ pub(crate) left_index: RawIndex<N>,
+ pub(crate) right_index: RawIndex<N>,
+}
+
+impl<N: Unsigned> Iterator for IndexIter<N> {
+ type Item = RawIndex<N>;
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.remaining > 0 {
+ self.remaining -= 1;
+ Some(self.left_index.inc())
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ #[must_use]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.remaining, Some(self.remaining))
+ }
+}
+
+impl<N: Unsigned> DoubleEndedIterator for IndexIter<N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.remaining > 0 {
+ self.remaining -= 1;
+ Some(self.right_index.dec())
+ } else {
+ None
+ }
+ }
+}
+
+impl<N: Unsigned> ExactSizeIterator for IndexIter<N> {}
+
+impl<N: Unsigned> FusedIterator for IndexIter<N> {}
diff --git a/vendor/sized-chunks/src/ring_buffer/iter.rs b/vendor/sized-chunks/src/ring_buffer/iter.rs
new file mode 100644
index 000000000..aa8d06b64
--- /dev/null
+++ b/vendor/sized-chunks/src/ring_buffer/iter.rs
@@ -0,0 +1,218 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+use core::iter::FusedIterator;
+use core::marker::PhantomData;
+
+use crate::types::ChunkLength;
+
+use super::{index::RawIndex, RingBuffer};
+use array_ops::HasLength;
+
+/// A reference iterator over a `RingBuffer`.
+pub struct Iter<'a, A, N>
+where
+ N: ChunkLength<A>,
+{
+ pub(crate) buffer: &'a RingBuffer<A, N>,
+ pub(crate) left_index: RawIndex<N>,
+ pub(crate) right_index: RawIndex<N>,
+ pub(crate) remaining: usize,
+}
+
+impl<'a, A, N> Iterator for Iter<'a, A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Item = &'a A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ None
+ } else {
+ self.remaining -= 1;
+ Some(unsafe { &*self.buffer.ptr(self.left_index.inc()) })
+ }
+ }
+
+ #[inline]
+ #[must_use]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.remaining, Some(self.remaining))
+ }
+}
+
+impl<'a, A, N> DoubleEndedIterator for Iter<'a, A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ None
+ } else {
+ self.remaining -= 1;
+ Some(unsafe { &*self.buffer.ptr(self.right_index.dec()) })
+ }
+ }
+}
+
+impl<'a, A, N> ExactSizeIterator for Iter<'a, A, N> where N: ChunkLength<A> {}
+
+impl<'a, A, N> FusedIterator for Iter<'a, A, N> where N: ChunkLength<A> {}
+
+/// A mutable reference iterator over a `RingBuffer`.
+pub struct IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+{
+ data: *mut A,
+ left_index: RawIndex<N>,
+ right_index: RawIndex<N>,
+ remaining: usize,
+ phantom: PhantomData<&'a ()>,
+}
+
+impl<'a, A, N> IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+ A: 'a,
+{
+ pub(crate) fn new(buffer: &mut RingBuffer<A, N>) -> Self {
+ Self::new_slice(buffer, buffer.origin, buffer.len())
+ }
+
+ pub(crate) fn new_slice(
+ buffer: &mut RingBuffer<A, N>,
+ origin: RawIndex<N>,
+ len: usize,
+ ) -> Self {
+ Self {
+ left_index: origin,
+ right_index: origin + len,
+ remaining: len,
+ phantom: PhantomData,
+ data: buffer.data.as_mut_ptr().cast(),
+ }
+ }
+
+ unsafe fn mut_ptr(&mut self, index: RawIndex<N>) -> *mut A {
+ self.data.add(index.to_usize())
+ }
+}
+
+impl<'a, A, N> Iterator for IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+ A: 'a,
+{
+ type Item = &'a mut A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ None
+ } else {
+ self.remaining -= 1;
+ let index = self.left_index.inc();
+ Some(unsafe { &mut *self.mut_ptr(index) })
+ }
+ }
+
+ #[inline]
+ #[must_use]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.remaining, Some(self.remaining))
+ }
+}
+
+impl<'a, A, N> DoubleEndedIterator for IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+ A: 'a,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.remaining == 0 {
+ None
+ } else {
+ self.remaining -= 1;
+ let index = self.right_index.dec();
+ Some(unsafe { &mut *self.mut_ptr(index) })
+ }
+ }
+}
+
+impl<'a, A, N> ExactSizeIterator for IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+ A: 'a,
+{
+}
+
+impl<'a, A, N> FusedIterator for IterMut<'a, A, N>
+where
+ N: ChunkLength<A>,
+ A: 'a,
+{
+}
+
+/// A draining iterator over a `RingBuffer`.
+pub struct Drain<'a, A, N: ChunkLength<A>> {
+ pub(crate) buffer: &'a mut RingBuffer<A, N>,
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Iterator for Drain<'a, A, N> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.buffer.pop_front()
+ }
+
+ #[inline]
+ #[must_use]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.buffer.len(), Some(self.buffer.len()))
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> DoubleEndedIterator for Drain<'a, A, N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.buffer.pop_back()
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> ExactSizeIterator for Drain<'a, A, N> {}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> FusedIterator for Drain<'a, A, N> {}
+
+/// A consuming iterator over a `RingBuffer`.
+pub struct OwnedIter<A, N: ChunkLength<A>> {
+ pub(crate) buffer: RingBuffer<A, N>,
+}
+
+impl<A, N: ChunkLength<A>> Iterator for OwnedIter<A, N> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.buffer.pop_front()
+ }
+
+ #[inline]
+ #[must_use]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.buffer.len(), Some(self.buffer.len()))
+ }
+}
+
+impl<A, N: ChunkLength<A>> DoubleEndedIterator for OwnedIter<A, N> {
+ #[inline]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.buffer.pop_back()
+ }
+}
+
+impl<A, N: ChunkLength<A>> ExactSizeIterator for OwnedIter<A, N> {}
+
+impl<A, N: ChunkLength<A>> FusedIterator for OwnedIter<A, N> {}
diff --git a/vendor/sized-chunks/src/ring_buffer/mod.rs b/vendor/sized-chunks/src/ring_buffer/mod.rs
new file mode 100644
index 000000000..76e0aaa8f
--- /dev/null
+++ b/vendor/sized-chunks/src/ring_buffer/mod.rs
@@ -0,0 +1,1156 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! A fixed capacity ring buffer.
+//!
+//! See [`RingBuffer`](struct.RingBuffer.html)
+
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::fmt::{Debug, Error, Formatter};
+use core::hash::{Hash, Hasher};
+use core::iter::FromIterator;
+use core::mem::{replace, MaybeUninit};
+use core::ops::{Bound, Range, RangeBounds};
+use core::ops::{Index, IndexMut};
+
+use typenum::U64;
+
+pub use array_ops::{Array, ArrayMut, HasLength};
+
+use crate::types::ChunkLength;
+
+mod index;
+use index::{IndexIter, RawIndex};
+
+mod iter;
+pub use iter::{Drain, Iter, IterMut, OwnedIter};
+
+mod slice;
+pub use slice::{Slice, SliceMut};
+
+#[cfg(feature = "refpool")]
+mod refpool;
+
+/// A fixed capacity ring buffer.
+///
+/// A ring buffer is an array where the first logical index is at some arbitrary
+/// location inside the array, and the indices wrap around to the start of the
+/// array once they overflow its bounds.
+///
+/// This gives us the ability to push to either the front or the end of the
+/// array in constant time, at the cost of losing the ability to get a single
+/// contiguous slice reference to the contents.
+///
+/// It differs from the [`Chunk`][Chunk] in that the latter will have mostly
+/// constant time pushes, but may occasionally need to shift its contents around
+/// to make room. They both have constant time pop, and they both have linear
+/// time insert and remove.
+///
+/// The `RingBuffer` offers its own [`Slice`][Slice] and [`SliceMut`][SliceMut]
+/// types to compensate for the loss of being able to take a slice, but they're
+/// somewhat less efficient, so the general rule should be that you shouldn't
+/// choose a `RingBuffer` if you rely heavily on slices - but if you don't,
+/// it's probably a marginally better choice overall than [`Chunk`][Chunk].
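+///
+/// # Examples
+///
+/// A minimal sketch of pushing to both ends and popping from the front:
+///
+/// ```rust
+/// # use sized_chunks::RingBuffer;
+/// let mut buffer: RingBuffer<i32> = RingBuffer::new();
+/// buffer.push_back(2);
+/// buffer.push_back(3);
+/// buffer.push_front(1);
+/// assert_eq!(Some(1), buffer.pop_front());
+/// assert_eq!(buffer, vec![2, 3]);
+/// ```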
+///
+/// # Feature Flag
+///
+/// To use this data structure, you need to enable the `ringbuffer` feature.
+///
+/// [Chunk]: ../sized_chunk/struct.Chunk.html
+/// [Slice]: struct.Slice.html
+/// [SliceMut]: struct.SliceMut.html
+pub struct RingBuffer<A, N = U64>
+where
+ N: ChunkLength<A>,
+{
+ origin: RawIndex<N>,
+ length: usize,
+ data: MaybeUninit<N::SizedType>,
+}
+
+impl<A, N: ChunkLength<A>> Drop for RingBuffer<A, N> {
+ #[inline]
+ fn drop(&mut self) {
+ if core::mem::needs_drop::<A>() {
+ for i in self.range() {
+ unsafe { self.force_drop(i) }
+ }
+ }
+ }
+}
+
+impl<A, N> HasLength for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// Get the length of the ring buffer.
+ #[inline]
+ #[must_use]
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+impl<A, N> Array for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// Get a reference to the value at a given index.
+ #[must_use]
+ fn get(&self, index: usize) -> Option<&A> {
+ if index >= self.len() {
+ None
+ } else {
+ Some(unsafe { self.get_unchecked(index) })
+ }
+ }
+}
+
+impl<A, N> ArrayMut for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// Get a mutable reference to the value at a given index.
+ #[must_use]
+ fn get_mut(&mut self, index: usize) -> Option<&mut A> {
+ if index >= self.len() {
+ None
+ } else {
+ Some(unsafe { self.get_unchecked_mut(index) })
+ }
+ }
+}
+
+impl<A, N> RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// The capacity of this ring buffer, as a `usize`.
+ pub const CAPACITY: usize = N::USIZE;
+
+ /// Get the raw index for a logical index.
+ #[inline]
+ fn raw(&self, index: usize) -> RawIndex<N> {
+ self.origin + index
+ }
+
+ #[inline]
+ unsafe fn ptr(&self, index: RawIndex<N>) -> *const A {
+ debug_assert!(index.to_usize() < Self::CAPACITY);
+ (&self.data as *const _ as *const A).add(index.to_usize())
+ }
+
+ #[inline]
+ unsafe fn mut_ptr(&mut self, index: RawIndex<N>) -> *mut A {
+ debug_assert!(index.to_usize() < Self::CAPACITY);
+ (&mut self.data as *mut _ as *mut A).add(index.to_usize())
+ }
+
+ /// Drop the value at a raw index.
+ #[inline]
+ unsafe fn force_drop(&mut self, index: RawIndex<N>) {
+ core::ptr::drop_in_place(self.mut_ptr(index))
+ }
+
+    /// Copy the value at a raw index, discarding ownership of the copied value.
+ #[inline]
+ unsafe fn force_read(&self, index: RawIndex<N>) -> A {
+ core::ptr::read(self.ptr(index))
+ }
+
+    /// Write a value at a raw index without trying to drop what's already there.
+ #[inline]
+ unsafe fn force_write(&mut self, index: RawIndex<N>, value: A) {
+ core::ptr::write(self.mut_ptr(index), value)
+ }
+
+ /// Copy a range of raw indices from another buffer.
+ unsafe fn copy_from(
+ &mut self,
+ source: &mut Self,
+ from: RawIndex<N>,
+ to: RawIndex<N>,
+ count: usize,
+ ) {
+ #[inline]
+ unsafe fn force_copy_to<A, N: ChunkLength<A>>(
+ source: &mut RingBuffer<A, N>,
+ from: RawIndex<N>,
+ target: &mut RingBuffer<A, N>,
+ to: RawIndex<N>,
+ count: usize,
+ ) {
+ if count > 0 {
+ debug_assert!(from.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
+ debug_assert!(to.to_usize() + count <= RingBuffer::<A, N>::CAPACITY);
+ core::ptr::copy_nonoverlapping(source.mut_ptr(from), target.mut_ptr(to), count)
+ }
+ }
+
+ if from.to_usize() + count > Self::CAPACITY {
+ let first_length = Self::CAPACITY - from.to_usize();
+ let last_length = count - first_length;
+ self.copy_from(source, from, to, first_length);
+ self.copy_from(source, 0.into(), to + first_length, last_length);
+ } else if to.to_usize() + count > Self::CAPACITY {
+ let first_length = Self::CAPACITY - to.to_usize();
+ let last_length = count - first_length;
+ force_copy_to(source, from, self, to, first_length);
+ force_copy_to(source, from + first_length, self, 0.into(), last_length);
+ } else {
+ force_copy_to(source, from, self, to, count);
+ }
+ }
+
+ /// Copy values from a slice.
+ #[allow(dead_code)]
+ unsafe fn copy_from_slice(&mut self, source: &[A], to: RawIndex<N>) {
+ let count = source.len();
+ debug_assert!(to.to_usize() + count <= Self::CAPACITY);
+ if to.to_usize() + count > Self::CAPACITY {
+ let first_length = Self::CAPACITY - to.to_usize();
+ let first_slice = &source[..first_length];
+ let last_slice = &source[first_length..];
+ core::ptr::copy_nonoverlapping(
+ first_slice.as_ptr(),
+ self.mut_ptr(to),
+ first_slice.len(),
+ );
+ core::ptr::copy_nonoverlapping(
+ last_slice.as_ptr(),
+ self.mut_ptr(0.into()),
+ last_slice.len(),
+ );
+ } else {
+ core::ptr::copy_nonoverlapping(source.as_ptr(), self.mut_ptr(to), count)
+ }
+ }
+
+ /// Get an iterator over the raw indices of the buffer from left to right.
+ #[inline]
+ fn range(&self) -> IndexIter<N> {
+ IndexIter {
+ remaining: self.len(),
+ left_index: self.origin,
+ right_index: self.origin + self.len(),
+ }
+ }
+
+ /// Construct an empty ring buffer.
+ #[inline]
+ #[must_use]
+ pub fn new() -> Self {
+ Self {
+ origin: 0.into(),
+ length: 0,
+ data: MaybeUninit::uninit(),
+ }
+ }
+
+ /// Construct a ring buffer with a single item.
+ #[inline]
+ #[must_use]
+ pub fn unit(value: A) -> Self {
+ assert!(Self::CAPACITY >= 1);
+ let mut buffer = Self {
+ origin: 0.into(),
+ length: 1,
+ data: MaybeUninit::uninit(),
+ };
+ unsafe {
+ buffer.force_write(0.into(), value);
+ }
+ buffer
+ }
+
+ /// Construct a ring buffer with two items.
+ #[inline]
+ #[must_use]
+ pub fn pair(value1: A, value2: A) -> Self {
+ assert!(Self::CAPACITY >= 2);
+ let mut buffer = Self {
+ origin: 0.into(),
+ length: 2,
+ data: MaybeUninit::uninit(),
+ };
+ unsafe {
+ buffer.force_write(0.into(), value1);
+ buffer.force_write(1.into(), value2);
+ }
+ buffer
+ }
+
+ /// Construct a new ring buffer and move every item from `other` into the
+ /// new buffer.
+ ///
+ /// Time: O(n)
+ #[inline]
+ #[must_use]
+ pub fn drain_from(other: &mut Self) -> Self {
+ Self::from_front(other, other.len())
+ }
+
+ /// Construct a new ring buffer and populate it by taking `count` items from
+ /// the iterator `iter`.
+ ///
+    /// Panics if the iterator contains fewer than `count` items.
+ ///
+ /// Time: O(n)
+ #[must_use]
+ pub fn collect_from<I>(iter: &mut I, count: usize) -> Self
+ where
+ I: Iterator<Item = A>,
+ {
+ let buffer = Self::from_iter(iter.take(count));
+ if buffer.len() < count {
+ panic!("RingBuffer::collect_from: underfull iterator");
+ }
+ buffer
+ }
+
+ /// Construct a new ring buffer and populate it by taking `count` items from
+ /// the front of `other`.
+ ///
+ /// Time: O(n) for the number of items moved
+ #[must_use]
+ pub fn from_front(other: &mut Self, count: usize) -> Self {
+ let mut buffer = Self::new();
+ buffer.drain_from_front(other, count);
+ buffer
+ }
+
+ /// Construct a new ring buffer and populate it by taking `count` items from
+ /// the back of `other`.
+ ///
+ /// Time: O(n) for the number of items moved
+ #[must_use]
+ pub fn from_back(other: &mut Self, count: usize) -> Self {
+ let mut buffer = Self::new();
+ buffer.drain_from_back(other, count);
+ buffer
+ }
+
+ /// Test if the ring buffer is full.
+ #[inline]
+ #[must_use]
+ pub fn is_full(&self) -> bool {
+ self.len() == Self::CAPACITY
+ }
+
+ /// Get an iterator over references to the items in the ring buffer in
+ /// order.
+ #[inline]
+ #[must_use]
+ pub fn iter(&self) -> Iter<'_, A, N> {
+ Iter {
+ buffer: self,
+ left_index: self.origin,
+ right_index: self.origin + self.len(),
+ remaining: self.len(),
+ }
+ }
+
+ /// Get an iterator over mutable references to the items in the ring buffer
+ /// in order.
+ #[inline]
+ #[must_use]
+ pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
+ IterMut::new(self)
+ }
+
+ #[must_use]
+ fn parse_range<R: RangeBounds<usize>>(&self, range: R) -> Range<usize> {
+ let new_range = Range {
+ start: match range.start_bound() {
+ Bound::Unbounded => 0,
+ Bound::Included(index) => *index,
+ Bound::Excluded(_) => unimplemented!(),
+ },
+ end: match range.end_bound() {
+ Bound::Unbounded => self.len(),
+ Bound::Included(index) => *index + 1,
+ Bound::Excluded(index) => *index,
+ },
+ };
+ if new_range.end > self.len() || new_range.start > new_range.end {
+ panic!("Slice::parse_range: index out of bounds");
+ }
+ new_range
+ }
+
+ /// Get a `Slice` for a subset of the ring buffer.
+ #[must_use]
+ pub fn slice<R: RangeBounds<usize>>(&self, range: R) -> Slice<'_, A, N> {
+ Slice {
+ buffer: self,
+ range: self.parse_range(range),
+ }
+ }
+
+ /// Get a `SliceMut` for a subset of the ring buffer.
+ #[must_use]
+ pub fn slice_mut<R: RangeBounds<usize>>(&mut self, range: R) -> SliceMut<'_, A, N> {
+ SliceMut {
+ range: self.parse_range(range),
+ buffer: self,
+ }
+ }
+
+ /// Get an unchecked reference to the value at the given index.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure the index is not out of bounds.
+ #[must_use]
+ pub unsafe fn get_unchecked(&self, index: usize) -> &A {
+ &*self.ptr(self.raw(index))
+ }
+
+ /// Get an unchecked mutable reference to the value at the given index.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure the index is not out of bounds.
+ #[must_use]
+ pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
+ &mut *self.mut_ptr(self.raw(index))
+ }
+
+ /// Push a value to the back of the buffer.
+ ///
+ /// Panics if the capacity of the buffer is exceeded.
+ ///
+ /// Time: O(1)
+ pub fn push_back(&mut self, value: A) {
+ if self.is_full() {
+ panic!("RingBuffer::push_back: can't push to a full buffer")
+ } else {
+ unsafe { self.force_write(self.raw(self.length), value) }
+ self.length += 1;
+ }
+ }
+
+ /// Push a value to the front of the buffer.
+ ///
+ /// Panics if the capacity of the buffer is exceeded.
+ ///
+ /// Time: O(1)
+ pub fn push_front(&mut self, value: A) {
+ if self.is_full() {
+ panic!("RingBuffer::push_front: can't push to a full buffer")
+ } else {
+ let origin = self.origin.dec();
+ self.length += 1;
+ unsafe { self.force_write(origin, value) }
+ }
+ }
+
+ /// Pop a value from the back of the buffer.
+ ///
+ /// Returns `None` if the buffer is empty.
+ ///
+ /// Time: O(1)
+ pub fn pop_back(&mut self) -> Option<A> {
+ if self.is_empty() {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.force_read(self.raw(self.length)) })
+ }
+ }
+
+ /// Pop a value from the front of the buffer.
+ ///
+ /// Returns `None` if the buffer is empty.
+ ///
+ /// Time: O(1)
+ pub fn pop_front(&mut self) -> Option<A> {
+ if self.is_empty() {
+ None
+ } else {
+ self.length -= 1;
+ let index = self.origin.inc();
+ Some(unsafe { self.force_read(index) })
+ }
+ }
+
+ /// Discard all items up to but not including `index`.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items dropped
+ pub fn drop_left(&mut self, index: usize) {
+ if index > 0 {
+ if index > self.len() {
+ panic!("RingBuffer::drop_left: index out of bounds");
+ }
+ for i in self.range().take(index) {
+ unsafe { self.force_drop(i) }
+ }
+ self.origin += index;
+ self.length -= index;
+ }
+ }
+
+ /// Discard all items from `index` onward.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items dropped
+ pub fn drop_right(&mut self, index: usize) {
+ if index > self.len() {
+ panic!("RingBuffer::drop_right: index out of bounds");
+ }
+ if index == self.len() {
+ return;
+ }
+ for i in self.range().skip(index) {
+ unsafe { self.force_drop(i) }
+ }
+ self.length = index;
+ }
+
+ /// Split a buffer into two, the original buffer containing
+ /// everything up to `index` and the returned buffer containing
+ /// everything from `index` onwards.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items in the new buffer
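+    ///
+    /// # Examples
+    ///
+    /// A sketch of splitting a buffer in two:
+    ///
+    /// ```rust
+    /// # use std::iter::FromIterator;
+    /// # use sized_chunks::RingBuffer;
+    /// let mut left: RingBuffer<i32> = RingBuffer::from_iter(0..6);
+    /// let right = left.split_off(3);
+    /// assert_eq!(left, vec![0, 1, 2]);
+    /// assert_eq!(right, vec![3, 4, 5]);
+    /// ```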
+ #[must_use]
+ pub fn split_off(&mut self, index: usize) -> Self {
+ if index > self.len() {
+ panic!("RingBuffer::split: index out of bounds");
+ }
+ if index == self.len() {
+ return Self::new();
+ }
+ let mut right = Self::new();
+ let length = self.length - index;
+ unsafe { right.copy_from(self, self.raw(index), 0.into(), length) };
+ self.length = index;
+ right.length = length;
+ right
+ }
+
+ /// Remove all items from `other` and append them to the back of `self`.
+ ///
+ /// Panics if the capacity of `self` is exceeded.
+ ///
+ /// `other` will be an empty buffer after this operation.
+ ///
+ /// Time: O(n) for the number of items moved
+ #[inline]
+ pub fn append(&mut self, other: &mut Self) {
+ self.drain_from_front(other, other.len());
+ }
+
+ /// Remove `count` items from the front of `other` and append them to the
+ /// back of `self`.
+ ///
+    /// Panics if `self` doesn't have room for `count` more items, or if
+    /// `other` has fewer than `count` items.
+ ///
+ /// Time: O(n) for the number of items moved
+ pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
+ let self_len = self.len();
+ let other_len = other.len();
+ if self_len + count > Self::CAPACITY {
+ panic!("RingBuffer::drain_from_front: chunk size overflow");
+ }
+ if other_len < count {
+ panic!("RingBuffer::drain_from_front: index out of bounds");
+ }
+ unsafe { self.copy_from(other, other.origin, self.raw(self.len()), count) };
+ other.origin += count;
+ other.length -= count;
+ self.length += count;
+ }
+
+ /// Remove `count` items from the back of `other` and append them to the
+ /// front of `self`.
+ ///
+    /// Panics if `self` doesn't have room for `count` more items, or if
+    /// `other` has fewer than `count` items.
+ ///
+ /// Time: O(n) for the number of items moved
+ pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
+ let self_len = self.len();
+ let other_len = other.len();
+ if self_len + count > Self::CAPACITY {
+ panic!("RingBuffer::drain_from_back: chunk size overflow");
+ }
+ if other_len < count {
+ panic!("RingBuffer::drain_from_back: index out of bounds");
+ }
+ self.origin -= count;
+ let source_index = other.origin + (other.len() - count);
+ unsafe { self.copy_from(other, source_index, self.origin, count) };
+ other.length -= count;
+ self.length += count;
+ }
+
+ /// Insert a new value at index `index`, shifting all the following values
+ /// to the right.
+ ///
+ /// Panics if the index is out of bounds.
+ ///
+ /// Time: O(n) for the number of items shifted
+ pub fn insert(&mut self, index: usize, value: A) {
+ if self.is_full() {
+ panic!("RingBuffer::insert: chunk size overflow");
+ }
+ if index > self.len() {
+ panic!("RingBuffer::insert: index out of bounds");
+ }
+ if index == 0 {
+ return self.push_front(value);
+ }
+ if index == self.len() {
+ return self.push_back(value);
+ }
+ let right_count = self.len() - index;
+ // Check which side has fewer elements to shift.
+ if right_count < index {
+ // Shift to the right.
+ let mut i = self.raw(self.len() - 1);
+ let target = self.raw(index);
+ while i != target {
+ unsafe { self.force_write(i + 1, self.force_read(i)) };
+ i -= 1;
+ }
+ unsafe { self.force_write(target + 1, self.force_read(target)) };
+ self.length += 1;
+ } else {
+ // Shift to the left.
+ self.origin -= 1;
+ self.length += 1;
+ for i in self.range().take(index) {
+ unsafe { self.force_write(i, self.force_read(i + 1)) };
+ }
+ }
+ unsafe { self.force_write(self.raw(index), value) };
+ }
+
+ /// Insert a new value into the buffer in sorted order.
+ ///
+ /// This assumes every element of the buffer is already in sorted order.
+ /// If not, the value will still be inserted but the ordering is not
+ /// guaranteed.
+ ///
+    /// Panics if the buffer is already full.
+    ///
+    /// Time: O(log n) to find the insert position, then O(n) for the number
+    /// of elements shifted.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// # use std::iter::FromIterator;
+    /// # use sized_chunks::RingBuffer;
+    /// let mut buffer: RingBuffer<i32> = RingBuffer::from_iter(0..5);
+    /// buffer.insert_ordered(3);
+    /// assert_eq!(buffer, vec![0, 1, 2, 3, 3, 4]);
+    /// ```
+ pub fn insert_ordered(&mut self, value: A)
+ where
+ A: Ord,
+ {
+ if self.is_full() {
+ panic!("Chunk::insert: chunk is full");
+ }
+ match self.slice(..).binary_search(&value) {
+ Ok(index) => self.insert(index, value),
+ Err(index) => self.insert(index, value),
+ }
+ }
+
+ /// Insert multiple values at index `index`, shifting all the following values
+ /// to the right.
+ ///
+ /// Panics if the index is out of bounds or the chunk doesn't have room for
+ /// all the values.
+ ///
+ /// Time: O(m+n) where m is the number of elements inserted and n is the number
+ /// of elements following the insertion index. Calling `insert`
+ /// repeatedly would be O(m*n).
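+    ///
+    /// # Examples
+    ///
+    /// A sketch of inserting several values in one pass:
+    ///
+    /// ```rust
+    /// # use std::iter::FromIterator;
+    /// # use sized_chunks::RingBuffer;
+    /// let mut buffer: RingBuffer<i32> = RingBuffer::from_iter(0..5);
+    /// buffer.insert_from(2, vec![10, 11]);
+    /// assert_eq!(buffer, vec![0, 1, 10, 11, 2, 3, 4]);
+    /// ```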
+ pub fn insert_from<Iterable, I>(&mut self, index: usize, iter: Iterable)
+ where
+ Iterable: IntoIterator<Item = A, IntoIter = I>,
+ I: ExactSizeIterator<Item = A>,
+ {
+ let iter = iter.into_iter();
+ let insert_size = iter.len();
+ if self.len() + insert_size > Self::CAPACITY {
+            panic!(
+                "RingBuffer::insert_from: buffer cannot fit {} elements",
+                insert_size
+            );
+ }
+ if index > self.len() {
+ panic!("Chunk::insert_from: index out of bounds");
+ }
+ if index == self.len() {
+ self.extend(iter);
+ return;
+ }
+ let right_count = self.len() - index;
+ // Check which side has fewer elements to shift.
+ if right_count < index {
+ // Shift to the right.
+ let mut i = self.raw(self.len() - 1);
+ let target = self.raw(index);
+ while i != target {
+ unsafe { self.force_write(i + insert_size, self.force_read(i)) };
+ i -= 1;
+ }
+ unsafe { self.force_write(target + insert_size, self.force_read(target)) };
+ self.length += insert_size;
+ } else {
+ // Shift to the left.
+ self.origin -= insert_size;
+ self.length += insert_size;
+ for i in self.range().take(index) {
+ unsafe { self.force_write(i, self.force_read(i + insert_size)) };
+ }
+ }
+ let mut index = self.raw(index);
+        // Panic safety: until every slot in the gap has been filled, there's a hole of
+        // uninitialized elements somewhere in the middle, and the destructor would try to
+        // drop them. Therefore we pretend to be empty for a while, and leak the
+        // already-written elements instead if something goes wrong.
+ let mut inserted = 0;
+ let length = replace(&mut self.length, 0);
+ for value in iter.take(insert_size) {
+ unsafe { self.force_write(index, value) };
+ index += 1;
+ inserted += 1;
+ }
+        // If the iterator yielded fewer items than promised, this would leave a hole of
+        // uninitialized elements in the middle.
+ assert_eq!(
+ inserted, insert_size,
+ "Iterator has fewer elements than advertised",
+ );
+ self.length = length;
+ }
+
+ /// Remove the value at index `index`, shifting all the following values to
+ /// the left.
+ ///
+ /// Returns the removed value.
+ ///
+ /// Panics if the index is out of bounds.
+ ///
+ /// Time: O(n) for the number of items shifted
+ pub fn remove(&mut self, index: usize) -> A {
+ if index >= self.len() {
+ panic!("RingBuffer::remove: index out of bounds");
+ }
+ let value = unsafe { self.force_read(self.raw(index)) };
+ let right_count = self.len() - index;
+ // Check which side has fewer elements to shift.
+ if right_count < index {
+ // Shift from the right.
+ self.length -= 1;
+ let mut i = self.raw(index);
+ let target = self.raw(self.len());
+ while i != target {
+ unsafe { self.force_write(i, self.force_read(i + 1)) };
+ i += 1;
+ }
+ } else {
+ // Shift from the left.
+ let mut i = self.raw(index);
+ while i != self.origin {
+ unsafe { self.force_write(i, self.force_read(i - 1)) };
+ i -= 1;
+ }
+ self.origin += 1;
+ self.length -= 1;
+ }
+ value
+ }
+
+ /// Construct an iterator that drains values from the front of the buffer.
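+    ///
+    /// # Examples
+    ///
+    /// A sketch of draining a buffer into a `Vec`:
+    ///
+    /// ```rust
+    /// # use std::iter::FromIterator;
+    /// # use sized_chunks::RingBuffer;
+    /// let mut buffer: RingBuffer<i32> = RingBuffer::from_iter(0..4);
+    /// let drained: Vec<i32> = buffer.drain().collect();
+    /// assert_eq!(drained, vec![0, 1, 2, 3]);
+    /// assert_eq!(buffer, vec![]);
+    /// ```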
+ pub fn drain(&mut self) -> Drain<'_, A, N> {
+ Drain { buffer: self }
+ }
+
+ /// Discard the contents of the buffer.
+ ///
+ /// Time: O(n)
+ pub fn clear(&mut self) {
+ for i in self.range() {
+ unsafe { self.force_drop(i) };
+ }
+ self.origin = 0.into();
+ self.length = 0;
+ }
+}
+
+impl<A, N: ChunkLength<A>> Default for RingBuffer<A, N> {
+ #[inline]
+ #[must_use]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<A: Clone, N: ChunkLength<A>> Clone for RingBuffer<A, N> {
+ fn clone(&self) -> Self {
+ let mut out = Self::new();
+ out.origin = self.origin;
+ out.length = self.length;
+ let range = self.range();
+ // Panic safety. If we panic, we don't want to drop more than we have initialized.
+ out.length = 0;
+ for index in range {
+ unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
+ out.length += 1;
+ }
+ out
+ }
+}
+
+impl<A, N> Index<usize> for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Output = A;
+
+ #[must_use]
+ fn index(&self, index: usize) -> &Self::Output {
+ if index >= self.len() {
+ panic!(
+ "RingBuffer::index: index out of bounds {} >= {}",
+ index,
+ self.len()
+ );
+ }
+ unsafe { &*self.ptr(self.raw(index)) }
+ }
+}
+
+impl<A, N> IndexMut<usize> for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ #[must_use]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ if index >= self.len() {
+ panic!(
+ "RingBuffer::index_mut: index out of bounds {} >= {}",
+ index,
+ self.len()
+ );
+ }
+ unsafe { &mut *self.mut_ptr(self.raw(index)) }
+ }
+}
+
+impl<A: PartialEq, N: ChunkLength<A>> PartialEq for RingBuffer<A, N> {
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<A, N, PrimSlice> PartialEq<PrimSlice> for RingBuffer<A, N>
+where
+ PrimSlice: Borrow<[A]>,
+ A: PartialEq,
+ N: ChunkLength<A>,
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &PrimSlice) -> bool {
+ let other = other.borrow();
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<A, N> PartialEq<Slice<'_, A, N>> for RingBuffer<A, N>
+where
+ A: PartialEq,
+ N: ChunkLength<A>,
+{
+ fn eq(&self, other: &Slice<'_, A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<A, N> PartialEq<SliceMut<'_, A, N>> for RingBuffer<A, N>
+where
+ A: PartialEq,
+ N: ChunkLength<A>,
+{
+ fn eq(&self, other: &SliceMut<'_, A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<A: Eq, N: ChunkLength<A>> Eq for RingBuffer<A, N> {}
+
+impl<A: PartialOrd, N: ChunkLength<A>> PartialOrd for RingBuffer<A, N> {
+ #[inline]
+ #[must_use]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+impl<A: Ord, N: ChunkLength<A>> Ord for RingBuffer<A, N> {
+ #[inline]
+ #[must_use]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+impl<A, N: ChunkLength<A>> Extend<A> for RingBuffer<A, N> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = A>>(&mut self, iter: I) {
+ for item in iter {
+ self.push_back(item);
+ }
+ }
+}
+
+impl<'a, A: Clone + 'a, N: ChunkLength<A>> Extend<&'a A> for RingBuffer<A, N> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = &'a A>>(&mut self, iter: I) {
+ for item in iter {
+ self.push_back(item.clone());
+ }
+ }
+}
+
+impl<A: Debug, N: ChunkLength<A>> Debug for RingBuffer<A, N> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ f.write_str("RingBuffer")?;
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<A: Hash, N: ChunkLength<A>> Hash for RingBuffer<A, N> {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ for item in self {
+ item.hash(hasher)
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl<N: ChunkLength<u8>> std::io::Write for RingBuffer<u8, N> {
+ fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
+ let max_new = Self::CAPACITY - self.len();
+ if buf.len() > max_new {
+ buf = &buf[..max_new];
+ }
+ unsafe { self.copy_from_slice(buf, self.origin + self.len()) };
+ self.length += buf.len();
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> std::io::Result<()> {
+ Ok(())
+ }
+}
+
+#[cfg(feature = "std")]
+impl<N: ChunkLength<u8>> std::io::Read for RingBuffer<u8, N> {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ let read_size = buf.len().min(self.len());
+ if read_size == 0 {
+ Ok(0)
+ } else {
+ for p in buf.iter_mut().take(read_size) {
+ *p = self.pop_front().unwrap();
+ }
+ Ok(read_size)
+ }
+ }
+}
+
+impl<A, N: ChunkLength<A>> FromIterator<A> for RingBuffer<A, N> {
+ #[must_use]
+ fn from_iter<I: IntoIterator<Item = A>>(iter: I) -> Self {
+ let mut buffer = RingBuffer::new();
+ buffer.extend(iter);
+ buffer
+ }
+}
+
+impl<A, N: ChunkLength<A>> IntoIterator for RingBuffer<A, N> {
+ type Item = A;
+ type IntoIter = OwnedIter<A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ OwnedIter { buffer: self }
+ }
+}
+
+impl<'a, A, N: ChunkLength<A>> IntoIterator for &'a RingBuffer<A, N> {
+ type Item = &'a A;
+ type IntoIter = Iter<'a, A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, A, N: ChunkLength<A>> IntoIterator for &'a mut RingBuffer<A, N> {
+ type Item = &'a mut A;
+ type IntoIter = IterMut<'a, A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+// Tests
+
+#[cfg(test)]
+mod test {
+ use typenum::U0;
+
+ use super::*;
+
+ #[test]
+ fn validity_invariant() {
+ assert!(Some(RingBuffer::<Box<()>>::new()).is_some());
+ }
+
+ #[test]
+ fn is_full() {
+ let mut chunk = RingBuffer::<_, U64>::new();
+ for i in 0..64 {
+ assert_eq!(false, chunk.is_full());
+ chunk.push_back(i);
+ }
+ assert_eq!(true, chunk.is_full());
+ }
+
+ #[test]
+ fn ref_iter() {
+ let chunk: RingBuffer<i32> = (0..64).collect();
+ let out_vec: Vec<&i32> = chunk.iter().collect();
+ let should_vec_p: Vec<i32> = (0..64).collect();
+ let should_vec: Vec<&i32> = should_vec_p.iter().collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn mut_ref_iter() {
+ let mut chunk: RingBuffer<i32> = (0..64).collect();
+ let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
+ let mut should_vec_p: Vec<i32> = (0..64).collect();
+ let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn consuming_iter() {
+ let chunk: RingBuffer<i32> = (0..64).collect();
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn draining_iter() {
+ let mut chunk: RingBuffer<i32> = (0..64).collect();
+ let mut half: RingBuffer<i32> = chunk.drain().take(16).collect();
+ half.extend(chunk.drain().rev().take(16));
+ let should: Vec<i32> = (16..48).collect();
+ assert_eq!(chunk, should);
+ let should: Vec<i32> = (0..16).chain((48..64).rev()).collect();
+ assert_eq!(half, should);
+ }
+
+ #[cfg(feature = "std")]
+ #[test]
+ fn io_write() {
+ use std::io::Write;
+ let mut buffer: RingBuffer<u8> = (0..32).collect();
+ let to_write: Vec<u8> = (32..128).collect();
+ assert_eq!(32, buffer.write(&to_write).unwrap());
+ assert_eq!(buffer, (0..64).collect::<Vec<u8>>());
+ }
+
+ #[cfg(feature = "std")]
+ #[test]
+ fn io_read() {
+ use std::io::Read;
+ let mut buffer: RingBuffer<u8> = (16..48).collect();
+ let mut read_buf: Vec<u8> = (0..16).collect();
+ assert_eq!(16, buffer.read(&mut read_buf).unwrap());
+ assert_eq!(read_buf, (16..32).collect::<Vec<u8>>());
+ assert_eq!(buffer, (32..48).collect::<Vec<u8>>());
+ assert_eq!(16, buffer.read(&mut read_buf).unwrap());
+ assert_eq!(read_buf, (32..48).collect::<Vec<u8>>());
+ assert_eq!(buffer, vec![]);
+ assert_eq!(0, buffer.read(&mut read_buf).unwrap());
+ }
+
+ #[test]
+ fn clone() {
+ let buffer: RingBuffer<u32> = (0..50).collect();
+ assert_eq!(buffer, buffer.clone());
+ }
+
+ #[test]
+ fn failing() {
+ let mut buffer: RingBuffer<u32> = RingBuffer::new();
+ buffer.push_front(0);
+ let mut add: RingBuffer<u32> = vec![1, 0, 0, 0, 0, 0].into_iter().collect();
+ buffer.append(&mut add);
+ assert_eq!(1, buffer.remove(1));
+ let expected = vec![0, 0, 0, 0, 0, 0];
+ assert_eq!(buffer, expected);
+ }
+
+ use crate::tests::DropTest;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[test]
+ fn dropping() {
+ let counter = AtomicUsize::new(0);
+ {
+ let mut chunk: RingBuffer<DropTest<'_>> = RingBuffer::new();
+ for _i in 0..20 {
+ chunk.push_back(DropTest::new(&counter))
+ }
+ for _i in 0..20 {
+ chunk.push_front(DropTest::new(&counter))
+ }
+ assert_eq!(40, counter.load(Ordering::Relaxed));
+ for _i in 0..10 {
+ chunk.pop_back();
+ }
+ assert_eq!(30, counter.load(Ordering::Relaxed));
+ }
+ assert_eq!(0, counter.load(Ordering::Relaxed));
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
+ fn unit_on_empty() {
+ let _ = RingBuffer::<usize, U0>::unit(1);
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
+ fn pair_on_empty() {
+ let _ = RingBuffer::<usize, U0>::pair(1, 2);
+ }
+}
diff --git a/vendor/sized-chunks/src/ring_buffer/refpool.rs b/vendor/sized-chunks/src/ring_buffer/refpool.rs
new file mode 100644
index 000000000..453eddf87
--- /dev/null
+++ b/vendor/sized-chunks/src/ring_buffer/refpool.rs
@@ -0,0 +1,69 @@
+use core::mem::MaybeUninit;
+
+use ::refpool::{PoolClone, PoolDefault};
+
+use crate::ring_buffer::index::RawIndex;
+use crate::types::ChunkLength;
+use crate::RingBuffer;
+
+impl<A, N> PoolDefault for RingBuffer<A, N>
+where
+ N: ChunkLength<A>,
+{
+ unsafe fn default_uninit(target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let origin_ptr: *mut RawIndex<N> = &mut (*ptr).origin;
+ let length_ptr: *mut usize = &mut (*ptr).length;
+ origin_ptr.write(0.into());
+ length_ptr.write(0);
+ }
+}
+
+impl<A, N> PoolClone for RingBuffer<A, N>
+where
+ A: Clone,
+ N: ChunkLength<A>,
+{
+ unsafe fn clone_uninit(&self, target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let origin_ptr: *mut RawIndex<N> = &mut (*ptr).origin;
+ let length_ptr: *mut usize = &mut (*ptr).length;
+ let data_ptr: *mut _ = &mut (*ptr).data;
+ let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast();
+ origin_ptr.write(self.origin);
+ length_ptr.write(self.length);
+ for index in self.range() {
+ data_ptr
+ .add(index.to_usize())
+ .write((*self.ptr(index)).clone());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use ::refpool::{Pool, PoolRef};
+ use std::iter::FromIterator;
+
+ #[test]
+ fn default_and_clone() {
+ let pool: Pool<RingBuffer<usize>> = Pool::new(16);
+ let mut ref1 = PoolRef::default(&pool);
+ {
+ let chunk = PoolRef::make_mut(&pool, &mut ref1);
+ chunk.push_back(1);
+ chunk.push_back(2);
+ chunk.push_back(3);
+ }
+ let ref2 = PoolRef::cloned(&pool, &ref1);
+ let ref3 = PoolRef::clone_from(&pool, &RingBuffer::from_iter(1..=3));
+ assert_eq!(RingBuffer::<usize>::from_iter(1..=3), *ref1);
+ assert_eq!(RingBuffer::<usize>::from_iter(1..=3), *ref2);
+ assert_eq!(RingBuffer::<usize>::from_iter(1..=3), *ref3);
+ assert_eq!(ref1, ref2);
+ assert_eq!(ref1, ref3);
+ assert_eq!(ref2, ref3);
+ assert!(!PoolRef::ptr_eq(&ref1, &ref2));
+ }
+}
diff --git a/vendor/sized-chunks/src/ring_buffer/slice.rs b/vendor/sized-chunks/src/ring_buffer/slice.rs
new file mode 100644
index 000000000..1a624eef0
--- /dev/null
+++ b/vendor/sized-chunks/src/ring_buffer/slice.rs
@@ -0,0 +1,556 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::fmt::Debug;
+use core::fmt::Error;
+use core::fmt::Formatter;
+use core::hash::Hash;
+use core::hash::Hasher;
+use core::ops::IndexMut;
+use core::ops::{Bound, Index, Range, RangeBounds};
+
+use super::{Iter, IterMut, RingBuffer};
+use crate::types::ChunkLength;
+
+use array_ops::{Array, ArrayMut, HasLength};
+
+/// An indexable representation of a subset of a `RingBuffer`.
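+///
+/// # Examples
+///
+/// A sketch of taking a read-only view of part of a buffer (this requires the
+/// `ringbuffer` feature to be enabled):
+///
+/// ```rust
+/// # use std::iter::FromIterator;
+/// # use sized_chunks::RingBuffer;
+/// let buffer: RingBuffer<i32> = RingBuffer::from_iter(0..8);
+/// let slice = buffer.slice(2..5);
+/// assert_eq!(slice, vec![2, 3, 4]);
+/// ```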
+pub struct Slice<'a, A, N: ChunkLength<A>> {
+ pub(crate) buffer: &'a RingBuffer<A, N>,
+ pub(crate) range: Range<usize>,
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> HasLength for Slice<'a, A, N> {
+ /// Get the length of the slice.
+ #[inline]
+ #[must_use]
+ fn len(&self) -> usize {
+ self.range.end - self.range.start
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Array for Slice<'a, A, N> {
+ /// Get a reference to the value at a given index.
+ #[inline]
+ #[must_use]
+ fn get(&self, index: usize) -> Option<&A> {
+ if index >= self.len() {
+ None
+ } else {
+ Some(unsafe { self.get_unchecked(index) })
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Slice<'a, A, N> {
+ /// Get an unchecked reference to the value at the given index.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure the index is not out of bounds.
+ #[must_use]
+ pub unsafe fn get_unchecked(&self, index: usize) -> &A {
+ self.buffer.get_unchecked(self.range.start + index)
+ }
+
+ /// Get an iterator over references to the items in the slice in order.
+ #[inline]
+ #[must_use]
+ pub fn iter(&self) -> Iter<'_, A, N> {
+ Iter {
+ buffer: self.buffer,
+ left_index: self.buffer.origin + self.range.start,
+ right_index: self.buffer.origin + self.range.start + self.len(),
+ remaining: self.len(),
+ }
+ }
+
+ /// Create a subslice of this slice.
+ ///
+ /// This consumes the slice. To create a subslice without consuming it,
+ /// clone it first: `my_slice.clone().slice(1..2)`.
+ #[must_use]
+ pub fn slice<R: RangeBounds<usize>>(self, range: R) -> Slice<'a, A, N> {
+ let new_range = Range {
+ start: match range.start_bound() {
+ Bound::Unbounded => self.range.start,
+ Bound::Included(index) => self.range.start + index,
+ Bound::Excluded(_) => unimplemented!(),
+ },
+ end: match range.end_bound() {
+ Bound::Unbounded => self.range.end,
+ Bound::Included(index) => self.range.start + index + 1,
+ Bound::Excluded(index) => self.range.start + index,
+ },
+ };
+ if new_range.start < self.range.start
+ || new_range.end > self.range.end
+ || new_range.start > new_range.end
+ {
+ panic!("Slice::slice: index out of bounds");
+ }
+ Slice {
+ buffer: self.buffer,
+ range: new_range,
+ }
+ }
+
+ /// Split the slice into two subslices at the given index.
+ #[must_use]
+ pub fn split_at(self, index: usize) -> (Slice<'a, A, N>, Slice<'a, A, N>) {
+ if index > self.len() {
+ panic!("Slice::split_at: index out of bounds");
+ }
+ let index = self.range.start + index;
+ (
+ Slice {
+ buffer: self.buffer,
+ range: Range {
+ start: self.range.start,
+ end: index,
+ },
+ },
+ Slice {
+ buffer: self.buffer,
+ range: Range {
+ start: index,
+ end: self.range.end,
+ },
+ },
+ )
+ }
+
+ /// Construct a new `RingBuffer` by copying the elements in this slice.
+ #[inline]
+ #[must_use]
+ pub fn to_owned(&self) -> RingBuffer<A, N>
+ where
+ A: Clone,
+ {
+ self.iter().cloned().collect()
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> From<&'a RingBuffer<A, N>> for Slice<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn from(buffer: &'a RingBuffer<A, N>) -> Self {
+ Slice {
+ range: Range {
+ start: 0,
+ end: buffer.len(),
+ },
+ buffer,
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Clone for Slice<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn clone(&self) -> Self {
+ Slice {
+ buffer: self.buffer,
+ range: self.range.clone(),
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Index<usize> for Slice<'a, A, N> {
+ type Output = A;
+
+ #[inline]
+ #[must_use]
+ fn index(&self, index: usize) -> &Self::Output {
+ self.buffer.index(self.range.start + index)
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq for Slice<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq<SliceMut<'a, A, N>>
+ for Slice<'a, A, N>
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &SliceMut<'a, A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq<RingBuffer<A, N>>
+ for Slice<'a, A, N>
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &RingBuffer<A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a, S> PartialEq<S> for Slice<'a, A, N>
+where
+ S: Borrow<[A]>,
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &S) -> bool {
+ let other = other.borrow();
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: Eq + 'a, N: ChunkLength<A> + 'a> Eq for Slice<'a, A, N> {}
+
+impl<'a, A: PartialOrd + 'a, N: ChunkLength<A> + 'a> PartialOrd for Slice<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+impl<'a, A: Ord + 'a, N: ChunkLength<A> + 'a> Ord for Slice<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+impl<'a, A: Debug + 'a, N: ChunkLength<A> + 'a> Debug for Slice<'a, A, N> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ f.write_str("RingBuffer")?;
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<'a, A: Hash + 'a, N: ChunkLength<A> + 'a> Hash for Slice<'a, A, N> {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ for item in self {
+ item.hash(hasher)
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> IntoIterator for &'a Slice<'a, A, N> {
+ type Item = &'a A;
+ type IntoIter = Iter<'a, A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+// Mutable slice
+
+/// An indexable representation of a mutable subset of a `RingBuffer`.
+pub struct SliceMut<'a, A, N: ChunkLength<A>> {
+ pub(crate) buffer: &'a mut RingBuffer<A, N>,
+ pub(crate) range: Range<usize>,
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> HasLength for SliceMut<'a, A, N> {
+ /// Get the length of the slice.
+ #[inline]
+ #[must_use]
+ fn len(&self) -> usize {
+ self.range.end - self.range.start
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Array for SliceMut<'a, A, N> {
+ /// Get a reference to the value at a given index.
+ #[inline]
+ #[must_use]
+ fn get(&self, index: usize) -> Option<&A> {
+ if index >= self.len() {
+ None
+ } else {
+ Some(unsafe { self.get_unchecked(index) })
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> ArrayMut for SliceMut<'a, A, N> {
+ /// Get a mutable reference to the value at a given index.
+ #[inline]
+ #[must_use]
+ fn get_mut(&mut self, index: usize) -> Option<&mut A> {
+ if index >= self.len() {
+ None
+ } else {
+ Some(unsafe { self.get_unchecked_mut(index) })
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> SliceMut<'a, A, N> {
+ /// Downgrade this slice into a non-mutable slice.
+ #[inline]
+ #[must_use]
+ pub fn unmut(self) -> Slice<'a, A, N> {
+ Slice {
+ buffer: self.buffer,
+ range: self.range,
+ }
+ }
+
+ /// Get an unchecked reference to the value at the given index.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure the index is not out of bounds.
+ #[must_use]
+ pub unsafe fn get_unchecked(&self, index: usize) -> &A {
+ self.buffer.get_unchecked(self.range.start + index)
+ }
+
+ /// Get an unchecked mutable reference to the value at the given index.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure the index is not out of bounds.
+ #[must_use]
+ pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
+ self.buffer.get_unchecked_mut(self.range.start + index)
+ }
+
+ /// Get an iterator over references to the items in the slice in order.
+ #[inline]
+ #[must_use]
+ pub fn iter(&self) -> Iter<'_, A, N> {
+ Iter {
+ buffer: self.buffer,
+ left_index: self.buffer.origin + self.range.start,
+ right_index: self.buffer.origin + self.range.start + self.len(),
+ remaining: self.len(),
+ }
+ }
+
+ /// Get an iterator over mutable references to the items in the slice in
+ /// order.
+ #[inline]
+ #[must_use]
+ pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
+ IterMut::new_slice(
+ self.buffer,
+ self.buffer.origin + self.range.start,
+ self.len(),
+ )
+ }
+
+ /// Create a subslice of this slice.
+ ///
+ /// This consumes the slice. Because the slice works like a mutable
+ /// reference, you can only have one slice over a given subset of a
+ /// `RingBuffer` at any one time, so that's just how it's got to be.
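+ ///
+ /// # Examples
+ ///
+ /// A rough sketch of re-slicing and writing through the result; the import
+ /// paths assume the crate's default `ringbuffer` feature:
+ ///
+ /// ```rust
+ /// # use sized_chunks::ring_buffer::{RingBuffer, SliceMut};
+ /// # use typenum::U64;
+ /// let mut buffer: RingBuffer<i32, U64> = (0..5).collect();
+ /// let mut inner = SliceMut::from(&mut buffer).slice(1..4);
+ /// inner[0] = 100;
+ /// assert_eq!(100, buffer[1]);
+ /// ```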
+ #[must_use]
+ pub fn slice<R: RangeBounds<usize>>(self, range: R) -> SliceMut<'a, A, N> {
+ let new_range = Range {
+ start: match range.start_bound() {
+ Bound::Unbounded => self.range.start,
+ Bound::Included(index) => self.range.start + index,
+ Bound::Excluded(_) => unimplemented!(),
+ },
+ end: match range.end_bound() {
+ Bound::Unbounded => self.range.end,
+ Bound::Included(index) => self.range.start + index + 1,
+ Bound::Excluded(index) => self.range.start + index,
+ },
+ };
+ if new_range.start < self.range.start
+ || new_range.end > self.range.end
+ || new_range.start > new_range.end
+ {
+ panic!("Slice::slice: index out of bounds");
+ }
+ SliceMut {
+ buffer: self.buffer,
+ range: new_range,
+ }
+ }
+
+ /// Split the slice into two subslices at the given index.
+ #[must_use]
+ pub fn split_at(self, index: usize) -> (SliceMut<'a, A, N>, SliceMut<'a, A, N>) {
+ if index > self.len() {
+ panic!("SliceMut::split_at: index out of bounds");
+ }
+ let index = self.range.start + index;
+ let ptr: *mut RingBuffer<A, N> = self.buffer;
+ (
+ SliceMut {
+ buffer: unsafe { &mut *ptr },
+ range: Range {
+ start: self.range.start,
+ end: index,
+ },
+ },
+ SliceMut {
+ buffer: unsafe { &mut *ptr },
+ range: Range {
+ start: index,
+ end: self.range.end,
+ },
+ },
+ )
+ }
+
+ /// Construct a new `RingBuffer` by copying the elements in this slice.
+ #[inline]
+ #[must_use]
+ pub fn to_owned(&self) -> RingBuffer<A, N>
+ where
+ A: Clone,
+ {
+ self.iter().cloned().collect()
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> From<&'a mut RingBuffer<A, N>> for SliceMut<'a, A, N> {
+ #[must_use]
+ fn from(buffer: &'a mut RingBuffer<A, N>) -> Self {
+ SliceMut {
+ range: Range {
+ start: 0,
+ end: buffer.len(),
+ },
+ buffer,
+ }
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Into<Slice<'a, A, N>> for SliceMut<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn into(self) -> Slice<'a, A, N> {
+ self.unmut()
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> Index<usize> for SliceMut<'a, A, N> {
+ type Output = A;
+
+ #[inline]
+ #[must_use]
+ fn index(&self, index: usize) -> &Self::Output {
+ self.buffer.index(self.range.start + index)
+ }
+}
+
+impl<'a, A: 'a, N: ChunkLength<A> + 'a> IndexMut<usize> for SliceMut<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ self.buffer.index_mut(self.range.start + index)
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq for SliceMut<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq<Slice<'a, A, N>>
+ for SliceMut<'a, A, N>
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &Slice<'a, A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a> PartialEq<RingBuffer<A, N>>
+ for SliceMut<'a, A, N>
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &RingBuffer<A, N>) -> bool {
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: PartialEq + 'a, N: ChunkLength<A> + 'a, S> PartialEq<S> for SliceMut<'a, A, N>
+where
+ S: Borrow<[A]>,
+{
+ #[inline]
+ #[must_use]
+ fn eq(&self, other: &S) -> bool {
+ let other = other.borrow();
+ self.len() == other.len() && self.iter().eq(other.iter())
+ }
+}
+
+impl<'a, A: Eq + 'a, N: ChunkLength<A> + 'a> Eq for SliceMut<'a, A, N> {}
+
+impl<'a, A: PartialOrd + 'a, N: ChunkLength<A> + 'a> PartialOrd for SliceMut<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+impl<'a, A: Ord + 'a, N: ChunkLength<A> + 'a> Ord for SliceMut<'a, A, N> {
+ #[inline]
+ #[must_use]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+impl<'a, A: Debug + 'a, N: ChunkLength<A> + 'a> Debug for SliceMut<'a, A, N> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ f.write_str("RingBuffer")?;
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<'a, A: Hash + 'a, N: ChunkLength<A> + 'a> Hash for SliceMut<'a, A, N> {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ for item in self {
+ item.hash(hasher)
+ }
+ }
+}
+
+impl<'a, 'b, A: 'a, N: ChunkLength<A> + 'a> IntoIterator for &'a SliceMut<'a, A, N> {
+ type Item = &'a A;
+ type IntoIter = Iter<'a, A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, 'b, A: 'a, N: ChunkLength<A> + 'a> IntoIterator for &'a mut SliceMut<'a, A, N> {
+ type Item = &'a mut A;
+ type IntoIter = IterMut<'a, A, N>;
+
+ #[inline]
+ #[must_use]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
diff --git a/vendor/sized-chunks/src/sized_chunk/iter.rs b/vendor/sized-chunks/src/sized_chunk/iter.rs
new file mode 100644
index 000000000..88c75fd23
--- /dev/null
+++ b/vendor/sized-chunks/src/sized_chunk/iter.rs
@@ -0,0 +1,109 @@
+use core::iter::FusedIterator;
+
+use super::Chunk;
+use crate::types::ChunkLength;
+
+/// A consuming iterator over the elements of a `Chunk`.
+pub struct Iter<A, N>
+where
+ N: ChunkLength<A>,
+{
+ pub(crate) chunk: Chunk<A, N>,
+}
+
+impl<A, N> Iterator for Iter<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Item = A;
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.chunk.is_empty() {
+ None
+ } else {
+ Some(self.chunk.pop_front())
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.chunk.len(), Some(self.chunk.len()))
+ }
+}
+
+impl<A, N> DoubleEndedIterator for Iter<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.chunk.is_empty() {
+ None
+ } else {
+ Some(self.chunk.pop_back())
+ }
+ }
+}
+
+impl<A, N> ExactSizeIterator for Iter<A, N> where N: ChunkLength<A> {}
+
+impl<A, N> FusedIterator for Iter<A, N> where N: ChunkLength<A> {}
+
+/// A draining iterator over the elements of a `Chunk`.
+///
+/// "Draining" means that as the iterator yields each element, it's removed from
+/// the `Chunk`. When the iterator terminates, the chunk will be empty. This is
+/// different from the consuming iterator `Iter` in that `Iter` will take
+/// ownership of the `Chunk` and discard it when you're done iterating, while
+/// `Drain` leaves you still owning the drained `Chunk`.
+pub struct Drain<'a, A, N>
+where
+ N: ChunkLength<A>,
+{
+ pub(crate) chunk: &'a mut Chunk<A, N>,
+}
+
+impl<'a, A, N> Iterator for Drain<'a, A, N>
+where
+ A: 'a,
+ N: ChunkLength<A> + 'a,
+{
+ type Item = A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.chunk.is_empty() {
+ None
+ } else {
+ Some(self.chunk.pop_front())
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.chunk.len(), Some(self.chunk.len()))
+ }
+}
+
+impl<'a, A, N> DoubleEndedIterator for Drain<'a, A, N>
+where
+ A: 'a,
+ N: ChunkLength<A> + 'a,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.chunk.is_empty() {
+ None
+ } else {
+ Some(self.chunk.pop_back())
+ }
+ }
+}
+
+impl<'a, A, N> ExactSizeIterator for Drain<'a, A, N>
+where
+ A: 'a,
+ N: ChunkLength<A> + 'a,
+{
+}
+
+impl<'a, A, N> FusedIterator for Drain<'a, A, N>
+where
+ A: 'a,
+ N: ChunkLength<A> + 'a,
+{
+}
diff --git a/vendor/sized-chunks/src/sized_chunk/mod.rs b/vendor/sized-chunks/src/sized_chunk/mod.rs
new file mode 100644
index 000000000..fc406433d
--- /dev/null
+++ b/vendor/sized-chunks/src/sized_chunk/mod.rs
@@ -0,0 +1,1411 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! A fixed capacity smart array.
+//!
+//! See [`Chunk`](struct.Chunk.html)
+
+use crate::inline_array::InlineArray;
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp::Ordering;
+use core::fmt::{Debug, Error, Formatter};
+use core::hash::{Hash, Hasher};
+use core::iter::FromIterator;
+use core::mem::{replace, MaybeUninit};
+use core::ops::{Deref, DerefMut, Index, IndexMut};
+use core::ptr;
+use core::slice::{
+ from_raw_parts, from_raw_parts_mut, Iter as SliceIter, IterMut as SliceIterMut, SliceIndex,
+};
+
+#[cfg(feature = "std")]
+use std::io;
+
+use typenum::U64;
+
+use crate::types::ChunkLength;
+
+mod iter;
+pub use self::iter::{Drain, Iter};
+
+#[cfg(feature = "refpool")]
+mod refpool;
+
+/// A fixed capacity smart array.
+///
+/// An inline array of items with a variable length but a fixed, preallocated
+/// capacity given by the `N` type, which must be an [`Unsigned`][Unsigned] type
+/// level numeral.
+///
+/// It's 'smart' because it's able to reorganise its contents based on expected
+/// behaviour. If you construct one using `push_back`, it will be laid out like
+/// a `Vec` with space at the end. If you `push_front` it will start filling in
+/// values from the back instead of the front, so that you still get linear time
+/// push as long as you don't reverse direction. If you do, and there's no room
+/// at the end you're pushing to, it'll shift its contents over to the other
+/// side, creating more space to push into. This technique is tuned for
+/// `Chunk`'s expected use case in [im::Vector]: usually, chunks always see
+/// either `push_front` or `push_back`, but not both unless they move around
+/// inside the tree, in which case they're able to reorganise themselves with
+/// reasonable efficiency to suit their new usage patterns.
+///
+/// It maintains a `left` index and a `right` index instead of a simple length
+/// counter in order to accomplish this, much like a ring buffer would, except
+/// that the `Chunk` keeps all its items sequentially in memory so that you can
+/// always get a `&[A]` slice for them, at the price of the occasional
+/// reordering operation. The allocated size of a `Chunk` is thus `usize` * 2 +
+/// `A` * `N`.
+///
+/// This technique also lets us choose to shift the shortest side to account for
+/// the inserted or removed element when performing insert and remove
+/// operations, unlike `Vec` where you always need to shift the right hand side.
+///
+/// Unlike a `Vec`, the `Chunk` has a fixed capacity and cannot grow beyond it.
+/// Being intended for low level use, it expects you to know or test whether
+/// you're pushing to a full array, and has an API more geared towards panics
+/// than returning `Option`s, on the assumption that you know what you're doing.
+/// Of course, if you don't, you can expect it to panic immediately rather than
+/// do something undefined and usually bad.
+///
+/// ## Isn't this just a less efficient ring buffer?
+///
+/// You might be wondering why you would want to use this data structure rather
+/// than a [`RingBuffer`][RingBuffer], which is similar but doesn't need to
+/// shift its content around when it hits the sides of the allocated buffer. The
+/// answer is that `Chunk` can be dereferenced into a slice, while a ring buffer
+/// can not. You'll also save a few cycles on index lookups, as a `Chunk`'s data
+/// is guaranteed to be contiguous in memory, so there's no need to remap logical
+/// indices to a ring buffer's physical layout.
+///
+/// # Examples
+///
+/// ```rust
+/// # #[macro_use] extern crate sized_chunks;
+/// # extern crate typenum;
+/// # use sized_chunks::Chunk;
+/// # use typenum::U64;
+/// // Construct a chunk with a 64 item capacity
+/// let mut chunk = Chunk::<i32, U64>::new();
+/// // Fill it with descending numbers
+/// chunk.extend((0..64).rev());
+/// // It derefs to a slice so we can use standard slice methods
+/// chunk.sort();
+/// // It's got all the amenities like `FromIterator` and `Eq`
+/// let expected: Chunk<i32, U64> = (0..64).collect();
+/// assert_eq!(expected, chunk);
+/// ```
+///
+/// [Unsigned]: https://docs.rs/typenum/1.10.0/typenum/marker_traits/trait.Unsigned.html
+/// [im::Vector]: https://docs.rs/im/latest/im/vector/enum.Vector.html
+/// [RingBuffer]: ../ring_buffer/struct.RingBuffer.html
+pub struct Chunk<A, N = U64>
+where
+ N: ChunkLength<A>,
+{
+ left: usize,
+ right: usize,
+ data: MaybeUninit<N::SizedType>,
+}
+
+impl<A, N> Drop for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn drop(&mut self) {
+ unsafe { ptr::drop_in_place(self.as_mut_slice()) }
+ }
+}
+
+impl<A, N> Clone for Chunk<A, N>
+where
+ A: Clone,
+ N: ChunkLength<A>,
+{
+ fn clone(&self) -> Self {
+ let mut out = Self::new();
+ out.left = self.left;
+ out.right = self.left;
+ for index in self.left..self.right {
+ unsafe { Chunk::force_write(index, (*self.ptr(index)).clone(), &mut out) }
+ // Panic safety, move the right index to cover only the really initialized things. This
+ // way we don't try to drop uninitialized, but also don't leak if we panic in the
+ // middle.
+ out.right = index + 1;
+ }
+ out
+ }
+}
+
+impl<A, N> Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// The maximum number of elements this `Chunk` can contain.
+ pub const CAPACITY: usize = N::USIZE;
+
+ /// Construct a new empty chunk.
+ pub fn new() -> Self {
+ Self {
+ left: 0,
+ right: 0,
+ data: MaybeUninit::uninit(),
+ }
+ }
+
+ /// Construct a new chunk with one item.
+ pub fn unit(value: A) -> Self {
+ assert!(Self::CAPACITY >= 1);
+ let mut chunk = Self {
+ left: 0,
+ right: 1,
+ data: MaybeUninit::uninit(),
+ };
+ unsafe {
+ Chunk::force_write(0, value, &mut chunk);
+ }
+ chunk
+ }
+
+ /// Construct a new chunk with two items.
+ pub fn pair(left: A, right: A) -> Self {
+ assert!(Self::CAPACITY >= 2);
+ let mut chunk = Self {
+ left: 0,
+ right: 2,
+ data: MaybeUninit::uninit(),
+ };
+ unsafe {
+ Chunk::force_write(0, left, &mut chunk);
+ Chunk::force_write(1, right, &mut chunk);
+ }
+ chunk
+ }
+
+ /// Construct a new chunk and move every item from `other` into the new
+ /// chunk.
+ ///
+ /// Time: O(n)
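+ ///
+ /// # Examples
+ ///
+ /// A small sketch, assuming the default `U64` capacity:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut from: Chunk<i32> = (1..=3).collect();
+ /// let to = Chunk::drain_from(&mut from);
+ /// assert!(from.is_empty());
+ /// assert_eq!(&[1, 2, 3], to.as_slice());
+ /// ```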
+ pub fn drain_from(other: &mut Self) -> Self {
+ let other_len = other.len();
+ Self::from_front(other, other_len)
+ }
+
+ /// Construct a new chunk and populate it by taking `count` items from the
+ /// iterator `iter`.
+ ///
+ /// Panics if the iterator contains fewer than `count` items.
+ ///
+ /// Time: O(n)
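+ ///
+ /// # Examples
+ ///
+ /// A small sketch of taking a prefix from an iterator:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut it = 1..=5;
+ /// let chunk: Chunk<i32> = Chunk::collect_from(&mut it, 3);
+ /// assert_eq!(&[1, 2, 3], chunk.as_slice());
+ /// assert_eq!(Some(4), it.next());
+ /// ```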
+ pub fn collect_from<I>(iter: &mut I, mut count: usize) -> Self
+ where
+ I: Iterator<Item = A>,
+ {
+ let mut chunk = Self::new();
+ while count > 0 {
+ count -= 1;
+ chunk.push_back(
+ iter.next()
+ .expect("Chunk::collect_from: underfull iterator"),
+ );
+ }
+ chunk
+ }
+
+ /// Construct a new chunk and populate it by taking `count` items from the
+ /// front of `other`.
+ ///
+ /// Time: O(n) for the number of items moved
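+ ///
+ /// # Examples
+ ///
+ /// A small sketch, assuming the default `U64` capacity:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut other: Chunk<i32> = (0..5).collect();
+ /// let front = Chunk::from_front(&mut other, 2);
+ /// assert_eq!(&[0, 1], front.as_slice());
+ /// assert_eq!(&[2, 3, 4], other.as_slice());
+ /// ```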
+ pub fn from_front(other: &mut Self, count: usize) -> Self {
+ let other_len = other.len();
+ debug_assert!(count <= other_len);
+ let mut chunk = Self::new();
+ unsafe { Chunk::force_copy_to(other.left, 0, count, other, &mut chunk) };
+ chunk.right = count;
+ other.left += count;
+ chunk
+ }
+
+ /// Construct a new chunk and populate it by taking `count` items from the
+ /// back of `other`.
+ ///
+ /// Time: O(n) for the number of items moved
+ pub fn from_back(other: &mut Self, count: usize) -> Self {
+ let other_len = other.len();
+ debug_assert!(count <= other_len);
+ let mut chunk = Self::new();
+ unsafe { Chunk::force_copy_to(other.right - count, 0, count, other, &mut chunk) };
+ chunk.right = count;
+ other.right -= count;
+ chunk
+ }
+
+ /// Get the length of the chunk.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.right - self.left
+ }
+
+ /// Test if the chunk is empty.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.left == self.right
+ }
+
+ /// Test if the chunk is at capacity.
+ #[inline]
+ pub fn is_full(&self) -> bool {
+ self.left == 0 && self.right == Self::CAPACITY
+ }
+
+ #[inline]
+ unsafe fn ptr(&self, index: usize) -> *const A {
+ (&self.data as *const _ as *const A).add(index)
+ }
+
+ /// Get a mutable pointer to the value at the given index, with no bounds checks.
+ #[inline]
+ unsafe fn mut_ptr(&mut self, index: usize) -> *mut A {
+ (&mut self.data as *mut _ as *mut A).add(index)
+ }
+
+ /// Copy the value at an index, discarding ownership of the copied value
+ #[inline]
+ unsafe fn force_read(index: usize, chunk: &mut Self) -> A {
+ chunk.ptr(index).read()
+ }
+
+ /// Write a value at an index without trying to drop what's already there.
+ /// It has no bounds checks.
+ #[inline]
+ unsafe fn force_write(index: usize, value: A, chunk: &mut Self) {
+ chunk.mut_ptr(index).write(value)
+ }
+
+ /// Copy a range within a chunk
+ #[inline]
+ unsafe fn force_copy(from: usize, to: usize, count: usize, chunk: &mut Self) {
+ if count > 0 {
+ ptr::copy(chunk.ptr(from), chunk.mut_ptr(to), count)
+ }
+ }
+
+ /// Write values from iterator into range starting at write_index.
+ ///
+ /// Will overwrite values at the relevant range without dropping even in case the values were
+ /// already initialized (it is expected they are empty). Does not update the left or right
+ /// index.
+ ///
+ /// # Safety
+ ///
+ /// Range checks must already have been performed.
+ ///
+ /// # Panics
+ ///
+ /// If the iterator panics, the chunk becomes conceptually empty and will leak any previous
+ /// elements (even the ones outside the range).
+ #[inline]
+ unsafe fn write_from_iter<I>(mut write_index: usize, iter: I, chunk: &mut Self)
+ where
+ I: ExactSizeIterator<Item = A>,
+ {
+ // Panic safety. We make the array conceptually empty, so we never ever drop anything that
+ // is uninitialized. We do so because we expect to be called when there's a potential "hole"
+ // in the array that makes space for the new elements to be written. We restore the original
+ // indices when everything goes fine, but leak any elements on panic. This is bad, but
+ // better than dropping non-existing stuff.
+ //
+ // Should we worry about some better panic recovery than this?
+ let left = replace(&mut chunk.left, 0);
+ let right = replace(&mut chunk.right, 0);
+ let len = iter.len();
+ let expected_end = write_index + len;
+ for value in iter.take(len) {
+ Chunk::force_write(write_index, value, chunk);
+ write_index += 1;
+ }
+ // Oops, we have a hole in here now. That would be bad, give up.
+ assert_eq!(
+ expected_end, write_index,
+ "ExactSizeIterator yielded fewer values than advertised",
+ );
+ chunk.left = left;
+ chunk.right = right;
+ }
+
+ /// Copy a range between chunks
+ #[inline]
+ unsafe fn force_copy_to(
+ from: usize,
+ to: usize,
+ count: usize,
+ chunk: &mut Self,
+ other: &mut Self,
+ ) {
+ if count > 0 {
+ ptr::copy_nonoverlapping(chunk.ptr(from), other.mut_ptr(to), count)
+ }
+ }
+
+ /// Push an item to the front of the chunk.
+ ///
+ /// Panics if the capacity of the chunk is exceeded.
+ ///
+ /// Time: O(1) if there's room at the front, O(n) otherwise
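+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of mixed front and back pushes:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk = Chunk::<i32>::new();
+ /// chunk.push_back(2);
+ /// chunk.push_front(1);
+ /// assert_eq!(&[1, 2], chunk.as_slice());
+ /// ```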
+ pub fn push_front(&mut self, value: A) {
+ if self.is_full() {
+ panic!("Chunk::push_front: can't push to full chunk");
+ }
+ if self.is_empty() {
+ self.left = N::USIZE;
+ self.right = N::USIZE;
+ } else if self.left == 0 {
+ self.left = N::USIZE - self.right;
+ unsafe { Chunk::force_copy(0, self.left, self.right, self) };
+ self.right = N::USIZE;
+ }
+ self.left -= 1;
+ unsafe { Chunk::force_write(self.left, value, self) }
+ }
+
+ /// Push an item to the back of the chunk.
+ ///
+ /// Panics if the capacity of the chunk is exceeded.
+ ///
+ /// Time: O(1) if there's room at the back, O(n) otherwise
+ pub fn push_back(&mut self, value: A) {
+ if self.is_full() {
+ panic!("Chunk::push_back: can't push to full chunk");
+ }
+ if self.is_empty() {
+ self.left = 0;
+ self.right = 0;
+ } else if self.right == N::USIZE {
+ unsafe { Chunk::force_copy(self.left, 0, self.len(), self) };
+ self.right = N::USIZE - self.left;
+ self.left = 0;
+ }
+ unsafe { Chunk::force_write(self.right, value, self) }
+ self.right += 1;
+ }
+
+ /// Pop an item off the front of the chunk.
+ ///
+ /// Panics if the chunk is empty.
+ ///
+ /// Time: O(1)
+ pub fn pop_front(&mut self) -> A {
+ if self.is_empty() {
+ panic!("Chunk::pop_front: can't pop from empty chunk");
+ } else {
+ let value = unsafe { Chunk::force_read(self.left, self) };
+ self.left += 1;
+ value
+ }
+ }
+
+ /// Pop an item off the back of the chunk.
+ ///
+ /// Panics if the chunk is empty.
+ ///
+ /// Time: O(1)
+ pub fn pop_back(&mut self) -> A {
+ if self.is_empty() {
+ panic!("Chunk::pop_back: can't pop from empty chunk");
+ } else {
+ self.right -= 1;
+ unsafe { Chunk::force_read(self.right, self) }
+ }
+ }
+
+ /// Discard all items up to but not including `index`.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items dropped
+ pub fn drop_left(&mut self, index: usize) {
+ if index > 0 {
+ unsafe { ptr::drop_in_place(&mut self[..index]) }
+ self.left += index;
+ }
+ }
+
+ /// Discard all items from `index` onward.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items dropped
+ pub fn drop_right(&mut self, index: usize) {
+ if index != self.len() {
+ unsafe { ptr::drop_in_place(&mut self[index..]) }
+ self.right = self.left + index;
+ }
+ }
+
+ /// Split a chunk into two, the original chunk containing
+ /// everything up to `index` and the returned chunk containing
+ /// everything from `index` onwards.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(n) for the number of items in the new chunk
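+ ///
+ /// # Examples
+ ///
+ /// A small sketch:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut left: Chunk<i32> = (0..6).collect();
+ /// let right = left.split_off(3);
+ /// assert_eq!(&[0, 1, 2], left.as_slice());
+ /// assert_eq!(&[3, 4, 5], right.as_slice());
+ /// ```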
+ pub fn split_off(&mut self, index: usize) -> Self {
+ if index > self.len() {
+ panic!("Chunk::split_off: index out of bounds");
+ }
+ if index == self.len() {
+ return Self::new();
+ }
+ let mut right_chunk = Self::new();
+ let start = self.left + index;
+ let len = self.right - start;
+ unsafe { Chunk::force_copy_to(start, 0, len, self, &mut right_chunk) };
+ right_chunk.right = len;
+ self.right = start;
+ right_chunk
+ }
+
+ /// Remove all items from `other` and append them to the back of `self`.
+ ///
+ /// Panics if the capacity of the chunk is exceeded.
+ ///
+ /// Time: O(n) for the number of items moved
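+ ///
+ /// # Examples
+ ///
+ /// A small sketch:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut left: Chunk<i32> = (0..3).collect();
+ /// let mut right: Chunk<i32> = (3..6).collect();
+ /// left.append(&mut right);
+ /// assert_eq!(&[0, 1, 2, 3, 4, 5], left.as_slice());
+ /// assert!(right.is_empty());
+ /// ```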
+ pub fn append(&mut self, other: &mut Self) {
+ let self_len = self.len();
+ let other_len = other.len();
+ if self_len + other_len > N::USIZE {
+ panic!("Chunk::append: chunk size overflow");
+ }
+ if self.right + other_len > N::USIZE {
+ unsafe { Chunk::force_copy(self.left, 0, self_len, self) };
+ self.right -= self.left;
+ self.left = 0;
+ }
+ unsafe { Chunk::force_copy_to(other.left, self.right, other_len, other, self) };
+ self.right += other_len;
+ other.left = 0;
+ other.right = 0;
+ }
+
+ /// Remove `count` items from the front of `other` and append them to the
+ /// back of `self`.
+ ///
+ /// Panics if `self` doesn't have room for `count` more items, or if `other`
+ /// has fewer than `count` items.
+ ///
+ /// Time: O(n) for the number of items moved
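+ ///
+ /// # Examples
+ ///
+ /// A small sketch, assuming both chunks use the default `U64` capacity:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut this: Chunk<i32> = (0..2).collect();
+ /// let mut other: Chunk<i32> = (2..6).collect();
+ /// this.drain_from_front(&mut other, 2);
+ /// assert_eq!(&[0, 1, 2, 3], this.as_slice());
+ /// assert_eq!(&[4, 5], other.as_slice());
+ /// ```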
+ pub fn drain_from_front(&mut self, other: &mut Self, count: usize) {
+ let self_len = self.len();
+ let other_len = other.len();
+ assert!(self_len + count <= N::USIZE);
+ assert!(other_len >= count);
+ if self.right + count > N::USIZE {
+ unsafe { Chunk::force_copy(self.left, 0, self_len, self) };
+ self.right -= self.left;
+ self.left = 0;
+ }
+ unsafe { Chunk::force_copy_to(other.left, self.right, count, other, self) };
+ self.right += count;
+ other.left += count;
+ }
+
+ /// Remove `count` items from the back of `other` and append them to the
+ /// front of `self`.
+ ///
+ /// Panics if `self` doesn't have room for `count` more items, or if `other`
+ /// has fewer than `count` items.
+ ///
+ /// Time: O(n) for the number of items moved
+ pub fn drain_from_back(&mut self, other: &mut Self, count: usize) {
+ let self_len = self.len();
+ let other_len = other.len();
+ assert!(self_len + count <= N::USIZE);
+ assert!(other_len >= count);
+ if self.left < count {
+ unsafe { Chunk::force_copy(self.left, N::USIZE - self_len, self_len, self) };
+ self.left = N::USIZE - self_len;
+ self.right = N::USIZE;
+ }
+ unsafe { Chunk::force_copy_to(other.right - count, self.left - count, count, other, self) };
+ self.left -= count;
+ other.right -= count;
+ }
+
+ /// Update the value at index `index`, returning the old value.
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// Time: O(1)
+ pub fn set(&mut self, index: usize, value: A) -> A {
+ replace(&mut self[index], value)
+ }
+
+ /// Insert a new value at index `index`, shifting all the following values
+ /// to the right.
+ ///
+ /// Panics if the index is out of bounds or the chunk is full.
+ ///
+ /// Time: O(n) for the number of elements shifted
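+ ///
+ /// # Examples
+ ///
+ /// A small sketch:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk: Chunk<i32> = (0..5).collect();
+ /// chunk.insert(2, 100);
+ /// assert_eq!(&[0, 1, 100, 2, 3, 4], chunk.as_slice());
+ /// ```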
+ pub fn insert(&mut self, index: usize, value: A) {
+ if self.is_full() {
+ panic!("Chunk::insert: chunk is full");
+ }
+ if index > self.len() {
+ panic!("Chunk::insert: index out of bounds");
+ }
+ let real_index = index + self.left;
+ let left_size = index;
+ let right_size = self.right - real_index;
+ if self.right == N::USIZE || (self.left > 0 && left_size < right_size) {
+ unsafe {
+ Chunk::force_copy(self.left, self.left - 1, left_size, self);
+ Chunk::force_write(real_index - 1, value, self);
+ }
+ self.left -= 1;
+ } else {
+ unsafe {
+ Chunk::force_copy(real_index, real_index + 1, right_size, self);
+ Chunk::force_write(real_index, value, self);
+ }
+ self.right += 1;
+ }
+ }
+
+ /// Insert a new value into the chunk in sorted order.
+ ///
+ /// This assumes every element of the chunk is already in sorted order.
+ /// If not, the value will still be inserted but the ordering is not
+ /// guaranteed.
+ ///
+ /// Time: O(log n) to find the insert position, then O(n) for the number
+ /// of elements shifted.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use std::iter::FromIterator;
+ /// # use sized_chunks::Chunk;
+ /// # use typenum::U64;
+ /// let mut chunk = Chunk::<i32, U64>::from_iter(0..5);
+ /// chunk.insert_ordered(3);
+ /// assert_eq!(&[0, 1, 2, 3, 3, 4], chunk.as_slice());
+ /// ```
+ pub fn insert_ordered(&mut self, value: A)
+ where
+ A: Ord,
+ {
+ if self.is_full() {
+ panic!("Chunk::insert: chunk is full");
+ }
+ match self.binary_search(&value) {
+ Ok(index) => self.insert(index, value),
+ Err(index) => self.insert(index, value),
+ }
+ }
+
+ /// Insert multiple values at index `index`, shifting all the following values
+ /// to the right.
+ ///
+ /// Panics if the index is out of bounds or the chunk doesn't have room for
+ /// all the values.
+ ///
+ /// Time: O(m+n) where m is the number of elements inserted and n is the number
+ /// of elements following the insertion index. Calling `insert`
+ /// repeatedly would be O(m*n).
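+ ///
+ /// # Examples
+ ///
+ /// A small sketch using a `Vec` as the exact-size source:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk: Chunk<i32> = (0..4).collect();
+ /// chunk.insert_from(2, vec![10, 11]);
+ /// assert_eq!(&[0, 1, 10, 11, 2, 3], chunk.as_slice());
+ /// ```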
+ pub fn insert_from<Iterable, I>(&mut self, index: usize, iter: Iterable)
+ where
+ Iterable: IntoIterator<Item = A, IntoIter = I>,
+ I: ExactSizeIterator<Item = A>,
+ {
+ let iter = iter.into_iter();
+ let insert_size = iter.len();
+ if self.len() + insert_size > Self::CAPACITY {
+ panic!(
+ "Chunk::insert_from: chunk cannot fit {} elements",
+ insert_size
+ );
+ }
+ if index > self.len() {
+ panic!("Chunk::insert_from: index out of bounds");
+ }
+ let real_index = index + self.left;
+ let left_size = index;
+ let right_size = self.right - real_index;
+ if self.right == N::USIZE || (self.left >= insert_size && left_size < right_size) {
+ unsafe {
+ Chunk::force_copy(self.left, self.left - insert_size, left_size, self);
+ let write_index = real_index - insert_size;
+ Chunk::write_from_iter(write_index, iter, self);
+ }
+ self.left -= insert_size;
+ } else if self.left == 0 || (self.right + insert_size <= Self::CAPACITY) {
+ unsafe {
+ Chunk::force_copy(real_index, real_index + insert_size, right_size, self);
+ let write_index = real_index;
+ Chunk::write_from_iter(write_index, iter, self);
+ }
+ self.right += insert_size;
+ } else {
+ unsafe {
+ Chunk::force_copy(self.left, 0, left_size, self);
+ Chunk::force_copy(real_index, left_size + insert_size, right_size, self);
+ let write_index = left_size;
+ Chunk::write_from_iter(write_index, iter, self);
+ }
+ self.right -= self.left;
+ self.right += insert_size;
+ self.left = 0;
+ }
+ }
+
+ /// Remove the value at index `index`, shifting all the following values to
+ /// the left.
+ ///
+ /// Returns the removed value.
+ ///
+ /// Panics if the index is out of bounds.
+ ///
+ /// Time: O(n) for the number of items shifted
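+ ///
+ /// # Examples
+ ///
+ /// A small sketch:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk: Chunk<i32> = (0..5).collect();
+ /// assert_eq!(2, chunk.remove(2));
+ /// assert_eq!(&[0, 1, 3, 4], chunk.as_slice());
+ /// ```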
+ pub fn remove(&mut self, index: usize) -> A {
+ if index >= self.len() {
+ panic!("Chunk::remove: index out of bounds");
+ }
+ let real_index = index + self.left;
+ let value = unsafe { Chunk::force_read(real_index, self) };
+ let left_size = index;
+ let right_size = self.right - real_index - 1;
+ if left_size < right_size {
+ unsafe { Chunk::force_copy(self.left, self.left + 1, left_size, self) };
+ self.left += 1;
+ } else {
+ unsafe { Chunk::force_copy(real_index + 1, real_index, right_size, self) };
+ self.right -= 1;
+ }
+ value
+ }
+
+ /// Construct an iterator that drains values from the front of the chunk.
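+ ///
+ /// # Examples
+ ///
+ /// A small sketch; draining empties the chunk but leaves it usable:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk: Chunk<i32> = (0..3).collect();
+ /// let drained: Vec<i32> = chunk.drain().collect();
+ /// assert_eq!(vec![0, 1, 2], drained);
+ /// assert!(chunk.is_empty());
+ /// ```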
+ pub fn drain(&mut self) -> Drain<'_, A, N> {
+ Drain { chunk: self }
+ }
+
+ /// Discard the contents of the chunk.
+ ///
+ /// Time: O(n)
+ pub fn clear(&mut self) {
+ unsafe { ptr::drop_in_place(self.as_mut_slice()) }
+ self.left = 0;
+ self.right = 0;
+ }
+
+ /// Get a reference to the contents of the chunk as a slice.
+ pub fn as_slice(&self) -> &[A] {
+ unsafe {
+ from_raw_parts(
+ (&self.data as *const MaybeUninit<N::SizedType> as *const A).add(self.left),
+ self.len(),
+ )
+ }
+ }
+
+ /// Get a reference to the contents of the chunk as a mutable slice.
+ pub fn as_mut_slice(&mut self) -> &mut [A] {
+ unsafe {
+ from_raw_parts_mut(
+ (&mut self.data as *mut MaybeUninit<N::SizedType> as *mut A).add(self.left),
+ self.len(),
+ )
+ }
+ }
+}
+
+impl<A, N> Default for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<A, N, I> Index<I> for Chunk<A, N>
+where
+ I: SliceIndex<[A]>,
+ N: ChunkLength<A>,
+{
+ type Output = I::Output;
+ fn index(&self, index: I) -> &Self::Output {
+ self.as_slice().index(index)
+ }
+}
+
+impl<A, N, I> IndexMut<I> for Chunk<A, N>
+where
+ I: SliceIndex<[A]>,
+ N: ChunkLength<A>,
+{
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ self.as_mut_slice().index_mut(index)
+ }
+}
+
+impl<A, N> Debug for Chunk<A, N>
+where
+ A: Debug,
+ N: ChunkLength<A>,
+{
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ f.write_str("Chunk")?;
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<A, N> Hash for Chunk<A, N>
+where
+ A: Hash,
+ N: ChunkLength<A>,
+{
+ fn hash<H>(&self, hasher: &mut H)
+ where
+ H: Hasher,
+ {
+ for item in self {
+ item.hash(hasher)
+ }
+ }
+}
+
+impl<A, N, Slice> PartialEq<Slice> for Chunk<A, N>
+where
+ Slice: Borrow<[A]>,
+ A: PartialEq,
+ N: ChunkLength<A>,
+{
+ fn eq(&self, other: &Slice) -> bool {
+ self.as_slice() == other.borrow()
+ }
+}
+
+impl<A, N> Eq for Chunk<A, N>
+where
+ A: Eq,
+ N: ChunkLength<A>,
+{
+}
+
+impl<A, N> PartialOrd for Chunk<A, N>
+where
+ A: PartialOrd,
+ N: ChunkLength<A>,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+impl<A, N> Ord for Chunk<A, N>
+where
+ A: Ord,
+ N: ChunkLength<A>,
+{
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+#[cfg(feature = "std")]
+impl<N> io::Write for Chunk<u8, N>
+where
+ N: ChunkLength<u8>,
+{
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let old_len = self.len();
+ self.extend(buf.iter().cloned().take(N::USIZE - old_len));
+ Ok(self.len() - old_len)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[cfg(feature = "std")]
+impl<N: ChunkLength<u8>> std::io::Read for Chunk<u8, N> {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ let read_size = buf.len().min(self.len());
+ if read_size == 0 {
+ Ok(0)
+ } else {
+ for p in buf.iter_mut().take(read_size) {
+ *p = self.pop_front();
+ }
+ Ok(read_size)
+ }
+ }
+}
+
+impl<A, N, T> From<InlineArray<A, T>> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ #[inline]
+ fn from(mut array: InlineArray<A, T>) -> Self {
+ Self::from(&mut array)
+ }
+}
+
+impl<'a, A, N, T> From<&'a mut InlineArray<A, T>> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn from(array: &mut InlineArray<A, T>) -> Self {
+ // The first comparison is between constant capacities, to help the compiler optimize the check out
+ assert!(
+ InlineArray::<A, T>::CAPACITY <= Self::CAPACITY || array.len() <= Self::CAPACITY,
+ "CAPACITY too small"
+ );
+ let mut out = Self::new();
+ out.left = 0;
+ out.right = array.len();
+ unsafe {
+ ptr::copy_nonoverlapping(array.data(), out.mut_ptr(0), out.right);
+ *array.len_mut() = 0;
+ }
+ out
+ }
+}
+
+impl<A, N> Borrow<[A]> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn borrow(&self) -> &[A] {
+ self.as_slice()
+ }
+}
+
+impl<A, N> BorrowMut<[A]> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn borrow_mut(&mut self) -> &mut [A] {
+ self.as_mut_slice()
+ }
+}
+
+impl<A, N> AsRef<[A]> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn as_ref(&self) -> &[A] {
+ self.as_slice()
+ }
+}
+
+impl<A, N> AsMut<[A]> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn as_mut(&mut self) -> &mut [A] {
+ self.as_mut_slice()
+ }
+}
+
+impl<A, N> Deref for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Target = [A];
+
+ fn deref(&self) -> &Self::Target {
+ self.as_slice()
+ }
+}
+
+impl<A, N> DerefMut for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.as_mut_slice()
+ }
+}
+
+impl<A, N> FromIterator<A> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ fn from_iter<I>(it: I) -> Self
+ where
+ I: IntoIterator<Item = A>,
+ {
+ let mut chunk = Self::new();
+ for item in it {
+ chunk.push_back(item);
+ }
+ chunk
+ }
+}
+
+impl<'a, A, N> IntoIterator for &'a Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Item = &'a A;
+ type IntoIter = SliceIter<'a, A>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, A, N> IntoIterator for &'a mut Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Item = &'a mut A;
+ type IntoIter = SliceIterMut<'a, A>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<A, N> Extend<A> for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ /// Append the contents of the iterator to the back of the chunk.
+ ///
+ /// Panics if the chunk exceeds its capacity.
+ ///
+ /// Time: O(n) for the length of the iterator
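+ ///
+ /// # Examples
+ ///
+ /// A small sketch:
+ ///
+ /// ```rust
+ /// # use sized_chunks::Chunk;
+ /// let mut chunk = Chunk::<i32>::new();
+ /// chunk.extend(0..3);
+ /// assert_eq!(&[0, 1, 2], chunk.as_slice());
+ /// ```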
+ fn extend<I>(&mut self, it: I)
+ where
+ I: IntoIterator<Item = A>,
+ {
+ for item in it {
+ self.push_back(item);
+ }
+ }
+}
+
+impl<'a, A, N> Extend<&'a A> for Chunk<A, N>
+where
+ A: 'a + Copy,
+ N: ChunkLength<A>,
+{
+ /// Append the contents of the iterator to the back of the chunk.
+ ///
+ /// Panics if the chunk exceeds its capacity.
+ ///
+ /// Time: O(n) for the length of the iterator
+ fn extend<I>(&mut self, it: I)
+ where
+ I: IntoIterator<Item = &'a A>,
+ {
+ for item in it {
+ self.push_back(*item);
+ }
+ }
+}
+
+impl<A, N> IntoIterator for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ type Item = A;
+ type IntoIter = Iter<A, N>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ Iter { chunk: self }
+ }
+}
+
+#[cfg(test)]
+#[rustfmt::skip]
+mod test {
+ use super::*;
+ use typenum::{U0, U1, U2, U3, U5};
+
+ #[test]
+ #[should_panic(expected = "Chunk::push_back: can't push to full chunk")]
+ fn issue_11_testcase1d() {
+ let mut chunk = Chunk::<usize, U2>::pair(123, 456);
+ chunk.push_back(789);
+ }
+
+ #[test]
+ #[should_panic(expected = "CAPACITY too small")]
+ fn issue_11_testcase2a() {
+ let mut from = InlineArray::<u8, [u8; 256]>::new();
+ from.push(1);
+
+ let _ = Chunk::<u8, U0>::from(from);
+ }
+
+ #[test]
+ fn issue_11_testcase2b() {
+ let mut from = InlineArray::<u8, [u8; 256]>::new();
+ from.push(1);
+
+ let _ = Chunk::<u8, U1>::from(from);
+ }
+
+ struct DropDetector(u32);
+
+ impl DropDetector {
+ fn new(num: u32) -> Self {
+ DropDetector(num)
+ }
+ }
+
+ impl Drop for DropDetector {
+ fn drop(&mut self) {
+ assert!(self.0 == 42 || self.0 == 43);
+ }
+ }
+
+ impl Clone for DropDetector {
+ fn clone(&self) -> Self {
+ if self.0 == 42 {
+ panic!("panic on clone")
+ }
+ DropDetector::new(self.0)
+ }
+ }
+
+ /// This is for miri to catch
+ #[test]
+ fn issue_11_testcase3a() {
+ let mut chunk = Chunk::<DropDetector, U3>::new();
+ chunk.push_back(DropDetector::new(42));
+ chunk.push_back(DropDetector::new(42));
+ chunk.push_back(DropDetector::new(43));
+ let _ = chunk.pop_front();
+
+ let _ = std::panic::catch_unwind(|| {
+ let _ = chunk.clone();
+ });
+ }
+
+ struct PanickingIterator {
+ current: u32,
+ panic_at: u32,
+ len: usize,
+ }
+
+ impl Iterator for PanickingIterator {
+ type Item = DropDetector;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let num = self.current;
+
+ if num == self.panic_at {
+ panic!("panicking index")
+ }
+
+ self.current += 1;
+ Some(DropDetector::new(num))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+ }
+
+ impl ExactSizeIterator for PanickingIterator {}
+
+ #[test]
+ fn issue_11_testcase3b() {
+ let _ = std::panic::catch_unwind(|| {
+ let mut chunk = Chunk::<DropDetector, U5>::new();
+ chunk.push_back(DropDetector::new(1));
+ chunk.push_back(DropDetector::new(2));
+ chunk.push_back(DropDetector::new(3));
+
+ chunk.insert_from(
+ 1,
+ PanickingIterator {
+ current: 1,
+ panic_at: 1,
+ len: 1,
+ },
+ );
+ });
+ }
+
+ struct FakeSizeIterator { reported: usize, actual: usize }
+ impl Iterator for FakeSizeIterator {
+ type Item = u8;
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.actual == 0 {
+ None
+ } else {
+ self.actual -= 1;
+ Some(1)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.reported, Some(self.reported))
+ }
+ }
+
+ impl ExactSizeIterator for FakeSizeIterator {
+ fn len(&self) -> usize {
+ self.reported
+ }
+ }
+
+ #[test]
+ fn iterator_too_long() {
+ let mut chunk = Chunk::<u8, U5>::new();
+ chunk.push_back(0);
+ chunk.push_back(1);
+ chunk.push_back(2);
+ chunk.insert_from(1, FakeSizeIterator { reported: 1, actual: 10 });
+
+ let mut chunk = Chunk::<u8, U5>::new();
+ chunk.push_back(1);
+ chunk.insert_from(0, FakeSizeIterator { reported: 1, actual: 10 });
+
+ let mut chunk = Chunk::<u8, U5>::new();
+ chunk.insert_from(0, FakeSizeIterator { reported: 1, actual: 10 });
+ }
+
+ #[test]
+ #[should_panic(expected = "ExactSizeIterator yielded fewer values than advertised")]
+ fn iterator_too_short1() {
+ let mut chunk = Chunk::<u8, U5>::new();
+ chunk.push_back(0);
+ chunk.push_back(1);
+ chunk.push_back(2);
+ chunk.insert_from(1, FakeSizeIterator { reported: 2, actual: 0 });
+ }
+
+ #[test]
+ #[should_panic(expected = "ExactSizeIterator yielded fewer values than advertised")]
+ fn iterator_too_short2() {
+ let mut chunk = Chunk::<u8, U5>::new();
+ chunk.push_back(1);
+ chunk.insert_from(1, FakeSizeIterator { reported: 4, actual: 2 });
+ }
+
+ #[test]
+ fn is_full() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ assert_eq!(false, chunk.is_full());
+ chunk.push_back(i);
+ }
+ assert_eq!(true, chunk.is_full());
+ }
+
+ #[test]
+ fn push_back_front() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 12..20 {
+ chunk.push_back(i);
+ }
+ assert_eq!(8, chunk.len());
+ for i in (0..12).rev() {
+ chunk.push_front(i);
+ }
+ assert_eq!(20, chunk.len());
+ for i in 20..32 {
+ chunk.push_back(i);
+ }
+ assert_eq!(32, chunk.len());
+ let right: Vec<i32> = chunk.into_iter().collect();
+ let left: Vec<i32> = (0..32).collect();
+ assert_eq!(left, right);
+ }
+
+ #[test]
+ fn push_and_pop() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ chunk.push_back(i);
+ }
+ for i in 0..64 {
+ assert_eq!(i, chunk.pop_front());
+ }
+ for i in 0..64 {
+ chunk.push_front(i);
+ }
+ for i in 0..64 {
+ assert_eq!(i, chunk.pop_back());
+ }
+ }
+
+ #[test]
+ fn drop_left() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..6 {
+ chunk.push_back(i);
+ }
+ chunk.drop_left(3);
+ let vec: Vec<i32> = chunk.into_iter().collect();
+ assert_eq!(vec![3, 4, 5], vec);
+ }
+
+ #[test]
+ fn drop_right() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..6 {
+ chunk.push_back(i);
+ }
+ chunk.drop_right(3);
+ let vec: Vec<i32> = chunk.into_iter().collect();
+ assert_eq!(vec![0, 1, 2], vec);
+ }
+
+ #[test]
+ fn split_off() {
+ let mut left = Chunk::<_, U64>::new();
+ for i in 0..6 {
+ left.push_back(i);
+ }
+ let right = left.split_off(3);
+ let left_vec: Vec<i32> = left.into_iter().collect();
+ let right_vec: Vec<i32> = right.into_iter().collect();
+ assert_eq!(vec![0, 1, 2], left_vec);
+ assert_eq!(vec![3, 4, 5], right_vec);
+ }
+
+ #[test]
+ fn append() {
+ let mut left = Chunk::<_, U64>::new();
+ for i in 0..32 {
+ left.push_back(i);
+ }
+ let mut right = Chunk::<_, U64>::new();
+ for i in (32..64).rev() {
+ right.push_front(i);
+ }
+ left.append(&mut right);
+ let out_vec: Vec<i32> = left.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn ref_iter() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ chunk.push_back(i);
+ }
+ let out_vec: Vec<&i32> = chunk.iter().collect();
+ let should_vec_p: Vec<i32> = (0..64).collect();
+ let should_vec: Vec<&i32> = should_vec_p.iter().collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn mut_ref_iter() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ chunk.push_back(i);
+ }
+ let out_vec: Vec<&mut i32> = chunk.iter_mut().collect();
+ let mut should_vec_p: Vec<i32> = (0..64).collect();
+ let should_vec: Vec<&mut i32> = should_vec_p.iter_mut().collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn consuming_iter() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ chunk.push_back(i);
+ }
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn insert_middle() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..32 {
+ chunk.push_back(i);
+ }
+ for i in 33..64 {
+ chunk.push_back(i);
+ }
+ chunk.insert(32, 32);
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn insert_back() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..63 {
+ chunk.push_back(i);
+ }
+ chunk.insert(63, 63);
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn insert_front() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 1..64 {
+ chunk.push_front(64 - i);
+ }
+ chunk.insert(0, 0);
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ #[test]
+ fn remove_value() {
+ let mut chunk = Chunk::<_, U64>::new();
+ for i in 0..64 {
+ chunk.push_back(i);
+ }
+ chunk.remove(32);
+ let out_vec: Vec<i32> = chunk.into_iter().collect();
+ let should_vec: Vec<i32> = (0..32).chain(33..64).collect();
+ assert_eq!(should_vec, out_vec);
+ }
+
+ use crate::tests::DropTest;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[test]
+ fn dropping() {
+ let counter = AtomicUsize::new(0);
+ {
+ let mut chunk: Chunk<DropTest<'_>> = Chunk::new();
+ for _i in 0..20 {
+ chunk.push_back(DropTest::new(&counter))
+ }
+ for _i in 0..20 {
+ chunk.push_front(DropTest::new(&counter))
+ }
+ assert_eq!(40, counter.load(Ordering::Relaxed));
+ for _i in 0..10 {
+ chunk.pop_back();
+ }
+ assert_eq!(30, counter.load(Ordering::Relaxed));
+ }
+ assert_eq!(0, counter.load(Ordering::Relaxed));
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
+ fn unit_on_empty() {
+ Chunk::<usize, U0>::unit(1);
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
+ fn pair_on_empty() {
+ Chunk::<usize, U0>::pair(1, 2);
+ }
+}
diff --git a/vendor/sized-chunks/src/sized_chunk/refpool.rs b/vendor/sized-chunks/src/sized_chunk/refpool.rs
new file mode 100644
index 000000000..9396ff6a0
--- /dev/null
+++ b/vendor/sized-chunks/src/sized_chunk/refpool.rs
@@ -0,0 +1,66 @@
+use core::mem::MaybeUninit;
+
+use ::refpool::{PoolClone, PoolDefault};
+
+use crate::types::ChunkLength;
+use crate::Chunk;
+
+impl<A, N> PoolDefault for Chunk<A, N>
+where
+ N: ChunkLength<A>,
+{
+ unsafe fn default_uninit(target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let left_ptr: *mut usize = &mut (*ptr).left;
+ let right_ptr: *mut usize = &mut (*ptr).right;
+ left_ptr.write(0);
+ right_ptr.write(0);
+ }
+}
+
+impl<A, N> PoolClone for Chunk<A, N>
+where
+ A: Clone,
+ N: ChunkLength<A>,
+{
+ unsafe fn clone_uninit(&self, target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let left_ptr: *mut usize = &mut (*ptr).left;
+ let right_ptr: *mut usize = &mut (*ptr).right;
+ let data_ptr: *mut _ = &mut (*ptr).data;
+ let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast();
+ left_ptr.write(self.left);
+ right_ptr.write(self.right);
+ for index in self.left..self.right {
+ data_ptr.add(index).write((*self.ptr(index)).clone());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use ::refpool::{Pool, PoolRef};
+ use std::iter::FromIterator;
+
+ #[test]
+ fn default_and_clone() {
+ let pool: Pool<Chunk<usize>> = Pool::new(16);
+ let mut ref1 = PoolRef::default(&pool);
+ {
+ let chunk = PoolRef::make_mut(&pool, &mut ref1);
+ chunk.push_back(1);
+ chunk.push_back(2);
+ chunk.push_back(3);
+ }
+ let ref2 = PoolRef::cloned(&pool, &ref1);
+ let ref3 = PoolRef::clone_from(&pool, &Chunk::from_iter(1..=3));
+ assert_eq!(Chunk::<usize>::from_iter(1..=3), *ref1);
+ assert_eq!(Chunk::<usize>::from_iter(1..=3), *ref2);
+ assert_eq!(Chunk::<usize>::from_iter(1..=3), *ref3);
+ assert_eq!(ref1, ref2);
+ assert_eq!(ref1, ref3);
+ assert_eq!(ref2, ref3);
+ assert!(!PoolRef::ptr_eq(&ref1, &ref2));
+ }
+}
diff --git a/vendor/sized-chunks/src/sparse_chunk/iter.rs b/vendor/sized-chunks/src/sparse_chunk/iter.rs
new file mode 100644
index 000000000..460096cda
--- /dev/null
+++ b/vendor/sized-chunks/src/sparse_chunk/iter.rs
@@ -0,0 +1,245 @@
+use bitmaps::{Bitmap, Bits, Iter as BitmapIter};
+
+use super::SparseChunk;
+use crate::types::ChunkLength;
+
+/// An iterator over references to the elements of a `SparseChunk`.
+pub struct Iter<'a, A, N: Bits + ChunkLength<A>> {
+ pub(crate) indices: BitmapIter<'a, N>,
+ pub(crate) chunk: &'a SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for Iter<'a, A, N> {
+ type Item = &'a A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.indices.next().map(|index| &self.chunk.values()[index])
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(SparseChunk::<A, N>::CAPACITY))
+ }
+}
+
+/// An iterator over mutable references to the elements of a `SparseChunk`.
+pub struct IterMut<'a, A, N: Bits + ChunkLength<A>> {
+ pub(crate) bitmap: Bitmap<N>,
+ pub(crate) chunk: &'a mut SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for IterMut<'a, A, N> {
+ type Item = &'a mut A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let Some(index) = self.bitmap.first_index() {
+ self.bitmap.set(index, false);
+ unsafe {
+ let p: *mut A = &mut self.chunk.values_mut()[index];
+ Some(&mut *p)
+ }
+ } else {
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(SparseChunk::<A, N>::CAPACITY))
+ }
+}
+
+/// A draining iterator over the elements of a `SparseChunk`.
+///
+/// "Draining" means that as the iterator yields each element, it's removed from
+/// the `SparseChunk`. When the iterator terminates, the chunk will be empty.
+pub struct Drain<A, N: Bits + ChunkLength<A>> {
+ pub(crate) chunk: SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for Drain<A, N> {
+ type Item = A;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.chunk.pop()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.chunk.len();
+ (len, Some(len))
+ }
+}
+
+/// An iterator over `Option`s of references to the elements of a `SparseChunk`.
+///
+/// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+/// returning an `Option<&A>` for each index.
+pub struct OptionIter<'a, A, N: Bits + ChunkLength<A>> {
+ pub(crate) index: usize,
+ pub(crate) chunk: &'a SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for OptionIter<'a, A, N> {
+ type Item = Option<&'a A>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index < N::USIZE {
+ let result = self.chunk.get(self.index);
+ self.index += 1;
+ Some(result)
+ } else {
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (
+ SparseChunk::<A, N>::CAPACITY - self.index,
+ Some(SparseChunk::<A, N>::CAPACITY - self.index),
+ )
+ }
+}
+
+/// An iterator over `Option`s of mutable references to the elements of a `SparseChunk`.
+///
+/// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+/// returning an `Option<&mut A>` for each index.
+pub struct OptionIterMut<'a, A, N: Bits + ChunkLength<A>> {
+ pub(crate) index: usize,
+ pub(crate) chunk: &'a mut SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for OptionIterMut<'a, A, N> {
+ type Item = Option<&'a mut A>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index < N::USIZE {
+ let result = if self.chunk.map.get(self.index) {
+ unsafe {
+ let p: *mut A = &mut self.chunk.values_mut()[self.index];
+ Some(Some(&mut *p))
+ }
+ } else {
+ Some(None)
+ };
+ self.index += 1;
+ result
+ } else {
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (
+ SparseChunk::<A, N>::CAPACITY - self.index,
+ Some(SparseChunk::<A, N>::CAPACITY - self.index),
+ )
+ }
+}
+
+/// A draining iterator over `Option`s of the elements of a `SparseChunk`.
+///
+/// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+/// returning an `Option<A>` for each index.
+pub struct OptionDrain<A, N: Bits + ChunkLength<A>> {
+ pub(crate) index: usize,
+ pub(crate) chunk: SparseChunk<A, N>,
+}
+
+impl<'a, A, N: Bits + ChunkLength<A>> Iterator for OptionDrain<A, N> {
+ type Item = Option<A>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.index < N::USIZE {
+ let result = self.chunk.remove(self.index);
+ self.index += 1;
+ Some(result)
+ } else {
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (
+ SparseChunk::<A, N>::CAPACITY - self.index,
+ Some(SparseChunk::<A, N>::CAPACITY - self.index),
+ )
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use std::iter::FromIterator;
+ use typenum::U64;
+
+ #[test]
+ fn iter() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let chunk: SparseChunk<usize, U64> = vec.iter().cloned().collect();
+ let vec: Vec<usize> = vec
+ .iter()
+ .cloned()
+ .filter(|v| v.is_some())
+ .map(|v| v.unwrap())
+ .collect();
+ assert!(vec.iter().eq(chunk.iter()));
+ }
+
+ #[test]
+ fn iter_mut() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect();
+ let mut vec: Vec<usize> = vec
+ .iter()
+ .cloned()
+ .filter(|v| v.is_some())
+ .map(|v| v.unwrap())
+ .collect();
+ assert!(vec.iter_mut().eq(chunk.iter_mut()));
+ }
+
+ #[test]
+ fn drain() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect();
+ let vec: Vec<usize> = vec
+ .iter()
+ .cloned()
+ .filter(|v| v.is_some())
+ .map(|v| v.unwrap())
+ .collect();
+ assert!(vec.into_iter().eq(chunk.into_iter()));
+ }
+
+ #[test]
+ fn option_iter() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect();
+ assert!(vec
+ .iter()
+ .cloned()
+ .eq(chunk.option_iter().map(|v| v.cloned())));
+ }
+
+ #[test]
+ fn option_iter_mut() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let mut chunk: SparseChunk<_, U64> = vec.iter().cloned().collect();
+ assert!(vec
+ .iter()
+ .cloned()
+ .eq(chunk.option_iter_mut().map(|v| v.cloned())));
+ }
+
+ #[test]
+ fn option_drain() {
+ let vec: Vec<Option<usize>> =
+ Vec::from_iter((0..64).map(|i| if i % 2 == 0 { Some(i) } else { None }));
+ let chunk: SparseChunk<_, U64> = vec.iter().cloned().collect();
+ assert!(vec.iter().cloned().eq(chunk.option_drain()));
+ }
+}
diff --git a/vendor/sized-chunks/src/sparse_chunk/mod.rs b/vendor/sized-chunks/src/sparse_chunk/mod.rs
new file mode 100644
index 000000000..455f1673a
--- /dev/null
+++ b/vendor/sized-chunks/src/sparse_chunk/mod.rs
@@ -0,0 +1,514 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! A fixed capacity sparse array.
+//!
+//! See [`SparseChunk`](struct.SparseChunk.html)
+
+use core::fmt::{Debug, Error, Formatter};
+use core::iter::FromIterator;
+use core::mem::{self, MaybeUninit};
+use core::ops::Index;
+use core::ops::IndexMut;
+use core::ptr;
+use core::slice::{from_raw_parts, from_raw_parts_mut};
+
+#[cfg(feature = "std")]
+use std::collections::{BTreeMap, HashMap};
+
+use typenum::U64;
+
+use bitmaps::{Bitmap, Bits, Iter as BitmapIter};
+
+use crate::types::ChunkLength;
+
+mod iter;
+pub use self::iter::{Drain, Iter, IterMut, OptionDrain, OptionIter, OptionIterMut};
+
+#[cfg(feature = "refpool")]
+mod refpool;
+
+/// A fixed capacity sparse array.
+///
+/// An inline sparse array of up to `N` items of type `A`, where `N` is an
+/// [`Unsigned`][Unsigned] type level numeral. You can think of it as an array
+/// of `Option<A>`, where the discriminant (whether the value is `Some<A>` or
+/// `None`) is kept in a bitmap instead of adjacent to the value.
+///
+/// Because the bitmap is kept in a primitive type, the maximum value of `N` is
+/// currently 128, corresponding to a type of `u128`. The type of the bitmap
+/// will be the minimum unsigned integer type required to fit the number of bits
+/// required. Thus, disregarding memory alignment rules, the allocated size of a
+/// `SparseChunk` will be `uX` + `A` * `N` where `uX` is the type of the
+/// discriminant bitmap, either `u8`, `u16`, `u32`, `u64` or `u128`.
+///
+/// # Examples
+///
+/// ```rust
+/// # #[macro_use] extern crate sized_chunks;
+/// # extern crate typenum;
+/// # use sized_chunks::SparseChunk;
+/// # use typenum::U20;
+/// // Construct a chunk with a 20 item capacity
+/// let mut chunk = SparseChunk::<i32, U20>::new();
+/// // Set the 18th index to the value 5.
+/// chunk.insert(18, 5);
+/// // Set the 5th index to the value 23.
+/// chunk.insert(5, 23);
+///
+/// assert_eq!(chunk.len(), 2);
+/// assert_eq!(chunk.get(5), Some(&23));
+/// assert_eq!(chunk.get(6), None);
+/// assert_eq!(chunk.get(18), Some(&5));
+/// ```
+///
+/// [Unsigned]: https://docs.rs/typenum/1.10.0/typenum/marker_traits/trait.Unsigned.html
+pub struct SparseChunk<A, N: Bits + ChunkLength<A> = U64> {
+ map: Bitmap<N>,
+ data: MaybeUninit<N::SizedType>,
+}
+
+impl<A, N: Bits + ChunkLength<A>> Drop for SparseChunk<A, N> {
+ fn drop(&mut self) {
+ if mem::needs_drop::<A>() {
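+ // Only indices marked as present in the bitmap hold initialised values, so only those are dropped.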
+ let bits = self.map;
+ for index in &bits {
+ unsafe { ptr::drop_in_place(&mut self.values_mut()[index]) }
+ }
+ }
+ }
+}
+
+impl<A: Clone, N: Bits + ChunkLength<A>> Clone for SparseChunk<A, N> {
+ fn clone(&self) -> Self {
+ let mut out = Self::new();
+ for index in &self.map {
+ out.insert(index, self[index].clone());
+ }
+ out
+ }
+}
+
+impl<A, N> SparseChunk<A, N>
+where
+ N: Bits + ChunkLength<A>,
+{
+ /// The maximum number of elements a `SparseChunk` can contain.
+ pub const CAPACITY: usize = N::USIZE;
+
+ #[inline]
+ fn values(&self) -> &[A] {
+ unsafe { from_raw_parts(&self.data as *const _ as *const A, N::USIZE) }
+ }
+
+ #[inline]
+ fn values_mut(&mut self) -> &mut [A] {
+ unsafe { from_raw_parts_mut(&mut self.data as *mut _ as *mut A, N::USIZE) }
+ }
+
+ /// Copy the value at an index; the caller takes ownership of the copy, so the slot must no longer be treated as inhabited.
+ #[inline]
+ unsafe fn force_read(index: usize, chunk: &Self) -> A {
+ ptr::read(&chunk.values()[index as usize])
+ }
+
+ /// Write a value at an index without dropping whatever the slot previously held.
+ #[inline]
+ unsafe fn force_write(index: usize, value: A, chunk: &mut Self) {
+ ptr::write(&mut chunk.values_mut()[index as usize], value)
+ }
+
+ /// Construct a new empty chunk.
+ pub fn new() -> Self {
+ Self {
+ map: Bitmap::default(),
+ data: MaybeUninit::uninit(),
+ }
+ }
+
+ /// Construct a new chunk with one item.
+ pub fn unit(index: usize, value: A) -> Self {
+ let mut chunk = Self::new();
+ chunk.insert(index, value);
+ chunk
+ }
+
+ /// Construct a new chunk with two items.
+ pub fn pair(index1: usize, value1: A, index2: usize, value2: A) -> Self {
+ let mut chunk = Self::new();
+ chunk.insert(index1, value1);
+ chunk.insert(index2, value2);
+ chunk
+ }
+
+ /// Get the length of the chunk.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Test if the chunk is empty.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.map.len() == 0
+ }
+
+ /// Test if the chunk is at capacity.
+ #[inline]
+ pub fn is_full(&self) -> bool {
+ self.len() == N::USIZE
+ }
+
+ /// Insert a new value at a given index.
+ ///
+ /// Returns the previous value at that index, if any.
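+ ///
+ /// A minimal sketch of the replacement behaviour (the `U8` capacity is chosen purely for illustration):
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U8;
+ /// let mut chunk = SparseChunk::<i32, U8>::new();
+ /// assert_eq!(None, chunk.insert(3, 1));
+ /// // Inserting at an occupied index replaces the value and returns the old one.
+ /// assert_eq!(Some(1), chunk.insert(3, 2));
+ /// assert_eq!(Some(&2), chunk.get(3));
+ /// ```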
+ pub fn insert(&mut self, index: usize, value: A) -> Option<A> {
+ if index >= N::USIZE {
+ panic!("SparseChunk::insert: index out of bounds");
+ }
+ if self.map.set(index, true) {
+ Some(mem::replace(&mut self.values_mut()[index], value))
+ } else {
+ unsafe { SparseChunk::force_write(index, value, self) };
+ None
+ }
+ }
+
+ /// Remove the value at a given index.
+ ///
+ /// Returns the value, or `None` if the index had no value.
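+ ///
+ /// A brief sketch (again with an illustrative `U8` capacity):
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U8;
+ /// let mut chunk = SparseChunk::<i32, U8>::new();
+ /// chunk.insert(5, 23);
+ /// assert_eq!(Some(23), chunk.remove(5));
+ /// // The slot is now empty, so a second removal yields `None`.
+ /// assert_eq!(None, chunk.remove(5));
+ /// ```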
+ pub fn remove(&mut self, index: usize) -> Option<A> {
+ if index >= N::USIZE {
+ panic!("SparseChunk::remove: index out of bounds");
+ }
+ if self.map.set(index, false) {
+ Some(unsafe { SparseChunk::force_read(index, self) })
+ } else {
+ None
+ }
+ }
+
+ /// Remove the value at the lowest index that contains one.
+ ///
+ /// Returns the value that was removed, or `None` if the array was empty.
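+ ///
+ /// A short sketch showing that the lowest occupied index is popped first (illustrative `U8` capacity):
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U8;
+ /// let mut chunk = SparseChunk::<i32, U8>::new();
+ /// chunk.insert(6, 60);
+ /// chunk.insert(3, 30);
+ /// assert_eq!(Some(30), chunk.pop());
+ /// assert_eq!(Some(60), chunk.pop());
+ /// assert_eq!(None, chunk.pop());
+ /// ```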
+ pub fn pop(&mut self) -> Option<A> {
+ self.first_index().and_then(|index| self.remove(index))
+ }
+
+ /// Get the value at a given index.
+ pub fn get(&self, index: usize) -> Option<&A> {
+ if index >= N::USIZE {
+ return None;
+ }
+ if self.map.get(index) {
+ Some(unsafe { self.get_unchecked(index) })
+ } else {
+ None
+ }
+ }
+
+ /// Get a mutable reference to the value at a given index.
+ pub fn get_mut(&mut self, index: usize) -> Option<&mut A> {
+ if index >= N::USIZE {
+ return None;
+ }
+ if self.map.get(index) {
+ Some(unsafe { self.get_unchecked_mut(index) })
+ } else {
+ None
+ }
+ }
+
+ /// Get an unchecked reference to the value at a given index.
+ ///
+ /// # Safety
+ ///
+ /// Uninhabited indices contain uninitialised data, so make sure you validate
+ /// the index before using this method.
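+ ///
+ /// A usage sketch: establish that the index is inhabited (here via a prior `insert`) before reading it unchecked.
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U8;
+ /// let mut chunk = SparseChunk::<i32, U8>::new();
+ /// chunk.insert(2, 10);
+ /// // Index 2 is known to be inhabited, so the unchecked read is sound.
+ /// assert_eq!(&10, unsafe { chunk.get_unchecked(2) });
+ /// ```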
+ pub unsafe fn get_unchecked(&self, index: usize) -> &A {
+ self.values().get_unchecked(index)
+ }
+
+ /// Get an unchecked mutable reference to the value at a given index.
+ ///
+ /// # Safety
+ ///
+ /// Uninhabited indices contain uninitialised data, so make sure you validate
+ /// the index before using this method.
+ pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut A {
+ self.values_mut().get_unchecked_mut(index)
+ }
+
+ /// Make an iterator over the indices which contain values.
+ pub fn indices(&self) -> BitmapIter<'_, N> {
+ self.map.into_iter()
+ }
+
+ /// Find the first index which contains a value.
+ pub fn first_index(&self) -> Option<usize> {
+ self.map.first_index()
+ }
+
+ /// Make an iterator of references to the values contained in the array.
+ pub fn iter(&self) -> Iter<'_, A, N> {
+ Iter {
+ indices: self.indices(),
+ chunk: self,
+ }
+ }
+
+ /// Make an iterator of mutable references to the values contained in the
+ /// array.
+ pub fn iter_mut(&mut self) -> IterMut<'_, A, N> {
+ IterMut {
+ bitmap: self.map,
+ chunk: self,
+ }
+ }
+
+ /// Turn the chunk into an iterator over the values contained within it.
+ pub fn drain(self) -> Drain<A, N> {
+ Drain { chunk: self }
+ }
+
+ /// Make an iterator of pairs of indices and references to the values
+ /// contained in the array.
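+ ///
+ /// A small sketch of the pairing, with indices visited in ascending order (illustrative `U8` capacity):
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U8;
+ /// let mut chunk = SparseChunk::<i32, U8>::new();
+ /// chunk.insert(2, 20);
+ /// chunk.insert(5, 50);
+ /// let entries: Vec<(usize, &i32)> = chunk.entries().collect();
+ /// assert_eq!(vec![(2, &20), (5, &50)], entries);
+ /// ```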
+ pub fn entries(&self) -> impl Iterator<Item = (usize, &A)> {
+ self.indices().zip(self.iter())
+ }
+
+ /// Make an iterator of `Option`s of references to the values contained in the array.
+ ///
+ /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+ /// returning an `Option<&A>` for each index.
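+ ///
+ /// A quick sketch: with a `U4` capacity (chosen for illustration), every one of the four slots is visited.
+ ///
+ /// ```rust
+ /// # use sized_chunks::SparseChunk;
+ /// # use typenum::U4;
+ /// let mut chunk = SparseChunk::<i32, U4>::new();
+ /// chunk.insert(1, 10);
+ /// let slots: Vec<Option<&i32>> = chunk.option_iter().collect();
+ /// assert_eq!(vec![None, Some(&10), None, None], slots);
+ /// ```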
+ pub fn option_iter(&self) -> OptionIter<'_, A, N> {
+ OptionIter {
+ chunk: self,
+ index: 0,
+ }
+ }
+
+ /// Make an iterator of `Option`s of mutable references to the values contained in the array.
+ ///
+ /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+ /// returning an `Option<&mut A>` for each index.
+ pub fn option_iter_mut(&mut self) -> OptionIterMut<'_, A, N> {
+ OptionIterMut {
+ chunk: self,
+ index: 0,
+ }
+ }
+
+ /// Make a draining iterator of `Option`s of the values contained in the array.
+ ///
+ /// Iterates over every index in the `SparseChunk`, from zero to its full capacity,
+ /// returning an `Option<A>` for each index.
+ pub fn option_drain(self) -> OptionDrain<A, N> {
+ OptionDrain {
+ chunk: self,
+ index: 0,
+ }
+ }
+}
+
+impl<A, N: Bits + ChunkLength<A>> Default for SparseChunk<A, N> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<A, N: Bits + ChunkLength<A>> Index<usize> for SparseChunk<A, N> {
+ type Output = A;
+
+ #[inline]
+ fn index(&self, index: usize) -> &Self::Output {
+ self.get(index).unwrap()
+ }
+}
+
+impl<A, N: Bits + ChunkLength<A>> IndexMut<usize> for SparseChunk<A, N> {
+ #[inline]
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ self.get_mut(index).unwrap()
+ }
+}
+
+impl<A, N: Bits + ChunkLength<A>> IntoIterator for SparseChunk<A, N> {
+ type Item = A;
+ type IntoIter = Drain<A, N>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.drain()
+ }
+}
+
+impl<A, N: Bits + ChunkLength<A>> FromIterator<Option<A>> for SparseChunk<A, N> {
+ fn from_iter<I>(iter: I) -> Self
+ where
+ I: IntoIterator<Item = Option<A>>,
+ {
+ let mut out = Self::new();
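+ // Positions come from the enumeration: `Some(value)` fills that slot, `None` leaves it empty.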
+ for (index, value) in iter.into_iter().enumerate() {
+ if let Some(value) = value {
+ out.insert(index, value);
+ }
+ }
+ out
+ }
+}
+
+impl<A, N> PartialEq for SparseChunk<A, N>
+where
+ A: PartialEq,
+ N: Bits + ChunkLength<A>,
+{
+ fn eq(&self, other: &Self) -> bool {
+ if self.map != other.map {
+ return false;
+ }
+ for index in self.indices() {
+ if self.get(index) != other.get(index) {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+#[cfg(feature = "std")]
+impl<A, N> PartialEq<BTreeMap<usize, A>> for SparseChunk<A, N>
+where
+ A: PartialEq,
+ N: Bits + ChunkLength<A>,
+{
+ fn eq(&self, other: &BTreeMap<usize, A>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ for index in self.indices() {
+ if self.get(index) != other.get(&index) {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+#[cfg(feature = "std")]
+impl<A, N> PartialEq<HashMap<usize, A>> for SparseChunk<A, N>
+where
+ A: PartialEq,
+ N: Bits + ChunkLength<A>,
+{
+ fn eq(&self, other: &HashMap<usize, A>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ for index in self.indices() {
+ if self.get(index) != other.get(&index) {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+impl<A, N> Eq for SparseChunk<A, N>
+where
+ A: Eq,
+ N: Bits + ChunkLength<A>,
+{
+}
+
+impl<A, N> Debug for SparseChunk<A, N>
+where
+ A: Debug,
+ N: Bits + ChunkLength<A>,
+{
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ f.write_str("SparseChunk")?;
+ f.debug_map().entries(self.entries()).finish()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use typenum::U32;
+
+ #[test]
+ fn insert_remove_iterate() {
+ let mut chunk: SparseChunk<_, U32> = SparseChunk::new();
+ assert_eq!(None, chunk.insert(5, 5));
+ assert_eq!(None, chunk.insert(1, 1));
+ assert_eq!(None, chunk.insert(24, 42));
+ assert_eq!(None, chunk.insert(22, 22));
+ assert_eq!(Some(42), chunk.insert(24, 24));
+ assert_eq!(None, chunk.insert(31, 31));
+ assert_eq!(Some(24), chunk.remove(24));
+ assert_eq!(4, chunk.len());
+ let indices: Vec<_> = chunk.indices().collect();
+ assert_eq!(vec![1, 5, 22, 31], indices);
+ let values: Vec<_> = chunk.into_iter().collect();
+ assert_eq!(vec![1, 5, 22, 31], values);
+ }
+
+ #[test]
+ fn clone_chunk() {
+ let mut chunk: SparseChunk<_, U32> = SparseChunk::new();
+ assert_eq!(None, chunk.insert(5, 5));
+ assert_eq!(None, chunk.insert(1, 1));
+ assert_eq!(None, chunk.insert(24, 42));
+ assert_eq!(None, chunk.insert(22, 22));
+ let cloned = chunk.clone();
+ let right_indices: Vec<_> = chunk.indices().collect();
+ let left_indices: Vec<_> = cloned.indices().collect();
+ let right: Vec<_> = chunk.into_iter().collect();
+ let left: Vec<_> = cloned.into_iter().collect();
+ assert_eq!(left, right);
+ assert_eq!(left_indices, right_indices);
+ assert_eq!(vec![1, 5, 22, 24], left_indices);
+ assert_eq!(vec![1, 5, 22, 24], right_indices);
+ }
+
+ use crate::tests::DropTest;
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ #[test]
+ fn dropping() {
+ let counter = AtomicUsize::new(0);
+ {
+ let mut chunk: SparseChunk<DropTest<'_>> = SparseChunk::new();
+ for i in 0..40 {
+ chunk.insert(i, DropTest::new(&counter));
+ }
+ assert_eq!(40, counter.load(Ordering::Relaxed));
+ for i in 0..20 {
+ chunk.remove(i);
+ }
+ assert_eq!(20, counter.load(Ordering::Relaxed));
+ }
+ assert_eq!(0, counter.load(Ordering::Relaxed));
+ }
+
+ #[test]
+ fn equality() {
+ let mut c1 = SparseChunk::<usize>::new();
+ for i in 0..32 {
+ c1.insert(i, i);
+ }
+ let mut c2 = c1.clone();
+ assert_eq!(c1, c2);
+ for i in 4..8 {
+ c2.insert(i, 0);
+ }
+ assert_ne!(c1, c2);
+ c2 = c1.clone();
+ for i in 0..16 {
+ c2.remove(i);
+ }
+ assert_ne!(c1, c2);
+ }
+}
diff --git a/vendor/sized-chunks/src/sparse_chunk/refpool.rs b/vendor/sized-chunks/src/sparse_chunk/refpool.rs
new file mode 100644
index 000000000..ca6379e7b
--- /dev/null
+++ b/vendor/sized-chunks/src/sparse_chunk/refpool.rs
@@ -0,0 +1,57 @@
+use core::mem::MaybeUninit;
+
+use bitmaps::{Bitmap, Bits};
+
+use ::refpool::{PoolClone, PoolDefault};
+
+use crate::types::ChunkLength;
+use crate::SparseChunk;
+
+impl<A, N> PoolDefault for SparseChunk<A, N>
+where
+ N: Bits + ChunkLength<A>,
+{
+ unsafe fn default_uninit(target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let map_ptr: *mut Bitmap<N> = &mut (*ptr).map;
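+ // Writing an empty bitmap is all the initialisation needed: with no bits set, the data area is never read.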
+ map_ptr.write(Bitmap::new());
+ }
+}
+
+impl<A, N> PoolClone for SparseChunk<A, N>
+where
+ A: Clone,
+ N: Bits + ChunkLength<A>,
+{
+ unsafe fn clone_uninit(&self, target: &mut MaybeUninit<Self>) {
+ let ptr = target.as_mut_ptr();
+ let map_ptr: *mut Bitmap<N> = &mut (*ptr).map;
+ let data_ptr: *mut _ = &mut (*ptr).data;
+ let data_ptr: *mut A = (*data_ptr).as_mut_ptr().cast();
+ map_ptr.write(self.map);
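+ // Only the inhabited slots need cloning; the rest stay uninitialised, matching the bitmap written above.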
+ for index in &self.map {
+ data_ptr.add(index).write(self[index].clone());
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use ::refpool::{Pool, PoolRef};
+
+ #[test]
+ fn default_and_clone() {
+ let pool: Pool<SparseChunk<usize>> = Pool::new(16);
+ let mut ref1 = PoolRef::default(&pool);
+ {
+ let chunk = PoolRef::make_mut(&pool, &mut ref1);
+ chunk.insert(5, 13);
+ chunk.insert(10, 37);
+ chunk.insert(31, 337);
+ }
+ let ref2 = PoolRef::cloned(&pool, &ref1);
+ assert_eq!(ref1, ref2);
+ assert!(!PoolRef::ptr_eq(&ref1, &ref2));
+ }
+}
diff --git a/vendor/sized-chunks/src/tests.rs b/vendor/sized-chunks/src/tests.rs
new file mode 100644
index 000000000..e97900fe2
--- /dev/null
+++ b/vendor/sized-chunks/src/tests.rs
@@ -0,0 +1,18 @@
+use std::sync::atomic::{AtomicUsize, Ordering};
+
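+/// A live-instance counter for drop tests: `new` increments the shared
+/// `AtomicUsize` and `Drop` decrements it, so a final count of zero means
+/// every value was dropped exactly once.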
+pub(crate) struct DropTest<'a> {
+ counter: &'a AtomicUsize,
+}
+
+impl<'a> DropTest<'a> {
+ pub(crate) fn new(counter: &'a AtomicUsize) -> Self {
+ counter.fetch_add(1, Ordering::Relaxed);
+ DropTest { counter }
+ }
+}
+
+impl<'a> Drop for DropTest<'a> {
+ fn drop(&mut self) {
+ self.counter.fetch_sub(1, Ordering::Relaxed);
+ }
+}
diff --git a/vendor/sized-chunks/src/types.rs b/vendor/sized-chunks/src/types.rs
new file mode 100644
index 000000000..0abd464a8
--- /dev/null
+++ b/vendor/sized-chunks/src/types.rs
@@ -0,0 +1,54 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//! Helper types for chunks.
+
+use core::marker::PhantomData;
+
+use typenum::*;
+
+// Chunk sizes
+
+/// A trait used to decide the size of an array.
+///
+/// `<N as ChunkLength<A>>::SizedType` for a type level integer N will have the
+/// same size as `[A; N]`.
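+///
+/// A rough sanity check of that claim (a sketch, assuming the `types` module
+/// is publicly reachable as `sized_chunks::types`):
+///
+/// ```rust
+/// # use sized_chunks::types::ChunkLength;
+/// # use typenum::U5;
+/// assert_eq!(
+///     core::mem::size_of::<<U5 as ChunkLength<u64>>::SizedType>(),
+///     core::mem::size_of::<[u64; 5]>(),
+/// );
+/// ```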
+pub trait ChunkLength<A>: Unsigned {
+ /// A `Sized` type matching the size of an array of `Self` elements of `A`.
+ type SizedType;
+}
+
+impl<A> ChunkLength<A> for UTerm {
+ type SizedType = ();
+}
+
+#[doc(hidden)]
+#[allow(dead_code)]
+pub struct SizeEven<A, B> {
+ parent1: B,
+ parent2: B,
+ _marker: PhantomData<A>,
+}
+
+#[doc(hidden)]
+#[allow(dead_code)]
+pub struct SizeOdd<A, B> {
+ parent1: B,
+ parent2: B,
+ data: A,
+}
+
+impl<A, N> ChunkLength<A> for UInt<N, B0>
+where
+ N: ChunkLength<A>,
+{
+ type SizedType = SizeEven<A, N::SizedType>;
+}
+
+impl<A, N> ChunkLength<A> for UInt<N, B1>
+where
+ N: ChunkLength<A>,
+{
+ type SizedType = SizeOdd<A, N::SizedType>;
+}