author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000
commit    | 2aa4a82499d4becd2284cdb482213d541b8804dd
tree      | b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/bytes
parent    | Initial commit.
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
77 files changed, 16418 insertions, 0 deletions
diff --git a/third_party/rust/bytes-0.4.9/.cargo-checksum.json b/third_party/rust/bytes-0.4.9/.cargo-checksum.json new file mode 100644 index 0000000000..8301dbd01e --- /dev/null +++ b/third_party/rust/bytes-0.4.9/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"55941e30721c4b104cc8f84473da5acd0cd57903d66e8fd029b8c5160d99ed53","Cargo.toml":"f71e10b42ed8637ed615222f6d9e2af5df707f7f3d9d4fd203358c2af87b7ff0","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"3ca600d7b4175eee634621a870904fe5ec761e6fd623f745423d378dec1bfd51","benches/bytes.rs":"a60889c35cf76faf2b403f94d3ab2831a569f2e1f6e4cc4d5e88f3c26bddb8b0","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","ci/tsan":"905d22267f7493550d123b1482fc1a7f4b24e8cbc4ae4f0e0c2d42383e79ad83","src/buf/buf.rs":"1b5ff3ab694380fe59588b8d195111ba663c5f8901b272b531851deb26e4629a","src/buf/buf_mut.rs":"d2f54e9c64b86c8ddd325d40b3c8e1b2132d361937bac3b5fccb7a81154b89b8","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"b6e35d34533fae229f5209b95a39a1c35485f48a873a1d357d99218c486b0b95","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"546f2ef082656be2639314994d4228833f331747578a9ebf69075d2bcec0ae2d","src/debug.rs":"a8bd8062e7e500fdc5a79cb6c848fb860be8359d95e1c91034777fe33c78d54e","src/lib.rs":"fb61bba13236978f2c3b93cc39eb4a99c02f1ecd539c917a8380e5d344e67706","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb551b8b88b64f669a71f39ef83b","tests/test_buf.rs":"6409f32f734969bebeffa7592fed531953d252c5a639e422b6e4b14ec024b1d5","tests/test_buf_mut.rs":"a6a653d5053340b0254900c33e36df6db1421f821c3e985be0044b1b447ecedc","tests/test_bytes.rs":"92ae28671dee4ab91c7e0366e094b009c547defd8fd1c977520e5ad574eea70d","tests/test_chain.rs":"3fe1f28f3bce4377f8ed506718f95f3ed3ebaf251a1cb43b2705331e3dd6b43a","tests/test_debug.rs":"4cfd44c30d0b8f7c5eb8e8916ad7436e9f538732fe9f4b696dc22b84c31ac64a","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"}
\ No newline at end of file
diff --git a/third_party/rust/bytes-0.4.9/CHANGELOG.md b/third_party/rust/bytes-0.4.9/CHANGELOG.md
new file mode 100644
index 0000000000..1e87d35be2
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/CHANGELOG.md
@@ -0,0 +1,69 @@
+# 0.4.9 (July 12, 2018)
+
+* Add 128 bit number support behind a feature flag (#209).
+* Implement `IntoBuf` for `&mut [u8]`
+
+# 0.4.8 (May 25, 2018)
+
+* Fix panic in `BytesMut` `FromIterator` implementation.
+* Bytes: Recycle space when reserving space in vec mode (#197).
+* Bytes: Add resize fn (#203).
+
+# 0.4.7 (April 27, 2018)
+
+* Make `Buf` and `BufMut` usable as trait objects (#186).
+* impl BorrowMut for BytesMut (#185).
+* Improve accessor performance (#195).
+
+# 0.4.6 (January 8, 2018)
+
+* Implement FromIterator for Bytes/BytesMut (#148).
+* Add `advance` fn to Bytes/BytesMut (#166).
+* Add `unsplit` fn to `BytesMut` (#162, #173).
+* Improvements to Bytes split fns (#92).
+
+# 0.4.5 (August 12, 2017)
+
+* Fix range bug in `Take::bytes`
+* Misc performance improvements
+* Add extra `PartialEq` implementations.
+* Add `Bytes::with_capacity`
+* Implement `AsMut[u8]` for `BytesMut`
+
+# 0.4.4 (May 26, 2017)
+
+* Add serde support behind feature flag
+* Add `extend_from_slice` on `Bytes` and `BytesMut`
+* Add `truncate` and `clear` on `Bytes`
+* Misc additional std trait implementations
+* Misc performance improvements
+
+# 0.4.3 (April 30, 2017)
+
+* Fix Vec::advance_mut bug
+* Bump minimum Rust version to 1.15
+* Misc performance tweaks
+
+# 0.4.2 (April 5, 2017)
+
+* Misc performance tweaks
+* Improved `Debug` implementation for `Bytes`
+* Avoid some incorrect assert panics
+
+# 0.4.1 (March 15, 2017)
+
+* Expose `buf` module and have most types available from there vs. root.
+* Implement `IntoBuf` for `T: Buf`.
+* Add `FromBuf` and `Buf::collect`.
+* Add iterator adapter for `Buf`.
+* Add scatter/gather support to `Buf` and `BufMut`.
+* Add `Buf::chain`.
+* Reduce allocations on repeated calls to `BytesMut::reserve`.
+* Implement `Debug` for more types.
+* Remove `Source` in favor of `IntoBuf`.
+* Implement `Extend` for `BytesMut`.
+
+
+# 0.4.0 (February 24, 2017)
+
+* Initial release
diff --git a/third_party/rust/bytes-0.4.9/Cargo.toml b/third_party/rust/bytes-0.4.9/Cargo.toml
new file mode 100644
index 0000000000..61ed633cbc
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/Cargo.toml
@@ -0,0 +1,41 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "bytes"
+version = "0.4.9"
+authors = ["Carl Lerche <me@carllerche.com>"]
+exclude = [".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*"]
+description = "Types and traits for working with bytes"
+homepage = "https://github.com/carllerche/bytes"
+documentation = "https://carllerche.github.io/bytes/bytes"
+readme = "README.md"
+keywords = ["buffers", "zero-copy", "io"]
+categories = ["network-programming", "data-structures"]
+license = "MIT"
+repository = "https://github.com/carllerche/bytes"
+[package.metadata.docs.rs]
+features = ["i128"]
+[dependencies.byteorder]
+version = "1.1.0"
+
+[dependencies.iovec]
+version = "0.1"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+[dev-dependencies.serde_test]
+version = "1.0"
+
+[features]
+i128 = ["byteorder/i128"]
diff --git a/third_party/rust/bytes-0.4.9/LICENSE b/third_party/rust/bytes-0.4.9/LICENSE
new file mode 100644
index 0000000000..58fb29a123
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2018 Carl Lerche
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/bytes-0.4.9/README.md b/third_party/rust/bytes-0.4.9/README.md
new file mode 100644
index 0000000000..3b2a80b3bb
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/README.md
@@ -0,0 +1,45 @@
+# Bytes
+
+A utility library for working with bytes.
+
+[![Crates.io](https://img.shields.io/crates/v/bytes.svg?maxAge=2592000)](https://crates.io/crates/bytes)
+[![Build Status](https://travis-ci.org/carllerche/bytes.svg?branch=master)](https://travis-ci.org/carllerche/bytes)
+
+[Documentation](https://carllerche.github.io/bytes/bytes/index.html)
+
+## Usage
+
+To use `bytes`, first add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+bytes = "0.4"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate bytes;
+
+use bytes::{Bytes, BytesMut, Buf, BufMut};
+```
+
+## Serde support
+
+Serde support is optional and disabled by default. To enable it, use the
+`serde` feature.
+
+```toml
+[dependencies]
+bytes = { version = "0.4", features = ["serde"] }
+```
+
+## License
+
+This project is licensed under the [MIT license](LICENSE).
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in `bytes` by you, shall be licensed as MIT, without any additional
+terms or conditions.
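The README stops at pulling the crate in. As a quick orientation for the vendored sources that follow, here is a minimal, illustrative sketch (not part of the commit) of the 0.4-era API: the `BufMut` writers, `BytesMut::split_to` plus `freeze`, and the `Buf` readers reached through `IntoBuf`. It assumes `bytes = "0.4"` in `Cargo.toml`; the method names follow the diffs below.

```rust
extern crate bytes;

use bytes::{Buf, BufMut, Bytes, BytesMut, IntoBuf};

fn main() {
    // BytesMut is a unique, growable buffer; the BufMut trait supplies the writers.
    let mut buf = BytesMut::with_capacity(64);
    buf.put_slice(b"hello ");
    buf.put_u16_be(0x0809); // big-endian, as defined in src/buf/buf_mut.rs below

    // split_to hands back the written prefix without copying the bytes;
    // freeze turns it into an immutable, cheaply cloneable Bytes handle.
    let head: Bytes = buf.split_to(6).freeze();
    assert_eq!(&head[..], b"hello ");

    // Reads go through the Buf trait; IntoBuf wraps Bytes in a cursor.
    let mut rd = buf.freeze().into_buf();
    assert_eq!(0x0809, rd.get_u16_be());
    assert!(!rd.has_remaining());
}
```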
+ diff --git a/third_party/rust/bytes-0.4.9/benches/bytes.rs b/third_party/rust/bytes-0.4.9/benches/bytes.rs new file mode 100644 index 0000000000..7a338746b0 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/benches/bytes.rs @@ -0,0 +1,250 @@ +#![feature(test)] + +extern crate bytes; +extern crate test; + +use test::Bencher; +use bytes::{Bytes, BytesMut, BufMut}; + +#[bench] +fn alloc_small(b: &mut Bencher) { + b.iter(|| { + for _ in 0..1024 { + test::black_box(BytesMut::with_capacity(12)); + } + }) +} + +#[bench] +fn alloc_mid(b: &mut Bencher) { + b.iter(|| { + test::black_box(BytesMut::with_capacity(128)); + }) +} + +#[bench] +fn alloc_big(b: &mut Bencher) { + b.iter(|| { + test::black_box(BytesMut::with_capacity(4096)); + }) +} + +#[bench] +fn split_off_and_drop(b: &mut Bencher) { + b.iter(|| { + for _ in 0..1024 { + let v = vec![10; 200]; + let mut b = Bytes::from(v); + test::black_box(b.split_off(100)); + test::black_box(b); + } + }) +} + +#[bench] +fn deref_unique(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_unique_unroll(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..128 { + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_shared(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + let _b2 = buf.split_off(1024); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_inline(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(8); + buf.put(&[0u8; 8][..]); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_two(b: &mut Bencher) { + let mut buf1 = BytesMut::with_capacity(8); + buf1.put(&[0u8; 8][..]); + + let mut buf2 = BytesMut::with_capacity(4096); + buf2.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..512 { + test::black_box(&buf1[..]); + test::black_box(&buf2[..]); + } + }) +} + +#[bench] +fn clone_inline(b: &mut Bencher) { + let bytes = Bytes::from_static(b"hello world"); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_static(b: &mut Bencher) { + let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_arc(b: &mut Bencher) { + let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn alloc_write_split_to_mid(b: &mut Bencher) { + b.iter(|| { + let mut buf = BytesMut::with_capacity(128); + buf.put_slice(&[0u8; 64]); + test::black_box(buf.split_to(64)); + }) +} + +#[bench] +fn drain_write_drain(b: &mut Bencher) { + let data = [0u8; 128]; + + b.iter(|| { + let mut buf = BytesMut::with_capacity(1024); + let mut parts = Vec::with_capacity(8); + + for _ in 0..8 { + buf.put(&data[..]); + parts.push(buf.split_to(128)); + } + + test::black_box(parts); + }) +} + +#[bench] +fn fmt_write(b: &mut Bencher) { + use std::fmt::Write; + let mut buf = BytesMut::with_capacity(128); + 
let s = "foo bar baz quux lorem ipsum dolor et"; + + b.bytes = s.len() as u64; + b.iter(|| { + let _ = write!(buf, "{}", s); + test::black_box(&buf); + unsafe { buf.set_len(0); } + }) +} + +#[bench] +fn from_long_slice(b: &mut Bencher) { + let data = [0u8; 128]; + b.bytes = data.len() as u64; + b.iter(|| { + let buf = BytesMut::from(&data[..]); + test::black_box(buf); + }) +} + +#[bench] +fn slice_empty(b: &mut Bencher) { + b.iter(|| { + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + test::black_box(b.slice(i % 100, i % 100)); + } + }) +} + +#[bench] +fn slice_short_from_arc(b: &mut Bencher) { + b.iter(|| { + // `clone` is to convert to ARC + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + test::black_box(b.slice(1, 2 + i % 10)); + } + }) +} + +// Keep in sync with bytes.rs +#[cfg(target_pointer_width = "64")] +const INLINE_CAP: usize = 4 * 8 - 1; +#[cfg(target_pointer_width = "32")] +const INLINE_CAP: usize = 4 * 4 - 1; + +#[bench] +fn slice_avg_le_inline_from_arc(b: &mut Bencher) { + b.iter(|| { + // `clone` is to convert to ARC + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + // [1, INLINE_CAP] + let len = 1 + i % (INLINE_CAP - 1); + test::black_box(b.slice(i % 10, i % 10 + len)); + } + }) +} + +#[bench] +fn slice_large_le_inline_from_arc(b: &mut Bencher) { + b.iter(|| { + // `clone` is to convert to ARC + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + // [INLINE_CAP - 10, INLINE_CAP] + let len = INLINE_CAP - 9 + i % 10; + test::black_box(b.slice(i % 10, i % 10 + len)); + } + }) +} diff --git a/third_party/rust/bytes-0.4.9/ci/before_deploy.ps1 b/third_party/rust/bytes-0.4.9/ci/before_deploy.ps1 new file mode 100644 index 0000000000..191a30b88d --- /dev/null +++ b/third_party/rust/bytes-0.4.9/ci/before_deploy.ps1 @@ -0,0 +1,23 @@ +# This script takes care of packaging the build artifacts that will go in the +# release zipfile + +$SRC_DIR = $PWD.Path +$STAGE = [System.Guid]::NewGuid().ToString() + +Set-Location $ENV:Temp +New-Item -Type Directory -Name $STAGE +Set-Location $STAGE + +$ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip" + +# TODO Update this to package the right artifacts +Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\hello.exe" '.\' + +7z a "$ZIP" * + +Push-AppveyorArtifact "$ZIP" + +Remove-Item *.* -Force +Set-Location .. 
+Remove-Item $STAGE +Set-Location $SRC_DIR diff --git a/third_party/rust/bytes-0.4.9/ci/before_deploy.sh b/third_party/rust/bytes-0.4.9/ci/before_deploy.sh new file mode 100644 index 0000000000..026dc2898d --- /dev/null +++ b/third_party/rust/bytes-0.4.9/ci/before_deploy.sh @@ -0,0 +1,33 @@ +# This script takes care of building your crate and packaging it for release + +set -ex + +main() { + local src=$(pwd) \ + stage= + + case $TRAVIS_OS_NAME in + linux) + stage=$(mktemp -d) + ;; + osx) + stage=$(mktemp -d -t tmp) + ;; + esac + + test -f Cargo.lock || cargo generate-lockfile + + # TODO Update this to build the artifacts that matter to you + cross rustc --bin hello --target $TARGET --release -- -C lto + + # TODO Update this to package the right artifacts + cp target/$TARGET/release/hello $stage/ + + cd $stage + tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz * + cd $src + + rm -rf $stage +} + +main diff --git a/third_party/rust/bytes-0.4.9/ci/install.sh b/third_party/rust/bytes-0.4.9/ci/install.sh new file mode 100644 index 0000000000..76bb7340d8 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/ci/install.sh @@ -0,0 +1,31 @@ +set -ex + +main() { + curl https://sh.rustup.rs -sSf | \ + sh -s -- -y --default-toolchain $TRAVIS_RUST_VERSION + + local target= + if [ $TRAVIS_OS_NAME = linux ]; then + target=x86_64-unknown-linux-gnu + sort=sort + else + target=x86_64-apple-darwin + sort=gsort # for `sort --sort-version`, from brew's coreutils. + fi + + # This fetches latest stable release + local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \ + | cut -d/ -f3 \ + | grep -E '^v[0-9.]+$' \ + | $sort --version-sort \ + | tail -n1) + echo cross version: $tag + curl -LSfs https://japaric.github.io/trust/install.sh | \ + sh -s -- \ + --force \ + --git japaric/cross \ + --tag $tag \ + --target $target +} + +main diff --git a/third_party/rust/bytes-0.4.9/ci/script.sh b/third_party/rust/bytes-0.4.9/ci/script.sh new file mode 100644 index 0000000000..d1ed7f9242 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/ci/script.sh @@ -0,0 +1,18 @@ +# This script takes care of testing your crate + +set -ex + +main() { + cross build --target $TARGET $EXTRA_ARGS + + if [ ! -z $DISABLE_TESTS ]; then + return + fi + + cross test --target $TARGET $EXTRA_ARGS +} + +# we don't run the "test phase" when doing deploys +if [ -z $TRAVIS_TAG ]; then + main +fi diff --git a/third_party/rust/bytes-0.4.9/ci/tsan b/third_party/rust/bytes-0.4.9/ci/tsan new file mode 100644 index 0000000000..657d4266a3 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/ci/tsan @@ -0,0 +1,21 @@ +# TSAN suppressions file for `bytes` + +# TSAN does not understand fences and `Arc::drop` is implemented using a fence. +# This causes many false positives. +race:Arc*drop +race:arc*Weak*drop + +# `std` mpsc is not used in any Bytes code base. This race is triggered by some +# rust runtime logic. +race:std*mpsc_queue + +# Not sure why this is warning, but it is in the test harness and not the library. +race:TestEvent*clone +race:test::run_tests_console::*closure + +# Probably more fences in std. +race:__call_tls_dtors + +# `is_inline_or_static` is explicitly called concurrently without synchronization. +# The safety explanation can be found in a comment. 
+race:Inner::is_inline_or_static diff --git a/third_party/rust/bytes-0.4.9/src/buf/buf.rs b/third_party/rust/bytes-0.4.9/src/buf/buf.rs new file mode 100644 index 0000000000..b72c8d91cb --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/buf.rs @@ -0,0 +1,1153 @@ +use super::{IntoBuf, Take, Reader, Iter, FromBuf, Chain}; +use byteorder::{BigEndian, ByteOrder, LittleEndian}; +use iovec::IoVec; + +use std::{cmp, io, ptr}; + +macro_rules! buf_get_impl { + ($this:ident, $size:expr, $conv:path) => ({ + // try to convert directly from the bytes + let ret = { + // this Option<ret> trick is to avoid keeping a borrow on self + // when advance() is called (mut borrow) and to call bytes() only once + if let Some(src) = $this.bytes().get(..($size)) { + Some($conv(src)) + } else { + None + } + }; + if let Some(ret) = ret { + // if the direct convertion was possible, advance and return + $this.advance($size); + return ret; + } else { + // if not we copy the bytes in a temp buffer then convert + let mut buf = [0; ($size)]; + $this.copy_to_slice(&mut buf); // (do the advance) + return $conv(&buf); + } + }); + ($this:ident, $buf_size:expr, $conv:path, $len_to_read:expr) => ({ + // The same trick as above does not improve the best case speed. + // It seems to be linked to the way the method is optimised by the compiler + let mut buf = [0; ($buf_size)]; + $this.copy_to_slice(&mut buf[..($len_to_read)]); + return $conv(&buf[..($len_to_read)], $len_to_read); + }); +} + +/// Read bytes from a buffer. +/// +/// A buffer stores bytes in memory such that read operations are infallible. +/// The underlying storage may or may not be in contiguous memory. A `Buf` value +/// is a cursor into the buffer. Reading from `Buf` advances the cursor +/// position. It can be thought of as an efficient `Iterator` for collections of +/// bytes. +/// +/// The simplest `Buf` is a `Cursor` wrapping a `[u8]`. +/// +/// ``` +/// use bytes::Buf; +/// use std::io::Cursor; +/// +/// let mut buf = Cursor::new(b"hello world"); +/// +/// assert_eq!(b'h', buf.get_u8()); +/// assert_eq!(b'e', buf.get_u8()); +/// assert_eq!(b'l', buf.get_u8()); +/// +/// let mut rest = [0; 8]; +/// buf.copy_to_slice(&mut rest); +/// +/// assert_eq!(&rest[..], b"lo world"); +/// ``` +pub trait Buf { + /// Returns the number of bytes between the current position and the end of + /// the buffer. + /// + /// This value is greater than or equal to the length of the slice returned + /// by `bytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world"); + /// + /// assert_eq!(buf.remaining(), 11); + /// + /// buf.get_u8(); + /// + /// assert_eq!(buf.remaining(), 10); + /// ``` + /// + /// # Implementer notes + /// + /// Implementations of `remaining` should ensure that the return value does + /// not change unless a call is made to `advance` or any other function that + /// is documented to change the `Buf`'s current position. + fn remaining(&self) -> usize; + + /// Returns a slice starting at the current position and of length between 0 + /// and `Buf::remaining()`. + /// + /// This is a lower level function. Most operations are done with other + /// functions. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world"); + /// + /// assert_eq!(buf.bytes(), b"hello world"); + /// + /// buf.advance(6); + /// + /// assert_eq!(buf.bytes(), b"world"); + /// ``` + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `Buf::remaining` returns 0, calls to `bytes` should return an + /// empty slice. + fn bytes(&self) -> &[u8]; + + /// Fills `dst` with potentially multiple slices starting at `self`'s + /// current position. + /// + /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vec` enables + /// fetching more than one slice at once. `dst` is a slice of `IoVec` + /// references, enabling the slice to be directly used with [`writev`] + /// without any further conversion. The sum of the lengths of all the + /// buffers in `dst` will be less than or equal to `Buf::remaining()`. + /// + /// The entries in `dst` will be overwritten, but the data **contained** by + /// the slices **will not** be modified. If `bytes_vec` does not fill every + /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices + /// in `self. + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `Buf::remaining` returns 0, calls to `bytes_vec` must return 0 + /// without mutating `dst`. + /// + /// Implementations should also take care to properly handle being called + /// with `dst` being a zero length slice. + /// + /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html + fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize { + if dst.is_empty() { + return 0; + } + + if self.has_remaining() { + dst[0] = self.bytes().into(); + 1 + } else { + 0 + } + } + + /// Advance the internal cursor of the Buf + /// + /// The next call to `bytes` will return a slice starting `cnt` bytes + /// further into the underlying buffer. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world"); + /// + /// assert_eq!(buf.bytes(), b"hello world"); + /// + /// buf.advance(6); + /// + /// assert_eq!(buf.bytes(), b"world"); + /// ``` + /// + /// # Panics + /// + /// This function **may** panic if `cnt > self.remaining()`. + /// + /// # Implementer notes + /// + /// It is recommended for implementations of `advance` to panic if `cnt > + /// self.remaining()`. If the implementation does not panic, the call must + /// behave as if `cnt == self.remaining()`. + /// + /// A call with `cnt == 0` should never panic and be a no-op. + fn advance(&mut self, cnt: usize); + + /// Returns true if there are any more bytes to consume + /// + /// This is equivalent to `self.remaining() != 0`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"a"); + /// + /// assert!(buf.has_remaining()); + /// + /// buf.get_u8(); + /// + /// assert!(!buf.has_remaining()); + /// ``` + fn has_remaining(&self) -> bool { + self.remaining() > 0 + } + + /// Copies bytes from `self` into `dst`. + /// + /// The cursor is advanced by the number of bytes copied. `self` must have + /// enough remaining bytes to fill `dst`. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world"); + /// let mut dst = [0; 5]; + /// + /// buf.copy_to_slice(&mut dst); + /// assert_eq!(b"hello", &dst); + /// assert_eq!(6, buf.remaining()); + /// ``` + /// + /// # Panics + /// + /// This function panics if `self.remaining() < dst.len()` + fn copy_to_slice(&mut self, dst: &mut [u8]) { + let mut off = 0; + + assert!(self.remaining() >= dst.len()); + + while off < dst.len() { + let cnt; + + unsafe { + let src = self.bytes(); + cnt = cmp::min(src.len(), dst.len() - off); + + ptr::copy_nonoverlapping( + src.as_ptr(), dst[off..].as_mut_ptr(), cnt); + + off += src.len(); + } + + self.advance(cnt); + } + } + + /// Gets an unsigned 8 bit integer from `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08 hello"); + /// assert_eq!(8, buf.get_u8()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is no more remaining data in `self`. + fn get_u8(&mut self) -> u8 { + assert!(self.remaining() >= 1); + let ret = self.bytes()[0]; + self.advance(1); + ret + } + + /// Gets a signed 8 bit integer from `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08 hello"); + /// assert_eq!(8, buf.get_i8()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is no more remaining data in `self`. + fn get_i8(&mut self) -> i8 { + assert!(self.remaining() >= 1); + let ret = self.bytes()[0] as i8; + self.advance(1); + ret + } + + #[doc(hidden)] + #[deprecated(note="use get_u16_be or get_u16_le")] + fn get_u16<T: ByteOrder>(&mut self) -> u16 where Self: Sized { + let mut buf = [0; 2]; + self.copy_to_slice(&mut buf); + T::read_u16(&buf) + } + + /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x09 hello"); + /// assert_eq!(0x0809, buf.get_u16_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u16_be(&mut self) -> u16 { + buf_get_impl!(self, 2, BigEndian::read_u16); + } + + /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x09\x08 hello"); + /// assert_eq!(0x0809, buf.get_u16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u16_le(&mut self) -> u16 { + buf_get_impl!(self, 2, LittleEndian::read_u16); + } + + #[doc(hidden)] + #[deprecated(note="use get_i16_be or get_i16_le")] + fn get_i16<T: ByteOrder>(&mut self) -> i16 where Self: Sized { + let mut buf = [0; 2]; + self.copy_to_slice(&mut buf); + T::read_i16(&buf) + } + + /// Gets a signed 16 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 2. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x09 hello"); + /// assert_eq!(0x0809, buf.get_i16_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16_be(&mut self) -> i16 { + buf_get_impl!(self, 2, BigEndian::read_i16); + } + + /// Gets a signed 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x09\x08 hello"); + /// assert_eq!(0x0809, buf.get_i16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16_le(&mut self) -> i16 { + buf_get_impl!(self, 2, LittleEndian::read_i16); + } + + #[doc(hidden)] + #[deprecated(note="use get_u32_be or get_u32_le")] + fn get_u32<T: ByteOrder>(&mut self) -> u32 where Self: Sized { + let mut buf = [0; 4]; + self.copy_to_slice(&mut buf); + T::read_u32(&buf) + } + + /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); + /// assert_eq!(0x0809A0A1, buf.get_u32_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32_be(&mut self) -> u32 { + buf_get_impl!(self, 4, BigEndian::read_u32); + } + + /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); + /// assert_eq!(0x0809A0A1, buf.get_u32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32_le(&mut self) -> u32 { + buf_get_impl!(self, 4, LittleEndian::read_u32); + } + + #[doc(hidden)] + #[deprecated(note="use get_i32_be or get_i32_le")] + fn get_i32<T: ByteOrder>(&mut self) -> i32 where Self: Sized { + let mut buf = [0; 4]; + self.copy_to_slice(&mut buf); + T::read_i32(&buf) + } + + /// Gets a signed 32 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello"); + /// assert_eq!(0x0809A0A1, buf.get_i32_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i32_be(&mut self) -> i32 { + buf_get_impl!(self, 4, BigEndian::read_i32); + } + + /// Gets a signed 32 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello"); + /// assert_eq!(0x0809A0A1, buf.get_i32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_i32_le(&mut self) -> i32 { + buf_get_impl!(self, 4, LittleEndian::read_i32); + } + + #[doc(hidden)] + #[deprecated(note="use get_u64_be or get_u64_le")] + fn get_u64<T: ByteOrder>(&mut self) -> u64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf); + T::read_u64(&buf) + } + + /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); + /// assert_eq!(0x0102030405060708, buf.get_u64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64_be(&mut self) -> u64 { + buf_get_impl!(self, 8, BigEndian::read_u64); + } + + /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x0102030405060708, buf.get_u64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64_le(&mut self) -> u64 { + buf_get_impl!(self, 8, LittleEndian::read_u64); + } + + #[doc(hidden)] + #[deprecated(note="use get_i64_be or get_i64_le")] + fn get_i64<T: ByteOrder>(&mut self) -> i64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf); + T::read_i64(&buf) + } + + /// Gets a signed 64 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"); + /// assert_eq!(0x0102030405060708, buf.get_i64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64_be(&mut self) -> i64 { + buf_get_impl!(self, 8, BigEndian::read_i64); + } + + /// Gets a signed 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x0102030405060708, buf.get_i64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64_le(&mut self) -> i64 { + buf_get_impl!(self, 8, LittleEndian::read_i64); + } + + /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_u128_be(&mut self) -> u128 { + buf_get_impl!(self, 16, BigEndian::read_u128); + } + + /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. 
+ /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_u128_le(&mut self) -> u128 { + buf_get_impl!(self, 16, LittleEndian::read_u128); + } + + /// Gets a signed 128 bit integer from `self` in big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_i128_be(&mut self) -> i128 { + buf_get_impl!(self, 16, BigEndian::read_i128); + } + + /// Gets a signed 128 bit integer from `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"); + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + #[cfg(feature = "i128")] + fn get_i128_le(&mut self) -> i128 { + buf_get_impl!(self, 16, LittleEndian::read_i128); + } + + #[doc(hidden)] + #[deprecated(note="use get_uint_be or get_uint_le")] + fn get_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf[..nbytes]); + T::read_uint(&buf[..nbytes], nbytes) + } + + /// Gets an unsigned n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, BigEndian}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); + /// assert_eq!(0x010203, buf.get_uint_be(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_uint_be(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(self, 8, BigEndian::read_uint, nbytes); + } + + /// Gets an unsigned n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); + /// assert_eq!(0x010203, buf.get_uint_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_uint_le(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(self, 8, LittleEndian::read_uint, nbytes); + } + + #[doc(hidden)] + #[deprecated(note="use get_int_be or get_int_le")] + fn get_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf[..nbytes]); + T::read_int(&buf[..nbytes], nbytes) + } + + /// Gets a signed n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x01\x02\x03 hello"); + /// assert_eq!(0x010203, buf.get_int_be(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int_be(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(self, 8, BigEndian::read_int, nbytes); + } + + /// Gets a signed n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x03\x02\x01 hello"); + /// assert_eq!(0x010203, buf.get_int_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int_le(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(self, 8, LittleEndian::read_int, nbytes); + } + + #[doc(hidden)] + #[deprecated(note="use get_f32_be or get_f32_le")] + fn get_f32<T: ByteOrder>(&mut self) -> f32 where Self: Sized { + let mut buf = [0; 4]; + self.copy_to_slice(&mut buf); + T::read_f32(&buf) + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello"); + /// assert_eq!(1.2f32, buf.get_f32_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f32_be(&mut self) -> f32 { + buf_get_impl!(self, 4, BigEndian::read_f32); + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x9A\x99\x99\x3F hello"); + /// assert_eq!(1.2f32, buf.get_f32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f32_le(&mut self) -> f32 { + buf_get_impl!(self, 4, LittleEndian::read_f32); + } + + #[doc(hidden)] + #[deprecated(note="use get_f64_be or get_f64_le")] + fn get_f64<T: ByteOrder>(&mut self) -> f64 where Self: Sized { + let mut buf = [0; 8]; + self.copy_to_slice(&mut buf); + T::read_f64(&buf) + } + + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"); + /// assert_eq!(1.2f64, buf.get_f64_be()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
+ fn get_f64_be(&mut self) -> f64 { + buf_get_impl!(self, 8, BigEndian::read_f64); + } + + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"); + /// assert_eq!(1.2f64, buf.get_f64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64_le(&mut self) -> f64 { + buf_get_impl!(self, 8, LittleEndian::read_f64); + } + + /// Transforms a `Buf` into a concrete buffer. + /// + /// `collect()` can operate on any value that implements `Buf`, and turn it + /// into the relevent concrete buffer type. + /// + /// # Examples + /// + /// Collecting a buffer and loading the contents into a `Vec<u8>`. + /// + /// ``` + /// use bytes::{Buf, Bytes, IntoBuf}; + /// + /// let buf = Bytes::from(&b"hello world"[..]).into_buf(); + /// let vec: Vec<u8> = buf.collect(); + /// + /// assert_eq!(vec, &b"hello world"[..]); + /// ``` + fn collect<B>(self) -> B + where Self: Sized, + B: FromBuf, + { + B::from_buf(self) + } + + /// Creates an adaptor which will read at most `limit` bytes from `self`. + /// + /// This function returns a new instance of `Buf` which will read at most + /// `limit` bytes. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new("hello world").take(5); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(dst, b"hello"); + /// + /// let mut buf = buf.into_inner(); + /// dst.clear(); + /// dst.put(&mut buf); + /// assert_eq!(dst, b" world"); + /// ``` + fn take(self, limit: usize) -> Take<Self> + where Self: Sized + { + super::take::new(self, limit) + } + + /// Creates an adaptor which will chain this buffer with another. + /// + /// The returned `Buf` instance will first consume all bytes from `self`. + /// Afterwards the output is equivalent to the output of next. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// use bytes::buf::Chain; + /// + /// let buf = Bytes::from(&b"hello "[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// let full: Bytes = buf.collect(); + /// assert_eq!(full[..], b"hello world"[..]); + /// ``` + fn chain<U>(self, next: U) -> Chain<Self, U::Buf> + where U: IntoBuf, + Self: Sized, + { + Chain::new(self, next.into_buf()) + } + + /// Creates a "by reference" adaptor for this instance of `Buf`. + /// + /// The returned adaptor also implements `Buf` and will simply borrow `self`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new("hello world"); + /// let mut dst = vec![]; + /// + /// { + /// let mut reference = buf.by_ref(); + /// dst.put(&mut reference.take(5)); + /// assert_eq!(dst, b"hello"); + /// } // drop our &mut reference so we can use `buf` again + /// + /// dst.clear(); + /// dst.put(&mut buf); + /// assert_eq!(dst, b" world"); + /// ``` + fn by_ref(&mut self) -> &mut Self where Self: Sized { + self + } + + /// Creates an adaptor which implements the `Read` trait for `self`. + /// + /// This function returns a new value which implements `Read` by adapting + /// the `Read` trait functions to the `Buf` trait functions. 
Given that + /// `Buf` operations are infallible, none of the `Read` functions will + /// return with `Err`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, IntoBuf, Bytes}; + /// use std::io::Read; + /// + /// let buf = Bytes::from("hello world").into_buf(); + /// + /// let mut reader = buf.reader(); + /// let mut dst = [0; 1024]; + /// + /// let num = reader.read(&mut dst).unwrap(); + /// + /// assert_eq!(11, num); + /// assert_eq!(&dst[..11], b"hello world"); + /// ``` + fn reader(self) -> Reader<Self> where Self: Sized { + super::reader::new(self) + } + + /// Returns an iterator over the bytes contained by the buffer. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, IntoBuf, Bytes}; + /// + /// let buf = Bytes::from(&b"abc"[..]).into_buf(); + /// let mut iter = buf.iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// assert_eq!(iter.next(), Some(b'b')); + /// assert_eq!(iter.next(), Some(b'c')); + /// assert_eq!(iter.next(), None); + /// ``` + fn iter(self) -> Iter<Self> where Self: Sized { + super::iter::new(self) + } +} + +impl<'a, T: Buf + ?Sized> Buf for &'a mut T { + fn remaining(&self) -> usize { + (**self).remaining() + } + + fn bytes(&self) -> &[u8] { + (**self).bytes() + } + + fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize { + (**self).bytes_vec(dst) + } + + fn advance(&mut self, cnt: usize) { + (**self).advance(cnt) + } +} + +impl<T: Buf + ?Sized> Buf for Box<T> { + fn remaining(&self) -> usize { + (**self).remaining() + } + + fn bytes(&self) -> &[u8] { + (**self).bytes() + } + + fn bytes_vec<'b>(&'b self, dst: &mut [&'b IoVec]) -> usize { + (**self).bytes_vec(dst) + } + + fn advance(&mut self, cnt: usize) { + (**self).advance(cnt) + } +} + +impl<T: AsRef<[u8]>> Buf for io::Cursor<T> { + fn remaining(&self) -> usize { + let len = self.get_ref().as_ref().len(); + let pos = self.position(); + + if pos >= len as u64 { + return 0; + } + + len - pos as usize + } + + fn bytes(&self) -> &[u8] { + let len = self.get_ref().as_ref().len(); + let pos = self.position() as usize; + + if pos >= len { + return Default::default(); + } + + &(self.get_ref().as_ref())[pos..] + } + + fn advance(&mut self, cnt: usize) { + let pos = (self.position() as usize) + .checked_add(cnt).expect("overflow"); + + assert!(pos <= self.get_ref().as_ref().len()); + + self.set_position(pos as u64); + } +} + +impl Buf for Option<[u8; 1]> { + fn remaining(&self) -> usize { + if self.is_some() { + 1 + } else { + 0 + } + } + + fn bytes(&self) -> &[u8] { + self.as_ref().map(AsRef::as_ref) + .unwrap_or(Default::default()) + } + + fn advance(&mut self, cnt: usize) { + if cnt == 0 { + return; + } + + if self.is_none() { + panic!("overflow"); + } else { + assert_eq!(1, cnt); + *self = None; + } + } +} + +// The existance of this function makes the compiler catch if the Buf +// trait is "object-safe" or not. +fn _assert_trait_object(_b: &Buf) {} diff --git a/third_party/rust/bytes-0.4.9/src/buf/buf_mut.rs b/third_party/rust/bytes-0.4.9/src/buf/buf_mut.rs new file mode 100644 index 0000000000..71dbda9afe --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/buf_mut.rs @@ -0,0 +1,1166 @@ +use super::{IntoBuf, Writer}; +use byteorder::{LittleEndian, ByteOrder, BigEndian}; +use iovec::IoVec; + +use std::{cmp, io, ptr, usize}; + +/// A trait for values that provide sequential write access to bytes. +/// +/// Write bytes to a buffer +/// +/// A buffer stores bytes in memory such that write operations are infallible. 
+/// The underlying storage may or may not be in contiguous memory. A `BufMut` +/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor +/// position. +/// +/// The simplest `BufMut` is a `Vec<u8>`. +/// +/// ``` +/// use bytes::BufMut; +/// +/// let mut buf = vec![]; +/// +/// buf.put("hello world"); +/// +/// assert_eq!(buf, b"hello world"); +/// ``` +pub trait BufMut { + /// Returns the number of bytes that can be written from the current + /// position until the end of the buffer is reached. + /// + /// This value is greater than or equal to the length of the slice returned + /// by `bytes_mut`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// use std::io::Cursor; + /// + /// let mut dst = [0; 10]; + /// let mut buf = Cursor::new(&mut dst[..]); + /// + /// assert_eq!(10, buf.remaining_mut()); + /// buf.put("hello"); + /// + /// assert_eq!(5, buf.remaining_mut()); + /// ``` + /// + /// # Implementer notes + /// + /// Implementations of `remaining_mut` should ensure that the return value + /// does not change unless a call is made to `advance_mut` or any other + /// function that is documented to change the `BufMut`'s current position. + fn remaining_mut(&self) -> usize; + + /// Advance the internal cursor of the BufMut + /// + /// The next call to `bytes_mut` will return a slice starting `cnt` bytes + /// further into the underlying buffer. + /// + /// This function is unsafe because there is no guarantee that the bytes + /// being advanced past have been initialized. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = Vec::with_capacity(16); + /// + /// unsafe { + /// buf.bytes_mut()[0] = b'h'; + /// buf.bytes_mut()[1] = b'e'; + /// + /// buf.advance_mut(2); + /// + /// buf.bytes_mut()[0] = b'l'; + /// buf.bytes_mut()[1..3].copy_from_slice(b"lo"); + /// + /// buf.advance_mut(3); + /// } + /// + /// assert_eq!(5, buf.len()); + /// assert_eq!(buf, b"hello"); + /// ``` + /// + /// # Panics + /// + /// This function **may** panic if `cnt > self.remaining_mut()`. + /// + /// # Implementer notes + /// + /// It is recommended for implementations of `advance_mut` to panic if + /// `cnt > self.remaining_mut()`. If the implementation does not panic, + /// the call must behave as if `cnt == self.remaining_mut()`. + /// + /// A call with `cnt == 0` should never panic and be a no-op. + unsafe fn advance_mut(&mut self, cnt: usize); + + /// Returns true if there is space in `self` for more bytes. + /// + /// This is equivalent to `self.remaining_mut() != 0`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// use std::io::Cursor; + /// + /// let mut dst = [0; 5]; + /// let mut buf = Cursor::new(&mut dst); + /// + /// assert!(buf.has_remaining_mut()); + /// + /// buf.put("hello"); + /// + /// assert!(!buf.has_remaining_mut()); + /// ``` + fn has_remaining_mut(&self) -> bool { + self.remaining_mut() > 0 + } + + /// Returns a mutable slice starting at the current BufMut position and of + /// length between 0 and `BufMut::remaining_mut()`. + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// The returned byte slice may represent uninitialized memory. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = Vec::with_capacity(16); + /// + /// unsafe { + /// buf.bytes_mut()[0] = b'h'; + /// buf.bytes_mut()[1] = b'e'; + /// + /// buf.advance_mut(2); + /// + /// buf.bytes_mut()[0] = b'l'; + /// buf.bytes_mut()[1..3].copy_from_slice(b"lo"); + /// + /// buf.advance_mut(3); + /// } + /// + /// assert_eq!(5, buf.len()); + /// assert_eq!(buf, b"hello"); + /// ``` + /// + /// # Implementer notes + /// + /// This function should never panic. `bytes_mut` should return an empty + /// slice **if and only if** `remaining_mut` returns 0. In other words, + /// `bytes_mut` returning an empty slice implies that `remaining_mut` will + /// return 0 and `remaining_mut` returning 0 implies that `bytes_mut` will + /// return an empty slice. + unsafe fn bytes_mut(&mut self) -> &mut [u8]; + + /// Fills `dst` with potentially multiple mutable slices starting at `self`'s + /// current position. + /// + /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vec_mut` + /// enables fetching more than one slice at once. `dst` is a slice of + /// mutable `IoVec` references, enabling the slice to be directly used with + /// [`readv`] without any further conversion. The sum of the lengths of all + /// the buffers in `dst` will be less than or equal to + /// `Buf::remaining_mut()`. + /// + /// The entries in `dst` will be overwritten, but the data **contained** by + /// the slices **will not** be modified. If `bytes_vec_mut` does not fill every + /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices + /// in `self. + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vec_mut` must + /// return 0 without mutating `dst`. + /// + /// Implementations should also take care to properly handle being called + /// with `dst` being a zero length slice. + /// + /// [`readv`]: http://man7.org/linux/man-pages/man2/readv.2.html + unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize { + if dst.is_empty() { + return 0; + } + + if self.has_remaining_mut() { + dst[0] = self.bytes_mut().into(); + 1 + } else { + 0 + } + } + + /// Transfer bytes into `self` from `src` and advance the cursor by the + /// number of bytes written. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// + /// buf.put(b'h'); + /// buf.put(&b"ello"[..]); + /// buf.put(" world"); + /// + /// assert_eq!(buf, b"hello world"); + /// ``` + /// + /// # Panics + /// + /// Panics if `self` does not have enough capacity to contain `src`. + fn put<T: IntoBuf>(&mut self, src: T) where Self: Sized { + use super::Buf; + + let mut src = src.into_buf(); + + assert!(self.remaining_mut() >= src.remaining()); + + while src.has_remaining() { + let l; + + unsafe { + let s = src.bytes(); + let d = self.bytes_mut(); + l = cmp::min(s.len(), d.len()); + + ptr::copy_nonoverlapping( + s.as_ptr(), + d.as_mut_ptr(), + l); + } + + src.advance(l); + unsafe { self.advance_mut(l); } + } + } + + /// Transfer bytes into `self` from `src` and advance the cursor by the + /// number of bytes written. + /// + /// `self` must have enough remaining capacity to contain all of `src`. 
+ /// + /// ``` + /// use bytes::BufMut; + /// use std::io::Cursor; + /// + /// let mut dst = [0; 6]; + /// + /// { + /// let mut buf = Cursor::new(&mut dst); + /// buf.put_slice(b"hello"); + /// + /// assert_eq!(1, buf.remaining_mut()); + /// } + /// + /// assert_eq!(b"hello\0", &dst); + /// ``` + fn put_slice(&mut self, src: &[u8]) { + let mut off = 0; + + assert!(self.remaining_mut() >= src.len(), "buffer overflow"); + + while off < src.len() { + let cnt; + + unsafe { + let dst = self.bytes_mut(); + cnt = cmp::min(dst.len(), src.len() - off); + + ptr::copy_nonoverlapping( + src[off..].as_ptr(), + dst.as_mut_ptr(), + cnt); + + off += cnt; + + } + + unsafe { self.advance_mut(cnt); } + } + } + + /// Writes an unsigned 8 bit integer to `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u8(0x01); + /// assert_eq!(buf, b"\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u8(&mut self, n: u8) { + let src = [n]; + self.put_slice(&src); + } + + /// Writes a signed 8 bit integer to `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i8(0x01); + /// assert_eq!(buf, b"\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i8(&mut self, n: i8) { + let src = [n as u8]; + self.put_slice(&src) + } + + #[doc(hidden)] + #[deprecated(note="use put_u16_be or put_u16_le")] + fn put_u16<T: ByteOrder>(&mut self, n: u16) where Self: Sized { + let mut buf = [0; 2]; + T::write_u16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16_be(0x0809); + /// assert_eq!(buf, b"\x08\x09"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16_be(&mut self, n: u16) { + let mut buf = [0; 2]; + BigEndian::write_u16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16_le(&mut self, n: u16) { + let mut buf = [0; 2]; + LittleEndian::write_u16(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i16_be or put_i16_le")] + fn put_i16<T: ByteOrder>(&mut self, n: i16) where Self: Sized { + let mut buf = [0; 2]; + T::write_i16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 16 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16_be(0x0809); + /// assert_eq!(buf, b"\x08\x09"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
+ fn put_i16_be(&mut self, n: i16) { + let mut buf = [0; 2]; + BigEndian::write_i16(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 16 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i16_le(&mut self, n: i16) { + let mut buf = [0; 2]; + LittleEndian::write_i16(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_u32_be or put_u32_le")] + fn put_u32<T: ByteOrder>(&mut self, n: u32) where Self: Sized { + let mut buf = [0; 4]; + T::write_u32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u32_be(0x0809A0A1); + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32_be(&mut self, n: u32) { + let mut buf = [0; 4]; + BigEndian::write_u32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32_le(&mut self, n: u32) { + let mut buf = [0; 4]; + LittleEndian::write_u32(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i32_be or put_i32_le")] + fn put_i32<T: ByteOrder>(&mut self, n: i32) where Self: Sized { + let mut buf = [0; 4]; + T::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 32 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_be(0x0809A0A1); + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i32_be(&mut self, n: i32) { + let mut buf = [0; 4]; + BigEndian::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 32 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
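The `_be` / `_le` pairs above replace the deprecated `put_u32::<T: ByteOrder>` form. A minimal sanity check of the mirrored byte orders, using only the methods shown in this file:

```rust
use bytes::BufMut;

fn main() {
    let (mut be, mut le) = (vec![], vec![]);
    be.put_u32_be(0xDEADBEEF); // most significant byte first
    le.put_u32_le(0xDEADBEEF); // least significant byte first
    assert_eq!(be, b"\xDE\xAD\xBE\xEF");
    assert_eq!(le, b"\xEF\xBE\xAD\xDE");
}
```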
+ fn put_i32_le(&mut self, n: i32) { + let mut buf = [0; 4]; + LittleEndian::write_i32(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_u64_be or put_u64_le")] + fn put_u64<T: ByteOrder>(&mut self, n: u64) where Self: Sized { + let mut buf = [0; 8]; + T::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_be(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_be(&mut self, n: u64) { + let mut buf = [0; 8]; + BigEndian::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_le(&mut self, n: u64) { + let mut buf = [0; 8]; + LittleEndian::write_u64(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_i64_be or put_i64_le")] + fn put_i64<T: ByteOrder>(&mut self, n: i64) where Self: Sized { + let mut buf = [0; 8]; + T::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_be(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_be(&mut self, n: i64) { + let mut buf = [0; 8]; + BigEndian::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_le(&mut self, n: i64) { + let mut buf = [0; 8]; + LittleEndian::write_i64(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_be(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
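Each `put_*` writer here has a matching `get_*` reader on `Buf` with the same suffix scheme, so values round-trip through a plain `Vec<u8>`. A small sketch (the `get_u64_be`/`get_i64_le` names are taken from the crate's parallel `Buf` API, defined in `src/buf/buf.rs` of this same patch):

```rust
use bytes::{Buf, BufMut, IntoBuf};

fn main() {
    let mut buf = vec![];
    buf.put_u64_be(0x0102030405060708);
    buf.put_i64_le(-2);

    // `Vec<u8>: IntoBuf` yields a `Cursor<Vec<u8>>` for reading back.
    let mut rd = buf.into_buf();
    assert_eq!(0x0102030405060708, rd.get_u64_be());
    assert_eq!(-2, rd.get_i64_le());
}
```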
+ #[cfg(feature = "i128")] + fn put_u128_be(&mut self, n: u128) { + let mut buf = [0; 16]; + BigEndian::write_u128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_u128_le(&mut self, n: u128) { + let mut buf = [0; 16]; + LittleEndian::write_u128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 128 bit integer to `self` in the big-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_be(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_i128_be(&mut self, n: i128) { + let mut buf = [0; 16]; + BigEndian::write_i128(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes a signed 128 bit integer to `self` in little-endian byte order. + /// + /// **NOTE:** This method requires the `i128` feature. + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + #[cfg(feature = "i128")] + fn put_i128_le(&mut self, n: i128) { + let mut buf = [0; 16]; + LittleEndian::write_i128(&mut buf, n); + self.put_slice(&buf) + } + + #[doc(hidden)] + #[deprecated(note="use put_uint_be or put_uint_le")] + fn put_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) where Self: Sized { + let mut buf = [0; 8]; + T::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes an unsigned n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint_be(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint_be(&mut self, n: u64, nbytes: usize) { + let mut buf = [0; 8]; + BigEndian::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. + /// + /// The current position is advanced by `nbytes`. 
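The n-byte writers are handy for wire formats whose fields are not a power-of-two width. A hypothetical framing sketch using `put_uint_be` for a 24-bit length prefix:

```rust
use bytes::BufMut;

fn main() {
    let payload = b"hello";
    let mut frame = vec![];
    // 3-byte big-endian length, then the payload itself.
    frame.put_uint_be(payload.len() as u64, 3);
    frame.put_slice(payload);
    assert_eq!(frame, b"\x00\x00\x05hello");
}
```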
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint_le(&mut self, n: u64, nbytes: usize) { + let mut buf = [0; 8]; + LittleEndian::write_uint(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + #[doc(hidden)] + #[deprecated(note="use put_int_be or put_int_le")] + fn put_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) where Self: Sized { + let mut buf = [0; 8]; + T::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes a signed n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_be(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int_be(&mut self, n: i64, nbytes: usize) { + let mut buf = [0; 8]; + BigEndian::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + /// Writes a signed n-byte integer to `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int_le(&mut self, n: i64, nbytes: usize) { + let mut buf = [0; 8]; + LittleEndian::write_int(&mut buf, n, nbytes); + self.put_slice(&buf[0..nbytes]) + } + + #[doc(hidden)] + #[deprecated(note="use put_f32_be or put_f32_le")] + fn put_f32<T: ByteOrder>(&mut self, n: f32) where Self: Sized { + let mut buf = [0; 4]; + T::write_f32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_be(1.2f32); + /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f32_be(&mut self, n: f32) { + let mut buf = [0; 4]; + BigEndian::write_f32(&mut buf, n); + self.put_slice(&buf) + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_le(1.2f32); + /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
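The float writers store the raw IEEE 754 bit pattern, so reading the same bytes back as an unsigned integer matches `f32::to_bits`. A quick check consistent with the `1.2f32` example bytes shown above:

```rust
use bytes::{Buf, BufMut, IntoBuf};

fn main() {
    let mut buf = vec![];
    buf.put_f32_be(1.2f32);

    // The big-endian bytes reinterpret as the float's bit pattern.
    let bits = buf.into_buf().get_u32_be();
    assert_eq!(bits, 1.2f32.to_bits());
}
```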
+    fn put_f32_le(&mut self, n: f32) {
+        let mut buf = [0; 4];
+        LittleEndian::write_f32(&mut buf, n);
+        self.put_slice(&buf)
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note="use put_f64_be or put_f64_le")]
+    fn put_f64<T: ByteOrder>(&mut self, n: f64) where Self: Sized {
+        let mut buf = [0; 8];
+        T::write_f64(&mut buf, n);
+        self.put_slice(&buf)
+    }
+
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in big-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64_be(1.2f64);
+    /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_f64_be(&mut self, n: f64) {
+        let mut buf = [0; 8];
+        BigEndian::write_f64(&mut buf, n);
+        self.put_slice(&buf)
+    }
+
+    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+    /// `self` in little-endian byte order.
+    ///
+    /// The current position is advanced by 8.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    ///
+    /// let mut buf = vec![];
+    /// buf.put_f64_le(1.2f64);
+    /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if there is not enough remaining capacity in
+    /// `self`.
+    fn put_f64_le(&mut self, n: f64) {
+        let mut buf = [0; 8];
+        LittleEndian::write_f64(&mut buf, n);
+        self.put_slice(&buf)
+    }
+
+    /// Creates a "by reference" adaptor for this instance of `BufMut`.
+    ///
+    /// The returned adapter also implements `BufMut` and will simply borrow
+    /// `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    /// use std::io;
+    ///
+    /// let mut buf = vec![];
+    ///
+    /// {
+    ///     let mut reference = buf.by_ref();
+    ///
+    ///     // Adapt reference to `std::io::Write`.
+    ///     let mut writer = reference.writer();
+    ///
+    ///     // Use the buffer as a writer
+    ///     io::Write::write(&mut writer, &b"hello world"[..]).unwrap();
+    /// } // drop our &mut reference so that we can use `buf` again
+    ///
+    /// assert_eq!(buf, &b"hello world"[..]);
+    /// ```
+    fn by_ref(&mut self) -> &mut Self where Self: Sized {
+        self
+    }
+
+    /// Creates an adaptor which implements the `Write` trait for `self`.
+    ///
+    /// This function returns a new value which implements `Write` by adapting
+    /// the `Write` trait functions to the `BufMut` trait functions. Given that
+    /// `BufMut` operations are infallible, none of the `Write` functions will
+    /// return with `Err`.
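The `by_ref` and `writer` adaptors compose: lending the buffer out through `by_ref` means it is usable again once the borrow ends. A short sketch of that pattern (not from the vendored file), relying on the `BufMut for &mut T` blanket impl shown just below:

```rust
use bytes::BufMut;
use std::io::Write;

fn main() {
    let mut buf = vec![];
    {
        // `by_ref` lends the buffer; `writer` wraps the borrow as io::Write.
        let mut writer = buf.by_ref().writer();
        writer.write_all(b"hello ").unwrap();
    }
    // The borrow has ended, so the buffer is directly usable again.
    buf.put_slice(b"world");
    assert_eq!(buf, b"hello world");
}
```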
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BufMut;
+    /// use std::io::Write;
+    ///
+    /// let mut buf = vec![].writer();
+    ///
+    /// let num = buf.write(&b"hello world"[..]).unwrap();
+    /// assert_eq!(11, num);
+    ///
+    /// let buf = buf.into_inner();
+    ///
+    /// assert_eq!(*buf, b"hello world"[..]);
+    /// ```
+    fn writer(self) -> Writer<Self> where Self: Sized {
+        super::writer::new(self)
+    }
+}
+
+impl<'a, T: BufMut + ?Sized> BufMut for &'a mut T {
+    fn remaining_mut(&self) -> usize {
+        (**self).remaining_mut()
+    }
+
+    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+        (**self).bytes_mut()
+    }
+
+    unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize {
+        (**self).bytes_vec_mut(dst)
+    }
+
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        (**self).advance_mut(cnt)
+    }
+}
+
+impl<T: BufMut + ?Sized> BufMut for Box<T> {
+    fn remaining_mut(&self) -> usize {
+        (**self).remaining_mut()
+    }
+
+    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+        (**self).bytes_mut()
+    }
+
+    unsafe fn bytes_vec_mut<'b>(&'b mut self, dst: &mut [&'b mut IoVec]) -> usize {
+        (**self).bytes_vec_mut(dst)
+    }
+
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        (**self).advance_mut(cnt)
+    }
+}
+
+impl<T: AsMut<[u8]> + AsRef<[u8]>> BufMut for io::Cursor<T> {
+    fn remaining_mut(&self) -> usize {
+        use Buf;
+        self.remaining()
+    }
+
+    /// Advance the internal cursor of the BufMut
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        use Buf;
+        self.advance(cnt);
+    }
+
+    /// Returns a mutable slice starting at the current BufMut position and of
+    /// length between 0 and `BufMut::remaining_mut()`.
+    ///
+    /// The returned byte slice may represent uninitialized memory.
+    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+        let len = self.get_ref().as_ref().len();
+        let pos = self.position() as usize;
+
+        if pos >= len {
+            return Default::default();
+        }
+
+        &mut (self.get_mut().as_mut())[pos..]
+    }
+}
+
+impl BufMut for Vec<u8> {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        usize::MAX - self.len()
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        let len = self.len();
+        let remaining = self.capacity() - len;
+        if cnt > remaining {
+            // Reserve additional capacity, and ensure that the total length
+            // will not overflow usize.
+            self.reserve(cnt);
+        }
+
+        self.set_len(len + cnt);
+    }
+
+    #[inline]
+    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+        use std::slice;
+
+        if self.capacity() == self.len() {
+            self.reserve(64); // Grow the vec
+        }
+
+        let cap = self.capacity();
+        let len = self.len();
+
+        let ptr = self.as_mut_ptr();
+        &mut slice::from_raw_parts_mut(ptr, cap)[len..]
+    }
+}
+
+// The existence of this function makes the compiler verify that the `BufMut`
+// trait is object-safe.
+fn _assert_trait_object(_b: &BufMut) {}
diff --git a/third_party/rust/bytes-0.4.9/src/buf/chain.rs b/third_party/rust/bytes-0.4.9/src/buf/chain.rs
new file mode 100644
index 0000000000..7dd44ab021
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/src/buf/chain.rs
@@ -0,0 +1,226 @@
+use {Buf, BufMut};
+use iovec::IoVec;
+
+/// A `Chain` sequences two buffers.
+///
+/// `Chain` is an adapter that links two underlying buffers and provides a
+/// continuous view across both buffers. It is able to sequence either immutable
+/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
+///
+/// This struct is generally created by calling [`Buf::chain`]. Please see that
+/// function's documentation for more detail.
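Before moving on to `Chain`, the two `BufMut` impls just above are worth contrasting: `Vec<u8>` reports effectively unlimited writable capacity and grows behind the scenes (note the `reserve` calls in `advance_mut` and `bytes_mut`), while a `Cursor` over a fixed slice is bounded by the slice length. A standalone sketch:

```rust
use bytes::BufMut;
use std::io::Cursor;

fn main() {
    // Vec<u8>: writes never fail; the vector grows as needed.
    let mut growable = vec![];
    growable.put_slice(b"grows as needed");
    assert_eq!(growable, b"grows as needed");

    // Cursor over a fixed slice: capacity is exactly the slice length.
    let mut storage = [0u8; 4];
    let mut fixed = Cursor::new(&mut storage[..]);
    assert_eq!(4, fixed.remaining_mut());
    fixed.put_slice(b"full");
    assert_eq!(0, fixed.remaining_mut());
}
```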
+/// +/// # Examples +/// +/// ``` +/// use bytes::{Bytes, Buf, IntoBuf}; +/// use bytes::buf::Chain; +/// +/// let buf = Bytes::from(&b"hello "[..]).into_buf() +/// .chain(Bytes::from(&b"world"[..])); +/// +/// let full: Bytes = buf.collect(); +/// assert_eq!(full[..], b"hello world"[..]); +/// ``` +/// +/// [`Buf::chain`]: trait.Buf.html#method.chain +/// [`Buf`]: trait.Buf.html +/// [`BufMut`]: trait.BufMut.html +#[derive(Debug)] +pub struct Chain<T, U> { + a: T, + b: U, +} + +impl<T, U> Chain<T, U> { + /// Creates a new `Chain` sequencing the provided values. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// use bytes::buf::Chain; + /// + /// let buf = Chain::new( + /// BytesMut::with_capacity(1024), + /// BytesMut::with_capacity(1024)); + /// + /// // Use the chained buffer + /// ``` + pub fn new(a: T, b: U) -> Chain<T, U> { + Chain { + a: a, + b: b, + } + } + + /// Gets a reference to the first underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// + /// let buf = Bytes::from(&b"hello"[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// assert_eq!(buf.first_ref().get_ref()[..], b"hello"[..]); + /// ``` + pub fn first_ref(&self) -> &T { + &self.a + } + + /// Gets a mutable reference to the first underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// + /// let mut buf = Bytes::from(&b"hello "[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// buf.first_mut().set_position(1); + /// + /// let full: Bytes = buf.collect(); + /// assert_eq!(full[..], b"ello world"[..]); + /// ``` + pub fn first_mut(&mut self) -> &mut T { + &mut self.a + } + + /// Gets a reference to the last underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// + /// let buf = Bytes::from(&b"hello"[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// assert_eq!(buf.last_ref().get_ref()[..], b"world"[..]); + /// ``` + pub fn last_ref(&self) -> &U { + &self.b + } + + /// Gets a mutable reference to the last underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// + /// let mut buf = Bytes::from(&b"hello "[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// buf.last_mut().set_position(1); + /// + /// let full: Bytes = buf.collect(); + /// assert_eq!(full[..], b"hello orld"[..]); + /// ``` + pub fn last_mut(&mut self) -> &mut U { + &mut self.b + } + + /// Consumes this `Chain`, returning the underlying values. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, Buf, IntoBuf}; + /// + /// let buf = Bytes::from(&b"hello"[..]).into_buf() + /// .chain(Bytes::from(&b"world"[..])); + /// + /// let (first, last) = buf.into_inner(); + /// assert_eq!(first.get_ref()[..], b"hello"[..]); + /// assert_eq!(last.get_ref()[..], b"world"[..]); + /// ``` + pub fn into_inner(self) -> (T, U) { + (self.a, self.b) + } +} + +impl<T, U> Buf for Chain<T, U> + where T: Buf, + U: Buf, +{ + fn remaining(&self) -> usize { + self.a.remaining() + self.b.remaining() + } + + fn bytes(&self) -> &[u8] { + if self.a.has_remaining() { + self.a.bytes() + } else { + self.b.bytes() + } + } + + fn advance(&mut self, mut cnt: usize) { + let a_rem = self.a.remaining(); + + if a_rem != 0 { + if a_rem >= cnt { + self.a.advance(cnt); + return; + } + + // Consume what is left of a + self.a.advance(a_rem); + + cnt -= a_rem; + } + + self.b.advance(cnt); + } + + fn bytes_vec<'a>(&'a self, dst: &mut [&'a IoVec]) -> usize { + let mut n = self.a.bytes_vec(dst); + n += self.b.bytes_vec(&mut dst[n..]); + n + } +} + +impl<T, U> BufMut for Chain<T, U> + where T: BufMut, + U: BufMut, +{ + fn remaining_mut(&self) -> usize { + self.a.remaining_mut() + self.b.remaining_mut() + } + + unsafe fn bytes_mut(&mut self) -> &mut [u8] { + if self.a.has_remaining_mut() { + self.a.bytes_mut() + } else { + self.b.bytes_mut() + } + } + + unsafe fn advance_mut(&mut self, mut cnt: usize) { + let a_rem = self.a.remaining_mut(); + + if a_rem != 0 { + if a_rem >= cnt { + self.a.advance_mut(cnt); + return; + } + + // Consume what is left of a + self.a.advance_mut(a_rem); + + cnt -= a_rem; + } + + self.b.advance_mut(cnt); + } + + unsafe fn bytes_vec_mut<'a>(&'a mut self, dst: &mut [&'a mut IoVec]) -> usize { + let mut n = self.a.bytes_vec_mut(dst); + n += self.b.bytes_vec_mut(&mut dst[n..]); + n + } +} diff --git a/third_party/rust/bytes-0.4.9/src/buf/from_buf.rs b/third_party/rust/bytes-0.4.9/src/buf/from_buf.rs new file mode 100644 index 0000000000..55f5cef31b --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/from_buf.rs @@ -0,0 +1,117 @@ +use {Buf, BufMut, IntoBuf, Bytes, BytesMut}; + +/// Conversion from a [`Buf`] +/// +/// Implementing `FromBuf` for a type defines how it is created from a buffer. +/// This is common for types which represent byte storage of some kind. +/// +/// [`FromBuf::from_buf`] is rarely called explicitly, and it is instead used +/// through [`Buf::collect`]. See [`Buf::collect`] documentation for more examples. +/// +/// See also [`IntoBuf`]. 
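The `Buf` impl for `Chain` just above makes the seam between the two buffers invisible to the caller: `advance` drains the first buffer, then continues into the second. A small check of that behavior, mirroring the chained-`Bytes` examples used throughout these docs:

```rust
use bytes::{Buf, Bytes, IntoBuf};

fn main() {
    let mut buf = Bytes::from(&b"hello "[..]).into_buf()
        .chain(Bytes::from(&b"world"[..]));

    // Consuming 8 bytes drains all six bytes of the first buffer
    // and two bytes of the second.
    buf.advance(8);
    assert_eq!(buf.remaining(), 3);
    assert_eq!(buf.collect::<Vec<u8>>(), b"rld");
}
```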
+/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use bytes::{Bytes, IntoBuf}; +/// use bytes::buf::FromBuf; +/// +/// let buf = Bytes::from(&b"hello world"[..]).into_buf(); +/// let vec = Vec::from_buf(buf); +/// +/// assert_eq!(vec, &b"hello world"[..]); +/// ``` +/// +/// Using [`Buf::collect`] to implicitly use `FromBuf`: +/// +/// ``` +/// use bytes::{Buf, Bytes, IntoBuf}; +/// +/// let buf = Bytes::from(&b"hello world"[..]).into_buf(); +/// let vec: Vec<u8> = buf.collect(); +/// +/// assert_eq!(vec, &b"hello world"[..]); +/// ``` +/// +/// Implementing `FromBuf` for your type: +/// +/// ``` +/// use bytes::{BufMut, Bytes}; +/// use bytes::buf::{IntoBuf, FromBuf}; +/// +/// // A sample buffer, that's just a wrapper over Vec<u8> +/// struct MyBuffer(Vec<u8>); +/// +/// impl FromBuf for MyBuffer { +/// fn from_buf<B>(buf: B) -> Self where B: IntoBuf { +/// let mut v = Vec::new(); +/// v.put(buf.into_buf()); +/// MyBuffer(v) +/// } +/// } +/// +/// // Now we can make a new buf +/// let buf = Bytes::from(&b"hello world"[..]); +/// +/// // And make a MyBuffer out of it +/// let my_buf = MyBuffer::from_buf(buf); +/// +/// assert_eq!(my_buf.0, &b"hello world"[..]); +/// ``` +/// +/// [`Buf`]: trait.Buf.html +/// [`FromBuf::from_buf`]: #method.from_buf +/// [`Buf::collect`]: trait.Buf.html#method.collect +/// [`IntoBuf`]: trait.IntoBuf.html +pub trait FromBuf { + /// Creates a value from a buffer. + /// + /// See the [type-level documentation](#) for more details. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use bytes::{Bytes, IntoBuf}; + /// use bytes::buf::FromBuf; + /// + /// let buf = Bytes::from(&b"hello world"[..]).into_buf(); + /// let vec = Vec::from_buf(buf); + /// + /// assert_eq!(vec, &b"hello world"[..]); + /// ``` + fn from_buf<T>(buf: T) -> Self where T: IntoBuf; +} + +impl FromBuf for Vec<u8> { + fn from_buf<T>(buf: T) -> Self + where T: IntoBuf + { + let buf = buf.into_buf(); + let mut ret = Vec::with_capacity(buf.remaining()); + ret.put(buf); + ret + } +} + +impl FromBuf for Bytes { + fn from_buf<T>(buf: T) -> Self + where T: IntoBuf + { + BytesMut::from_buf(buf).freeze() + } +} + +impl FromBuf for BytesMut { + fn from_buf<T>(buf: T) -> Self + where T: IntoBuf + { + let buf = buf.into_buf(); + let mut ret = BytesMut::with_capacity(buf.remaining()); + ret.put(buf); + ret + } +} diff --git a/third_party/rust/bytes-0.4.9/src/buf/into_buf.rs b/third_party/rust/bytes-0.4.9/src/buf/into_buf.rs new file mode 100644 index 0000000000..4c3b420728 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/into_buf.rs @@ -0,0 +1,146 @@ +use super::{Buf}; + +use std::io; + +/// Conversion into a `Buf` +/// +/// An `IntoBuf` implementation defines how to convert a value into a `Buf`. +/// This is common for types that represent byte storage of some kind. `IntoBuf` +/// may be implemented directly for types or on references for those types. +/// +/// # Examples +/// +/// ``` +/// use bytes::{Buf, IntoBuf, BigEndian}; +/// +/// let bytes = b"\x00\x01hello world"; +/// let mut buf = bytes.into_buf(); +/// +/// assert_eq!(1, buf.get_u16::<BigEndian>()); +/// +/// let mut rest = [0; 11]; +/// buf.copy_to_slice(&mut rest); +/// +/// assert_eq!(b"hello world", &rest); +/// ``` +pub trait IntoBuf { + /// The `Buf` type that `self` is being converted into + type Buf: Buf; + + /// Creates a `Buf` from a value. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, IntoBuf, BigEndian}; + /// + /// let bytes = b"\x00\x01hello world"; + /// let mut buf = bytes.into_buf(); + /// + /// assert_eq!(1, buf.get_u16::<BigEndian>()); + /// + /// let mut rest = [0; 11]; + /// buf.copy_to_slice(&mut rest); + /// + /// assert_eq!(b"hello world", &rest); + /// ``` + fn into_buf(self) -> Self::Buf; +} + +impl<T: Buf> IntoBuf for T { + type Buf = Self; + + fn into_buf(self) -> Self { + self + } +} + +impl<'a> IntoBuf for &'a [u8] { + type Buf = io::Cursor<&'a [u8]>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(self) + } +} + +impl<'a> IntoBuf for &'a mut [u8] { + type Buf = io::Cursor<&'a mut [u8]>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(self) + } +} + +impl<'a> IntoBuf for &'a str { + type Buf = io::Cursor<&'a [u8]>; + + fn into_buf(self) -> Self::Buf { + self.as_bytes().into_buf() + } +} + +impl IntoBuf for Vec<u8> { + type Buf = io::Cursor<Vec<u8>>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(self) + } +} + +impl<'a> IntoBuf for &'a Vec<u8> { + type Buf = io::Cursor<&'a [u8]>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(&self[..]) + } +} + +// Kind of annoying... but this impl is required to allow passing `&'static +// [u8]` where for<'a> &'a T: IntoBuf is required. +impl<'a> IntoBuf for &'a &'static [u8] { + type Buf = io::Cursor<&'static [u8]>; + + fn into_buf(self) -> Self::Buf { + io::Cursor::new(self) + } +} + +impl<'a> IntoBuf for &'a &'static str { + type Buf = io::Cursor<&'static [u8]>; + + fn into_buf(self) -> Self::Buf { + self.as_bytes().into_buf() + } +} + +impl IntoBuf for String { + type Buf = io::Cursor<Vec<u8>>; + + fn into_buf(self) -> Self::Buf { + self.into_bytes().into_buf() + } +} + +impl<'a> IntoBuf for &'a String { + type Buf = io::Cursor<&'a [u8]>; + + fn into_buf(self) -> Self::Buf { + self.as_bytes().into_buf() + } +} + +impl IntoBuf for u8 { + type Buf = Option<[u8; 1]>; + + fn into_buf(self) -> Self::Buf { + Some([self]) + } +} + +impl IntoBuf for i8 { + type Buf = Option<[u8; 1]>; + + fn into_buf(self) -> Self::Buf { + Some([self as u8; 1]) + } +} diff --git a/third_party/rust/bytes-0.4.9/src/buf/iter.rs b/third_party/rust/bytes-0.4.9/src/buf/iter.rs new file mode 100644 index 0000000000..9345c05b63 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/iter.rs @@ -0,0 +1,116 @@ +use Buf; + +/// Iterator over the bytes contained by the buffer. +/// +/// This struct is created by the [`iter`] method on [`Buf`]. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use bytes::{Buf, IntoBuf, Bytes}; +/// +/// let buf = Bytes::from(&b"abc"[..]).into_buf(); +/// let mut iter = buf.iter(); +/// +/// assert_eq!(iter.next(), Some(b'a')); +/// assert_eq!(iter.next(), Some(b'b')); +/// assert_eq!(iter.next(), Some(b'c')); +/// assert_eq!(iter.next(), None); +/// ``` +/// +/// [`iter`]: trait.Buf.html#method.iter +/// [`Buf`]: trait.Buf.html +#[derive(Debug)] +pub struct Iter<T> { + inner: T, +} + +impl<T> Iter<T> { + /// Consumes this `Iter`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, IntoBuf, Bytes}; + /// + /// let buf = Bytes::from(&b"abc"[..]).into_buf(); + /// let mut iter = buf.iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// let buf = iter.into_inner(); + /// assert_eq!(2, buf.remaining()); + /// ``` + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying `Buf`. 
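Since `IntoBuf` is implemented for `&str` (backed by a `Cursor` over its bytes, per the impls above) and any `Buf` can be walked byte-by-byte via `iter()`, the two compose naturally with standard iterator adaptors. A brief sketch:

```rust
use bytes::{Buf, IntoBuf};

fn main() {
    // &str -> Cursor<&[u8]> -> byte iterator.
    let buf = "abc".into_buf();
    let upper: Vec<u8> = buf.iter().map(|b| b.to_ascii_uppercase()).collect();
    assert_eq!(upper, b"ABC");
}
```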
+ /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, IntoBuf, Bytes}; + /// + /// let buf = Bytes::from(&b"abc"[..]).into_buf(); + /// let mut iter = buf.iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// assert_eq!(2, iter.get_ref().remaining()); + /// ``` + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, IntoBuf, BytesMut}; + /// + /// let buf = BytesMut::from(&b"abc"[..]).into_buf(); + /// let mut iter = buf.iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// iter.get_mut().set_position(0); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + +pub fn new<T>(inner: T) -> Iter<T> { + Iter { inner: inner } +} + +impl<T: Buf> Iterator for Iter<T> { + type Item = u8; + + fn next(&mut self) -> Option<u8> { + if !self.inner.has_remaining() { + return None; + } + + let b = self.inner.bytes()[0]; + self.inner.advance(1); + Some(b) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let rem = self.inner.remaining(); + (rem, Some(rem)) + } +} + +impl<T: Buf> ExactSizeIterator for Iter<T> { } diff --git a/third_party/rust/bytes-0.4.9/src/buf/mod.rs b/third_party/rust/bytes-0.4.9/src/buf/mod.rs new file mode 100644 index 0000000000..1f74e0ab40 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/mod.rs @@ -0,0 +1,37 @@ +//! Utilities for working with buffers. +//! +//! A buffer is any structure that contains a sequence of bytes. The bytes may +//! or may not be stored in contiguous memory. This module contains traits used +//! to abstract over buffers as well as utilities for working with buffer types. +//! +//! # `Buf`, `BufMut` +//! +//! These are the two foundational traits for abstractly working with buffers. +//! They can be thought as iterators for byte structures. They offer additional +//! performance over `Iterator` by providing an API optimized for byte slices. +//! +//! See [`Buf`] and [`BufMut`] for more details. +//! +//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) +//! [`Buf`]: trait.Buf.html +//! [`BufMut`]: trait.BufMut.html + +mod buf; +mod buf_mut; +mod from_buf; +mod chain; +mod into_buf; +mod iter; +mod reader; +mod take; +mod writer; + +pub use self::buf::Buf; +pub use self::buf_mut::BufMut; +pub use self::from_buf::FromBuf; +pub use self::chain::Chain; +pub use self::into_buf::IntoBuf; +pub use self::iter::Iter; +pub use self::reader::Reader; +pub use self::take::Take; +pub use self::writer::Writer; diff --git a/third_party/rust/bytes-0.4.9/src/buf/reader.rs b/third_party/rust/bytes-0.4.9/src/buf/reader.rs new file mode 100644 index 0000000000..59f9c33049 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/reader.rs @@ -0,0 +1,88 @@ +use {Buf}; + +use std::{cmp, io}; + +/// A `Buf` adapter which implements `io::Read` for the inner value. +/// +/// This struct is generally created by calling `reader()` on `Buf`. See +/// documentation of [`reader()`](trait.Buf.html#method.reader) for more +/// details. +#[derive(Debug)] +pub struct Reader<B> { + buf: B, +} + +pub fn new<B>(buf: B) -> Reader<B> { + Reader { buf: buf } +} + +impl<B: Buf> Reader<B> { + /// Gets a reference to the underlying `Buf`. 
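`Reader` is the bridge from the `Buf` world into `std::io` consumers; because `read` is defined in terms of `copy_to_slice` (see the impl at the end of this file), any `Buf` works with generic `io::Read` code. For example:

```rust
use bytes::{Buf, Bytes, IntoBuf};
use std::io::Read;

fn main() {
    let buf = Bytes::from(&b"hello world"[..]).into_buf();
    let mut reader = buf.reader();

    // Hand the adaptor to ordinary std::io code.
    let mut out = String::new();
    reader.read_to_string(&mut out).unwrap();
    assert_eq!(out, "hello world");
}
```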
+ /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::Buf; + /// use std::io::{self, Cursor}; + /// + /// let mut buf = Cursor::new(b"hello world").reader(); + /// + /// assert_eq!(0, buf.get_ref().position()); + /// ``` + pub fn get_ref(&self) -> &B { + &self.buf + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::Buf; + /// use std::io::{self, Cursor}; + /// + /// let mut buf = Cursor::new(b"hello world").reader(); + /// let mut dst = vec![]; + /// + /// buf.get_mut().set_position(2); + /// io::copy(&mut buf, &mut dst).unwrap(); + /// + /// assert_eq!(*dst, b"llo world"[..]); + /// ``` + pub fn get_mut(&mut self) -> &mut B { + &mut self.buf + } + + /// Consumes this `Reader`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::Buf; + /// use std::io::{self, Cursor}; + /// + /// let mut buf = Cursor::new(b"hello world").reader(); + /// let mut dst = vec![]; + /// + /// io::copy(&mut buf, &mut dst).unwrap(); + /// + /// let buf = buf.into_inner(); + /// assert_eq!(0, buf.remaining()); + /// ``` + pub fn into_inner(self) -> B { + self.buf + } +} + +impl<B: Buf + Sized> io::Read for Reader<B> { + fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { + let len = cmp::min(self.buf.remaining(), dst.len()); + + Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); + Ok(len) + } +} diff --git a/third_party/rust/bytes-0.4.9/src/buf/take.rs b/third_party/rust/bytes-0.4.9/src/buf/take.rs new file mode 100644 index 0000000000..a0c8ed479e --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/take.rs @@ -0,0 +1,155 @@ +use {Buf}; + +use std::cmp; + +/// A `Buf` adapter which limits the bytes read from an underlying buffer. +/// +/// This struct is generally created by calling `take()` on `Buf`. See +/// documentation of [`take()`](trait.Buf.html#method.take) for more details. +#[derive(Debug)] +pub struct Take<T> { + inner: T, + limit: usize, +} + +pub fn new<T>(inner: T, limit: usize) -> Take<T> { + Take { + inner: inner, + limit: limit, + } +} + +impl<T> Take<T> { + /// Consumes this `Take`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world").take(2); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"he"[..]); + /// + /// let mut buf = buf.into_inner(); + /// + /// dst.clear(); + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"llo world"[..]); + /// ``` + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world").take(2); + /// + /// assert_eq!(0, buf.get_ref().position()); + /// ``` + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. 
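A common use for `Take` is parsing length-prefixed fields: limit reads to the declared length, then recover the inner buffer for whatever follows. A hypothetical sketch using only the `Buf` methods shown in this diff:

```rust
use bytes::Buf;
use std::io::Cursor;

fn main() {
    // One length byte, then that many bytes of field data.
    let mut buf = Cursor::new(&b"\x05helloworld"[..]);
    let len = buf.get_u8() as usize;

    // Reads through `field` cannot run past the declared length.
    let mut field = buf.take(len);
    let mut name = vec![0u8; len];
    field.copy_to_slice(&mut name);
    assert_eq!(name, b"hello");

    // Recover the cursor and keep going with the remainder.
    let rest = field.into_inner();
    assert_eq!(rest.collect::<Vec<u8>>(), b"world");
}
```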
+ /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world").take(2); + /// let mut dst = vec![]; + /// + /// buf.get_mut().set_position(2); + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"ll"[..]); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the maximum number of bytes that can be read. + /// + /// # Note + /// + /// If the inner `Buf` has fewer bytes than indicated by this method then + /// that is the actual number of available bytes. + /// + /// # Examples + /// + /// ```rust + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world").take(2); + /// + /// assert_eq!(2, buf.limit()); + /// assert_eq!(b'h', buf.get_u8()); + /// assert_eq!(1, buf.limit()); + /// ``` + pub fn limit(&self) -> usize { + self.limit + } + + /// Sets the maximum number of bytes that can be read. + /// + /// # Note + /// + /// If the inner `Buf` has fewer bytes than `lim` then that is the actual + /// number of available bytes. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut}; + /// use std::io::Cursor; + /// + /// let mut buf = Cursor::new(b"hello world").take(2); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"he"[..]); + /// + /// dst.clear(); + /// + /// buf.set_limit(3); + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"llo"[..]); + /// ``` + pub fn set_limit(&mut self, lim: usize) { + self.limit = lim + } +} + +impl<T: Buf> Buf for Take<T> { + fn remaining(&self) -> usize { + cmp::min(self.inner.remaining(), self.limit) + } + + fn bytes(&self) -> &[u8] { + let bytes = self.inner.bytes(); + &bytes[..cmp::min(bytes.len(), self.limit)] + } + + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.limit); + self.inner.advance(cnt); + self.limit -= cnt; + } +} diff --git a/third_party/rust/bytes-0.4.9/src/buf/writer.rs b/third_party/rust/bytes-0.4.9/src/buf/writer.rs new file mode 100644 index 0000000000..38a739aa66 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/buf/writer.rs @@ -0,0 +1,88 @@ +use BufMut; + +use std::{cmp, io}; + +/// A `BufMut` adapter which implements `io::Write` for the inner value. +/// +/// This struct is generally created by calling `writer()` on `BufMut`. See +/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more +/// details. +#[derive(Debug)] +pub struct Writer<B> { + buf: B, +} + +pub fn new<B>(buf: B) -> Writer<B> { + Writer { buf: buf } +} + +impl<B: BufMut> Writer<B> { + /// Gets a reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::BufMut; + /// + /// let mut buf = Vec::with_capacity(1024).writer(); + /// + /// assert_eq!(1024, buf.get_ref().capacity()); + /// ``` + pub fn get_ref(&self) -> &B { + &self.buf + } + + /// Gets a mutable reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::BufMut; + /// + /// let mut buf = vec![].writer(); + /// + /// buf.get_mut().reserve(1024); + /// + /// assert_eq!(1024, buf.get_ref().capacity()); + /// ``` + pub fn get_mut(&mut self) -> &mut B { + &mut self.buf + } + + /// Consumes this `Writer`, returning the underlying value. 
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use bytes::BufMut;
+    /// use std::io::{self, Cursor};
+    ///
+    /// let mut buf = vec![].writer();
+    /// let mut src = Cursor::new(b"hello world");
+    ///
+    /// io::copy(&mut src, &mut buf).unwrap();
+    ///
+    /// let buf = buf.into_inner();
+    /// assert_eq!(*buf, b"hello world"[..]);
+    /// ```
+    pub fn into_inner(self) -> B {
+        self.buf
+    }
+}
+
+impl<B: BufMut + Sized> io::Write for Writer<B> {
+    fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+        let n = cmp::min(self.buf.remaining_mut(), src.len());
+
+        self.buf.put(&src[0..n]);
+        Ok(n)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
diff --git a/third_party/rust/bytes-0.4.9/src/bytes.rs b/third_party/rust/bytes-0.4.9/src/bytes.rs
new file mode 100644
index 0000000000..89244dd406
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/src/bytes.rs
@@ -0,0 +1,2875 @@
+use {IntoBuf, Buf, BufMut};
+use buf::Iter;
+use debug;
+
+use std::{cmp, fmt, mem, hash, ops, slice, ptr, usize};
+use std::borrow::{Borrow, BorrowMut};
+use std::io::Cursor;
+use std::sync::atomic::{self, AtomicUsize, AtomicPtr};
+use std::sync::atomic::Ordering::{Relaxed, Acquire, Release, AcqRel};
+use std::iter::{FromIterator, Iterator};
+
+/// A reference counted contiguous slice of memory.
+///
+/// `Bytes` is an efficient container for storing and operating on contiguous
+/// slices of memory. It is intended for use primarily in networking code, but
+/// could have applications elsewhere as well.
+///
+/// `Bytes` values facilitate zero-copy network programming by allowing multiple
+/// `Bytes` objects to point to the same underlying memory. This is managed by
+/// using a reference count to track when the memory is no longer needed and can
+/// be freed.
+///
+/// ```
+/// use bytes::Bytes;
+///
+/// let mut mem = Bytes::from(&b"Hello world"[..]);
+/// let a = mem.slice(0, 5);
+///
+/// assert_eq!(&a[..], b"Hello");
+///
+/// let b = mem.split_to(6);
+///
+/// assert_eq!(&mem[..], b"world");
+/// assert_eq!(&b[..], b"Hello ");
+/// ```
+///
+/// # Memory layout
+///
+/// The `Bytes` struct itself is fairly small, limited to a pointer to the
+/// memory and 4 `usize` fields used to track information about which segment of
+/// the underlying memory the `Bytes` handle has access to.
+///
+/// The memory layout looks like this:
+///
+/// ```text
+/// +-------+
+/// | Bytes |
+/// +-------+
+///  /      \_____
+/// |             \
+/// v              v
+/// +-----+------------------------------------+
+/// | Arc |         |      Data     |          |
+/// +-----+------------------------------------+
+/// ```
+///
+/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory
+/// slice and a pointer to the start of the region visible to the handle.
+/// `Bytes` also tracks the length of its view into the memory.
+///
+/// # Sharing
+///
+/// The memory itself is reference counted, and multiple `Bytes` objects may
+/// point to the same region. Each `Bytes` handle points to a different section
+/// within the memory region, and `Bytes` handles may or may not have
+/// overlapping views into the memory.
+///
+/// ```text
+///
+///    Arc ptrs                   +---------+
+///    ________________________ / | Bytes 2 |
+///   /                           +---------+
+///  /          +-----------+     |         |
+/// |_________/ |  Bytes 1  |     |         |
+/// |           +-----------+     |         |
+/// |           |           | ___/ data     | tail
+/// |      data |      tail |/              |
+/// v           v           v               v
+/// +-----+---------------------------------+-----+
+/// | Arc |     |           |               |     |
+/// +-----+---------------------------------+-----+
+/// ```
+///
+/// # Mutating
+///
+/// While `Bytes` handles may potentially represent overlapping views of the
+/// underlying memory slice and may not be mutated, `BytesMut` handles are
+/// guaranteed to be the only handle able to view that slice of memory. As such,
+/// `BytesMut` handles are able to mutate the underlying memory. Note that
+/// holding a unique view to a region of memory does not mean that there are no
+/// other `Bytes` and `BytesMut` handles with disjoint views of the underlying
+/// memory.
+///
+/// # Inline bytes
+///
+/// As an optimization, when the slice referenced by a `Bytes` or `BytesMut`
+/// handle is small enough [^1], `with_capacity` will avoid the allocation by
+/// inlining the slice directly in the handle. In this case, a clone is no
+/// longer "shallow" and the data will be copied. Converting from a `Vec` will
+/// never use inlining.
+///
+/// [^1]: Small enough: 31 bytes on 64 bit systems, 15 on 32 bit systems.
+///
+pub struct Bytes {
+    inner: Inner,
+}
+
+/// A unique reference to a contiguous slice of memory.
+///
+/// `BytesMut` represents a unique view into a potentially shared memory region.
+/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
+/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and
+/// allocations.
+///
+/// For more detail, see [Bytes](struct.Bytes.html).
+///
+/// # Growth
+///
+/// One key difference from `Vec<u8>` is that most operations **do not
+/// implicitly grow the buffer**. This means that calling `my_bytes.put("hello
+/// world");` could panic if `my_bytes` does not have enough capacity. Before
+/// writing to the buffer, ensure that there is enough remaining capacity by
+/// calling `my_bytes.remaining_mut()`. In general, avoiding calls to `reserve`
+/// is preferable.
+///
+/// The only exception is `extend` which implicitly reserves required capacity.
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::with_capacity(64);
+///
+/// buf.put(b'h');
+/// buf.put(b'e');
+/// buf.put("llo");
+///
+/// assert_eq!(&buf[..], b"hello");
+///
+/// // Freeze the buffer so that it can be shared
+/// let a = buf.freeze();
+///
+/// // This does not allocate, instead `b` points to the same memory.
+/// let b = a.clone();
+///
+/// assert_eq!(&a[..], b"hello");
+/// assert_eq!(&b[..], b"hello");
+/// ```
+pub struct BytesMut {
+    inner: Inner,
+}
+
+// Both `Bytes` and `BytesMut` are backed by `Inner` and functions are delegated
+// to `Inner` functions. The `Bytes` and `BytesMut` shims ensure that functions
+// that mutate the underlying buffer are only performed when the data range
+// being mutated is only available via a single `BytesMut` handle.
+//
+// # Data storage modes
+//
+// The goal of `bytes` is to be as efficient as possible across a wide range of
+// potential usage patterns. As such, `bytes` needs to be able to handle buffers
+// that are never shared, shared on a single thread, and shared across many
+// threads. `bytes` also needs to handle both tiny buffers as well as very large
+// buffers.
For example, [Cassandra](http://cassandra.apache.org) values have
+// been known to be in the hundreds of megabytes, and HTTP header values can be a
+// few characters in size.
+//
+// To achieve high performance in these various situations, `Bytes` and
+// `BytesMut` use different strategies for storing the buffer depending on the
+// usage pattern.
+//
+// ## Delayed `Arc` allocation
+//
+// When a `Bytes` or `BytesMut` is first created, there is only one outstanding
+// handle referencing the buffer. Since sharing is not yet required, an `Arc`* is
+// not used and the buffer is backed by a `Vec<u8>` directly. Using an
+// `Arc<Vec<u8>>` requires two allocations, so if the buffer ends up never being
+// shared, that allocation is avoided.
+//
+// When sharing does become necessary (`clone`, `split_to`, `split_off`), that
+// is when the buffer is promoted to being shareable. The `Vec<u8>` is moved
+// into an `Arc` and both the original handle and the new handle use the same
+// buffer via the `Arc`.
+//
+// * `Arc` is being used to signify an atomically reference counted cell. We
+// don't use the `Arc` implementation provided by `std` and instead use our own.
+// This ends up simplifying a number of the `unsafe` code snippets.
+//
+// ## Inlining small buffers
+//
+// The `Bytes` / `BytesMut` structs require 4 pointer sized fields. On 64 bit
+// systems, this ends up being 32 bytes, which is actually a lot of storage for
+// cases where `Bytes` is being used to represent small byte strings, such as
+// HTTP header names and values.
+//
+// To avoid any allocation at all in these cases, `Bytes` will use the struct
+// itself for storing the buffer, reserving 1 byte for meta data. This means
+// that, on 64 bit systems, 31 byte buffers require no allocation at all.
+//
+// The byte used for metadata stores a 2-bit flag used to indicate that the
+// buffer is stored inline as well as 6 bits for tracking the buffer length (the
+// return value of `Bytes::len`).
+//
+// ## Static buffers
+//
+// `Bytes` can also represent a static buffer, which is created with
+// `Bytes::from_static`. No copying or allocations are required for tracking
+// static buffers. The pointer to the `&'static [u8]`, the length, and a flag
+// tracking that the `Bytes` instance represents a static buffer is stored in
+// the `Bytes` struct.
+//
+// # Struct layout
+//
+// Both `Bytes` and `BytesMut` are wrappers around `Inner`, which provides the
+// data fields as well as all of the function implementations.
+//
+// The `Inner` struct is carefully laid out in order to support the
+// functionality described above as well as being as small as possible. Size is
+// important as growing the size of the `Bytes` struct from 32 bytes to 40 bytes
+// added as much as 15% overhead in benchmarks using `Bytes` in an HTTP header
+// map structure.
+//
+// The `Inner` struct contains the following fields:
+//
+// * `ptr: *mut u8`
+// * `len: usize`
+// * `cap: usize`
+// * `arc: AtomicPtr<Shared>`
+//
+// ## `ptr: *mut u8`
+//
+// A pointer to start of the handle's buffer view. When backed by a `Vec<u8>`,
+// this is always the `Vec`'s pointer. When backed by an `Arc<Vec<u8>>`, `ptr`
+// may have been shifted to point somewhere inside the buffer.
+//
+// When in "inlined" mode, `ptr` is used as part of the inlined buffer.
+//
+// ## `len: usize`
+//
+// The length of the handle's buffer view. When backed by a `Vec<u8>`, this is
+// always the `Vec`'s length. The slice represented by `ptr` and `len` should
+// (ideally) always be initialized memory.
+//
+// When in "inlined" mode, `len` is used as part of the inlined buffer.
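The "lower two bits of a pointer are always zero" trick that the `arc` field relies on (detailed just below) can be demonstrated in isolation. A standalone illustration, entirely separate from the crate's internals:

```rust
fn main() {
    const KIND_MASK: usize = 0b11;

    // Heap allocations are at least word-aligned, so a pointer's low two
    // bits are zero and can temporarily carry a storage-kind flag.
    let boxed = Box::new(0u64);
    let addr = Box::into_raw(boxed) as usize;
    assert_eq!(addr & KIND_MASK, 0);

    let tagged = addr | 0b01;             // stash a flag in the low bits
    assert_eq!(tagged & KIND_MASK, 0b01); // recover the flag
    let untagged = tagged & !KIND_MASK;   // recover the pointer

    // Reconstitute the Box so the allocation is freed.
    unsafe { drop(Box::from_raw(untagged as *mut u64)); }
}
```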
+// ## `cap: usize`
+//
+// The capacity of the handle's buffer view. When backed by a `Vec<u8>`, this is
+// always the `Vec`'s capacity. The slice represented by `ptr+len` and `cap-len`
+// may or may not be initialized memory.
+//
+// When in "inlined" mode, `cap` is used as part of the inlined buffer.
+//
+// ## `arc: AtomicPtr<Shared>`
+//
+// When `Inner` is in allocated mode (backed by Vec<u8> or Arc<Vec<u8>>), this
+// will be the pointer to the `Arc` structure tracking the ref count for the
+// underlying buffer. When the pointer is null, then the `Arc` has not been
+// allocated yet and `self` is the only outstanding handle for the underlying
+// buffer.
+//
+// The lower two bits of `arc` are used to track the storage mode of `Inner`.
+// `0b01` indicates inline storage, `0b10` indicates static storage, and `0b11`
+// indicates vector storage, not yet promoted to Arc. Since pointers to
+// allocated structures are aligned, the lower two bits of a pointer will always
+// be 0. This allows disambiguating between a pointer and the two flags.
+//
+// When in "inlined" mode, the least significant byte of `arc` is also used to
+// store the length of the buffer view (vs. the capacity, which is a constant).
+//
+// The rest of `arc`'s bytes are used as part of the inline buffer, which means
+// that those bytes need to be located next to the `ptr`, `len`, and `cap`
+// fields, which make up the rest of the inline buffer. This requires special
+// casing the layout of `Inner` depending on whether the target platform is big
+// or little endian.
+//
+// On little endian platforms, the `arc` field must be the first field in the
+// struct. On big endian platforms, the `arc` field must be the last field in
+// the struct. Since a deterministic struct layout is required, `Inner` is
+// annotated with `#[repr(C)]`.
+//
+// # Thread safety
+//
+// `Bytes::clone()` returns a new `Bytes` handle with no copying. This is done
+// by bumping the buffer ref count and returning a new struct pointing to the
+// same buffer. However, the `Arc` structure is lazily allocated. This means
+// that if `Bytes` is stored itself in an `Arc` (`Arc<Bytes>`), the `clone`
+// function can be called concurrently from multiple threads. This is why an
+// `AtomicPtr` is used for the `arc` field vs. a `*const`.
+//
+// Care is taken to ensure that the need for synchronization is minimized. Most
+// operations do not require any synchronization.
+//
+#[cfg(target_endian = "little")]
+#[repr(C)]
+struct Inner {
+    // WARNING: Do not access the fields directly unless you know what you are
+    // doing. Instead, use the fns. See implementation comment above.
+    arc: AtomicPtr<Shared>,
+    ptr: *mut u8,
+    len: usize,
+    cap: usize,
+}
+
+#[cfg(target_endian = "big")]
+#[repr(C)]
+struct Inner {
+    // WARNING: Do not access the fields directly unless you know what you are
+    // doing. Instead, use the fns. See implementation comment above.
+    ptr: *mut u8,
+    len: usize,
+    cap: usize,
+    arc: AtomicPtr<Shared>,
+}
+
+// Thread-safe reference-counted container for the shared storage. This is
+// mostly the same as `std::sync::Arc` but without the weak counter. The ref
+// counting fns are based on the ones found in `std`.
+//
+// The main reason to use `Shared` instead of `std::sync::Arc` is that it ends
+// up making the overall code simpler and easier to reason about. This is due to
This is due to +// some of the logic around setting `Inner::arc` and other ways the `arc` field +// is used. Using `Arc` ended up requiring a number of funky transmutes and +// other shenanigans to make it work. +struct Shared { + vec: Vec<u8>, + original_capacity_repr: usize, + ref_count: AtomicUsize, +} + +// Buffer storage strategy flags. +const KIND_ARC: usize = 0b00; +const KIND_INLINE: usize = 0b01; +const KIND_STATIC: usize = 0b10; +const KIND_VEC: usize = 0b11; +const KIND_MASK: usize = 0b11; + +// The max original capacity value. Any `Bytes` allocated with a greater initial +// capacity will default to this. +const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; +// The original capacity algorithm will not take effect unless the originally +// allocated capacity was at least 1kb in size. +const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; +// The original capacity is stored in powers of 2 starting at 1kb to a max of +// 64kb. Representing it as such requires only 3 bits of storage. +const ORIGINAL_CAPACITY_MASK: usize = 0b11100; +const ORIGINAL_CAPACITY_OFFSET: usize = 2; + +// When the storage is in the `Vec` representation, the pointer can be advanced +// at most this value. This is due to the amount of storage available to track +// the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY +// bits. +const VEC_POS_OFFSET: usize = 5; +const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; +const NOT_VEC_POS_MASK: usize = 0b11111; + +// Bit op constants for extracting the inline length value from the `arc` field. +const INLINE_LEN_MASK: usize = 0b11111100; +const INLINE_LEN_OFFSET: usize = 2; + +// Byte offset from the start of `Inner` to where the inline buffer data +// starts. On little endian platforms, the first byte of the struct is the +// storage flag, so the data is shifted by a byte. On big endian systems, the +// data starts at the beginning of the struct. +#[cfg(target_endian = "little")] +const INLINE_DATA_OFFSET: isize = 1; +#[cfg(target_endian = "big")] +const INLINE_DATA_OFFSET: isize = 0; + +#[cfg(target_pointer_width = "64")] +const PTR_WIDTH: usize = 64; +#[cfg(target_pointer_width = "32")] +const PTR_WIDTH: usize = 32; + +// Inline buffer capacity. This is the size of `Inner` minus 1 byte for the +// metadata. +#[cfg(target_pointer_width = "64")] +const INLINE_CAP: usize = 4 * 8 - 1; +#[cfg(target_pointer_width = "32")] +const INLINE_CAP: usize = 4 * 4 - 1; + +/* + * + * ===== Bytes ===== + * + */ + +impl Bytes { + /// Creates a new `Bytes` with the specified capacity. + /// + /// The returned `Bytes` will be able to hold at least `capacity` bytes + /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`, + /// then `BytesMut` will not allocate. + /// + /// It is important to note that this function does not specify the length + /// of the returned `Bytes`, but only the capacity. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let mut bytes = Bytes::with_capacity(64); + /// + /// // `bytes` contains no data, even though there is capacity + /// assert_eq!(bytes.len(), 0); + /// + /// bytes.extend_from_slice(&b"hello world"[..]); + /// + /// assert_eq!(&bytes[..], b"hello world"); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Bytes { + Bytes { + inner: Inner::with_capacity(capacity), + } + } + + /// Creates a new empty `Bytes`. + /// + /// This will not allocate and the returned `Bytes` handle will be empty. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::new(); + /// assert_eq!(&b[..], b""); + /// ``` + #[inline] + pub fn new() -> Bytes { + Bytes::with_capacity(0) + } + + /// Creates a new `Bytes` from a static slice. + /// + /// The returned `Bytes` will point directly to the static slice. There is + /// no allocating or copying. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::from_static(b"hello"); + /// assert_eq!(&b[..], b"hello"); + /// ``` + #[inline] + pub fn from_static(bytes: &'static [u8]) -> Bytes { + Bytes { + inner: Inner::from_static(bytes), + } + } + + /// Returns the number of bytes contained in this `Bytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::from(&b"hello"[..]); + /// assert_eq!(b.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns true if the `Bytes` has a length of 0. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::new(); + /// assert!(b.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Returns a slice of self for the index range `[begin..end)`. + /// + /// This will increment the reference count for the underlying memory and + /// return a new `Bytes` handle set to the slice. + /// + /// This operation is `O(1)`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let a = Bytes::from(&b"hello world"[..]); + /// let b = a.slice(2, 5); + /// + /// assert_eq!(&b[..], b"llo"); + /// ``` + /// + /// # Panics + /// + /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing + /// will panic. + pub fn slice(&self, begin: usize, end: usize) -> Bytes { + assert!(begin <= end); + assert!(end <= self.len()); + + if end - begin <= INLINE_CAP { + return Bytes::from(&self[begin..end]); + } + + let mut ret = self.clone(); + + unsafe { + ret.inner.set_end(end); + ret.inner.set_start(begin); + } + + ret + } + + /// Returns a slice of self for the index range `[begin..self.len())`. + /// + /// This will increment the reference count for the underlying memory and + /// return a new `Bytes` handle set to the slice. + /// + /// This operation is `O(1)` and is equivalent to `self.slice(begin, + /// self.len())`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let a = Bytes::from(&b"hello world"[..]); + /// let b = a.slice_from(6); + /// + /// assert_eq!(&b[..], b"world"); + /// ``` + /// + /// # Panics + /// + /// Requires that `begin <= self.len()`, otherwise slicing will panic. + pub fn slice_from(&self, begin: usize) -> Bytes { + self.slice(begin, self.len()) + } + + /// Returns a slice of self for the index range `[0..end)`. + /// + /// This will increment the reference count for the underlying memory and + /// return a new `Bytes` handle set to the slice. + /// + /// This operation is `O(1)` and is equivalent to `self.slice(0, end)`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let a = Bytes::from(&b"hello world"[..]); + /// let b = a.slice_to(5); + /// + /// assert_eq!(&b[..], b"hello"); + /// ``` + /// + /// # Panics + /// + /// Requires that `end <= self.len()`, otherwise slicing will panic. + pub fn slice_to(&self, end: usize) -> Bytes { + self.slice(0, end) + } + + /// Splits the bytes into two at the given index. 
+    ///
+    /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes`
+    /// contains elements `[at, len)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut a = Bytes::from(&b"hello world"[..]);
+    /// let b = a.split_off(5);
+    ///
+    /// assert_eq!(&a[..], b"hello");
+    /// assert_eq!(&b[..], b" world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    pub fn split_off(&mut self, at: usize) -> Bytes {
+        assert!(at <= self.len());
+
+        if at == self.len() {
+            return Bytes::new();
+        }
+
+        if at == 0 {
+            return mem::replace(self, Bytes::new());
+        }
+
+        Bytes {
+            inner: self.inner.split_off(at),
+        }
+    }
+
+    /// Splits the bytes into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[at, len)`, and the returned
+    /// `Bytes` contains elements `[0, at)`.
+    ///
+    /// This is an `O(1)` operation that just increases the reference count and
+    /// sets a few indices.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut a = Bytes::from(&b"hello world"[..]);
+    /// let b = a.split_to(5);
+    ///
+    /// assert_eq!(&a[..], b" world");
+    /// assert_eq!(&b[..], b"hello");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`.
+    pub fn split_to(&mut self, at: usize) -> Bytes {
+        assert!(at <= self.len());
+
+        if at == self.len() {
+            return mem::replace(self, Bytes::new());
+        }
+
+        if at == 0 {
+            return Bytes::new();
+        }
+
+        Bytes {
+            inner: self.inner.split_to(at),
+        }
+    }
+
+    #[deprecated(since = "0.4.1", note = "use split_to instead")]
+    #[doc(hidden)]
+    pub fn drain_to(&mut self, at: usize) -> Bytes {
+        self.split_to(at)
+    }
+
+    /// Shortens the buffer, keeping the first `len` bytes and dropping the
+    /// rest.
+    ///
+    /// If `len` is greater than the buffer's current length, this has no
+    /// effect.
+    ///
+    /// The [`split_off`] method can emulate `truncate`, but this causes the
+    /// excess bytes to be returned instead of dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut buf = Bytes::from(&b"hello world"[..]);
+    /// buf.truncate(5);
+    /// assert_eq!(buf, b"hello"[..]);
+    /// ```
+    ///
+    /// [`split_off`]: #method.split_off
+    pub fn truncate(&mut self, len: usize) {
+        self.inner.truncate(len);
+    }
+
+    /// Shortens the buffer, dropping the first `cnt` bytes and keeping the
+    /// rest.
+    ///
+    /// This is the same function as `Buf::advance`, and in the next breaking
+    /// release of `bytes`, this implementation will be removed in favor of
+    /// having `Bytes` implement `Buf`.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `cnt` is greater than `self.len()`.
+    #[inline]
+    pub fn advance(&mut self, cnt: usize) {
+        assert!(cnt <= self.len(), "cannot advance past `remaining`");
+        unsafe { self.inner.set_start(cnt); }
+    }
+
+    /// Clears the buffer, removing all data.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut buf = Bytes::from(&b"hello world"[..]);
+    /// buf.clear();
+    /// assert!(buf.is_empty());
+    /// ```
+    pub fn clear(&mut self) {
+        self.truncate(0);
+    }
+
+    /// Attempts to convert into a `BytesMut` handle.
+    ///
+    /// This will only succeed if there are no other outstanding references to
+    /// the underlying chunk of memory. `Bytes` handles that contain inlined
+    /// bytes will always be convertible to `BytesMut`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let a = Bytes::from(&b"Mary had a little lamb, little lamb, little lamb..."[..]);
+    ///
+    /// // Create a shallow clone
+    /// let b = a.clone();
+    ///
+    /// // This will fail because `b` shares a reference with `a`
+    /// let a = a.try_mut().unwrap_err();
+    ///
+    /// drop(b);
+    ///
+    /// // This will succeed
+    /// let mut a = a.try_mut().unwrap();
+    ///
+    /// a[0] = b'b';
+    ///
+    /// assert_eq!(&a[..4], b"bary");
+    /// ```
+    pub fn try_mut(mut self) -> Result<BytesMut, Bytes> {
+        if self.inner.is_mut_safe() {
+            Ok(BytesMut { inner: self.inner })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Appends given bytes to this object.
+    ///
+    /// If this `Bytes` object does not have enough capacity, it is resized
+    /// first. If it is shared (`refcount > 1`), it is copied first.
+    ///
+    /// This operation can be less efficient than the similar operation on
+    /// `BytesMut`, especially on small additions.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::Bytes;
+    ///
+    /// let mut buf = Bytes::from("aabb");
+    /// buf.extend_from_slice(b"ccdd");
+    /// buf.extend_from_slice(b"eeff");
+    ///
+    /// assert_eq!(b"aabbccddeeff", &buf[..]);
+    /// ```
+    pub fn extend_from_slice(&mut self, extend: &[u8]) {
+        if extend.is_empty() {
+            return;
+        }
+
+        let new_cap = self.len().checked_add(extend.len()).expect("capacity overflow");
+
+        let result = match mem::replace(self, Bytes::new()).try_mut() {
+            Ok(mut bytes_mut) => {
+                bytes_mut.extend_from_slice(extend);
+                bytes_mut
+            },
+            Err(bytes) => {
+                let mut bytes_mut = BytesMut::with_capacity(new_cap);
+                bytes_mut.put_slice(&bytes);
+                bytes_mut.put_slice(extend);
+                bytes_mut
+            }
+        };
+
+        mem::replace(self, result.freeze());
+    }
+}
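+
+// Note (an illustrative sketch, not crate code): because `extend_from_slice`
+// above round-trips through `try_mut`, extending a `Bytes` that is currently
+// shared copies the data rather than mutating it in place:
+//
+//     let a = Bytes::from(&b"Mary had a little lamb, little lamb!"[..]);
+//     let mut b = a.clone();
+//     b.extend_from_slice(b" ...");
+//     assert_eq!(a, b"Mary had a little lamb, little lamb!"[..]);  // untouched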
+
+impl IntoBuf for Bytes {
+    type Buf = Cursor<Self>;
+
+    fn into_buf(self) -> Self::Buf {
+        Cursor::new(self)
+    }
+}
+
+impl<'a> IntoBuf for &'a Bytes {
+    type Buf = Cursor<Self>;
+
+    fn into_buf(self) -> Self::Buf {
+        Cursor::new(self)
+    }
+}
+
+impl Clone for Bytes {
+    fn clone(&self) -> Bytes {
+        Bytes {
+            inner: unsafe { self.inner.shallow_clone(false) },
+        }
+    }
+}
+
+impl AsRef<[u8]> for Bytes {
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.inner.as_ref()
+    }
+}
+
+impl ops::Deref for Bytes {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.inner.as_ref()
+    }
+}
+
+impl From<BytesMut> for Bytes {
+    fn from(src: BytesMut) -> Bytes {
+        src.freeze()
+    }
+}
+
+impl From<Vec<u8>> for Bytes {
+    fn from(src: Vec<u8>) -> Bytes {
+        BytesMut::from(src).freeze()
+    }
+}
+
+impl From<String> for Bytes {
+    fn from(src: String) -> Bytes {
+        BytesMut::from(src).freeze()
+    }
+}
+
+impl<'a> From<&'a [u8]> for Bytes {
+    fn from(src: &'a [u8]) -> Bytes {
+        BytesMut::from(src).freeze()
+    }
+}
+
+impl<'a> From<&'a str> for Bytes {
+    fn from(src: &'a str) -> Bytes {
+        BytesMut::from(src).freeze()
+    }
+}
+
+impl FromIterator<u8> for BytesMut {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        let iter = into_iter.into_iter();
+        let (min, maybe_max) = iter.size_hint();
+
+        let mut out = BytesMut::with_capacity(maybe_max.unwrap_or(min));
+
+        for i in iter {
+            out.reserve(1);
+            out.put(i);
+        }
+
+        out
+    }
+}
+
+impl FromIterator<u8> for Bytes {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        BytesMut::from_iter(into_iter).freeze()
+    }
+}
+
+impl PartialEq for Bytes {
+    fn eq(&self, other: &Bytes) -> bool {
+        self.inner.as_ref() == other.inner.as_ref()
+    }
+}
+
+impl PartialOrd for Bytes {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        self.inner.as_ref().partial_cmp(other.inner.as_ref())
+    }
+}
+
+impl Ord for Bytes {
+    fn cmp(&self, other: &Bytes) -> cmp::Ordering {
+        self.inner.as_ref().cmp(other.inner.as_ref())
+    }
+}
+
+impl Eq for Bytes {
+}
+
+impl Default for Bytes {
+    #[inline]
+    fn default() -> Bytes {
+        Bytes::new()
+    }
+}
+
+impl fmt::Debug for Bytes {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt)
+    }
+}
+
+impl hash::Hash for Bytes {
+    fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+        let s: &[u8] = self.as_ref();
+        s.hash(state);
+    }
+}
+
+impl Borrow<[u8]> for Bytes {
+    fn borrow(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl IntoIterator for Bytes {
+    type Item = u8;
+    type IntoIter = Iter<Cursor<Bytes>>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.into_buf().iter()
+    }
+}
+
+impl<'a> IntoIterator for &'a Bytes {
+    type Item = u8;
+    type IntoIter = Iter<Cursor<&'a Bytes>>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.into_buf().iter()
+    }
+}
+
+impl Extend<u8> for Bytes {
+    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
+        let iter = iter.into_iter();
+
+        let (lower, upper) = iter.size_hint();
+
+        // Avoid possible conversion into mut if there's nothing to add
+        if let Some(0) = upper {
+            return;
+        }
+
+        let mut bytes_mut = match mem::replace(self, Bytes::new()).try_mut() {
+            Ok(bytes_mut) => bytes_mut,
+            Err(bytes) => {
+                let mut bytes_mut = BytesMut::with_capacity(bytes.len() + lower);
+                bytes_mut.put_slice(&bytes);
+                bytes_mut
+            }
+        };
+
+        bytes_mut.extend(iter);
+
+        mem::replace(self, bytes_mut.freeze());
+    }
+}
+
+impl<'a> Extend<&'a u8> for Bytes {
+    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+        self.extend(iter.into_iter().map(|b| *b))
+    }
+}
+
+/*
+ *
+ * ===== BytesMut =====
+ *
+ */
+
+impl BytesMut {
+    /// Creates a new `BytesMut` with the specified capacity.
+    ///
+    /// The returned `BytesMut` will be able to hold at least `capacity` bytes
+    /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
+    /// then `BytesMut` will not allocate.
+    ///
+    /// It is important to note that this function does not specify the length
+    /// of the returned `BytesMut`, but only the capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::with_capacity(64);
+    ///
+    /// // `bytes` contains no data, even though there is capacity
+    /// assert_eq!(bytes.len(), 0);
+    ///
+    /// bytes.put(&b"hello world"[..]);
+    ///
+    /// assert_eq!(&bytes[..], b"hello world");
+    /// ```
+    #[inline]
+    pub fn with_capacity(capacity: usize) -> BytesMut {
+        BytesMut {
+            inner: Inner::with_capacity(capacity),
+        }
+    }
+
+    /// Creates a new `BytesMut` with default capacity.
+    ///
+    /// The resulting object has a length of 0 and unspecified capacity.
+    /// This function does not allocate.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut bytes = BytesMut::new();
+    ///
+    /// assert_eq!(0, bytes.len());
+    ///
+    /// bytes.reserve(2);
+    /// bytes.put_slice(b"xy");
+    ///
+    /// assert_eq!(&b"xy"[..], &bytes[..]);
+    /// ```
+    #[inline]
+    pub fn new() -> BytesMut {
+        BytesMut::with_capacity(0)
+    }
+
+    /// Returns the number of bytes contained in this `BytesMut`.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::from(&b"hello"[..]); + /// assert_eq!(b.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns true if the `BytesMut` has a length of 0. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::with_capacity(64); + /// assert!(b.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of bytes the `BytesMut` can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::with_capacity(64); + /// assert_eq!(b.capacity(), 64); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.inner.capacity() + } + + /// Converts `self` into an immutable `Bytes`. + /// + /// The conversion is zero cost and is used to indicate that the slice + /// referenced by the handle will no longer be mutated. Once the conversion + /// is done, the handle can be cloned and shared across threads. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// use std::thread; + /// + /// let mut b = BytesMut::with_capacity(64); + /// b.put("hello world"); + /// let b1 = b.freeze(); + /// let b2 = b1.clone(); + /// + /// let th = thread::spawn(move || { + /// assert_eq!(&b1[..], b"hello world"); + /// }); + /// + /// assert_eq!(&b2[..], b"hello world"); + /// th.join().unwrap(); + /// ``` + #[inline] + pub fn freeze(self) -> Bytes { + Bytes { inner: self.inner } + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[0, at)`, and the returned + /// `BytesMut` contains elements `[at, capacity)`. + /// + /// This is an `O(1)` operation that just increases the reference count + /// and sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut a = BytesMut::from(&b"hello world"[..]); + /// let mut b = a.split_off(5); + /// + /// a[0] = b'j'; + /// b[0] = b'!'; + /// + /// assert_eq!(&a[..], b"jello"); + /// assert_eq!(&b[..], b"!world"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > capacity`. + pub fn split_off(&mut self, at: usize) -> BytesMut { + BytesMut { + inner: self.inner.split_off(at), + } + } + + /// Removes the bytes from the current view, returning them in a new + /// `BytesMut` handle. + /// + /// Afterwards, `self` will be empty, but will retain any additional + /// capacity that it had before the operation. This is identical to + /// `self.split_to(self.len())`. + /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// + /// let mut buf = BytesMut::with_capacity(1024); + /// buf.put(&b"hello world"[..]); + /// + /// let other = buf.take(); + /// + /// assert!(buf.is_empty()); + /// assert_eq!(1013, buf.capacity()); + /// + /// assert_eq!(other, b"hello world"[..]); + /// ``` + pub fn take(&mut self) -> BytesMut { + let len = self.len(); + self.split_to(len) + } + + #[deprecated(since = "0.4.1", note = "use take instead")] + #[doc(hidden)] + pub fn drain(&mut self) -> BytesMut { + self.take() + } + + /// Splits the buffer into two at the given index. + /// + /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` + /// contains elements `[0, at)`. 
+ /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut a = BytesMut::from(&b"hello world"[..]); + /// let mut b = a.split_to(5); + /// + /// a[0] = b'!'; + /// b[0] = b'j'; + /// + /// assert_eq!(&a[..], b"!world"); + /// assert_eq!(&b[..], b"jello"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > len`. + pub fn split_to(&mut self, at: usize) -> BytesMut { + BytesMut { + inner: self.inner.split_to(at), + } + } + + #[deprecated(since = "0.4.1", note = "use split_to instead")] + #[doc(hidden)] + pub fn drain_to(&mut self, at: usize) -> BytesMut { + self.split_to(at) + } + + /// Shortens the buffer, keeping the first `len` bytes and dropping the + /// rest. + /// + /// If `len` is greater than the buffer's current length, this has no + /// effect. + /// + /// The [`split_off`] method can emulate `truncate`, but this causes the + /// excess bytes to be returned instead of dropped. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::from(&b"hello world"[..]); + /// buf.truncate(5); + /// assert_eq!(buf, b"hello"[..]); + /// ``` + /// + /// [`split_off`]: #method.split_off + pub fn truncate(&mut self, len: usize) { + self.inner.truncate(len); + } + + /// Shortens the buffer, dropping the first `cnt` bytes and keeping the + /// rest. + /// + /// This is the same function as `Buf::advance`, and in the next breaking + /// release of `bytes`, this implementation will be removed in favor of + /// having `BytesMut` implement `Buf`. + /// + /// # Panics + /// + /// This function panics if `cnt` is greater than `self.len()` + #[inline] + pub fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.len(), "cannot advance past `remaining`"); + unsafe { self.inner.set_start(cnt); } + } + + /// Clears the buffer, removing all data. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::from(&b"hello world"[..]); + /// buf.clear(); + /// assert!(buf.is_empty()); + /// ``` + pub fn clear(&mut self) { + self.truncate(0); + } + + /// Resizes the buffer so that `len` is equal to `new_len`. + /// + /// If `new_len` is greater than `len`, the buffer is extended by the + /// difference with each additional byte set to `value`. If `new_len` is + /// less than `len`, the buffer is simply truncated. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::new(); + /// + /// buf.resize(3, 0x1); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); + /// + /// buf.resize(2, 0x2); + /// assert_eq!(&buf[..], &[0x1, 0x1]); + /// + /// buf.resize(4, 0x3); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); + /// ``` + pub fn resize(&mut self, new_len: usize, value: u8) { + self.inner.resize(new_len, value); + } + + /// Sets the length of the buffer. + /// + /// This will explicitly set the size of the buffer without actually + /// modifying the data, so it is up to the caller to ensure that the data + /// has been initialized. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut b = BytesMut::from(&b"hello world"[..]);
+    ///
+    /// unsafe {
+    ///     b.set_len(5);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello");
+    ///
+    /// unsafe {
+    ///     b.set_len(11);
+    /// }
+    ///
+    /// assert_eq!(&b[..], b"hello world");
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `len` is out of bounds for the underlying
+    /// slice or if it comes after the `end` of the configured window.
+    pub unsafe fn set_len(&mut self, len: usize) {
+        self.inner.set_len(len)
+    }
+
+    /// Reserves capacity for at least `additional` more bytes to be inserted
+    /// into the given `BytesMut`.
+    ///
+    /// More than `additional` bytes may be reserved in order to avoid frequent
+    /// reallocations. A call to `reserve` may result in an allocation.
+    ///
+    /// Before allocating new buffer space, the function will attempt to reclaim
+    /// space in the existing buffer. If the current handle references a small
+    /// view in the original buffer and all other handles have been dropped,
+    /// and the requested capacity is less than or equal to the existing
+    /// buffer's capacity, then the current view will be copied to the front of
+    /// the buffer and the handle will take ownership of the full buffer.
+    ///
+    /// # Examples
+    ///
+    /// In the following example, a new buffer is allocated.
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello"[..]);
+    /// buf.reserve(64);
+    /// assert!(buf.capacity() >= 69);
+    /// ```
+    ///
+    /// In the following example, the existing buffer is reclaimed.
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(128);
+    /// buf.put(&[0; 64][..]);
+    ///
+    /// let ptr = buf.as_ptr();
+    /// let other = buf.take();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(buf.capacity(), 64);
+    ///
+    /// drop(other);
+    /// buf.reserve(128);
+    ///
+    /// assert_eq!(buf.capacity(), 128);
+    /// assert_eq!(buf.as_ptr(), ptr);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity overflows `usize`.
+    pub fn reserve(&mut self, additional: usize) {
+        self.inner.reserve(additional)
+    }
+
+    /// Appends given bytes to this object.
+    ///
+    /// If this `BytesMut` object does not have enough capacity, it is resized
+    /// first. So unlike the `put_slice` operation, `extend_from_slice` does
+    /// not panic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(0);
+    /// buf.extend_from_slice(b"aaabbb");
+    /// buf.extend_from_slice(b"cccddd");
+    ///
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn extend_from_slice(&mut self, extend: &[u8]) {
+        self.reserve(extend.len());
+        self.put_slice(extend);
+    }
+
+    /// Combines split `BytesMut` objects back into a contiguous buffer.
+    ///
+    /// If the `BytesMut` objects were not contiguous originally, the data is
+    /// copied over instead.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(64);
+    /// buf.extend_from_slice(b"aaabbbcccddd");
+    ///
+    /// let split = buf.split_off(6);
+    /// assert_eq!(b"aaabbb", &buf[..]);
+    /// assert_eq!(b"cccddd", &split[..]);
+    ///
+    /// buf.unsplit(split);
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn unsplit(&mut self, other: BytesMut) {
+        let ptr;
+
+        if other.is_empty() {
+            return;
+        }
+
+        if self.is_empty() {
+            *self = other;
+            return;
+        }
+
+        unsafe {
+            ptr = self.inner.ptr.offset(self.inner.len as isize);
+        }
+        if ptr == other.inner.ptr &&
+           self.inner.kind() == KIND_ARC &&
+           other.inner.kind() == KIND_ARC
+        {
+            debug_assert_eq!(self.inner.arc.load(Acquire),
+                             other.inner.arc.load(Acquire));
+            // Contiguous blocks, just combine directly
+            self.inner.len += other.inner.len;
+            self.inner.cap += other.inner.cap;
+        }
+        else {
+            self.extend_from_slice(&other);
+        }
+    }
+}
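+
+// Illustration (a sketch, not crate code): the `BufMut` implementation below
+// supports the usual two-step unsafe write pattern, where bytes are written
+// into uninitialized capacity and then committed with `advance_mut`:
+//
+//     use bytes::BufMut;
+//
+//     let mut buf = BytesMut::with_capacity(8);
+//     unsafe {
+//         buf.bytes_mut()[0] = b'!';
+//         buf.advance_mut(1);
+//     }
+//     assert_eq!(&buf[..], b"!");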
+
+impl BufMut for BytesMut {
+    #[inline]
+    fn remaining_mut(&self) -> usize {
+        self.capacity() - self.len()
+    }
+
+    #[inline]
+    unsafe fn advance_mut(&mut self, cnt: usize) {
+        let new_len = self.len() + cnt;
+
+        // This call will panic if `cnt` is too big
+        self.inner.set_len(new_len);
+    }
+
+    #[inline]
+    unsafe fn bytes_mut(&mut self) -> &mut [u8] {
+        let len = self.len();
+
+        // This will never panic as `len` can never become invalid
+        &mut self.inner.as_raw()[len..]
+    }
+
+    #[inline]
+    fn put_slice(&mut self, src: &[u8]) {
+        assert!(self.remaining_mut() >= src.len());
+
+        let len = src.len();
+
+        unsafe {
+            self.bytes_mut()[..len].copy_from_slice(src);
+            self.advance_mut(len);
+        }
+    }
+
+    #[inline]
+    fn put_u8(&mut self, n: u8) {
+        self.inner.put_u8(n);
+    }
+
+    #[inline]
+    fn put_i8(&mut self, n: i8) {
+        self.put_u8(n as u8);
+    }
+}
+
+impl IntoBuf for BytesMut {
+    type Buf = Cursor<Self>;
+
+    fn into_buf(self) -> Self::Buf {
+        Cursor::new(self)
+    }
+}
+
+impl<'a> IntoBuf for &'a BytesMut {
+    type Buf = Cursor<&'a BytesMut>;
+
+    fn into_buf(self) -> Self::Buf {
+        Cursor::new(self)
+    }
+}
+
+impl AsRef<[u8]> for BytesMut {
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        self.inner.as_ref()
+    }
+}
+
+impl ops::Deref for BytesMut {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl AsMut<[u8]> for BytesMut {
+    fn as_mut(&mut self) -> &mut [u8] {
+        self.inner.as_mut()
+    }
+}
+
+impl ops::DerefMut for BytesMut {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [u8] {
+        self.inner.as_mut()
+    }
+}
+
+impl From<Vec<u8>> for BytesMut {
+    fn from(src: Vec<u8>) -> BytesMut {
+        BytesMut {
+            inner: Inner::from_vec(src),
+        }
+    }
+}
+
+impl From<String> for BytesMut {
+    fn from(src: String) -> BytesMut {
+        BytesMut::from(src.into_bytes())
+    }
+}
+
+impl<'a> From<&'a [u8]> for BytesMut {
+    fn from(src: &'a [u8]) -> BytesMut {
+        let len = src.len();
+
+        if len == 0 {
+            BytesMut::new()
+        } else if len <= INLINE_CAP {
+            unsafe {
+                let mut inner: Inner = mem::uninitialized();
+
+                // Set inline mask
+                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
+                inner.set_inline_len(len);
+                inner.as_raw()[0..len].copy_from_slice(src);
+
+                BytesMut {
+                    inner: inner,
+                }
+            }
+        } else {
+            BytesMut::from(src.to_vec())
+        }
+    }
+}
+
+impl<'a> From<&'a str> for BytesMut {
+    fn from(src: &'a str) -> BytesMut {
+        BytesMut::from(src.as_bytes())
+    }
+}
+
+impl From<Bytes> for BytesMut {
+    fn from(src: Bytes) -> BytesMut {
+        src.try_mut()
+           .unwrap_or_else(|src| BytesMut::from(&src[..]))
+    }
+}
+
+impl PartialEq for BytesMut
{ + fn eq(&self, other: &BytesMut) -> bool { + self.inner.as_ref() == other.inner.as_ref() + } +} + +impl PartialOrd for BytesMut { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + self.inner.as_ref().partial_cmp(other.inner.as_ref()) + } +} + +impl Ord for BytesMut { + fn cmp(&self, other: &BytesMut) -> cmp::Ordering { + self.inner.as_ref().cmp(other.inner.as_ref()) + } +} + +impl Eq for BytesMut { +} + +impl Default for BytesMut { + #[inline] + fn default() -> BytesMut { + BytesMut::new() + } +} + +impl fmt::Debug for BytesMut { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&debug::BsDebug(&self.inner.as_ref()), fmt) + } +} + +impl hash::Hash for BytesMut { + fn hash<H>(&self, state: &mut H) where H: hash::Hasher { + let s: &[u8] = self.as_ref(); + s.hash(state); + } +} + +impl Borrow<[u8]> for BytesMut { + fn borrow(&self) -> &[u8] { + self.as_ref() + } +} + +impl BorrowMut<[u8]> for BytesMut { + fn borrow_mut(&mut self) -> &mut [u8] { + self.as_mut() + } +} + +impl fmt::Write for BytesMut { + #[inline] + fn write_str(&mut self, s: &str) -> fmt::Result { + if self.remaining_mut() >= s.len() { + self.put_slice(s.as_bytes()); + Ok(()) + } else { + Err(fmt::Error) + } + } + + #[inline] + fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result { + fmt::write(self, args) + } +} + +impl Clone for BytesMut { + fn clone(&self) -> BytesMut { + BytesMut::from(&self[..]) + } +} + +impl IntoIterator for BytesMut { + type Item = u8; + type IntoIter = Iter<Cursor<BytesMut>>; + + fn into_iter(self) -> Self::IntoIter { + self.into_buf().iter() + } +} + +impl<'a> IntoIterator for &'a BytesMut { + type Item = u8; + type IntoIter = Iter<Cursor<&'a BytesMut>>; + + fn into_iter(self) -> Self::IntoIter { + self.into_buf().iter() + } +} + +impl Extend<u8> for BytesMut { + fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> { + let iter = iter.into_iter(); + + let (lower, _) = iter.size_hint(); + self.reserve(lower); + + for b in iter { + unsafe { + self.bytes_mut()[0] = b; + self.advance_mut(1); + } + } + } +} + +impl<'a> Extend<&'a u8> for BytesMut { + fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> { + self.extend(iter.into_iter().map(|b| *b)) + } +} + +/* + * + * ===== Inner ===== + * + */ + +impl Inner { + #[inline] + fn from_static(bytes: &'static [u8]) -> Inner { + let ptr = bytes.as_ptr() as *mut u8; + + Inner { + // `arc` won't ever store a pointer. Instead, use it to + // track the fact that the `Bytes` handle is backed by a + // static buffer. 
+            arc: AtomicPtr::new(KIND_STATIC as *mut Shared),
+            ptr: ptr,
+            len: bytes.len(),
+            cap: bytes.len(),
+        }
+    }
+
+    #[inline]
+    fn from_vec(mut src: Vec<u8>) -> Inner {
+        let len = src.len();
+        let cap = src.capacity();
+        let ptr = src.as_mut_ptr();
+
+        mem::forget(src);
+
+        let original_capacity_repr = original_capacity_to_repr(cap);
+        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+        Inner {
+            arc: AtomicPtr::new(arc as *mut Shared),
+            ptr: ptr,
+            len: len,
+            cap: cap,
+        }
+    }
+
+    #[inline]
+    fn with_capacity(capacity: usize) -> Inner {
+        if capacity <= INLINE_CAP {
+            unsafe {
+                // Using uninitialized memory is ~30% faster
+                let mut inner: Inner = mem::uninitialized();
+                inner.arc = AtomicPtr::new(KIND_INLINE as *mut Shared);
+                inner
+            }
+        } else {
+            Inner::from_vec(Vec::with_capacity(capacity))
+        }
+    }
+
+    /// Return a slice for the handle's view into the shared buffer
+    #[inline]
+    fn as_ref(&self) -> &[u8] {
+        unsafe {
+            if self.is_inline() {
+                slice::from_raw_parts(self.inline_ptr(), self.inline_len())
+            } else {
+                slice::from_raw_parts(self.ptr, self.len)
+            }
+        }
+    }
+
+    /// Return a mutable slice for the handle's view into the shared buffer
+    #[inline]
+    fn as_mut(&mut self) -> &mut [u8] {
+        debug_assert!(!self.is_static());
+
+        unsafe {
+            if self.is_inline() {
+                slice::from_raw_parts_mut(self.inline_ptr(), self.inline_len())
+            } else {
+                slice::from_raw_parts_mut(self.ptr, self.len)
+            }
+        }
+    }
+
+    /// Return a mutable slice for the handle's view into the shared buffer
+    /// including potentially uninitialized bytes.
+    #[inline]
+    unsafe fn as_raw(&mut self) -> &mut [u8] {
+        debug_assert!(!self.is_static());
+
+        if self.is_inline() {
+            slice::from_raw_parts_mut(self.inline_ptr(), INLINE_CAP)
+        } else {
+            slice::from_raw_parts_mut(self.ptr, self.cap)
+        }
+    }
+
+    /// Insert a byte into the next slot and advance the len by 1.
+    #[inline]
+    fn put_u8(&mut self, n: u8) {
+        if self.is_inline() {
+            let len = self.inline_len();
+            assert!(len < INLINE_CAP);
+            unsafe {
+                *self.inline_ptr().offset(len as isize) = n;
+            }
+            self.set_inline_len(len + 1);
+        } else {
+            assert!(self.len < self.cap);
+            unsafe {
+                *self.ptr.offset(self.len as isize) = n;
+            }
+            self.len += 1;
+        }
+    }
+
+    #[inline]
+    fn len(&self) -> usize {
+        if self.is_inline() {
+            self.inline_len()
+        } else {
+            self.len
+        }
+    }
+
+    /// Pointer to the start of the inline buffer
+    #[inline]
+    unsafe fn inline_ptr(&self) -> *mut u8 {
+        (self as *const Inner as *mut Inner as *mut u8)
+            .offset(INLINE_DATA_OFFSET)
+    }
+
+    #[inline]
+    fn inline_len(&self) -> usize {
+        let p: &usize = unsafe { mem::transmute(&self.arc) };
+        (p & INLINE_LEN_MASK) >> INLINE_LEN_OFFSET
+    }
+
+    /// Set the length of the inline buffer. This is done by writing to the
+    /// least significant byte of the `arc` field.
+    #[inline]
+    fn set_inline_len(&mut self, len: usize) {
+        debug_assert!(len <= INLINE_CAP);
+        let p = self.arc.get_mut();
+        *p = ((*p as usize & !INLINE_LEN_MASK) | (len << INLINE_LEN_OFFSET)) as _;
+    }
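+
+    // Worked example (illustrative): with an inline length of 5, the least
+    // significant byte of `arc` is
+    //
+    //     (5 << INLINE_LEN_OFFSET) | KIND_INLINE == 0b0001_0101
+    //
+    // i.e. bits 2..8 carry the length and bits 0..2 carry the storage kind,
+    // matching `INLINE_LEN_MASK` and `KIND_MASK` above.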
+
+    /// Set the length of the handle's view into the underlying buffer.
+    #[inline]
+    unsafe fn set_len(&mut self, len: usize) {
+        if self.is_inline() {
+            assert!(len <= INLINE_CAP);
+            self.set_inline_len(len);
+        } else {
+            assert!(len <= self.cap);
+            self.len = len;
+        }
+    }
+
+    #[inline]
+    fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    #[inline]
+    fn capacity(&self) -> usize {
+        if self.is_inline() {
+            INLINE_CAP
+        } else {
+            self.cap
+        }
+    }
+
+    fn split_off(&mut self, at: usize) -> Inner {
+        let mut other = unsafe { self.shallow_clone(true) };
+
+        unsafe {
+            other.set_start(at);
+            self.set_end(at);
+        }
+
+        return other
+    }
+
+    fn split_to(&mut self, at: usize) -> Inner {
+        let mut other = unsafe { self.shallow_clone(true) };
+
+        unsafe {
+            other.set_end(at);
+            self.set_start(at);
+        }
+
+        return other
+    }
+
+    fn truncate(&mut self, len: usize) {
+        if len <= self.len() {
+            unsafe { self.set_len(len); }
+        }
+    }
+
+    fn resize(&mut self, new_len: usize, value: u8) {
+        let len = self.len();
+        if new_len > len {
+            let additional = new_len - len;
+            self.reserve(additional);
+            unsafe {
+                let dst = self.as_raw()[len..].as_mut_ptr();
+                ptr::write_bytes(dst, value, additional);
+                self.set_len(new_len);
+            }
+        } else {
+            self.truncate(new_len);
+        }
+    }
+
+    unsafe fn set_start(&mut self, start: usize) {
+        // Setting the start to 0 is a no-op, so return early if this is the
+        // case.
+        if start == 0 {
+            return;
+        }
+
+        let kind = self.kind();
+
+        // Always check `inline` first, because if the handle is using inline
+        // data storage, all of the `Inner` struct fields will be gibberish.
+        if kind == KIND_INLINE {
+            assert!(start <= INLINE_CAP);
+
+            let len = self.inline_len();
+
+            if len <= start {
+                self.set_inline_len(0);
+            } else {
+                // `set_start` is essentially shifting data off the front of the
+                // view. Inlined buffers only track the length of the slice.
+                // So, to update the start, the data at the new starting point
+                // is copied to the beginning of the buffer.
+                let new_len = len - start;
+
+                let dst = self.inline_ptr();
+                let src = (dst as *const u8).offset(start as isize);
+
+                ptr::copy(src, dst, new_len);
+
+                self.set_inline_len(new_len);
+            }
+        } else {
+            assert!(start <= self.cap);
+
+            if kind == KIND_VEC {
+                // Setting the start when in vec representation is a little more
+                // complicated. First, we have to track how far ahead the
+                // "start" of the byte buffer is from the beginning of the vec.
+                // We also have to ensure that we don't exceed the maximum
+                // shift.
+                let (mut pos, prev) = self.uncoordinated_get_vec_pos();
+                pos += start;
+
+                if pos <= MAX_VEC_POS {
+                    self.uncoordinated_set_vec_pos(pos, prev);
+                } else {
+                    // The repr must be upgraded to ARC. This will never happen
+                    // on 64 bit systems and will only happen on 32 bit systems
+                    // when shifting past 134,217,727 bytes. As such, we don't
+                    // worry too much about performance here.
+                    let _ = self.shallow_clone(true);
+                }
+            }
+
+            // Updating the start of the view is setting `ptr` to point to the
+            // new start and updating the `len` field to reflect the new length
+            // of the view.
+            self.ptr = self.ptr.offset(start as isize);
+
+            if self.len >= start {
+                self.len -= start;
+            } else {
+                self.len = 0;
+            }
+
+            self.cap -= start;
+        }
+    }
+
+    unsafe fn set_end(&mut self, end: usize) {
+        debug_assert!(self.is_shared());
+
+        // Always check `inline` first, because if the handle is using inline
+        // data storage, all of the `Inner` struct fields will be gibberish.
+        if self.is_inline() {
+            assert!(end <= INLINE_CAP);
+            let new_len = cmp::min(self.inline_len(), end);
+            self.set_inline_len(new_len);
+        } else {
+            assert!(end <= self.cap);
+
+            self.cap = end;
+            self.len = cmp::min(self.len, end);
+        }
+    }
+
+    /// Checks if it is safe to mutate the memory
+    fn is_mut_safe(&mut self) -> bool {
+        let kind = self.kind();
+
+        // Always check `inline` first, because if the handle is using inline
+        // data storage, all of the `Inner` struct fields will be gibberish.
+        if kind == KIND_INLINE {
+            // Inlined buffers can always be mutated as the data is never shared
+            // across handles.
+            true
+        } else if kind == KIND_VEC {
+            true
+        } else if kind == KIND_STATIC {
+            false
+        } else {
+            // Otherwise, the underlying buffer is potentially shared with other
+            // handles, so the ref_count needs to be checked.
+            unsafe { (**self.arc.get_mut()).is_unique() }
+        }
+    }
+
+    /// Increments the ref count. This should only be done if it is known that
+    /// it can be done safely. As such, this fn is not public, instead other
+    /// fns will use this one while maintaining the guarantees.
+    /// Parameter `mut_self` should only be set to `true` if the caller holds a
+    /// `&mut self` reference.
+    ///
+    /// "Safely" is defined as not exposing two `BytesMut` values that point to
+    /// the same byte window.
+    ///
+    /// This function is thread safe.
+    unsafe fn shallow_clone(&self, mut_self: bool) -> Inner {
+        // Always check `inline` first, because if the handle is using inline
+        // data storage, all of the `Inner` struct fields will be gibberish.
+        //
+        // Additionally, if kind is STATIC, then Arc is *never* changed, making
+        // it safe and faster to check for it now before an atomic acquire.
+
+        if self.is_inline_or_static() {
+            // In this case, a shallow_clone still involves copying the data.
+            let mut inner: Inner = mem::uninitialized();
+            ptr::copy_nonoverlapping(
+                self,
+                &mut inner,
+                1,
+            );
+            inner
+        } else {
+            self.shallow_clone_sync(mut_self)
+        }
+    }
+
+    #[cold]
+    unsafe fn shallow_clone_sync(&self, mut_self: bool) -> Inner {
+        // The function requires `&self`, which means that `shallow_clone`
+        // could be called concurrently.
+        //
+        // The first step is to load the value of `arc`. This will determine
+        // how to proceed. The `Acquire` ordering synchronizes with the
+        // `compare_and_swap` that comes later in this function. The goal is
+        // to ensure that if `arc` is currently set to point to a `Shared`,
+        // that the current thread acquires the associated memory.
+        let arc = self.arc.load(Acquire);
+        let kind = arc as usize & KIND_MASK;
+
+        if kind == KIND_ARC {
+            self.shallow_clone_arc(arc)
+        } else {
+            assert!(kind == KIND_VEC);
+            self.shallow_clone_vec(arc as usize, mut_self)
+        }
+    }
+
+    unsafe fn shallow_clone_arc(&self, arc: *mut Shared) -> Inner {
+        debug_assert!(arc as usize & KIND_MASK == KIND_ARC);
+
+        let old_size = (*arc).ref_count.fetch_add(1, Relaxed);
+
+        if old_size == usize::MAX {
+            abort();
+        }
+
+        Inner {
+            arc: AtomicPtr::new(arc),
+            .. *self
+        }
+    }
+
+    #[cold]
+    unsafe fn shallow_clone_vec(&self, arc: usize, mut_self: bool) -> Inner {
+        // If the buffer is still tracked in a `Vec<u8>`, it is time to
+        // promote the vec to an `Arc`. This could potentially be called
+        // concurrently, so some care must be taken.
+
+        debug_assert!(arc & KIND_MASK == KIND_VEC);
+
+        let original_capacity_repr =
+            (arc as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
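+
+        // Overview of the steps below: promotion can race. Two threads
+        // cloning the same KIND_VEC handle may both build a `Shared` and
+        // attempt the `compare_and_swap`; exactly one wins, and the loser
+        // frees the `Shared` it allocated and instead bumps the winner's
+        // ref count via `shallow_clone_arc`.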
+
+        // The vec offset cannot be concurrently mutated, so there
+        // should be no danger reading it.
+        let off = (arc as usize) >> VEC_POS_OFFSET;
+
+        // First, allocate a new `Shared` instance containing the
+        // `Vec` fields. It's important to note that `ptr`, `len`,
+        // and `cap` cannot be mutated without having `&mut self`.
+        // This means that these fields will not be concurrently
+        // updated and since the buffer hasn't been promoted to an
+        // `Arc`, those three fields still are the components of the
+        // vector.
+        let shared = Box::new(Shared {
+            vec: rebuild_vec(self.ptr, self.len, self.cap, off),
+            original_capacity_repr: original_capacity_repr,
+            // Initialize refcount to 2. One for this reference, and one
+            // for the new clone that will be returned from
+            // `shallow_clone`.
+            ref_count: AtomicUsize::new(2),
+        });
+
+        let shared = Box::into_raw(shared);
+
+        // The pointer should be aligned, so this assert should
+        // always succeed.
+        debug_assert!(0 == (shared as usize & 0b11));
+
+        // If there are no references to self in other threads,
+        // expensive atomic operations can be avoided.
+        if mut_self {
+            self.arc.store(shared, Relaxed);
+            return Inner {
+                arc: AtomicPtr::new(shared),
+                .. *self
+            };
+        }
+
+        // Try compare & swapping the pointer into the `arc` field.
+        // `Release` is used to synchronize with other threads that
+        // will load the `arc` field.
+        //
+        // If the `compare_and_swap` fails, then the thread lost the
+        // race to promote the buffer to shared. The `Acquire`
+        // ordering will synchronize with the `compare_and_swap`
+        // that happened in the other thread and the `Shared`
+        // pointed to by `actual` will be visible.
+        let actual = self.arc.compare_and_swap(arc as *mut Shared, shared, AcqRel);
+
+        if actual as usize == arc {
+            // The upgrade was successful, the new handle can be
+            // returned.
+            return Inner {
+                arc: AtomicPtr::new(shared),
+                .. *self
+            };
+        }
+
+        // The upgrade failed, a concurrent clone happened. Release
+        // the allocation that was made in this thread; it will not
+        // be needed.
+        let shared = Box::from_raw(shared);
+        mem::forget(*shared);
+
+        // Buffer already promoted to shared storage, so increment ref
+        // count.
+        self.shallow_clone_arc(actual)
+    }
+
+    #[inline]
+    fn reserve(&mut self, additional: usize) {
+        let len = self.len();
+        let rem = self.capacity() - len;
+
+        if additional <= rem {
+            // The handle can already store at least `additional` more bytes,
+            // so there is no further work to be done.
+            return;
+        }
+
+        let kind = self.kind();
+
+        // Always check `inline` first, because if the handle is using inline
+        // data storage, all of the `Inner` struct fields will be gibberish.
+        if kind == KIND_INLINE {
+            let new_cap = len + additional;
+
+            // Promote to a vector
+            let mut v = Vec::with_capacity(new_cap);
+            v.extend_from_slice(self.as_ref());
+
+            self.ptr = v.as_mut_ptr();
+            self.len = v.len();
+            self.cap = v.capacity();
+
+            // Since the minimum capacity is `INLINE_CAP`, don't bother encoding
+            // the original capacity as INLINE_CAP
+            self.arc = AtomicPtr::new(KIND_VEC as *mut Shared);
+
+            mem::forget(v);
+            return;
+        }
+
+        if kind == KIND_VEC {
+            // If there's enough free space before the start of the buffer, then
+            // just copy the data backwards and reuse the already-allocated
+            // space.
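+            //
+            // Worked example (illustrative numbers): if the handle has been
+            // advanced so that `off == 40` and the remaining capacity is
+            // `self.cap == 24`, then a `reserve(8)` request satisfies both
+            // `off >= additional` and `off >= self.cap / 2` in the check
+            // below, so the bytes are shifted back to the start of the
+            // allocation and no new allocation is made.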
+            //
+            // Otherwise, since backed by a vector, use `Vec::reserve`
+            unsafe {
+                let (off, prev) = self.uncoordinated_get_vec_pos();
+
+                // Only reuse space if we stand to gain at least capacity/2
+                // bytes of space back
+                if off >= additional && off >= (self.cap / 2) {
+                    // There's space - reuse it
+                    //
+                    // Just move the pointer back to the start after copying
+                    // data back.
+                    let base_ptr = self.ptr.offset(-(off as isize));
+                    ptr::copy(self.ptr, base_ptr, self.len);
+                    self.ptr = base_ptr;
+                    self.uncoordinated_set_vec_pos(0, prev);
+
+                    // Length stays constant, but since we moved backwards we
+                    // can gain capacity back.
+                    self.cap += off;
+                } else {
+                    // No space - allocate more
+                    let mut v = rebuild_vec(self.ptr, self.len, self.cap, off);
+                    v.reserve(additional);
+
+                    // Update the info
+                    self.ptr = v.as_mut_ptr().offset(off as isize);
+                    self.len = v.len() - off;
+                    self.cap = v.capacity() - off;
+
+                    // Drop the vec reference
+                    mem::forget(v);
+                }
+                return;
+            }
+        }
+
+        let arc = *self.arc.get_mut();
+
+        debug_assert!(kind == KIND_ARC);
+
+        // Reserving involves abandoning the currently shared buffer and
+        // allocating a new vector with the requested capacity.
+        //
+        // Compute the new capacity
+        let mut new_cap = len + additional;
+        let original_capacity;
+        let original_capacity_repr;
+
+        unsafe {
+            original_capacity_repr = (*arc).original_capacity_repr;
+            original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+            // First, try to reclaim the buffer. This is possible if the current
+            // handle is the only outstanding handle pointing to the buffer.
+            if (*arc).is_unique() {
+                // This is the only handle to the buffer. It can be reclaimed.
+                // However, before doing the work of copying data, check to make
+                // sure that the vector has enough capacity.
+                let v = &mut (*arc).vec;
+
+                if v.capacity() >= new_cap {
+                    // The capacity is sufficient, reclaim the buffer
+                    let ptr = v.as_mut_ptr();
+
+                    ptr::copy(self.ptr, ptr, len);
+
+                    self.ptr = ptr;
+                    self.cap = v.capacity();
+
+                    return;
+                }
+
+                // The vector capacity is not sufficient. The reserve request is
+                // asking for more than the initial buffer capacity. Allocate more
+                // than requested if `new_cap` is not much bigger than the current
+                // capacity.
+                //
+                // There are some situations, using `reserve_exact`, where the
+                // buffer capacity could be below `original_capacity`, so perform
+                // the check.
+                new_cap = cmp::max(
+                    cmp::max(v.capacity() << 1, new_cap),
+                    original_capacity);
+            } else {
+                new_cap = cmp::max(new_cap, original_capacity);
+            }
+        }
+
+        // Create a new vector to store the data
+        let mut v = Vec::with_capacity(new_cap);
+
+        // Copy the bytes
+        v.extend_from_slice(self.as_ref());
+
+        // Release the shared handle. This must be done *after* the bytes are
+        // copied.
+        release_shared(arc);
+
+        // Update self
+        self.ptr = v.as_mut_ptr();
+        self.len = v.len();
+        self.cap = v.capacity();
+
+        let arc = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+        self.arc = AtomicPtr::new(arc as *mut Shared);
+
+        // Forget the vector handle
+        mem::forget(v);
+    }
+
+    /// Returns true if the buffer is stored inline
+    #[inline]
+    fn is_inline(&self) -> bool {
+        self.kind() == KIND_INLINE
+    }
+
+    #[inline]
+    fn is_inline_or_static(&self) -> bool {
+        // The value returned by `kind` isn't itself synchronized, but the
+        // value can inform which operations to take, some of which may
+        // unsafely proceed without further synchronization.
+        //
+        // KIND_INLINE and KIND_STATIC will *never* change, so branching on
+        // that information is safe.
+        let kind = self.kind();
+        kind == KIND_INLINE || kind == KIND_STATIC
+    }
+
+    /// Used for `debug_assert` statements. &mut is used to guarantee that it is
+    /// safe to check VEC_KIND
+    #[inline]
+    fn is_shared(&mut self) -> bool {
+        match self.kind() {
+            KIND_VEC => false,
+            _ => true,
+        }
+    }
+
+    /// Used for `debug_assert` statements
+    #[inline]
+    fn is_static(&mut self) -> bool {
+        match self.kind() {
+            KIND_STATIC => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    fn kind(&self) -> usize {
+        // This function is going to probably raise some eyebrows. The function
+        // returns the storage kind of the buffer. This is done by checking the
+        // least significant bits in the `arc` field.
+        //
+        // Now, you may notice that `arc` is an `AtomicPtr` and this is
+        // accessing it as a normal field without performing an atomic load...
+        //
+        // Again, the function only cares about the least significant bits, and
+        // these bits are set when `Inner` is created and never changed after
+        // that. All platforms have atomic "word" operations and won't randomly
+        // flip bits, so even without any explicit atomic operations, reading
+        // the flag will be correct.
+        //
+        // This function is very critical performance wise as it is called for
+        // every operation. Performing an atomic load would mess with the
+        // compiler's ability to optimize. Simple benchmarks show up to a 10%
+        // slowdown using a `Relaxed` atomic load on x86.
+
+        #[cfg(target_endian = "little")]
+        #[inline]
+        fn imp(arc: &AtomicPtr<Shared>) -> usize {
+            unsafe {
+                let p: &u8 = mem::transmute(arc);
+                (*p as usize) & KIND_MASK
+            }
+        }
+
+        #[cfg(target_endian = "big")]
+        #[inline]
+        fn imp(arc: &AtomicPtr<Shared>) -> usize {
+            unsafe {
+                let p: &usize = mem::transmute(arc);
+                *p & KIND_MASK
+            }
+        }
+
+        imp(&self.arc)
+    }
+
+    #[inline]
+    fn uncoordinated_get_vec_pos(&mut self) -> (usize, usize) {
+        // Similar to above, this is a pretty crazed function. This should only
+        // be called when in the KIND_VEC mode. This + the &mut self argument
+        // guarantees that there is no possibility of concurrent calls to this
+        // function.
+        let prev = unsafe {
+            let p: &AtomicPtr<Shared> = &self.arc;
+            let p: &usize = mem::transmute(p);
+            *p
+        };
+
+        (prev >> VEC_POS_OFFSET, prev)
+    }
+
+    #[inline]
+    fn uncoordinated_set_vec_pos(&mut self, pos: usize, prev: usize) {
+        // Once more... crazy
+        debug_assert!(pos <= MAX_VEC_POS);
+
+        unsafe {
+            let p: &mut AtomicPtr<Shared> = &mut self.arc;
+            let p: &mut usize = mem::transmute(p);
+            *p = (pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK);
+        }
+    }
+}
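+
+// Layout note (illustrative): while in `KIND_VEC` mode, the `arc` word packs
+// three fields, which is what the `uncoordinated_*` accessors above slice
+// apart:
+//
+//     [ vec offset (rest of word) | original capacity repr (3 bits) | kind (2 bits) ]
+//       `>> VEC_POS_OFFSET`         `ORIGINAL_CAPACITY_MASK`          `KIND_MASK`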
+
+fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+    unsafe {
+        let ptr = ptr.offset(-(off as isize));
+        len += off;
+        cap += off;
+
+        Vec::from_raw_parts(ptr, len, cap)
+    }
+}
+
+impl Drop for Inner {
+    fn drop(&mut self) {
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            let (off, _) = self.uncoordinated_get_vec_pos();
+
+            // Vector storage, free the vector
+            let _ = rebuild_vec(self.ptr, self.len, self.cap, off);
+        } else if kind == KIND_ARC {
+            release_shared(*self.arc.get_mut());
+        }
+    }
+}
+
+fn release_shared(ptr: *mut Shared) {
+    // `Shared` storage... follow the drop steps from Arc.
+    unsafe {
+        if (*ptr).ref_count.fetch_sub(1, Release) != 1 {
+            return;
+        }
+
+        // This fence is needed to prevent reordering of use of the data and
+        // deletion of the data. Because it is marked `Release`, the decreasing
+        // of the reference count synchronizes with this `Acquire` fence. This
+        // means that use of the data happens before decreasing the reference
+        // count, which happens before this fence, which happens before the
+        // deletion of the data.
+        //
+        // As explained in the [Boost documentation][1],
+        //
+        // > It is important to enforce any possible access to the object in one
+        // > thread (through an existing reference) to *happen before* deleting
+        // > the object in a different thread. This is achieved by a "release"
+        // > operation after dropping a reference (any access to the object
+        // > through this reference must obviously happened before), and an
+        // > "acquire" operation before deleting the object.
+        //
+        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+        atomic::fence(Acquire);
+
+        // Drop the data
+        Box::from_raw(ptr);
+    }
+}
+
+impl Shared {
+    fn is_unique(&self) -> bool {
+        // The goal is to check if the current handle is the only handle
+        // that currently has access to the buffer. This is done by
+        // checking if the `ref_count` is currently 1.
+        //
+        // The `Acquire` ordering synchronizes with the `Release` as
+        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+        // operation guarantees that any mutations done in other threads
+        // are ordered before the `ref_count` is decremented. As such,
+        // this `Acquire` will guarantee that those mutations are
+        // visible to the current thread.
+        self.ref_count.load(Acquire) == 1
+    }
+}
+
+fn original_capacity_to_repr(cap: usize) -> usize {
+    let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
+    cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
+}
+
+fn original_capacity_from_repr(repr: usize) -> usize {
+    if repr == 0 {
+        return 0;
+    }
+
+    1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
+}
+
+#[test]
+fn test_original_capacity_to_repr() {
+    for &cap in &[0, 1, 16, 1000] {
+        assert_eq!(0, original_capacity_to_repr(cap));
+    }
+
+    for &cap in &[1024, 1025, 1100, 2000, 2047] {
+        assert_eq!(1, original_capacity_to_repr(cap));
+    }
+
+    for &cap in &[2048, 2049] {
+        assert_eq!(2, original_capacity_to_repr(cap));
+    }
+
+    // TODO: more
+
+    for &cap in &[65536, 65537, 68000, 1 << 17, 1 << 18, 1 << 20, 1 << 30] {
+        assert_eq!(7, original_capacity_to_repr(cap), "cap={}", cap);
+    }
+}
+
+#[test]
+fn test_original_capacity_from_repr() {
+    assert_eq!(0, original_capacity_from_repr(0));
+    assert_eq!(1024, original_capacity_from_repr(1));
+    assert_eq!(1024 * 2, original_capacity_from_repr(2));
+    assert_eq!(1024 * 4, original_capacity_from_repr(3));
+    assert_eq!(1024 * 8, original_capacity_from_repr(4));
+    assert_eq!(1024 * 16, original_capacity_from_repr(5));
+    assert_eq!(1024 * 32, original_capacity_from_repr(6));
+    assert_eq!(1024 * 64, original_capacity_from_repr(7));
+}
+
+unsafe impl Send for Inner {}
+unsafe impl Sync for Inner {}
+
+/*
+ *
+ * ===== PartialEq / PartialOrd =====
+ *
+ */
+
+impl PartialEq<[u8]> for BytesMut {
+    fn eq(&self, other: &[u8]) -> bool {
+        &**self == other
+    }
+}
+
+impl PartialOrd<[u8]> for BytesMut {
+    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+        (**self).partial_cmp(other)
+    }
+}
+
+impl PartialEq<BytesMut> for [u8] {
+    fn eq(&self, other: &BytesMut) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<BytesMut> for [u8] {
+    fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl PartialEq<str> for BytesMut {
+    fn eq(&self, other: &str) -> bool {
+        &**self == other.as_bytes()
+    }
+}
+
+impl PartialOrd<str>
for BytesMut { + fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { + (**self).partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<BytesMut> for str { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for str { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Vec<u8>> for BytesMut { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] + } +} + +impl PartialOrd<Vec<u8>> for BytesMut { + fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { + (**self).partial_cmp(&other[..]) + } +} + +impl PartialEq<BytesMut> for Vec<u8> { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for Vec<u8> { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<String> for BytesMut { + fn eq(&self, other: &String) -> bool { + *self == &other[..] + } +} + +impl PartialOrd<String> for BytesMut { + fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { + (**self).partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<BytesMut> for String { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for String { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut + where BytesMut: PartialEq<T> +{ + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut + where BytesMut: PartialOrd<T> +{ + fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { + self.partial_cmp(*other) + } +} + +impl<'a> PartialEq<BytesMut> for &'a [u8] { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl<'a> PartialOrd<BytesMut> for &'a [u8] { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl<'a> PartialEq<BytesMut> for &'a str { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl<'a> PartialOrd<BytesMut> for &'a str { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<[u8]> for Bytes { + fn eq(&self, other: &[u8]) -> bool { + self.inner.as_ref() == other + } +} + +impl PartialOrd<[u8]> for Bytes { + fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { + self.inner.as_ref().partial_cmp(other) + } +} + +impl PartialEq<Bytes> for [u8] { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for [u8] { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<str> for Bytes { + fn eq(&self, other: &str) -> bool { + self.inner.as_ref() == other.as_bytes() + } +} + +impl PartialOrd<str> for Bytes { + fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { + self.inner.as_ref().partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<Bytes> for str { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for str { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Vec<u8>> for Bytes { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] 
+    }
+}
+
+impl PartialOrd<Vec<u8>> for Bytes {
+    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+        self.inner.as_ref().partial_cmp(&other[..])
+    }
+}
+
+impl PartialEq<Bytes> for Vec<u8> {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for Vec<u8> {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl PartialEq<String> for Bytes {
+    fn eq(&self, other: &String) -> bool {
+        *self == &other[..]
+    }
+}
+
+impl PartialOrd<String> for Bytes {
+    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+        self.inner.as_ref().partial_cmp(other.as_bytes())
+    }
+}
+
+impl PartialEq<Bytes> for String {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl PartialOrd<Bytes> for String {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl<'a> PartialEq<Bytes> for &'a [u8] {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl<'a> PartialOrd<Bytes> for &'a [u8] {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl<'a> PartialEq<Bytes> for &'a str {
+    fn eq(&self, other: &Bytes) -> bool {
+        *other == *self
+    }
+}
+
+impl<'a> PartialOrd<Bytes> for &'a str {
+    fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> {
+        other.partial_cmp(self)
+    }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
+    where Bytes: PartialEq<T>
+{
+    fn eq(&self, other: &&'a T) -> bool {
+        *self == **other
+    }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
+    where Bytes: PartialOrd<T>
+{
+    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+        self.partial_cmp(&**other)
+    }
+}
+
+impl PartialEq<BytesMut> for Bytes
+{
+    fn eq(&self, other: &BytesMut) -> bool {
+        &other[..] == &self[..]
+    }
+}
+
+impl PartialEq<Bytes> for BytesMut
+{
+    fn eq(&self, other: &Bytes) -> bool {
+        &other[..] == &self[..]
+    }
+}
+
+// While there is `std::process::abort`, it's only available starting with
+// Rust 1.17, and our minimum supported version is currently 1.15. So, this
+// acts as an abort by triggering a double panic, which always aborts in Rust.
+struct Abort;
+
+impl Drop for Abort {
+    fn drop(&mut self) {
+        panic!();
+    }
+}
+
+#[inline(never)]
+#[cold]
+fn abort() {
+    let _a = Abort;
+    panic!();
+}
diff --git a/third_party/rust/bytes-0.4.9/src/debug.rs b/third_party/rust/bytes-0.4.9/src/debug.rs
new file mode 100644
index 0000000000..f8b830a241
--- /dev/null
+++ b/third_party/rust/bytes-0.4.9/src/debug.rs
@@ -0,0 +1,40 @@
+use std::fmt;
+
+/// Alternative implementation of `fmt::Debug` for byte slice.
+///
+/// The standard `Debug` implementation for `[u8]` is a comma-separated list
+/// of numbers. Since a large amount of byte strings are in fact ASCII strings
+/// or contain a lot of ASCII strings (e.g. HTTP), it is convenient to print
+/// strings as ASCII when possible.
+///
+/// This struct wraps `&[u8]` just to override `fmt::Debug`.
+///
+/// `BsDebug` is not a part of the public API of the bytes crate.
+pub struct BsDebug<'a>(pub &'a [u8]); + +impl<'a> fmt::Debug for BsDebug<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + try!(write!(fmt, "b\"")); + for &c in self.0 { + // https://doc.rust-lang.org/reference.html#byte-escapes + if c == b'\n' { + try!(write!(fmt, "\\n")); + } else if c == b'\r' { + try!(write!(fmt, "\\r")); + } else if c == b'\t' { + try!(write!(fmt, "\\t")); + } else if c == b'\\' || c == b'"' { + try!(write!(fmt, "\\{}", c as char)); + } else if c == b'\0' { + try!(write!(fmt, "\\0")); + // ASCII printable + } else if c >= 0x20 && c < 0x7f { + try!(write!(fmt, "{}", c as char)); + } else { + try!(write!(fmt, "\\x{:02x}", c)); + } + } + try!(write!(fmt, "\"")); + Ok(()) + } +} diff --git a/third_party/rust/bytes-0.4.9/src/lib.rs b/third_party/rust/bytes-0.4.9/src/lib.rs new file mode 100644 index 0000000000..eccb8a3806 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/lib.rs @@ -0,0 +1,101 @@ +//! Provides abstractions for working with bytes. +//! +//! The `bytes` crate provides an efficient byte buffer structure +//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer +//! implementations ([`Buf`], [`BufMut`]). +//! +//! [`Buf`]: trait.Buf.html +//! [`BufMut`]: trait.BufMut.html +//! +//! # `Bytes` +//! +//! `Bytes` is an efficient container for storing and operating on contiguous +//! slices of memory. It is intended for use primarily in networking code, but +//! could have applications elsewhere as well. +//! +//! `Bytes` values facilitate zero-copy network programming by allowing multiple +//! `Bytes` objects to point to the same underlying memory. This is managed by +//! using a reference count to track when the memory is no longer needed and can +//! be freed. +//! +//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` +//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For +//! example: +//! +//! ```rust +//! use bytes::{BytesMut, BufMut, BigEndian}; +//! +//! let mut buf = BytesMut::with_capacity(1024); +//! buf.put(&b"hello world"[..]); +//! buf.put_u16::<BigEndian>(1234); +//! +//! let a = buf.take(); +//! assert_eq!(a, b"hello world\x04\xD2"[..]); +//! +//! buf.put(&b"goodbye world"[..]); +//! +//! let b = buf.take(); +//! assert_eq!(b, b"goodbye world"[..]); +//! +//! assert_eq!(buf.capacity(), 998); +//! ``` +//! +//! In the above example, only a single buffer of 1024 bytes is allocated. The handles +//! `a` and `b` will share the underlying buffer and maintain indices tracking +//! the view into the buffer represented by the handle. +//! +//! See the [struct docs] for more details. +//! +//! [struct docs]: struct.Bytes.html +//! +//! # `Buf`, `BufMut` +//! +//! These two traits provide read and write access to buffers. The underlying +//! storage may or may not be in contiguous memory. For example, `Bytes` is a +//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in +//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current +//! position in the underlying byte storage. When bytes are read or written, the +//! cursor is advanced. +//! +//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) +//! +//! ## Relation with `Read` and `Write` +//! +//! At first glance, it may seem that `Buf` and `BufMut` overlap in +//! functionality with `std::io::Read` and `std::io::Write`. However, they +//! serve different purposes. A buffer is the value that is provided as an +//! argument to `Read::read` and `Write::write`. 
`Read` and `Write` may then +//! perform a syscall, which has the potential of failing. Operations on `Buf` +//! and `BufMut` are infallible. + +#![deny(warnings, missing_docs, missing_debug_implementations)] +#![doc(html_root_url = "https://docs.rs/bytes/0.4.9")] + +extern crate byteorder; +extern crate iovec; + +pub mod buf; +pub use buf::{ + Buf, + BufMut, + IntoBuf, +}; +#[deprecated(since = "0.4.1", note = "moved to `buf` module")] +#[doc(hidden)] +pub use buf::{ + Reader, + Writer, + Take, +}; + +mod bytes; +mod debug; +pub use bytes::{Bytes, BytesMut}; + +#[deprecated] +pub use byteorder::{ByteOrder, BigEndian, LittleEndian}; + +// Optional Serde support +#[cfg(feature = "serde")] +#[doc(hidden)] +pub mod serde; diff --git a/third_party/rust/bytes-0.4.9/src/serde.rs b/third_party/rust/bytes-0.4.9/src/serde.rs new file mode 100644 index 0000000000..d45caff051 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/src/serde.rs @@ -0,0 +1,82 @@ +extern crate serde; + +use std::{cmp, fmt}; +use self::serde::{Serialize, Serializer, Deserialize, Deserializer, de}; +use super::{Bytes, BytesMut}; + +macro_rules! serde_impl { + ($ty:ident, $visitor_ty:ident) => ( + impl Serialize for $ty { + #[inline] + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where S: Serializer + { + serializer.serialize_bytes(&self) + } + } + + struct $visitor_ty; + + impl<'de> de::Visitor<'de> for $visitor_ty { + type Value = $ty; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("byte array") + } + + #[inline] + fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> + where V: de::SeqAccess<'de> + { + let len = cmp::min(seq.size_hint().unwrap_or(0), 4096); + let mut values = Vec::with_capacity(len); + + while let Some(value) = try!(seq.next_element()) { + values.push(value); + } + + Ok(values.into()) + } + + #[inline] + fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::from(v)) + } + + #[inline] + fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::from(v)) + } + + #[inline] + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::from(v)) + } + + #[inline] + fn visit_string<E>(self, v: String) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::from(v)) + } + } + + impl<'de> Deserialize<'de> for $ty { + #[inline] + fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error> + where D: Deserializer<'de> + { + deserializer.deserialize_byte_buf($visitor_ty) + } + } + ); +} + +serde_impl!(Bytes, BytesVisitor); +serde_impl!(BytesMut, BytesMutVisitor); diff --git a/third_party/rust/bytes-0.4.9/tests/test_buf.rs b/third_party/rust/bytes-0.4.9/tests/test_buf.rs new file mode 100644 index 0000000000..f25c25f2b5 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_buf.rs @@ -0,0 +1,58 @@ +extern crate bytes; +extern crate byteorder; +extern crate iovec; + +use bytes::Buf; +use iovec::IoVec; +use std::io::Cursor; + +#[test] +fn test_fresh_cursor_vec() { + let mut buf = Cursor::new(b"hello".to_vec()); + + assert_eq!(buf.remaining(), 5); + assert_eq!(buf.bytes(), b"hello"); + + buf.advance(2); + + assert_eq!(buf.remaining(), 3); + assert_eq!(buf.bytes(), b"llo"); + + buf.advance(3); + + assert_eq!(buf.remaining(), 0); + assert_eq!(buf.bytes(), b""); +} + +#[test] +fn test_get_u8() { + let mut buf = Cursor::new(b"\x21zomg"); + assert_eq!(0x21, buf.get_u8()); +} + +#[test] +fn test_get_u16() { + let buf = 
b"\x21\x54zomg"; + assert_eq!(0x2154, Cursor::new(buf).get_u16_be()); + assert_eq!(0x5421, Cursor::new(buf).get_u16_le()); +} + +#[test] +#[should_panic] +fn test_get_u16_buffer_underflow() { + let mut buf = Cursor::new(b"\x21"); + buf.get_u16_be(); +} + +#[test] +fn test_bufs_vec() { + let buf = Cursor::new(b"hello world"); + + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + + let mut dst: [&IoVec; 2] = + [b1.into(), b2.into()]; + + assert_eq!(1, buf.bytes_vec(&mut dst[..])); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_buf_mut.rs b/third_party/rust/bytes-0.4.9/tests/test_buf_mut.rs new file mode 100644 index 0000000000..2c8faa1043 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_buf_mut.rs @@ -0,0 +1,83 @@ +extern crate bytes; +extern crate byteorder; +extern crate iovec; + +use bytes::{BufMut, BytesMut}; +use iovec::IoVec; +use std::usize; +use std::fmt::Write; + +#[test] +fn test_vec_as_mut_buf() { + let mut buf = Vec::with_capacity(64); + + assert_eq!(buf.remaining_mut(), usize::MAX); + + unsafe { + assert!(buf.bytes_mut().len() >= 64); + } + + buf.put(&b"zomg"[..]); + + assert_eq!(&buf, b"zomg"); + + assert_eq!(buf.remaining_mut(), usize::MAX - 4); + assert_eq!(buf.capacity(), 64); + + for _ in 0..16 { + buf.put(&b"zomg"[..]); + } + + assert_eq!(buf.len(), 68); +} + +#[test] +fn test_put_u8() { + let mut buf = Vec::with_capacity(8); + buf.put::<u8>(33); + assert_eq!(b"\x21", &buf[..]); +} + +#[test] +fn test_put_u16() { + let mut buf = Vec::with_capacity(8); + buf.put_u16_be(8532); + assert_eq!(b"\x21\x54", &buf[..]); + + buf.clear(); + buf.put_u16_le(8532); + assert_eq!(b"\x54\x21", &buf[..]); +} + +#[test] +fn test_vec_advance_mut() { + // Regression test for carllerche/bytes#108. + let mut buf = Vec::with_capacity(8); + unsafe { + buf.advance_mut(12); + assert_eq!(buf.len(), 12); + assert!(buf.capacity() >= 12, "capacity: {}", buf.capacity()); + } +} + +#[test] +fn test_clone() { + let mut buf = BytesMut::with_capacity(100); + buf.write_str("this is a test").unwrap(); + let buf2 = buf.clone(); + + buf.write_str(" of our emergecy broadcast system").unwrap(); + assert!(buf != buf2); +} + +#[test] +fn test_bufs_vec_mut() { + use std::mem; + + let mut buf = BytesMut::from(&b"hello world"[..]); + + unsafe { + let mut dst: [&mut IoVec; 2] = mem::zeroed(); + assert_eq!(1, buf.bytes_vec_mut(&mut dst[..])); + } +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_bytes.rs b/third_party/rust/bytes-0.4.9/tests/test_bytes.rs new file mode 100644 index 0000000000..c0cba6b767 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_bytes.rs @@ -0,0 +1,719 @@ +extern crate bytes; + +use bytes::{Bytes, BytesMut, BufMut, IntoBuf}; + +const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb"; +const SHORT: &'static [u8] = b"hello world"; + +fn inline_cap() -> usize { + use std::mem; + 4 * mem::size_of::<usize>() - 1 +} + +fn is_sync<T: Sync>() {} +fn is_send<T: Send>() {} + +#[test] +fn test_bounds() { + is_sync::<Bytes>(); + is_sync::<BytesMut>(); + is_send::<Bytes>(); + is_send::<BytesMut>(); +} + +#[test] +fn from_slice() { + let a = Bytes::from(&b"abcdefgh"[..]); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); + + let a = BytesMut::from(&b"abcdefgh"[..]); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, 
Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); +} + +#[test] +fn fmt() { + let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); + let b = "b\"abcdefg\""; + + assert_eq!(a, b); + + let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); + assert_eq!(a, b); +} + +#[test] +fn fmt_write() { + use std::fmt::Write; + use std::iter::FromIterator; + let s = String::from_iter((0..10).map(|_| "abcdefg")); + + let mut a = BytesMut::with_capacity(64); + write!(a, "{}", &s[..64]).unwrap(); + assert_eq!(a, s[..64].as_bytes()); + + + let mut b = BytesMut::with_capacity(64); + write!(b, "{}", &s[..32]).unwrap(); + write!(b, "{}", &s[32..64]).unwrap(); + assert_eq!(b, s[..64].as_bytes()); + + + let mut c = BytesMut::with_capacity(64); + write!(c, "{}", s).unwrap_err(); + assert!(c.is_empty()); +} + +#[test] +fn len() { + let a = Bytes::from(&b"abcdefg"[..]); + assert_eq!(a.len(), 7); + + let a = BytesMut::from(&b"abcdefg"[..]); + assert_eq!(a.len(), 7); + + let a = Bytes::from(&b""[..]); + assert!(a.is_empty()); + + let a = BytesMut::from(&b""[..]); + assert!(a.is_empty()); +} + +#[test] +fn index() { + let a = Bytes::from(&b"hello world"[..]); + assert_eq!(a[0..5], *b"hello"); +} + +#[test] +fn slice() { + let a = Bytes::from(&b"hello world"[..]); + + let b = a.slice(3, 5); + assert_eq!(b, b"lo"[..]); + + let b = a.slice(0, 0); + assert_eq!(b, b""[..]); + + let b = a.slice(3, 3); + assert_eq!(b, b""[..]); + + let b = a.slice(a.len(), a.len()); + assert_eq!(b, b""[..]); + + let b = a.slice_to(5); + assert_eq!(b, b"hello"[..]); + + let b = a.slice_from(3); + assert_eq!(b, b"lo world"[..]); +} + +#[test] +#[should_panic] +fn slice_oob_1() { + let a = Bytes::from(&b"hello world"[..]); + a.slice(5, inline_cap() + 1); +} + +#[test] +#[should_panic] +fn slice_oob_2() { + let a = Bytes::from(&b"hello world"[..]); + a.slice(inline_cap() + 1, inline_cap() + 5); +} + +#[test] +fn split_off() { + let mut hello = Bytes::from(&b"helloworld"[..]); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); + + let mut hello = BytesMut::from(&b"helloworld"[..]); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); +} + +#[test] +#[should_panic] +fn split_off_oob() { + let mut hello = Bytes::from(&b"helloworld"[..]); + hello.split_off(inline_cap() + 1); +} + +#[test] +fn split_off_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let other = bytes.split_off(128); + + assert_eq!(bytes.len(), 0); + assert_eq!(bytes.capacity(), 128); + + assert_eq!(other.len(), 0); + assert_eq!(other.capacity(), 896); +} + +#[test] +fn split_off_to_loop() { + let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + + for i in 0..(s.len() + 1) { + { + let mut bytes = Bytes::from(&s[..]); + let off = bytes.split_off(i); + assert_eq!(i, bytes.len()); + let mut sum = Vec::new(); + sum.extend(&bytes); + sum.extend(&off); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = BytesMut::from(&s[..]); + let off = bytes.split_off(i); + assert_eq!(i, bytes.len()); + let mut sum = Vec::new(); + sum.extend(&bytes); + sum.extend(&off); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = Bytes::from(&s[..]); + let off = bytes.split_to(i); + assert_eq!(i, off.len()); + let mut sum = Vec::new(); + sum.extend(&off); + sum.extend(&bytes); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = BytesMut::from(&s[..]); + 
let off = bytes.split_to(i); + assert_eq!(i, off.len()); + let mut sum = Vec::new(); + sum.extend(&off); + sum.extend(&bytes); + assert_eq!(&s[..], &sum[..]); + } + } +} + +#[test] +fn split_to_1() { + // Inline + let mut a = Bytes::from(SHORT); + let b = a.split_to(4); + + assert_eq!(SHORT[4..], a); + assert_eq!(SHORT[..4], b); + + // Allocated + let mut a = Bytes::from(LONG); + let b = a.split_to(4); + + assert_eq!(LONG[4..], a); + assert_eq!(LONG[..4], b); + + let mut a = Bytes::from(LONG); + let b = a.split_to(30); + + assert_eq!(LONG[30..], a); + assert_eq!(LONG[..30], b); +} + +#[test] +fn split_to_2() { + let mut a = Bytes::from(LONG); + assert_eq!(LONG, a); + + let b = a.split_to(1); + + assert_eq!(LONG[1..], a); + drop(b); +} + +#[test] +#[should_panic] +fn split_to_oob() { + let mut hello = Bytes::from(&b"helloworld"[..]); + hello.split_to(inline_cap() + 1); +} + +#[test] +#[should_panic] +fn split_to_oob_mut() { + let mut hello = BytesMut::from(&b"helloworld"[..]); + hello.split_to(inline_cap() + 1); +} + +#[test] +fn split_to_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let other = bytes.split_to(128); + + assert_eq!(bytes.len(), 0); + assert_eq!(bytes.capacity(), 896); + + assert_eq!(other.len(), 0); + assert_eq!(other.capacity(), 128); +} + +#[test] +fn split_off_to_at_gt_len() { + fn make_bytes() -> Bytes { + let mut bytes = BytesMut::with_capacity(100); + bytes.put_slice(&[10, 20, 30, 40]); + bytes.freeze() + } + + use std::panic; + + make_bytes().split_to(4); + make_bytes().split_off(4); + + assert!(panic::catch_unwind(move || { + make_bytes().split_to(5); + }).is_err()); + + assert!(panic::catch_unwind(move || { + make_bytes().split_off(5); + }).is_err()); +} + +#[test] +fn fns_defined_for_bytes_mut() { + let mut bytes = BytesMut::from(&b"hello world"[..]); + + bytes.as_ptr(); + bytes.as_mut_ptr(); + + // Iterator + let v: Vec<u8> = bytes.iter().map(|b| *b).collect(); + assert_eq!(&v[..], bytes); +} + +#[test] +fn mut_into_buf() { + let mut v = vec![0, 0, 0, 0]; + let s = &mut v[..]; + s.into_buf().put_u32_le(42); +} + +#[test] +fn reserve_convert() { + // Inline -> Vec + let mut bytes = BytesMut::with_capacity(8); + bytes.put("hello"); + bytes.reserve(40); + assert_eq!(bytes.capacity(), 45); + assert_eq!(bytes, "hello"); + + // Inline -> Inline + let mut bytes = BytesMut::with_capacity(inline_cap()); + bytes.put("abcdefghijkl"); + + let a = bytes.split_to(10); + bytes.reserve(inline_cap() - 3); + assert_eq!(inline_cap(), bytes.capacity()); + + assert_eq!(bytes, "kl"); + assert_eq!(a, "abcdefghij"); + + // Vec -> Vec + let mut bytes = BytesMut::from(LONG); + bytes.reserve(64); + assert_eq!(bytes.capacity(), LONG.len() + 64); + + // Arc -> Vec + let mut bytes = BytesMut::from(LONG); + let a = bytes.split_to(30); + + bytes.reserve(128); + assert!(bytes.capacity() >= bytes.len() + 128); + + drop(a); +} + +#[test] +fn reserve_growth() { + let mut bytes = BytesMut::with_capacity(64); + bytes.put("hello world"); + let _ = bytes.take(); + + bytes.reserve(65); + assert_eq!(bytes.capacity(), 128); +} + +#[test] +fn reserve_allocates_at_least_original_capacity() { + let mut bytes = BytesMut::with_capacity(1024); + + for i in 0..1020 { + bytes.put(i as u8); + } + + let _other = bytes.take(); + + bytes.reserve(16); + assert_eq!(bytes.capacity(), 1024); +} + +#[test] +fn reserve_max_original_capacity_value() { + const SIZE: usize = 128 * 1024; + + let mut bytes = BytesMut::with_capacity(SIZE); + + for _ in 0..SIZE { + bytes.put(0u8); + } + + let _other = 
bytes.take(); + + bytes.reserve(16); + assert_eq!(bytes.capacity(), 64 * 1024); +} + +// Without either looking at the internals of the BytesMut or doing weird stuff +// with the memory allocator, there's no good way to automatically verify from +// within the program that this actually recycles memory. Instead, just exercise +// the code path to ensure that the results are correct. +#[test] +fn reserve_vec_recycling() { + let mut bytes = BytesMut::from(Vec::with_capacity(16)); + assert_eq!(bytes.capacity(), 16); + bytes.put("0123456789012345"); + bytes.advance(10); + assert_eq!(bytes.capacity(), 6); + bytes.reserve(8); + assert_eq!(bytes.capacity(), 16); +} + +#[test] +fn reserve_in_arc_unique_does_not_overallocate() { + let mut bytes = BytesMut::with_capacity(1000); + bytes.take(); + + // now bytes is Arc and refcount == 1 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(2001); + assert_eq!(2001, bytes.capacity()); +} + +#[test] +fn reserve_in_arc_unique_doubles() { + let mut bytes = BytesMut::with_capacity(1000); + bytes.take(); + + // now bytes is Arc and refcount == 1 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(1001); + assert_eq!(2000, bytes.capacity()); +} + +#[test] +fn reserve_in_arc_nonunique_does_not_overallocate() { + let mut bytes = BytesMut::with_capacity(1000); + let _copy = bytes.take(); + + // now bytes is Arc and refcount == 2 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(2001); + assert_eq!(2001, bytes.capacity()); +} + +#[test] +fn inline_storage() { + let mut bytes = BytesMut::with_capacity(inline_cap()); + let zero = [0u8; 64]; + + bytes.put(&zero[0..inline_cap()]); + assert_eq!(*bytes, zero[0..inline_cap()]); +} + +#[test] +fn extend_mut() { + let mut bytes = BytesMut::with_capacity(0); + bytes.extend(LONG); + assert_eq!(*bytes, LONG[..]); +} + +#[test] +fn extend_shr() { + let mut bytes = Bytes::new(); + bytes.extend(LONG); + assert_eq!(*bytes, LONG[..]); +} + +#[test] +fn extend_from_slice_mut() { + for &i in &[3, 34] { + let mut bytes = BytesMut::new(); + bytes.extend_from_slice(&LONG[..i]); + bytes.extend_from_slice(&LONG[i..]); + assert_eq!(LONG[..], *bytes); + } +} + +#[test] +fn extend_from_slice_shr() { + for &i in &[3, 34] { + let mut bytes = Bytes::new(); + bytes.extend_from_slice(&LONG[..i]); + bytes.extend_from_slice(&LONG[i..]); + assert_eq!(LONG[..], *bytes); + } +} + +#[test] +fn from_static() { + let mut a = Bytes::from_static(b"ab"); + let b = a.split_off(1); + + assert_eq!(a, b"a"[..]); + assert_eq!(b, b"b"[..]); +} + +#[test] +fn advance_inline() { + let mut a = Bytes::from(&b"hello world"[..]); + a.advance(6); + assert_eq!(a, &b"world"[..]); +} + +#[test] +fn advance_static() { + let mut a = Bytes::from_static(b"hello world"); + a.advance(6); + assert_eq!(a, &b"world"[..]); +} + +#[test] +fn advance_vec() { + let mut a = BytesMut::from(b"hello world boooo yah world zomg wat wat".to_vec()); + a.advance(16); + assert_eq!(a, b"o yah world zomg wat wat"[..]); + + a.advance(4); + assert_eq!(a, b"h world zomg wat wat"[..]); + + // Reserve some space. + a.reserve(1024); + assert_eq!(a, b"h world zomg wat wat"[..]); + + a.advance(6); + assert_eq!(a, b"d zomg wat wat"[..]); +} + +#[test] +#[should_panic] +fn advance_past_len() { + let mut a = BytesMut::from(b"hello world".to_vec()); + a.advance(20); +} + +#[test] +// Only run these tests on little endian systems. CI uses qemu for testing +// little endian... and qemu doesn't really support threading all that well. 
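+// The `Arc<Bytes>` used below is test scaffolding: it lets every thread +// borrow the *same* `Bytes` value, so the inner `(*buf).clone()` is what +// exercises the vec -> shared promotion concurrently.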
+#[cfg(target_endian = "little")] +fn stress() { + // Tests promoting a buffer from a vec -> shared in a concurrent situation + use std::sync::{Arc, Barrier}; + use std::thread; + + const THREADS: usize = 8; + const ITERS: usize = 1_000; + + for i in 0..ITERS { + let data = [i as u8; 256]; + let buf = Arc::new(Bytes::from(&data[..])); + + let barrier = Arc::new(Barrier::new(THREADS)); + let mut joins = Vec::with_capacity(THREADS); + + for _ in 0..THREADS { + let c = barrier.clone(); + let buf = buf.clone(); + + joins.push(thread::spawn(move || { + c.wait(); + let buf: Bytes = (*buf).clone(); + drop(buf); + })); + } + + for th in joins { + th.join().unwrap(); + } + + assert_eq!(*buf, data[..]); + } +} + +#[test] +fn partial_eq_bytesmut() { + let bytes = Bytes::from(&b"The quick red fox"[..]); + let bytesmut = BytesMut::from(&b"The quick red fox"[..]); + assert!(bytes == bytesmut); + assert!(bytesmut == bytes); + let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]); + assert!(bytes2 != bytesmut); + assert!(bytesmut != bytes2); +} + +#[test] +fn unsplit_basic() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaabbbcccddd"); + + let splitted = buf.split_off(6); + assert_eq!(b"aaabbb", &buf[..]); + assert_eq!(b"cccddd", &splitted[..]); + + buf.unsplit(splitted); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_empty_other() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaabbbcccddd"); + + // empty other + let other = BytesMut::new(); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_empty_self() { + // empty self + let mut buf = BytesMut::new(); + + let mut other = BytesMut::with_capacity(64); + other.extend_from_slice(b"aaabbbcccddd"); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn unsplit_inline_arc() { + let mut buf = BytesMut::with_capacity(8); //inline + buf.extend_from_slice(b"aaaabbbb"); + + let mut buf2 = BytesMut::with_capacity(64); + buf2.extend_from_slice(b"ccccddddeeee"); + + buf2.split_off(8); //arc + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_arc_inline() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeee"); + + buf.split_off(8); //arc + + let mut buf2 = BytesMut::with_capacity(8); //inline + buf2.extend_from_slice(b"ccccdddd"); + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); + +} + +#[test] +fn unsplit_both_inline() { + let mut buf = BytesMut::with_capacity(16); //inline + buf.extend_from_slice(b"aaaabbbbccccdddd"); + + let splitted = buf.split_off(8); // both inline + assert_eq!(b"aaaabbbb", &buf[..]); + assert_eq!(b"ccccdddd", &splitted[..]); + + buf.unsplit(splitted); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + + +#[test] +fn unsplit_arc_different() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeee"); + + buf.split_off(8); //arc + + let mut buf2 = BytesMut::with_capacity(64); + buf2.extend_from_slice(b"ccccddddeeee"); + + buf2.split_off(8); //arc + + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_arc_non_contiguous() { + let mut buf = BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); + + let mut buf2 = buf.split_off(8); //arc + + let buf3 = buf2.split_off(4); //arc + + buf.unsplit(buf3); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn unsplit_two_split_offs() { + let mut buf = 
BytesMut::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbccccdddd"); + + let mut buf2 = buf.split_off(8); //arc + let buf3 = buf2.split_off(4); //arc + + buf2.unsplit(buf3); + buf.unsplit(buf2); + assert_eq!(b"aaaabbbbccccdddd", &buf[..]); +} + +#[test] +fn from_iter_no_size_hint() { + use std::iter; + + let mut expect = vec![]; + + let actual: Bytes = iter::repeat(b'x') + .scan(100, |cnt, item| { + if *cnt >= 1 { + *cnt -= 1; + expect.push(item); + Some(item) + } else { + None + } + }) + .collect(); + + assert_eq!(&actual[..], &expect[..]); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_chain.rs b/third_party/rust/bytes-0.4.9/tests/test_chain.rs new file mode 100644 index 0000000000..2789e7c060 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_chain.rs @@ -0,0 +1,122 @@ +extern crate bytes; +extern crate iovec; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use bytes::buf::Chain; +use iovec::IoVec; +use std::io::Cursor; + +#[test] +fn collect_two_bufs() { + let a = Cursor::new(Bytes::from(&b"hello"[..])); + let b = Cursor::new(Bytes::from(&b"world"[..])); + + let res: Vec<u8> = a.chain(b).collect(); + assert_eq!(res, &b"helloworld"[..]); +} + +#[test] +fn writing_chained() { + let mut a = BytesMut::with_capacity(64); + let mut b = BytesMut::with_capacity(64); + + { + let mut buf = Chain::new(&mut a, &mut b); + + for i in 0..128 { + buf.put(i as u8); + } + } + + assert_eq!(64, a.len()); + assert_eq!(64, b.len()); + + for i in 0..64 { + let expect = i as u8; + assert_eq!(expect, a[i]); + assert_eq!(expect + 64, b[i]); + } +} + +#[test] +fn iterating_two_bufs() { + let a = Cursor::new(Bytes::from(&b"hello"[..])); + let b = Cursor::new(Bytes::from(&b"world"[..])); + + let res: Vec<u8> = a.chain(b).iter().collect(); + assert_eq!(res, &b"helloworld"[..]); +} + +#[test] +fn vectored_read() { + let a = Cursor::new(Bytes::from(&b"hello"[..])); + let b = Cursor::new(Bytes::from(&b"world"[..])); + + let mut buf = a.chain(b); + + { + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; + + assert_eq!(2, buf.bytes_vec(&mut iovecs)); + assert_eq!(iovecs[0][..], b"hello"[..]); + assert_eq!(iovecs[1][..], b"world"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); + } + + buf.advance(2); + + { + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; + + assert_eq!(2, buf.bytes_vec(&mut iovecs)); + assert_eq!(iovecs[0][..], b"llo"[..]); + assert_eq!(iovecs[1][..], b"world"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); + } + + buf.advance(3); + + { + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; + + assert_eq!(1, buf.bytes_vec(&mut iovecs)); + assert_eq!(iovecs[0][..], b"world"[..]); + assert_eq!(iovecs[1][..], b"\0"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); + } + + buf.advance(3); + + { + let b1: &[u8] = &mut [0]; + let b2: &[u8] = &mut [0]; + let b3: &[u8] = &mut [0]; + let b4: &[u8] = &mut [0]; + let mut iovecs: [&IoVec; 4] = + [b1.into(), b2.into(), b3.into(), b4.into()]; + + assert_eq!(1, buf.bytes_vec(&mut iovecs)); + assert_eq!(iovecs[0][..], 
b"ld"[..]); + assert_eq!(iovecs[1][..], b"\0"[..]); + assert_eq!(iovecs[2][..], b"\0"[..]); + assert_eq!(iovecs[3][..], b"\0"[..]); + } +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_debug.rs b/third_party/rust/bytes-0.4.9/tests/test_debug.rs new file mode 100644 index 0000000000..9945a2835b --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_debug.rs @@ -0,0 +1,35 @@ +extern crate bytes; + +use bytes::Bytes; + +#[test] +fn fmt() { + let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect(); + + let expected = "b\"\ + \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\ + \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\ + \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\ + \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\ + \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\ + @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\ + `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\ + \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\ + \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\ + \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\ + \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\ + \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\ + \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\ + \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\ + \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\ + \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\ + \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\ + \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\ + \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\ + \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\ + \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\ + \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\ + \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\""; + + assert_eq!(expected, format!("{:?}", Bytes::from(vec))); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_from_buf.rs b/third_party/rust/bytes-0.4.9/tests/test_from_buf.rs new file mode 100644 index 0000000000..216bf12328 --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_from_buf.rs @@ -0,0 +1,34 @@ +extern crate bytes; + +use bytes::{Buf, Bytes, BytesMut}; +use std::io::Cursor; + +const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb"; +const SHORT: &'static [u8] = b"hello world"; + +#[test] +fn collect_to_vec() { + let buf: Vec<u8> = Cursor::new(SHORT).collect(); + assert_eq!(buf, SHORT); + + let buf: Vec<u8> = Cursor::new(LONG).collect(); + assert_eq!(buf, LONG); +} + +#[test] +fn collect_to_bytes() { + let buf: Bytes = Cursor::new(SHORT).collect(); + assert_eq!(buf, SHORT); + + let buf: Bytes = Cursor::new(LONG).collect(); + assert_eq!(buf, LONG); +} + +#[test] +fn collect_to_bytes_mut() { + let buf: BytesMut = Cursor::new(SHORT).collect(); + assert_eq!(buf, SHORT); + + let buf: BytesMut = Cursor::new(LONG).collect(); + assert_eq!(buf, LONG); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_iter.rs b/third_party/rust/bytes-0.4.9/tests/test_iter.rs new file mode 100644 index 0000000000..c16dbf694b --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_iter.rs @@ -0,0 +1,22 @@ +extern crate bytes; + +use bytes::{Buf, IntoBuf, Bytes}; + +#[test] +fn iter_len() { + let buf = Bytes::from(&b"hello world"[..]).into_buf(); + let iter = buf.iter(); + + assert_eq!(iter.size_hint(), (11, Some(11))); + assert_eq!(iter.len(), 11); +} + + +#[test] +fn empty_iter_len() { + let buf = Bytes::from(&b""[..]).into_buf(); + let iter = buf.iter(); + + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.len(), 0); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_serde.rs b/third_party/rust/bytes-0.4.9/tests/test_serde.rs new file mode 100644 index 0000000000..ff440242f4 --- /dev/null +++ 
b/third_party/rust/bytes-0.4.9/tests/test_serde.rs @@ -0,0 +1,21 @@ +#![cfg(feature = "serde")] + +extern crate bytes; +extern crate serde_test; +use serde_test::{Token, assert_tokens}; + +#[test] +fn test_ser_de_empty() { + let b = bytes::Bytes::new(); + assert_tokens(&b, &[Token::Bytes(b"")]); + let b = bytes::BytesMut::with_capacity(0); + assert_tokens(&b, &[Token::Bytes(b"")]); +} + +#[test] +fn test_ser_de() { + let b = bytes::Bytes::from(&b"bytes"[..]); + assert_tokens(&b, &[Token::Bytes(b"bytes")]); + let b = bytes::BytesMut::from(&b"bytes"[..]); + assert_tokens(&b, &[Token::Bytes(b"bytes")]); +} diff --git a/third_party/rust/bytes-0.4.9/tests/test_take.rs b/third_party/rust/bytes-0.4.9/tests/test_take.rs new file mode 100644 index 0000000000..93e0c6c5ab --- /dev/null +++ b/third_party/rust/bytes-0.4.9/tests/test_take.rs @@ -0,0 +1,13 @@ +extern crate bytes; + +use bytes::Buf; +use std::io::Cursor; + +#[test] +fn long_take() { + // Tests that take with a size greater than the buffer length will not + // overrun the buffer. Regression test for #138. + let buf = Cursor::new(b"hello world").take(100); + assert_eq!(11, buf.remaining()); + assert_eq!(b"hello world", buf.bytes()); +} diff --git a/third_party/rust/bytes/.cargo-checksum.json b/third_party/rust/bytes/.cargo-checksum.json new file mode 100644 index 0000000000..8883d9092d --- /dev/null +++ b/third_party/rust/bytes/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"4faf2b723ed25868249363523d3506a939810e53877d7a68f72b705564e7200a","Cargo.toml":"52ab465c70fd369a72545d6fd12f5700edf6b741bfb26ef49076cd74770301a8","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"c2aac235762c99395ae437e5f561d135a06e390a98f74baf80fb4c71dfb91ece","azure-pipelines.yml":"80098a973fbec019ae6da61ffe075047371ebb574acf4e726379628449a77016","benches/buf.rs":"7cfbe40095c70dfc42ebe1ed2cb59c84b557a89c09e8925842efd76be226bd12","benches/bytes.rs":"dd7a4c89e1bb1d7490d0532e19c50634c118bfbfd32d5b1c189b4f96fcce381e","benches/bytes_mut.rs":"e2510665597135634c96fcb85e11519a8cf0363d51460d87821229cf1745b16b","ci/azure-cross-compile.yml":"93d711ef0d66262f762624f82deb0b61afd69637e9a6cfe38d18ad84cd09781d","ci/azure-deploy-docs.yml":"fce86e75cb8bc61aca7513cd6afa5ebe0fff8963beda7d6775e341945bec7eb2","ci/azure-install-rust.yml":"898f3dd92859375bdc14b7449a9da56860936d0e77e9de5d2505663d22abd95e","ci/azure-loom.yml":"c1e8782e855b27d26c022bcf2b34239ed5b0a7de2802de68fd7140566e175317","ci/azure-test-stable.yml":"e8a264a813f17b62db1ca1c7e34ba1842a87cdc5ad4a591c1643af0a8a4057f6","ci/azure-tsan.yml":"3996de625bf276ee16cc815809a3c312d5e9fe62424c38d2e1bc97614caf7df3","ci/tsan":"5194270c4e37b1a72e890c98eb2a4aae5f5506fb26a67af3d2834360d2e3d3c2","src/buf/buf_impl.rs":"d921c3171094f824bba4ec3bd69f07ce47257af257741a3cb0ff96887f5f5bd0","src/buf/buf_mut.rs":"289a9348aa2788e0cc12419d311c89c0e87a5e84d62bd8cd71f045924eb0349f","src/buf/ext/chain.rs":"d526cd39d870b7ae8c08e3bd2bc9e7770e9d014b9d9360246dd42c236b6400db","src/buf/ext/limit.rs":"99a42933ac6e309ee5b87818f9560ff041a3e388e8cef18b78ccfd00e3c5eec9","src/buf/ext/mod.rs":"aa2b370a4b44cd7c56ef7c5b07bdaf3723efe3cc465cef358df56433881503b3","src/buf/ext/reader.rs":"d48f07cb1ae0404a224162509fd356eb217b5f8ab020403467491445631616b1","src/buf/ext/take.rs":"fa1009c96175fc67a66f5a8d013140fed7cf0199fefe49bcd4ace82b7a82741b","src/buf/ext/writer.rs":"f01022d4589cee78e36c96032d01e68b6d559062d549e35132a3af869099a2d0","src/buf/iter.rs":"6de36052c0f428d912cea4055fd5c027038f70d369e881e42b6ada6aa9ea92c2","src/buf/mo
d.rs":"4f66903ca61fe88513c23664a4f33f26c00e4218fbc607e7f52981ba66b90456","src/buf/vec_deque.rs":"5a4063961d10380c1ab3681f8b3f6201112766d9f57a63e2861dc9f2b134668d","src/bytes.rs":"5af1de291faa0344fd7ebf6c1a5834f04aa9f9a7f1b405c8173c31819dd27ca2","src/bytes_mut.rs":"28af39ed6576df6be1c0e57d526ba4f7dd9d50d0d7b0767a3da54940f9fb3417","src/debug.rs":"0875de8307c223bce68e861bc78917e0ad7ef00d75966c0151a0b1aa83a6521a","src/hex.rs":"39c8ee531a45a25b8ef085b4279a9ba7f3b488e4d36c4f80d8769e04b1e51bfd","src/lib.rs":"7fedc5dee1f1d6968ccdccc84514003b1293a22a4b712b4557b49fa57d0752b2","src/loom.rs":"70263b3847d1e4960450a64cb34a87947eaa73755b45977d151265c13ebe4598","src/serde.rs":"c42e0644bed431852445433ac0d6e46f04891e40c046456350323dd3f7b8cf1c","tests/test_buf.rs":"dd3a83218bf5bcc277a8aa1c59c7ed6deeb7e752252b01bce5be4219e65a3e4f","tests/test_buf_mut.rs":"de50fcb03c984f299a84131829b72e351263541c592eec2c23e7ff4504c8e376","tests/test_bytes.rs":"a3b429df530ad90d450d236e893705b218b0319d61b074e9445715df56a15416","tests/test_chain.rs":"d3dab042b20b35e865af1101d78db002878a6604a0a4f4b7901bb1ee98f60684","tests/test_debug.rs":"5b425e056a32d0319d1857b54c88cf58952397bda6fee26b39c624d6c1444eee","tests/test_iter.rs":"95c531b984bcd9b60222b31558925f9662a38b409e731e4aaaafa904b1a64896","tests/test_reader.rs":"1b782d370c757dac14d59df1c4432a25fd8209cbe31b07fa4c380f5b82eec409","tests/test_serde.rs":"2cd4426bfa3a886745dd6958aab21c3493d1116b961acbbf35ec2866c2168a52","tests/test_take.rs":"998d16facf37fa0b2358e7aa42380279d4466d8dde2a3f8c1ae8a082bb37b180"},"package":"10004c15deb332055f7a4a208190aed362cf9a7c2f6ab70a305fba50e1105f38"}
\ No newline at end of file diff --git a/third_party/rust/bytes/CHANGELOG.md b/third_party/rust/bytes/CHANGELOG.md new file mode 100644 index 0000000000..960bd79ed4 --- /dev/null +++ b/third_party/rust/bytes/CHANGELOG.md @@ -0,0 +1,138 @@ +# 0.5.3 (December 12, 2019) + +### Added +- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337). + +### Fix +- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec<u8>` (#341, #342). +- Calling `Bytes::truncate` with a size larger than the length will no longer clear the `Bytes` (#333). + +# 0.5.2 (November 27, 2019) + +### Added +- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325). + +# 0.5.1 (November 25, 2019) + +### Fix +- Growth documentation for `BytesMut` (#321) + +# 0.5.0 (November 25, 2019) + +### Fix +- Potential overflow in `copy_to_slice` + +### Changed +- Increased minimum supported Rust version to 1.39. +- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298) +- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns + `usize::MAX` (#316). +- `BufMut::bytes_mut` returns `&mut [MaybeUninit<u8>]` to reflect the unknown + initialization state (#305). +- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]` + respectively (#261). +- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306). +- `BufMutExt::limit` (#309). +- `Bytes::slice` takes a `RangeBounds` argument (#265). +- `Bytes::from_static` is now a `const fn` (#311). +- A multitude of smaller performance optimizations. + +### Added +- `no_std` support (#281). +- `get_*`, `put_*`, `get_*_le`, and `put_*_le` accessors for handling byte order. +- `BorrowMut` implementation for `BytesMut` (#185). + +### Removed +- `IntoBuf` (#288). +- `Buf` implementation for `&str` (#301). +- `byteorder` dependency (#280). +- `iovec` dependency, use `std::IoSlice` instead (#263). +- optional `either` dependency (#315). +- optional `i128` feature -- now available on stable. (#276). + +# 0.4.12 (March 6, 2019) + +### Added +- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244). +- Implement `Buf` for `VecDeque` (#249). + +# 0.4.11 (November 17, 2018) + +* Use raw pointers for potentially racy loads (#233). +* Implement `BufRead` for `buf::Reader` (#232). +* Documentation tweaks (#234). + +# 0.4.10 (September 4, 2018) + +* impl `Buf` and `BufMut` for `Either` (#225). +* Add `Bytes::slice_ref` (#208). + +# 0.4.9 (July 12, 2018) + +* Add 128 bit number support behind a feature flag (#209). +* Implement `IntoBuf` for `&mut [u8]` + +# 0.4.8 (May 25, 2018) + +* Fix panic in `BytesMut` `FromIterator` implementation. +* Bytes: Recycle space when reserving space in vec mode (#197). +* Bytes: Add resize fn (#203). + +# 0.4.7 (April 27, 2018) + +* Make `Buf` and `BufMut` usable as trait objects (#186). +* impl BorrowMut for BytesMut (#185). +* Improve accessor performance (#195). + +# 0.4.6 (January 8, 2018) + +* Implement FromIterator for Bytes/BytesMut (#148). +* Add `advance` fn to Bytes/BytesMut (#166). +* Add `unsplit` fn to `BytesMut` (#162, #173). +* Improvements to Bytes split fns (#92). + +# 0.4.5 (August 12, 2017) + +* Fix range bug in `Take::bytes` +* Misc performance improvements +* Add extra `PartialEq` implementations. 
+* Add `Bytes::with_capacity` +* Implement `AsMut<[u8]>` for `BytesMut` + +# 0.4.4 (May 26, 2017) + +* Add serde support behind feature flag +* Add `extend_from_slice` on `Bytes` and `BytesMut` +* Add `truncate` and `clear` on `Bytes` +* Misc additional std trait implementations +* Misc performance improvements + +# 0.4.3 (April 30, 2017) + +* Fix Vec::advance_mut bug +* Bump minimum Rust version to 1.15 +* Misc performance tweaks + +# 0.4.2 (April 5, 2017) + +* Misc performance tweaks +* Improved `Debug` implementation for `Bytes` +* Avoid some incorrect assert panics + +# 0.4.1 (March 15, 2017) + +* Expose `buf` module and have most types available from there vs. root. +* Implement `IntoBuf` for `T: Buf`. +* Add `FromBuf` and `Buf::collect`. +* Add iterator adapter for `Buf`. +* Add scatter/gather support to `Buf` and `BufMut`. +* Add `Buf::chain`. +* Reduce allocations on repeated calls to `BytesMut::reserve`. +* Implement `Debug` for more types. +* Remove `Source` in favor of `IntoBuf`. +* Implement `Extend` for `BytesMut`. + + +# 0.4.0 (February 24, 2017) + +* Initial release diff --git a/third_party/rust/bytes/Cargo.toml b/third_party/rust/bytes/Cargo.toml new file mode 100644 index 0000000000..2a33deff79 --- /dev/null +++ b/third_party/rust/bytes/Cargo.toml @@ -0,0 +1,36 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "bytes" +version = "0.5.3" +authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"] +description = "Types and traits for working with bytes" +documentation = "https://docs.rs/bytes" +readme = "README.md" +keywords = ["buffers", "zero-copy", "io"] +categories = ["network-programming", "data-structures"] +license = "MIT" +repository = "https://github.com/tokio-rs/bytes" +[dependencies.serde] +version = "1.0" +optional = true +[dev-dependencies.loom] +version = "0.2.10" + +[dev-dependencies.serde_test] +version = "1.0" + +[features] +default = ["std"] +std = [] diff --git a/third_party/rust/bytes/LICENSE b/third_party/rust/bytes/LICENSE new file mode 100644 index 0000000000..58fb29a123 --- /dev/null +++ b/third_party/rust/bytes/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2018 Carl Lerche + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/bytes/README.md b/third_party/rust/bytes/README.md new file mode 100644 index 0000000000..afc2ed21cb --- /dev/null +++ b/third_party/rust/bytes/README.md @@ -0,0 +1,48 @@ +# Bytes + +A utility library for working with bytes. + +[![Crates.io][crates-badge]][crates-url] +[![Build Status][azure-badge]][azure-url] + +[crates-badge]: https://img.shields.io/crates/v/bytes.svg +[crates-url]: https://crates.io/crates/bytes +[azure-badge]: https://dev.azure.com/tokio-rs/bytes/_apis/build/status/tokio-rs.bytes?branchName=master +[azure-url]: https://dev.azure.com/tokio-rs/bytes/_build/latest?definitionId=3&branchName=master + +[Documentation](https://docs.rs/bytes) + +## Usage + +To use `bytes`, first add this to your `Cargo.toml`: + +```toml +[dependencies] +bytes = "0.5" +``` + +Next, add this to your crate: + +```rust +use bytes::{Bytes, BytesMut, Buf, BufMut}; +``` + +## Serde support + +Serde support is optional and disabled by default. To enable it, use the `serde` feature: + +```toml +[dependencies] +bytes = { version = "0.5", features = ["serde"] } +``` + +## License + +This project is licensed under the [MIT license](LICENSE). + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in `bytes` by you, shall be licensed as MIT, without any additional +terms or conditions. + diff --git a/third_party/rust/bytes/azure-pipelines.yml b/third_party/rust/bytes/azure-pipelines.yml new file mode 100644 index 0000000000..18b59745d1 --- /dev/null +++ b/third_party/rust/bytes/azure-pipelines.yml @@ -0,0 +1,68 @@ +trigger: ["master"] +pr: ["master"] + +jobs: +# Check formatting +# - template: ci/azure-rustfmt.yml +# parameters: +# name: rustfmt + +# Apply clippy lints +# - template: ci/azure-clippy.yml +# parameters: +# name: clippy + +# This represents the minimum Rust version supported by +# Bytes. Updating this should be done in a dedicated PR. +# +# Tests are not run as tests may require newer versions of +# rust. 
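+# +# (Roughly equivalent to running `cargo +1.39.0 check` locally, assuming the +# 1.39.0 toolchain has been installed with rustup.)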
+- template: ci/azure-test-stable.yml + parameters: + name: minrust + rust_version: 1.39.0 + cmd: check + +# Stable +- template: ci/azure-test-stable.yml + parameters: + name: stable + cross: true + features: + - serde + +# Nightly +- template: ci/azure-test-stable.yml + parameters: + name: nightly + # Pin nightly to avoid being impacted by breakage + rust_version: nightly-2019-09-25 + benches: true + +# Run tests on some extra platforms +- template: ci/azure-cross-compile.yml + parameters: + name: cross + +# Sanitizers +- template: ci/azure-tsan.yml + parameters: + name: tsan + rust_version: nightly + +# Loom +- template: ci/azure-loom.yml + parameters: + name: loom + rust_version: stable + + +- template: ci/azure-deploy-docs.yml + parameters: + dependsOn: + # - rustfmt + # - clippy + - stable + - nightly + - minrust + - cross diff --git a/third_party/rust/bytes/benches/buf.rs b/third_party/rust/bytes/benches/buf.rs new file mode 100644 index 0000000000..0c9a1d9557 --- /dev/null +++ b/third_party/rust/bytes/benches/buf.rs @@ -0,0 +1,187 @@ +#![feature(test)] +#![deny(warnings, rust_2018_idioms)] + +extern crate test; + +use test::Bencher; +use bytes::Buf; + +/// Dummy Buf implementation +struct TestBuf { + buf: &'static [u8], + readlens: &'static [usize], + init_pos: usize, + pos: usize, + readlen_pos: usize, + readlen: usize, +} +impl TestBuf { + fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf { + let mut buf = TestBuf { + buf, + readlens, + init_pos, + pos: 0, + readlen_pos: 0, + readlen: 0, + }; + buf.reset(); + buf + } + fn reset(&mut self) { + self.pos = self.init_pos; + self.readlen_pos = 0; + self.next_readlen(); + } + /// Compute the length of the next read : + /// - use the next value specified in readlens (capped by remaining) if any + /// - else the remaining + fn next_readlen(&mut self) { + self.readlen = self.buf.len() - self.pos; + if let Some(readlen) = self.readlens.get(self.readlen_pos) { + self.readlen = std::cmp::min(self.readlen, *readlen); + self.readlen_pos += 1; + } + } +} +impl Buf for TestBuf { + fn remaining(&self) -> usize { + return self.buf.len() - self.pos; + } + fn advance(&mut self, cnt: usize) { + self.pos += cnt; + assert!(self.pos <= self.buf.len()); + self.next_readlen(); + } + fn bytes(&self) -> &[u8] { + if self.readlen == 0 { + Default::default() + } else { + &self.buf[self.pos..self.pos + self.readlen] + } + } +} + +/// Dummy Buf implementation +/// version with methods forced to not be inlined (to simulate costly calls) +struct TestBufC { + inner: TestBuf, +} +impl TestBufC { + fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC { + TestBufC { + inner: TestBuf::new(buf, readlens, init_pos), + } + } + fn reset(&mut self) { + self.inner.reset() + } +} +impl Buf for TestBufC { + #[inline(never)] + fn remaining(&self) -> usize { + self.inner.remaining() + } + #[inline(never)] + fn advance(&mut self, cnt: usize) { + self.inner.advance(cnt) + } + #[inline(never)] + fn bytes(&self) -> &[u8] { + self.inner.bytes() + } +} + +macro_rules! 
bench { + ($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => ( + #[bench] + fn $fname(b: &mut Bencher) { + let mut bufs = [ + $testbuf::new(&[1u8; 8+0], $readlens, 0), + $testbuf::new(&[1u8; 8+1], $readlens, 1), + $testbuf::new(&[1u8; 8+2], $readlens, 2), + $testbuf::new(&[1u8; 8+3], $readlens, 3), + $testbuf::new(&[1u8; 8+4], $readlens, 4), + $testbuf::new(&[1u8; 8+5], $readlens, 5), + $testbuf::new(&[1u8; 8+6], $readlens, 6), + $testbuf::new(&[1u8; 8+7], $readlens, 7), + ]; + b.iter(|| { + for i in 0..8 { + bufs[i].reset(); + let buf: &mut dyn Buf = &mut bufs[i]; // type erasure + test::black_box(buf.$method($($arg,)*)); + } + }) + } + ); + ($fname:ident, slice, $method:ident $(,$arg:expr)*) => ( + #[bench] + fn $fname(b: &mut Bencher) { + // buf must be long enough for one read of 8 bytes starting at pos 7 + let arr = [1u8; 8+7]; + b.iter(|| { + for i in 0..8 { + let mut buf = &arr[i..]; + let buf = &mut buf as &mut dyn Buf; // type erasure + test::black_box(buf.$method($($arg,)*)); + } + }) + } + ); + ($fname:ident, option) => ( + #[bench] + fn $fname(b: &mut Bencher) { + let data = [1u8; 1]; + b.iter(|| { + for _ in 0..8 { + let mut buf = Some(data); + let buf = &mut buf as &mut dyn Buf; // type erasure + test::black_box(buf.get_u8()); + } + }) + } + ); +} + +macro_rules! bench_group { + ($method:ident $(,$arg:expr)*) => ( + bench!(slice, slice, $method $(,$arg)*); + bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*); + bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*); + bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*); + bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*); + // bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*); + // bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*); + ); +} + +mod get_u8 { + use super::*; + bench_group!(get_u8); + bench!(option, option); +} +mod get_u16 { + use super::*; + bench_group!(get_u16); +} +mod get_u32 { + use super::*; + bench_group!(get_u32); +} +mod get_u64 { + use super::*; + bench_group!(get_u64); +} +mod get_f32 { + use super::*; + bench_group!(get_f32); +} +mod get_f64 { + use super::*; + bench_group!(get_f64); +} +mod get_uint24 { + use super::*; + bench_group!(get_uint, 3); +} diff --git a/third_party/rust/bytes/benches/bytes.rs b/third_party/rust/bytes/benches/bytes.rs new file mode 100644 index 0000000000..9c36e6081b --- /dev/null +++ b/third_party/rust/bytes/benches/bytes.rs @@ -0,0 +1,118 @@ +#![feature(test)] +#![deny(warnings, rust_2018_idioms)] + +extern crate test; + +use test::Bencher; +use bytes::Bytes; + +#[bench] +fn deref_unique(b: &mut Bencher) { + let buf = Bytes::from(vec![0; 1024]); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_shared(b: &mut Bencher) { + let buf = Bytes::from(vec![0; 1024]); + let _b2 = buf.clone(); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_static(b: &mut Bencher) { + let buf = Bytes::from_static(b"hello world"); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn clone_static(b: &mut Bencher) { + let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_shared(b: &mut Bencher) { + let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 
0987654321".to_vec()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn clone_arc_vec(b: &mut Bencher) { + use std::sync::Arc; + let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec()); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn from_long_slice(b: &mut Bencher) { + let data = [0u8; 128]; + b.bytes = data.len() as u64; + b.iter(|| { + let buf = Bytes::copy_from_slice(&data[..]); + test::black_box(buf); + }) +} + +#[bench] +fn slice_empty(b: &mut Bencher) { + b.iter(|| { + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + test::black_box(b.slice(i % 100..i % 100)); + } + }) +} + +#[bench] +fn slice_short_from_arc(b: &mut Bencher) { + b.iter(|| { + // `clone` is to convert to ARC + let b = Bytes::from(vec![17; 1024]).clone(); + for i in 0..1000 { + test::black_box(b.slice(1..2 + i % 10)); + } + }) +} + +#[bench] +fn split_off_and_drop(b: &mut Bencher) { + b.iter(|| { + for _ in 0..1024 { + let v = vec![10; 200]; + let mut b = Bytes::from(v); + test::black_box(b.split_off(100)); + test::black_box(b); + } + }) +} diff --git a/third_party/rust/bytes/benches/bytes_mut.rs b/third_party/rust/bytes/benches/bytes_mut.rs new file mode 100644 index 0000000000..ded1d14864 --- /dev/null +++ b/third_party/rust/bytes/benches/bytes_mut.rs @@ -0,0 +1,249 @@ +#![feature(test)] +#![deny(warnings, rust_2018_idioms)] + +extern crate test; + +use test::Bencher; +use bytes::{BufMut, BytesMut}; + +#[bench] +fn alloc_small(b: &mut Bencher) { + b.iter(|| { + for _ in 0..1024 { + test::black_box(BytesMut::with_capacity(12)); + } + }) +} + +#[bench] +fn alloc_mid(b: &mut Bencher) { + b.iter(|| { + test::black_box(BytesMut::with_capacity(128)); + }) +} + +#[bench] +fn alloc_big(b: &mut Bencher) { + b.iter(|| { + test::black_box(BytesMut::with_capacity(4096)); + }) +} + + +#[bench] +fn deref_unique(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_unique_unroll(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..128 { + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_shared(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(4096); + buf.put(&[0u8; 1024][..]); + let _b2 = buf.split_off(1024); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&buf[..]); + } + }) +} + +#[bench] +fn deref_two(b: &mut Bencher) { + let mut buf1 = BytesMut::with_capacity(8); + buf1.put(&[0u8; 8][..]); + + let mut buf2 = BytesMut::with_capacity(4096); + buf2.put(&[0u8; 1024][..]); + + b.iter(|| { + for _ in 0..512 { + test::black_box(&buf1[..]); + test::black_box(&buf2[..]); + } + }) +} + +#[bench] +fn clone_frozen(b: &mut Bencher) { + let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..]).split().freeze(); + + b.iter(|| { + for _ in 0..1024 { + test::black_box(&bytes.clone()); + } + }) +} + +#[bench] +fn alloc_write_split_to_mid(b: &mut Bencher) { + b.iter(|| { + let mut buf = BytesMut::with_capacity(128); + buf.put_slice(&[0u8; 64]); + test::black_box(buf.split_to(64)); + }) +} + +#[bench] +fn drain_write_drain(b: 
&mut Bencher) { + let data = [0u8; 128]; + + b.iter(|| { + let mut buf = BytesMut::with_capacity(1024); + let mut parts = Vec::with_capacity(8); + + for _ in 0..8 { + buf.put(&data[..]); + parts.push(buf.split_to(128)); + } + + test::black_box(parts); + }) +} + +#[bench] +fn fmt_write(b: &mut Bencher) { + use std::fmt::Write; + let mut buf = BytesMut::with_capacity(128); + let s = "foo bar baz quux lorem ipsum dolor et"; + + b.bytes = s.len() as u64; + b.iter(|| { + let _ = write!(buf, "{}", s); + test::black_box(&buf); + unsafe { buf.set_len(0); } + }) +} + +#[bench] +fn bytes_mut_extend(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(256); + let data = [33u8; 32]; + + b.bytes = data.len() as u64 * 4; + b.iter(|| { + for _ in 0..4 { + buf.extend(&data); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +// BufMut for BytesMut vs Vec<u8> + +#[bench] +fn put_slice_bytes_mut(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(256); + let data = [33u8; 32]; + + b.bytes = data.len() as u64 * 4; + b.iter(|| { + for _ in 0..4 { + buf.put_slice(&data); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +#[bench] +fn put_u8_bytes_mut(b: &mut Bencher) { + let mut buf = BytesMut::with_capacity(256); + let cnt = 128; + + b.bytes = cnt as u64; + b.iter(|| { + for _ in 0..cnt { + buf.put_u8(b'x'); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +#[bench] +fn put_slice_vec(b: &mut Bencher) { + let mut buf = Vec::<u8>::with_capacity(256); + let data = [33u8; 32]; + + b.bytes = data.len() as u64 * 4; + b.iter(|| { + for _ in 0..4 { + buf.put_slice(&data); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +#[bench] +fn put_u8_vec(b: &mut Bencher) { + let mut buf = Vec::<u8>::with_capacity(256); + let cnt = 128; + + b.bytes = cnt as u64; + b.iter(|| { + for _ in 0..cnt { + buf.put_u8(b'x'); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +#[bench] +fn put_slice_vec_extend(b: &mut Bencher) { + let mut buf = Vec::<u8>::with_capacity(256); + let data = [33u8; 32]; + + b.bytes = data.len() as u64 * 4; + b.iter(|| { + for _ in 0..4 { + buf.extend_from_slice(&data); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} + +#[bench] +fn put_u8_vec_push(b: &mut Bencher) { + let mut buf = Vec::<u8>::with_capacity(256); + let cnt = 128; + + b.bytes = cnt as u64; + b.iter(|| { + for _ in 0..cnt { + buf.push(b'x'); + } + test::black_box(&buf); + unsafe { buf.set_len(0); } + }); +} diff --git a/third_party/rust/bytes/ci/azure-cross-compile.yml b/third_party/rust/bytes/ci/azure-cross-compile.yml new file mode 100644 index 0000000000..be46ca3460 --- /dev/null +++ b/third_party/rust/bytes/ci/azure-cross-compile.yml @@ -0,0 +1,46 @@ +parameters: + cmd: build + rust_version: stable + +jobs: +- job: ${{ parameters.name }} + displayName: Cross + strategy: + matrix: + i686: + vmImage: ubuntu-16.04 + target: i686-unknown-linux-gnu + armv7: + vmImage: ubuntu-16.04 + target: armv7-unknown-linux-gnueabihf + powerpc: + vmImage: ubuntu-16.04 + target: powerpc-unknown-linux-gnu + powerpc64: + vmImage: ubuntu-16.04 + target: powerpc64-unknown-linux-gnu + wasm: + vmImage: ubuntu-16.04 + target: wasm32-unknown-unknown + pool: + vmImage: $(vmImage) + + steps: + - template: azure-install-rust.yml + parameters: + rust_version: ${{parameters.rust_version}} + + - script: cargo install cross + displayName: Install cross + condition: not(eq(variables['target'], 'wasm32-unknown-unknown')) + + - script: cross ${{ 
parameters.cmd }} --target $(target) + displayName: cross ${{ parameters.cmd }} --target $(target) + condition: not(eq(variables['target'], 'wasm32-unknown-unknown')) + + # WASM support + - script: | + rustup target add $(target) + cargo build --target $(target) + displayName: cargo build --target $(target) + condition: eq(variables['target'], 'wasm32-unknown-unknown') diff --git a/third_party/rust/bytes/ci/azure-deploy-docs.yml b/third_party/rust/bytes/ci/azure-deploy-docs.yml new file mode 100644 index 0000000000..52ac48fcdc --- /dev/null +++ b/third_party/rust/bytes/ci/azure-deploy-docs.yml @@ -0,0 +1,39 @@ +parameters: + dependsOn: [] + +jobs: +- job: documentation + displayName: 'Deploy API Documentation' + condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) + pool: + vmImage: 'Ubuntu 16.04' + dependsOn: + - ${{ parameters.dependsOn }} + steps: + - template: azure-install-rust.yml + parameters: + rust_version: stable + - script: | + cargo doc --no-deps + cp -R target/doc '$(Build.BinariesDirectory)' + displayName: 'Generate Documentation' + - script: | + set -e + + git --version + ls -la + git init + git config user.name 'Deployment Bot (from Azure Pipelines)' + git config user.email 'deploy@tokio-rs.com' + git config --global credential.helper 'store --file ~/.my-credentials' + printf "protocol=https\nhost=github.com\nusername=carllerche\npassword=%s\n\n" "$GITHUB_TOKEN" | git credential-store --file ~/.my-credentials store + git remote add origin https://github.com/tokio-rs/bytes + git checkout -b gh-pages + git add . + git commit -m 'Deploy Bytes API documentation' + git push -f origin gh-pages + env: + GITHUB_TOKEN: $(githubPersonalToken) + workingDirectory: '$(Build.BinariesDirectory)' + displayName: 'Deploy Documentation' + diff --git a/third_party/rust/bytes/ci/azure-install-rust.yml b/third_party/rust/bytes/ci/azure-install-rust.yml new file mode 100644 index 0000000000..02176592a6 --- /dev/null +++ b/third_party/rust/bytes/ci/azure-install-rust.yml @@ -0,0 +1,33 @@ +steps: + # Linux and macOS. + - script: | + set -e + curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain none + export PATH=$PATH:$HOME/.cargo/bin + rustup toolchain install $RUSTUP_TOOLCHAIN + rustup default $RUSTUP_TOOLCHAIN + echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin" + env: + RUSTUP_TOOLCHAIN: ${{parameters.rust_version}} + displayName: "Install rust (*nix)" + condition: not(eq(variables['Agent.OS'], 'Windows_NT')) + + # Windows. + - script: | + curl -sSf -o rustup-init.exe https://win.rustup.rs + rustup-init.exe -y --default-toolchain none + set PATH=%PATH%;%USERPROFILE%\.cargo\bin + rustup toolchain install %RUSTUP_TOOLCHAIN% + rustup default %RUSTUP_TOOLCHAIN% + echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin" + env: + RUSTUP_TOOLCHAIN: ${{parameters.rust_version}} + displayName: "Install rust (windows)" + condition: eq(variables['Agent.OS'], 'Windows_NT') + + # All platforms. 
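+ # Log the toolchain list and the exact rustc/cargo versions so a CI
+ # failure can be traced back to a specific compiler build.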
+ - script: | + rustup toolchain list + rustc -Vv + cargo -V + displayName: Query rust and cargo versions diff --git a/third_party/rust/bytes/ci/azure-loom.yml b/third_party/rust/bytes/ci/azure-loom.yml new file mode 100644 index 0000000000..1db9c3afe1 --- /dev/null +++ b/third_party/rust/bytes/ci/azure-loom.yml @@ -0,0 +1,15 @@ +jobs: +- job: ${{parameters.name}} + displayName: Loom tests + pool: + vmImage: ubuntu-16.04 + + steps: + - template: azure-install-rust.yml + parameters: + rust_version: ${{parameters.rust_version}} + + - script: RUSTFLAGS="--cfg loom" cargo test --lib + displayName: RUSTFLAGS="--cfg loom" cargo test --lib + + diff --git a/third_party/rust/bytes/ci/azure-test-stable.yml b/third_party/rust/bytes/ci/azure-test-stable.yml new file mode 100644 index 0000000000..e543eeeb43 --- /dev/null +++ b/third_party/rust/bytes/ci/azure-test-stable.yml @@ -0,0 +1,50 @@ +parameters: + cmd: test + rust_version: stable + features: [] + +jobs: +- job: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + strategy: + matrix: + Linux: + vmImage: ubuntu-16.04 + + ${{ if parameters.cross }}: + MacOS: + vmImage: macOS-10.13 + Windows: + vmImage: vs2017-win2016 + pool: + vmImage: $(vmImage) + + steps: + - template: azure-install-rust.yml + parameters: + rust_version: ${{parameters.rust_version}} + + # Run with default crate features + - script: cargo ${{ parameters.cmd }} + displayName: cargo ${{ parameters.cmd }} + + # Run with each specified feature + - ${{ each feature in parameters.features }}: + - script: cargo ${{ parameters.cmd }} --features ${{ feature }} + displayName: cargo ${{ parameters.cmd }} --features ${{ feature }} + + - ${{ if eq(parameters.cmd, 'test') }}: + - script: cargo doc --no-deps + displayName: cargo doc --no-deps + + - ${{ if parameters.benches }}: + - script: cargo check --benches + displayName: Check benchmarks + + # Run with all features + - script: cargo ${{ parameters.cmd }} --all-features + displayName: cargo ${{ parameters.cmd }} --all-features + + # Run with no default features + - script: cargo check --no-default-features + displayName: cargo check --no-default-features diff --git a/third_party/rust/bytes/ci/azure-tsan.yml b/third_party/rust/bytes/ci/azure-tsan.yml new file mode 100644 index 0000000000..198b187abf --- /dev/null +++ b/third_party/rust/bytes/ci/azure-tsan.yml @@ -0,0 +1,26 @@ +jobs: +- job: ${{ parameters.name }} + displayName: TSAN + pool: + vmImage: ubuntu-16.04 + + steps: + - template: azure-install-rust.yml + parameters: + rust_version: ${{ parameters.rust_version }} + + - script: | + set -e + + export RUST_TEST_THREADS=1 + export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" + export TSAN_OPTIONS="suppressions=`pwd`/ci/tsan" + + # Run address sanitizer + RUSTFLAGS="-Z sanitizer=address" \ + cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut + + # Run thread sanitizer + RUSTFLAGS="-Z sanitizer=thread" \ + cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut + displayName: TSAN / MSAN diff --git a/third_party/rust/bytes/ci/tsan b/third_party/rust/bytes/ci/tsan new file mode 100644 index 0000000000..e53f9b893d --- /dev/null +++ b/third_party/rust/bytes/ci/tsan @@ -0,0 +1,24 @@ +# TSAN suppressions file for `bytes` + +# TSAN does not understand fences and `Arc::drop` is implemented using a fence. +# This causes many false positives. +race:Arc*drop +race:arc*Weak*drop + +# `std` mpsc is not used in any Bytes code base. 
This race is triggered by some +# rust runtime logic. +race:std*mpsc_queue + +# Some test runtime races. Allocation should be race free +race:alloc::alloc + +# Not sure why this is warning, but it is in the test harness and not the library. +race:TestEvent*clone +race:test::run_tests_console::*closure + +# Probably more fences in std. +race:__call_tls_dtors + +# This ignores a false positive caused by `thread::park()`/`thread::unpark()`. +# See: https://github.com/rust-lang/rust/pull/54806#issuecomment-436193353 +race:pthread_cond_destroy diff --git a/third_party/rust/bytes/src/buf/buf_impl.rs b/third_party/rust/bytes/src/buf/buf_impl.rs new file mode 100644 index 0000000000..843db718f8 --- /dev/null +++ b/third_party/rust/bytes/src/buf/buf_impl.rs @@ -0,0 +1,1006 @@ +use core::{cmp, ptr, mem}; + +#[cfg(feature = "std")] +use std::io::IoSlice; + +use alloc::{boxed::Box}; + +macro_rules! buf_get_impl { + ($this:ident, $typ:tt::$conv:tt) => ({ + const SIZE: usize = mem::size_of::<$typ>(); + // try to convert directly from the bytes + // this Option<ret> trick is to avoid keeping a borrow on self + // when advance() is called (mut borrow) and to call bytes() only once + let ret = $this.bytes().get(..SIZE).map(|src| unsafe { + $typ::$conv(*(src as *const _ as *const [_; SIZE])) + }); + + if let Some(ret) = ret { + // if the direct conversion was possible, advance and return + $this.advance(SIZE); + return ret; + } else { + // if not we copy the bytes in a temp buffer then convert + let mut buf = [0; SIZE]; + $this.copy_to_slice(&mut buf); // (do the advance) + return $typ::$conv(buf); + } + }); + (le => $this:ident, $typ:tt, $len_to_read:expr) => ({ + debug_assert!(mem::size_of::<$typ>() >= $len_to_read); + + // The same trick as above does not improve the best case speed. + // It seems to be linked to the way the method is optimised by the compiler + let mut buf = [0; (mem::size_of::<$typ>())]; + $this.copy_to_slice(&mut buf[..($len_to_read)]); + return $typ::from_le_bytes(buf); + }); + (be => $this:ident, $typ:tt, $len_to_read:expr) => {{ + debug_assert!(mem::size_of::<$typ>() >= $len_to_read); + + let mut buf = [0; (mem::size_of::<$typ>())]; + $this.copy_to_slice(&mut buf[mem::size_of::<$typ>()-($len_to_read)..]); + return $typ::from_be_bytes(buf); + }}; +} + +/// Read bytes from a buffer. +/// +/// A buffer stores bytes in memory such that read operations are infallible. +/// The underlying storage may or may not be in contiguous memory. A `Buf` value +/// is a cursor into the buffer. Reading from `Buf` advances the cursor +/// position. It can be thought of as an efficient `Iterator` for collections of +/// bytes. +/// +/// The simplest `Buf` is a `&[u8]`. +/// +/// ``` +/// use bytes::Buf; +/// +/// let mut buf = &b"hello world"[..]; +/// +/// assert_eq!(b'h', buf.get_u8()); +/// assert_eq!(b'e', buf.get_u8()); +/// assert_eq!(b'l', buf.get_u8()); +/// +/// let mut rest = [0; 8]; +/// buf.copy_to_slice(&mut rest); +/// +/// assert_eq!(&rest[..], &b"lo world"[..]); +/// ``` +pub trait Buf { + /// Returns the number of bytes between the current position and the end of + /// the buffer. + /// + /// This value is greater than or equal to the length of the slice returned + /// by `bytes`. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"hello world"[..]; + /// + /// assert_eq!(buf.remaining(), 11); + /// + /// buf.get_u8(); + /// + /// assert_eq!(buf.remaining(), 10); + /// ``` + /// + /// # Implementer notes + /// + /// Implementations of `remaining` should ensure that the return value does + /// not change unless a call is made to `advance` or any other function that + /// is documented to change the `Buf`'s current position. + fn remaining(&self) -> usize; + + /// Returns a slice starting at the current position and of length between 0 + /// and `Buf::remaining()`. Note that this *can* return shorter slice (this allows + /// non-continuous internal representation). + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"hello world"[..]; + /// + /// assert_eq!(buf.bytes(), &b"hello world"[..]); + /// + /// buf.advance(6); + /// + /// assert_eq!(buf.bytes(), &b"world"[..]); + /// ``` + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `Buf::remaining` returns 0, calls to `bytes` should return an + /// empty slice. + fn bytes(&self) -> &[u8]; + + /// Fills `dst` with potentially multiple slices starting at `self`'s + /// current position. + /// + /// If the `Buf` is backed by disjoint slices of bytes, `bytes_vectored` enables + /// fetching more than one slice at once. `dst` is a slice of `IoSlice` + /// references, enabling the slice to be directly used with [`writev`] + /// without any further conversion. The sum of the lengths of all the + /// buffers in `dst` will be less than or equal to `Buf::remaining()`. + /// + /// The entries in `dst` will be overwritten, but the data **contained** by + /// the slices **will not** be modified. If `bytes_vectored` does not fill every + /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices + /// in `self. + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `Buf::remaining` returns 0, calls to `bytes_vectored` must return 0 + /// without mutating `dst`. + /// + /// Implementations should also take care to properly handle being called + /// with `dst` being a zero length slice. + /// + /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html + #[cfg(feature = "std")] + fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { + if dst.is_empty() { + return 0; + } + + if self.has_remaining() { + dst[0] = IoSlice::new(self.bytes()); + 1 + } else { + 0 + } + } + + /// Advance the internal cursor of the Buf + /// + /// The next call to `bytes` will return a slice starting `cnt` bytes + /// further into the underlying buffer. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"hello world"[..]; + /// + /// assert_eq!(buf.bytes(), &b"hello world"[..]); + /// + /// buf.advance(6); + /// + /// assert_eq!(buf.bytes(), &b"world"[..]); + /// ``` + /// + /// # Panics + /// + /// This function **may** panic if `cnt > self.remaining()`. + /// + /// # Implementer notes + /// + /// It is recommended for implementations of `advance` to panic if `cnt > + /// self.remaining()`. 
If the implementation does not panic, the call must + /// behave as if `cnt == self.remaining()`. + /// + /// A call with `cnt == 0` should never panic and be a no-op. + fn advance(&mut self, cnt: usize); + + /// Returns true if there are any more bytes to consume + /// + /// This is equivalent to `self.remaining() != 0`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"a"[..]; + /// + /// assert!(buf.has_remaining()); + /// + /// buf.get_u8(); + /// + /// assert!(!buf.has_remaining()); + /// ``` + fn has_remaining(&self) -> bool { + self.remaining() > 0 + } + + /// Copies bytes from `self` into `dst`. + /// + /// The cursor is advanced by the number of bytes copied. `self` must have + /// enough remaining bytes to fill `dst`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"hello world"[..]; + /// let mut dst = [0; 5]; + /// + /// buf.copy_to_slice(&mut dst); + /// assert_eq!(&b"hello"[..], &dst); + /// assert_eq!(6, buf.remaining()); + /// ``` + /// + /// # Panics + /// + /// This function panics if `self.remaining() < dst.len()` + fn copy_to_slice(&mut self, dst: &mut [u8]) { + let mut off = 0; + + assert!(self.remaining() >= dst.len()); + + while off < dst.len() { + let cnt; + + unsafe { + let src = self.bytes(); + cnt = cmp::min(src.len(), dst.len() - off); + + ptr::copy_nonoverlapping( + src.as_ptr(), dst[off..].as_mut_ptr(), cnt); + + off += cnt; + } + + self.advance(cnt); + } + } + + /// Gets an unsigned 8 bit integer from `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08 hello"[..]; + /// assert_eq!(8, buf.get_u8()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is no more remaining data in `self`. + fn get_u8(&mut self) -> u8 { + assert!(self.remaining() >= 1); + let ret = self.bytes()[0]; + self.advance(1); + ret + } + + /// Gets a signed 8 bit integer from `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08 hello"[..]; + /// assert_eq!(8, buf.get_i8()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is no more remaining data in `self`. + fn get_i8(&mut self) -> i8 { + assert!(self.remaining() >= 1); + let ret = self.bytes()[0] as i8; + self.advance(1); + ret + } + + /// Gets an unsigned 16 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x09 hello"[..]; + /// assert_eq!(0x0809, buf.get_u16()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u16(&mut self) -> u16 { + buf_get_impl!(self, u16::from_be_bytes); + } + + /// Gets an unsigned 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x09\x08 hello"[..]; + /// assert_eq!(0x0809, buf.get_u16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u16_le(&mut self) -> u16 { + buf_get_impl!(self, u16::from_le_bytes); + } + + /// Gets a signed 16 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 2. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x09 hello"[..]; + /// assert_eq!(0x0809, buf.get_i16()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16(&mut self) -> i16 { + buf_get_impl!(self, i16::from_be_bytes); + } + + /// Gets a signed 16 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x09\x08 hello"[..]; + /// assert_eq!(0x0809, buf.get_i16_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16_le(&mut self) -> i16 { + buf_get_impl!(self, i16::from_le_bytes); + } + + /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; + /// assert_eq!(0x0809A0A1, buf.get_u32()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32(&mut self) -> u32 { + buf_get_impl!(self, u32::from_be_bytes); + } + + /// Gets an unsigned 32 bit integer from `self` in the little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; + /// assert_eq!(0x0809A0A1, buf.get_u32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32_le(&mut self) -> u32 { + buf_get_impl!(self, u32::from_le_bytes); + } + + /// Gets a signed 32 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x09\xA0\xA1 hello"[..]; + /// assert_eq!(0x0809A0A1, buf.get_i32()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i32(&mut self) -> i32 { + buf_get_impl!(self, i32::from_be_bytes); + } + + /// Gets a signed 32 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\xA1\xA0\x09\x08 hello"[..]; + /// assert_eq!(0x0809A0A1, buf.get_i32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i32_le(&mut self) -> i32 { + buf_get_impl!(self, i32::from_le_bytes); + } + + /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; + /// assert_eq!(0x0102030405060708, buf.get_u64()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64(&mut self) -> u64 { + buf_get_impl!(self, u64::from_be_bytes); + } + + /// Gets an unsigned 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; + /// assert_eq!(0x0102030405060708, buf.get_u64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64_le(&mut self) -> u64 { + buf_get_impl!(self, u64::from_le_bytes); + } + + /// Gets a signed 64 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08 hello"[..]; + /// assert_eq!(0x0102030405060708, buf.get_i64()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64(&mut self) -> i64 { + buf_get_impl!(self, i64::from_be_bytes); + } + + /// Gets a signed 64 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; + /// assert_eq!(0x0102030405060708, buf.get_i64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64_le(&mut self) -> i64 { + buf_get_impl!(self, i64::from_le_bytes); + } + + /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u128(&mut self) -> u128 { + buf_get_impl!(self, u128::from_be_bytes); + } + + /// Gets an unsigned 128 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u128_le(&mut self) -> u128 { + buf_get_impl!(self, u128::from_le_bytes); + } + + /// Gets a signed 128 bit integer from `self` in big-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello"[..]; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i128(&mut self) -> i128 { + buf_get_impl!(self, i128::from_be_bytes); + } + + /// Gets a signed 128 bit integer from `self` in little-endian byte order. + /// + /// The current position is advanced by 16. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello"[..]; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i128_le(&mut self) -> i128 { + buf_get_impl!(self, i128::from_le_bytes); + } + + /// Gets an unsigned n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03 hello"[..]; + /// assert_eq!(0x010203, buf.get_uint(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_uint(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(be => self, u64, nbytes); + } + + /// Gets an unsigned n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x03\x02\x01 hello"[..]; + /// assert_eq!(0x010203, buf.get_uint_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_uint_le(&mut self, nbytes: usize) -> u64 { + buf_get_impl!(le => self, u64, nbytes); + } + + /// Gets a signed n-byte integer from `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x01\x02\x03 hello"[..]; + /// assert_eq!(0x010203, buf.get_int(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(be => self, i64, nbytes); + } + + /// Gets a signed n-byte integer from `self` in little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x03\x02\x01 hello"[..]; + /// assert_eq!(0x010203, buf.get_int_le(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int_le(&mut self, nbytes: usize) -> i64 { + buf_get_impl!(le => self, i64, nbytes); + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x3F\x99\x99\x9A hello"[..]; + /// assert_eq!(1.2f32, buf.get_f32()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f32(&mut self) -> f32 { + f32::from_bits(Self::get_u32(self)) + } + + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x9A\x99\x99\x3F hello"[..]; + /// assert_eq!(1.2f32, buf.get_f32_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. 
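+ // Reads a little-endian u32, then reinterprets the raw bits as an
+ // IEEE754 float; `from_bits` performs no numeric conversion.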
+ fn get_f32_le(&mut self) -> f32 { + f32::from_bits(Self::get_u32_le(self)) + } + + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello"[..]; + /// assert_eq!(1.2f64, buf.get_f64()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64(&mut self) -> f64 { + f64::from_bits(Self::get_u64(self)) + } + + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf = &b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello"[..]; + /// assert_eq!(1.2f64, buf.get_f64_le()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64_le(&mut self) -> f64 { + f64::from_bits(Self::get_u64_le(self)) + } + + /// Consumes remaining bytes inside self and returns new instance of `Bytes` + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let bytes = (&b"hello world"[..]).to_bytes(); + /// assert_eq!(&bytes[..], &b"hello world"[..]); + /// ``` + fn to_bytes(&mut self) -> crate::Bytes { + use super::BufMut; + let mut ret = crate::BytesMut::with_capacity(self.remaining()); + ret.put(self); + ret.freeze() + } +} + +macro_rules! deref_forward_buf { + () => ( + fn remaining(&self) -> usize { + (**self).remaining() + } + + fn bytes(&self) -> &[u8] { + (**self).bytes() + } + + #[cfg(feature = "std")] + fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize { + (**self).bytes_vectored(dst) + } + + fn advance(&mut self, cnt: usize) { + (**self).advance(cnt) + } + + fn has_remaining(&self) -> bool { + (**self).has_remaining() + } + + fn copy_to_slice(&mut self, dst: &mut [u8]) { + (**self).copy_to_slice(dst) + } + + fn get_u8(&mut self) -> u8 { + (**self).get_u8() + } + + fn get_i8(&mut self) -> i8 { + (**self).get_i8() + } + + fn get_u16(&mut self) -> u16 { + (**self).get_u16() + } + + fn get_u16_le(&mut self) -> u16 { + (**self).get_u16_le() + } + + fn get_i16(&mut self) -> i16 { + (**self).get_i16() + } + + fn get_i16_le(&mut self) -> i16 { + (**self).get_i16_le() + } + + fn get_u32(&mut self) -> u32 { + (**self).get_u32() + } + + fn get_u32_le(&mut self) -> u32 { + (**self).get_u32_le() + } + + fn get_i32(&mut self) -> i32 { + (**self).get_i32() + } + + fn get_i32_le(&mut self) -> i32 { + (**self).get_i32_le() + } + + fn get_u64(&mut self) -> u64 { + (**self).get_u64() + } + + fn get_u64_le(&mut self) -> u64 { + (**self).get_u64_le() + } + + fn get_i64(&mut self) -> i64 { + (**self).get_i64() + } + + fn get_i64_le(&mut self) -> i64 { + (**self).get_i64_le() + } + + fn get_uint(&mut self, nbytes: usize) -> u64 { + (**self).get_uint(nbytes) + } + + fn get_uint_le(&mut self, nbytes: usize) -> u64 { + (**self).get_uint_le(nbytes) + } + + fn get_int(&mut self, nbytes: usize) -> i64 { + (**self).get_int(nbytes) + } + + fn get_int_le(&mut self, nbytes: usize) -> i64 { + (**self).get_int_le(nbytes) + } + + fn to_bytes(&mut self) -> crate::Bytes { + (**self).to_bytes() + } + + ) +} + +impl<T: Buf + ?Sized> Buf for &mut T { + deref_forward_buf!(); +} + +impl<T: Buf + ?Sized> Buf for Box<T> { + deref_forward_buf!(); +} + +impl Buf for &[u8] { + 
#[inline] + fn remaining(&self) -> usize { + self.len() + } + + #[inline] + fn bytes(&self) -> &[u8] { + self + } + + #[inline] + fn advance(&mut self, cnt: usize) { + *self = &self[cnt..]; + } +} + +impl Buf for Option<[u8; 1]> { + fn remaining(&self) -> usize { + if self.is_some() { + 1 + } else { + 0 + } + } + + fn bytes(&self) -> &[u8] { + self.as_ref().map(AsRef::as_ref) + .unwrap_or(Default::default()) + } + + fn advance(&mut self, cnt: usize) { + if cnt == 0 { + return; + } + + if self.is_none() { + panic!("overflow"); + } else { + assert_eq!(1, cnt); + *self = None; + } + } +} + +#[cfg(feature = "std")] +impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> { + fn remaining(&self) -> usize { + let len = self.get_ref().as_ref().len(); + let pos = self.position(); + + if pos >= len as u64 { + return 0; + } + + len - pos as usize + } + + fn bytes(&self) -> &[u8] { + let len = self.get_ref().as_ref().len(); + let pos = self.position(); + + if pos >= len as u64 { + return &[]; + } + + &self.get_ref().as_ref()[pos as usize..] + } + + fn advance(&mut self, cnt: usize) { + let pos = (self.position() as usize) + .checked_add(cnt).expect("overflow"); + + assert!(pos <= self.get_ref().as_ref().len()); + self.set_position(pos as u64); + } +} + +// The existence of this function makes the compiler catch if the Buf +// trait is "object-safe" or not. +fn _assert_trait_object(_b: &dyn Buf) {} diff --git a/third_party/rust/bytes/src/buf/buf_mut.rs b/third_party/rust/bytes/src/buf/buf_mut.rs new file mode 100644 index 0000000000..f5ed2a7719 --- /dev/null +++ b/third_party/rust/bytes/src/buf/buf_mut.rs @@ -0,0 +1,1088 @@ +use core::{cmp, mem::{self, MaybeUninit}, ptr, usize}; + +#[cfg(feature = "std")] +use std::fmt; + +use alloc::{vec::Vec, boxed::Box}; + +/// A trait for values that provide sequential write access to bytes. +/// +/// Write bytes to a buffer +/// +/// A buffer stores bytes in memory such that write operations are infallible. +/// The underlying storage may or may not be in contiguous memory. A `BufMut` +/// value is a cursor into the buffer. Writing to `BufMut` advances the cursor +/// position. +/// +/// The simplest `BufMut` is a `Vec<u8>`. +/// +/// ``` +/// use bytes::BufMut; +/// +/// let mut buf = vec![]; +/// +/// buf.put(&b"hello world"[..]); +/// +/// assert_eq!(buf, b"hello world"); +/// ``` +pub trait BufMut { + /// Returns the number of bytes that can be written from the current + /// position until the end of the buffer is reached. + /// + /// This value is greater than or equal to the length of the slice returned + /// by `bytes_mut`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut dst = [0; 10]; + /// let mut buf = &mut dst[..]; + /// + /// let original_remaining = buf.remaining_mut(); + /// buf.put(&b"hello"[..]); + /// + /// assert_eq!(original_remaining - 5, buf.remaining_mut()); + /// ``` + /// + /// # Implementer notes + /// + /// Implementations of `remaining_mut` should ensure that the return value + /// does not change unless a call is made to `advance_mut` or any other + /// function that is documented to change the `BufMut`'s current position. + fn remaining_mut(&self) -> usize; + + /// Advance the internal cursor of the BufMut + /// + /// The next call to `bytes_mut` will return a slice starting `cnt` bytes + /// further into the underlying buffer. + /// + /// This function is unsafe because there is no guarantee that the bytes + /// being advanced past have been initialized. 
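+ ///
+ /// Callers are expected to initialize those bytes first, for example by
+ /// writing through the slice returned by `bytes_mut`, before advancing
+ /// past them.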
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = Vec::with_capacity(16); + /// + /// unsafe { + /// // MaybeUninit::as_mut_ptr + /// buf.bytes_mut()[0].as_mut_ptr().write(b'h'); + /// buf.bytes_mut()[1].as_mut_ptr().write(b'e'); + /// + /// buf.advance_mut(2); + /// + /// buf.bytes_mut()[0].as_mut_ptr().write(b'l'); + /// buf.bytes_mut()[1].as_mut_ptr().write(b'l'); + /// buf.bytes_mut()[2].as_mut_ptr().write(b'o'); + /// + /// buf.advance_mut(3); + /// } + /// + /// assert_eq!(5, buf.len()); + /// assert_eq!(buf, b"hello"); + /// ``` + /// + /// # Panics + /// + /// This function **may** panic if `cnt > self.remaining_mut()`. + /// + /// # Implementer notes + /// + /// It is recommended for implementations of `advance_mut` to panic if + /// `cnt > self.remaining_mut()`. If the implementation does not panic, + /// the call must behave as if `cnt == self.remaining_mut()`. + /// + /// A call with `cnt == 0` should never panic and be a no-op. + unsafe fn advance_mut(&mut self, cnt: usize); + + /// Returns true if there is space in `self` for more bytes. + /// + /// This is equivalent to `self.remaining_mut() != 0`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut dst = [0; 5]; + /// let mut buf = &mut dst[..]; + /// + /// assert!(buf.has_remaining_mut()); + /// + /// buf.put(&b"hello"[..]); + /// + /// assert!(!buf.has_remaining_mut()); + /// ``` + fn has_remaining_mut(&self) -> bool { + self.remaining_mut() > 0 + } + + /// Returns a mutable slice starting at the current BufMut position and of + /// length between 0 and `BufMut::remaining_mut()`. Note that this *can* be shorter than the + /// whole remainder of the buffer (this allows non-continuous implementation). + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// The returned byte slice may represent uninitialized memory. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = Vec::with_capacity(16); + /// + /// unsafe { + /// // MaybeUninit::as_mut_ptr + /// buf.bytes_mut()[0].as_mut_ptr().write(b'h'); + /// buf.bytes_mut()[1].as_mut_ptr().write(b'e'); + /// + /// buf.advance_mut(2); + /// + /// buf.bytes_mut()[0].as_mut_ptr().write(b'l'); + /// buf.bytes_mut()[1].as_mut_ptr().write(b'l'); + /// buf.bytes_mut()[2].as_mut_ptr().write(b'o'); + /// + /// buf.advance_mut(3); + /// } + /// + /// assert_eq!(5, buf.len()); + /// assert_eq!(buf, b"hello"); + /// ``` + /// + /// # Implementer notes + /// + /// This function should never panic. `bytes_mut` should return an empty + /// slice **if and only if** `remaining_mut` returns 0. In other words, + /// `bytes_mut` returning an empty slice implies that `remaining_mut` will + /// return 0 and `remaining_mut` returning 0 implies that `bytes_mut` will + /// return an empty slice. + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>]; + + /// Fills `dst` with potentially multiple mutable slices starting at `self`'s + /// current position. + /// + /// If the `BufMut` is backed by disjoint slices of bytes, `bytes_vectored_mut` + /// enables fetching more than one slice at once. `dst` is a slice of + /// mutable `IoSliceMut` references, enabling the slice to be directly used with + /// [`readv`] without any further conversion. The sum of the lengths of all + /// the buffers in `dst` will be less than or equal to + /// `Buf::remaining_mut()`. 
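+ ///
+ /// As with `bytes_mut`, the slices handed out may point at uninitialized
+ /// memory, so they must only be written to, never read from.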
+ /// + /// The entries in `dst` will be overwritten, but the data **contained** by + /// the slices **will not** be modified. If `bytes_vectored_mut` does not fill every + /// entry in `dst`, then `dst` is guaranteed to contain all remaining slices + /// in `self. + /// + /// This is a lower level function. Most operations are done with other + /// functions. + /// + /// # Implementer notes + /// + /// This function should never panic. Once the end of the buffer is reached, + /// i.e., `BufMut::remaining_mut` returns 0, calls to `bytes_vectored_mut` must + /// return 0 without mutating `dst`. + /// + /// Implementations should also take care to properly handle being called + /// with `dst` being a zero length slice. + /// + /// [`readv`]: http://man7.org/linux/man-pages/man2/readv.2.html + #[cfg(feature = "std")] + fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize { + if dst.is_empty() { + return 0; + } + + if self.has_remaining_mut() { + dst[0] = IoSliceMut::from(self.bytes_mut()); + 1 + } else { + 0 + } + } + + /// Transfer bytes into `self` from `src` and advance the cursor by the + /// number of bytes written. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// + /// buf.put_u8(b'h'); + /// buf.put(&b"ello"[..]); + /// buf.put(&b" world"[..]); + /// + /// assert_eq!(buf, b"hello world"); + /// ``` + /// + /// # Panics + /// + /// Panics if `self` does not have enough capacity to contain `src`. + fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized { + assert!(self.remaining_mut() >= src.remaining()); + + while src.has_remaining() { + let l; + + unsafe { + let s = src.bytes(); + let d = self.bytes_mut(); + l = cmp::min(s.len(), d.len()); + + ptr::copy_nonoverlapping( + s.as_ptr(), + d.as_mut_ptr() as *mut u8, + l); + } + + src.advance(l); + unsafe { self.advance_mut(l); } + } + } + + /// Transfer bytes into `self` from `src` and advance the cursor by the + /// number of bytes written. + /// + /// `self` must have enough remaining capacity to contain all of `src`. + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut dst = [0; 6]; + /// + /// { + /// let mut buf = &mut dst[..]; + /// buf.put_slice(b"hello"); + /// + /// assert_eq!(1, buf.remaining_mut()); + /// } + /// + /// assert_eq!(b"hello\0", &dst); + /// ``` + fn put_slice(&mut self, src: &[u8]) { + let mut off = 0; + + assert!(self.remaining_mut() >= src.len(), "buffer overflow; remaining = {}; src = {}", self.remaining_mut(), src.len()); + + while off < src.len() { + let cnt; + + unsafe { + let dst = self.bytes_mut(); + cnt = cmp::min(dst.len(), src.len() - off); + + ptr::copy_nonoverlapping( + src[off..].as_ptr(), + dst.as_mut_ptr() as *mut u8, + cnt); + + off += cnt; + + } + + unsafe { self.advance_mut(cnt); } + } + } + + /// Writes an unsigned 8 bit integer to `self`. + /// + /// The current position is advanced by 1. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u8(0x01); + /// assert_eq!(buf, b"\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u8(&mut self, n: u8) { + let src = [n]; + self.put_slice(&src); + } + + /// Writes a signed 8 bit integer to `self`. + /// + /// The current position is advanced by 1. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i8(0x01); + /// assert_eq!(buf, b"\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i8(&mut self, n: i8) { + let src = [n as u8]; + self.put_slice(&src) + } + + /// Writes an unsigned 16 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16(0x0809); + /// assert_eq!(buf, b"\x08\x09"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16(&mut self, n: u16) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes an unsigned 16 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16_le(&mut self, n: u16) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes a signed 16 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16(0x0809); + /// assert_eq!(buf, b"\x08\x09"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i16(&mut self, n: i16) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes a signed 16 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16_le(0x0809); + /// assert_eq!(buf, b"\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i16_le(&mut self, n: i16) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u32(0x0809A0A1); + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32(&mut self, n: u32) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes an unsigned 32 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32_le(&mut self, n: u32) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes a signed 32 bit integer to `self` in big-endian byte order. + /// + /// The current position is advanced by 4. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32(0x0809A0A1); + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i32(&mut self, n: i32) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes a signed 32 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_le(0x0809A0A1); + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i32_le(&mut self, n: i32) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64(&mut self, n: u64) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes an unsigned 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_le(&mut self, n: u64) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes a signed 64 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64(0x0102030405060708); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64(&mut self, n: i64) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes a signed 64 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_le(0x0102030405060708); + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_le(&mut self, n: i64) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. 
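+ // Like the other fixed-width writers, this delegates to `put_slice` with
+ // the big-endian byte representation produced by `to_be_bytes`.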
+ fn put_u128(&mut self, n: u128) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes an unsigned 128 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u128_le(&mut self, n: u128) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes a signed 128 bit integer to `self` in the big-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i128(&mut self, n: i128) { + self.put_slice(&n.to_be_bytes()) + } + + /// Writes a signed 128 bit integer to `self` in little-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_le(0x01020304050607080910111213141516); + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i128_le(&mut self, n: i128) { + self.put_slice(&n.to_le_bytes()) + } + + /// Writes an unsigned n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint(&mut self, n: u64, nbytes: usize) { + self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); + } + + /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_uint_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_uint_le(&mut self, n: u64, nbytes: usize) { + self.put_slice(&n.to_le_bytes()[0..nbytes]); + } + + /// Writes a signed n-byte integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int(0x010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int(&mut self, n: i64, nbytes: usize) { + self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); + } + + /// Writes a signed n-byte integer to `self` in little-endian byte order. 
+ /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_le(0x010203, 3); + /// assert_eq!(buf, b"\x03\x02\x01"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_int_le(&mut self, n: i64, nbytes: usize) { + self.put_slice(&n.to_le_bytes()[0..nbytes]); + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32(1.2f32); + /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f32(&mut self, n: f32) { + self.put_u32(n.to_bits()); + } + + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_le(1.2f32); + /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f32_le(&mut self, n: f32) { + self.put_u32_le(n.to_bits()); + } + + /// Writes an IEEE754 double-precision (8 bytes) floating point number to + /// `self` in big-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f64(1.2f64); + /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f64(&mut self, n: f64) { + self.put_u64(n.to_bits()); + } + + /// Writes an IEEE754 double-precision (8 bytes) floating point number to + /// `self` in little-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f64_le(1.2f64); + /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f64_le(&mut self, n: f64) { + self.put_u64_le(n.to_bits()); + } +} + +macro_rules! 
deref_forward_bufmut { + () => ( + fn remaining_mut(&self) -> usize { + (**self).remaining_mut() + } + + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] { + (**self).bytes_mut() + } + + #[cfg(feature = "std")] + fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize { + (**self).bytes_vectored_mut(dst) + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + (**self).advance_mut(cnt) + } + + fn put_slice(&mut self, src: &[u8]) { + (**self).put_slice(src) + } + + fn put_u8(&mut self, n: u8) { + (**self).put_u8(n) + } + + fn put_i8(&mut self, n: i8) { + (**self).put_i8(n) + } + + fn put_u16(&mut self, n: u16) { + (**self).put_u16(n) + } + + fn put_u16_le(&mut self, n: u16) { + (**self).put_u16_le(n) + } + + fn put_i16(&mut self, n: i16) { + (**self).put_i16(n) + } + + fn put_i16_le(&mut self, n: i16) { + (**self).put_i16_le(n) + } + + fn put_u32(&mut self, n: u32) { + (**self).put_u32(n) + } + + fn put_u32_le(&mut self, n: u32) { + (**self).put_u32_le(n) + } + + fn put_i32(&mut self, n: i32) { + (**self).put_i32(n) + } + + fn put_i32_le(&mut self, n: i32) { + (**self).put_i32_le(n) + } + + fn put_u64(&mut self, n: u64) { + (**self).put_u64(n) + } + + fn put_u64_le(&mut self, n: u64) { + (**self).put_u64_le(n) + } + + fn put_i64(&mut self, n: i64) { + (**self).put_i64(n) + } + + fn put_i64_le(&mut self, n: i64) { + (**self).put_i64_le(n) + } + ) +} + +impl<T: BufMut + ?Sized> BufMut for &mut T { + deref_forward_bufmut!(); +} + +impl<T: BufMut + ?Sized> BufMut for Box<T> { + deref_forward_bufmut!(); +} + +impl BufMut for &mut [u8] { + #[inline] + fn remaining_mut(&self) -> usize { + self.len() + } + + #[inline] + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] { + // MaybeUninit is repr(transparent), so safe to transmute + unsafe { mem::transmute(&mut **self) } + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + // Lifetime dance taken from `impl Write for &mut [u8]`. + let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); + *self = b; + } +} + +impl BufMut for Vec<u8> { + #[inline] + fn remaining_mut(&self) -> usize { + usize::MAX - self.len() + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + let len = self.len(); + let remaining = self.capacity() - len; + if cnt > remaining { + // Reserve additional capacity, and ensure that the total length + // will not overflow usize. + self.reserve(cnt); + } + + self.set_len(len + cnt); + } + + #[inline] + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] { + use core::slice; + + if self.capacity() == self.len() { + self.reserve(64); // Grow the vec + } + + let cap = self.capacity(); + let len = self.len(); + + let ptr = self.as_mut_ptr() as *mut MaybeUninit<u8>; + unsafe { + &mut slice::from_raw_parts_mut(ptr, cap)[len..] + } + } + + // Specialize these methods so they can skip checking `remaining_mut` + // and `advance_mut`. + + fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized { + // In case the src isn't contiguous, reserve upfront + self.reserve(src.remaining()); + + while src.has_remaining() { + let l; + + // a block to contain the src.bytes() borrow + { + let s = src.bytes(); + l = s.len(); + self.extend_from_slice(s); + } + + src.advance(l); + } + } + + fn put_slice(&mut self, src: &[u8]) { + self.extend_from_slice(src); + } +} + +// The existence of this function makes the compiler catch if the BufMut +// trait is "object-safe" or not. +fn _assert_trait_object(_b: &dyn BufMut) {} + +// ===== impl IoSliceMut ===== + +/// A buffer type used for `readv`. 
+/// +/// This is a wrapper around an `std::io::IoSliceMut`, but does not expose +/// the inner bytes in a safe API, as they may point at uninitialized memory. +/// +/// This is a `repr(transparent)` wrapper around the `std::io::IoSliceMut`, so it +/// is valid to transmute between the two. However, as the memory might be +/// uninitialized, care must be taken not to *read* the internal bytes, only +/// *write* to them. +#[repr(transparent)] +#[cfg(feature = "std")] +pub struct IoSliceMut<'a>(std::io::IoSliceMut<'a>); + +#[cfg(feature = "std")] +impl fmt::Debug for IoSliceMut<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IoSliceMut") + .field("len", &self.0.len()) + .finish() + } +} + +#[cfg(feature = "std")] +impl<'a> From<&'a mut [u8]> for IoSliceMut<'a> { + fn from(buf: &'a mut [u8]) -> IoSliceMut<'a> { + IoSliceMut(std::io::IoSliceMut::new(buf)) + } +} + +#[cfg(feature = "std")] +impl<'a> From<&'a mut [MaybeUninit<u8>]> for IoSliceMut<'a> { + fn from(buf: &'a mut [MaybeUninit<u8>]) -> IoSliceMut<'a> { + IoSliceMut(std::io::IoSliceMut::new(unsafe { + // We don't look at the contents, and `std::io::IoSliceMut` + // doesn't either. + mem::transmute::<&'a mut [MaybeUninit<u8>], &'a mut [u8]>(buf) + })) + } +} diff --git a/third_party/rust/bytes/src/buf/ext/chain.rs b/third_party/rust/bytes/src/buf/ext/chain.rs new file mode 100644 index 0000000000..a1ec597df6 --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/chain.rs @@ -0,0 +1,234 @@ +use crate::{Buf, BufMut}; +use crate::buf::IntoIter; + +use core::mem::MaybeUninit; + +#[cfg(feature = "std")] +use std::io::{IoSlice}; +#[cfg(feature = "std")] +use crate::buf::IoSliceMut; + +/// A `Chain` sequences two buffers. +/// +/// `Chain` is an adapter that links two underlying buffers and provides a +/// continuous view across both buffers. It is able to sequence either immutable +/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values). +/// +/// This struct is generally created by calling [`Buf::chain`]. Please see that +/// function's documentation for more detail. +/// +/// # Examples +/// +/// ``` +/// use bytes::{Bytes, Buf, buf::BufExt}; +/// +/// let mut buf = (&b"hello "[..]) +/// .chain(&b"world"[..]); +/// +/// let full: Bytes = buf.to_bytes(); +/// assert_eq!(full[..], b"hello world"[..]); +/// ``` +/// +/// [`Buf::chain`]: trait.Buf.html#method.chain +/// [`Buf`]: trait.Buf.html +/// [`BufMut`]: trait.BufMut.html +#[derive(Debug)] +pub struct Chain<T, U> { + a: T, + b: U, +} + +impl<T, U> Chain<T, U> { + /// Creates a new `Chain` sequencing the provided values. + pub fn new(a: T, b: U) -> Chain<T, U> { + Chain { + a, + b, + } + } + + /// Gets a reference to the first underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::buf::BufExt; + /// + /// let buf = (&b"hello"[..]) + /// .chain(&b"world"[..]); + /// + /// assert_eq!(buf.first_ref()[..], b"hello"[..]); + /// ``` + pub fn first_ref(&self) -> &T { + &self.a + } + + /// Gets a mutable reference to the first underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, buf::BufExt}; + /// + /// let mut buf = (&b"hello"[..]) + /// .chain(&b"world"[..]); + /// + /// buf.first_mut().advance(1); + /// + /// let full = buf.to_bytes(); + /// assert_eq!(full, b"elloworld"[..]); + /// ``` + pub fn first_mut(&mut self) -> &mut T { + &mut self.a + } + + /// Gets a reference to the last underlying `Buf`.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::buf::BufExt; + /// + /// let buf = (&b"hello"[..]) + /// .chain(&b"world"[..]); + /// + /// assert_eq!(buf.last_ref()[..], b"world"[..]); + /// ``` + pub fn last_ref(&self) -> &U { + &self.b + } + + /// Gets a mutable reference to the last underlying `Buf`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, buf::BufExt}; + /// + /// let mut buf = (&b"hello "[..]) + /// .chain(&b"world"[..]); + /// + /// buf.last_mut().advance(1); + /// + /// let full = buf.to_bytes(); + /// assert_eq!(full, b"hello orld"[..]); + /// ``` + pub fn last_mut(&mut self) -> &mut U { + &mut self.b + } + + /// Consumes this `Chain`, returning the underlying values. + /// + /// # Examples + /// + /// ``` + /// use bytes::buf::BufExt; + /// + /// let chain = (&b"hello"[..]) + /// .chain(&b"world"[..]); + /// + /// let (first, last) = chain.into_inner(); + /// assert_eq!(first[..], b"hello"[..]); + /// assert_eq!(last[..], b"world"[..]); + /// ``` + pub fn into_inner(self) -> (T, U) { + (self.a, self.b) + } +} + +impl<T, U> Buf for Chain<T, U> + where T: Buf, + U: Buf, +{ + fn remaining(&self) -> usize { + self.a.remaining() + self.b.remaining() + } + + fn bytes(&self) -> &[u8] { + if self.a.has_remaining() { + self.a.bytes() + } else { + self.b.bytes() + } + } + + fn advance(&mut self, mut cnt: usize) { + let a_rem = self.a.remaining(); + + if a_rem != 0 { + if a_rem >= cnt { + self.a.advance(cnt); + return; + } + + // Consume what is left of a + self.a.advance(a_rem); + + cnt -= a_rem; + } + + self.b.advance(cnt); + } + + #[cfg(feature = "std")] + fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { + let mut n = self.a.bytes_vectored(dst); + n += self.b.bytes_vectored(&mut dst[n..]); + n + } +} + +impl<T, U> BufMut for Chain<T, U> + where T: BufMut, + U: BufMut, +{ + fn remaining_mut(&self) -> usize { + self.a.remaining_mut() + self.b.remaining_mut() + } + + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] { + if self.a.has_remaining_mut() { + self.a.bytes_mut() + } else { + self.b.bytes_mut() + } + } + + unsafe fn advance_mut(&mut self, mut cnt: usize) { + let a_rem = self.a.remaining_mut(); + + if a_rem != 0 { + if a_rem >= cnt { + self.a.advance_mut(cnt); + return; + } + + // Consume what is left of a + self.a.advance_mut(a_rem); + + cnt -= a_rem; + } + + self.b.advance_mut(cnt); + } + + #[cfg(feature = "std")] + fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize { + let mut n = self.a.bytes_vectored_mut(dst); + n += self.b.bytes_vectored_mut(&mut dst[n..]); + n + } +} + +impl<T, U> IntoIterator for Chain<T, U> +where + T: Buf, + U: Buf, +{ + type Item = u8; + type IntoIter = IntoIter<Chain<T, U>>; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self) + } +} diff --git a/third_party/rust/bytes/src/buf/ext/limit.rs b/third_party/rust/bytes/src/buf/ext/limit.rs new file mode 100644 index 0000000000..f86e01151c --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/limit.rs @@ -0,0 +1,77 @@ +use crate::BufMut; + +use core::{cmp, mem::MaybeUninit}; + +/// A `BufMut` adapter which limits the amount of bytes that can be written +/// to an underlying buffer. +#[derive(Debug)] +pub struct Limit<T> { + inner: T, + limit: usize, +} + +pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> { + Limit { + inner, + limit, + } +} + +impl<T> Limit<T> { + /// Consumes this `Limit`, returning the underlying value. 
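+ /// + /// A minimal sketch (writing through the limit, then recovering the + /// inner buffer): + /// + /// ``` + /// use bytes::{BufMut, buf::BufMutExt}; + /// + /// let mut dst = vec![].limit(5); + /// dst.put_slice(b"hi"); + /// + /// let vec = dst.into_inner(); + /// assert_eq!(*vec, b"hi"[..]); + /// ```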
+ pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the maximum number of bytes that can be written. + /// + /// # Note + /// + /// If the inner `BufMut` has fewer bytes than indicated by this method then + /// that is the actual number of available bytes. + pub fn limit(&self) -> usize { + self.limit + } + + /// Sets the maximum number of bytes that can be written. + /// + /// # Note + /// + /// If the inner `BufMut` has fewer bytes than `lim` then that is the actual + /// number of available bytes. + pub fn set_limit(&mut self, lim: usize) { + self.limit = lim + } +} + +impl<T: BufMut> BufMut for Limit<T> { + fn remaining_mut(&self) -> usize { + cmp::min(self.inner.remaining_mut(), self.limit) + } + + fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] { + let bytes = self.inner.bytes_mut(); + let end = cmp::min(bytes.len(), self.limit); + &mut bytes[..end] + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + assert!(cnt <= self.limit); + self.inner.advance_mut(cnt); + self.limit -= cnt; + } +} diff --git a/third_party/rust/bytes/src/buf/ext/mod.rs b/third_party/rust/bytes/src/buf/ext/mod.rs new file mode 100644 index 0000000000..7b0bdab200 --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/mod.rs @@ -0,0 +1,176 @@ +//! Extra utilities for `Buf` and `BufMut` types. + +use super::{Buf, BufMut}; + +mod chain; +mod limit; +#[cfg(feature = "std")] +mod reader; +mod take; +#[cfg(feature = "std")] +mod writer; + +pub use self::limit::Limit; +pub use self::take::Take; +pub use self::chain::Chain; + +#[cfg(feature = "std")] +pub use self::{reader::Reader, writer::Writer}; + +/// Extra methods for implementations of `Buf`. +pub trait BufExt: Buf { + /// Creates an adaptor which will read at most `limit` bytes from `self`. + /// + /// This function returns a new instance of `Buf` which will read at most + /// `limit` bytes. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, BufMut, buf::BufExt}; + /// + /// let mut buf = b"hello world"[..].take(5); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(dst, b"hello"); + /// + /// let mut buf = buf.into_inner(); + /// dst.clear(); + /// dst.put(&mut buf); + /// assert_eq!(dst, b" world"); + /// ``` + fn take(self, limit: usize) -> Take<Self> + where Self: Sized + { + take::new(self, limit) + } + + /// Creates an adaptor which will chain this buffer with another. + /// + /// The returned `Buf` instance will first consume all bytes from `self`. + /// Afterwards the output is equivalent to the output of `next`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, buf::BufExt}; + /// + /// let mut chain = b"hello "[..].chain(&b"world"[..]); + /// + /// let full = chain.to_bytes(); + /// assert_eq!(full.bytes(), b"hello world"); + /// ``` + fn chain<U: Buf>(self, next: U) -> Chain<Self, U> + where Self: Sized + { + Chain::new(self, next) + } + + /// Creates an adaptor which implements the `Read` trait for `self`. + /// + /// This function returns a new value which implements `Read` by adapting + /// the `Read` trait functions to the `Buf` trait functions.
Given that + /// `Buf` operations are infallible, none of the `Read` functions will + /// return with `Err`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, Bytes, buf::BufExt}; + /// use std::io::Read; + /// + /// let buf = Bytes::from("hello world"); + /// + /// let mut reader = buf.reader(); + /// let mut dst = [0; 1024]; + /// + /// let num = reader.read(&mut dst).unwrap(); + /// + /// assert_eq!(11, num); + /// assert_eq!(&dst[..11], &b"hello world"[..]); + /// ``` + #[cfg(feature = "std")] + fn reader(self) -> Reader<Self> where Self: Sized { + reader::new(self) + } +} + +impl<B: Buf + ?Sized> BufExt for B {} + +/// Extra methods for implementations of `BufMut`. +pub trait BufMutExt: BufMut { + /// Creates an adaptor which can write at most `limit` bytes to `self`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BufMut, buf::BufMutExt}; + /// + /// let arr = &mut [0u8; 128][..]; + /// assert_eq!(arr.remaining_mut(), 128); + /// + /// let dst = arr.limit(10); + /// assert_eq!(dst.remaining_mut(), 10); + /// ``` + fn limit(self, limit: usize) -> Limit<Self> + where Self: Sized + { + limit::new(self, limit) + } + + /// Creates an adaptor which implements the `Write` trait for `self`. + /// + /// This function returns a new value which implements `Write` by adapting + /// the `Write` trait functions to the `BufMut` trait functions. Given that + /// `BufMut` operations are infallible, none of the `Write` functions will + /// return with `Err`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BufMut, buf::BufMutExt}; + /// use std::io::Write; + /// + /// let mut buf = vec![].writer(); + /// + /// let num = buf.write(&b"hello world"[..]).unwrap(); + /// assert_eq!(11, num); + /// + /// let buf = buf.into_inner(); + /// + /// assert_eq!(*buf, b"hello world"[..]); + /// ``` + #[cfg(feature = "std")] + fn writer(self) -> Writer<Self> where Self: Sized { + writer::new(self) + } + + /// Creates an adaptor which will chain this buffer with another. + /// + /// The returned `BufMut` instance will first write to all remaining bytes in + /// `self`. Afterwards, it will write to `next`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BufMut, buf::BufMutExt}; + /// + /// let mut a = [0u8; 5]; + /// let mut b = [0u8; 6]; + /// + /// let mut chain = (&mut a[..]).chain_mut(&mut b[..]); + /// + /// chain.put_slice(b"hello world"); + /// + /// assert_eq!(&a[..], b"hello"); + /// assert_eq!(&b[..], b" world"); + /// ``` + fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U> + where Self: Sized + { + Chain::new(self, next) + } +} + +impl<B: BufMut + ?Sized> BufMutExt for B {} diff --git a/third_party/rust/bytes/src/buf/ext/reader.rs b/third_party/rust/bytes/src/buf/ext/reader.rs new file mode 100644 index 0000000000..e38103b1de --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/reader.rs @@ -0,0 +1,81 @@ +use crate::{Buf}; + +use std::{cmp, io}; + +/// A `Buf` adapter which implements `io::Read` for the inner value. +/// +/// This struct is generally created by calling `reader()` on `Buf`. See +/// documentation of [`reader()`](trait.Buf.html#method.reader) for more +/// details. +#[derive(Debug)] +pub struct Reader<B> { + buf: B, +} + +pub fn new<B>(buf: B) -> Reader<B> { + Reader { buf } +} + +impl<B: Buf> Reader<B> { + /// Gets a reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`.
+ /// + /// # Examples + /// + /// ```rust + /// use bytes::buf::BufExt; + /// + /// let mut buf = b"hello world".reader(); + /// + /// assert_eq!(b"hello world", buf.get_ref()); + /// ``` + pub fn get_ref(&self) -> &B { + &self.buf + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + pub fn get_mut(&mut self) -> &mut B { + &mut self.buf + } + + /// Consumes this `Reader`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, buf::BufExt}; + /// use std::io; + /// + /// let mut buf = b"hello world".reader(); + /// let mut dst = vec![]; + /// + /// io::copy(&mut buf, &mut dst).unwrap(); + /// + /// let buf = buf.into_inner(); + /// assert_eq!(0, buf.remaining()); + /// ``` + pub fn into_inner(self) -> B { + self.buf + } +} + +impl<B: Buf + Sized> io::Read for Reader<B> { + fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { + let len = cmp::min(self.buf.remaining(), dst.len()); + + Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]); + Ok(len) + } +} + +impl<B: Buf + Sized> io::BufRead for Reader<B> { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + Ok(self.buf.bytes()) + } + fn consume(&mut self, amt: usize) { + self.buf.advance(amt) + } +} diff --git a/third_party/rust/bytes/src/buf/ext/take.rs b/third_party/rust/bytes/src/buf/ext/take.rs new file mode 100644 index 0000000000..6fc4ffc72c --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/take.rs @@ -0,0 +1,150 @@ +use crate::Buf; + +use core::cmp; + +/// A `Buf` adapter which limits the bytes read from an underlying buffer. +/// +/// This struct is generally created by calling `take()` on `Buf`. See +/// documentation of [`take()`](trait.Buf.html#method.take) for more details. +#[derive(Debug)] +pub struct Take<T> { + inner: T, + limit: usize, +} + +pub fn new<T>(inner: T, limit: usize) -> Take<T> { + Take { + inner, + limit, + } +} + +impl<T> Take<T> { + /// Consumes this `Take`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::buf::{Buf, BufMut, BufExt}; + /// + /// let mut buf = b"hello world".take(2); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"he"[..]); + /// + /// let mut buf = buf.into_inner(); + /// + /// dst.clear(); + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"llo world"[..]); + /// ``` + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, buf::BufExt}; + /// + /// let mut buf = b"hello world".take(2); + /// + /// assert_eq!(11, buf.get_ref().remaining()); + /// ``` + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut, buf::BufExt}; + /// + /// let mut buf = b"hello world".take(2); + /// let mut dst = vec![]; + /// + /// buf.get_mut().advance(2); + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"ll"[..]); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the maximum number of bytes that can be read. + /// + /// # Note + /// + /// If the inner `Buf` has fewer bytes than indicated by this method then + /// that is the actual number of available bytes. 
+ /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, buf::BufExt}; + /// + /// let mut buf = b"hello world".take(2); + /// + /// assert_eq!(2, buf.limit()); + /// assert_eq!(b'h', buf.get_u8()); + /// assert_eq!(1, buf.limit()); + /// ``` + pub fn limit(&self) -> usize { + self.limit + } + + /// Sets the maximum number of bytes that can be read. + /// + /// # Note + /// + /// If the inner `Buf` has fewer bytes than `lim` then that is the actual + /// number of available bytes. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BufMut, buf::BufExt}; + /// + /// let mut buf = b"hello world".take(2); + /// let mut dst = vec![]; + /// + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"he"[..]); + /// + /// dst.clear(); + /// + /// buf.set_limit(3); + /// dst.put(&mut buf); + /// assert_eq!(*dst, b"llo"[..]); + /// ``` + pub fn set_limit(&mut self, lim: usize) { + self.limit = lim + } +} + +impl<T: Buf> Buf for Take<T> { + fn remaining(&self) -> usize { + cmp::min(self.inner.remaining(), self.limit) + } + + fn bytes(&self) -> &[u8] { + let bytes = self.inner.bytes(); + &bytes[..cmp::min(bytes.len(), self.limit)] + } + + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.limit); + self.inner.advance(cnt); + self.limit -= cnt; + } +} diff --git a/third_party/rust/bytes/src/buf/ext/writer.rs b/third_party/rust/bytes/src/buf/ext/writer.rs new file mode 100644 index 0000000000..1418418e81 --- /dev/null +++ b/third_party/rust/bytes/src/buf/ext/writer.rs @@ -0,0 +1,88 @@ +use crate::BufMut; + +use std::{cmp, io}; + +/// A `BufMut` adapter which implements `io::Write` for the inner value. +/// +/// This struct is generally created by calling `writer()` on `BufMut`. See +/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more +/// details. +#[derive(Debug)] +pub struct Writer<B> { + buf: B, +} + +pub fn new<B>(buf: B) -> Writer<B> { + Writer { buf } +} + +impl<B: BufMut> Writer<B> { + /// Gets a reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::buf::BufMutExt; + /// + /// let mut buf = Vec::with_capacity(1024).writer(); + /// + /// assert_eq!(1024, buf.get_ref().capacity()); + /// ``` + pub fn get_ref(&self) -> &B { + &self.buf + } + + /// Gets a mutable reference to the underlying `BufMut`. + /// + /// It is inadvisable to directly write to the underlying `BufMut`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::buf::BufMutExt; + /// + /// let mut buf = vec![].writer(); + /// + /// buf.get_mut().reserve(1024); + /// + /// assert_eq!(1024, buf.get_ref().capacity()); + /// ``` + pub fn get_mut(&mut self) -> &mut B { + &mut self.buf + } + + /// Consumes this `Writer`, returning the underlying value. 
+ /// + /// # Examples + /// + /// ```rust + /// use bytes::buf::BufMutExt; + /// use std::io; + /// + /// let mut buf = vec![].writer(); + /// let mut src = &b"hello world"[..]; + /// + /// io::copy(&mut src, &mut buf).unwrap(); + /// + /// let buf = buf.into_inner(); + /// assert_eq!(*buf, b"hello world"[..]); + /// ``` + pub fn into_inner(self) -> B { + self.buf + } +} + +impl<B: BufMut + Sized> io::Write for Writer<B> { + fn write(&mut self, src: &[u8]) -> io::Result<usize> { + let n = cmp::min(self.buf.remaining_mut(), src.len()); + + self.buf.put(&src[0..n]); + Ok(n) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} diff --git a/third_party/rust/bytes/src/buf/iter.rs b/third_party/rust/bytes/src/buf/iter.rs new file mode 100644 index 0000000000..1af421a8d5 --- /dev/null +++ b/third_party/rust/bytes/src/buf/iter.rs @@ -0,0 +1,133 @@ +use crate::Buf; + +/// Iterator over the bytes contained by the buffer. +/// +/// This struct is created by calling `into_iter` on a [`Buf`] value, or +/// directly via `IntoIter::new`. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use bytes::{Buf, Bytes}; +/// +/// let buf = Bytes::from(&b"abc"[..]); +/// let mut iter = buf.into_iter(); +/// +/// assert_eq!(iter.next(), Some(b'a')); +/// assert_eq!(iter.next(), Some(b'b')); +/// assert_eq!(iter.next(), Some(b'c')); +/// assert_eq!(iter.next(), None); +/// ``` +/// +/// [`Buf`]: trait.Buf.html +#[derive(Debug)] +pub struct IntoIter<T> { + inner: T, +} + +impl<T> IntoIter<T> { + /// Creates an iterator over the bytes contained by the buffer. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Buf, Bytes}; + /// use bytes::buf::IntoIter; + /// + /// let buf = Bytes::from_static(b"abc"); + /// let mut iter = IntoIter::new(buf); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// assert_eq!(iter.next(), Some(b'b')); + /// assert_eq!(iter.next(), Some(b'c')); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn new(inner: T) -> IntoIter<T> { + IntoIter { inner } + } + + /// Consumes this `IntoIter`, returning the underlying value. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, Bytes}; + /// + /// let buf = Bytes::from(&b"abc"[..]); + /// let mut iter = buf.into_iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// let buf = iter.into_inner(); + /// assert_eq!(2, buf.remaining()); + /// ``` + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`. + /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, Bytes}; + /// + /// let buf = Bytes::from(&b"abc"[..]); + /// let mut iter = buf.into_iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// assert_eq!(2, iter.get_ref().remaining()); + /// ``` + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying `Buf`. + /// + /// It is inadvisable to directly read from the underlying `Buf`.
+ /// + /// # Examples + /// + /// ```rust + /// use bytes::{Buf, BytesMut}; + /// + /// let buf = BytesMut::from(&b"abc"[..]); + /// let mut iter = buf.into_iter(); + /// + /// assert_eq!(iter.next(), Some(b'a')); + /// + /// iter.get_mut().advance(1); + /// + /// assert_eq!(iter.next(), Some(b'c')); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } +} + + +impl<T: Buf> Iterator for IntoIter<T> { + type Item = u8; + + fn next(&mut self) -> Option<u8> { + if !self.inner.has_remaining() { + return None; + } + + let b = self.inner.bytes()[0]; + self.inner.advance(1); + + Some(b) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let rem = self.inner.remaining(); + (rem, Some(rem)) + } +} + +impl<T: Buf> ExactSizeIterator for IntoIter<T> { } diff --git a/third_party/rust/bytes/src/buf/mod.rs b/third_party/rust/bytes/src/buf/mod.rs new file mode 100644 index 0000000000..d4538f21ea --- /dev/null +++ b/third_party/rust/bytes/src/buf/mod.rs @@ -0,0 +1,31 @@ +//! Utilities for working with buffers. +//! +//! A buffer is any structure that contains a sequence of bytes. The bytes may +//! or may not be stored in contiguous memory. This module contains traits used +//! to abstract over buffers as well as utilities for working with buffer types. +//! +//! # `Buf`, `BufMut` +//! +//! These are the two foundational traits for abstractly working with buffers. +//! They can be thought of as iterators for byte structures. They offer additional +//! performance over `Iterator` by providing an API optimized for byte slices. +//! +//! See [`Buf`] and [`BufMut`] for more details. +//! +//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) +//! [`Buf`]: trait.Buf.html +//! [`BufMut`]: trait.BufMut.html + +mod buf_impl; +mod buf_mut; +pub mod ext; +mod iter; +mod vec_deque; + +pub use self::buf_impl::Buf; +pub use self::buf_mut::BufMut; +pub use self::ext::{BufExt, BufMutExt}; +#[cfg(feature = "std")] +pub use self::buf_mut::IoSliceMut; +pub use self::iter::IntoIter; + diff --git a/third_party/rust/bytes/src/buf/vec_deque.rs b/third_party/rust/bytes/src/buf/vec_deque.rs new file mode 100644 index 0000000000..195e6897f4 --- /dev/null +++ b/third_party/rust/bytes/src/buf/vec_deque.rs @@ -0,0 +1,22 @@ +use alloc::collections::VecDeque; + +use super::Buf; + +impl Buf for VecDeque<u8> { + fn remaining(&self) -> usize { + self.len() + } + + fn bytes(&self) -> &[u8] { + let (s1, s2) = self.as_slices(); + if s1.is_empty() { + s2 + } else { + s1 + } + } + + fn advance(&mut self, cnt: usize) { + self.drain(..cnt); + } +} diff --git a/third_party/rust/bytes/src/bytes.rs b/third_party/rust/bytes/src/bytes.rs new file mode 100644 index 0000000000..e2f08b57e5 --- /dev/null +++ b/third_party/rust/bytes/src/bytes.rs @@ -0,0 +1,1004 @@ +use core::{cmp, fmt, hash, mem, ptr, slice, usize}; +use core::iter::{FromIterator}; +use core::ops::{Deref, RangeBounds}; + +use alloc::{vec::Vec, string::String, boxed::Box, borrow::Borrow}; + +use crate::Buf; +use crate::buf::IntoIter; +use crate::debug; +use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; + +/// A reference counted contiguous slice of memory. +/// +/// `Bytes` is an efficient container for storing and operating on contiguous +/// slices of memory. It is intended for use primarily in networking code, but +/// could have applications elsewhere as well. +/// +/// `Bytes` values facilitate zero-copy network programming by allowing multiple +/// `Bytes` objects to point to the same underlying memory.
This is managed by +/// using a reference count to track when the memory is no longer needed and can +/// be freed. +/// +/// ``` +/// use bytes::Bytes; +/// +/// let mut mem = Bytes::from("Hello world"); +/// let a = mem.slice(0..5); +/// +/// assert_eq!(a, "Hello"); +/// +/// let b = mem.split_to(6); +/// +/// assert_eq!(mem, "world"); +/// assert_eq!(b, "Hello "); +/// ``` +/// +/// # Memory layout +/// +/// The `Bytes` struct itself is fairly small, limited to 4 `usize` fields used +/// to track information about which segment of the underlying memory the +/// `Bytes` handle has access to. +/// +/// `Bytes` keeps both a pointer to the shared `Arc` containing the full memory +/// slice and a pointer to the start of the region visible to the handle. +/// `Bytes` also tracks the length of its view into the memory. +/// +/// # Sharing +/// +/// The memory itself is reference counted, and multiple `Bytes` objects may +/// point to the same region. Each `Bytes` handle points to a different section within +/// the memory region, and `Bytes` handles may or may not have overlapping views +/// into the memory. +/// +/// +/// ```text +/// +/// Arc ptrs +---------+ +/// ________________________ / | Bytes 2 | +/// / +---------+ +/// / +-----------+ | | +/// |_________/ | Bytes 1 | | | +/// | +-----------+ | | +/// | | | ___/ data | tail +/// | data | tail |/ | +/// v v v v +/// +-----+---------------------------------+-----+ +/// | Arc | | | | | +/// +-----+---------------------------------+-----+ +/// ``` +pub struct Bytes { + ptr: *const u8, + len: usize, + // inlined "trait object" + data: AtomicPtr<()>, + vtable: &'static Vtable, +} + +pub(crate) struct Vtable { + /// fn(data, ptr, len) + pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, + /// fn(data, ptr, len) + pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), +} + +impl Bytes { + /// Creates a new empty `Bytes`. + /// + /// This will not allocate and the returned `Bytes` handle will be empty. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::new(); + /// assert_eq!(&b[..], b""); + /// ``` + #[inline] + pub fn new() -> Bytes { + Bytes::from_static(b"") + } + + /// Creates a new `Bytes` from a static slice. + /// + /// The returned `Bytes` will point directly to the static slice. There is + /// no allocating or copying. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::from_static(b"hello"); + /// assert_eq!(&b[..], b"hello"); + /// ``` + #[inline] + #[cfg(not(all(loom, test)))] + pub const fn from_static(bytes: &'static [u8]) -> Bytes { + Bytes { + ptr: bytes.as_ptr(), + len: bytes.len(), + data: AtomicPtr::new(ptr::null_mut()), + vtable: &STATIC_VTABLE, + } + } + + #[cfg(all(loom, test))] + pub fn from_static(bytes: &'static [u8]) -> Bytes { + Bytes { + ptr: bytes.as_ptr(), + len: bytes.len(), + data: AtomicPtr::new(ptr::null_mut()), + vtable: &STATIC_VTABLE, + } + } + + /// Returns the number of bytes contained in this `Bytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::from(&b"hello"[..]); + /// assert_eq!(b.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Returns true if the `Bytes` has a length of 0.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let b = Bytes::new(); + /// assert!(b.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + + /// Creates a `Bytes` instance from a slice by copying it. + pub fn copy_from_slice(data: &[u8]) -> Self { + data.to_vec().into() + } + + /// Returns a slice of self for the provided range. + /// + /// This will increment the reference count for the underlying memory and + /// return a new `Bytes` handle set to the slice. + /// + /// This operation is `O(1)`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let a = Bytes::from(&b"hello world"[..]); + /// let b = a.slice(2..5); + /// + /// assert_eq!(&b[..], b"llo"); + /// ``` + /// + /// # Panics + /// + /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing + /// will panic. + pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes { + use core::ops::Bound; + + let len = self.len(); + + let begin = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + 1, + Bound::Unbounded => 0, + }; + + let end = match range.end_bound() { + Bound::Included(&n) => n + 1, + Bound::Excluded(&n) => n, + Bound::Unbounded => len, + }; + + assert!(begin <= end); + assert!(end <= len); + + if end == begin { + return Bytes::new(); + } + + + let mut ret = self.clone(); + + ret.len = end - begin; + ret.ptr = unsafe { ret.ptr.offset(begin as isize) }; + + ret + } + + /// Returns a slice of self that is equivalent to the given `subset`. + /// + /// When processing a `Bytes` buffer with other tools, one often gets a + /// `&[u8]` which is in fact a slice of the `Bytes`, i.e. a subset of it. + /// This function turns that `&[u8]` into another `Bytes`, as if one had + /// called `self.slice()` with the offsets that correspond to `subset`. + /// + /// This operation is `O(1)`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let bytes = Bytes::from(&b"012345678"[..]); + /// let as_slice = bytes.as_ref(); + /// let subset = &as_slice[2..6]; + /// let subslice = bytes.slice_ref(&subset); + /// assert_eq!(&subslice[..], b"2345"); + /// ``` + /// + /// # Panics + /// + /// Requires that the given `subset` slice is in fact contained within the + /// `Bytes` buffer; otherwise this function will panic. + pub fn slice_ref(&self, subset: &[u8]) -> Bytes { + let bytes_p = self.as_ptr() as usize; + let bytes_len = self.len(); + + let sub_p = subset.as_ptr() as usize; + let sub_len = subset.len(); + + assert!( + sub_p >= bytes_p, + "subset pointer ({:p}) is smaller than self pointer ({:p})", + sub_p as *const u8, + bytes_p as *const u8, + ); + assert!( + sub_p + sub_len <= bytes_p + bytes_len, + "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", + bytes_p as *const u8, + bytes_len, + sub_p as *const u8, + sub_len, + ); + + let sub_offset = sub_p - bytes_p; + + self.slice(sub_offset..(sub_offset + sub_len)) + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[0, at)`, and the returned `Bytes` + /// contains elements `[at, len)`. + /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let mut a = Bytes::from(&b"hello world"[..]); + /// let b = a.split_off(5); + /// + /// assert_eq!(&a[..], b"hello"); + /// assert_eq!(&b[..], b" world"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > len`. + #[must_use = "consider Bytes::truncate if you don't need the other half"] + pub fn split_off(&mut self, at: usize) -> Bytes { + assert!(at <= self.len()); + + if at == self.len() { + return Bytes::new(); + } + + if at == 0 { + return mem::replace(self, Bytes::new()); + } + + let mut ret = self.clone(); + + self.len = at; + + unsafe { ret.inc_start(at) }; + + ret + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[at, len)`, and the returned + /// `Bytes` contains elements `[0, at)`. + /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let mut a = Bytes::from(&b"hello world"[..]); + /// let b = a.split_to(5); + /// + /// assert_eq!(&a[..], b" world"); + /// assert_eq!(&b[..], b"hello"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > len`. + #[must_use = "consider Bytes::advance if you don't need the other half"] + pub fn split_to(&mut self, at: usize) -> Bytes { + assert!(at <= self.len()); + + if at == self.len() { + return mem::replace(self, Bytes::new()); + } + + if at == 0 { + return Bytes::new(); + } + + + let mut ret = self.clone(); + + unsafe { self.inc_start(at) }; + + ret.len = at; + ret + } + + /// Shortens the buffer, keeping the first `len` bytes and dropping the + /// rest. + /// + /// If `len` is greater than the buffer's current length, this has no + /// effect. + /// + /// The [`split_off`] method can emulate `truncate`, but this causes the + /// excess bytes to be returned instead of dropped. + /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let mut buf = Bytes::from(&b"hello world"[..]); + /// buf.truncate(5); + /// assert_eq!(buf, b"hello"[..]); + /// ``` + /// + /// [`split_off`]: #method.split_off + #[inline] + pub fn truncate(&mut self, len: usize) { + if len < self.len { + self.len = len; + } + } + + /// Clears the buffer, removing all data. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let mut buf = Bytes::from(&b"hello world"[..]); + /// buf.clear(); + /// assert!(buf.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + self.truncate(0); + } + + #[inline] + pub(crate) unsafe fn with_vtable(ptr: *const u8, len: usize, data: AtomicPtr<()>, vtable: &'static Vtable) -> Bytes { + Bytes { + ptr, + len, + data, + vtable, + } + } + + // private + + #[inline] + fn as_slice(&self) -> &[u8] { + unsafe { + slice::from_raw_parts(self.ptr, self.len) + } + } + + #[inline] + unsafe fn inc_start(&mut self, by: usize) { + // should already be asserted, but debug assert for tests + debug_assert!(self.len >= by); + self.len -= by; + self.ptr = self.ptr.offset(by as isize); + } +} + +// Vtable must enforce this behavior +unsafe impl Send for Bytes {} +unsafe impl Sync for Bytes {} + +impl Drop for Bytes { + #[inline] + fn drop(&mut self) { + unsafe { + (self.vtable.drop)(&mut self.data, self.ptr, self.len) + } + } +} + +impl Clone for Bytes { + #[inline] + fn clone(&self) -> Bytes { + unsafe { + (self.vtable.clone)(&self.data, self.ptr, self.len) + } + } +} + +impl fmt::Debug for Bytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&debug::BsDebug(&self.as_slice()), f) + } +} + +impl Buf for Bytes { + #[inline] + fn remaining(&self) -> usize { + self.len() + } + + #[inline] + fn bytes(&self) -> &[u8] { + self.as_slice() + } + + #[inline] + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.len(), "cannot advance past `remaining`"); + unsafe { + self.inc_start(cnt); + } + } + + fn to_bytes(&mut self) -> crate::Bytes { + core::mem::replace(self, Bytes::new()) + } +} + +impl Deref for Bytes { + type Target = [u8]; + + #[inline] + fn deref(&self) -> &[u8] { + self.as_slice() + } +} + +impl AsRef<[u8]> for Bytes { + #[inline] + fn as_ref(&self) -> &[u8] { + self.as_slice() + } +} + +impl hash::Hash for Bytes { + fn hash<H>(&self, state: &mut H) where H: hash::Hasher { + self.as_slice().hash(state); + } +} + +impl Borrow<[u8]> for Bytes { + fn borrow(&self) -> &[u8] { + self.as_slice() + } +} + +impl IntoIterator for Bytes { + type Item = u8; + type IntoIter = IntoIter<Bytes>; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self) + } +} + +impl<'a> IntoIterator for &'a Bytes { + type Item = &'a u8; + type IntoIter = core::slice::Iter<'a, u8>; + + fn into_iter(self) -> Self::IntoIter { + self.as_slice().into_iter() + } +} + +impl FromIterator<u8> for Bytes { + fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self { + Vec::from_iter(into_iter).into() + } +} + +// impl Eq + +impl PartialEq for Bytes { + fn eq(&self, other: &Bytes) -> bool { + self.as_slice() == other.as_slice() + } +} + +impl PartialOrd for Bytes { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(other.as_slice()) + } +} + +impl Ord for Bytes { + fn cmp(&self, other: &Bytes) -> cmp::Ordering { + self.as_slice().cmp(other.as_slice()) + } +} + +impl Eq for Bytes {} + +impl PartialEq<[u8]> for Bytes { + fn eq(&self, other: &[u8]) -> bool { + self.as_slice() == other + } +} + +impl PartialOrd<[u8]> for Bytes { + fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(other) + } +} + +impl PartialEq<Bytes> for [u8] { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for [u8] { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + 
} +} + +impl PartialEq<str> for Bytes { + fn eq(&self, other: &str) -> bool { + self.as_slice() == other.as_bytes() + } +} + +impl PartialOrd<str> for Bytes { + fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<Bytes> for str { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for str { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Vec<u8>> for Bytes { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] + } +} + +impl PartialOrd<Vec<u8>> for Bytes { + fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(&other[..]) + } +} + +impl PartialEq<Bytes> for Vec<u8> { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for Vec<u8> { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<String> for Bytes { + fn eq(&self, other: &String) -> bool { + *self == &other[..] + } +} + +impl PartialOrd<String> for Bytes { + fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<Bytes> for String { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for String { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Bytes> for &[u8] { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for &[u8] { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Bytes> for &str { + fn eq(&self, other: &Bytes) -> bool { + *other == *self + } +} + +impl PartialOrd<Bytes> for &str { + fn partial_cmp(&self, other: &Bytes) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes + where Bytes: PartialEq<T> +{ + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes + where Bytes: PartialOrd<T> +{ + fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { + self.partial_cmp(&**other) + } +} + +// impl From + +impl Default for Bytes { + #[inline] + fn default() -> Bytes { + Bytes::new() + } +} + +impl From<&'static [u8]> for Bytes { + fn from(slice: &'static [u8]) -> Bytes { + Bytes::from_static(slice) + } +} + +impl From<&'static str> for Bytes { + fn from(slice: &'static str) -> Bytes { + Bytes::from_static(slice.as_bytes()) + } +} + +impl From<Vec<u8>> for Bytes { + fn from(vec: Vec<u8>) -> Bytes { + // into_boxed_slice doesn't return a heap allocation for empty vectors, + // so the pointer isn't aligned enough for the KIND_VEC stashing to + // work. 
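+ // (A sketch of the tagging scheme used below: the boxed slice's + // pointer has its least-significant bit clear, so `ptr | KIND_VEC` + // marks the handle as vec-backed, and `rebuild_vec` masks the bit + // off again with `!KIND_MASK` to recover the allocation.)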
+ if vec.is_empty() { + return Bytes::new(); + } + + let slice = vec.into_boxed_slice(); + let len = slice.len(); + let ptr = slice.as_ptr(); + + assert!( + ptr as usize & KIND_VEC == 0, + "Vec pointer should not have LSB set: {:p}", + ptr, + ); + drop(Box::into_raw(slice)); + + let data = ptr as usize | KIND_VEC; + Bytes { + ptr, + len, + data: AtomicPtr::new(data as *mut _), + vtable: &SHARED_VTABLE, + } + } +} + +impl From<String> for Bytes { + fn from(s: String) -> Bytes { + Bytes::from(s.into_bytes()) + } +} + +// ===== impl Vtable ===== + +impl fmt::Debug for Vtable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Vtable") + .field("clone", &(self.clone as *const ())) + .field("drop", &(self.drop as *const ())) + .finish() + } +} + +// ===== impl StaticVtable ===== + +const STATIC_VTABLE: Vtable = Vtable { + clone: static_clone, + drop: static_drop, +}; + +unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let slice = slice::from_raw_parts(ptr, len); + Bytes::from_static(slice) +} + +unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { + // nothing to drop for &'static [u8] +} + +// ===== impl SharedVtable ===== + +struct Shared { + // holds vec for drop, but otherwise doesn't access it + _vec: Vec<u8>, + ref_cnt: AtomicUsize, +} + +static SHARED_VTABLE: Vtable = Vtable { + clone: shared_clone, + drop: shared_drop, +}; + +const KIND_ARC: usize = 0b0; +const KIND_VEC: usize = 0b1; +const KIND_MASK: usize = 0b1; + +unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shallow_clone_arc(shared as _, ptr, len) + } else { + debug_assert_eq!(kind, KIND_VEC); + shallow_clone_vec(data, shared, ptr, len) + } +} + +unsafe fn shared_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { + let shared = *data.get_mut(); + let kind = shared as usize & KIND_MASK; + + + if kind == KIND_ARC { + release_shared(shared as *mut Shared); + } else { + debug_assert_eq!(kind, KIND_VEC); + + drop(rebuild_vec(shared, ptr, len)); + } +} + +unsafe fn rebuild_vec(shared: *const (), offset: *const u8, len: usize) -> Vec<u8> { + debug_assert!( + shared as usize & KIND_MASK == KIND_VEC, + "rebuild_vec should have been called with KIND_VEC", + ); + debug_assert!( + shared as usize & !KIND_MASK != 0, + "rebuild_vec should be called with non-null pointer: {:p}", + shared, + ); + + let buf = (shared as usize & !KIND_MASK) as *mut u8; + let cap = (offset as usize - buf as usize) + len; + Vec::from_raw_parts(buf, cap, cap) +} + +unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes { + let old_size = (*shared).ref_cnt.fetch_add(1, Ordering::Relaxed); + + if old_size > usize::MAX >> 1 { + crate::abort(); + } + + Bytes { + ptr, + len, + data: AtomicPtr::new(shared as _), + vtable: &SHARED_VTABLE, + } +} + +#[cold] +unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), offset: *const u8, len: usize) -> Bytes { + // If the buffer is still tracked in a `Vec<u8>`, it is time to + // promote the vec to an `Arc`. This could potentially be called + // concurrently, so some care must be taken. + + debug_assert_eq!(ptr as usize & KIND_MASK, KIND_VEC); + + // First, allocate a new `Shared` instance containing the + // `Vec` fields. It's important to note that `ptr`, `len`, + // and `cap` cannot be mutated without having `&mut self`.
+ // This means that these fields will not be concurrently + // updated, and since the buffer hasn't been promoted to an + // `Arc`, those three fields still are the components of the + // vector. + let vec = rebuild_vec(ptr as *const (), offset, len); + let shared = Box::new(Shared { + _vec: vec, + // Initialize refcount to 2. One for this reference, and one + // for the new clone that will be returned from + // `shallow_clone`. + ref_cnt: AtomicUsize::new(2), + }); + + let shared = Box::into_raw(shared); + + // The pointer should be aligned, so this assert should + // always succeed. + debug_assert!(0 == (shared as usize & KIND_MASK)); + + // Try compare & swapping the pointer into the `data` field. + // `Release` is used to synchronize with other threads that + // will load the `data` field. + // + // If the `compare_and_swap` fails, then the thread lost the + // race to promote the buffer to shared. The `Acquire` + // ordering will synchronize with the `compare_and_swap` + // that happened in the other thread and the `Shared` + // pointed to by `actual` will be visible. + let actual = atom.compare_and_swap(ptr as _, shared as _, Ordering::AcqRel); + + if actual as usize == ptr as usize { + // The upgrade was successful, the new handle can be + // returned. + return Bytes { + ptr: offset, + len, + data: AtomicPtr::new(shared as _), + vtable: &SHARED_VTABLE, + }; + } + + // The upgrade failed; a concurrent clone happened. Release + // the allocation that was made in this thread; it will not + // be needed. + let shared = Box::from_raw(shared); + mem::forget(*shared); + + // Buffer already promoted to shared storage, so increment ref + // count. + shallow_clone_arc(actual as _, offset, len) +} + +unsafe fn release_shared(ptr: *mut Shared) { + // `Shared` storage... follow the drop steps from Arc. + if (*ptr).ref_cnt.fetch_sub(1, Ordering::Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happen before), and an + // > "acquire" operation before deleting the object.
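+ // + // (Schematically, this is the same drop protocol `std::sync::Arc` + // uses: `fetch_sub(1, Release)`, and only the thread that observes + // the count hit zero runs `fence(Acquire)` and frees the payload.)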
+ // + // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html + atomic::fence(Ordering::Acquire); + + // Drop the data + Box::from_raw(ptr); +} + +// compile-fails + +/// ```compile_fail +/// use bytes::Bytes; +/// #[deny(unused_must_use)] +/// { +/// let mut b1 = Bytes::from("hello world"); +/// b1.split_to(6); +/// } +/// ``` +fn _split_to_must_use() {} + +/// ```compile_fail +/// use bytes::Bytes; +/// #[deny(unused_must_use)] +/// { +/// let mut b1 = Bytes::from("hello world"); +/// b1.split_off(6); +/// } +/// ``` +fn _split_off_must_use() {} + +// fuzz tests +#[cfg(all(test, loom))] +mod fuzz { + use std::sync::Arc; + use loom::thread; + + use super::Bytes; + #[test] + fn bytes_cloning_vec() { + loom::model(|| { + let a = Bytes::from(b"abcdefgh".to_vec()); + let addr = a.as_ptr() as usize; + + // test the Bytes::clone is Sync by putting it in an Arc + let a1 = Arc::new(a); + let a2 = a1.clone(); + + let t1 = thread::spawn(move || { + let b: Bytes = (*a1).clone(); + assert_eq!(b.as_ptr() as usize, addr); + }); + + let t2 = thread::spawn(move || { + let b: Bytes = (*a2).clone(); + assert_eq!(b.as_ptr() as usize, addr); + }); + + t1.join().unwrap(); + t2.join().unwrap(); + }); + } +} diff --git a/third_party/rust/bytes/src/bytes_mut.rs b/third_party/rust/bytes/src/bytes_mut.rs new file mode 100644 index 0000000000..7a15444343 --- /dev/null +++ b/third_party/rust/bytes/src/bytes_mut.rs @@ -0,0 +1,1533 @@ +use core::{cmp, fmt, hash, isize, slice, usize}; +use core::mem::{self, ManuallyDrop}; +use core::ops::{Deref, DerefMut}; +use core::ptr::{self, NonNull}; +use core::iter::{FromIterator, Iterator}; + +use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}}; + +use crate::{Bytes, Buf, BufMut}; +use crate::bytes::Vtable; +use crate::buf::IntoIter; +use crate::debug; +use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; + +/// A unique reference to a contiguous slice of memory. +/// +/// `BytesMut` represents a unique view into a potentially shared memory region. +/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to +/// mutate the memory. It is similar to a `Vec<u8>` but with fewer copies and +/// allocations. +/// +/// # Growth +/// +/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as +/// necessary. However, explicitly reserving the required space up-front before +/// a series of inserts will be more efficient. +/// +/// # Examples +/// +/// ``` +/// use bytes::{BytesMut, BufMut}; +/// +/// let mut buf = BytesMut::with_capacity(64); +/// +/// buf.put_u8(b'h'); +/// buf.put_u8(b'e'); +/// buf.put(&b"llo"[..]); +/// +/// assert_eq!(&buf[..], b"hello"); +/// +/// // Freeze the buffer so that it can be shared +/// let a = buf.freeze(); +/// +/// // This does not allocate, instead `b` points to the same memory. +/// let b = a.clone(); +/// +/// assert_eq!(&a[..], b"hello"); +/// assert_eq!(&b[..], b"hello"); +/// ``` +pub struct BytesMut { + ptr: NonNull<u8>, + len: usize, + cap: usize, + data: *mut Shared, +} + +// Thread-safe reference-counted container for the shared storage. This is mostly +// the same as `core::sync::Arc` but without the weak counter. The ref counting +// fns are based on the ones found in `std`. +// +// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends +// up making the overall code simpler and easier to reason about. This is due to +// some of the logic around setting `Inner::arc` and other ways the `arc` field +// is used.
Using `Arc` ended up requiring a number of funky transmutes and +// other shenanigans to make it work. +struct Shared { + vec: Vec<u8>, + original_capacity_repr: usize, + ref_count: AtomicUsize, +} + +// Buffer storage strategy flags. +const KIND_ARC: usize = 0b0; +const KIND_VEC: usize = 0b1; +const KIND_MASK: usize = 0b1; + +// The max original capacity value. Any `Bytes` allocated with a greater initial +// capacity will default to this. +const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17; +// The original capacity algorithm will not take effect unless the originally +// allocated capacity was at least 1kb in size. +const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; +// The original capacity is stored in powers of 2 starting at 1kb to a max of +// 64kb. Representing it as such requires only 3 bits of storage. +const ORIGINAL_CAPACITY_MASK: usize = 0b11100; +const ORIGINAL_CAPACITY_OFFSET: usize = 2; + +// When the storage is in the `Vec` representation, the pointer can be advanced +// at most this value. This is because the storage available to track the +// offset is the pointer width minus the number of KIND bits and the number of +// ORIGINAL_CAPACITY bits. +const VEC_POS_OFFSET: usize = 5; +const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; +const NOT_VEC_POS_MASK: usize = 0b11111; + +#[cfg(target_pointer_width = "64")] +const PTR_WIDTH: usize = 64; +#[cfg(target_pointer_width = "32")] +const PTR_WIDTH: usize = 32; + +/* + * + * ===== BytesMut ===== + * + */ + +impl BytesMut { + /// Creates a new `BytesMut` with the specified capacity. + /// + /// The returned `BytesMut` will be able to hold at least `capacity` bytes + /// without reallocating. + /// + /// It is important to note that this function does not specify the length + /// of the returned `BytesMut`, but only the capacity. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// + /// let mut bytes = BytesMut::with_capacity(64); + /// + /// // `bytes` contains no data, even though there is capacity + /// assert_eq!(bytes.len(), 0); + /// + /// bytes.put(&b"hello world"[..]); + /// + /// assert_eq!(&bytes[..], b"hello world"); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> BytesMut { + BytesMut::from_vec(Vec::with_capacity(capacity)) + } + + /// Creates a new `BytesMut` with default capacity. + /// + /// The resulting object has length 0 and unspecified capacity. + /// This function does not allocate. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// + /// let mut bytes = BytesMut::new(); + /// + /// assert_eq!(0, bytes.len()); + /// + /// bytes.reserve(2); + /// bytes.put_slice(b"xy"); + /// + /// assert_eq!(&b"xy"[..], &bytes[..]); + /// ``` + #[inline] + pub fn new() -> BytesMut { + BytesMut::with_capacity(0) + } + + /// Returns the number of bytes contained in this `BytesMut`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::from(&b"hello"[..]); + /// assert_eq!(b.len(), 5); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Returns true if the `BytesMut` has a length of 0. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::with_capacity(64); + /// assert!(b.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + /// Returns the number of bytes the `BytesMut` can hold without reallocating.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let b = BytesMut::with_capacity(64); + /// assert_eq!(b.capacity(), 64); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.cap + } + + /// Converts `self` into an immutable `Bytes`. + /// + /// The conversion is zero cost and is used to indicate that the slice + /// referenced by the handle will no longer be mutated. Once the conversion + /// is done, the handle can be cloned and shared across threads. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// use std::thread; + /// + /// let mut b = BytesMut::with_capacity(64); + /// b.put(&b"hello world"[..]); + /// let b1 = b.freeze(); + /// let b2 = b1.clone(); + /// + /// let th = thread::spawn(move || { + /// assert_eq!(&b1[..], b"hello world"); + /// }); + /// + /// assert_eq!(&b2[..], b"hello world"); + /// th.join().unwrap(); + /// ``` + #[inline] + pub fn freeze(mut self) -> Bytes { + if self.kind() == KIND_VEC { + // Just re-use `Bytes` internal Vec vtable + unsafe { + let (off, _) = self.get_vec_pos(); + let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); + mem::forget(self); + vec.into() + } + } else { + debug_assert_eq!(self.kind(), KIND_ARC); + + let ptr = self.ptr.as_ptr(); + let len = self.len; + let data = AtomicPtr::new(self.data as _); + mem::forget(self); + unsafe { + Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) + } + } + } + + /// Splits the bytes into two at the given index. + /// + /// Afterwards `self` contains elements `[0, at)`, and the returned + /// `BytesMut` contains elements `[at, capacity)`. + /// + /// This is an `O(1)` operation that just increases the reference count + /// and sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut a = BytesMut::from(&b"hello world"[..]); + /// let mut b = a.split_off(5); + /// + /// a[0] = b'j'; + /// b[0] = b'!'; + /// + /// assert_eq!(&a[..], b"jello"); + /// assert_eq!(&b[..], b"!world"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > capacity`. + #[must_use = "consider BytesMut::truncate if you don't need the other half"] + pub fn split_off(&mut self, at: usize) -> BytesMut { + assert!(at <= self.capacity()); + unsafe { + let mut other = self.shallow_clone(); + other.set_start(at); + self.set_end(at); + other + } + } + + /// Removes the bytes from the current view, returning them in a new + /// `BytesMut` handle. + /// + /// Afterwards, `self` will be empty, but will retain any additional + /// capacity that it had before the operation. This is identical to + /// `self.split_to(self.len())`. + /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::{BytesMut, BufMut}; + /// + /// let mut buf = BytesMut::with_capacity(1024); + /// buf.put(&b"hello world"[..]); + /// + /// let other = buf.split(); + /// + /// assert!(buf.is_empty()); + /// assert_eq!(1013, buf.capacity()); + /// + /// assert_eq!(other, b"hello world"[..]); + /// ``` + #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"] + pub fn split(&mut self) -> BytesMut { + let len = self.len(); + self.split_to(len) + } + + /// Splits the buffer into two at the given index. + /// + /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut` + /// contains elements `[0, at)`. 
+ /// + /// This is an `O(1)` operation that just increases the reference count and + /// sets a few indices. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut a = BytesMut::from(&b"hello world"[..]); + /// let mut b = a.split_to(5); + /// + /// a[0] = b'!'; + /// b[0] = b'j'; + /// + /// assert_eq!(&a[..], b"!world"); + /// assert_eq!(&b[..], b"jello"); + /// ``` + /// + /// # Panics + /// + /// Panics if `at > len`. + #[must_use = "consider BytesMut::advance if you don't need the other half"] + pub fn split_to(&mut self, at: usize) -> BytesMut { + assert!(at <= self.len()); + + unsafe { + let mut other = self.shallow_clone(); + other.set_end(at); + self.set_start(at); + other + } + } + + /// Shortens the buffer, keeping the first `len` bytes and dropping the + /// rest. + /// + /// If `len` is greater than the buffer's current length, this has no + /// effect. + /// + /// The [`split_off`] method can emulate `truncate`, but this causes the + /// excess bytes to be returned instead of dropped. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::from(&b"hello world"[..]); + /// buf.truncate(5); + /// assert_eq!(buf, b"hello"[..]); + /// ``` + /// + /// [`split_off`]: #method.split_off + pub fn truncate(&mut self, len: usize) { + if len <= self.len() { + unsafe { self.set_len(len); } + } + } + + /// Clears the buffer, removing all data. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::from(&b"hello world"[..]); + /// buf.clear(); + /// assert!(buf.is_empty()); + /// ``` + pub fn clear(&mut self) { + self.truncate(0); + } + + /// Resizes the buffer so that `len` is equal to `new_len`. + /// + /// If `new_len` is greater than `len`, the buffer is extended by the + /// difference with each additional byte set to `value`. If `new_len` is + /// less than `len`, the buffer is simply truncated. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::new(); + /// + /// buf.resize(3, 0x1); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]); + /// + /// buf.resize(2, 0x2); + /// assert_eq!(&buf[..], &[0x1, 0x1]); + /// + /// buf.resize(4, 0x3); + /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); + /// ``` + pub fn resize(&mut self, new_len: usize, value: u8) { + let len = self.len(); + if new_len > len { + let additional = new_len - len; + self.reserve(additional); + unsafe { + let dst = self.bytes_mut().as_mut_ptr(); + ptr::write_bytes(dst, value, additional); + self.set_len(new_len); + } + } else { + self.truncate(new_len); + } + } + + /// Sets the length of the buffer. + /// + /// This will explicitly set the size of the buffer without actually + /// modifying the data, so it is up to the caller to ensure that the data + /// has been initialized. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let mut b = BytesMut::from(&b"hello world"[..]); + /// + /// unsafe { + /// b.set_len(5); + /// } + /// + /// assert_eq!(&b[..], b"hello"); + /// + /// unsafe { + /// b.set_len(11); + /// } + /// + /// assert_eq!(&b[..], b"hello world"); + /// ``` + pub unsafe fn set_len(&mut self, len: usize) { + debug_assert!(len <= self.cap); + self.len = len; + } + + /// Reserves capacity for at least `additional` more bytes to be inserted + /// into the given `BytesMut`. + /// + /// More than `additional` bytes may be reserved in order to avoid frequent + /// reallocations. 
A call to `reserve` may result in an allocation.
+    ///
+    /// Before allocating new buffer space, the function will attempt to reclaim
+    /// space in the existing buffer. If the current handle references a small
+    /// view in the original buffer and all other handles have been dropped,
+    /// and the requested capacity is less than or equal to the existing
+    /// buffer's capacity, then the current view will be copied to the front of
+    /// the buffer and the handle will take ownership of the full buffer.
+    ///
+    /// # Examples
+    ///
+    /// In the following example, a new buffer is allocated.
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::from(&b"hello"[..]);
+    /// buf.reserve(64);
+    /// assert!(buf.capacity() >= 69);
+    /// ```
+    ///
+    /// In the following example, the existing buffer is reclaimed.
+    ///
+    /// ```
+    /// use bytes::{BytesMut, BufMut};
+    ///
+    /// let mut buf = BytesMut::with_capacity(128);
+    /// buf.put(&[0; 64][..]);
+    ///
+    /// let ptr = buf.as_ptr();
+    /// let other = buf.split();
+    ///
+    /// assert!(buf.is_empty());
+    /// assert_eq!(buf.capacity(), 64);
+    ///
+    /// drop(other);
+    /// buf.reserve(128);
+    ///
+    /// assert_eq!(buf.capacity(), 128);
+    /// assert_eq!(buf.as_ptr(), ptr);
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity overflows `usize`.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        let len = self.len();
+        let rem = self.capacity() - len;
+
+        if additional <= rem {
+            // The handle can already store at least `additional` more bytes, so
+            // there is no further work to be done.
+            return;
+        }
+
+        self.reserve_inner(additional);
+    }
+
+    // In a separate function to allow the short-circuits in `reserve` to be
+    // inlined. This significantly helps performance.
+    fn reserve_inner(&mut self, additional: usize) {
+        let len = self.len();
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            // If there's enough free space before the start of the buffer, then
+            // just copy the data backwards and reuse the already-allocated
+            // space.
+            //
+            // Otherwise, since backed by a vector, use `Vec::reserve`
+            unsafe {
+                let (off, prev) = self.get_vec_pos();
+
+                // Only reuse space if we stand to gain at least capacity/2
+                // bytes of space back
+                if off >= additional && off >= (self.cap / 2) {
+                    // There's space - reuse it
+                    //
+                    // Just move the pointer back to the start after copying
+                    // data back.
+                    let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
+                    ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
+                    self.ptr = vptr(base_ptr);
+                    self.set_vec_pos(0, prev);
+
+                    // Length stays constant, but since we moved backwards we
+                    // can gain capacity back.
+                    self.cap += off;
+                } else {
+                    // No space - allocate more
+                    let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+                    v.reserve(additional);
+
+                    // Update the info
+                    self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
+                    self.len = v.len() - off;
+                    self.cap = v.capacity() - off;
+                }
+
+                return;
+            }
+        }
+
+        debug_assert_eq!(kind, KIND_ARC);
+        let shared: *mut Shared = self.data as _;
+
+        // Reserving involves abandoning the currently shared buffer and
+        // allocating a new vector with the requested capacity.
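+        //
+        // For example, with `len == 100`, `additional == 50`, and a unique
+        // shared vec of capacity 120, `new_cap` starts at 150 and is then
+        // raised to `max(240, 150, original_capacity)` by the logic below.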
+        //
+        // Compute the new capacity
+        let mut new_cap = len.checked_add(additional).expect("overflow");
+
+        let original_capacity;
+        let original_capacity_repr;
+
+        unsafe {
+            original_capacity_repr = (*shared).original_capacity_repr;
+            original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+            // First, try to reclaim the buffer. This is possible if the current
+            // handle is the only outstanding handle pointing to the buffer.
+            if (*shared).is_unique() {
+                // This is the only handle to the buffer. It can be reclaimed.
+                // However, before doing the work of copying data, check to make
+                // sure that the vector has enough capacity.
+                let v = &mut (*shared).vec;
+
+                if v.capacity() >= new_cap {
+                    // The capacity is sufficient, reclaim the buffer
+                    let ptr = v.as_mut_ptr();
+
+                    ptr::copy(self.ptr.as_ptr(), ptr, len);
+
+                    self.ptr = vptr(ptr);
+                    self.cap = v.capacity();
+
+                    return;
+                }
+
+                // The vector capacity is not sufficient. The reserve request is
+                // asking for more than the initial buffer capacity. Allocate more
+                // than requested if `new_cap` is not much bigger than the current
+                // capacity.
+                //
+                // There are some situations, such as when using `reserve_exact`,
+                // where the buffer capacity could be below `original_capacity`,
+                // so do a check.
+                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+                new_cap = cmp::max(
+                    cmp::max(double, new_cap),
+                    original_capacity);
+            } else {
+                new_cap = cmp::max(new_cap, original_capacity);
+            }
+        }
+
+        // Create a new vector to store the data
+        let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
+
+        // Copy the bytes
+        v.extend_from_slice(self.as_ref());
+
+        // Release the shared handle. This must be done *after* the bytes are
+        // copied.
+        unsafe { release_shared(shared) };
+
+        // Update self
+        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+        self.data = data as _;
+        self.ptr = vptr(v.as_mut_ptr());
+        self.len = v.len();
+        self.cap = v.capacity();
+    }
+
+    /// Appends the given bytes to this object.
+    ///
+    /// If this `BytesMut` object does not have enough capacity, it is resized
+    /// first. So unlike the `put_slice` operation, `extend_from_slice` never
+    /// panics.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(0);
+    /// buf.extend_from_slice(b"aaabbb");
+    /// buf.extend_from_slice(b"cccddd");
+    ///
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn extend_from_slice(&mut self, extend: &[u8]) {
+        let cnt = extend.len();
+        self.reserve(cnt);
+
+        unsafe {
+            let dst = self.maybe_uninit_bytes();
+            // Reserved above
+            debug_assert!(dst.len() >= cnt);
+
+            ptr::copy_nonoverlapping(
+                extend.as_ptr(),
+                dst.as_mut_ptr() as *mut u8,
+                cnt);
+        }
+
+        unsafe { self.advance_mut(cnt); }
+    }
+
+    /// Combines split `BytesMut` objects back into a contiguous buffer.
+    ///
+    /// If the two `BytesMut` objects were not contiguous originally, the
+    /// contents of `other` are copied onto the end of `self` instead.
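+    ///
+    /// The merge is `O(1)` only when `other` immediately follows `self` in
+    /// the same shared buffer; otherwise the bytes are copied. In that sense
+    /// `unsplit` is the inverse of `split_off` and `split_to`.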
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use bytes::BytesMut;
+    ///
+    /// let mut buf = BytesMut::with_capacity(64);
+    /// buf.extend_from_slice(b"aaabbbcccddd");
+    ///
+    /// let splitted = buf.split_off(6);
+    /// assert_eq!(b"aaabbb", &buf[..]);
+    /// assert_eq!(b"cccddd", &splitted[..]);
+    ///
+    /// buf.unsplit(splitted);
+    /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+    /// ```
+    pub fn unsplit(&mut self, other: BytesMut) {
+        if self.is_empty() {
+            *self = other;
+            return;
+        }
+
+        if let Err(other) = self.try_unsplit(other) {
+            self.extend_from_slice(other.as_ref());
+        }
+    }
+
+    // private
+
+    // For now, use a `Vec` to manage the memory for us, but we may want to
+    // change that in the future to some alternate allocator strategy.
+    //
+    // Thus, we don't expose an easy way to construct from a `Vec` since an
+    // internal change could make a simple pattern (`BytesMut::from(vec)`)
+    // suddenly a lot more expensive.
+    #[inline]
+    pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
+        let ptr = vptr(vec.as_mut_ptr());
+        let len = vec.len();
+        let cap = vec.capacity();
+        mem::forget(vec);
+
+        let original_capacity_repr = original_capacity_to_repr(cap);
+        let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+        BytesMut {
+            ptr,
+            len,
+            cap,
+            data: data as *mut _,
+        }
+    }
+
+    #[inline]
+    fn as_slice(&self) -> &[u8] {
+        unsafe {
+            slice::from_raw_parts(self.ptr.as_ptr(), self.len)
+        }
+    }
+
+    #[inline]
+    fn as_slice_mut(&mut self) -> &mut [u8] {
+        unsafe {
+            slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
+        }
+    }
+
+    unsafe fn set_start(&mut self, start: usize) {
+        // Setting the start to 0 is a no-op, so return early if this is the
+        // case.
+        if start == 0 {
+            return;
+        }
+
+        debug_assert!(start <= self.cap);
+
+        let kind = self.kind();
+
+        if kind == KIND_VEC {
+            // Setting the start when in vec representation is a little more
+            // complicated. First, we have to track how far ahead the
+            // "start" of the byte buffer is from the beginning of the vec. We
+            // also have to ensure that we don't exceed the maximum shift.
+            let (mut pos, prev) = self.get_vec_pos();
+            pos += start;
+
+            if pos <= MAX_VEC_POS {
+                self.set_vec_pos(pos, prev);
+            } else {
+                // The repr must be upgraded to ARC. This will never happen
+                // on 64 bit systems and will only happen on 32 bit systems
+                // when shifting past 134,217,727 bytes. As such, we don't
+                // worry too much about performance here.
+                self.promote_to_shared(/*ref_count = */1);
+            }
+        }
+
+        // Updating the start of the view is setting `ptr` to point to the
+        // new start and updating the `len` field to reflect the new length
+        // of the view.
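+        //
+        // `len` saturates at zero below because `start` may point into the
+        // uninitialized tail of the view (anywhere between `len` and `cap`).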
+        self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));
+
+        if self.len >= start {
+            self.len -= start;
+        } else {
+            self.len = 0;
+        }
+
+        self.cap -= start;
+    }
+
+    unsafe fn set_end(&mut self, end: usize) {
+        debug_assert_eq!(self.kind(), KIND_ARC);
+        assert!(end <= self.cap);
+
+        self.cap = end;
+        self.len = cmp::min(self.len, end);
+    }
+
+    fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
+        if other.is_empty() {
+            return Ok(());
+        }
+
+        let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
+        if ptr == other.ptr.as_ptr() &&
+           self.kind() == KIND_ARC &&
+           other.kind() == KIND_ARC &&
+           self.data == other.data
+        {
+            // Contiguous blocks, just combine directly
+            self.len += other.len;
+            self.cap += other.cap;
+            Ok(())
+        } else {
+            Err(other)
+        }
+    }
+
+    #[inline]
+    fn kind(&self) -> usize {
+        self.data as usize & KIND_MASK
+    }
+
+    unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
+        debug_assert_eq!(self.kind(), KIND_VEC);
+        debug_assert!(ref_cnt == 1 || ref_cnt == 2);
+
+        let original_capacity_repr =
+            (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
+
+        // The vec offset cannot be concurrently mutated, so there
+        // should be no danger in reading it.
+        let off = (self.data as usize) >> VEC_POS_OFFSET;
+
+        // First, allocate a new `Shared` instance containing the
+        // `Vec` fields. It's important to note that `ptr`, `len`,
+        // and `cap` cannot be mutated without having `&mut self`.
+        // This means that these fields will not be concurrently
+        // updated and since the buffer hasn't been promoted to an
+        // `Arc`, those three fields still are the components of the
+        // vector.
+        let shared = Box::new(Shared {
+            vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
+            original_capacity_repr,
+            ref_count: AtomicUsize::new(ref_cnt),
+        });
+
+        let shared = Box::into_raw(shared);
+
+        // The pointer should be aligned, so this assert should
+        // always succeed.
+        debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
+
+        self.data = shared as _;
+    }
+
+    /// Makes an exact shallow clone of `self`.
+    ///
+    /// The kind of `self` doesn't matter, but this is unsafe
+    /// because the clone will have the same offsets. You must
+    /// ensure that the value returned to the user does not allow
+    /// two mutable views into the same range.
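+    ///
+    /// If `self` is still in the `Vec` representation, it is promoted to
+    /// `KIND_ARC` first with a ref count of 2: one for `self`, one for the
+    /// clone.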
+ #[inline] + unsafe fn shallow_clone(&mut self) -> BytesMut { + if self.kind() == KIND_ARC { + increment_shared(self.data); + ptr::read(self) + } else { + self.promote_to_shared(/*ref_count = */2); + ptr::read(self) + } + } + + #[inline] + unsafe fn get_vec_pos(&mut self) -> (usize, usize) { + debug_assert_eq!(self.kind(), KIND_VEC); + + let prev = self.data as usize; + (prev >> VEC_POS_OFFSET, prev) + } + + #[inline] + unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) { + debug_assert_eq!(self.kind(), KIND_VEC); + debug_assert!(pos <= MAX_VEC_POS); + + self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _; + } + + #[inline] + fn maybe_uninit_bytes(&mut self) -> &mut [mem::MaybeUninit<u8>] { + unsafe { + let ptr = self.ptr.as_ptr().offset(self.len as isize); + let len = self.cap - self.len; + + slice::from_raw_parts_mut(ptr as *mut mem::MaybeUninit<u8>, len) + } + } +} + +impl Drop for BytesMut { + fn drop(&mut self) { + let kind = self.kind(); + + if kind == KIND_VEC { + unsafe { + let (off, _) = self.get_vec_pos(); + + // Vector storage, free the vector + let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); + } + } else if kind == KIND_ARC { + unsafe { release_shared(self.data as _) }; + } + } +} + +impl Buf for BytesMut { + #[inline] + fn remaining(&self) -> usize { + self.len() + } + + #[inline] + fn bytes(&self) -> &[u8] { + self.as_slice() + } + + #[inline] + fn advance(&mut self, cnt: usize) { + assert!(cnt <= self.remaining(), "cannot advance past `remaining`"); + unsafe { self.set_start(cnt); } + } + + fn to_bytes(&mut self) -> crate::Bytes { + self.split().freeze() + } +} + +impl BufMut for BytesMut { + #[inline] + fn remaining_mut(&self) -> usize { + usize::MAX - self.len() + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + let new_len = self.len() + cnt; + assert!(new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap); + self.len = new_len; + } + + #[inline] + fn bytes_mut(&mut self) -> &mut [mem::MaybeUninit<u8>] { + if self.capacity() == self.len() { + self.reserve(64); + } + self.maybe_uninit_bytes() + } + + // Specialize these methods so they can skip checking `remaining_mut` + // and `advance_mut`. 
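+    //
+    // `extend_from_slice` reserves space and advances `len` itself, so
+    // routing `put` and `put_slice` through it skips the generic
+    // `remaining_mut` bookkeeping of the default implementations.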
+ + fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized { + while src.has_remaining() { + let s = src.bytes(); + let l = s.len(); + self.extend_from_slice(s); + src.advance(l); + } + } + + fn put_slice(&mut self, src: &[u8]) { + self.extend_from_slice(src); + } +} + +impl AsRef<[u8]> for BytesMut { + #[inline] + fn as_ref(&self) -> &[u8] { + self.as_slice() + } +} + +impl Deref for BytesMut { + type Target = [u8]; + + #[inline] + fn deref(&self) -> &[u8] { + self.as_ref() + } +} + +impl AsMut<[u8]> for BytesMut { + fn as_mut(&mut self) -> &mut [u8] { + self.as_slice_mut() + } +} + +impl DerefMut for BytesMut { + #[inline] + fn deref_mut(&mut self) -> &mut [u8] { + self.as_mut() + } +} + +impl<'a> From<&'a [u8]> for BytesMut { + fn from(src: &'a [u8]) -> BytesMut { + BytesMut::from_vec(src.to_vec()) + } +} + +impl<'a> From<&'a str> for BytesMut { + fn from(src: &'a str) -> BytesMut { + BytesMut::from(src.as_bytes()) + } +} + +impl PartialEq for BytesMut { + fn eq(&self, other: &BytesMut) -> bool { + self.as_slice() == other.as_slice() + } +} + +impl PartialOrd for BytesMut { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + self.as_slice().partial_cmp(other.as_slice()) + } +} + +impl Ord for BytesMut { + fn cmp(&self, other: &BytesMut) -> cmp::Ordering { + self.as_slice().cmp(other.as_slice()) + } +} + +impl Eq for BytesMut { +} + +impl Default for BytesMut { + #[inline] + fn default() -> BytesMut { + BytesMut::new() + } +} + +impl fmt::Debug for BytesMut { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&debug::BsDebug(&self.as_slice()), fmt) + } +} + +impl hash::Hash for BytesMut { + fn hash<H>(&self, state: &mut H) where H: hash::Hasher { + let s: &[u8] = self.as_ref(); + s.hash(state); + } +} + +impl Borrow<[u8]> for BytesMut { + fn borrow(&self) -> &[u8] { + self.as_ref() + } +} + +impl BorrowMut<[u8]> for BytesMut { + fn borrow_mut(&mut self) -> &mut [u8] { + self.as_mut() + } +} + +impl fmt::Write for BytesMut { + #[inline] + fn write_str(&mut self, s: &str) -> fmt::Result { + if self.remaining_mut() >= s.len() { + self.put_slice(s.as_bytes()); + Ok(()) + } else { + Err(fmt::Error) + } + } + + #[inline] + fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result { + fmt::write(self, args) + } +} + +impl Clone for BytesMut { + fn clone(&self) -> BytesMut { + BytesMut::from(&self[..]) + } +} + +impl IntoIterator for BytesMut { + type Item = u8; + type IntoIter = IntoIter<BytesMut>; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self) + } +} + +impl<'a> IntoIterator for &'a BytesMut { + type Item = &'a u8; + type IntoIter = core::slice::Iter<'a, u8>; + + fn into_iter(self) -> Self::IntoIter { + self.as_ref().into_iter() + } +} + +impl Extend<u8> for BytesMut { + fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> { + let iter = iter.into_iter(); + + let (lower, _) = iter.size_hint(); + self.reserve(lower); + + // TODO: optimize + // 1. If self.kind() == KIND_VEC, use Vec::extend + // 2. 
Make `reserve` inline-able
+        for b in iter {
+            self.reserve(1);
+            self.put_u8(b);
+        }
+    }
+}
+
+impl<'a> Extend<&'a u8> for BytesMut {
+    fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+        self.extend(iter.into_iter().map(|b| *b))
+    }
+}
+
+impl FromIterator<u8> for BytesMut {
+    fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+        BytesMut::from_vec(Vec::from_iter(into_iter))
+    }
+}
+
+impl<'a> FromIterator<&'a u8> for BytesMut {
+    fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+        BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
+    }
+}
+
+/*
+ *
+ * ===== Inner =====
+ *
+ */
+
+unsafe fn increment_shared(ptr: *mut Shared) {
+    let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
+
+    if old_size > isize::MAX as usize {
+        crate::abort();
+    }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+    // `Shared` storage... follow the drop steps from Arc.
+    if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
+        return;
+    }
+
+    // This fence is needed to prevent reordering of use of the data and
+    // deletion of the data. Because it is marked `Release`, the decreasing
+    // of the reference count synchronizes with this `Acquire` fence. This
+    // means that use of the data happens before decreasing the reference
+    // count, which happens before this fence, which happens before the
+    // deletion of the data.
+    //
+    // As explained in the [Boost documentation][1],
+    //
+    // > It is important to enforce any possible access to the object in one
+    // > thread (through an existing reference) to *happen before* deleting
+    // > the object in a different thread. This is achieved by a "release"
+    // > operation after dropping a reference (any access to the object
+    // > through this reference must obviously happened before), and an
+    // > "acquire" operation before deleting the object.
+    //
+    // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+    atomic::fence(Ordering::Acquire);
+
+    // Drop the data
+    Box::from_raw(ptr);
+}
+
+impl Shared {
+    fn is_unique(&self) -> bool {
+        // The goal is to check if the current handle is the only handle
+        // that currently has access to the buffer. This is done by
+        // checking if the `ref_count` is currently 1.
+        //
+        // The `Acquire` ordering synchronizes with the `Release` as
+        // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+        // operation guarantees that any mutations done in other threads
+        // are ordered before the `ref_count` is decremented. As such,
+        // this `Acquire` will guarantee that those mutations are
+        // visible to the current thread.
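+        //
+        // A loaded value of 1 cannot go stale here: creating a new handle
+        // requires cloning an existing one, and the caller holds the only
+        // existing handle while this check runs.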
+ self.ref_count.load(Ordering::Acquire) == 1 + } +} + +fn original_capacity_to_repr(cap: usize) -> usize { + let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize); + cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH) +} + +fn original_capacity_from_repr(repr: usize) -> usize { + if repr == 0 { + return 0; + } + + 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) +} + +/* +#[test] +fn test_original_capacity_to_repr() { + assert_eq!(original_capacity_to_repr(0), 0); + + let max_width = 32; + + for width in 1..(max_width + 1) { + let cap = 1 << width - 1; + + let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { + 0 + } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { + width - MIN_ORIGINAL_CAPACITY_WIDTH + } else { + MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH + }; + + assert_eq!(original_capacity_to_repr(cap), expected); + + if width > 1 { + assert_eq!(original_capacity_to_repr(cap + 1), expected); + } + + // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below + if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { + assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); + assert_eq!(original_capacity_to_repr(cap + 76), expected); + } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { + assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); + assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); + } + } +} + +#[test] +fn test_original_capacity_from_repr() { + assert_eq!(0, original_capacity_from_repr(0)); + + let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; + + assert_eq!(min_cap, original_capacity_from_repr(1)); + assert_eq!(min_cap * 2, original_capacity_from_repr(2)); + assert_eq!(min_cap * 4, original_capacity_from_repr(3)); + assert_eq!(min_cap * 8, original_capacity_from_repr(4)); + assert_eq!(min_cap * 16, original_capacity_from_repr(5)); + assert_eq!(min_cap * 32, original_capacity_from_repr(6)); + assert_eq!(min_cap * 64, original_capacity_from_repr(7)); +} +*/ + +unsafe impl Send for BytesMut {} +unsafe impl Sync for BytesMut {} + +/* + * + * ===== PartialEq / PartialOrd ===== + * + */ + +impl PartialEq<[u8]> for BytesMut { + fn eq(&self, other: &[u8]) -> bool { + &**self == other + } +} + +impl PartialOrd<[u8]> for BytesMut { + fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> { + (**self).partial_cmp(other) + } +} + +impl PartialEq<BytesMut> for [u8] { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for [u8] { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<str> for BytesMut { + fn eq(&self, other: &str) -> bool { + &**self == other.as_bytes() + } +} + +impl PartialOrd<str> for BytesMut { + fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> { + (**self).partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<BytesMut> for str { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for str { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<Vec<u8>> for BytesMut { + fn eq(&self, other: &Vec<u8>) -> bool { + *self == &other[..] 
+ } +} + +impl PartialOrd<Vec<u8>> for BytesMut { + fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> { + (**self).partial_cmp(&other[..]) + } +} + +impl PartialEq<BytesMut> for Vec<u8> { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for Vec<u8> { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<String> for BytesMut { + fn eq(&self, other: &String) -> bool { + *self == &other[..] + } +} + +impl PartialOrd<String> for BytesMut { + fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> { + (**self).partial_cmp(other.as_bytes()) + } +} + +impl PartialEq<BytesMut> for String { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for String { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut + where BytesMut: PartialEq<T> +{ + fn eq(&self, other: &&'a T) -> bool { + *self == **other + } +} + +impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut + where BytesMut: PartialOrd<T> +{ + fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> { + self.partial_cmp(*other) + } +} + +impl PartialEq<BytesMut> for &[u8] { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for &[u8] { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<BytesMut> for &str { + fn eq(&self, other: &BytesMut) -> bool { + *other == *self + } +} + +impl PartialOrd<BytesMut> for &str { + fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> { + other.partial_cmp(self) + } +} + +impl PartialEq<BytesMut> for Bytes { + fn eq(&self, other: &BytesMut) -> bool { + &other[..] == &self[..] + } +} + +impl PartialEq<Bytes> for BytesMut { + fn eq(&self, other: &Bytes) -> bool { + &other[..] == &self[..] 
+    }
+}
+
+fn vptr(ptr: *mut u8) -> NonNull<u8> {
+    if cfg!(debug_assertions) {
+        NonNull::new(ptr).expect("Vec pointer should be non-null")
+    } else {
+        unsafe { NonNull::new_unchecked(ptr) }
+    }
+}
+
+unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+    let ptr = ptr.offset(-(off as isize));
+    len += off;
+    cap += off;
+
+    Vec::from_raw_parts(ptr, len, cap)
+}
+
+// ===== impl SharedVtable =====
+
+static SHARED_VTABLE: Vtable = Vtable {
+    clone: shared_v_clone,
+    drop: shared_v_drop,
+};
+
+unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+    let shared = data.load(Ordering::Acquire) as *mut Shared;
+    increment_shared(shared);
+
+    let data = AtomicPtr::new(shared as _);
+    Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
+}
+
+unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+    let shared = (*data.get_mut()) as *mut Shared;
+    release_shared(shared as *mut Shared);
+}
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+///     let mut b1 = BytesMut::from("hello world");
+///     b1.split();
+/// }
+/// ```
+fn _split_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+    use std::sync::Arc;
+    use loom::thread;
+
+    use crate::Bytes;
+    use super::BytesMut;
+
+    #[test]
+    fn bytes_mut_cloning_frozen() {
+        loom::model(|| {
+            let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
+            let addr = a.as_ptr() as usize;
+
+            // test that Bytes::clone is Sync by putting it in an Arc
+            let a1 = Arc::new(a);
+            let a2 = a1.clone();
+
+            let t1 = thread::spawn(move || {
+                let b: Bytes = (*a1).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            let t2 = thread::spawn(move || {
+                let b: Bytes = (*a2).clone();
+                assert_eq!(b.as_ptr() as usize, addr);
+            });
+
+            t1.join().unwrap();
+            t2.join().unwrap();
+        });
+    }
+}
diff --git a/third_party/rust/bytes/src/debug.rs b/third_party/rust/bytes/src/debug.rs
new file mode 100644
index 0000000000..b1a3cc8189
--- /dev/null
+++ b/third_party/rust/bytes/src/debug.rs
@@ -0,0 +1,40 @@
+use core::fmt;
+
+/// Alternative implementation of `fmt::Debug` for byte slice.
+///
+/// The standard `Debug` implementation for `[u8]` is a comma-separated list
+/// of numbers. Since a large number of byte strings are in fact ASCII strings
+/// or contain a lot of ASCII strings (e.g. HTTP), it is convenient to print
+/// strings as ASCII when possible.
+///
+/// This struct wraps `&[u8]` just to override `fmt::Debug`.
+///
+/// `BsDebug` is not part of the public API of the bytes crate.
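+///
+/// Printable ASCII (`0x20..0x7f`) is written verbatim; `\n`, `\r`, `\t`,
+/// `\\`, `\"` and `\0` are written as their escape sequences, and every
+/// other byte as a `\x` hex escape.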
+pub struct BsDebug<'a>(pub &'a [u8]); + +impl fmt::Debug for BsDebug<'_> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + write!(fmt, "b\"")?; + for &c in self.0 { + // https://doc.rust-lang.org/reference.html#byte-escapes + if c == b'\n' { + write!(fmt, "\\n")?; + } else if c == b'\r' { + write!(fmt, "\\r")?; + } else if c == b'\t' { + write!(fmt, "\\t")?; + } else if c == b'\\' || c == b'"' { + write!(fmt, "\\{}", c as char)?; + } else if c == b'\0' { + write!(fmt, "\\0")?; + // ASCII printable + } else if c >= 0x20 && c < 0x7f { + write!(fmt, "{}", c as char)?; + } else { + write!(fmt, "\\x{:02x}", c)?; + } + } + write!(fmt, "\"")?; + Ok(()) + } +} diff --git a/third_party/rust/bytes/src/hex.rs b/third_party/rust/bytes/src/hex.rs new file mode 100644 index 0000000000..48ae6a42c1 --- /dev/null +++ b/third_party/rust/bytes/src/hex.rs @@ -0,0 +1,37 @@ +use crate::{Bytes, BytesMut}; +use core::fmt::{Formatter, LowerHex, Result, UpperHex}; + +struct BytesRef<'a>(&'a [u8]); + +impl<'a> LowerHex for BytesRef<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + for b in self.0 { + write!(f, "{:02x}", b)?; + } + Ok(()) + } +} + +impl<'a> UpperHex for BytesRef<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + for b in self.0 { + write!(f, "{:02X}", b)?; + } + Ok(()) + } +} + +macro_rules! hex_impl { + ($tr:ident, $ty:ty) => { + impl $tr for $ty { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + $tr::fmt(&BytesRef(self.as_ref()), f) + } + } + }; +} + +hex_impl!(LowerHex, Bytes); +hex_impl!(LowerHex, BytesMut); +hex_impl!(UpperHex, Bytes); +hex_impl!(UpperHex, BytesMut); diff --git a/third_party/rust/bytes/src/lib.rs b/third_party/rust/bytes/src/lib.rs new file mode 100644 index 0000000000..2df1fb3b23 --- /dev/null +++ b/third_party/rust/bytes/src/lib.rs @@ -0,0 +1,118 @@ +#![deny(warnings, missing_docs, missing_debug_implementations, rust_2018_idioms)] +#![doc(html_root_url = "https://docs.rs/bytes/0.5.3")] +#![no_std] + +//! Provides abstractions for working with bytes. +//! +//! The `bytes` crate provides an efficient byte buffer structure +//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer +//! implementations ([`Buf`], [`BufMut`]). +//! +//! [`Buf`]: trait.Buf.html +//! [`BufMut`]: trait.BufMut.html +//! +//! # `Bytes` +//! +//! `Bytes` is an efficient container for storing and operating on contiguous +//! slices of memory. It is intended for use primarily in networking code, but +//! could have applications elsewhere as well. +//! +//! `Bytes` values facilitate zero-copy network programming by allowing multiple +//! `Bytes` objects to point to the same underlying memory. This is managed by +//! using a reference count to track when the memory is no longer needed and can +//! be freed. +//! +//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]` +//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For +//! example: +//! +//! ```rust +//! use bytes::{BytesMut, BufMut}; +//! +//! let mut buf = BytesMut::with_capacity(1024); +//! buf.put(&b"hello world"[..]); +//! buf.put_u16(1234); +//! +//! let a = buf.split(); +//! assert_eq!(a, b"hello world\x04\xD2"[..]); +//! +//! buf.put(&b"goodbye world"[..]); +//! +//! let b = buf.split(); +//! assert_eq!(b, b"goodbye world"[..]); +//! +//! assert_eq!(buf.capacity(), 998); +//! ``` +//! +//! In the above example, only a single buffer of 1024 is allocated. The handles +//! 
`a` and `b` will share the underlying buffer and maintain indices tracking +//! the view into the buffer represented by the handle. +//! +//! See the [struct docs] for more details. +//! +//! [struct docs]: struct.Bytes.html +//! +//! # `Buf`, `BufMut` +//! +//! These two traits provide read and write access to buffers. The underlying +//! storage may or may not be in contiguous memory. For example, `Bytes` is a +//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in +//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current +//! position in the underlying byte storage. When bytes are read or written, the +//! cursor is advanced. +//! +//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) +//! +//! ## Relation with `Read` and `Write` +//! +//! At first glance, it may seem that `Buf` and `BufMut` overlap in +//! functionality with `std::io::Read` and `std::io::Write`. However, they +//! serve different purposes. A buffer is the value that is provided as an +//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then +//! perform a syscall, which has the potential of failing. Operations on `Buf` +//! and `BufMut` are infallible. + + +extern crate alloc; + +#[cfg(feature = "std")] +extern crate std; + +pub mod buf; +pub use crate::buf::{ + Buf, + BufMut, +}; + +mod bytes_mut; +mod bytes; +mod debug; +mod hex; +mod loom; +pub use crate::bytes_mut::BytesMut; +pub use crate::bytes::Bytes; + +// Optional Serde support +#[cfg(feature = "serde")] +mod serde; + +#[inline(never)] +#[cold] +fn abort() -> ! { + #[cfg(feature = "std")] + { + std::process::abort(); + } + + #[cfg(not(feature = "std"))] + { + struct Abort; + impl Drop for Abort { + fn drop(&mut self) { + panic!(); + } + } + let _a = Abort; + panic!("abort"); + } +} diff --git a/third_party/rust/bytes/src/loom.rs b/third_party/rust/bytes/src/loom.rs new file mode 100644 index 0000000000..80947acecc --- /dev/null +++ b/third_party/rust/bytes/src/loom.rs @@ -0,0 +1,9 @@ +#[cfg(not(all(test, loom)))] +pub(crate) mod sync { + pub(crate) mod atomic { + pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; + } +} + +#[cfg(all(test, loom))] +pub(crate) use ::loom::sync; diff --git a/third_party/rust/bytes/src/serde.rs b/third_party/rust/bytes/src/serde.rs new file mode 100644 index 0000000000..11020ae7f0 --- /dev/null +++ b/third_party/rust/bytes/src/serde.rs @@ -0,0 +1,82 @@ +use alloc::string::String; +use alloc::vec::Vec; +use core::{cmp, fmt}; +use serde::{Serialize, Serializer, Deserialize, Deserializer, de}; +use super::{Bytes, BytesMut}; + +macro_rules! serde_impl { + ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => ( + impl Serialize for $ty { + #[inline] + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where S: Serializer + { + serializer.serialize_bytes(&self) + } + } + + struct $visitor_ty; + + impl<'de> de::Visitor<'de> for $visitor_ty { + type Value = $ty; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("byte array") + } + + #[inline] + fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> + where V: de::SeqAccess<'de> + { + let len = cmp::min(seq.size_hint().unwrap_or(0), 4096); + let mut values: Vec<u8> = Vec::with_capacity(len); + + while let Some(value) = seq.next_element()? 
{ + values.push(value); + } + + Ok($ty::$from_vec(values)) + } + + #[inline] + fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::$from_slice(v)) + } + + #[inline] + fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::$from_vec(v)) + } + + #[inline] + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::$from_slice(v.as_bytes())) + } + + #[inline] + fn visit_string<E>(self, v: String) -> Result<Self::Value, E> + where E: de::Error + { + Ok($ty::$from_vec(v.into_bytes())) + } + } + + impl<'de> Deserialize<'de> for $ty { + #[inline] + fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error> + where D: Deserializer<'de> + { + deserializer.deserialize_byte_buf($visitor_ty) + } + } + ); +} + +serde_impl!(Bytes, BytesVisitor, copy_from_slice, from); +serde_impl!(BytesMut, BytesMutVisitor, from, from_vec); diff --git a/third_party/rust/bytes/tests/test_buf.rs b/third_party/rust/bytes/tests/test_buf.rs new file mode 100644 index 0000000000..12b75a4ad7 --- /dev/null +++ b/third_party/rust/bytes/tests/test_buf.rs @@ -0,0 +1,101 @@ +#![deny(warnings, rust_2018_idioms)] + +use bytes::Buf; +use std::io::IoSlice; + +#[test] +fn test_fresh_cursor_vec() { + let mut buf = &b"hello"[..]; + + assert_eq!(buf.remaining(), 5); + assert_eq!(buf.bytes(), b"hello"); + + buf.advance(2); + + assert_eq!(buf.remaining(), 3); + assert_eq!(buf.bytes(), b"llo"); + + buf.advance(3); + + assert_eq!(buf.remaining(), 0); + assert_eq!(buf.bytes(), b""); +} + +#[test] +fn test_get_u8() { + let mut buf = &b"\x21zomg"[..]; + assert_eq!(0x21, buf.get_u8()); +} + +#[test] +fn test_get_u16() { + let mut buf = &b"\x21\x54zomg"[..]; + assert_eq!(0x2154, buf.get_u16()); + let mut buf = &b"\x21\x54zomg"[..]; + assert_eq!(0x5421, buf.get_u16_le()); +} + +#[test] +#[should_panic] +fn test_get_u16_buffer_underflow() { + let mut buf = &b"\x21"[..]; + buf.get_u16(); +} + +#[test] +fn test_bufs_vec() { + let buf = &b"hello world"[..]; + + let b1: &[u8] = &mut []; + let b2: &[u8] = &mut []; + + let mut dst = [IoSlice::new(b1), IoSlice::new(b2)]; + + assert_eq!(1, buf.bytes_vectored(&mut dst[..])); +} + +#[test] +fn test_vec_deque() { + use std::collections::VecDeque; + + let mut buffer: VecDeque<u8> = VecDeque::new(); + buffer.extend(b"hello world"); + assert_eq!(11, buffer.remaining()); + assert_eq!(b"hello world", buffer.bytes()); + buffer.advance(6); + assert_eq!(b"world", buffer.bytes()); + buffer.extend(b" piece"); + let mut out = [0; 11]; + buffer.copy_to_slice(&mut out); + assert_eq!(b"world piece", &out[..]); +} + +#[test] +fn test_deref_buf_forwards() { + struct Special; + + impl Buf for Special { + fn remaining(&self) -> usize { + unreachable!("remaining"); + } + + fn bytes(&self) -> &[u8] { + unreachable!("bytes"); + } + + fn advance(&mut self, _: usize) { + unreachable!("advance"); + } + + fn get_u8(&mut self) -> u8 { + // specialized! 
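+            // Returning a sentinel rather than reading any buffer proves the
+            // call went through this specialization instead of the default
+            // `get_u8`, which would hit the `unreachable!` methods above.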
+ b'x' + } + } + + // these should all use the specialized method + assert_eq!(Special.get_u8(), b'x'); + assert_eq!((&mut Special as &mut dyn Buf).get_u8(), b'x'); + assert_eq!((Box::new(Special) as Box<dyn Buf>).get_u8(), b'x'); + assert_eq!(Box::new(Special).get_u8(), b'x'); +} diff --git a/third_party/rust/bytes/tests/test_buf_mut.rs b/third_party/rust/bytes/tests/test_buf_mut.rs new file mode 100644 index 0000000000..f002f7d5ff --- /dev/null +++ b/third_party/rust/bytes/tests/test_buf_mut.rs @@ -0,0 +1,118 @@ +#![deny(warnings, rust_2018_idioms)] + +use bytes::{buf::IoSliceMut, BufMut, BytesMut}; +use std::usize; +use std::fmt::Write; + +#[test] +fn test_vec_as_mut_buf() { + let mut buf = Vec::with_capacity(64); + + assert_eq!(buf.remaining_mut(), usize::MAX); + + assert!(buf.bytes_mut().len() >= 64); + + buf.put(&b"zomg"[..]); + + assert_eq!(&buf, b"zomg"); + + assert_eq!(buf.remaining_mut(), usize::MAX - 4); + assert_eq!(buf.capacity(), 64); + + for _ in 0..16 { + buf.put(&b"zomg"[..]); + } + + assert_eq!(buf.len(), 68); +} + +#[test] +fn test_put_u8() { + let mut buf = Vec::with_capacity(8); + buf.put_u8(33); + assert_eq!(b"\x21", &buf[..]); +} + +#[test] +fn test_put_u16() { + let mut buf = Vec::with_capacity(8); + buf.put_u16(8532); + assert_eq!(b"\x21\x54", &buf[..]); + + buf.clear(); + buf.put_u16_le(8532); + assert_eq!(b"\x54\x21", &buf[..]); +} + +#[test] +fn test_vec_advance_mut() { + // Regression test for carllerche/bytes#108. + let mut buf = Vec::with_capacity(8); + unsafe { + buf.advance_mut(12); + assert_eq!(buf.len(), 12); + assert!(buf.capacity() >= 12, "capacity: {}", buf.capacity()); + } +} + +#[test] +fn test_clone() { + let mut buf = BytesMut::with_capacity(100); + buf.write_str("this is a test").unwrap(); + let buf2 = buf.clone(); + + buf.write_str(" of our emergency broadcast system").unwrap(); + assert!(buf != buf2); +} + +#[test] +fn test_bufs_vec_mut() { + let b1: &mut [u8] = &mut []; + let b2: &mut [u8] = &mut []; + let mut dst = [IoSliceMut::from(b1), IoSliceMut::from(b2)]; + + // with no capacity + let mut buf = BytesMut::new(); + assert_eq!(buf.capacity(), 0); + assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..])); + + // with capacity + let mut buf = BytesMut::with_capacity(64); + assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..])); +} + +#[test] +fn test_mut_slice() { + let mut v = vec![0, 0, 0, 0]; + let mut s = &mut v[..]; + s.put_u32(42); +} + +#[test] +fn test_deref_bufmut_forwards() { + struct Special; + + impl BufMut for Special { + fn remaining_mut(&self) -> usize { + unreachable!("remaining_mut"); + } + + fn bytes_mut(&mut self) -> &mut [std::mem::MaybeUninit<u8>] { + unreachable!("bytes_mut"); + } + + unsafe fn advance_mut(&mut self, _: usize) { + unreachable!("advance"); + } + + fn put_u8(&mut self, _: u8) { + // specialized! 
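+            // Deliberately a no-op: reaching this body without triggering
+            // the `unreachable!` methods above is the whole assertion.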
+ } + } + + // these should all use the specialized method + Special.put_u8(b'x'); + (&mut Special as &mut dyn BufMut).put_u8(b'x'); + (Box::new(Special) as Box<dyn BufMut>).put_u8(b'x'); + Box::new(Special).put_u8(b'x'); +} diff --git a/third_party/rust/bytes/tests/test_bytes.rs b/third_party/rust/bytes/tests/test_bytes.rs new file mode 100644 index 0000000000..b582627566 --- /dev/null +++ b/third_party/rust/bytes/tests/test_bytes.rs @@ -0,0 +1,883 @@ +#![deny(warnings, rust_2018_idioms)] + +use bytes::{Bytes, BytesMut, Buf, BufMut}; + +use std::usize; + +const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb"; +const SHORT: &'static [u8] = b"hello world"; + +fn is_sync<T: Sync>() {} +fn is_send<T: Send>() {} + +#[test] +fn test_bounds() { + is_sync::<Bytes>(); + is_sync::<BytesMut>(); + is_send::<Bytes>(); + is_send::<BytesMut>(); +} + +#[test] +fn test_layout() { + use std::mem; + + assert_eq!( + mem::size_of::<Bytes>(), + mem::size_of::<usize>() * 4, + "Bytes size should be 4 words", + ); + assert_eq!( + mem::size_of::<BytesMut>(), + mem::size_of::<usize>() * 4, + "BytesMut should be 4 words", + ); + + assert_eq!( + mem::size_of::<Bytes>(), + mem::size_of::<Option<Bytes>>(), + "Bytes should be same size as Option<Bytes>", + ); + + assert_eq!( + mem::size_of::<BytesMut>(), + mem::size_of::<Option<BytesMut>>(), + "BytesMut should be same size as Option<BytesMut>", + ); + +} + +#[test] +fn from_slice() { + let a = Bytes::from(&b"abcdefgh"[..]); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); + + let a = BytesMut::from(&b"abcdefgh"[..]); + assert_eq!(a, b"abcdefgh"[..]); + assert_eq!(a, &b"abcdefgh"[..]); + assert_eq!(a, Vec::from(&b"abcdefgh"[..])); + assert_eq!(b"abcdefgh"[..], a); + assert_eq!(&b"abcdefgh"[..], a); + assert_eq!(Vec::from(&b"abcdefgh"[..]), a); +} + +#[test] +fn fmt() { + let a = format!("{:?}", Bytes::from(&b"abcdefg"[..])); + let b = "b\"abcdefg\""; + + assert_eq!(a, b); + + let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..])); + assert_eq!(a, b); +} + +#[test] +fn fmt_write() { + use std::fmt::Write; + use std::iter::FromIterator; + let s = String::from_iter((0..10).map(|_| "abcdefg")); + + let mut a = BytesMut::with_capacity(64); + write!(a, "{}", &s[..64]).unwrap(); + assert_eq!(a, s[..64].as_bytes()); + + + let mut b = BytesMut::with_capacity(64); + write!(b, "{}", &s[..32]).unwrap(); + write!(b, "{}", &s[32..64]).unwrap(); + assert_eq!(b, s[..64].as_bytes()); + + + let mut c = BytesMut::with_capacity(64); + write!(c, "{}", s).unwrap(); + assert_eq!(c, s[..].as_bytes()); +} + +#[test] +fn len() { + let a = Bytes::from(&b"abcdefg"[..]); + assert_eq!(a.len(), 7); + + let a = BytesMut::from(&b"abcdefg"[..]); + assert_eq!(a.len(), 7); + + let a = Bytes::from(&b""[..]); + assert!(a.is_empty()); + + let a = BytesMut::from(&b""[..]); + assert!(a.is_empty()); +} + +#[test] +fn index() { + let a = Bytes::from(&b"hello world"[..]); + assert_eq!(a[0..5], *b"hello"); +} + +#[test] +fn slice() { + let a = Bytes::from(&b"hello world"[..]); + + let b = a.slice(3..5); + assert_eq!(b, b"lo"[..]); + + let b = a.slice(0..0); + assert_eq!(b, b""[..]); + + let b = a.slice(3..3); + assert_eq!(b, b""[..]); + + let b = a.slice(a.len()..a.len()); + assert_eq!(b, b""[..]); + + let b = a.slice(..5); + assert_eq!(b, b"hello"[..]); + + let b = a.slice(3..); + assert_eq!(b, b"lo 
world"[..]); +} + +#[test] +#[should_panic] +fn slice_oob_1() { + let a = Bytes::from(&b"hello world"[..]); + a.slice(5..44); +} + +#[test] +#[should_panic] +fn slice_oob_2() { + let a = Bytes::from(&b"hello world"[..]); + a.slice(44..49); +} + +#[test] +fn split_off() { + let mut hello = Bytes::from(&b"helloworld"[..]); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); + + let mut hello = BytesMut::from(&b"helloworld"[..]); + let world = hello.split_off(5); + + assert_eq!(hello, &b"hello"[..]); + assert_eq!(world, &b"world"[..]); +} + +#[test] +#[should_panic] +fn split_off_oob() { + let mut hello = Bytes::from(&b"helloworld"[..]); + let _ = hello.split_off(44); +} + +#[test] +fn split_off_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let other = bytes.split_off(128); + + assert_eq!(bytes.len(), 0); + assert_eq!(bytes.capacity(), 128); + + assert_eq!(other.len(), 0); + assert_eq!(other.capacity(), 896); +} + +#[test] +fn split_off_to_loop() { + let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + + for i in 0..(s.len() + 1) { + { + let mut bytes = Bytes::from(&s[..]); + let off = bytes.split_off(i); + assert_eq!(i, bytes.len()); + let mut sum = Vec::new(); + sum.extend(bytes.iter()); + sum.extend(off.iter()); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = BytesMut::from(&s[..]); + let off = bytes.split_off(i); + assert_eq!(i, bytes.len()); + let mut sum = Vec::new(); + sum.extend(&bytes); + sum.extend(&off); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = Bytes::from(&s[..]); + let off = bytes.split_to(i); + assert_eq!(i, off.len()); + let mut sum = Vec::new(); + sum.extend(off.iter()); + sum.extend(bytes.iter()); + assert_eq!(&s[..], &sum[..]); + } + { + let mut bytes = BytesMut::from(&s[..]); + let off = bytes.split_to(i); + assert_eq!(i, off.len()); + let mut sum = Vec::new(); + sum.extend(&off); + sum.extend(&bytes); + assert_eq!(&s[..], &sum[..]); + } + } +} + +#[test] +fn split_to_1() { + // Static + let mut a = Bytes::from_static(SHORT); + let b = a.split_to(4); + + assert_eq!(SHORT[4..], a); + assert_eq!(SHORT[..4], b); + + // Allocated + let mut a = Bytes::copy_from_slice(LONG); + let b = a.split_to(4); + + assert_eq!(LONG[4..], a); + assert_eq!(LONG[..4], b); + + let mut a = Bytes::copy_from_slice(LONG); + let b = a.split_to(30); + + assert_eq!(LONG[30..], a); + assert_eq!(LONG[..30], b); +} + +#[test] +fn split_to_2() { + let mut a = Bytes::from(LONG); + assert_eq!(LONG, a); + + let b = a.split_to(1); + + assert_eq!(LONG[1..], a); + drop(b); +} + +#[test] +#[should_panic] +fn split_to_oob() { + let mut hello = Bytes::from(&b"helloworld"[..]); + let _ = hello.split_to(33); +} + +#[test] +#[should_panic] +fn split_to_oob_mut() { + let mut hello = BytesMut::from(&b"helloworld"[..]); + let _ = hello.split_to(33); +} + +#[test] +#[should_panic] +fn split_to_uninitialized() { + let mut bytes = BytesMut::with_capacity(1024); + let _other = bytes.split_to(128); +} + +#[test] +fn split_off_to_at_gt_len() { + fn make_bytes() -> Bytes { + let mut bytes = BytesMut::with_capacity(100); + bytes.put_slice(&[10, 20, 30, 40]); + bytes.freeze() + } + + use std::panic; + + let _ = make_bytes().split_to(4); + let _ = make_bytes().split_off(4); + + assert!(panic::catch_unwind(move || { + let _ = make_bytes().split_to(5); + }).is_err()); + + assert!(panic::catch_unwind(move || { + let _ = make_bytes().split_off(5); + }).is_err()); +} + +#[test] +fn truncate() { + let s = 
&b"helloworld"[..]; + let mut hello = Bytes::from(s); + hello.truncate(15); + assert_eq!(hello, s); + hello.truncate(10); + assert_eq!(hello, s); + hello.truncate(5); + assert_eq!(hello, "hello"); +} + +#[test] +fn freeze_clone_shared() { + let s = &b"abcdefgh"[..]; + let b = BytesMut::from(s).split().freeze(); + assert_eq!(b, s); + let c = b.clone(); + assert_eq!(c, s); +} + +#[test] +fn freeze_clone_unique() { + let s = &b"abcdefgh"[..]; + let b = BytesMut::from(s).freeze(); + assert_eq!(b, s); + let c = b.clone(); + assert_eq!(c, s); +} + +#[test] +fn fns_defined_for_bytes_mut() { + let mut bytes = BytesMut::from(&b"hello world"[..]); + + bytes.as_ptr(); + bytes.as_mut_ptr(); + + // Iterator + let v: Vec<u8> = bytes.as_ref().iter().cloned().collect(); + assert_eq!(&v[..], bytes); +} + +#[test] +fn reserve_convert() { + // Vec -> Vec + let mut bytes = BytesMut::from(LONG); + bytes.reserve(64); + assert_eq!(bytes.capacity(), LONG.len() + 64); + + // Arc -> Vec + let mut bytes = BytesMut::from(LONG); + let a = bytes.split_to(30); + + bytes.reserve(128); + assert!(bytes.capacity() >= bytes.len() + 128); + + drop(a); +} + +#[test] +fn reserve_growth() { + let mut bytes = BytesMut::with_capacity(64); + bytes.put("hello world".as_bytes()); + let _ = bytes.split(); + + bytes.reserve(65); + assert_eq!(bytes.capacity(), 128); +} + +#[test] +fn reserve_allocates_at_least_original_capacity() { + let mut bytes = BytesMut::with_capacity(1024); + + for i in 0..1020 { + bytes.put_u8(i as u8); + } + + let _other = bytes.split(); + + bytes.reserve(16); + assert_eq!(bytes.capacity(), 1024); +} + +#[test] +fn reserve_max_original_capacity_value() { + const SIZE: usize = 128 * 1024; + + let mut bytes = BytesMut::with_capacity(SIZE); + + for _ in 0..SIZE { + bytes.put_u8(0u8); + } + + let _other = bytes.split(); + + bytes.reserve(16); + assert_eq!(bytes.capacity(), 64 * 1024); +} + +#[test] +fn reserve_vec_recycling() { + let mut bytes = BytesMut::with_capacity(16); + assert_eq!(bytes.capacity(), 16); + let addr = bytes.as_ptr() as usize; + bytes.put("0123456789012345".as_bytes()); + assert_eq!(bytes.as_ptr() as usize, addr); + bytes.advance(10); + assert_eq!(bytes.capacity(), 6); + bytes.reserve(8); + assert_eq!(bytes.capacity(), 16); + assert_eq!(bytes.as_ptr() as usize, addr); +} + +#[test] +fn reserve_in_arc_unique_does_not_overallocate() { + let mut bytes = BytesMut::with_capacity(1000); + let _ = bytes.split(); + + // now bytes is Arc and refcount == 1 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(2001); + assert_eq!(2001, bytes.capacity()); +} + +#[test] +fn reserve_in_arc_unique_doubles() { + let mut bytes = BytesMut::with_capacity(1000); + let _ = bytes.split(); + + // now bytes is Arc and refcount == 1 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(1001); + assert_eq!(2000, bytes.capacity()); +} + +#[test] +fn reserve_in_arc_nonunique_does_not_overallocate() { + let mut bytes = BytesMut::with_capacity(1000); + let _copy = bytes.split(); + + // now bytes is Arc and refcount == 2 + + assert_eq!(1000, bytes.capacity()); + bytes.reserve(2001); + assert_eq!(2001, bytes.capacity()); +} + +#[test] +fn extend_mut() { + let mut bytes = BytesMut::with_capacity(0); + bytes.extend(LONG); + assert_eq!(*bytes, LONG[..]); +} + +#[test] +fn extend_from_slice_mut() { + for &i in &[3, 34] { + let mut bytes = BytesMut::new(); + bytes.extend_from_slice(&LONG[..i]); + bytes.extend_from_slice(&LONG[i..]); + assert_eq!(LONG[..], *bytes); + } +} + +#[test] +fn extend_mut_without_size_hint() { + let mut 
bytes = BytesMut::with_capacity(0); + let mut long_iter = LONG.iter(); + + // Use iter::from_fn since it doesn't know a size_hint + bytes.extend(std::iter::from_fn(|| long_iter.next())); + assert_eq!(*bytes, LONG[..]); +} + +#[test] +fn from_static() { + let mut a = Bytes::from_static(b"ab"); + let b = a.split_off(1); + + assert_eq!(a, b"a"[..]); + assert_eq!(b, b"b"[..]); +} + +#[test] +fn advance_static() { + let mut a = Bytes::from_static(b"hello world"); + a.advance(6); + assert_eq!(a, &b"world"[..]); +} + +#[test] +fn advance_vec() { + let mut a = Bytes::from(b"hello world boooo yah world zomg wat wat".to_vec()); + a.advance(16); + assert_eq!(a, b"o yah world zomg wat wat"[..]); + + a.advance(4); + assert_eq!(a, b"h world zomg wat wat"[..]); + + a.advance(6); + assert_eq!(a, b"d zomg wat wat"[..]); +} + +#[test] +fn advance_bytes_mut() { + let mut a = BytesMut::from("hello world boooo yah world zomg wat wat"); + a.advance(16); + assert_eq!(a, b"o yah world zomg wat wat"[..]); + + a.advance(4); + assert_eq!(a, b"h world zomg wat wat"[..]); + + // Reserve some space. + a.reserve(1024); + assert_eq!(a, b"h world zomg wat wat"[..]); + + a.advance(6); + assert_eq!(a, b"d zomg wat wat"[..]); +} + +#[test] +#[should_panic] +fn advance_past_len() { + let mut a = BytesMut::from("hello world"); + a.advance(20); +} + +#[test] +// Only run these tests on little endian systems. CI uses qemu for testing +// little endian... and qemu doesn't really support threading all that well. +#[cfg(target_endian = "little")] +fn stress() { + // Tests promoting a buffer from a vec -> shared in a concurrent situation + use std::sync::{Arc, Barrier}; + use std::thread; + + const THREADS: usize = 8; + const ITERS: usize = 1_000; + + for i in 0..ITERS { + let data = [i as u8; 256]; + let buf = Arc::new(Bytes::copy_from_slice(&data[..])); + + let barrier = Arc::new(Barrier::new(THREADS)); + let mut joins = Vec::with_capacity(THREADS); + + for _ in 0..THREADS { + let c = barrier.clone(); + let buf = buf.clone(); + + joins.push(thread::spawn(move || { + c.wait(); + let buf: Bytes = (*buf).clone(); + drop(buf); + })); + } + + for th in joins { + th.join().unwrap(); + } + + assert_eq!(*buf, data[..]); + } +} + +#[test] +fn partial_eq_bytesmut() { + let bytes = Bytes::from(&b"The quick red fox"[..]); + let bytesmut = BytesMut::from(&b"The quick red fox"[..]); + assert!(bytes == bytesmut); + assert!(bytesmut == bytes); + let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]); + assert!(bytes2 != bytesmut); + assert!(bytesmut != bytes2); +} + +/* +#[test] +fn bytes_unsplit_basic() { + let buf = Bytes::from(&b"aaabbbcccddd"[..]); + + let splitted = buf.split_off(6); + assert_eq!(b"aaabbb", &buf[..]); + assert_eq!(b"cccddd", &splitted[..]); + + buf.unsplit(splitted); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn bytes_unsplit_empty_other() { + let buf = Bytes::from(&b"aaabbbcccddd"[..]); + + // empty other + let other = Bytes::new(); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn bytes_unsplit_empty_self() { + // empty self + let mut buf = Bytes::new(); + + let mut other = Bytes::with_capacity(64); + other.extend_from_slice(b"aaabbbcccddd"); + + buf.unsplit(other); + assert_eq!(b"aaabbbcccddd", &buf[..]); +} + +#[test] +fn bytes_unsplit_arc_different() { + let mut buf = Bytes::with_capacity(64); + buf.extend_from_slice(b"aaaabbbbeeee"); + + buf.split_off(8); //arc + + let mut buf2 = Bytes::with_capacity(64); + buf2.extend_from_slice(b"ccccddddeeee"); + + 
+
+/*
+#[test]
+fn bytes_unsplit_basic() {
+    let buf = Bytes::from(&b"aaabbbcccddd"[..]);
+
+    let splitted = buf.split_off(6);
+    assert_eq!(b"aaabbb", &buf[..]);
+    assert_eq!(b"cccddd", &splitted[..]);
+
+    buf.unsplit(splitted);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_empty_other() {
+    let buf = Bytes::from(&b"aaabbbcccddd"[..]);
+
+    // empty other
+    let other = Bytes::new();
+
+    buf.unsplit(other);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_empty_self() {
+    // empty self
+    let mut buf = Bytes::new();
+
+    let mut other = Bytes::with_capacity(64);
+    other.extend_from_slice(b"aaabbbcccddd");
+
+    buf.unsplit(other);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_arc_different() {
+    let mut buf = Bytes::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbeeee");
+
+    buf.split_off(8); //arc
+
+    let mut buf2 = Bytes::with_capacity(64);
+    buf2.extend_from_slice(b"ccccddddeeee");
+
+    buf2.split_off(8); //arc
+
+    buf.unsplit(buf2);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_arc_non_contiguous() {
+    let mut buf = Bytes::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
+
+    let mut buf2 = buf.split_off(8); //arc
+
+    let buf3 = buf2.split_off(4); //arc
+
+    buf.unsplit(buf3);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_two_split_offs() {
+    let mut buf = Bytes::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbccccdddd");
+
+    let mut buf2 = buf.split_off(8); //arc
+    let buf3 = buf2.split_off(4); //arc
+
+    buf2.unsplit(buf3);
+    buf.unsplit(buf2);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
+
+#[test]
+fn bytes_unsplit_overlapping_references() {
+    let mut buf = Bytes::with_capacity(64);
+    buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz");
+    let mut buf0010 = buf.slice(0..10);
+    let buf1020 = buf.slice(10..20);
+    let buf0515 = buf.slice(5..15);
+    buf0010.unsplit(buf1020);
+    assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]);
+    assert_eq!(b"fghijklmno", &buf0515[..]);
+}
+*/
+
+#[test]
+fn bytes_mut_unsplit_basic() {
+    let mut buf = BytesMut::with_capacity(64);
+    buf.extend_from_slice(b"aaabbbcccddd");
+
+    let splitted = buf.split_off(6);
+    assert_eq!(b"aaabbb", &buf[..]);
+    assert_eq!(b"cccddd", &splitted[..]);
+
+    buf.unsplit(splitted);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_mut_unsplit_empty_other() {
+    let mut buf = BytesMut::with_capacity(64);
+    buf.extend_from_slice(b"aaabbbcccddd");
+
+    // empty other
+    let other = BytesMut::new();
+
+    buf.unsplit(other);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_mut_unsplit_empty_self() {
+    // empty self
+    let mut buf = BytesMut::new();
+
+    let mut other = BytesMut::with_capacity(64);
+    other.extend_from_slice(b"aaabbbcccddd");
+
+    buf.unsplit(other);
+    assert_eq!(b"aaabbbcccddd", &buf[..]);
+}
+
+#[test]
+fn bytes_mut_unsplit_arc_different() {
+    let mut buf = BytesMut::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbeeee");
+
+    let _ = buf.split_off(8); //arc
+
+    let mut buf2 = BytesMut::with_capacity(64);
+    buf2.extend_from_slice(b"ccccddddeeee");
+
+    let _ = buf2.split_off(8); //arc
+
+    buf.unsplit(buf2);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
+
+#[test]
+fn bytes_mut_unsplit_arc_non_contiguous() {
+    let mut buf = BytesMut::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
+
+    let mut buf2 = buf.split_off(8); //arc
+
+    let buf3 = buf2.split_off(4); //arc
+
+    buf.unsplit(buf3);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
+
+#[test]
+fn bytes_mut_unsplit_two_split_offs() {
+    let mut buf = BytesMut::with_capacity(64);
+    buf.extend_from_slice(b"aaaabbbbccccdddd");
+
+    let mut buf2 = buf.split_off(8); //arc
+    let buf3 = buf2.split_off(4); //arc
+
+    buf2.unsplit(buf3);
+    buf.unsplit(buf2);
+    assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
+}
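The bytes_mut_unsplit_* tests above all reduce to one rule: when other is contiguous with the end of self inside the same shared buffer, unsplit reattaches it in place; otherwise it falls back to copying. A brief sketch of the cheap path, using the same BytesMut API (the "header|payload" framing is invented for illustration):

    use bytes::BytesMut;

    fn main() {
        let mut buf = BytesMut::with_capacity(64);
        buf.extend_from_slice(b"header|payload");

        // Carve off the header; both halves still point into one buffer.
        let header = buf.split_to(7);
        assert_eq!(b"header|", &header[..]);
        assert_eq!(b"payload", &buf[..]);

        // The halves are adjacent, so unsplit can reassemble without copying.
        let mut whole = header;
        whole.unsplit(buf);
        assert_eq!(b"header|payload", &whole[..]);
    }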
+
+#[test]
+fn from_iter_no_size_hint() {
+    use std::iter;
+
+    let mut expect = vec![];
+
+    let actual: Bytes = iter::repeat(b'x')
+        .scan(100, |cnt, item| {
+            if *cnt >= 1 {
+                *cnt -= 1;
+                expect.push(item);
+                Some(item)
+            } else {
+                None
+            }
+        })
+        .collect();
+
+    assert_eq!(&actual[..], &expect[..]);
+}
+
+fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
+    let slice = &(bytes.as_ref()[start..end]);
+    let sub = bytes.slice_ref(&slice);
+    assert_eq!(&sub[..], expected);
+}
+
+#[test]
+fn slice_ref_works() {
+    let bytes = Bytes::from(&b"012345678"[..]);
+
+    test_slice_ref(&bytes, 0, 0, b"");
+    test_slice_ref(&bytes, 0, 3, b"012");
+    test_slice_ref(&bytes, 2, 6, b"2345");
+    test_slice_ref(&bytes, 7, 9, b"78");
+    test_slice_ref(&bytes, 9, 9, b"");
+}
+
+
+#[test]
+fn slice_ref_empty() {
+    let bytes = Bytes::from(&b""[..]);
+    let slice = &(bytes.as_ref()[0..0]);
+
+    let sub = bytes.slice_ref(&slice);
+    assert_eq!(&sub[..], b"");
+}
+
+#[test]
+#[should_panic]
+fn slice_ref_catches_not_a_subset() {
+    let bytes = Bytes::from(&b"012345678"[..]);
+    let slice = &b"012345"[0..4];
+
+    bytes.slice_ref(slice);
+}
+
+#[test]
+#[should_panic]
+fn slice_ref_catches_not_an_empty_subset() {
+    let bytes = Bytes::from(&b"012345678"[..]);
+    let slice = &b""[0..0];
+
+    bytes.slice_ref(slice);
+}
+
+#[test]
+#[should_panic]
+fn empty_slice_ref_catches_not_an_empty_subset() {
+    let bytes = Bytes::new();
+    let slice = &b"some other slice"[0..0];
+
+    // Protect this test against Bytes internals.
+    //
+    // This should panic *because* the slice's ptr doesn't fit in the range
+    // of the `bytes`.
+    if bytes.as_ptr() as usize == slice.as_ptr() as usize {
+        // don't panic, so the test fails
+        return;
+    }
+
+    bytes.slice_ref(slice);
+}
+
+#[test]
+fn bytes_buf_mut_advance() {
+    let mut bytes = BytesMut::with_capacity(1024);
+
+    unsafe {
+        let ptr = bytes.bytes_mut().as_ptr();
+        assert_eq!(1024, bytes.bytes_mut().len());
+
+        bytes.advance_mut(10);
+
+        let next = bytes.bytes_mut().as_ptr();
+        assert_eq!(1024 - 10, bytes.bytes_mut().len());
+        assert_eq!(ptr.offset(10), next);
+
+        // advance to the end
+        bytes.advance_mut(1024 - 10);
+
+        // The buffer size is doubled
+        assert_eq!(1024, bytes.bytes_mut().len());
+    }
+}
+
+#[test]
+#[should_panic]
+fn bytes_reserve_overflow() {
+    let mut bytes = BytesMut::with_capacity(1024);
+    bytes.put_slice(b"hello world");
+
+    bytes.reserve(usize::MAX);
+}
+
+#[test]
+fn bytes_with_capacity_but_empty() {
+    // See https://github.com/tokio-rs/bytes/issues/340
+    let vec = Vec::with_capacity(1);
+    let _ = Bytes::from(vec);
+}
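The slice_ref tests above document the contract: the argument must point into the buffer that bytes itself owns (checked by pointer range, hence the should_panic cases), and the returned Bytes shares storage with the original. A short sketch of the intended use, turning a plain subslice back into an owned, zero-copy handle (the "key=value" data is illustrative):

    use bytes::Bytes;

    fn main() {
        let bytes = Bytes::from(&b"key=value"[..]);

        // Ordinary slice work, e.g. a parser locating the value...
        let value: &[u8] = &bytes[4..];

        // ...can be turned back into an owned Bytes without copying,
        // because `value` lies inside the buffer that `bytes` owns.
        let owned = bytes.slice_ref(value);
        assert_eq!(b"value", &owned[..]);
    }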
diff --git a/third_party/rust/bytes/tests/test_chain.rs b/third_party/rust/bytes/tests/test_chain.rs
new file mode 100644
index 0000000000..332571d8b3
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_chain.rs
@@ -0,0 +1,133 @@
+#![deny(warnings, rust_2018_idioms)]
+
+use bytes::{Buf, BufMut, Bytes};
+use bytes::buf::{BufExt, BufMutExt};
+use std::io::IoSlice;
+
+#[test]
+fn collect_two_bufs() {
+    let a = Bytes::from(&b"hello"[..]);
+    let b = Bytes::from(&b"world"[..]);
+
+    let res = a.chain(b).to_bytes();
+    assert_eq!(res, &b"helloworld"[..]);
+}
+
+#[test]
+fn writing_chained() {
+    let mut a = [0u8; 64];
+    let mut b = [0u8; 64];
+
+    {
+        let mut buf = (&mut a[..]).chain_mut(&mut b[..]);
+
+        for i in 0u8..128 {
+            buf.put_u8(i);
+        }
+    }
+
+    for i in 0..64 {
+        let expect = i as u8;
+        assert_eq!(expect, a[i]);
+        assert_eq!(expect + 64, b[i]);
+    }
+}
+
+#[test]
+fn iterating_two_bufs() {
+    let a = Bytes::from(&b"hello"[..]);
+    let b = Bytes::from(&b"world"[..]);
+
+    let res: Vec<u8> = a.chain(b).into_iter().collect();
+    assert_eq!(res, &b"helloworld"[..]);
+}
+
+#[test]
+fn vectored_read() {
+    let a = Bytes::from(&b"hello"[..]);
+    let b = Bytes::from(&b"world"[..]);
+
+    let mut buf = a.chain(b);
+
+    {
+        let b1: &[u8] = &mut [];
+        let b2: &[u8] = &mut [];
+        let b3: &[u8] = &mut [];
+        let b4: &[u8] = &mut [];
+        let mut iovecs = [
+            IoSlice::new(b1),
+            IoSlice::new(b2),
+            IoSlice::new(b3),
+            IoSlice::new(b4),
+        ];
+
+        assert_eq!(2, buf.bytes_vectored(&mut iovecs));
+        assert_eq!(iovecs[0][..], b"hello"[..]);
+        assert_eq!(iovecs[1][..], b"world"[..]);
+        assert_eq!(iovecs[2][..], b""[..]);
+        assert_eq!(iovecs[3][..], b""[..]);
+    }
+
+    buf.advance(2);
+
+    {
+        let b1: &[u8] = &mut [];
+        let b2: &[u8] = &mut [];
+        let b3: &[u8] = &mut [];
+        let b4: &[u8] = &mut [];
+        let mut iovecs = [
+            IoSlice::new(b1),
+            IoSlice::new(b2),
+            IoSlice::new(b3),
+            IoSlice::new(b4),
+        ];
+
+        assert_eq!(2, buf.bytes_vectored(&mut iovecs));
+        assert_eq!(iovecs[0][..], b"llo"[..]);
+        assert_eq!(iovecs[1][..], b"world"[..]);
+        assert_eq!(iovecs[2][..], b""[..]);
+        assert_eq!(iovecs[3][..], b""[..]);
+    }
+
+    buf.advance(3);
+
+    {
+        let b1: &[u8] = &mut [];
+        let b2: &[u8] = &mut [];
+        let b3: &[u8] = &mut [];
+        let b4: &[u8] = &mut [];
+        let mut iovecs = [
+            IoSlice::new(b1),
+            IoSlice::new(b2),
+            IoSlice::new(b3),
+            IoSlice::new(b4),
+        ];
+
+        assert_eq!(1, buf.bytes_vectored(&mut iovecs));
+        assert_eq!(iovecs[0][..], b"world"[..]);
+        assert_eq!(iovecs[1][..], b""[..]);
+        assert_eq!(iovecs[2][..], b""[..]);
+        assert_eq!(iovecs[3][..], b""[..]);
+    }
+
+    buf.advance(3);
+
+    {
+        let b1: &[u8] = &mut [];
+        let b2: &[u8] = &mut [];
+        let b3: &[u8] = &mut [];
+        let b4: &[u8] = &mut [];
+        let mut iovecs = [
+            IoSlice::new(b1),
+            IoSlice::new(b2),
+            IoSlice::new(b3),
+            IoSlice::new(b4),
+        ];
+
+        assert_eq!(1, buf.bytes_vectored(&mut iovecs));
+        assert_eq!(iovecs[0][..], b"ld"[..]);
+        assert_eq!(iovecs[1][..], b""[..]);
+        assert_eq!(iovecs[2][..], b""[..]);
+        assert_eq!(iovecs[3][..], b""[..]);
+    }
+}
diff --git a/third_party/rust/bytes/tests/test_debug.rs b/third_party/rust/bytes/tests/test_debug.rs
new file mode 100644
index 0000000000..7528bac87b
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_debug.rs
@@ -0,0 +1,35 @@
+#![deny(warnings, rust_2018_idioms)]
+
+use bytes::Bytes;
+
+#[test]
+fn fmt() {
+    let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect();
+
+    let expected = "b\"\
+        \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\
+        \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
+        \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
+        \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
+        \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
+        @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
+        `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
+        \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
+        \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
+        \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\
+        \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
+        \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\
+        \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
+        \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\
+        \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
+        \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\
+        \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
+        \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\
+        \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
+        \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\
+        \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
+        \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\
+        \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\"";
+
+    assert_eq!(expected, format!("{:?}", Bytes::from(vec)));
+}
diff --git a/third_party/rust/bytes/tests/test_iter.rs b/third_party/rust/bytes/tests/test_iter.rs
new file mode 100644
index 0000000000..13b86cdad4
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_iter.rs
@@ -0,0 +1,22 @@
+#![deny(warnings, rust_2018_idioms)]
+
+use bytes::Bytes;
+
+#[test]
+fn iter_len() {
+    let buf = Bytes::from_static(b"hello world");
+    let iter = buf.iter();
+
+    assert_eq!(iter.size_hint(), (11, Some(11)));
+    assert_eq!(iter.len(), 11);
+}
+
+
+#[test]
+fn empty_iter_len() {
+    let buf = Bytes::from_static(b"");
+    let iter = buf.iter();
+
+    assert_eq!(iter.size_hint(), (0, Some(0)));
+    assert_eq!(iter.len(), 0);
+}
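The vectored_read test in test_chain.rs above shows chain exposing each underlying buffer as its own IoSlice, which is what makes a single gathered write possible. A condensed sketch of the same pattern against the bytes 0.5 API (the HTTP-ish strings are invented for illustration):

    use bytes::buf::BufExt;
    use bytes::{Buf, Bytes};
    use std::io::IoSlice;

    fn main() {
        let head = Bytes::from_static(b"HTTP/1.1 200 OK\r\n\r\n");
        let body = Bytes::from_static(b"hello");

        // Chain the two buffers without copying either one.
        let mut msg = head.chain(body);

        {
            // Each underlying buffer surfaces as one IoSlice, ready for a
            // vectored write such as Write::write_vectored.
            let mut iovecs = [
                IoSlice::new(&[]),
                IoSlice::new(&[]),
                IoSlice::new(&[]),
                IoSlice::new(&[]),
            ];
            assert_eq!(2, msg.bytes_vectored(&mut iovecs));
        }

        // Consuming the first buffer leaves the chain positioned in the second.
        msg.advance(19);
        assert_eq!(b"hello", msg.bytes());
    }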
diff --git a/third_party/rust/bytes/tests/test_reader.rs b/third_party/rust/bytes/tests/test_reader.rs
new file mode 100644
index 0000000000..9c5972a965
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_reader.rs
@@ -0,0 +1,28 @@
+#![deny(warnings, rust_2018_idioms)]
+
+use std::io::{BufRead, Read};
+
+use bytes::buf::{BufExt};
+
+#[test]
+fn read() {
+    let buf1 = &b"hello "[..];
+    let buf2 = &b"world"[..];
+    let buf = BufExt::chain(buf1, buf2); // Disambiguate with Read::chain
+    let mut buffer = Vec::new();
+    buf.reader().read_to_end(&mut buffer).unwrap();
+    assert_eq!(b"hello world", &buffer[..]);
+}
+
+#[test]
+fn buf_read() {
+    let buf1 = &b"hell"[..];
+    let buf2 = &b"o\nworld"[..];
+    let mut reader = BufExt::chain(buf1, buf2).reader();
+    let mut line = String::new();
+    reader.read_line(&mut line).unwrap();
+    assert_eq!("hello\n", &line);
+    line.clear();
+    reader.read_line(&mut line).unwrap();
+    assert_eq!("world", &line);
+}
diff --git a/third_party/rust/bytes/tests/test_serde.rs b/third_party/rust/bytes/tests/test_serde.rs
new file mode 100644
index 0000000000..18b135692b
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_serde.rs
@@ -0,0 +1,20 @@
+#![cfg(feature = "serde")]
+#![deny(warnings, rust_2018_idioms)]
+
+use serde_test::{Token, assert_tokens};
+
+#[test]
+fn test_ser_de_empty() {
+    let b = bytes::Bytes::new();
+    assert_tokens(&b, &[Token::Bytes(b"")]);
+    let b = bytes::BytesMut::with_capacity(0);
+    assert_tokens(&b, &[Token::Bytes(b"")]);
+}
+
+#[test]
+fn test_ser_de() {
+    let b = bytes::Bytes::from(&b"bytes"[..]);
+    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
+    let b = bytes::BytesMut::from(&b"bytes"[..]);
+    assert_tokens(&b, &[Token::Bytes(b"bytes")]);
+}
diff --git a/third_party/rust/bytes/tests/test_take.rs b/third_party/rust/bytes/tests/test_take.rs
new file mode 100644
index 0000000000..b9b525b1f8
--- /dev/null
+++ b/third_party/rust/bytes/tests/test_take.rs
@@ -0,0 +1,12 @@
+#![deny(warnings, rust_2018_idioms)]
+
+use bytes::buf::{Buf, BufExt};
+
+#[test]
+fn long_take() {
+    // Tests that a take with a size greater than the buffer length will not
+    // overrun the buffer. Regression test for #138.
+    let buf = b"hello world".take(100);
+    assert_eq!(11, buf.remaining());
+    assert_eq!(b"hello world", buf.bytes());
+}
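The long_take regression test above pins down the behavior from #138: a Take whose limit exceeds the underlying buffer must clamp to the real length rather than overrun it. A hedged sketch of Take used as a length guard when decoding a prefixed payload; the one-byte length framing here is invented for illustration, not part of the crate:

    use bytes::buf::{Buf, BufExt};

    fn main() {
        let mut frame = &b"\x05helloXXXX"[..];

        // First byte is a (hypothetical) length prefix.
        let len = frame.get_u8() as usize;

        // `take` caps how much of the remaining buffer a decoder may read,
        // and clamps to the real length if the prefix overshoots.
        let mut payload = frame.take(len);
        assert_eq!(5, payload.remaining());
        assert_eq!(b"hello", payload.bytes());

        // Consume the payload through the guard, then recover the rest.
        payload.advance(5);
        let rest = payload.into_inner();
        assert_eq!(b"XXXX", rest);
    }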