Diffstat
-rw-r--r--  third_party/rust/scroll/.cargo-checksum.json          1
-rw-r--r--  third_party/rust/scroll/CHANGELOG.md                  17
-rw-r--r--  third_party/rust/scroll/Cargo.lock                    205
-rw-r--r--  third_party/rust/scroll/Cargo.toml                    36
-rw-r--r--  third_party/rust/scroll/LICENSE                       21
-rw-r--r--  third_party/rust/scroll/README.md                     193
-rw-r--r--  third_party/rust/scroll/benches/bench.rs              157
-rw-r--r--  third_party/rust/scroll/examples/data_ctx.rs          24
-rw-r--r--  third_party/rust/scroll/src/ctx.rs                    897
-rw-r--r--  third_party/rust/scroll/src/endian.rs                 51
-rw-r--r--  third_party/rust/scroll/src/error.rs                  84
-rw-r--r--  third_party/rust/scroll/src/greater.rs                169
-rw-r--r--  third_party/rust/scroll/src/leb128.rs                 249
-rw-r--r--  third_party/rust/scroll/src/lesser.rs                 178
-rw-r--r--  third_party/rust/scroll/src/lib.rs                    637
-rw-r--r--  third_party/rust/scroll/src/pread.rs                  178
-rw-r--r--  third_party/rust/scroll/src/pwrite.rs                 96
-rw-r--r--  third_party/rust/scroll/tests/api.rs                  292
-rw-r--r--  third_party/rust/scroll_derive/.cargo-checksum.json   1
-rw-r--r--  third_party/rust/scroll_derive/Cargo.toml             53
-rw-r--r--  third_party/rust/scroll_derive/LICENSE                21
-rw-r--r--  third_party/rust/scroll_derive/README.md              35
-rw-r--r--  third_party/rust/scroll_derive/src/lib.rs             538
23 files changed, 4133 insertions, 0 deletions
diff --git a/third_party/rust/scroll/.cargo-checksum.json b/third_party/rust/scroll/.cargo-checksum.json
new file mode 100644
index 0000000000..406f97faa2
--- /dev/null
+++ b/third_party/rust/scroll/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"de2bbf4669561405d402322f4cc2604218d4986b73b75b41708b9505aebcb02c","Cargo.lock":"d6a215b7466d37e08551c56949e77be4ee488f989bdef3e507713c729bbda0e6","Cargo.toml":"c240c5768d23ea9611ef57308f08b8ee4372ede6c04f0783dc9fd1710e664c19","LICENSE":"6e24b7455f0b9afefdf4f3efd59a56ce76a3020c2dc4371937e281fc5e587fd7","README.md":"e4fe9aabcd87d85a5ec93241eeefc0d69aa0d98fbd67da2fe1849e4cbddac3ce","benches/bench.rs":"12ae02c383c91f1b0e11e9201eb8a9d44dadfb2b5987e7e71b0ef7c6589af1ca","examples/data_ctx.rs":"79684fc44d499d0b13a173184793837fbaba70d2f74f075e796eb37a1803ce3d","src/ctx.rs":"8f58672c5f3bc09b8f09c76f1d423431cbff786af75f5b39a0cef23b820d48c6","src/endian.rs":"5b717eb5ed0dc2b536779316b020df4e6489c05b13b4fd9b5f5e683aca1b2c28","src/error.rs":"a6a0ec9a6237d23febd608637c0e3926d147511e7983195366bc5a11f12d9093","src/greater.rs":"29d9736f9d35a0f92ca054c7a36878ade0a77b4e8ee27441c34cd81c6bdb68e6","src/leb128.rs":"e343f4e104ca6d8660a3dded30934b83bad4c04d8888ce2cbebfa562f5ac115d","src/lesser.rs":"d3028781977e60d67003512e45666935deab9a03c76a3ba9316a5dbdddf432eb","src/lib.rs":"49d02fa761bb2a771d1857ffd150aa4b6f55b4f03aee1a7a23d8181c76a55fd6","src/pread.rs":"64afdcf2c2785f1f23d065ec5e565d78569086dfd9ece0a3d2553b05aee5df9b","src/pwrite.rs":"05e3129ec666790a61f5b5f894ad863103e213eb798243cfe5f2cbb54d042ba1","tests/api.rs":"1bef345e020a6a4e590350ea4f6069c5836941656379e252bfbdaee6edbbc0de"},"package":"04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da"} \ No newline at end of file
diff --git a/third_party/rust/scroll/CHANGELOG.md b/third_party/rust/scroll/CHANGELOG.md
new file mode 100644
index 0000000000..bae87ee590
--- /dev/null
+++ b/third_party/rust/scroll/CHANGELOG.md
@@ -0,0 +1,17 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+Before 1.0, this project does not adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [0.10.0] - unreleased
+### Added
+ - scroll is now 2018 compliant, thanks @lzutao: https://github.com/m4b/scroll/pull/49
+ - scroll_derive now lives in scroll repo itself
+### Removed
+ - BREAKING: removed units/size generics in SizeWith, thanks @willglynn: https://github.com/m4b/scroll/pull/45
+
+## [0.9.1] - 2018-9-22
+### Added
+ - pread primitive references: https://github.com/m4b/scroll/pull/35
+ - u128/i128 support: https://github.com/m4b/scroll/pull/32
+ - CStr support: https://github.com/m4b/scroll/pull/30
diff --git a/third_party/rust/scroll/Cargo.lock b/third_party/rust/scroll/Cargo.lock
new file mode 100644
index 0000000000..baf29fe049
--- /dev/null
+++ b/third_party/rust/scroll/Cargo.lock
@@ -0,0 +1,205 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "byteorder"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "const_fn"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6"
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
+dependencies = [
+ "cfg-if",
+ "const_fn",
+ "crossbeam-utils",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "lazy_static",
+]
+
+[[package]]
+name = "either"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929"
+
+[[package]]
+name = "memoffset"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
+dependencies = [
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "lazy_static",
+ "num_cpus",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "scroll"
+version = "0.11.0"
+dependencies = [
+ "byteorder",
+ "rayon",
+ "scroll_derive",
+]
+
+[[package]]
+name = "scroll_derive"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
diff --git a/third_party/rust/scroll/Cargo.toml b/third_party/rust/scroll/Cargo.toml
new file mode 100644
index 0000000000..548be72db9
--- /dev/null
+++ b/third_party/rust/scroll/Cargo.toml
@@ -0,0 +1,36 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "scroll"
+version = "0.11.0"
+authors = ["m4b <m4b.github.io@gmail.com>", "Ted Mielczarek <ted@mielczarek.org>"]
+description = "A suite of powerful, extensible, generic, endian-aware Read/Write traits for byte buffers"
+documentation = "https://docs.rs/scroll"
+readme = "README.md"
+keywords = ["bytes", "endian", "immutable", "pread", "pwrite"]
+license = "MIT"
+repository = "https://github.com/m4b/scroll"
+resolver = "2"
+[dependencies.scroll_derive]
+version = "0.11"
+optional = true
+[dev-dependencies.byteorder]
+version = "1"
+
+[dev-dependencies.rayon]
+version = "1"
+
+[features]
+default = ["std"]
+derive = ["scroll_derive"]
+std = []
diff --git a/third_party/rust/scroll/LICENSE b/third_party/rust/scroll/LICENSE
new file mode 100644
index 0000000000..28e9a1f370
--- /dev/null
+++ b/third_party/rust/scroll/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) m4b 2016
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rust/scroll/README.md b/third_party/rust/scroll/README.md
new file mode 100644
index 0000000000..717fe6a234
--- /dev/null
+++ b/third_party/rust/scroll/README.md
@@ -0,0 +1,193 @@
+ [![Build Status](https://travis-ci.org/m4b/scroll.svg?branch=master)](https://travis-ci.org/m4b/scroll)
+## Scroll - cast some magic
+
+```text
+ _______________
+ ()==( (@==()
+ '______________'|
+ | |
+ | ἀρετή |
+ __)_____________|
+ ()==( (@==()
+ '--------------'
+
+```
+
+### Documentation
+
+https://docs.rs/scroll
+
+### Usage
+
+Add to your `Cargo.toml`
+
+```toml, no_test
+[dependencies]
+scroll = "0.10"
+```
+
+### Overview
+
+Scroll implements several traits for read/writing generic containers (byte buffers are currently implemented by default). Most familiar will likely be the `Pread` trait, which at its basic takes an immutable reference to self, an immutable offset to read at, (and a parsing context, more on that later), and then returns the deserialized value.
+
+Because self is immutable, _**all** reads can be performed in parallel_ and hence are trivially parallelizable.
+
+A simple example demonstrates its flexibility:
+
+```rust
+use scroll::{ctx, Pread, LE};
+
+fn main() -> Result<(), scroll::Error> {
+ let bytes: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
+
+    // reads a u32 out of `bytes` with the endianness of the host machine, at offset 0, turbofish-style
+ let number: u32 = bytes.pread::<u32>(0)?;
+ // ...or a byte, with type ascription on the binding.
+ let byte: u8 = bytes.pread(0)?;
+
+    // If the type is known to the compiler some other way, say when reading into a struct field, we can omit the turbofish and the type ascription altogether!
+
+    // If we want, we can explicitly add an endianness to read with by calling `pread_with`.
+    // The following reads a u32 out of `bytes` with Big Endian byte order, at offset 0
+ let be_number: u32 = bytes.pread_with(0, scroll::BE)?;
+ // or a u16 - specify the type either on the variable or with the beloved turbofish
+ let be_number2 = bytes.pread_with::<u16>(2, scroll::BE)?;
+
+    // Scroll has core-friendly errors (no allocation). This will be an `Err(scroll::Error::TooBig)` because an i64 cannot be read out of a 4-byte buffer
+ let byte: scroll::Result<i64> = bytes.pread(0);
+
+    // Scroll is extensible: as long as the type implements `TryFromCtx`, you can read your type out of the byte array!
+
+ // We can parse out custom datatypes, or types with lifetimes
+ // if they implement the conversion trait `TryFromCtx`; here we parse a C-style \0 delimited &str (safely)
+ let hello: &[u8] = b"hello_world\0more words";
+ let hello_world: &str = hello.pread(0)?;
+ assert_eq!("hello_world", hello_world);
+
+    // ... and this parses the string if it's space-separated!
+ use scroll::ctx::*;
+ let spaces: &[u8] = b"hello world some junk";
+ let world: &str = spaces.pread_with(6, StrCtx::Delimiter(SPACE))?;
+ assert_eq!("world", world);
+ Ok(())
+}
+```
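+
+Because reads take `&self` and an explicit offset, they can also be scattered across threads with no
+extra synchronization. A minimal sketch of that (borrowing `rayon`, which scroll already uses in its
+own benches; the exact buffer layout here is made up for illustration):
+
+```rust
+use rayon::prelude::*;
+use scroll::{Pread, LE};
+
+fn main() {
+    // a buffer of 10_000 little-endian u16s
+    let data: Vec<u8> = (0..10_000u16).flat_map(|n| n.to_le_bytes()).collect();
+    // read every u16 in parallel; each read only borrows `data` immutably
+    let sum: u64 = (0..data.len() / 2)
+        .into_par_iter()
+        .map(|i| data.pread_with::<u16>(i * 2, LE).unwrap() as u64)
+        .sum();
+    assert_eq!(sum, (0..10_000u64).sum::<u64>());
+}
+```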
+
+### Deriving `Pread` and `Pwrite`
+
+Scroll implements a custom derive that can provide `Pread` and `Pwrite` implementations for your structs.
+
+```rust
+use scroll::{Pread, Pwrite, BE};
+
+#[derive(Pread, Pwrite)]
+struct Data {
+ one: u32,
+ two: u16,
+ three: u8,
+}
+
+fn main() -> Result<(), scroll::Error> {
+ let bytes: [u8; 7] = [0xde, 0xad, 0xbe, 0xef, 0xfa, 0xce, 0xff];
+ // Read a single `Data` at offset zero in big-endian byte order.
+ let data: Data = bytes.pread_with(0, BE)?;
+ assert_eq!(data.one, 0xdeadbeef);
+ assert_eq!(data.two, 0xface);
+ assert_eq!(data.three, 0xff);
+
+ // Write it back to a buffer
+ let mut out: [u8; 7] = [0; 7];
+ out.pwrite_with(data, 0, BE)?;
+ assert_eq!(bytes, out);
+ Ok(())
+}
+```
+
+This feature is **not** enabled by default, you must enable the `derive` feature in Cargo.toml to use it:
+
+```toml, no_test
+[dependencies]
+scroll = { version = "0.10", features = ["derive"] }
+```
+
+# `std::io` API
+
+Scroll can also read/write simple types from a `std::io::Read` or `std::io::Write` implementor. The built-in numeric types are taken care of for you. If you want to read a custom type, you need to implement the `FromCtx` (_how_ to parse) and `SizeWith` (_how_ big the parsed thing will be) traits. You must compile with default features. For example:
+
+```rust
+use std::io::Cursor;
+use scroll::IOread;
+
+fn main() -> Result<(), scroll::Error> {
+ let bytes_ = [0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0xef,0xbe,0x00,0x00,];
+ let mut bytes = Cursor::new(bytes_);
+
+ // this will bump the cursor's Seek
+ let foo = bytes.ioread::<usize>()?;
+ // ..ditto
+ let bar = bytes.ioread::<u32>()?;
+ Ok(())
+}
+```
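+
+As a sketch of the custom-type case described above (the `Pair` type and its field layout are made up
+here purely for illustration):
+
+```rust
+use std::io::Cursor;
+use scroll::{ctx, Cread, Endian, IOread, LE};
+
+// a hypothetical fixed-size record: two u32s
+struct Pair {
+    a: u32,
+    b: u32,
+}
+
+// _how_ to parse it out of a byte buffer, infallibly
+impl ctx::FromCtx<Endian> for Pair {
+    fn from_ctx(bytes: &[u8], endian: Endian) -> Self {
+        Pair {
+            a: bytes.cread_with(0, endian),
+            b: bytes.cread_with(4, endian),
+        }
+    }
+}
+
+// _how_ big the serialized form is, so `ioread` knows how many bytes to pull from the reader
+impl ctx::SizeWith<Endian> for Pair {
+    fn size_with(_ctx: &Endian) -> usize {
+        8
+    }
+}
+
+fn main() -> Result<(), scroll::Error> {
+    let mut cursor = Cursor::new([0x01, 0x00, 0x00, 0x00, 0xef, 0xbe, 0xad, 0xde]);
+    let pair: Pair = cursor.ioread_with(LE)?;
+    assert_eq!(pair.a, 1);
+    assert_eq!(pair.b, 0xdeadbeef);
+    Ok(())
+}
+```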
+
+Similarly, we can write to anything that implements `std::io::Write` quite naturally:
+
+```rust
+use scroll::{IOwrite, LE, BE};
+use std::io::{Write, Cursor};
+
+fn main() -> Result<(), scroll::Error> {
+ let mut bytes = [0x0u8; 10];
+ let mut cursor = Cursor::new(&mut bytes[..]);
+ cursor.write_all(b"hello")?;
+ cursor.iowrite_with(0xdeadbeef as u32, BE)?;
+ assert_eq!(cursor.into_inner(), [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0xde, 0xad, 0xbe, 0xef, 0x0]);
+ Ok(())
+}
+```
+
+# Advanced Uses
+
+Scroll is designed to be highly configurable - it allows you to implement various context (`Ctx`) sensitive traits, which then grants the implementor _automatic_ uses of the `Pread` and/or `Pwrite` traits.
+
+For example, suppose we have a datatype and we want to specify how to parse or serialize this datatype out of some arbitrary
+byte buffer. In order to do this, we need to provide a [TryFromCtx](trait.TryFromCtx.html) impl for our datatype.
+
+In particular, if we do this for the `[u8]` target, returning the parsed value along with the number of
+bytes consumed (the `(Self, usize)` tuple), we automatically get access to calling
+`pread_with::<YourDatatype>` on arrays of bytes.
+
+```rust
+use scroll::{ctx, Pread, BE, Endian};
+
+struct Data<'a> {
+ name: &'a str,
+ id: u32,
+}
+
+// note the lifetime specified here
+impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> {
+ type Error = scroll::Error;
+ // and the lifetime annotation on `&'a [u8]` here
+ fn try_from_ctx (src: &'a [u8], endian: Endian)
+ -> Result<(Self, usize), Self::Error> {
+ let offset = &mut 0;
+ let name = src.gread::<&str>(offset)?;
+ let id = src.gread_with(offset, endian)?;
+ Ok((Data { name: name, id: id }, *offset))
+ }
+}
+
+fn main() -> Result<(), scroll::Error> {
+ let bytes = b"UserName\x00\x01\x02\x03\x04";
+ let data = bytes.pread_with::<Data>(0, BE)?;
+ assert_eq!(data.id, 0x01020304);
+ assert_eq!(data.name.to_string(), "UserName".to_string());
+ Ok(())
+}
+```
+
+Please see the official documentation, or a simple [example](examples/data_ctx.rs) for more.
+
+# Contributing
+
+Any ideas, thoughts, or contributions are welcome!
diff --git a/third_party/rust/scroll/benches/bench.rs b/third_party/rust/scroll/benches/bench.rs
new file mode 100644
index 0000000000..0787dbe14b
--- /dev/null
+++ b/third_party/rust/scroll/benches/bench.rs
@@ -0,0 +1,157 @@
+#![feature(test)]
+extern crate test;
+
+use scroll::{Cread, Pread, LE};
+use test::black_box;
+
+#[bench]
+fn bench_parallel_cread_with(b: &mut test::Bencher) {
+ use rayon::prelude::*;
+ let vec = vec![0u8; 1_000_000];
+ let nums = vec![0usize; 500_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ nums.par_iter().for_each(|offset| {
+ let _: u16 = black_box(data.cread_with(*offset, LE));
+ });
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_cread_vec(b: &mut test::Bencher) {
+ let vec = vec![0u8; 1_000_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ for val in data.chunks(2) {
+ let _: u16 = black_box(val.cread_with(0, LE));
+ }
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_cread(b: &mut test::Bencher) {
+ const NITER: i32 = 100_000;
+ b.iter(|| {
+ for _ in 1..NITER {
+ let data = black_box([1, 2]);
+ let _: u16 = black_box(data.cread(0));
+ }
+ });
+ b.bytes = 2 * NITER as u64;
+}
+
+#[bench]
+fn bench_pread_ctx_vec(b: &mut test::Bencher) {
+ let vec = vec![0u8; 1_000_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ for val in data.chunks(2) {
+ let _: Result<u16, _> = black_box(val.pread(0));
+ }
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_pread_with_unwrap(b: &mut test::Bencher) {
+ const NITER: i32 = 100_000;
+ b.iter(|| {
+ for _ in 1..NITER {
+ let data: &[u8] = &black_box([1, 2]);
+ let _: u16 = black_box(data.pread_with(0, LE).unwrap());
+ }
+ });
+ b.bytes = 2 * NITER as u64;
+}
+
+#[bench]
+fn bench_pread_vec(b: &mut test::Bencher) {
+ let vec = vec![0u8; 1_000_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ for val in data.chunks(2) {
+ let _: Result<u16, _> = black_box(val.pread_with(0, LE));
+ }
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_pread_unwrap(b: &mut test::Bencher) {
+ const NITER: i32 = 100_000;
+ b.iter(|| {
+ for _ in 1..NITER {
+ let data = black_box([1, 2]);
+ let _: u16 = black_box(data.pread(0)).unwrap();
+ }
+ });
+ b.bytes = 2 * NITER as u64;
+}
+
+#[bench]
+fn bench_gread_vec(b: &mut test::Bencher) {
+ let vec = vec![0u8; 1_000_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ for val in data.chunks(2) {
+ let mut offset = 0;
+ let _: Result<u16, _> = black_box(val.gread(&mut offset));
+ }
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_gread_unwrap(b: &mut test::Bencher) {
+ const NITER: i32 = 100_000;
+ b.iter(|| {
+ for _ in 1..NITER {
+ let data = black_box([1, 2]);
+ let mut offset = 0;
+ let _: u16 = black_box(data.gread_with(&mut offset, LE).unwrap());
+ }
+ });
+ b.bytes = 2 * NITER as u64;
+}
+
+#[bench]
+fn bench_parallel_pread_with(b: &mut test::Bencher) {
+ use rayon::prelude::*;
+ let vec = vec![0u8; 1_000_000];
+ let nums = vec![0usize; 500_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ nums.par_iter().for_each(|offset| {
+ let _: Result<u16, _> = black_box(data.pread_with(*offset, LE));
+ });
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_byteorder_vec(b: &mut test::Bencher) {
+ use byteorder::ReadBytesExt;
+ let vec = vec![0u8; 1_000_000];
+ b.iter(|| {
+ let data = black_box(&vec[..]);
+ for mut val in data.chunks(2) {
+ let _: Result<u16, _> = black_box(val.read_u16::<byteorder::LittleEndian>());
+ }
+ });
+ b.bytes = vec.len() as u64;
+}
+
+#[bench]
+fn bench_byteorder(b: &mut test::Bencher) {
+ use byteorder::ByteOrder;
+ const NITER: i32 = 100_000;
+ b.iter(|| {
+ for _ in 1..NITER {
+ let data = black_box([1, 2]);
+ let _: u16 = black_box(byteorder::LittleEndian::read_u16(&data));
+ }
+ });
+ b.bytes = 2 * NITER as u64;
+}
diff --git a/third_party/rust/scroll/examples/data_ctx.rs b/third_party/rust/scroll/examples/data_ctx.rs
new file mode 100644
index 0000000000..667f4b18f0
--- /dev/null
+++ b/third_party/rust/scroll/examples/data_ctx.rs
@@ -0,0 +1,24 @@
+use scroll::{ctx, Endian, Pread, BE};
+
+#[derive(Debug)]
+struct Data<'a> {
+ name: &'a str,
+ id: u32,
+}
+
+impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> {
+ type Error = scroll::Error;
+ fn try_from_ctx(src: &'a [u8], endian: Endian) -> Result<(Self, usize), Self::Error> {
+ let name = src.pread::<&'a str>(0)?;
+ let id = src.pread_with(name.len() + 1, endian)?;
+        // bytes read: the name, its null terminator, and the u32 id
+        Ok((Data { name: name, id: id }, name.len() + 1 + 4))
+ }
+}
+
+fn main() {
+ let bytes = b"UserName\x00\x01\x02\x03\x04";
+ let data = bytes.pread_with::<Data>(0, BE).unwrap();
+ assert_eq!(data.id, 0x01020304);
+ assert_eq!(data.name.to_string(), "UserName".to_string());
+ println!("Data: {:?}", &data);
+}
diff --git a/third_party/rust/scroll/src/ctx.rs b/third_party/rust/scroll/src/ctx.rs
new file mode 100644
index 0000000000..1f982b82fa
--- /dev/null
+++ b/third_party/rust/scroll/src/ctx.rs
@@ -0,0 +1,897 @@
+//! Generic context-aware conversion traits, for automatic _downstream_ extension of `Pread`, et al.
+//!
+//! The context traits are arguably the centerpiece of the scroll crate. In simple terms they
+//! define how to actually read a data type from, and write it to, a container, while being able to
+//! take context into account.
+//!
+//! ### Reading
+//!
+//! Types implementing [TryFromCtx](trait.TryFromCtx.html) and its infallible cousin [FromCtx](trait.FromCtx.html)
+//! allow a user of [Pread::pread](../trait.Pread.html#method.pread) or respectively
+//! [Cread::cread](../trait.Cread.html#method.cread) and
+//! [IOread::ioread](../trait.IOread.html#method.ioread) to read that data type from a data source one
+//! of the `*read` traits has been implemented for.
+//!
+//! Implementations of `TryFromCtx` specify a source (called `This`) and an `Error` type for failed
+//! reads. The source defines the kind of container the type can be read from, and defaults to
+//! `[u8]` for any type that implements `AsRef<[u8]>`.
+//!
+//! `FromCtx` is slightly more restricted; it requires the implementer to use `[u8]` as source and
+//! never fail, and thus does not have an `Error` type.
+//!
+//! The types chosen here matter to `Pread` implementations: only a container which can produce a
+//! source of the type `This` can be used to read a `TryFromCtx` requiring it, and the `Error` type
+//! is what is returned in the `Err` variant of `Pread::pread`'s `Result`.
+//!
+//! ### Writing
+//!
+//! [TryIntoCtx](trait.TryIntoCtx.html) and the infallible [IntoCtx](trait.IntoCtx.html) work
+//! similarly to the above traits, allowing [Pwrite::pwrite](../trait.Pwrite.html#method.pwrite) or
+//! respectively [Cwrite::cwrite](../trait.Cwrite.html#method.cwrite) and
+//! [IOwrite::iowrite](../trait.IOwrite.html#method.iowrite) to write data into a byte sink for
+//! which one of the `*write` traits has been implemented.
+//!
+//! `IntoCtx` is restricted relative to `TryIntoCtx` in the same way `FromCtx` is to `TryFromCtx`.
+//! Equally, the types chosen affect which `Pwrite` implementations are usable.
+//!
+//! ### Context
+//!
+//! Each of the traits passes along a `Ctx` to the marshalling logic. This context type contains
+//! any additional information that may be required to successfully parse or write the data:
+//! Examples would be endianness to use, field lengths of a serialized struct, or delimiters to use
+//! when reading/writing `&str`. The context type can be any type but must derive
+//! [Copy](https://doc.rust-lang.org/std/marker/trait.Copy.html). In addition if you want to use
+//! the `*read`-methods instead of the `*read_with` ones you must also implement
+//! [default::Default](https://doc.rust-lang.org/std/default/trait.Default.html).
+//!
+//! # Example
+//!
+//! Let's expand on the [previous example](../index.html#complex-use-cases).
+//!
+//! ```rust
+//! use scroll::{self, ctx, Pread, Endian};
+//! use scroll::ctx::StrCtx;
+//!
+//! #[derive(Copy, Clone, PartialEq, Eq)]
+//! enum FieldSize {
+//! U32,
+//! U64
+//! }
+//!
+//! // Our custom context type. As said above it has to derive Copy.
+//! #[derive(Copy, Clone)]
+//! struct Context {
+//! fieldsize: FieldSize,
+//! endianess: Endian,
+//! }
+//!
+//! // Our custom data type
+//! struct Data<'b> {
+//! // These u64 are encoded either as 32-bit or 64-bit wide ints. Which one it is is defined in
+//! // the Context.
+//! // Also, let's imagine they have a strict relationship: A < B < C otherwise the struct is
+//! // invalid.
+//! field_a: u64,
+//! field_b: u64,
+//! field_c: u64,
+//!
+//! // Both of these are marshalled with a prefixed length.
+//! name: &'b str,
+//! value: &'b [u8],
+//! }
+//!
+//! #[derive(Debug)]
+//! enum Error {
+//! // We'll return this custom error if the field* relationship doesn't hold
+//! BadFieldMatchup,
+//! Scroll(scroll::Error),
+//! }
+//!
+//! impl<'a> ctx::TryFromCtx<'a, Context> for Data<'a> {
+//! type Error = Error;
+//!
+//!     // Using the explicit lifetime specification again you ensure that read data doesn't outlive
+//! // its source buffer without having to resort to copying.
+//! fn try_from_ctx (src: &'a [u8], ctx: Context)
+//! // the `usize` returned here is the amount of bytes read.
+//! -> Result<(Self, usize), Self::Error>
+//! {
+//! // The offset counter; gread and gread_with increment a given counter automatically so we
+//! // don't have to manually care.
+//! let offset = &mut 0;
+//!
+//! let field_a;
+//! let field_b;
+//! let field_c;
+//!
+//! // Switch the amount of bytes read depending on the parsing context
+//! if ctx.fieldsize == FieldSize::U32 {
+//! field_a = src.gread_with::<u32>(offset, ctx.endianess)? as u64;
+//! field_b = src.gread_with::<u32>(offset, ctx.endianess)? as u64;
+//! field_c = src.gread_with::<u32>(offset, ctx.endianess)? as u64;
+//! } else {
+//! field_a = src.gread_with::<u64>(offset, ctx.endianess)?;
+//! field_b = src.gread_with::<u64>(offset, ctx.endianess)?;
+//! field_c = src.gread_with::<u64>(offset, ctx.endianess)?;
+//! }
+//!
+//!         // You can use type ascription or the turbofish operator, whichever you prefer.
+//! let namelen = src.gread_with::<u16>(offset, ctx.endianess)? as usize;
+//! let name: &str = src.gread_with(offset, scroll::ctx::StrCtx::Length(namelen))?;
+//!
+//! let vallen = src.gread_with::<u16>(offset, ctx.endianess)? as usize;
+//! let value = &src[*offset..(*offset+vallen)];
+//!
+//! // Let's sanity check those fields, shall we?
+//! if ! (field_a < field_b && field_b < field_c) {
+//! return Err(Error::BadFieldMatchup);
+//! }
+//!
+//! Ok((Data { field_a, field_b, field_c, name, value }, *offset))
+//! }
+//! }
+//!
+//! // In lieu of a complex byte buffer we hearken back to the venerable &[u8]; do note however
+//! // that the implementation of TryFromCtx did not specify such. In fact any type that implements
+//! // Pread can now read `Data` as it implements TryFromCtx.
+//! let bytes = b"\x00\x02\x03\x04\x01\x02\x03\x04\xde\xad\xbe\xef\x00\x08UserName\x00\x02\xCA\xFE";
+//!
+//! // We define an appropriate context, and get going
+//! let contextA = Context {
+//! fieldsize: FieldSize::U32,
+//! endianess: Endian::Big,
+//! };
+//! let data: Data = bytes.pread_with(0, contextA).unwrap();
+//!
+//! assert_eq!(data.field_a, 0x00020304);
+//! assert_eq!(data.field_b, 0x01020304);
+//! assert_eq!(data.field_c, 0xdeadbeef);
+//! assert_eq!(data.name, "UserName");
+//! assert_eq!(data.value, [0xCA, 0xFE]);
+//!
+//! // Here we have a context with a different FieldSize, changing parsing information at runtime.
+//! let contextB = Context {
+//! fieldsize: FieldSize::U64,
+//! endianess: Endian::Big,
+//! };
+//!
+//! // Which will of course error with a malformed input for the context
+//! let err: Result<Data, Error> = bytes.pread_with(0, contextB);
+//! assert!(err.is_err());
+//!
+//! let bytes_long = [0x00,0x00,0x00,0x00,0x00,0x02,0x03,0x04,0x00,0x00,0x00,0x00,0x01,0x02,0x03,
+//! 0x04,0x00,0x00,0x00,0x00,0xde,0xad,0xbe,0xef,0x00,0x08,0x55,0x73,0x65,0x72,
+//! 0x4e,0x61,0x6d,0x65,0x00,0x02,0xCA,0xFE];
+//!
+//! let data: Data = bytes_long.pread_with(0, contextB).unwrap();
+//!
+//! assert_eq!(data.field_a, 0x00020304);
+//! assert_eq!(data.field_b, 0x01020304);
+//! assert_eq!(data.field_c, 0xdeadbeef);
+//! assert_eq!(data.name, "UserName");
+//! assert_eq!(data.value, [0xCA, 0xFE]);
+//!
+//! // Ergonomic conversion, not relevant really.
+//! use std::convert::From;
+//! impl From<scroll::Error> for Error {
+//! fn from(error: scroll::Error) -> Error {
+//! Error::Scroll(error)
+//! }
+//! }
+//! ```
+
+use core::mem::size_of;
+use core::mem::transmute;
+use core::ptr::copy_nonoverlapping;
+use core::result;
+use core::str;
+
+#[cfg(feature = "std")]
+use std::ffi::{CStr, CString};
+
+use crate::endian::Endian;
+use crate::error;
+
+/// A trait for measuring how large something is; for a byte sequence, it will be its length.
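+///
+/// # Example
+///
+/// A sketch of what the byte-oriented impls below report:
+///
+/// ```rust
+/// use scroll::ctx::MeasureWith;
+///
+/// let bytes = [0u8; 8];
+/// assert_eq!(bytes.measure_with(&()), 8);
+/// ```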
+pub trait MeasureWith<Ctx> {
+ /// How large is `Self`, given the `ctx`?
+ fn measure_with(&self, ctx: &Ctx) -> usize;
+}
+
+impl<Ctx> MeasureWith<Ctx> for [u8] {
+ #[inline]
+ fn measure_with(&self, _ctx: &Ctx) -> usize {
+ self.len()
+ }
+}
+
+impl<Ctx, T: AsRef<[u8]>> MeasureWith<Ctx> for T {
+ #[inline]
+ fn measure_with(&self, _ctx: &Ctx) -> usize {
+ self.as_ref().len()
+ }
+}
+
+/// The parsing context for converting a byte sequence to a `&str`
+///
+/// `StrCtx` specifies what byte delimiter to use, and defaults to C-style null terminators. Be careful.
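+///
+/// # Example
+///
+/// A sketch of typical use, mirroring the README's string-reading examples:
+///
+/// ```rust
+/// use scroll::Pread;
+/// use scroll::ctx::StrCtx;
+///
+/// let bytes: &[u8] = b"hello world\0trailing junk";
+/// // the default context is `StrCtx::Delimiter(NULL)`, i.e. read until the first 0 byte
+/// let both: &str = bytes.pread(0).unwrap();
+/// assert_eq!(both, "hello world");
+/// // a fixed-length read
+/// let hello: &str = bytes.pread_with(0, StrCtx::Length(5)).unwrap();
+/// assert_eq!(hello, "hello");
+/// ```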
+#[derive(Debug, Copy, Clone)]
+pub enum StrCtx {
+ Delimiter(u8),
+ DelimiterUntil(u8, usize),
+ Length(usize),
+}
+
+/// A C-style, null terminator based delimiter
+pub const NULL: u8 = 0;
+/// A space-based delimiter
+pub const SPACE: u8 = 0x20;
+/// A newline-based delimiter
+pub const RET: u8 = 0x0a;
+/// A tab-based delimiter
+pub const TAB: u8 = 0x09;
+
+impl Default for StrCtx {
+ #[inline]
+ fn default() -> Self {
+ StrCtx::Delimiter(NULL)
+ }
+}
+
+impl StrCtx {
+ pub fn len(&self) -> usize {
+ match *self {
+ StrCtx::Delimiter(_) | StrCtx::DelimiterUntil(_, _) => 1,
+ StrCtx::Length(_) => 0,
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ if let StrCtx::Length(_) = *self {
+ true
+ } else {
+ false
+ }
+ }
+}
+
+/// Reads `Self` from `This` using the context `Ctx`; must _not_ fail
+pub trait FromCtx<Ctx: Copy = (), This: ?Sized = [u8]> {
+ fn from_ctx(this: &This, ctx: Ctx) -> Self;
+}
+
+/// Tries to read `Self` from `This` using the context `Ctx`
+///
+/// # Implementing Your Own Reader
+/// If you want to implement your own reader for a type `Foo` from some kind of buffer (say
+/// `[u8]`), then you need to implement this trait
+///
+/// ```rust
+/// use scroll::{self, ctx, Pread};
+/// #[derive(Debug, PartialEq, Eq)]
+/// pub struct Foo(u16);
+///
+/// impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Foo {
+/// type Error = scroll::Error;
+/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, usize), Self::Error> {
+/// if this.len() < 2 { return Err((scroll::Error::Custom("whatever".to_string())).into()) }
+/// let n = this.pread_with(0, le)?;
+/// Ok((Foo(n), 2))
+/// }
+/// }
+///
+/// let bytes: [u8; 4] = [0xde, 0xad, 0, 0];
+/// let foo = bytes.pread_with::<Foo>(0, scroll::LE).unwrap();
+/// assert_eq!(Foo(0xadde), foo);
+///
+/// let foo2 = bytes.pread_with::<Foo>(0, scroll::BE).unwrap();
+/// assert_eq!(Foo(0xdeadu16), foo2);
+/// ```
+///
+/// # Advanced: Using Your Own Error in `TryFromCtx`
+/// ```rust
+/// use scroll::{self, ctx, Pread};
+/// use std::error;
+/// use std::fmt::{self, Display};
+/// // make some kind of normal error which also can transformed from a scroll error
+/// #[derive(Debug)]
+/// pub struct ExternalError {}
+///
+/// impl Display for ExternalError {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// write!(fmt, "ExternalError")
+/// }
+/// }
+///
+/// impl error::Error for ExternalError {
+/// fn description(&self) -> &str {
+/// "ExternalError"
+/// }
+/// fn cause(&self) -> Option<&dyn error::Error> { None}
+/// }
+///
+/// impl From<scroll::Error> for ExternalError {
+/// fn from(err: scroll::Error) -> Self {
+/// match err {
+/// _ => ExternalError{},
+/// }
+/// }
+/// }
+/// #[derive(Debug, PartialEq, Eq)]
+/// pub struct Foo(u16);
+///
+/// impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Foo {
+/// type Error = ExternalError;
+/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, usize), Self::Error> {
+/// if this.len() <= 2 { return Err((ExternalError {}).into()) }
+/// let offset = &mut 0;
+/// let n = this.gread_with(offset, le)?;
+/// Ok((Foo(n), *offset))
+/// }
+/// }
+///
+/// let bytes: [u8; 4] = [0xde, 0xad, 0, 0];
+/// let foo: Result<Foo, ExternalError> = bytes.pread(0);
+/// ```
+pub trait TryFromCtx<'a, Ctx: Copy = (), This: ?Sized = [u8]>
+where
+ Self: 'a + Sized,
+{
+ type Error;
+ fn try_from_ctx(from: &'a This, ctx: Ctx) -> Result<(Self, usize), Self::Error>;
+}
+
+/// Writes `Self` into `This` using the context `Ctx`
+pub trait IntoCtx<Ctx: Copy = (), This: ?Sized = [u8]>: Sized {
+ fn into_ctx(self, _: &mut This, ctx: Ctx);
+}
+
+/// Tries to write `Self` into `This` using the context `Ctx`
+/// To implement writing into an arbitrary byte buffer, implement `TryIntoCtx`
+/// # Example
+/// ```rust
+/// use scroll::{self, ctx, LE, Endian, Pwrite};
+/// #[derive(Debug, PartialEq, Eq)]
+/// pub struct Foo(u16);
+///
+/// // this will use the default `DefaultCtx = scroll::Endian`
+/// impl ctx::TryIntoCtx<Endian> for Foo {
+/// // you can use your own error here too, but you will then need to specify it in fn generic parameters
+/// type Error = scroll::Error;
+/// // you can write using your own context type, see `leb128.rs`
+/// fn try_into_ctx(self, this: &mut [u8], le: Endian) -> Result<usize, Self::Error> {
+/// if this.len() < 2 { return Err((scroll::Error::Custom("whatever".to_string())).into()) }
+/// this.pwrite_with(self.0, 0, le)?;
+/// Ok(2)
+/// }
+/// }
+/// // now we can write a `Foo` into some buffer (in this case, a byte buffer, because that's what we implemented it for above)
+///
+/// let mut bytes: [u8; 4] = [0, 0, 0, 0];
+/// bytes.pwrite_with(Foo(0x7f), 1, LE).unwrap();
+/// ```
+pub trait TryIntoCtx<Ctx: Copy = (), This: ?Sized = [u8]>: Sized {
+ type Error;
+ fn try_into_ctx(self, _: &mut This, ctx: Ctx) -> Result<usize, Self::Error>;
+}
+
+/// Gets the size of `Self` with a `Ctx`. Implementors can then call `Gread`-related functions
+///
+/// The rationale behind this trait is to:
+///
+/// 1. Prevent `gread` from blindly advancing the offset by the in-memory size of the value, which would be wrong for variable-length encodings, e.g., Leb128, etc.
+/// 2. Allow a context based size, which is useful for 32/64 bit variants for various containers, etc.
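+///
+/// # Example
+///
+/// A sketch of what the built-in impls for the primitive types report:
+///
+/// ```rust
+/// use scroll::ctx::SizeWith;
+/// use scroll::{Endian, LE};
+///
+/// // with an `Endian` context, the size is just the type's in-memory size
+/// assert_eq!(u32::size_with(&LE), 4);
+/// assert_eq!(u64::size_with(&Endian::Big), 8);
+/// ```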
+pub trait SizeWith<Ctx = ()> {
+ fn size_with(ctx: &Ctx) -> usize;
+}
+
+#[rustfmt::skip]
+macro_rules! signed_to_unsigned {
+ (i8) => {u8 };
+ (u8) => {u8 };
+ (i16) => {u16};
+ (u16) => {u16};
+ (i32) => {u32};
+ (u32) => {u32};
+ (i64) => {u64};
+ (u64) => {u64};
+ (i128) => {u128};
+ (u128) => {u128};
+ (f32) => {u32};
+ (f64) => {u64};
+}
+
+macro_rules! write_into {
+ ($typ:ty, $size:expr, $n:expr, $dst:expr, $endian:expr) => {{
+ unsafe {
+ assert!($dst.len() >= $size);
+ let bytes = transmute::<$typ, [u8; $size]>(if $endian.is_little() {
+ $n.to_le()
+ } else {
+ $n.to_be()
+ });
+ copy_nonoverlapping((&bytes).as_ptr(), $dst.as_mut_ptr(), $size);
+ }
+ }};
+}
+
+macro_rules! into_ctx_impl {
+ ($typ:tt, $size:expr) => {
+ impl IntoCtx<Endian> for $typ {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], le: Endian) {
+ assert!(dst.len() >= $size);
+ write_into!($typ, $size, self, dst, le);
+ }
+ }
+ impl<'a> IntoCtx<Endian> for &'a $typ {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], le: Endian) {
+ (*self).into_ctx(dst, le)
+ }
+ }
+ impl TryIntoCtx<Endian> for $typ
+ where
+ $typ: IntoCtx<Endian>,
+ {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result<usize> {
+ if $size > dst.len() {
+ Err(error::Error::TooBig {
+ size: $size,
+ len: dst.len(),
+ })
+ } else {
+ <$typ as IntoCtx<Endian>>::into_ctx(self, dst, le);
+ Ok($size)
+ }
+ }
+ }
+ impl<'a> TryIntoCtx<Endian> for &'a $typ {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result<usize> {
+ (*self).try_into_ctx(dst, le)
+ }
+ }
+ };
+}
+
+macro_rules! from_ctx_impl {
+ ($typ:tt, $size:expr) => {
+ impl<'a> FromCtx<Endian> for $typ {
+ #[inline]
+ fn from_ctx(src: &[u8], le: Endian) -> Self {
+ assert!(src.len() >= $size);
+ let mut data: signed_to_unsigned!($typ) = 0;
+ unsafe {
+ copy_nonoverlapping(
+ src.as_ptr(),
+ &mut data as *mut signed_to_unsigned!($typ) as *mut u8,
+ $size,
+ );
+ }
+ (if le.is_little() {
+ data.to_le()
+ } else {
+ data.to_be()
+ }) as $typ
+ }
+ }
+
+ impl<'a> TryFromCtx<'a, Endian> for $typ
+ where
+ $typ: FromCtx<Endian>,
+ {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(
+ src: &'a [u8],
+ le: Endian,
+ ) -> result::Result<(Self, usize), Self::Error> {
+ if $size > src.len() {
+ Err(error::Error::TooBig {
+ size: $size,
+ len: src.len(),
+ })
+ } else {
+ Ok((FromCtx::from_ctx(&src, le), $size))
+ }
+ }
+ }
+ // as ref
+ impl<'a, T> FromCtx<Endian, T> for $typ
+ where
+ T: AsRef<[u8]>,
+ {
+ #[inline]
+ fn from_ctx(src: &T, le: Endian) -> Self {
+ let src = src.as_ref();
+ assert!(src.len() >= $size);
+ let mut data: signed_to_unsigned!($typ) = 0;
+ unsafe {
+ copy_nonoverlapping(
+ src.as_ptr(),
+ &mut data as *mut signed_to_unsigned!($typ) as *mut u8,
+ $size,
+ );
+ }
+ (if le.is_little() {
+ data.to_le()
+ } else {
+ data.to_be()
+ }) as $typ
+ }
+ }
+
+ impl<'a, T> TryFromCtx<'a, Endian, T> for $typ
+ where
+ $typ: FromCtx<Endian, T>,
+ T: AsRef<[u8]>,
+ {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a T, le: Endian) -> result::Result<(Self, usize), Self::Error> {
+ let src = src.as_ref();
+ Self::try_from_ctx(src, le)
+ }
+ }
+ };
+}
+
+macro_rules! ctx_impl {
+ ($typ:tt, $size:expr) => {
+ from_ctx_impl!($typ, $size);
+ };
+}
+
+ctx_impl!(u8, 1);
+ctx_impl!(i8, 1);
+ctx_impl!(u16, 2);
+ctx_impl!(i16, 2);
+ctx_impl!(u32, 4);
+ctx_impl!(i32, 4);
+ctx_impl!(u64, 8);
+ctx_impl!(i64, 8);
+ctx_impl!(u128, 16);
+ctx_impl!(i128, 16);
+
+macro_rules! from_ctx_float_impl {
+ ($typ:tt, $size:expr) => {
+ impl<'a> FromCtx<Endian> for $typ {
+ #[inline]
+ fn from_ctx(src: &[u8], le: Endian) -> Self {
+ assert!(src.len() >= ::core::mem::size_of::<Self>());
+ let mut data: signed_to_unsigned!($typ) = 0;
+ unsafe {
+ copy_nonoverlapping(
+ src.as_ptr(),
+ &mut data as *mut signed_to_unsigned!($typ) as *mut u8,
+ $size,
+ );
+ transmute(if le.is_little() {
+ data.to_le()
+ } else {
+ data.to_be()
+ })
+ }
+ }
+ }
+ impl<'a> TryFromCtx<'a, Endian> for $typ
+ where
+ $typ: FromCtx<Endian>,
+ {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(
+ src: &'a [u8],
+ le: Endian,
+ ) -> result::Result<(Self, usize), Self::Error> {
+ if $size > src.len() {
+ Err(error::Error::TooBig {
+ size: $size,
+ len: src.len(),
+ })
+ } else {
+ Ok((FromCtx::from_ctx(src, le), $size))
+ }
+ }
+ }
+ };
+}
+
+from_ctx_float_impl!(f32, 4);
+from_ctx_float_impl!(f64, 8);
+
+into_ctx_impl!(u8, 1);
+into_ctx_impl!(i8, 1);
+into_ctx_impl!(u16, 2);
+into_ctx_impl!(i16, 2);
+into_ctx_impl!(u32, 4);
+into_ctx_impl!(i32, 4);
+into_ctx_impl!(u64, 8);
+into_ctx_impl!(i64, 8);
+into_ctx_impl!(u128, 16);
+into_ctx_impl!(i128, 16);
+
+macro_rules! into_ctx_float_impl {
+ ($typ:tt, $size:expr) => {
+ impl IntoCtx<Endian> for $typ {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], le: Endian) {
+ assert!(dst.len() >= $size);
+ write_into!(
+ signed_to_unsigned!($typ),
+ $size,
+ transmute::<$typ, signed_to_unsigned!($typ)>(self),
+ dst,
+ le
+ );
+ }
+ }
+ impl<'a> IntoCtx<Endian> for &'a $typ {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], le: Endian) {
+ (*self).into_ctx(dst, le)
+ }
+ }
+ impl TryIntoCtx<Endian> for $typ
+ where
+ $typ: IntoCtx<Endian>,
+ {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result<usize> {
+ if $size > dst.len() {
+ Err(error::Error::TooBig {
+ size: $size,
+ len: dst.len(),
+ })
+ } else {
+ <$typ as IntoCtx<Endian>>::into_ctx(self, dst, le);
+ Ok($size)
+ }
+ }
+ }
+ impl<'a> TryIntoCtx<Endian> for &'a $typ {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result<usize> {
+ (*self).try_into_ctx(dst, le)
+ }
+ }
+ };
+}
+
+into_ctx_float_impl!(f32, 4);
+into_ctx_float_impl!(f64, 8);
+
+impl<'a> TryFromCtx<'a, StrCtx> for &'a str {
+ type Error = error::Error;
+ #[inline]
+ /// Read a `&str` from `src` using `delimiter`
+ fn try_from_ctx(src: &'a [u8], ctx: StrCtx) -> Result<(Self, usize), Self::Error> {
+ let len = match ctx {
+ StrCtx::Length(len) => len,
+ StrCtx::Delimiter(delimiter) => src.iter().take_while(|c| **c != delimiter).count(),
+ StrCtx::DelimiterUntil(delimiter, len) => {
+ if len > src.len() {
+ return Err(error::Error::TooBig {
+ size: len,
+ len: src.len(),
+ });
+ };
+ src.iter()
+ .take_while(|c| **c != delimiter)
+ .take(len)
+ .count()
+ }
+ };
+
+ if len > src.len() {
+ return Err(error::Error::TooBig {
+ size: len,
+ len: src.len(),
+ });
+ };
+
+ match str::from_utf8(&src[..len]) {
+ Ok(res) => Ok((res, len + ctx.len())),
+ Err(_) => Err(error::Error::BadInput {
+ size: src.len(),
+ msg: "invalid utf8",
+ }),
+ }
+ }
+}
+
+impl<'a, T> TryFromCtx<'a, StrCtx, T> for &'a str
+where
+ T: AsRef<[u8]>,
+{
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a T, ctx: StrCtx) -> result::Result<(Self, usize), Self::Error> {
+ let src = src.as_ref();
+ TryFromCtx::try_from_ctx(src, ctx)
+ }
+}
+
+impl<'a> TryIntoCtx for &'a [u8] {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result<usize> {
+ let src_len = self.len() as isize;
+ let dst_len = dst.len() as isize;
+ // if src_len < 0 || dst_len < 0 || offset < 0 {
+ // return Err(error::Error::BadOffset(format!("requested operation has negative casts: src len: {} dst len: {} offset: {}", src_len, dst_len, offset)).into())
+ // }
+ if src_len > dst_len {
+ Err(error::Error::TooBig {
+ size: self.len(),
+ len: dst.len(),
+ })
+ } else {
+ unsafe { copy_nonoverlapping(self.as_ptr(), dst.as_mut_ptr(), src_len as usize) };
+ Ok(self.len())
+ }
+ }
+}
+
+// TODO: make TryIntoCtx use StrCtx for awesomeness
+impl<'a> TryIntoCtx for &'a str {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result<usize> {
+ let bytes = self.as_bytes();
+ TryIntoCtx::try_into_ctx(bytes, dst, ())
+ }
+}
+
+// TODO: we can make this compile time without size_of call, but compiler probably does that anyway
+macro_rules! sizeof_impl {
+ ($ty:ty) => {
+ impl SizeWith<Endian> for $ty {
+ #[inline]
+ fn size_with(_ctx: &Endian) -> usize {
+ size_of::<$ty>()
+ }
+ }
+ };
+}
+
+sizeof_impl!(u8);
+sizeof_impl!(i8);
+sizeof_impl!(u16);
+sizeof_impl!(i16);
+sizeof_impl!(u32);
+sizeof_impl!(i32);
+sizeof_impl!(u64);
+sizeof_impl!(i64);
+sizeof_impl!(u128);
+sizeof_impl!(i128);
+sizeof_impl!(f32);
+sizeof_impl!(f64);
+
+impl<'a> TryFromCtx<'a, usize> for &'a [u8] {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], size: usize) -> result::Result<(Self, usize), Self::Error> {
+ if size > src.len() {
+ Err(error::Error::TooBig {
+ size,
+ len: src.len(),
+ })
+ } else {
+ Ok((&src[..size], size))
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl<'a> TryFromCtx<'a> for &'a CStr {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> {
+ let null_byte = match src.iter().position(|b| *b == 0) {
+ Some(ix) => ix,
+ None => {
+ return Err(error::Error::BadInput {
+ size: 0,
+ msg: "The input doesn't contain a null byte",
+ })
+ }
+ };
+
+ let cstr = unsafe { CStr::from_bytes_with_nul_unchecked(&src[..=null_byte]) };
+ Ok((cstr, null_byte + 1))
+ }
+}
+
+#[cfg(feature = "std")]
+impl<'a> TryFromCtx<'a> for CString {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> {
+ let (raw, bytes_read) = <&CStr as TryFromCtx>::try_from_ctx(src, _ctx)?;
+ Ok((raw.to_owned(), bytes_read))
+ }
+}
+
+#[cfg(feature = "std")]
+impl<'a> TryIntoCtx for &'a CStr {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result<usize> {
+ let data = self.to_bytes_with_nul();
+
+ if dst.len() < data.len() {
+ Err(error::Error::TooBig {
+ size: dst.len(),
+ len: data.len(),
+ })
+ } else {
+ unsafe {
+ copy_nonoverlapping(data.as_ptr(), dst.as_mut_ptr(), data.len());
+ }
+
+ Ok(data.len())
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl TryIntoCtx for CString {
+ type Error = error::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result<usize> {
+ self.as_c_str().try_into_ctx(dst, ())
+ }
+}
+
+// example of marshalling to bytes, let's wait until const is an option
+// impl FromCtx for [u8; 10] {
+// fn from_ctx(bytes: &[u8], _ctx: Endian) -> Self {
+// let mut dst: Self = [0; 10];
+// assert!(bytes.len() >= dst.len());
+// unsafe {
+// copy_nonoverlapping(bytes.as_ptr(), dst.as_mut_ptr(), dst.len());
+// }
+// dst
+// }
+// }
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn parse_a_cstr() {
+ let src = CString::new("Hello World").unwrap();
+ let as_bytes = src.as_bytes_with_nul();
+
+ let (got, bytes_read) = <&CStr as TryFromCtx>::try_from_ctx(as_bytes, ()).unwrap();
+
+ assert_eq!(bytes_read, as_bytes.len());
+ assert_eq!(got, src.as_c_str());
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn round_trip_a_c_str() {
+ let src = CString::new("Hello World").unwrap();
+ let src = src.as_c_str();
+ let as_bytes = src.to_bytes_with_nul();
+
+ let mut buffer = vec![0; as_bytes.len()];
+ let bytes_written = src.try_into_ctx(&mut buffer, ()).unwrap();
+ assert_eq!(bytes_written, as_bytes.len());
+
+ let (got, bytes_read) = <&CStr as TryFromCtx>::try_from_ctx(&buffer, ()).unwrap();
+
+ assert_eq!(bytes_read, as_bytes.len());
+ assert_eq!(got, src);
+ }
+}
diff --git a/third_party/rust/scroll/src/endian.rs b/third_party/rust/scroll/src/endian.rs
new file mode 100644
index 0000000000..06d7a1dc1c
--- /dev/null
+++ b/third_party/rust/scroll/src/endian.rs
@@ -0,0 +1,51 @@
+#[derive(PartialEq, Eq, Copy, Debug, Clone)]
+/// The endianness (byte order) of a stream of bytes
+pub enum Endian {
+ Little,
+ Big,
+}
+
+/// Little Endian byte order context
+pub const LE: Endian = Endian::Little;
+/// Big Endian byte order context
+pub const BE: Endian = Endian::Big;
+/// Network byte order context
+pub const NETWORK: Endian = Endian::Big;
+#[cfg(target_endian = "little")]
+/// The machine's native byte order
+pub const NATIVE: Endian = LE;
+#[cfg(target_endian = "big")]
+/// The machine's native byte order
+pub const NATIVE: Endian = BE;
+
+impl Default for Endian {
+ #[inline]
+ fn default() -> Self {
+ NATIVE
+ }
+}
+
+impl From<bool> for Endian {
+ #[inline]
+ fn from(little_endian: bool) -> Self {
+ if little_endian {
+ LE
+ } else {
+ BE
+ }
+ }
+}
+
+impl Endian {
+ #[inline]
+ pub fn network() -> Endian {
+ NETWORK
+ }
+ #[inline]
+ pub fn is_little(&self) -> bool {
+ match *self {
+ LE => true,
+ _ => false,
+ }
+ }
+}
diff --git a/third_party/rust/scroll/src/error.rs b/third_party/rust/scroll/src/error.rs
new file mode 100644
index 0000000000..7740254774
--- /dev/null
+++ b/third_party/rust/scroll/src/error.rs
@@ -0,0 +1,84 @@
+use core::fmt::{self, Display};
+use core::result;
+
+#[cfg(feature = "std")]
+use std::error;
+#[cfg(feature = "std")]
+use std::io;
+
+#[derive(Debug)]
+/// A custom Scroll error
+pub enum Error {
+ /// The type you tried to read was too big
+ TooBig {
+ size: usize,
+ len: usize,
+ },
+ /// The requested offset to read/write at is invalid
+ BadOffset(usize),
+ BadInput {
+ size: usize,
+ msg: &'static str,
+ },
+ #[cfg(feature = "std")]
+ /// A custom Scroll error for reporting messages to clients
+ Custom(String),
+ #[cfg(feature = "std")]
+ /// Returned when IO based errors are encountered
+ IO(io::Error),
+}
+
+#[cfg(feature = "std")]
+impl error::Error for Error {
+ fn description(&self) -> &str {
+ match *self {
+ Error::TooBig { .. } => "TooBig",
+ Error::BadOffset(_) => "BadOffset",
+ Error::BadInput { .. } => "BadInput",
+ Error::Custom(_) => "Custom",
+ Error::IO(_) => "IO",
+ }
+ }
+ fn cause(&self) -> Option<&dyn error::Error> {
+ match *self {
+ Error::TooBig { .. } => None,
+ Error::BadOffset(_) => None,
+ Error::BadInput { .. } => None,
+ Error::Custom(_) => None,
+ Error::IO(ref io) => io.source(),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl From<io::Error> for Error {
+ fn from(err: io::Error) -> Error {
+ Error::IO(err)
+ }
+}
+
+impl Display for Error {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Error::TooBig { ref size, ref len } => {
+ write!(fmt, "type is too big ({}) for {}", size, len)
+ }
+ Error::BadOffset(ref offset) => {
+ write!(fmt, "bad offset {}", offset)
+ }
+ Error::BadInput { ref msg, ref size } => {
+ write!(fmt, "bad input {} ({})", msg, size)
+ }
+ #[cfg(feature = "std")]
+ Error::Custom(ref msg) => {
+ write!(fmt, "{}", msg)
+ }
+ #[cfg(feature = "std")]
+ Error::IO(ref err) => {
+ write!(fmt, "{}", err)
+ }
+ }
+ }
+}
+
+pub type Result<T> = result::Result<T, Error>;
diff --git a/third_party/rust/scroll/src/greater.rs b/third_party/rust/scroll/src/greater.rs
new file mode 100644
index 0000000000..353aab7c4c
--- /dev/null
+++ b/third_party/rust/scroll/src/greater.rs
@@ -0,0 +1,169 @@
+use core::ops::{Index, IndexMut, RangeFrom};
+
+use crate::ctx::{FromCtx, IntoCtx};
+
+/// Core-read - core, no_std friendly trait for reading basic types from byte buffers. Cannot fail
+/// unless the buffer is too small, in which case an assert fires and the program panics.
+///
+/// If your type implements [FromCtx](ctx/trait.FromCtx.html) then you can `cread::<YourType>(offset)`.
+///
+/// # Example
+///
+/// ```rust
+/// use scroll::{ctx, Cread, LE};
+///
+/// #[repr(packed)]
+/// struct Bar {
+/// foo: i32,
+/// bar: u32,
+/// }
+///
+/// impl ctx::FromCtx<scroll::Endian> for Bar {
+/// fn from_ctx(bytes: &[u8], ctx: scroll::Endian) -> Self {
+/// use scroll::Cread;
+/// Bar { foo: bytes.cread_with(0, ctx), bar: bytes.cread_with(4, ctx) }
+/// }
+/// }
+///
+/// let bytes = [0xff, 0xff, 0xff, 0xff, 0xef,0xbe,0xad,0xde,];
+/// let bar = bytes.cread_with::<Bar>(0, LE);
+/// // Remember that you need to copy out fields from packed structs
+/// // with a `{}` block instead of borrowing them directly
+/// // ref: https://github.com/rust-lang/rust/issues/46043
+/// assert_eq!({bar.foo}, -1);
+/// assert_eq!({bar.bar}, 0xdeadbeef);
+/// ```
+pub trait Cread<Ctx, I = usize>: Index<I> + Index<RangeFrom<I>>
+where
+ Ctx: Copy,
+{
+ /// Reads a value from `Self` at `offset` with `ctx`. Cannot fail.
+ /// If the buffer is too small for the value requested, this will panic.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// use scroll::{Cread, BE, LE};
+ /// use std::i64::MAX;
+ ///
+ /// let bytes = [0x7f,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xef,0xbe,0xad,0xde,];
+ /// let foo = bytes.cread_with::<i64>(0, BE);
+ /// let bar = bytes.cread_with::<u32>(8, LE);
+ /// assert_eq!(foo, MAX);
+ /// assert_eq!(bar, 0xdeadbeef);
+ /// ```
+ #[inline]
+ fn cread_with<N: FromCtx<Ctx, <Self as Index<RangeFrom<I>>>::Output>>(
+ &self,
+ offset: I,
+ ctx: Ctx,
+ ) -> N {
+ N::from_ctx(&self[offset..], ctx)
+ }
+ /// Reads a value implementing `FromCtx` from `Self` at `offset`,
+    /// using the default context.
+ /// For the primitive types, this will be the **target machine**'s endianness.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// use scroll::Cread;
+ ///
+ /// let bytes = [0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0xef,0xbe,0x00,0x00,];
+ /// let foo = bytes.cread::<i64>(0);
+ /// let bar = bytes.cread::<u32>(8);
+ /// #[cfg(target_endian = "little")]
+ /// assert_eq!(foo, 1);
+ /// #[cfg(target_endian = "big")]
+ /// assert_eq!(foo, 0x100_0000_0000_0000);
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// assert_eq!(bar, 0xbeef);
+ /// #[cfg(target_endian = "big")]
+ /// assert_eq!(bar, 0xefbe0000);
+ /// ```
+ #[inline]
+ fn cread<N: FromCtx<Ctx, <Self as Index<RangeFrom<I>>>::Output>>(&self, offset: I) -> N
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ N::from_ctx(&self[offset..], ctx)
+ }
+}
+
+impl<Ctx: Copy, I, R: ?Sized + Index<I> + Index<RangeFrom<I>>> Cread<Ctx, I> for R {}
+
+/// Core-write - core, no_std friendly trait for writing basic types into byte buffers. Cannot fail
+/// unless the buffer is too small, in which case an assert fires and the program panics.
+/// Similar to [Cread](trait.Cread.html), if your type implements [IntoCtx](ctx/trait.IntoCtx.html)
+/// then you can `cwrite(your_type, offset)`.
+///
+/// # Example
+///
+/// ```rust
+/// use scroll::{ctx, Cwrite};
+///
+/// #[repr(packed)]
+/// struct Bar {
+/// foo: i32,
+/// bar: u32,
+/// }
+///
+/// impl ctx::IntoCtx<scroll::Endian> for Bar {
+/// fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
+/// use scroll::Cwrite;
+/// bytes.cwrite_with(self.foo, 0, ctx);
+/// bytes.cwrite_with(self.bar, 4, ctx);
+/// }
+/// }
+///
+/// let bar = Bar { foo: -1, bar: 0xdeadbeef };
+/// let mut bytes = [0x0; 16];
+/// bytes.cwrite::<Bar>(bar, 0);
+/// ```
+pub trait Cwrite<Ctx: Copy, I = usize>: Index<I> + IndexMut<RangeFrom<I>> {
+ /// Writes `n` into `Self` at `offset`; uses default context.
+ /// For the primitive types, this will be the **target machine**'s endianness.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use scroll::{Cwrite, Cread};
+ /// let mut bytes = [0x0; 16];
+ /// bytes.cwrite::<i64>(42, 0);
+ /// bytes.cwrite::<u32>(0xdeadbeef, 8);
+ ///
+ /// assert_eq!(bytes.cread::<i64>(0), 42);
+    /// assert_eq!(bytes.cread::<u32>(8), 0xdeadbeef);
+    /// ```
+ #[inline]
+ fn cwrite<N: IntoCtx<Ctx, <Self as Index<RangeFrom<I>>>::Output>>(&mut self, n: N, offset: I)
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ n.into_ctx(self.index_mut(offset..), ctx)
+ }
+ /// Writes `n` into `Self` at `offset` with `ctx`
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use scroll::{Cwrite, Cread, LE, BE};
+ /// let mut bytes = [0x0; 0x10];
+ /// bytes.cwrite_with::<i64>(42, 0, LE);
+ /// bytes.cwrite_with::<u32>(0xdeadbeef, 8, BE);
+ /// assert_eq!(bytes.cread_with::<i64>(0, LE), 42);
+    /// assert_eq!(bytes.cread_with::<u32>(8, LE), 0xefbeadde);
+    /// ```
+ #[inline]
+ fn cwrite_with<N: IntoCtx<Ctx, <Self as Index<RangeFrom<I>>>::Output>>(
+ &mut self,
+ n: N,
+ offset: I,
+ ctx: Ctx,
+ ) {
+ n.into_ctx(self.index_mut(offset..), ctx)
+ }
+}
+
+impl<Ctx: Copy, I, W: ?Sized + Index<I> + IndexMut<RangeFrom<I>>> Cwrite<Ctx, I> for W {}
diff --git a/third_party/rust/scroll/src/leb128.rs b/third_party/rust/scroll/src/leb128.rs
new file mode 100644
index 0000000000..43f50b95f1
--- /dev/null
+++ b/third_party/rust/scroll/src/leb128.rs
@@ -0,0 +1,249 @@
+use crate::ctx::TryFromCtx;
+use crate::error;
+use crate::Pread;
+use core::convert::{AsRef, From};
+use core::result;
+use core::u8;
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+/// An unsigned leb128 integer
+pub struct Uleb128 {
+ value: u64,
+ count: usize,
+}
+
+impl Uleb128 {
+ #[inline]
+    /// Return how many bytes this Uleb128 occupied in its encoded form
+ pub fn size(&self) -> usize {
+ self.count
+ }
+ #[inline]
+ /// Read a variable length u64 from `bytes` at `offset`
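+    ///
+    /// # Example
+    ///
+    /// A minimal sketch; the two-byte encoding below is illustrative.
+    ///
+    /// ```rust
+    /// use scroll::Uleb128;
+    /// // 0xde carries the continuation bit, so the low 7 bits of 0x0d are appended above its low 7 bits
+    /// let bytes = [0xde, 0x0d];
+    /// let offset = &mut 0;
+    /// let value = Uleb128::read(&bytes, offset).unwrap();
+    /// assert_eq!(value, 0x6de);
+    /// assert_eq!(*offset, 2);
+    /// ```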
+ pub fn read(bytes: &[u8], offset: &mut usize) -> error::Result<u64> {
+ let tmp = bytes.pread::<Uleb128>(*offset)?;
+ *offset += tmp.size();
+ Ok(tmp.into())
+ }
+}
+
+impl AsRef<u64> for Uleb128 {
+ fn as_ref(&self) -> &u64 {
+ &self.value
+ }
+}
+
+impl From<Uleb128> for u64 {
+ #[inline]
+ fn from(uleb128: Uleb128) -> u64 {
+ uleb128.value
+ }
+}
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+/// A signed leb128 integer
+pub struct Sleb128 {
+ value: i64,
+ count: usize,
+}
+
+impl Sleb128 {
+ #[inline]
+    /// Return how many bytes this Sleb128 occupied in its encoded form
+ pub fn size(&self) -> usize {
+ self.count
+ }
+ #[inline]
+ /// Read a variable length i64 from `bytes` at `offset`
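+    ///
+    /// # Example
+    ///
+    /// A minimal sketch; the single-byte encoding of -1 below is illustrative.
+    ///
+    /// ```rust
+    /// use scroll::Sleb128;
+    /// // 0x7f has no continuation bit and its sign bit (0x40) is set, so it decodes to -1
+    /// let bytes = [0x7f];
+    /// let offset = &mut 0;
+    /// let value = Sleb128::read(&bytes, offset).unwrap();
+    /// assert_eq!(value, -1);
+    /// assert_eq!(*offset, 1);
+    /// ```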
+ pub fn read(bytes: &[u8], offset: &mut usize) -> error::Result<i64> {
+ let tmp = bytes.pread::<Sleb128>(*offset)?;
+ *offset += tmp.size();
+ Ok(tmp.into())
+ }
+}
+
+impl AsRef<i64> for Sleb128 {
+ fn as_ref(&self) -> &i64 {
+ &self.value
+ }
+}
+
+impl From<Sleb128> for i64 {
+ #[inline]
+ fn from(sleb128: Sleb128) -> i64 {
+ sleb128.value
+ }
+}
+
+// Below implementation heavily adapted from: https://github.com/fitzgen/leb128
+const CONTINUATION_BIT: u8 = 1 << 7;
+const SIGN_BIT: u8 = 1 << 6;
+
+#[inline]
+fn mask_continuation(byte: u8) -> u8 {
+ byte & !CONTINUATION_BIT
+}
+
+// #[inline]
+// fn mask_continuation_u64(val: u64) -> u8 {
+// let byte = val & (u8::MAX as u64);
+// mask_continuation(byte as u8)
+// }
+
+impl<'a> TryFromCtx<'a> for Uleb128 {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> {
+ let mut result = 0;
+ let mut shift = 0;
+ let mut count = 0;
+ loop {
+ let byte: u8 = src.pread(count)?;
+
+ if shift == 63 && byte != 0x00 && byte != 0x01 {
+ return Err(error::Error::BadInput {
+ size: src.len(),
+ msg: "failed to parse",
+ });
+ }
+
+ let low_bits = u64::from(mask_continuation(byte));
+ result |= low_bits << shift;
+
+ count += 1;
+ shift += 7;
+
+ if byte & CONTINUATION_BIT == 0 {
+ return Ok((
+ Uleb128 {
+ value: result,
+ count,
+ },
+ count,
+ ));
+ }
+ }
+ }
+}
+
+impl<'a> TryFromCtx<'a> for Sleb128 {
+ type Error = error::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> {
+ let o = 0;
+ let offset = &mut 0;
+ let mut result = 0;
+ let mut shift = 0;
+ let size = 64;
+ let mut byte: u8;
+ loop {
+ byte = src.gread(offset)?;
+
+ if shift == 63 && byte != 0x00 && byte != 0x7f {
+ return Err(error::Error::BadInput {
+ size: src.len(),
+ msg: "failed to parse",
+ });
+ }
+
+ let low_bits = i64::from(mask_continuation(byte));
+ result |= low_bits << shift;
+ shift += 7;
+
+ if byte & CONTINUATION_BIT == 0 {
+ break;
+ }
+ }
+
+ if shift < size && (SIGN_BIT & byte) == SIGN_BIT {
+ // Sign extend the result.
+ result |= !0 << shift;
+ }
+ let count = *offset - o;
+ Ok((
+ Sleb128 {
+ value: result,
+ count,
+ },
+ count,
+ ))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::super::LE;
+ use super::{Sleb128, Uleb128};
+
+ const CONTINUATION_BIT: u8 = 1 << 7;
+ //const SIGN_BIT: u8 = 1 << 6;
+
+ #[test]
+ fn uleb_size() {
+ use super::super::Pread;
+ let buf = [2u8 | CONTINUATION_BIT, 1];
+ let bytes = &buf[..];
+ let num = bytes.pread::<Uleb128>(0).unwrap();
+ println!("num: {:?}", &num);
+ assert_eq!(130u64, num.into());
+ assert_eq!(num.size(), 2);
+
+ let buf = [0x00, 0x01];
+ let bytes = &buf[..];
+ let num = bytes.pread::<Uleb128>(0).unwrap();
+ println!("num: {:?}", &num);
+ assert_eq!(0u64, num.into());
+ assert_eq!(num.size(), 1);
+
+ let buf = [0x21];
+ let bytes = &buf[..];
+ let num = bytes.pread::<Uleb128>(0).unwrap();
+ println!("num: {:?}", &num);
+ assert_eq!(0x21u64, num.into());
+ assert_eq!(num.size(), 1);
+ }
+
+ #[test]
+ fn uleb128() {
+ use super::super::Pread;
+ let buf = [2u8 | CONTINUATION_BIT, 1];
+ let bytes = &buf[..];
+ let num = bytes.pread::<Uleb128>(0).expect("Should read Uleb128");
+ assert_eq!(130u64, num.into());
+ assert_eq!(
+ 386,
+ bytes.pread_with::<u16>(0, LE).expect("Should read number")
+ );
+ }
+
+ #[test]
+ fn uleb128_overflow() {
+ use super::super::Pread;
+ let buf = [
+ 2u8 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 2 | CONTINUATION_BIT,
+ 1,
+ ];
+ let bytes = &buf[..];
+ assert!(bytes.pread::<Uleb128>(0).is_err());
+ }
+
+ #[test]
+ fn sleb128() {
+ use super::super::Pread;
+ let bytes = [0x7fu8 | CONTINUATION_BIT, 0x7e];
+ let num: i64 = bytes
+ .pread::<Sleb128>(0)
+ .expect("Should read Sleb128")
+ .into();
+ assert_eq!(-129, num);
+ }
+}
diff --git a/third_party/rust/scroll/src/lesser.rs b/third_party/rust/scroll/src/lesser.rs
new file mode 100644
index 0000000000..46ef4c5b11
--- /dev/null
+++ b/third_party/rust/scroll/src/lesser.rs
@@ -0,0 +1,178 @@
+use crate::ctx::{FromCtx, IntoCtx, SizeWith};
+use std::io::{Read, Result, Write};
+
+/// An extension trait to `std::io::Read` streams; mainly targeted at reading primitive types with
+/// a known size.
+///
+/// Requires types to implement [`FromCtx`](ctx/trait.FromCtx.html) and [`SizeWith`](ctx/trait.SizeWith.html).
+///
+/// **NB** You should probably add `repr(C)` and be very careful how you implement
+/// [`SizeWith`](ctx/trait.SizeWith.html), otherwise you will get I/O errors about failing to fill the
+/// entire buffer (the size you specified in `SizeWith`), or out-of-bounds errors (depending on your
+/// impl) in `from_ctx`.
+///
+/// Warning: `ioread`/`iowrite` currently use a small 256-byte buffer and cannot read/write larger types.
+///
+/// # Example
+/// ```rust
+/// use std::io::Cursor;
+/// use scroll::{self, ctx, LE, Pread, IOread};
+///
+/// #[repr(packed)]
+/// struct Foo {
+/// foo: i64,
+/// bar: u32,
+/// }
+///
+/// impl ctx::FromCtx<scroll::Endian> for Foo {
+/// fn from_ctx(bytes: &[u8], ctx: scroll::Endian) -> Self {
+/// Foo { foo: bytes.pread_with::<i64>(0, ctx).unwrap(), bar: bytes.pread_with::<u32>(8, ctx).unwrap() }
+/// }
+/// }
+///
+/// impl ctx::SizeWith<scroll::Endian> for Foo {
+/// // our parsing context doesn't influence our size
+/// fn size_with(_: &scroll::Endian) -> usize {
+/// ::std::mem::size_of::<Foo>()
+/// }
+/// }
+///
+/// let bytes_ = [0x0b,0x0b,0x00,0x00,0x00,0x00,0x00,0x00, 0xef,0xbe,0x00,0x00,];
+/// let mut bytes = Cursor::new(bytes_);
+/// let foo = bytes.ioread_with::<i64>(LE).unwrap();
+/// let bar = bytes.ioread_with::<u32>(LE).unwrap();
+/// assert_eq!(foo, 0xb0b);
+/// assert_eq!(bar, 0xbeef);
+/// let error = bytes.ioread_with::<f64>(LE);
+/// assert!(error.is_err());
+/// let mut bytes = Cursor::new(bytes_);
+/// let foo_ = bytes.ioread_with::<Foo>(LE).unwrap();
+/// // Remember that you need to copy out fields from packed structs
+/// // with a `{}` block instead of borrowing them directly
+/// // ref: https://github.com/rust-lang/rust/issues/46043
+/// assert_eq!({foo_.foo}, foo);
+/// assert_eq!({foo_.bar}, bar);
+/// ```
+///
+pub trait IOread<Ctx: Copy>: Read {
+ /// Reads the type `N` from `Self`, with a default parsing context.
+    /// For the primitive numeric types, this will use the host machine's endianness.
+ ///
+ /// # Example
+ /// ```rust
+ /// use scroll::IOread;
+ /// use std::io::Cursor;
+ /// let bytes = [0xef, 0xbe];
+ /// let mut bytes = Cursor::new(&bytes[..]);
+ /// let beef = bytes.ioread::<u16>().unwrap();
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// assert_eq!(0xbeef, beef);
+ /// #[cfg(target_endian = "big")]
+ /// assert_eq!(0xefbe, beef);
+ /// ```
+ #[inline]
+ fn ioread<N: FromCtx<Ctx> + SizeWith<Ctx>>(&mut self) -> Result<N>
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ self.ioread_with(ctx)
+ }
+
+ /// Reads the type `N` from `Self`, with the parsing context `ctx`.
+    /// **NB**: this will panic if the type you're reading has a size greater than 256 bytes. Plans are to have this allocate in larger cases.
+    ///
+    /// For the primitive numeric types, the endianness is determined by `ctx`.
+ ///
+ /// # Example
+ /// ```rust
+ /// use scroll::{IOread, LE, BE};
+ /// use std::io::Cursor;
+ /// let bytes = [0xef, 0xbe, 0xb0, 0xb0, 0xfe, 0xed, 0xde, 0xad];
+ /// let mut bytes = Cursor::new(&bytes[..]);
+ /// let beef = bytes.ioread_with::<u16>(LE).unwrap();
+ /// assert_eq!(0xbeef, beef);
+ /// let b0 = bytes.ioread::<u8>().unwrap();
+ /// assert_eq!(0xb0, b0);
+ /// let b0 = bytes.ioread::<u8>().unwrap();
+ /// assert_eq!(0xb0, b0);
+ /// let feeddead = bytes.ioread_with::<u32>(BE).unwrap();
+ /// assert_eq!(0xfeeddead, feeddead);
+ /// ```
+ #[inline]
+ fn ioread_with<N: FromCtx<Ctx> + SizeWith<Ctx>>(&mut self, ctx: Ctx) -> Result<N> {
+ let mut scratch = [0u8; 256];
+ let size = N::size_with(&ctx);
+ let mut buf = &mut scratch[0..size];
+ self.read_exact(&mut buf)?;
+ Ok(N::from_ctx(buf, ctx))
+ }
+}
+
+/// Types that implement `Read` get methods defined in `IOread`
+/// for free.
+impl<Ctx: Copy, R: Read + ?Sized> IOread<Ctx> for R {}
+
+/// An extension trait to `std::io::Write` streams; this only serializes simple types, like `u8`, `i32`, `f32`, `usize`, etc.
+///
+/// To write custom types with a single `iowrite::<YourType>` call, implement [`IntoCtx`](ctx/trait.IntoCtx.html) and [`SizeWith`](ctx/trait.SizeWith.html) for `YourType`.
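+///
+/// # Example
+///
+/// A minimal sketch of wiring up a custom type; the `Pair` type and its field layout below are
+/// purely illustrative.
+///
+/// ```rust
+/// use std::io::Cursor;
+/// use scroll::{ctx, IOwrite, LE};
+///
+/// struct Pair { a: u16, b: u16 }
+///
+/// impl ctx::IntoCtx<scroll::Endian> for Pair {
+///     fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
+///         use scroll::Cwrite;
+///         bytes.cwrite_with(self.a, 0, ctx);
+///         bytes.cwrite_with(self.b, 2, ctx);
+///     }
+/// }
+///
+/// impl ctx::SizeWith<scroll::Endian> for Pair {
+///     fn size_with(_: &scroll::Endian) -> usize {
+///         ::std::mem::size_of::<u16>() * 2
+///     }
+/// }
+///
+/// let mut cursor = Cursor::new(vec![0u8; 4]);
+/// cursor.iowrite_with(Pair { a: 0xbeef, b: 0xdead }, LE).unwrap();
+/// assert_eq!(cursor.into_inner(), vec![0xef, 0xbe, 0xad, 0xde]);
+/// ```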
+pub trait IOwrite<Ctx: Copy>: Write {
+    /// Writes the type `N` into `Self`, with a default parsing context.
+    /// **NB**: this will panic if the type you're writing has a size greater than 256 bytes. Plans are to have this allocate in larger cases.
+    ///
+    /// For the primitive numeric types, this will use the host machine's endianness.
+ ///
+ /// # Example
+ /// ```rust
+ /// use scroll::IOwrite;
+ /// use std::io::Cursor;
+ ///
+ /// let mut bytes = [0x0u8; 4];
+ /// let mut bytes = Cursor::new(&mut bytes[..]);
+ /// bytes.iowrite(0xdeadbeef as u32).unwrap();
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// assert_eq!(bytes.into_inner(), [0xef, 0xbe, 0xad, 0xde,]);
+ /// #[cfg(target_endian = "big")]
+ /// assert_eq!(bytes.into_inner(), [0xde, 0xad, 0xbe, 0xef,]);
+ /// ```
+ #[inline]
+ fn iowrite<N: SizeWith<Ctx> + IntoCtx<Ctx>>(&mut self, n: N) -> Result<()>
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ self.iowrite_with(n, ctx)
+ }
+
+ /// Writes the type `N` into `Self`, with the parsing context `ctx`.
+    /// **NB**: this will panic if the type you're writing has a size greater than 256 bytes. Plans are to have this allocate in larger cases.
+    ///
+    /// For the primitive numeric types, the endianness is determined by `ctx`.
+ ///
+ /// # Example
+ /// ```rust
+ /// use scroll::{IOwrite, LE, BE};
+ /// use std::io::{Write, Cursor};
+ ///
+ /// let mut bytes = [0x0u8; 10];
+ /// let mut cursor = Cursor::new(&mut bytes[..]);
+ /// cursor.write_all(b"hello").unwrap();
+ /// cursor.iowrite_with(0xdeadbeef as u32, BE).unwrap();
+ /// assert_eq!(cursor.into_inner(), [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0xde, 0xad, 0xbe, 0xef, 0x0]);
+ /// ```
+ #[inline]
+ fn iowrite_with<N: SizeWith<Ctx> + IntoCtx<Ctx>>(&mut self, n: N, ctx: Ctx) -> Result<()> {
+ let mut buf = [0u8; 256];
+ let size = N::size_with(&ctx);
+ let buf = &mut buf[0..size];
+ n.into_ctx(buf, ctx);
+ self.write_all(buf)?;
+ Ok(())
+ }
+}
+
+/// Types that implement `Write` get methods defined in `IOwrite`
+/// for free.
+impl<Ctx: Copy, W: Write + ?Sized> IOwrite<Ctx> for W {}
diff --git a/third_party/rust/scroll/src/lib.rs b/third_party/rust/scroll/src/lib.rs
new file mode 100644
index 0000000000..dcb58e7564
--- /dev/null
+++ b/third_party/rust/scroll/src/lib.rs
@@ -0,0 +1,637 @@
+//! # Scroll
+//!
+//! ```text, no_run
+//! _______________
+//! ()==( (@==()
+//! '______________'|
+//! | |
+//! | ἀρετή |
+//! __)_____________|
+//! ()==( (@==()
+//! '--------------'
+//!
+//! ```
+//!
+//! Scroll is a library for easily and efficiently reading/writing types from data containers like
+//! byte arrays.
+//!
+//! ## Easily:
+//!
+//! Scroll sets down a number of traits:
+//!
+//! [FromCtx](ctx/trait.FromCtx.html), [IntoCtx](ctx/trait.IntoCtx.html),
+//! [TryFromCtx](ctx/trait.TryFromCtx.html) and [TryIntoCtx](ctx/trait.TryIntoCtx.html) — further
+//! explained in the [ctx module](ctx/index.html); to be implemented on custom types to allow
+//! reading, writing, and potentially fallible reading/writing respectively.
+//!
+//! [Pread](trait.Pread.html) and [Pwrite](trait.Pwrite.html) which are implemented on data
+//! containers such as byte arrays to define how to read or respectively write types implementing
+//! the *Ctx traits above.
+//! In addition, scroll defines [IOread](trait.IOread.html) and
+//! [IOwrite](trait.IOwrite.html) with additional constraints that then allow reading and writing
+//! from `std::io` [Read](https://doc.rust-lang.org/nightly/std/io/trait.Read.html) and
+//! [Write](https://doc.rust-lang.org/nightly/std/io/trait.Write.html).
+//!
+//!
+//! In most cases you can use [scroll_derive](https://docs.rs/scroll_derive) to derive sensible
+//! defaults for `Pread`, `Pwrite`, their IO counterpart and `SizeWith`. More complex situations
+//! call for manual implementation of those traits; refer to [the ctx module](ctx/index.html) for
+//! details.
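+//!
+//! For instance, a sketch of what a derived reader can look like (this needs the optional
+//! `derive` feature, and the `Header` layout below is purely illustrative):
+//!
+//! ```rust,ignore
+//! use scroll::{Pread, LE};
+//!
+//! #[derive(Pread)]
+//! struct Header {
+//!     magic: u32,
+//!     version: u16,
+//! }
+//!
+//! let bytes = [0xefu8, 0xbe, 0xad, 0xde, 0x01, 0x00];
+//! let header: Header = bytes.pread_with(0, LE).unwrap();
+//! assert_eq!(header.magic, 0xdeadbeef);
+//! assert_eq!(header.version, 1);
+//! ```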
+//!
+//!
+//! ## Efficiently:
+//!
+//! Reading slices — including [&str](https://doc.rust-lang.org/std/primitive.str.html) — is
+//! zero-copy. Scroll is designed with a `no_std` context in mind; every dependency on `std` is
+//! cfg-gated and errors need not allocate.
+//!
+//! Reads by default take only immutable references wherever possible, allowing for trivial
+//! parallelization.
+//!
+//! # Examples
+//!
+//! Let's start with a simple example
+//!
+//! ```rust
+//! use scroll::{ctx, Pread};
+//!
+//! // Let's first define some data, cfg-gated so our assertions later on hold.
+//! #[cfg(target_endian = "little")]
+//! let bytes: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
+//! #[cfg(target_endian = "big")]
+//! let bytes: [u8; 4] = [0xef, 0xbe, 0xad, 0xde];
+//!
+//! // We can read a u32 from the array `bytes` at offset 0.
+//! // This will use a default context for the type being parsed;
+//! // in the case of u32 this defaults to the host's endianness.
+//! let number = bytes.pread::<u32>(0).unwrap();
+//! assert_eq!(number, 0xefbeadde);
+//!
+//!
+//! // Similarly we can also read a single byte at offset 2
+//! // This time using type ascription instead of the turbofish (::<>) operator.
+//! let byte: u8 = bytes.pread(2).unwrap();
+//! #[cfg(target_endian = "little")]
+//! assert_eq!(byte, 0xbe);
+//! #[cfg(target_endian = "big")]
+//! assert_eq!(byte, 0xad);
+//!
+//!
+//! // If required we can also provide a specific parsing context; e.g. if we want to explicitly
+//! // define the endianness to use:
+//! let be_number: u32 = bytes.pread_with(0, scroll::BE).unwrap();
+//! #[cfg(target_endian = "little")]
+//! assert_eq!(be_number, 0xdeadbeef);
+//! #[cfg(target_endian = "big")]
+//! assert_eq!(be_number, 0xefbeadde);
+//!
+//! let be_number16 = bytes.pread_with::<u16>(1, scroll::BE).unwrap();
+//! #[cfg(target_endian = "little")]
+//! assert_eq!(be_number16, 0xadbe);
+//! #[cfg(target_endian = "big")]
+//! assert_eq!(be_number16, 0xbead);
+//!
+//!
+//! // Reads may fail; in this example due to a too large read for the given container.
+//! // Scroll's error type does not by default allocate to work in environments like no_std.
+//! let byte_err: scroll::Result<i64> = bytes.pread(0);
+//! assert!(byte_err.is_err());
+//!
+//!
+//! // We can parse out custom datatypes, or types with lifetimes, as long as they implement
+//! // the conversion traits `TryFromCtx/FromCtx`.
+//! // Here we use the default context for &str, which parses a C-style '\0'-delimited string.
+//! let hello: &[u8] = b"hello world\0more words";
+//! let hello_world: &str = hello.pread(0).unwrap();
+//! assert_eq!("hello world", hello_world);
+//!
+//! // We can again provide a custom context; for example to parse Space-delimited strings.
+//! // As you can see while we still call `pread` changing the context can influence the output —
+//! // instead of splitting at '\0' we split at spaces
+//! let hello2: &[u8] = b"hello world\0more words";
+//! let world: &str = hello2.pread_with(6, ctx::StrCtx::Delimiter(ctx::SPACE)).unwrap();
+//! assert_eq!("world\0more", world);
+//! ```
+//!
+//! ## `std::io` API
+//!
+//! Scroll also allows reading from `std::io`. For this the types to read need to implement
+//! [FromCtx](ctx/trait.FromCtx.html) and [SizeWith](ctx/trait.SizeWith.html).
+//!
+//! ```rust
+//! use std::io::Cursor;
+//! use scroll::{IOread, ctx, Endian};
+//! let bytes = [0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0xef,0xbe,0x00,0x00,];
+//! let mut cursor = Cursor::new(bytes);
+//!
+//! // IOread uses std::io::Read methods, thus the Cursor will be incremented on these reads:
+//! let prev = cursor.position();
+//!
+//! let integer = cursor.ioread_with::<u64>(Endian::Little).unwrap();
+//!
+//! let after = cursor.position();
+//!
+//! assert!(prev < after);
+//!
+//! // SizeWith allows us to define a context-sensitive size of a read type:
+//! // Contexts can have different instantiations; e.g. the `Endian` context can be either Little or
+//! // Big. This is useful if for example the context contains the word-size of fields to be
+//! // read/written, e.g. switching between ELF32 or ELF64 at runtime.
+//! let size = <u64 as ctx::SizeWith<Endian>>::size_with(&Endian::Little) as u64;
+//! assert_eq!(prev + size, after);
+//! ```
+//!
+//! In the same vein as IOread we can use IOwrite to write a type to anything implementing
+//! `std::io::Write`:
+//!
+//! ```rust
+//! use std::io::Cursor;
+//! use scroll::{IOwrite};
+//!
+//! let mut bytes = [0x0u8; 5];
+//! let mut cursor = Cursor::new(&mut bytes[..]);
+//!
+//! // This of course once again increments the cursor position
+//! cursor.iowrite_with(0xdeadbeef as u32, scroll::BE).unwrap();
+//!
+//! assert_eq!(cursor.into_inner(), [0xde, 0xad, 0xbe, 0xef, 0x0]);
+//! ```
+//!
+//! ## Complex use cases
+//!
+//! Scroll is designed to be highly adaptable while providing a strong abstraction between the types
+//! being read/written and the data containers that hold them.
+//!
+//! In this example we'll define a custom `Data` type and allow it to be read from an arbitrary byte
+//! buffer.
+//!
+//! ```rust
+//! use scroll::{self, ctx, Pread, Endian};
+//! use scroll::ctx::StrCtx;
+//!
+//! // Our custom context type. In a more complex situation you could for example store details on
+//! // how to write or read your type, field-sizes or other information.
+//! // In this simple example we could also do without using a custom context in the first place.
+//! #[derive(Copy, Clone)]
+//! struct Context(Endian);
+//!
+//! // Our custom data type
+//! struct Data<'zerocopy> {
+//! // This is only a reference to the actual data; we make use of scroll's zero-copy capability
+//! name: &'zerocopy str,
+//! id: u32,
+//! }
+//!
+//! // To allow for safe zero-copying, scroll lets you specify lifetimes explicitly:
+//! // the lifetime ties the parsed `Data` to the source buffer it borrows from.
+//! impl<'a> ctx::TryFromCtx<'a, Context> for Data<'a> {
+//! // If necessary you can set a custom error type here, which will be returned by Pread/Pwrite
+//! type Error = scroll::Error;
+//!
+//! // Using the explicit lifetime specification again, you ensure that the read data doesn't outlive
+//! // its source buffer without having to resort to copying.
+//! fn try_from_ctx (src: &'a [u8], ctx: Context)
+//! // the `usize` returned here is the amount of bytes read.
+//! -> Result<(Self, usize), Self::Error>
+//! {
+//! let offset = &mut 0;
+//!
+//! let id = src.gread_with(offset, ctx.0)?;
+//!
+//! // In a more serious application you would validate data here of course.
+//! let namelen: u16 = src.gread_with(offset, ctx.0)?;
+//! let name = src.gread_with::<&str>(offset, StrCtx::Length(namelen as usize))?;
+//!
+//! Ok((Data { name: name, id: id }, *offset))
+//! }
+//! }
+//!
+//! // In lieu of a complex byte buffer we hearken back to a simple &[u8], the default source
+//! // of TryFromCtx. However, any type that implements Pread to produce a &[u8] can now read
+//! // `Data` thanks to its implementation of TryFromCtx.
+//! let bytes = b"\x01\x02\x03\x04\x00\x08UserName";
+//! let data: Data = bytes.pread_with(0, Context(Endian::Big)).unwrap();
+//!
+//! assert_eq!(data.id, 0x01020304);
+//! assert_eq!(data.name.to_string(), "UserName".to_string());
+//! ```
+//!
+//! For further explanation of the traits and how to implement them manually refer to
+//! [Pread](trait.Pread.html) and [TryFromCtx](ctx/trait.TryFromCtx.html).
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(feature = "derive")]
+#[allow(unused_imports)]
+pub use scroll_derive::{IOread, IOwrite, Pread, Pwrite, SizeWith};
+
+#[cfg(feature = "std")]
+extern crate core;
+
+pub mod ctx;
+mod endian;
+mod error;
+mod greater;
+mod leb128;
+#[cfg(feature = "std")]
+mod lesser;
+mod pread;
+mod pwrite;
+
+pub use crate::endian::*;
+pub use crate::error::*;
+pub use crate::greater::*;
+pub use crate::leb128::*;
+#[cfg(feature = "std")]
+pub use crate::lesser::*;
+pub use crate::pread::*;
+pub use crate::pwrite::*;
+
+#[doc(hidden)]
+pub mod export {
+ pub use ::core::mem;
+ pub use ::core::result;
+}
+
+#[allow(unused)]
+macro_rules! doc_comment {
+ ($x:expr) => {
+ #[doc = $x]
+ #[doc(hidden)]
+ mod readme_tests {}
+ };
+}
+
+#[cfg(feature = "derive")]
+doc_comment!(include_str!("../README.md"));
+
+#[cfg(test)]
+mod tests {
+ #[allow(overflowing_literals)]
+ use super::LE;
+
+ #[test]
+ fn test_measure_with_bytes() {
+ use super::ctx::MeasureWith;
+ let bytes: [u8; 4] = [0xef, 0xbe, 0xad, 0xde];
+ assert_eq!(bytes.measure_with(&()), 4);
+ }
+
+ #[test]
+ fn test_measurable() {
+ use super::ctx::SizeWith;
+ assert_eq!(8, u64::size_with(&LE));
+ }
+
+ //////////////////////////////////////////////////////////////
+ // begin pread_with
+ //////////////////////////////////////////////////////////////
+
+ macro_rules! pwrite_test {
+ ($write:ident, $read:ident, $deadbeef:expr) => {
+ #[test]
+ fn $write() {
+ use super::{Pread, Pwrite, BE};
+ let mut bytes: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+ let b = &mut bytes[..];
+ b.pwrite_with::<$read>($deadbeef, 0, LE).unwrap();
+ assert_eq!(b.pread_with::<$read>(0, LE).unwrap(), $deadbeef);
+ b.pwrite_with::<$read>($deadbeef, 0, BE).unwrap();
+ assert_eq!(b.pread_with::<$read>(0, BE).unwrap(), $deadbeef);
+ }
+ };
+ }
+
+ pwrite_test!(pwrite_and_pread_roundtrip_u16, u16, 0xbeef);
+ pwrite_test!(pwrite_and_pread_roundtrip_i16, i16, 0x7eef);
+ pwrite_test!(pwrite_and_pread_roundtrip_u32, u32, 0xbeefbeef);
+ pwrite_test!(pwrite_and_pread_roundtrip_i32, i32, 0x7eefbeef);
+ pwrite_test!(pwrite_and_pread_roundtrip_u64, u64, 0xbeefbeef7eef7eef);
+ pwrite_test!(pwrite_and_pread_roundtrip_i64, i64, 0x7eefbeef7eef7eef);
+
+ #[test]
+ fn pread_with_be() {
+ use super::Pread;
+ let bytes: [u8; 2] = [0x7e, 0xef];
+ let b = &bytes[..];
+ let byte: u16 = b.pread_with(0, super::BE).unwrap();
+ assert_eq!(0x7eef, byte);
+ let bytes: [u8; 2] = [0xde, 0xad];
+ let dead: u16 = bytes.pread_with(0, super::BE).unwrap();
+ assert_eq!(0xdead, dead);
+ }
+
+ #[test]
+ fn pread() {
+ use super::Pread;
+ let bytes: [u8; 2] = [0x7e, 0xef];
+ let b = &bytes[..];
+ let byte: u16 = b.pread(0).unwrap();
+ #[cfg(target_endian = "little")]
+ assert_eq!(0xef7e, byte);
+ #[cfg(target_endian = "big")]
+ assert_eq!(0x7eef, byte);
+ }
+
+ #[test]
+ fn pread_slice() {
+ use super::ctx::StrCtx;
+ use super::Pread;
+ let bytes: [u8; 2] = [0x7e, 0xef];
+ let b = &bytes[..];
+ let iserr: Result<&str, _> = b.pread_with(0, StrCtx::Length(3));
+ assert!(iserr.is_err());
+ // let bytes2: &[u8] = b.pread_with(0, 2).unwrap();
+ // assert_eq!(bytes2.len(), bytes[..].len());
+ // for i in 0..bytes2.len() {
+ // assert_eq!(bytes2[i], bytes[i])
+ // }
+ }
+
+ #[test]
+ fn pread_str() {
+ use super::ctx::*;
+ use super::Pread;
+ let bytes: [u8; 2] = [0x2e, 0x0];
+ let b = &bytes[..];
+ let s: &str = b.pread(0).unwrap();
+ println!("str: {}", s);
+ assert_eq!(s.len(), bytes[..].len() - 1);
+ let bytes: &[u8] = b"hello, world!\0some_other_things";
+ let hello_world: &str = bytes.pread_with(0, StrCtx::Delimiter(NULL)).unwrap();
+ println!("{:?}", &hello_world);
+ assert_eq!(hello_world.len(), 13);
+ let hello: &str = bytes.pread_with(0, StrCtx::Delimiter(SPACE)).unwrap();
+ println!("{:?}", &hello);
+ assert_eq!(hello.len(), 6);
+ // this could result in underflow so we just try it
+ let _error = bytes.pread_with::<&str>(6, StrCtx::Delimiter(SPACE));
+ let error = bytes.pread_with::<&str>(7, StrCtx::Delimiter(SPACE));
+ println!("{:?}", &error);
+ assert!(error.is_ok());
+ }
+
+ #[test]
+ fn pread_str_weird() {
+ use super::ctx::*;
+ use super::Pread;
+ let bytes: &[u8] = b"";
+ let hello_world = bytes.pread_with::<&str>(0, StrCtx::Delimiter(NULL));
+ println!("1 {:?}", &hello_world);
+ assert_eq!(hello_world.is_err(), true);
+ let error = bytes.pread_with::<&str>(7, StrCtx::Delimiter(SPACE));
+ println!("2 {:?}", &error);
+ assert!(error.is_err());
+ let bytes: &[u8] = b"\0";
+ let null = bytes.pread::<&str>(0).unwrap();
+ println!("3 {:?}", &null);
+ assert_eq!(null.len(), 0);
+ }
+
+ #[test]
+ fn pwrite_str_and_bytes() {
+ use super::ctx::*;
+ use super::{Pread, Pwrite};
+ let astring: &str = "lol hello_world lal\0ala imabytes";
+ let mut buffer = [0u8; 33];
+ buffer.pwrite(astring, 0).unwrap();
+ {
+ let hello_world = buffer
+ .pread_with::<&str>(4, StrCtx::Delimiter(SPACE))
+ .unwrap();
+ assert_eq!(hello_world, "hello_world");
+ }
+ let bytes: &[u8] = b"more\0bytes";
+ buffer.pwrite(bytes, 0).unwrap();
+ let more = bytes
+ .pread_with::<&str>(0, StrCtx::Delimiter(NULL))
+ .unwrap();
+ assert_eq!(more, "more");
+ let bytes = bytes
+ .pread_with::<&str>(more.len() + 1, StrCtx::Delimiter(NULL))
+ .unwrap();
+ assert_eq!(bytes, "bytes");
+ }
+
+ use std::error;
+ use std::fmt::{self, Display};
+
+ #[derive(Debug)]
+ pub struct ExternalError {}
+
+ impl Display for ExternalError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "ExternalError")
+ }
+ }
+
+ impl error::Error for ExternalError {
+ fn description(&self) -> &str {
+ "ExternalError"
+ }
+ fn cause(&self) -> Option<&dyn error::Error> {
+ None
+ }
+ }
+
+ impl From<super::Error> for ExternalError {
+ fn from(err: super::Error) -> Self {
+ //use super::Error::*;
+ match err {
+ _ => ExternalError {},
+ }
+ }
+ }
+
+ #[derive(Debug, PartialEq, Eq)]
+ pub struct Foo(u16);
+
+ impl super::ctx::TryIntoCtx<super::Endian> for Foo {
+ type Error = ExternalError;
+ fn try_into_ctx(self, this: &mut [u8], le: super::Endian) -> Result<usize, Self::Error> {
+ use super::Pwrite;
+ if this.len() < 2 {
+ return Err((ExternalError {}).into());
+ }
+ this.pwrite_with(self.0, 0, le)?;
+ Ok(2)
+ }
+ }
+
+ impl<'a> super::ctx::TryFromCtx<'a, super::Endian> for Foo {
+ type Error = ExternalError;
+ fn try_from_ctx(this: &'a [u8], le: super::Endian) -> Result<(Self, usize), Self::Error> {
+ use super::Pread;
+ if this.len() > 2 {
+ return Err((ExternalError {}).into());
+ }
+ let n = this.pread_with(0, le)?;
+ Ok((Foo(n), 2))
+ }
+ }
+
+ #[test]
+ fn pread_with_iter_bytes() {
+ use super::Pread;
+ let mut bytes_to: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+ let bytes_from: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
+ let bytes_to = &mut bytes_to[..];
+ let bytes_from = &bytes_from[..];
+ for i in 0..bytes_from.len() {
+ bytes_to[i] = bytes_from.pread(i).unwrap();
+ }
+ assert_eq!(bytes_to, bytes_from);
+ }
+
+ //////////////////////////////////////////////////////////////
+ // end pread_with
+ //////////////////////////////////////////////////////////////
+
+ //////////////////////////////////////////////////////////////
+ // begin gread_with
+ //////////////////////////////////////////////////////////////
+ macro_rules! g_test {
+ ($read:ident, $deadbeef:expr, $typ:ty) => {
+ #[test]
+ fn $read() {
+ use super::Pread;
+ let bytes: [u8; 8] = [0xf, 0xe, 0xe, 0xb, 0xd, 0xa, 0xe, 0xd];
+ let mut offset = 0;
+ let deadbeef: $typ = bytes.gread_with(&mut offset, LE).unwrap();
+ assert_eq!(deadbeef, $deadbeef as $typ);
+ assert_eq!(offset, ::std::mem::size_of::<$typ>());
+ }
+ };
+ }
+
+ g_test!(simple_gread_u16, 0xe0f, u16);
+ g_test!(simple_gread_u32, 0xb0e0e0f, u32);
+ g_test!(simple_gread_u64, 0xd0e0a0d0b0e0e0f, u64);
+ g_test!(simple_gread_i64, 940700423303335439, i64);
+
+ macro_rules! simple_float_test {
+ ($read:ident, $deadbeef:expr, $typ:ty) => {
+ #[test]
+ fn $read() {
+ use super::Pread;
+ let bytes: [u8; 8] = [0u8, 0, 0, 0, 0, 0, 224, 63];
+ let mut offset = 0;
+ let deadbeef: $typ = bytes.gread_with(&mut offset, LE).unwrap();
+ assert_eq!(deadbeef, $deadbeef as $typ);
+ assert_eq!(offset, ::std::mem::size_of::<$typ>());
+ }
+ };
+ }
+
+ simple_float_test!(gread_f32, 0.0, f32);
+ simple_float_test!(gread_f64, 0.5, f64);
+
+ macro_rules! g_read_write_test {
+ ($read:ident, $val:expr, $typ:ty) => {
+ #[test]
+ fn $read() {
+ use super::{Pread, Pwrite, BE, LE};
+ let mut buffer = [0u8; 16];
+ let offset = &mut 0;
+ buffer.gwrite_with($val.clone(), offset, LE).unwrap();
+ let o2 = &mut 0;
+ let val: $typ = buffer.gread_with(o2, LE).unwrap();
+ assert_eq!(val, $val);
+ assert_eq!(*offset, ::std::mem::size_of::<$typ>());
+ assert_eq!(*o2, ::std::mem::size_of::<$typ>());
+ assert_eq!(*o2, *offset);
+ buffer.gwrite_with($val.clone(), offset, BE).unwrap();
+ let val: $typ = buffer.gread_with(o2, BE).unwrap();
+ assert_eq!(val, $val);
+ }
+ };
+ }
+
+ g_read_write_test!(gread_gwrite_f64_1, 0.25f64, f64);
+ g_read_write_test!(gread_gwrite_f64_2, 0.5f64, f64);
+ g_read_write_test!(gread_gwrite_f64_3, 0.064, f64);
+
+ g_read_write_test!(gread_gwrite_f32_1, 0.25f32, f32);
+ g_read_write_test!(gread_gwrite_f32_2, 0.5f32, f32);
+ g_read_write_test!(gread_gwrite_f32_3, 0.0f32, f32);
+
+ g_read_write_test!(gread_gwrite_i64_1, 0i64, i64);
+ g_read_write_test!(gread_gwrite_i64_2, -1213213211111i64, i64);
+ g_read_write_test!(gread_gwrite_i64_3, -3000i64, i64);
+
+ g_read_write_test!(gread_gwrite_i32_1, 0i32, i32);
+ g_read_write_test!(gread_gwrite_i32_2, -1213213232, i32);
+ g_read_write_test!(gread_gwrite_i32_3, -3000i32, i32);
+
+ // useful for ferreting out problems with impls
+ #[test]
+ fn gread_with_iter_bytes() {
+ use super::Pread;
+ let mut bytes_to: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+ let bytes_from: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
+ let bytes_to = &mut bytes_to[..];
+ let bytes_from = &bytes_from[..];
+ let mut offset = &mut 0;
+ for i in 0..bytes_from.len() {
+ bytes_to[i] = bytes_from.gread(&mut offset).unwrap();
+ }
+ assert_eq!(bytes_to, bytes_from);
+ assert_eq!(*offset, bytes_to.len());
+ }
+
+ #[test]
+ fn gread_inout() {
+ use super::Pread;
+ let mut bytes_to: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+ let bytes_from: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
+ let bytes = &bytes_from[..];
+ let offset = &mut 0;
+ bytes.gread_inout(offset, &mut bytes_to[..]).unwrap();
+ assert_eq!(bytes_to, bytes_from);
+ assert_eq!(*offset, bytes_to.len());
+ }
+
+ #[test]
+ fn gread_with_byte() {
+ use super::Pread;
+ let bytes: [u8; 1] = [0x7f];
+ let b = &bytes[..];
+ let offset = &mut 0;
+ let byte: u8 = b.gread(offset).unwrap();
+ assert_eq!(0x7f, byte);
+ assert_eq!(*offset, 1);
+ }
+
+ #[test]
+ fn gread_slice() {
+ use super::ctx::StrCtx;
+ use super::Pread;
+ let bytes: [u8; 2] = [0x7e, 0xef];
+ let b = &bytes[..];
+ let offset = &mut 0;
+ let res = b.gread_with::<&str>(offset, StrCtx::Length(3));
+ assert!(res.is_err());
+ *offset = 0;
+ let astring: [u8; 3] = [0x45, 042, 0x44];
+ let string = astring.gread_with::<&str>(offset, StrCtx::Length(2));
+ match &string {
+ &Ok(_) => {}
+ &Err(ref err) => {
+ println!("{}", &err);
+ panic!();
+ }
+ }
+ assert_eq!(string.unwrap(), "E*");
+ *offset = 0;
+ let bytes2: &[u8] = b.gread_with(offset, 2).unwrap();
+ assert_eq!(*offset, 2);
+ assert_eq!(bytes2.len(), bytes[..].len());
+ for i in 0..bytes2.len() {
+ assert_eq!(bytes2[i], bytes[i])
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // end gread_with
+ /////////////////////////////////////////////////////////////////
+}
diff --git a/third_party/rust/scroll/src/pread.rs b/third_party/rust/scroll/src/pread.rs
new file mode 100644
index 0000000000..72ba877054
--- /dev/null
+++ b/third_party/rust/scroll/src/pread.rs
@@ -0,0 +1,178 @@
+use core::result;
+
+use crate::ctx::TryFromCtx;
+use crate::error;
+
+/// A very generic, contextual pread interface in Rust.
+///
+/// Like [Pwrite](trait.Pwrite.html) — but for reading!
+///
+/// Implementing `Pread` on a data store allows you to then read almost arbitrarily complex types
+/// efficiently.
+///
+/// To this end the Pread trait works in conjunction with the [TryFromCtx](ctx/trait.TryFromCtx.html);
+/// The `TryFromCtx` trait implemented on a type defines how to convert data to an object of that
+/// type, the Pread trait implemented on a data store defines how to extract said data from that
+/// store.
+///
+/// It should be noted though that in this context, data does not necessarily mean `&[u8]` —
+/// `Pread` and `TryFromCtx` are generic over what 'data' means and could be implemented instead
+/// over chunks of memory or any other indexable type — but scroll does come with a set of powerful
+/// blanket implementations for data being a continuous block of byte-addressable memory.
+///
+/// Pread provides two main groups of functions: pread and gread.
+///
+/// `pread` is the basic function that simply extracts a given type from a given data store - either
+/// using a provided Context in the case of [pread_with](trait.Pread.html#method.pread_with) or
+/// with the default context for the given type in the case of [pread](trait.Pread.html#method.pread)
+///
+/// `gread` additionally updates the offset it's currently at, allowing for a cursored
+/// read — `gread_inout` expands on that and reads a number of consecutive types from the data store.
+/// `gread` again comes with `_with` variants to allow using a specific context.
+///
+/// Since pread and friends are very generic functions, their types are rather complex but still
+/// understandable; `TryFromCtx` is generic over `Ctx` ([described
+/// here](ctx/index.html#context)), `Output` and `Error`. The Error type is hopefully
+/// self-explanatory, however the `Output` type is rather important; it defines what Pread extracts
+/// from the data store and has to match up with what `TryFromCtx` expects as input to convert into
+/// the resulting type. scroll defaults to `&[u8]` here.
+///
+/// Unless you need to implement your own data store — that is, your data either can't be converted to
+/// `&[u8]` or does not expose a `&[u8]` — you will probably want to implement
+/// [TryFromCtx](ctx/trait.TryFromCtx.html) on your Rust types to be extracted.
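+///
+/// # Example
+///
+/// A short sketch of a cursored read over a plain byte slice:
+///
+/// ```rust
+/// use scroll::Pread;
+/// let bytes: [u8; 4] = [0xde, 0xad, 0xbe, 0xef];
+/// let offset = &mut 0;
+/// let first: u16 = bytes.gread_with(offset, scroll::BE).unwrap();
+/// let second: u16 = bytes.gread_with(offset, scroll::BE).unwrap();
+/// assert_eq!(first, 0xdead);
+/// assert_eq!(second, 0xbeef);
+/// assert_eq!(*offset, 4);
+/// ```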
+///
+pub trait Pread<Ctx: Copy, E> {
+ #[inline]
+ /// Reads a value from `self` at `offset` with a default `Ctx`. For the primitive numeric values, this will read at the machine's endianness.
+ /// # Example
+ /// ```rust
+ /// use scroll::Pread;
+ /// let bytes = [0x7fu8; 0x01];
+    /// let byte = bytes.pread::<u8>(0).unwrap();
+    /// ```
+ fn pread<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: usize,
+ ) -> result::Result<N, E>
+ where
+ Ctx: Default,
+ {
+ self.pread_with(offset, Ctx::default())
+ }
+
+ #[inline]
+ /// Reads a value from `self` at `offset` with the given `ctx`
+ /// # Example
+ /// ```rust
+ /// use scroll::Pread;
+ /// let bytes: [u8; 2] = [0xde, 0xad];
+ /// let dead: u16 = bytes.pread_with(0, scroll::BE).unwrap();
+    /// assert_eq!(dead, 0xdeadu16);
+    /// ```
+ fn pread_with<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: usize,
+ ctx: Ctx,
+ ) -> result::Result<N, E> {
+ let mut ignored = offset;
+ self.gread_with(&mut ignored, ctx)
+ }
+
+ #[inline]
+ /// Reads a value from `self` at `offset` with a default `Ctx`. For the primitive numeric values, this will read at the machine's endianness. Updates the offset
+ /// # Example
+ /// ```rust
+ /// use scroll::Pread;
+ /// let offset = &mut 0;
+ /// let bytes = [0x7fu8; 0x01];
+ /// let byte = bytes.gread::<u8>(offset).unwrap();
+    /// assert_eq!(*offset, 1);
+    /// ```
+ fn gread<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: &mut usize,
+ ) -> result::Result<N, E>
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ self.gread_with(offset, ctx)
+ }
+
+ /// Reads a value from `self` at `offset` with the given `ctx`, and updates the offset.
+ /// # Example
+ /// ```rust
+ /// use scroll::Pread;
+ /// let offset = &mut 0;
+ /// let bytes: [u8; 2] = [0xde, 0xad];
+ /// let dead: u16 = bytes.gread_with(offset, scroll::BE).unwrap();
+ /// assert_eq!(dead, 0xdeadu16);
+    /// assert_eq!(*offset, 2);
+    /// ```
+ fn gread_with<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: &mut usize,
+ ctx: Ctx,
+ ) -> result::Result<N, E>;
+
+ /// Tries to write `inout.len()` `N`s into `inout` from `Self` starting at `offset`, using the default context for `N`, and updates the offset.
+ /// # Example
+ /// ```rust
+ /// use scroll::Pread;
+ /// let mut bytes: Vec<u8> = vec![0, 0];
+ /// let offset = &mut 0;
+ /// let bytes_from: [u8; 2] = [0x48, 0x49];
+ /// bytes_from.gread_inout(offset, &mut bytes).unwrap();
+ /// assert_eq!(&bytes, &bytes_from);
+    /// assert_eq!(*offset, 2);
+    /// ```
+ #[inline]
+ fn gread_inout<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: &mut usize,
+ inout: &mut [N],
+ ) -> result::Result<(), E>
+ where
+ Ctx: Default,
+ {
+ for i in inout.iter_mut() {
+ *i = self.gread(offset)?;
+ }
+ Ok(())
+ }
+
+ /// Tries to write `inout.len()` `N`s into `inout` from `Self` starting at `offset`, using the context `ctx`
+ /// # Example
+ /// ```rust
+ /// use scroll::{ctx, LE, Pread};
+ /// let mut bytes: Vec<u8> = vec![0, 0];
+ /// let offset = &mut 0;
+ /// let bytes_from: [u8; 2] = [0x48, 0x49];
+ /// bytes_from.gread_inout_with(offset, &mut bytes, LE).unwrap();
+ /// assert_eq!(&bytes, &bytes_from);
+    /// assert_eq!(*offset, 2);
+    /// ```
+ #[inline]
+ fn gread_inout_with<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: &mut usize,
+ inout: &mut [N],
+ ctx: Ctx,
+ ) -> result::Result<(), E> {
+ for i in inout.iter_mut() {
+ *i = self.gread_with(offset, ctx)?;
+ }
+ Ok(())
+ }
+}
+
+impl<Ctx: Copy, E: From<error::Error>> Pread<Ctx, E> for [u8] {
+ fn gread_with<'a, N: TryFromCtx<'a, Ctx, Self, Error = E>>(
+ &'a self,
+ offset: &mut usize,
+ ctx: Ctx,
+ ) -> result::Result<N, E> {
+ let start = *offset;
+ if start >= self.len() {
+ return Err(error::Error::BadOffset(start).into());
+ }
+ N::try_from_ctx(&self[start..], ctx).map(|(n, size)| {
+ *offset += size;
+ n
+ })
+ }
+}
diff --git a/third_party/rust/scroll/src/pwrite.rs b/third_party/rust/scroll/src/pwrite.rs
new file mode 100644
index 0000000000..ab6d96157d
--- /dev/null
+++ b/third_party/rust/scroll/src/pwrite.rs
@@ -0,0 +1,96 @@
+use core::result;
+
+use crate::ctx::TryIntoCtx;
+use crate::error;
+
+/// A very generic, contextual pwrite interface in Rust.
+///
+/// Like [Pread](trait.Pread.html) — but for writing!
+///
+/// Implementing `Pwrite` on a data store allows you to then write almost arbitrarily complex types
+/// efficiently.
+///
+/// To this end the Pwrite trait works in conjunction with the [TryIntoCtx](ctx/trait.TryIntoCtx.html);
+/// The `TryIntoCtx` trait implemented on a type defines how to convert said type into data that
+/// an implementation of Pwrite can … well … write.
+///
+/// As with [Pread](trait.Pread.html) 'data' does not necessarily mean `&[u8]` but can be any
+/// indexable type. In fact much of the documentation of `Pread` applies to `Pwrite` as well, just
+/// with 'read' switched for 'write' and 'From' switched with 'Into', so if you haven't yet you
+/// should read the documentation of `Pread` first.
+///
+/// Unless you need to implement your own data store — that is, your data either can't be converted to
+/// `&[u8]` or does not expose a `&mut [u8]` — you will probably want to implement
+/// [TryIntoCtx](ctx/trait.TryIntoCtx.html) on your Rust types to be written.
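+///
+/// # Example
+///
+/// A short sketch of writing into, then reading back from, a plain byte slice:
+///
+/// ```rust
+/// use scroll::{Pread, Pwrite, LE};
+/// let mut bytes = [0u8; 4];
+/// bytes.pwrite_with::<u32>(0xdeadbeef, 0, LE).unwrap();
+/// assert_eq!(bytes.pread_with::<u32>(0, LE).unwrap(), 0xdeadbeef);
+/// ```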
+///
+pub trait Pwrite<Ctx: Copy, E> {
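+    /// Writes `n` into `self` at `offset` with a default `Ctx`. For the primitive numeric types,
+    /// this will write with the machine's endianness.
+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of a native-endian round trip:
+    ///
+    /// ```rust
+    /// use scroll::{Pread, Pwrite};
+    /// let mut bytes = [0u8; 2];
+    /// bytes.pwrite::<u16>(0xbeef, 0).unwrap();
+    /// assert_eq!(bytes.pread::<u16>(0).unwrap(), 0xbeef);
+    /// ```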
+ #[inline]
+ fn pwrite<N: TryIntoCtx<Ctx, Self, Error = E>>(
+ &mut self,
+ n: N,
+ offset: usize,
+ ) -> result::Result<usize, E>
+ where
+ Ctx: Default,
+ {
+ self.pwrite_with(n, offset, Ctx::default())
+ }
+
+ /// Write `N` at offset `I` with context `Ctx`
+ /// # Example
+ /// ```
+ /// use scroll::{Pwrite, Pread, LE};
+ /// let mut bytes: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
+ /// bytes.pwrite_with::<u32>(0xbeefbeef, 0, LE).unwrap();
+    /// assert_eq!(bytes.pread_with::<u32>(0, LE).unwrap(), 0xbeefbeef);
+    /// ```
+ fn pwrite_with<N: TryIntoCtx<Ctx, Self, Error = E>>(
+ &mut self,
+ n: N,
+ offset: usize,
+ ctx: Ctx,
+ ) -> result::Result<usize, E>;
+
+ /// Write `n` into `self` at `offset`, with a default `Ctx`. Updates the offset.
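+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of two consecutive native-endian writes:
+    ///
+    /// ```rust
+    /// use scroll::{Pread, Pwrite};
+    /// let mut bytes = [0u8; 4];
+    /// let offset = &mut 0;
+    /// bytes.gwrite::<u16>(0xbeef, offset).unwrap();
+    /// bytes.gwrite::<u16>(0xdead, offset).unwrap();
+    /// assert_eq!(*offset, 4);
+    /// assert_eq!(bytes.pread::<u16>(2).unwrap(), 0xdead);
+    /// ```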
+ #[inline]
+ fn gwrite<N: TryIntoCtx<Ctx, Self, Error = E>>(
+ &mut self,
+ n: N,
+ offset: &mut usize,
+ ) -> result::Result<usize, E>
+ where
+ Ctx: Default,
+ {
+ let ctx = Ctx::default();
+ self.gwrite_with(n, offset, ctx)
+ }
+
+ /// Write `n` into `self` at `offset`, with the `ctx`. Updates the offset.
+ #[inline]
+ fn gwrite_with<N: TryIntoCtx<Ctx, Self, Error = E>>(
+ &mut self,
+ n: N,
+ offset: &mut usize,
+ ctx: Ctx,
+ ) -> result::Result<usize, E> {
+ let o = *offset;
+ self.pwrite_with(n, o, ctx).map(|size| {
+ *offset += size;
+ size
+ })
+ }
+}
+
+impl<Ctx: Copy, E: From<error::Error>> Pwrite<Ctx, E> for [u8] {
+ fn pwrite_with<N: TryIntoCtx<Ctx, Self, Error = E>>(
+ &mut self,
+ n: N,
+ offset: usize,
+ ctx: Ctx,
+ ) -> result::Result<usize, E> {
+ if offset >= self.len() {
+ return Err(error::Error::BadOffset(offset).into());
+ }
+ let dst = &mut self[offset..];
+ n.try_into_ctx(dst, ctx)
+ }
+}
diff --git a/third_party/rust/scroll/tests/api.rs b/third_party/rust/scroll/tests/api.rs
new file mode 100644
index 0000000000..e10726f22a
--- /dev/null
+++ b/third_party/rust/scroll/tests/api.rs
@@ -0,0 +1,292 @@
+// this exists primarily to test various API usages of scroll; i.e., it must compile
+
+// guard against potential undefined behaviour when borrowing from
+// packed structs. See https://github.com/rust-lang/rust/issues/46043
+#![deny(unaligned_references)]
+
+// #[macro_use] extern crate scroll_derive;
+
+use scroll::ctx::SizeWith;
+use scroll::{ctx, Cread, Pread, Result};
+use std::ops::{Deref, DerefMut};
+
+#[derive(Default)]
+pub struct Section<'a> {
+ pub sectname: [u8; 16],
+ pub segname: [u8; 16],
+ pub addr: u64,
+ pub size: u64,
+ pub offset: u32,
+ pub align: u32,
+ pub reloff: u32,
+ pub nreloc: u32,
+ pub flags: u32,
+ pub data: &'a [u8],
+}
+
+impl<'a> Section<'a> {
+ pub fn name(&self) -> Result<&str> {
+ self.sectname.pread::<&str>(0)
+ }
+ pub fn segname(&self) -> Result<&str> {
+ self.segname.pread::<&str>(0)
+ }
+}
+
+impl<'a> ctx::SizeWith for Section<'a> {
+ fn size_with(_ctx: &()) -> usize {
+ 4
+ }
+}
+
+#[repr(C)]
+// re-enable when scroll_derive Pread/Pwrite matches
+//#[derive(Debug, Clone, Copy, Pread, Pwrite)]
+#[derive(Debug, Clone, Copy)]
+pub struct Section32 {
+ pub sectname: [u8; 16],
+ pub segname: [u8; 16],
+ pub addr: u32,
+ pub size: u32,
+ pub offset: u32,
+ pub align: u32,
+ pub reloff: u32,
+ pub nreloc: u32,
+ pub flags: u32,
+ pub reserved1: u32,
+ pub reserved2: u32,
+}
+
+impl<'a> ctx::TryFromCtx<'a, ()> for Section<'a> {
+ type Error = scroll::Error;
+ fn try_from_ctx(
+ _bytes: &'a [u8],
+ _ctx: (),
+ ) -> ::std::result::Result<(Self, usize), Self::Error> {
+ let section = Section::default();
+ Ok((section, ::std::mem::size_of::<Section>()))
+ }
+}
+
+pub struct Segment<'a> {
+ pub cmd: u32,
+ pub cmdsize: u32,
+ pub segname: [u8; 16],
+ pub vmaddr: u64,
+ pub vmsize: u64,
+ pub fileoff: u64,
+ pub filesize: u64,
+ pub maxprot: u32,
+ pub initprot: u32,
+ pub nsects: u32,
+ pub flags: u32,
+ pub data: &'a [u8],
+ offset: usize,
+ raw_data: &'a [u8],
+}
+
+impl<'a> Segment<'a> {
+ pub fn name(&self) -> Result<&str> {
+ Ok(self.segname.pread::<&str>(0)?)
+ }
+ pub fn sections(&self) -> Result<Vec<Section<'a>>> {
+ let nsects = self.nsects as usize;
+ let mut sections = Vec::with_capacity(nsects);
+ let offset = &mut (self.offset + Self::size_with(&()));
+ let _size = Section::size_with(&());
+ let raw_data: &'a [u8] = self.raw_data;
+ for _ in 0..nsects {
+ let section = raw_data.gread_with::<Section<'a>>(offset, ())?;
+ sections.push(section);
+ //offset += size;
+ }
+ Ok(sections)
+ }
+}
+
+impl<'a> ctx::SizeWith for Segment<'a> {
+ fn size_with(_ctx: &()) -> usize {
+ 4
+ }
+}
+
+pub struct Segments<'a> {
+ pub segments: Vec<Segment<'a>>,
+}
+
+impl<'a> Deref for Segments<'a> {
+ type Target = Vec<Segment<'a>>;
+ fn deref(&self) -> &Self::Target {
+ &self.segments
+ }
+}
+
+impl<'a> DerefMut for Segments<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.segments
+ }
+}
+
+impl<'a> Segments<'a> {
+ pub fn new() -> Self {
+ Segments {
+ segments: Vec::new(),
+ }
+ }
+ pub fn sections(&self) -> Result<Vec<Vec<Section<'a>>>> {
+ let mut sections = Vec::new();
+ for segment in &self.segments {
+ sections.push(segment.sections()?);
+ }
+ Ok(sections)
+ }
+}
+
+fn lifetime_passthrough_<'a>(segments: &Segments<'a>, section_name: &str) -> Option<&'a [u8]> {
+ let segment_name = "__TEXT";
+ for segment in &segments.segments {
+ if let Ok(name) = segment.name() {
+ println!("segment.name: {}", name);
+ if name == segment_name {
+ if let Ok(sections) = segment.sections() {
+ for section in sections {
+ let sname = section.name().unwrap();
+ println!("section.name: {}", sname);
+ if section_name == sname {
+ return Some(section.data);
+ }
+ }
+ }
+ }
+ }
+ }
+ None
+}
+
+#[test]
+fn lifetime_passthrough() {
+ let segments = Segments::new();
+ let _res = lifetime_passthrough_(&segments, "__text");
+ assert!(true)
+}
+
+#[derive(Default)]
+#[repr(packed)]
+struct Foo {
+ foo: i64,
+ bar: u32,
+}
+
+impl scroll::ctx::FromCtx<scroll::Endian> for Foo {
+ fn from_ctx(bytes: &[u8], ctx: scroll::Endian) -> Self {
+ Foo {
+ foo: bytes.cread_with::<i64>(0, ctx),
+ bar: bytes.cread_with::<u32>(8, ctx),
+ }
+ }
+}
+
+impl scroll::ctx::SizeWith<scroll::Endian> for Foo {
+ fn size_with(_: &scroll::Endian) -> usize {
+ ::std::mem::size_of::<Foo>()
+ }
+}
+
+#[test]
+fn ioread_api() {
+ use scroll::{IOread, LE};
+ use std::io::Cursor;
+ let bytes_ = [
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xbe, 0x00, 0x00,
+ ];
+ let mut bytes = Cursor::new(bytes_);
+ let foo = bytes.ioread_with::<i64>(LE).unwrap();
+ let bar = bytes.ioread_with::<u32>(LE).unwrap();
+ assert_eq!(foo, 1);
+ assert_eq!(bar, 0xbeef);
+ let error = bytes.ioread_with::<f64>(LE);
+ assert!(error.is_err());
+ let mut bytes = Cursor::new(bytes_);
+ let foo_ = bytes.ioread_with::<Foo>(LE).unwrap();
+ assert_eq!({ foo_.foo }, foo);
+ assert_eq!({ foo_.bar }, bar);
+}
+
+#[repr(packed)]
+struct Bar {
+ foo: i32,
+ bar: u32,
+}
+
+impl scroll::ctx::FromCtx<scroll::Endian> for Bar {
+ fn from_ctx(bytes: &[u8], ctx: scroll::Endian) -> Self {
+ Bar {
+ foo: bytes.cread_with(0, ctx),
+ bar: bytes.cread_with(4, ctx),
+ }
+ }
+}
+
+#[test]
+fn cread_api() {
+ use scroll::{Cread, LE};
+ let bytes = [
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xbe, 0x00, 0x00,
+ ];
+ let foo = bytes.cread_with::<u64>(0, LE);
+ let bar = bytes.cread_with::<u32>(8, LE);
+ assert_eq!(foo, 1);
+ assert_eq!(bar, 0xbeef);
+}
+
+#[test]
+fn cread_api_customtype() {
+ use scroll::{Cread, LE};
+ let bytes = [0xff, 0xff, 0xff, 0xff, 0xef, 0xbe, 0xad, 0xde];
+ let bar = &bytes[..].cread_with::<Bar>(0, LE);
+ assert_eq!({ bar.foo }, -1);
+ assert_eq!({ bar.bar }, 0xdeadbeef);
+}
+
+#[test]
+#[should_panic]
+fn cread_api_badindex() {
+ use scroll::Cread;
+ let bytes = [
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xbe, 0xad, 0xde,
+ ];
+ let _foo = bytes.cread::<i64>(1_000_000);
+}
+
+#[test]
+fn cwrite_api() {
+ use scroll::Cread;
+ use scroll::Cwrite;
+ let mut bytes = [0x0; 16];
+ bytes.cwrite::<u64>(42, 0);
+ bytes.cwrite::<u32>(0xdeadbeef, 8);
+ assert_eq!(bytes.cread::<u64>(0), 42);
+ assert_eq!(bytes.cread::<u32>(8), 0xdeadbeef);
+}
+
+impl scroll::ctx::IntoCtx<scroll::Endian> for Bar {
+ fn into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) {
+ use scroll::Cwrite;
+ bytes.cwrite_with(self.foo, 0, ctx);
+ bytes.cwrite_with(self.bar, 4, ctx);
+ }
+}
+
+#[test]
+fn cwrite_api_customtype() {
+ use scroll::{Cread, Cwrite};
+ let bar = Bar {
+ foo: -1,
+ bar: 0xdeadbeef,
+ };
+ let mut bytes = [0x0; 16];
+ let _ = &bytes[..].cwrite::<Bar>(bar, 0);
+ let bar = bytes.cread::<Bar>(0);
+ assert_eq!({ bar.foo }, -1);
+ assert_eq!({ bar.bar }, 0xdeadbeef);
+}
diff --git a/third_party/rust/scroll_derive/.cargo-checksum.json b/third_party/rust/scroll_derive/.cargo-checksum.json
new file mode 100644
index 0000000000..8c6b3b87c4
--- /dev/null
+++ b/third_party/rust/scroll_derive/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"9fbb5068c3ffbf2c357f4068f854f439bae4999e04527e2dedc6758fa37a9807","LICENSE":"afb11426e09da40a1ae4f8fa17ddcc6b6a52d14df04c29bc5bcd06eb8730624d","README.md":"f89c7768454b0d2b9db816afe05db3a4cea1125bef87f08ed3eefd65e9e2b180","src/lib.rs":"a9cabe3c0b373f352357745b817f188ab841e9445056014dee9cc83c4d167483"},"package":"1db149f81d46d2deba7cd3c50772474707729550221e69588478ebf9ada425ae"} \ No newline at end of file
diff --git a/third_party/rust/scroll_derive/Cargo.toml b/third_party/rust/scroll_derive/Cargo.toml
new file mode 100644
index 0000000000..71f40a7e6c
--- /dev/null
+++ b/third_party/rust/scroll_derive/Cargo.toml
@@ -0,0 +1,53 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "scroll_derive"
+version = "0.11.1"
+authors = [
+ "m4b <m4b.github.io@gmail.com>",
+ "Ted Mielczarek <ted@mielczarek.org>",
+ "Systemcluster <me@systemcluster.me>",
+]
+include = [
+ "src/**/*",
+ "Cargo.toml",
+ "LICENSE",
+ "README.md",
+]
+description = "A macros 1.1 derive implementation for Pread and Pwrite traits from the scroll crate"
+documentation = "https://docs.rs/scroll_derive"
+readme = "README.md"
+keywords = [
+ "derive",
+ "macros",
+ "pread",
+ "pwrite",
+ "bytes",
+]
+license = "MIT"
+repository = "https://github.com/m4b/scroll"
+
+[lib]
+proc-macro = true
+
+[dependencies.proc-macro2]
+version = "1"
+
+[dependencies.quote]
+version = "1"
+
+[dependencies.syn]
+version = "2"
+
+[dev-dependencies.scroll]
+version = "0.11"
diff --git a/third_party/rust/scroll_derive/LICENSE b/third_party/rust/scroll_derive/LICENSE
new file mode 100644
index 0000000000..8864d4a396
--- /dev/null
+++ b/third_party/rust/scroll_derive/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rust/scroll_derive/README.md b/third_party/rust/scroll_derive/README.md
new file mode 100644
index 0000000000..a7f7e85f0e
--- /dev/null
+++ b/third_party/rust/scroll_derive/README.md
@@ -0,0 +1,35 @@
+# scroll_derive
+Macros 1.1 implementing #[derive(Pread, Pwrite)] for https://github.com/m4b/scroll
+
+Add derive annotations to your POD seamlessly and easily:
+
+```rust
+extern crate scroll;
+#[macro_use]
+extern crate scroll_derive;
+
+#[derive(Debug, PartialEq, Pread, Pwrite, IOread, IOwrite, SizeWith)]
+#[repr(C)]
+struct Data {
+ id: u32,
+ timestamp: f64,
+ arr: [u16; 2],
+}
+
+use scroll::{Pread, Pwrite, Cread, LE};
+
+fn main (){
+ let bytes = [0xefu8, 0xbe, 0xad, 0xde, 0, 0, 0, 0, 0, 0, 224, 63, 0xad, 0xde, 0xef, 0xbe];
+ let data: Data = bytes.pread_with(0, LE).unwrap();
+ println!("data: {:?}", &data);
+ assert_eq!(data.id, 0xdeadbeefu32);
+ let mut bytes2 = vec![0; ::std::mem::size_of::<Data>()];
+ bytes2.pwrite_with(data, 0, LE).unwrap();
+ let data: Data = bytes.pread_with(0, LE).unwrap();
+ let data2: Data = bytes2.pread_with(0, LE).unwrap();
+ assert_eq!(data, data2);
+
+ let data: Data = bytes.cread_with(0, LE);
+ assert_eq!(data, data2);
+}
+```
diff --git a/third_party/rust/scroll_derive/src/lib.rs b/third_party/rust/scroll_derive/src/lib.rs
new file mode 100644
index 0000000000..a2ba6692df
--- /dev/null
+++ b/third_party/rust/scroll_derive/src/lib.rs
@@ -0,0 +1,538 @@
+#![recursion_limit = "1024"]
+
+extern crate proc_macro;
+use proc_macro2;
+use quote::quote;
+
+use proc_macro::TokenStream;
+
+fn impl_field(ident: &proc_macro2::TokenStream, ty: &syn::Type) -> proc_macro2::TokenStream {
+ match *ty {
+ syn::Type::Array(ref array) => match array.len {
+ syn::Expr::Lit(syn::ExprLit {
+ lit: syn::Lit::Int(ref int),
+ ..
+ }) => {
+ let size = int.base10_parse::<usize>().unwrap();
+ quote! {
+ #ident: { let mut __tmp: #ty = [0u8.into(); #size]; src.gread_inout_with(offset, &mut __tmp, ctx)?; __tmp }
+ }
+ }
+ _ => panic!("Pread derive with bad array constexpr"),
+ },
+ syn::Type::Group(ref group) => impl_field(ident, &group.elem),
+ _ => {
+ quote! {
+ #ident: src.gread_with::<#ty>(offset, ctx)?
+ }
+ }
+ }
+}
+
+fn impl_struct(
+ name: &syn::Ident,
+ fields: &syn::punctuated::Punctuated<syn::Field, syn::Token![,]>,
+ generics: &syn::Generics,
+) -> proc_macro2::TokenStream {
+ let items: Vec<_> = fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let ident = &f.ident.as_ref().map(|i| quote! {#i}).unwrap_or({
+ let t = proc_macro2::Literal::usize_unsuffixed(i);
+ quote! {#t}
+ });
+ let ty = &f.ty;
+ impl_field(ident, ty)
+ })
+ .collect();
+
+ let gl = &generics.lt_token;
+ let gp = &generics.params;
+ let gg = &generics.gt_token;
+ let gn = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! { #ident }
+ }
+ p => quote! { #p },
+ });
+ let gn = quote! { #gl #( #gn ),* #gg };
+ let gw = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ #ident : ::scroll::ctx::TryFromCtx<'a, ::scroll::Endian> + ::std::convert::From<u8> + ::std::marker::Copy,
+ ::scroll::Error : ::std::convert::From<< #ident as ::scroll::ctx::TryFromCtx<'a, ::scroll::Endian>>::Error>,
+ < #ident as ::scroll::ctx::TryFromCtx<'a, ::scroll::Endian>>::Error : ::std::convert::From<scroll::Error>
+ }
+ },
+ p => quote! { #p }
+ });
+ quote! { #( #gi ),* , }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl<'a, #gp > ::scroll::ctx::TryFromCtx<'a, ::scroll::Endian> for #name #gn where #gw #name #gn : 'a {
+ type Error = ::scroll::Error;
+ #[inline]
+ fn try_from_ctx(src: &'a [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result<(Self, usize), Self::Error> {
+ use ::scroll::Pread;
+ let offset = &mut 0;
+ let data = Self { #(#items,)* };
+ Ok((data, *offset))
+ }
+ }
+ }
+}
+
+fn impl_try_from_ctx(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
+ let name = &ast.ident;
+ let generics = &ast.generics;
+ match ast.data {
+ syn::Data::Struct(ref data) => match data.fields {
+ syn::Fields::Named(ref fields) => impl_struct(name, &fields.named, generics),
+ syn::Fields::Unnamed(ref fields) => impl_struct(name, &fields.unnamed, generics),
+ _ => {
+ panic!("Pread can not be derived for unit structs")
+ }
+ },
+ _ => panic!("Pread can only be derived for structs"),
+ }
+}
+
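+/// Derives `scroll::ctx::TryFromCtx` for a struct so it can be read with `Pread`.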
+#[proc_macro_derive(Pread)]
+pub fn derive_pread(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let gen = impl_try_from_ctx(&ast);
+ gen.into()
+}
+
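+// Generates the write statement(s) for one field in the derived
+// `TryIntoCtx::try_into_ctx` body: array fields are written element by
+// element, everything else through a single `gwrite_with` call.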
+fn impl_pwrite_field(ident: &proc_macro2::TokenStream, ty: &syn::Type) -> proc_macro2::TokenStream {
+ match ty {
+ syn::Type::Array(ref array) => match array.len {
+ syn::Expr::Lit(syn::ExprLit {
+ lit: syn::Lit::Int(ref int),
+ ..
+ }) => {
+ let size = int.base10_parse::<usize>().unwrap();
+ quote! {
+ for i in 0..#size {
+ dst.gwrite_with(&self.#ident[i], offset, ctx)?;
+ }
+ }
+ }
+ _ => panic!("Pwrite derive with bad array constexpr"),
+ },
+ syn::Type::Group(group) => impl_pwrite_field(ident, &group.elem),
+ _ => {
+ quote! {
+ dst.gwrite_with(&self.#ident, offset, ctx)?
+ }
+ }
+ }
+}
+
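+// Builds two `TryIntoCtx<Endian>` impls: one for `&Struct`, which does the
+// actual writing, and a by-value impl for `Struct` that forwards to it (and
+// therefore requires `Copy` in the generic case).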
+fn impl_try_into_ctx(
+ name: &syn::Ident,
+ fields: &syn::punctuated::Punctuated<syn::Field, syn::Token![,]>,
+ generics: &syn::Generics,
+) -> proc_macro2::TokenStream {
+ let items: Vec<_> = fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let ident = &f.ident.as_ref().map(|i| quote! {#i}).unwrap_or({
+ let t = proc_macro2::Literal::usize_unsuffixed(i);
+ quote! {#t}
+ });
+ let ty = &f.ty;
+ impl_pwrite_field(ident, ty)
+ })
+ .collect();
+
+ let gl = &generics.lt_token;
+ let gp = &generics.params;
+ let gg = &generics.gt_token;
+ let gn = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! { #ident }
+ }
+ p => quote! { #p },
+ });
+ let gn = quote! { #gl #( #gn ),* #gg };
+ let gwref = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ &'a #ident : ::scroll::ctx::TryIntoCtx<::scroll::Endian>,
+ ::scroll::Error: ::std::convert::From<<&'a #ident as ::scroll::ctx::TryIntoCtx<::scroll::Endian>>::Error>,
+ <&'a #ident as ::scroll::ctx::TryIntoCtx<::scroll::Endian>>::Error: ::std::convert::From<scroll::Error>
+ }
+ },
+ p => quote! { #p }
+ });
+ quote! { where #( #gi ),* }
+ } else {
+ quote! {}
+ };
+ let gw = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ #ident : ::scroll::ctx::TryIntoCtx<::scroll::Endian>,
+ ::scroll::Error: ::std::convert::From<<#ident as ::scroll::ctx::TryIntoCtx<::scroll::Endian>>::Error>,
+ <#ident as ::scroll::ctx::TryIntoCtx<::scroll::Endian>>::Error: ::std::convert::From<scroll::Error>
+ }
+ },
+ p => quote! { #p }
+ });
+ quote! { where Self: ::std::marker::Copy, #( #gi ),* }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl<'a, #gp > ::scroll::ctx::TryIntoCtx<::scroll::Endian> for &'a #name #gn #gwref {
+ type Error = ::scroll::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result<usize, Self::Error> {
+ use ::scroll::Pwrite;
+ let offset = &mut 0;
+ #(#items;)*;
+ Ok(*offset)
+ }
+ }
+
+ impl #gl #gp #gg ::scroll::ctx::TryIntoCtx<::scroll::Endian> for #name #gn #gw {
+ type Error = ::scroll::Error;
+ #[inline]
+ fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result<usize, Self::Error> {
+ (&self).try_into_ctx(dst, ctx)
+ }
+ }
+ }
+}
+
+fn impl_pwrite(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
+ let name = &ast.ident;
+ let generics = &ast.generics;
+ match ast.data {
+ syn::Data::Struct(ref data) => match data.fields {
+ syn::Fields::Named(ref fields) => impl_try_into_ctx(name, &fields.named, generics),
+ syn::Fields::Unnamed(ref fields) => impl_try_into_ctx(name, &fields.unnamed, generics),
+ _ => {
+ panic!("Pwrite can not be derived for unit structs")
+ }
+ },
+ _ => panic!("Pwrite can only be derived for structs"),
+ }
+}
+
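+/// Derives `scroll::ctx::TryIntoCtx` for a struct so it can be written with `Pwrite`.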
+#[proc_macro_derive(Pwrite)]
+pub fn derive_pwrite(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let gen = impl_pwrite(&ast);
+ gen.into()
+}
+
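+// Builds the `SizeWith<Endian>` impl: a struct's size is the sum of
+// `size_with` over its field types, with an array contributing
+// `len * size_with(element)`.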
+fn size_with(
+ name: &syn::Ident,
+ fields: &syn::punctuated::Punctuated<syn::Field, syn::Token![,]>,
+ generics: &syn::Generics,
+) -> proc_macro2::TokenStream {
+ let items: Vec<_> = fields
+ .iter()
+ .map(|f| {
+ let ty = &f.ty;
+ match *ty {
+ syn::Type::Array(ref array) => {
+ let elem = &array.elem;
+ match array.len {
+ syn::Expr::Lit(syn::ExprLit {
+ lit: syn::Lit::Int(ref int),
+ ..
+ }) => {
+ let size = int.base10_parse::<usize>().unwrap();
+ quote! {
+ (#size * <#elem>::size_with(ctx))
+ }
+ }
+                        _ => panic!("SizeWith derive with bad array constexpr"),
+ }
+ }
+ _ => {
+ quote! {
+ <#ty>::size_with(ctx)
+ }
+ }
+ }
+ })
+ .collect();
+
+ let gl = &generics.lt_token;
+ let gp = &generics.params;
+ let gg = &generics.gt_token;
+ let gn = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! { #ident }
+ }
+ p => quote! { #p },
+ });
+ let gn = quote! { #gl #( #gn ),* #gg };
+ let gw = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ #ident : ::scroll::ctx::SizeWith<::scroll::Endian>
+ }
+ }
+ p => quote! { #p },
+ });
+ quote! { where #( #gi ),* }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl #gl #gp #gg ::scroll::ctx::SizeWith<::scroll::Endian> for #name #gn #gw {
+ #[inline]
+ fn size_with(ctx: &::scroll::Endian) -> usize {
+ 0 #(+ #items)*
+ }
+ }
+ }
+}
+
+fn impl_size_with(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
+ let name = &ast.ident;
+ let generics = &ast.generics;
+ match ast.data {
+ syn::Data::Struct(ref data) => match data.fields {
+ syn::Fields::Named(ref fields) => size_with(name, &fields.named, generics),
+ syn::Fields::Unnamed(ref fields) => size_with(name, &fields.unnamed, generics),
+ _ => {
+ panic!("SizeWith can not be derived for unit structs")
+ }
+ },
+ _ => panic!("SizeWith can only be derived for structs"),
+ }
+}
+
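+/// Derives `scroll::ctx::SizeWith` for a struct by summing the sizes of its fields.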
+#[proc_macro_derive(SizeWith)]
+pub fn derive_sizewith(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let gen = impl_size_with(&ast);
+ gen.into()
+}
+
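+// Builds the `FromCtx<Endian>` impl backing `IOread`: each field is read with
+// the non-failing `cread_with` and the offset is advanced by the field's
+// (or array element's) `size_of`.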
+fn impl_cread_struct(
+ name: &syn::Ident,
+ fields: &syn::punctuated::Punctuated<syn::Field, syn::Token![,]>,
+ generics: &syn::Generics,
+) -> proc_macro2::TokenStream {
+ let items: Vec<_> = fields.iter().enumerate().map(|(i, f)| {
+ let ident = &f.ident.as_ref().map(|i|quote!{#i}).unwrap_or({let t = proc_macro2::Literal::usize_unsuffixed(i); quote!{#t}});
+ let ty = &f.ty;
+ match *ty {
+ syn::Type::Array(ref array) => {
+ let arrty = &array.elem;
+ match array.len {
+ syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Int(ref int), ..}) => {
+ let size = int.base10_parse::<usize>().unwrap();
+ let incr = quote! { ::scroll::export::mem::size_of::<#arrty>() };
+ quote! {
+ #ident: {
+ let mut __tmp: #ty = [0u8.into(); #size];
+ for i in 0..__tmp.len() {
+ __tmp[i] = src.cread_with(*offset, ctx);
+ *offset += #incr;
+ }
+ __tmp
+ }
+ }
+ },
+ _ => panic!("IOread derive with bad array constexpr")
+ }
+ },
+ _ => {
+ let size = quote! { ::scroll::export::mem::size_of::<#ty>() };
+ quote! {
+ #ident: { let res = src.cread_with::<#ty>(*offset, ctx); *offset += #size; res }
+ }
+ }
+ }
+ }).collect();
+
+ let gl = &generics.lt_token;
+ let gp = &generics.params;
+ let gg = &generics.gt_token;
+ let gn = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! { #ident }
+ }
+ p => quote! { #p },
+ });
+ let gn = quote! { #gl #( #gn ),* #gg };
+ let gw = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ #ident : ::scroll::ctx::FromCtx<::scroll::Endian> + ::std::convert::From<u8> + ::std::marker::Copy
+ }
+ },
+ p => quote! { #p }
+ });
+ quote! { where #( #gi ),* , }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl #gl #gp #gg ::scroll::ctx::FromCtx<::scroll::Endian> for #name #gn #gw {
+ #[inline]
+ fn from_ctx(src: &[u8], ctx: ::scroll::Endian) -> Self {
+ use ::scroll::Cread;
+ let offset = &mut 0;
+ let data = Self { #(#items,)* };
+ data
+ }
+ }
+ }
+}
+
+fn impl_from_ctx(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
+ let name = &ast.ident;
+ let generics = &ast.generics;
+ match ast.data {
+ syn::Data::Struct(ref data) => match data.fields {
+ syn::Fields::Named(ref fields) => impl_cread_struct(name, &fields.named, generics),
+ syn::Fields::Unnamed(ref fields) => impl_cread_struct(name, &fields.unnamed, generics),
+ _ => {
+ panic!("IOread can not be derived for unit structs")
+ }
+ },
+ _ => panic!("IOread can only be derived for structs"),
+ }
+}
+
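+/// Derives `scroll::ctx::FromCtx` for a struct so it can be read with `Cread`/`IOread`.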
+#[proc_macro_derive(IOread)]
+pub fn derive_ioread(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let gen = impl_from_ctx(&ast);
+ gen.into()
+}
+
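+// Builds the `IntoCtx<Endian>` impls backing `IOwrite`: one for `&Struct` and
+// a by-value forwarder, with each field written via `cwrite_with` and the
+// offset advanced by its `size_of`.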
+fn impl_into_ctx(
+ name: &syn::Ident,
+ fields: &syn::punctuated::Punctuated<syn::Field, syn::Token![,]>,
+ generics: &syn::Generics,
+) -> proc_macro2::TokenStream {
+ let items: Vec<_> = fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let ident = &f.ident.as_ref().map(|i| quote! {#i}).unwrap_or({
+ let t = proc_macro2::Literal::usize_unsuffixed(i);
+ quote! {#t}
+ });
+ let ty = &f.ty;
+ let size = quote! { ::scroll::export::mem::size_of::<#ty>() };
+ match *ty {
+ syn::Type::Array(ref array) => {
+ let arrty = &array.elem;
+ quote! {
+ let size = ::scroll::export::mem::size_of::<#arrty>();
+ for i in 0..self.#ident.len() {
+ dst.cwrite_with(self.#ident[i], *offset, ctx);
+ *offset += size;
+ }
+ }
+ }
+ _ => {
+ quote! {
+ dst.cwrite_with(self.#ident, *offset, ctx);
+ *offset += #size;
+ }
+ }
+ }
+ })
+ .collect();
+
+ let gl = &generics.lt_token;
+ let gp = &generics.params;
+ let gg = &generics.gt_token;
+ let gn = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! { #ident }
+ }
+ p => quote! { #p },
+ });
+ let gw = if !gp.is_empty() {
+ let gi = gp.iter().map(|param: &syn::GenericParam| match param {
+ syn::GenericParam::Type(ref t) => {
+ let ident = &t.ident;
+ quote! {
+ #ident : ::scroll::ctx::IntoCtx<::scroll::Endian> + ::std::marker::Copy
+ }
+ }
+ p => quote! { #p },
+ });
+ quote! { where #( #gi ),* }
+ } else {
+ quote! {}
+ };
+ let gn = quote! { #gl #( #gn ),* #gg };
+
+ quote! {
+ impl<'a, #gp > ::scroll::ctx::IntoCtx<::scroll::Endian> for &'a #name #gn #gw {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) {
+ use ::scroll::Cwrite;
+ let offset = &mut 0;
+ #(#items;)*;
+ ()
+ }
+ }
+
+ impl #gl #gp #gg ::scroll::ctx::IntoCtx<::scroll::Endian> for #name #gn #gw {
+ #[inline]
+ fn into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) {
+ (&self).into_ctx(dst, ctx)
+ }
+ }
+ }
+}
+
+fn impl_iowrite(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
+ let name = &ast.ident;
+ let generics = &ast.generics;
+ match ast.data {
+ syn::Data::Struct(ref data) => match data.fields {
+ syn::Fields::Named(ref fields) => impl_into_ctx(name, &fields.named, generics),
+ syn::Fields::Unnamed(ref fields) => impl_into_ctx(name, &fields.unnamed, generics),
+ _ => {
+ panic!("IOwrite can not be derived for unit structs")
+ }
+ },
+ _ => panic!("IOwrite can only be derived for structs"),
+ }
+}
+
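+/// Derives `scroll::ctx::IntoCtx` for a struct so it can be written with `Cwrite`/`IOwrite`.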
+#[proc_macro_derive(IOwrite)]
+pub fn derive_iowrite(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let gen = impl_iowrite(&ast);
+ gen.into()
+}