Diffstat (limited to 'vendor/xz2/src')
-rw-r--r--   vendor/xz2/src/bufread.rs |  63
-rw-r--r--   vendor/xz2/src/lib.rs     |  14
-rw-r--r--   vendor/xz2/src/read.rs    |  31
-rw-r--r--   vendor/xz2/src/stream.rs  | 115
-rw-r--r--   vendor/xz2/src/write.rs   |  47
5 files changed, 145 insertions(+), 125 deletions(-)
diff --git a/vendor/xz2/src/bufread.rs b/vendor/xz2/src/bufread.rs
index c193265f3..9433a9d2c 100644
--- a/vendor/xz2/src/bufread.rs
+++ b/vendor/xz2/src/bufread.rs
@@ -1,15 +1,15 @@
//! I/O streams for wrapping `BufRead` types as encoders/decoders
-use std::io::prelude::*;
-use std::io;
use lzma_sys;
+use std::io;
+use std::io::prelude::*;
#[cfg(feature = "tokio")]
use futures::Poll;
#[cfg(feature = "tokio")]
use tokio_io::{AsyncRead, AsyncWrite};
-use stream::{Stream, Check, Action, Status};
+use crate::stream::{Action, Check, Status, Stream};
/// An xz encoder, or compressor.
///
@@ -75,10 +75,10 @@ impl<R> XzEncoder<R> {
///
/// Note that, due to buffering, this only bears any relation to
/// total_in() when the compressor chooses to flush its data
- /// (unfortunately, this won't happen this won't happen in general
- /// at the end of the stream, because the compressor doesn't know
- /// if there's more data to come). At that point,
- /// `total_out() / total_in()` would be the compression ratio.
+ /// (unfortunately, this won't happen in general at the end of the
+ /// stream, because the compressor doesn't know if there's more data
+ /// to come). At that point, `total_out() / total_in()` would be
+ /// the compression ratio.
pub fn total_out(&self) -> u64 {
self.data.total_out()
}
@@ -99,7 +99,7 @@ impl<R: BufRead> Read for XzEncoder<R> {
eof = input.is_empty();
let before_out = self.data.total_out();
let before_in = self.data.total_in();
- let action = if eof {Action::Finish} else {Action::Run};
+ let action = if eof { Action::Finish } else { Action::Run };
ret = self.data.process(input, buf, action);
read = (self.data.total_out() - before_out) as usize;
consumed = (self.data.total_in() - before_in) as usize;
@@ -112,16 +112,15 @@ impl<R: BufRead> Read for XzEncoder<R> {
// need to keep asking for more data because if we return that 0
// bytes of data have been read then it will be interpreted as EOF.
if read == 0 && !eof && buf.len() > 0 {
- continue
+ continue;
}
- return Ok(read)
+ return Ok(read);
}
}
}
#[cfg(feature = "tokio")]
-impl<R: AsyncRead + BufRead> AsyncRead for XzEncoder<R> {
-}
+impl<R: AsyncRead + BufRead> AsyncRead for XzEncoder<R> {}
impl<W: Write> Write for XzEncoder<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -151,7 +150,8 @@ impl<R: BufRead> XzDecoder<R> {
/// Creates a new decoder which will decompress data read from the given
/// input. All the concatenated xz streams from input will be consumed.
pub fn new_multi_decoder(r: R) -> XzDecoder<R> {
- let stream = Stream::new_auto_decoder(u64::max_value(), lzma_sys::LZMA_CONCATENATED).unwrap();
+ let stream =
+ Stream::new_auto_decoder(u64::max_value(), lzma_sys::LZMA_CONCATENATED).unwrap();
XzDecoder::new_stream(r, stream)
}
@@ -160,7 +160,10 @@ impl<R: BufRead> XzDecoder<R> {
/// The `Stream` can be pre-configured for various checks, different
/// decompression options/tuning, etc.
pub fn new_stream(r: R, stream: Stream) -> XzDecoder<R> {
- XzDecoder { obj: r, data: stream }
+ XzDecoder {
+ obj: r,
+ data: stream,
+ }
}
}
@@ -206,7 +209,9 @@ impl<R: BufRead> Read for XzDecoder<R> {
eof = input.is_empty();
let before_out = self.data.total_out();
let before_in = self.data.total_in();
- ret = self.data.process(input, buf, if eof { Action::Finish } else { Action::Run });
+ ret = self
+ .data
+ .process(input, buf, if eof { Action::Finish } else { Action::Run });
read = (self.data.total_out() - before_out) as usize;
consumed = (self.data.total_in() - before_in) as usize;
}
@@ -215,22 +220,25 @@ impl<R: BufRead> Read for XzDecoder<R> {
let status = ret?;
if read > 0 || eof || buf.len() == 0 {
if read == 0 && status != Status::StreamEnd && buf.len() > 0 {
- return Err(io::Error::new(io::ErrorKind::Other,
- "premature eof"))
+ return Err(io::Error::new(
+ io::ErrorKind::UnexpectedEof,
+ "premature eof",
+ ));
}
- return Ok(read)
+ return Ok(read);
}
if consumed == 0 {
- return Err(io::Error::new(io::ErrorKind::Other,
- "corrupt xz stream"))
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "corrupt xz stream",
+ ));
}
}
}
}
#[cfg(feature = "tokio")]
-impl<R: AsyncRead + BufRead> AsyncRead for XzDecoder<R> {
-}
+impl<R: AsyncRead + BufRead> AsyncRead for XzDecoder<R> {}
impl<W: Write> Write for XzDecoder<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -251,13 +259,13 @@ impl<R: AsyncWrite> AsyncWrite for XzDecoder<R> {
#[cfg(test)]
mod tests {
- use bufread::{XzEncoder, XzDecoder};
+ use crate::bufread::{XzDecoder, XzEncoder};
use std::io::Read;
#[test]
fn compressed_and_trailing_data() {
// Make a vector with compressed data...
- let mut to_compress : Vec<u8> = Vec::new();
+ let mut to_compress: Vec<u8> = Vec::new();
const COMPRESSED_ORIG_SIZE: usize = 1024;
for num in 0..COMPRESSED_ORIG_SIZE {
to_compress.push(num as u8)
@@ -268,7 +276,7 @@ mod tests {
encoder.read_to_end(&mut decoder_input).unwrap();
// ...plus additional unrelated trailing data
- const ADDITIONAL_SIZE : usize = 123;
+ const ADDITIONAL_SIZE: usize = 123;
let mut additional_data = Vec::new();
for num in 0..ADDITIONAL_SIZE {
additional_data.push(((25 + num) % 256) as u8)
@@ -281,7 +289,10 @@ mod tests {
let mut decoder = XzDecoder::new(&mut decoder_reader);
let mut decompressed_data = vec![0u8; to_compress.len()];
- assert_eq!(decoder.read(&mut decompressed_data).unwrap(), COMPRESSED_ORIG_SIZE);
+ assert_eq!(
+ decoder.read(&mut decompressed_data).unwrap(),
+ COMPRESSED_ORIG_SIZE
+ );
assert_eq!(decompressed_data, &to_compress[..]);
}
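The two error hunks above also change which error kinds the decoder's `Read` impl reports: a stream that ends early now maps to `io::ErrorKind::UnexpectedEof` and undecodable input to `io::ErrorKind::InvalidData`, where both previously used `ErrorKind::Other`. A minimal sketch of how a caller could tell the two apart, assuming the crate is consumed under its usual `xz2` name (the helper below is illustrative and not part of the patch):

use std::io::{ErrorKind, Read};
use xz2::bufread::XzDecoder;

fn decompress_all(input: &[u8]) -> std::io::Result<Vec<u8>> {
    // &[u8] implements BufRead, so it can feed the bufread decoder directly.
    let mut decoder = XzDecoder::new(input);
    let mut out = Vec::new();
    match decoder.read_to_end(&mut out) {
        Ok(_) => Ok(out),
        // Input stopped before the xz stream was complete.
        Err(e) if e.kind() == ErrorKind::UnexpectedEof => {
            eprintln!("input ended before the xz stream finished");
            Err(e)
        }
        // Input is present but is not a valid xz stream.
        Err(e) if e.kind() == ErrorKind::InvalidData => {
            eprintln!("input is not valid xz data");
            Err(e)
        }
        Err(e) => Err(e),
    }
}
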
diff --git a/vendor/xz2/src/lib.rs b/vendor/xz2/src/lib.rs
index 735ad1449..750521248 100644
--- a/vendor/xz2/src/lib.rs
+++ b/vendor/xz2/src/lib.rs
@@ -29,7 +29,7 @@
//! the `tokio` feature of this crate:
//!
//! ```toml
-//! xz2 = { version = "0.3", features = ["tokio"] }
+//! xz2 = { version = "0.1.6", features = ["tokio"] }
//! ```
//!
//! All methods are internally capable of working with streams that may return
@@ -46,18 +46,6 @@
#![deny(missing_docs)]
#![doc(html_root_url = "https://docs.rs/xz2/0.1")]
-extern crate lzma_sys;
-
-#[cfg(test)]
-extern crate rand;
-#[cfg(test)]
-extern crate quickcheck;
-#[cfg(feature = "tokio")]
-#[macro_use]
-extern crate tokio_io;
-#[cfg(feature = "tokio")]
-extern crate futures;
-
pub mod stream;
pub mod bufread;
diff --git a/vendor/xz2/src/read.rs b/vendor/xz2/src/read.rs
index 40b4ce951..19f0985f7 100644
--- a/vendor/xz2/src/read.rs
+++ b/vendor/xz2/src/read.rs
@@ -8,8 +8,8 @@ use futures::Poll;
#[cfg(feature = "tokio")]
use tokio_io::{AsyncRead, AsyncWrite};
-use bufread;
-use stream::Stream;
+use crate::bufread;
+use crate::stream::Stream;
/// A compression stream which wraps an uncompressed stream of data. Compressed
/// data will be read from the stream.
@@ -89,8 +89,7 @@ impl<R: Read> Read for XzEncoder<R> {
}
#[cfg(feature = "tokio")]
-impl<R: AsyncRead> AsyncRead for XzEncoder<R> {
-}
+impl<R: AsyncRead> AsyncRead for XzEncoder<R> {}
impl<W: Write + Read> Write for XzEncoder<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -181,8 +180,7 @@ impl<R: Read> Read for XzDecoder<R> {
}
#[cfg(feature = "tokio")]
-impl<R: AsyncRead + Read> AsyncRead for XzDecoder<R> {
-}
+impl<R: AsyncRead + Read> AsyncRead for XzDecoder<R> {}
impl<W: Write + Read> Write for XzDecoder<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -203,9 +201,10 @@ impl<R: AsyncWrite + Read> AsyncWrite for XzDecoder<R> {
#[cfg(test)]
mod tests {
- use std::io::prelude::*;
- use read::{XzEncoder, XzDecoder};
+ use crate::read::{XzDecoder, XzEncoder};
use rand::{thread_rng, Rng};
+ use std::io::prelude::*;
+ use std::iter;
#[test]
fn smoke() {
@@ -247,14 +246,19 @@ mod tests {
let mut result = Vec::new();
c.read_to_end(&mut result).unwrap();
- let v = thread_rng().gen_iter::<u8>().take(1024).collect::<Vec<_>>();
+ let mut rng = thread_rng();
+ let v = iter::repeat_with(|| rng.gen::<u8>())
+ .take(1024)
+ .collect::<Vec<_>>();
for _ in 0..200 {
result.extend(v.iter().map(|x| *x));
}
let mut d = XzDecoder::new(&result[..]);
let mut data = Vec::with_capacity(m.len());
- unsafe { data.set_len(m.len()); }
+ unsafe {
+ data.set_len(m.len());
+ }
assert!(d.read(&mut data).unwrap() == m.len());
assert!(data == &m[..]);
}
@@ -302,7 +306,7 @@ mod tests {
fn two_streams() {
let mut input_stream1: Vec<u8> = Vec::new();
let mut input_stream2: Vec<u8> = Vec::new();
- let mut all_input : Vec<u8> = Vec::new();
+ let mut all_input: Vec<u8> = Vec::new();
// Generate input data.
const STREAM1_SIZE: usize = 1024;
@@ -334,7 +338,10 @@ mod tests {
let mut decoder = XzDecoder::new_multi_decoder(&mut decoder_reader);
let mut decompressed_data = vec![0u8; all_input.len()];
- assert_eq!(decoder.read(&mut decompressed_data).unwrap(), all_input.len());
+ assert_eq!(
+ decoder.read(&mut decompressed_data).unwrap(),
+ all_input.len()
+ );
assert_eq!(decompressed_data, &all_input[..]);
}
}
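The test hunk above replaces `thread_rng().gen_iter::<u8>()`, an adapter removed from newer `rand` releases, with `iter::repeat_with` over `rng.gen()`. A standalone sketch of the same pattern, assuming a `rand` version (roughly 0.6 through 0.8) where `thread_rng` and `Rng::gen` are still available:

use rand::{thread_rng, Rng};
use std::iter;

// Generate `n` pseudo-random bytes without the removed `gen_iter` adapter.
fn random_bytes(n: usize) -> Vec<u8> {
    let mut rng = thread_rng();
    // `repeat_with` invokes the closure once per element taken.
    iter::repeat_with(|| rng.gen::<u8>()).take(n).collect()
}

fn main() {
    let v = random_bytes(1024);
    assert_eq!(v.len(), 1024);
}
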
diff --git a/vendor/xz2/src/stream.rs b/vendor/xz2/src/stream.rs
index b005f26d8..88681f1fb 100644
--- a/vendor/xz2/src/stream.rs
+++ b/vendor/xz2/src/stream.rs
@@ -52,13 +52,13 @@ pub struct Filters {
/// the amount of input will make `process` return `Error::Program`.
#[derive(Copy, Clone)]
pub enum Action {
- /// Continue processing
- ///
+ /// Continue processing
+ ///
/// When encoding, encode as much input as possible. Some internal buffering
/// will probably be done (depends on the filter chain in use), which causes
/// latency: the input used won't usually be decodeable from the output of
/// the same `process` call.
- ///
+ ///
/// When decoding, decode as much input as possible and produce as much
/// output as possible.
Run = lzma_sys::LZMA_RUN as isize,
@@ -278,9 +278,11 @@ impl Stream {
pub fn new_easy_encoder(preset: u32, check: Check) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_easy_encoder(&mut init.raw,
- preset,
- check as lzma_sys::lzma_check))?;
+ cvt(lzma_sys::lzma_easy_encoder(
+ &mut init.raw,
+ preset,
+ check as lzma_sys::lzma_check,
+ ))?;
Ok(init)
}
}
@@ -310,13 +312,14 @@ impl Stream {
///
/// This function is similar to `new_easy_encoder` but a custom filter chain
/// is specified.
- pub fn new_stream_encoder(filters: &Filters,
- check: Check) -> Result<Stream, Error> {
+ pub fn new_stream_encoder(filters: &Filters, check: Check) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_stream_encoder(&mut init.raw,
- filters.inner.as_ptr(),
- check as lzma_sys::lzma_check))?;
+ cvt(lzma_sys::lzma_stream_encoder(
+ &mut init.raw,
+ filters.inner.as_ptr(),
+ check as lzma_sys::lzma_check,
+ ))?;
Ok(init)
}
}
@@ -326,13 +329,14 @@ impl Stream {
/// The maximum memory usage can be specified along with flags such as
/// `TELL_ANY_CHECK`, `TELL_NO_CHECK`, `TELL_UNSUPPORTED_CHECK`,
/// `TELL_IGNORE_CHECK`, or `CONCATENATED`.
- pub fn new_stream_decoder(memlimit: u64,
- flags: u32) -> Result<Stream, Error> {
+ pub fn new_stream_decoder(memlimit: u64, flags: u32) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_stream_decoder(&mut init.raw,
- memlimit,
- flags))?;
+ cvt(lzma_sys::lzma_stream_decoder(
+ &mut init.raw,
+ memlimit,
+ flags,
+ ))?;
Ok(init)
}
}
@@ -343,8 +347,7 @@ impl Stream {
pub fn new_lzma_decoder(memlimit: u64) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_alone_decoder(&mut init.raw,
- memlimit))?;
+ cvt(lzma_sys::lzma_alone_decoder(&mut init.raw, memlimit))?;
Ok(init)
}
}
@@ -354,9 +357,7 @@ impl Stream {
pub fn new_auto_decoder(memlimit: u64, flags: u32) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_auto_decoder(&mut init.raw,
- memlimit,
- flags))?;
+ cvt(lzma_sys::lzma_auto_decoder(&mut init.raw, memlimit, flags))?;
Ok(init)
}
}
@@ -366,18 +367,18 @@ impl Stream {
/// This will perform the appropriate encoding or decoding operation
/// depending on the kind of underlying stream. Documentation for the
/// various `action` arguments can be found on the respective variants.
- pub fn process(&mut self,
- input: &[u8],
- output: &mut [u8],
- action: Action) -> Result<Status, Error> {
+ pub fn process(
+ &mut self,
+ input: &[u8],
+ output: &mut [u8],
+ action: Action,
+ ) -> Result<Status, Error> {
self.raw.next_in = input.as_ptr();
self.raw.avail_in = input.len();
self.raw.next_out = output.as_mut_ptr();
self.raw.avail_out = output.len();
let action = action as lzma_sys::lzma_action;
- unsafe {
- cvt(lzma_sys::lzma_code(&mut self.raw, action))
- }
+ unsafe { cvt(lzma_sys::lzma_code(&mut self.raw, action)) }
}
/// Performs the same operation as `process`, but places output data in a `Vec`.
@@ -385,10 +386,12 @@ impl Stream {
/// This function will use the extra capacity of `output` as a destination
/// for bytes to be placed. The length of `output` will automatically get
/// updated after the operation has completed.
- pub fn process_vec(&mut self,
- input: &[u8],
- output: &mut Vec<u8>,
- action: Action) -> Result<Status, Error> {
+ pub fn process_vec(
+ &mut self,
+ input: &[u8],
+ output: &mut Vec<u8>,
+ action: Action,
+ ) -> Result<Status, Error> {
let cap = output.capacity();
let len = output.len();
@@ -400,7 +403,7 @@ impl Stream {
self.process(input, out, action)
};
output.set_len((self.total_out() - before) as usize + len);
- return ret
+ return ret;
}
}
@@ -426,8 +429,7 @@ impl Stream {
/// This can return `Error::MemLimit` if the new limit is too small or
/// `Error::Program` if this stream doesn't take a memory limit.
pub fn set_memlimit(&mut self, limit: u64) -> Result<(), Error> {
- cvt(unsafe { lzma_sys::lzma_memlimit_set(&mut self.raw, limit) })
- .map(|_| ())
+ cvt(unsafe { lzma_sys::lzma_memlimit_set(&mut self.raw, limit) }).map(|_| ())
}
}
@@ -572,9 +574,7 @@ impl LzmaOptions {
impl Check {
/// Test if this check is supported in this build of liblzma.
pub fn is_supported(&self) -> bool {
- let ret = unsafe {
- lzma_sys::lzma_check_is_supported(*self as lzma_sys::lzma_check)
- };
+ let ret = unsafe { lzma_sys::lzma_check_is_supported(*self as lzma_sys::lzma_check) };
ret != 0
}
}
@@ -582,9 +582,7 @@ impl Check {
impl MatchFinder {
/// Test if this match finder is supported in this build of liblzma.
pub fn is_supported(&self) -> bool {
- let ret = unsafe {
- lzma_sys::lzma_mf_is_supported(*self as lzma_sys::lzma_match_finder)
- };
+ let ret = unsafe { lzma_sys::lzma_mf_is_supported(*self as lzma_sys::lzma_match_finder) };
ret != 0
}
}
@@ -700,7 +698,7 @@ impl MtStreamBuilder {
filters: None,
};
init.raw.threads = 1;
- return init
+ return init;
}
}
@@ -792,8 +790,7 @@ impl MtStreamBuilder {
pub fn encoder(&self) -> Result<Stream, Error> {
unsafe {
let mut init = Stream { raw: mem::zeroed() };
- cvt(lzma_sys::lzma_stream_encoder_mt(&mut init.raw,
- &self.raw))?;
+ cvt(lzma_sys::lzma_stream_encoder_mt(&mut init.raw, &self.raw))?;
Ok(init)
}
}
@@ -819,17 +816,36 @@ fn cvt(rc: lzma_sys::lzma_ret) -> Result<Status, Error> {
impl From<Error> for io::Error {
fn from(e: Error) -> io::Error {
- io::Error::new(io::ErrorKind::Other, e)
+ let kind = match e {
+ Error::Data => std::io::ErrorKind::InvalidData,
+ Error::Options => std::io::ErrorKind::InvalidInput,
+ Error::Format => std::io::ErrorKind::InvalidData,
+ Error::MemLimit => std::io::ErrorKind::Other,
+ Error::Mem => std::io::ErrorKind::Other,
+ Error::Program => std::io::ErrorKind::Other,
+ Error::NoCheck => std::io::ErrorKind::InvalidInput,
+ Error::UnsupportedCheck => std::io::ErrorKind::Other,
+ };
+
+ io::Error::new(kind, e)
}
}
-impl error::Error for Error {
- fn description(&self) -> &str { "lzma data error" }
-}
+impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- error::Error::description(self).fmt(f)
+ match self {
+ Error::Data => "lzma data error",
+ Error::Options => "invalid options",
+ Error::Format => "stream/file format not recognized",
+ Error::MemLimit => "memory limit reached",
+ Error::Mem => "can't allocate memory",
+ Error::Program => "liblzma internal error",
+ Error::NoCheck => "no integrity check was available",
+ Error::UnsupportedCheck => "liblzma not built with check support",
+ }
+ .fmt(f)
}
}
@@ -840,4 +856,3 @@ impl Drop for Stream {
}
}
}
-
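The final hunks drop the deprecated `Error::description` in favor of a real `Display` impl and map each `Error` variant to a specific `io::ErrorKind` in the `From` conversion. A hedged sketch of what that buys a caller constructing a raw `Stream`, again assuming the crate is consumed as `xz2` (the preset and check choice below are arbitrary):

use std::io;
use xz2::stream::{Check, Stream};

fn encoder_or_io_error() -> io::Result<Stream> {
    // On failure this returns xz2::stream::Error; after this patch the
    // From<Error> impl picks an ErrorKind that matches the variant
    // (e.g. InvalidInput for Error::Options) instead of always Other.
    Stream::new_easy_encoder(6, Check::Crc64).map_err(io::Error::from)
}

fn main() {
    match encoder_or_io_error() {
        Ok(_stream) => println!("encoder ready"),
        // Display now prints a variant-specific message rather than the
        // old blanket "lzma data error".
        Err(e) => eprintln!("{} ({:?})", e, e.kind()),
    }
}
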
diff --git a/vendor/xz2/src/write.rs b/vendor/xz2/src/write.rs
index c6d95c35e..9ffad2388 100644
--- a/vendor/xz2/src/write.rs
+++ b/vendor/xz2/src/write.rs
@@ -1,15 +1,15 @@
//! Writer-based compression/decompression streams
-use std::io::prelude::*;
-use std::io;
use lzma_sys;
+use std::io;
+use std::io::prelude::*;
#[cfg(feature = "tokio")]
use futures::Poll;
#[cfg(feature = "tokio")]
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio_io::{try_nb, AsyncRead, AsyncWrite};
-use stream::{Action, Status, Stream, Check};
+use crate::stream::{Action, Check, Status, Stream};
/// A compression stream which will have uncompressed data written to it and
/// will write compressed data to an output stream.
@@ -81,7 +81,7 @@ impl<W: Write> XzEncoder<W> {
self.dump()?;
let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?;
if res == Status::StreamEnd {
- break
+ break;
}
}
self.dump()
@@ -124,12 +124,13 @@ impl<W: Write> Write for XzEncoder<W> {
self.dump()?;
let total_in = self.total_in();
- self.data.process_vec(data, &mut self.buf, Action::Run)
+ self.data
+ .process_vec(data, &mut self.buf, Action::Run)
.unwrap();
let written = (self.total_in() - total_in) as usize;
if written > 0 || data.len() == 0 {
- return Ok(written)
+ return Ok(written);
}
}
}
@@ -137,10 +138,12 @@ impl<W: Write> Write for XzEncoder<W> {
fn flush(&mut self) -> io::Result<()> {
loop {
self.dump()?;
- let status = self.data.process_vec(&[], &mut self.buf,
- Action::FullFlush).unwrap();
+ let status = self
+ .data
+ .process_vec(&[], &mut self.buf, Action::FullFlush)
+ .unwrap();
if status == Status::StreamEnd {
- break
+ break;
}
}
self.obj.as_mut().unwrap().flush()
@@ -162,8 +165,7 @@ impl<W: Read + Write> Read for XzEncoder<W> {
}
#[cfg(feature = "tokio")]
-impl<W: AsyncRead + AsyncWrite> AsyncRead for XzEncoder<W> {
-}
+impl<W: AsyncRead + AsyncWrite> AsyncRead for XzEncoder<W> {}
impl<W: Write> Drop for XzEncoder<W> {
fn drop(&mut self) {
@@ -184,7 +186,8 @@ impl<W: Write> XzDecoder<W> {
/// Creates a new decoding stream which will decode into `obj` all the xz streams
/// from the input written to it.
pub fn new_multi_decoder(obj: W) -> XzDecoder<W> {
- let stream = Stream::new_stream_decoder(u64::max_value(), lzma_sys::LZMA_CONCATENATED).unwrap();
+ let stream =
+ Stream::new_stream_decoder(u64::max_value(), lzma_sys::LZMA_CONCATENATED).unwrap();
XzDecoder::new_stream(obj, stream)
}
@@ -225,8 +228,7 @@ impl<W: Write> XzDecoder<W> {
fn try_finish(&mut self) -> io::Result<()> {
loop {
self.dump()?;
- let res = self.data.process_vec(&[], &mut self.buf,
- Action::Finish)?;
+ let res = self.data.process_vec(&[], &mut self.buf, Action::Finish)?;
// When decoding a truncated file, XZ returns LZMA_BUF_ERROR and
// decodes no new data, which corresponds to this crate's MemNeeded
@@ -236,13 +238,12 @@ impl<W: Write> XzDecoder<W> {
// See the 02_decompress.c example in xz-utils.
if self.buf.is_empty() && res == Status::MemNeeded {
let msg = "xz compressed stream is truncated or otherwise corrupt";
- return Err(io::Error::new(io::ErrorKind::UnexpectedEof, msg))
+ return Err(io::Error::new(io::ErrorKind::UnexpectedEof, msg));
}
if res == Status::StreamEnd {
- break
+ break;
}
-
}
self.dump()
}
@@ -275,12 +276,11 @@ impl<W: Write> Write for XzDecoder<W> {
self.dump()?;
let before = self.total_in();
- let res = self.data.process_vec(data, &mut self.buf,
- Action::Run)?;
+ let res = self.data.process_vec(data, &mut self.buf, Action::Run)?;
let written = (self.total_in() - before) as usize;
if written > 0 || data.len() == 0 || res == Status::StreamEnd {
- return Ok(written)
+ return Ok(written);
}
}
}
@@ -306,8 +306,7 @@ impl<W: Read + Write> Read for XzDecoder<W> {
}
#[cfg(feature = "tokio")]
-impl<W: AsyncRead + AsyncWrite> AsyncRead for XzDecoder<W> {
-}
+impl<W: AsyncRead + AsyncWrite> AsyncRead for XzDecoder<W> {}
impl<W: Write> Drop for XzDecoder<W> {
fn drop(&mut self) {
@@ -319,9 +318,9 @@ impl<W: Write> Drop for XzDecoder<W> {
#[cfg(test)]
mod tests {
+ use super::{XzDecoder, XzEncoder};
use std::io::prelude::*;
use std::iter::repeat;
- use super::{XzEncoder, XzDecoder};
#[test]
fn smoke() {
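
For completeness, a small writer-side round trip against the patched module; the compression preset and sample data are illustrative, and the crate is assumed to be consumed as `xz2`:

use std::io::Write;
use xz2::write::{XzDecoder, XzEncoder};

fn roundtrip(data: &[u8]) -> std::io::Result<Vec<u8>> {
    // Compress into an in-memory Vec at preset 6.
    let mut encoder = XzEncoder::new(Vec::new(), 6);
    encoder.write_all(data)?;
    let compressed = encoder.finish()?;

    // Decompress by writing the compressed bytes back through a decoder.
    let mut decoder = XzDecoder::new(Vec::new());
    decoder.write_all(&compressed)?;
    // finish() drives try_finish(); a truncated stream would surface here
    // as ErrorKind::UnexpectedEof, per the hunk above.
    decoder.finish()
}

fn main() -> std::io::Result<()> {
    let out = roundtrip(b"hello xz")?;
    assert_eq!(out, b"hello xz");
    Ok(())
}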