summary | refs | log | tree | commit | diff | stats
path: root/vendor/base64/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
commitc23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /vendor/base64/src
parentReleasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
downloadrustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/base64/src')
-rw-r--r--vendor/base64/src/alphabet.rs37
-rw-r--r--vendor/base64/src/chunked_encoder.rs98
-rw-r--r--vendor/base64/src/decode.rs8
-rw-r--r--vendor/base64/src/encode.rs6
-rw-r--r--vendor/base64/src/engine/mod.rs165
-rw-r--r--vendor/base64/src/engine/tests.rs6
-rw-r--r--vendor/base64/src/write/encoder_string_writer.rs45
-rw-r--r--vendor/base64/src/write/encoder_tests.rs2
8 files changed, 208 insertions(+), 159 deletions(-)
diff --git a/vendor/base64/src/alphabet.rs b/vendor/base64/src/alphabet.rs
index 7cd1b5707..f7cd81911 100644
--- a/vendor/base64/src/alphabet.rs
+++ b/vendor/base64/src/alphabet.rs
@@ -1,7 +1,7 @@
//! Provides [Alphabet] and constants for alphabets commonly used in the wild.
use crate::PAD_BYTE;
-use core::fmt;
+use core::{convert, fmt};
#[cfg(any(feature = "std", test))]
use std::error;
@@ -12,6 +12,10 @@ const ALPHABET_SIZE: usize = 64;
/// Common alphabets are provided as constants, and custom alphabets
/// can be made via `from_str` or the `TryFrom<str>` implementation.
///
+/// # Examples
+///
+/// Building and using a custom Alphabet:
+///
/// ```
/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap();
///
@@ -19,6 +23,33 @@ const ALPHABET_SIZE: usize = 64;
/// &custom,
/// base64::engine::general_purpose::PAD);
/// ```
+///
+/// Building a const:
+///
+/// ```
+/// use base64::alphabet::Alphabet;
+///
+/// static CUSTOM: Alphabet = {
+/// // Result::unwrap() isn't const yet, but panic!() is OK
+/// match Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") {
+/// Ok(x) => x,
+/// Err(_) => panic!("creation of alphabet failed"),
+/// }
+/// };
+/// ```
+///
+/// Building a lazy_static:
+///
+/// ```
+/// use base64::{
+/// alphabet::Alphabet,
+/// engine::{general_purpose::GeneralPurpose, GeneralPurposeConfig},
+/// };
+///
+/// lazy_static::lazy_static! {
+/// static ref CUSTOM: Alphabet = Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap();
+/// }
+/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Alphabet {
pub(crate) symbols: [u8; ALPHABET_SIZE],
@@ -93,7 +124,7 @@ impl Alphabet {
}
}
-impl TryFrom<&str> for Alphabet {
+impl convert::TryFrom<&str> for Alphabet {
type Error = ParseAlphabetError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
@@ -171,7 +202,7 @@ pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked(
#[cfg(test)]
mod tests {
use crate::alphabet::*;
- use std::convert::TryFrom as _;
+ use core::convert::TryFrom as _;
#[test]
fn detects_duplicate_start() {
diff --git a/vendor/base64/src/chunked_encoder.rs b/vendor/base64/src/chunked_encoder.rs
index bc3810ab7..69bc7457e 100644
--- a/vendor/base64/src/chunked_encoder.rs
+++ b/vendor/base64/src/chunked_encoder.rs
@@ -1,12 +1,12 @@
+use crate::{
+ encode::add_padding,
+ engine::{Config, Engine},
+};
#[cfg(any(feature = "alloc", feature = "std", test))]
use alloc::string::String;
-use core::cmp;
#[cfg(any(feature = "alloc", feature = "std", test))]
use core::str;
-use crate::encode::add_padding;
-use crate::engine::{Config, Engine};
-
/// The output mechanism for ChunkedEncoder's encoded bytes.
pub trait Sink {
type Error;
@@ -15,72 +15,37 @@ pub trait Sink {
fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>;
}
-const BUF_SIZE: usize = 1024;
-
/// A base64 encoder that emits encoded bytes in chunks without heap allocation.
pub struct ChunkedEncoder<'e, E: Engine + ?Sized> {
engine: &'e E,
- max_input_chunk_len: usize,
}
impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> {
pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> {
- ChunkedEncoder {
- engine,
- max_input_chunk_len: max_input_length(BUF_SIZE, engine.config().encode_padding()),
- }
+ ChunkedEncoder { engine }
}
pub fn encode<S: Sink>(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> {
- let mut encode_buf: [u8; BUF_SIZE] = [0; BUF_SIZE];
- let mut input_index = 0;
-
- while input_index < bytes.len() {
- // either the full input chunk size, or it's the last iteration
- let input_chunk_len = cmp::min(self.max_input_chunk_len, bytes.len() - input_index);
-
- let chunk = &bytes[input_index..(input_index + input_chunk_len)];
-
- let mut b64_bytes_written = self.engine.internal_encode(chunk, &mut encode_buf);
-
- input_index += input_chunk_len;
- let more_input_left = input_index < bytes.len();
-
- if self.engine.config().encode_padding() && !more_input_left {
- // no more input, add padding if needed. Buffer will have room because
- // max_input_length leaves room for it.
- b64_bytes_written +=
- add_padding(b64_bytes_written, &mut encode_buf[b64_bytes_written..]);
+ const BUF_SIZE: usize = 1024;
+ const CHUNK_SIZE: usize = BUF_SIZE / 4 * 3;
+
+ let mut buf = [0; BUF_SIZE];
+ for chunk in bytes.chunks(CHUNK_SIZE) {
+ let mut len = self.engine.internal_encode(chunk, &mut buf);
+ if chunk.len() != CHUNK_SIZE && self.engine.config().encode_padding() {
+ // Final, potentially partial, chunk.
+ // Only need to consider if padding is needed on a partial chunk since full chunk
+ // is a multiple of 3, which therefore won't be padded.
+ // Pad output to multiple of four bytes if required by config.
+ len += add_padding(len, &mut buf[len..]);
}
-
- sink.write_encoded_bytes(&encode_buf[0..b64_bytes_written])?;
+ sink.write_encoded_bytes(&buf[..len])?;
}
Ok(())
}
}
-/// Calculate the longest input that can be encoded for the given output buffer size.
-///
-/// If the config requires padding, two bytes of buffer space will be set aside so that the last
-/// chunk of input can be encoded safely.
-///
-/// The input length will always be a multiple of 3 so that no encoding state has to be carried over
-/// between chunks.
-fn max_input_length(encoded_buf_len: usize, padded: bool) -> usize {
- let effective_buf_len = if padded {
- // make room for padding
- encoded_buf_len
- .checked_sub(2)
- .expect("Don't use a tiny buffer")
- } else {
- encoded_buf_len
- };
-
- // No padding, so just normal base64 expansion.
- (effective_buf_len / 4) * 3
-}
-
// A really simple sink that just appends to a string
#[cfg(any(feature = "alloc", feature = "std", test))]
pub(crate) struct StringSink<'a> {
@@ -152,38 +117,13 @@ pub mod tests {
chunked_encode_matches_normal_encode_random(&helper);
}
- #[test]
- fn max_input_length_no_pad() {
- assert_eq!(768, max_input_length(1024, false));
- }
-
- #[test]
- fn max_input_length_with_pad_decrements_one_triple() {
- assert_eq!(765, max_input_length(1024, true));
- }
-
- #[test]
- fn max_input_length_with_pad_one_byte_short() {
- assert_eq!(765, max_input_length(1025, true));
- }
-
- #[test]
- fn max_input_length_with_pad_fits_exactly() {
- assert_eq!(768, max_input_length(1026, true));
- }
-
- #[test]
- fn max_input_length_cant_use_extra_single_encoded_byte() {
- assert_eq!(300, max_input_length(401, false));
- }
-
pub fn chunked_encode_matches_normal_encode_random<S: SinkTestHelper>(sink_test_helper: &S) {
let mut input_buf: Vec<u8> = Vec::new();
let mut output_buf = String::new();
let mut rng = rand::rngs::SmallRng::from_entropy();
let input_len_range = Uniform::new(1, 10_000);
- for _ in 0..5_000 {
+ for _ in 0..20_000 {
input_buf.clear();
output_buf.clear();
diff --git a/vendor/base64/src/decode.rs b/vendor/base64/src/decode.rs
index 7d29fdc82..f590cbdd0 100644
--- a/vendor/base64/src/decode.rs
+++ b/vendor/base64/src/decode.rs
@@ -41,11 +41,7 @@ impl fmt::Display for DecodeError {
}
#[cfg(any(feature = "std", test))]
-impl error::Error for DecodeError {
- fn cause(&self) -> Option<&dyn error::Error> {
- None
- }
-}
+impl error::Error for DecodeError {}
/// Errors that can occur while decoding into a slice.
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -69,7 +65,7 @@ impl fmt::Display for DecodeSliceError {
#[cfg(any(feature = "std", test))]
impl error::Error for DecodeSliceError {
- fn cause(&self) -> Option<&dyn error::Error> {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
DecodeSliceError::DecodeError(e) => Some(e),
DecodeSliceError::OutputSliceTooSmall => None,
diff --git a/vendor/base64/src/encode.rs b/vendor/base64/src/encode.rs
index 15b903d2c..00ade7472 100644
--- a/vendor/base64/src/encode.rs
+++ b/vendor/base64/src/encode.rs
@@ -149,11 +149,7 @@ impl fmt::Display for EncodeSliceError {
}
#[cfg(any(feature = "std", test))]
-impl error::Error for EncodeSliceError {
- fn cause(&self) -> Option<&dyn error::Error> {
- None
- }
-}
+impl error::Error for EncodeSliceError {}
#[cfg(test)]
mod tests {
diff --git a/vendor/base64/src/engine/mod.rs b/vendor/base64/src/engine/mod.rs
index aa41dffec..e10d66bb2 100644
--- a/vendor/base64/src/engine/mod.rs
+++ b/vendor/base64/src/engine/mod.rs
@@ -114,14 +114,23 @@ pub trait Engine: Send + Sync {
///
/// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~");
#[cfg(any(feature = "alloc", feature = "std", test))]
+ #[inline]
fn encode<T: AsRef<[u8]>>(&self, input: T) -> String {
- let encoded_size = encoded_len(input.as_ref().len(), self.config().encode_padding())
- .expect("integer overflow when calculating buffer size");
- let mut buf = vec![0; encoded_size];
+ fn inner<E>(engine: &E, input_bytes: &[u8]) -> String
+ where
+ E: Engine + ?Sized,
+ {
+ let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
+ .expect("integer overflow when calculating buffer size");
+
+ let mut buf = vec![0; encoded_size];
+
+ encode_with_padding(input_bytes, &mut buf[..], engine, encoded_size);
- encode_with_padding(input.as_ref(), &mut buf[..], self, encoded_size);
+ String::from_utf8(buf).expect("Invalid UTF8")
+ }
- String::from_utf8(buf).expect("Invalid UTF8")
+ inner(self, input.as_ref())
}
/// Encode arbitrary octets as base64 into a supplied `String`.
@@ -145,16 +154,20 @@ pub trait Engine: Send + Sync {
/// }
/// ```
#[cfg(any(feature = "alloc", feature = "std", test))]
+ #[inline]
fn encode_string<T: AsRef<[u8]>>(&self, input: T, output_buf: &mut String) {
- let input_bytes = input.as_ref();
-
+ fn inner<E>(engine: &E, input_bytes: &[u8], output_buf: &mut String)
+ where
+ E: Engine + ?Sized,
{
let mut sink = chunked_encoder::StringSink::new(output_buf);
- chunked_encoder::ChunkedEncoder::new(self)
+ chunked_encoder::ChunkedEncoder::new(engine)
.encode(input_bytes, &mut sink)
.expect("Writing to a String shouldn't fail");
}
+
+ inner(self, input.as_ref(), output_buf)
}
/// Encode arbitrary octets as base64 into a supplied slice.
@@ -179,25 +192,35 @@ pub trait Engine: Send + Sync {
///
/// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice());
/// ```
+ #[inline]
fn encode_slice<T: AsRef<[u8]>>(
&self,
input: T,
output_buf: &mut [u8],
) -> Result<usize, EncodeSliceError> {
- let input_bytes = input.as_ref();
+ fn inner<E>(
+ engine: &E,
+ input_bytes: &[u8],
+ output_buf: &mut [u8],
+ ) -> Result<usize, EncodeSliceError>
+ where
+ E: Engine + ?Sized,
+ {
+ let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
+ .expect("usize overflow when calculating buffer size");
- let encoded_size = encoded_len(input_bytes.len(), self.config().encode_padding())
- .expect("usize overflow when calculating buffer size");
+ if output_buf.len() < encoded_size {
+ return Err(EncodeSliceError::OutputSliceTooSmall);
+ }
- if output_buf.len() < encoded_size {
- return Err(EncodeSliceError::OutputSliceTooSmall);
- }
+ let b64_output = &mut output_buf[0..encoded_size];
- let b64_output = &mut output_buf[0..encoded_size];
+ encode_with_padding(input_bytes, b64_output, engine, encoded_size);
- encode_with_padding(input_bytes, b64_output, self, encoded_size);
+ Ok(encoded_size)
+ }
- Ok(encoded_size)
+ inner(self, input.as_ref(), output_buf)
}
/// Decode the input into a new `Vec`.
@@ -219,18 +242,25 @@ pub trait Engine: Send + Sync {
/// println!("{:?}", bytes_url);
/// ```
#[cfg(any(feature = "alloc", feature = "std", test))]
+ #[inline]
fn decode<T: AsRef<[u8]>>(&self, input: T) -> Result<Vec<u8>, DecodeError> {
- let input_bytes = input.as_ref();
+ fn inner<E>(engine: &E, input_bytes: &[u8]) -> Result<Vec<u8>, DecodeError>
+ where
+ E: Engine + ?Sized,
+ {
+ let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
+ let mut buffer = vec![0; estimate.decoded_len_estimate()];
- let estimate = self.internal_decoded_len_estimate(input_bytes.len());
- let mut buffer = vec![0; estimate.decoded_len_estimate()];
+ let bytes_written = engine
+ .internal_decode(input_bytes, &mut buffer, estimate)?
+ .decoded_len;
- let bytes_written = self
- .internal_decode(input_bytes, &mut buffer, estimate)?
- .decoded_len;
- buffer.truncate(bytes_written);
+ buffer.truncate(bytes_written);
- Ok(buffer)
+ Ok(buffer)
+ }
+
+ inner(self, input.as_ref())
}
/// Decode the `input` into the supplied `buffer`.
@@ -264,30 +294,38 @@ pub trait Engine: Send + Sync {
/// }
/// ```
#[cfg(any(feature = "alloc", feature = "std", test))]
+ #[inline]
fn decode_vec<T: AsRef<[u8]>>(
&self,
input: T,
buffer: &mut Vec<u8>,
) -> Result<(), DecodeError> {
- let input_bytes = input.as_ref();
+ fn inner<E>(engine: &E, input_bytes: &[u8], buffer: &mut Vec<u8>) -> Result<(), DecodeError>
+ where
+ E: Engine + ?Sized,
+ {
+ let starting_output_len = buffer.len();
+ let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
+
+ let total_len_estimate = estimate
+ .decoded_len_estimate()
+ .checked_add(starting_output_len)
+ .expect("Overflow when calculating output buffer length");
- let starting_output_len = buffer.len();
+ buffer.resize(total_len_estimate, 0);
- let estimate = self.internal_decoded_len_estimate(input_bytes.len());
- let total_len_estimate = estimate
- .decoded_len_estimate()
- .checked_add(starting_output_len)
- .expect("Overflow when calculating output buffer length");
- buffer.resize(total_len_estimate, 0);
+ let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
- let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
- let bytes_written = self
- .internal_decode(input_bytes, buffer_slice, estimate)?
- .decoded_len;
+ let bytes_written = engine
+ .internal_decode(input_bytes, buffer_slice, estimate)?
+ .decoded_len;
- buffer.truncate(starting_output_len + bytes_written);
+ buffer.truncate(starting_output_len + bytes_written);
+
+ Ok(())
+ }
- Ok(())
+ inner(self, input.as_ref(), buffer)
}
/// Decode the input into the provided output slice.
@@ -301,21 +339,33 @@ pub trait Engine: Send + Sync {
///
/// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error
/// if the output buffer is too small.
+ #[inline]
fn decode_slice<T: AsRef<[u8]>>(
&self,
input: T,
output: &mut [u8],
) -> Result<usize, DecodeSliceError> {
- let input_bytes = input.as_ref();
+ fn inner<E>(
+ engine: &E,
+ input_bytes: &[u8],
+ output: &mut [u8],
+ ) -> Result<usize, DecodeSliceError>
+ where
+ E: Engine + ?Sized,
+ {
+ let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
+
+ if output.len() < estimate.decoded_len_estimate() {
+ return Err(DecodeSliceError::OutputSliceTooSmall);
+ }
- let estimate = self.internal_decoded_len_estimate(input_bytes.len());
- if output.len() < estimate.decoded_len_estimate() {
- return Err(DecodeSliceError::OutputSliceTooSmall);
+ engine
+ .internal_decode(input_bytes, output, estimate)
+ .map_err(|e| e.into())
+ .map(|dm| dm.decoded_len)
}
- self.internal_decode(input_bytes, output, estimate)
- .map_err(|e| e.into())
- .map(|dm| dm.decoded_len)
+ inner(self, input.as_ref(), output)
}
/// Decode the input into the provided output slice.
@@ -332,19 +382,26 @@ pub trait Engine: Send + Sync {
/// # Panics
///
/// Panics if the provided output buffer is too small for the decoded data.
+ #[inline]
fn decode_slice_unchecked<T: AsRef<[u8]>>(
&self,
input: T,
output: &mut [u8],
) -> Result<usize, DecodeError> {
- let input_bytes = input.as_ref();
-
- self.internal_decode(
- input_bytes,
- output,
- self.internal_decoded_len_estimate(input_bytes.len()),
- )
- .map(|dm| dm.decoded_len)
+ fn inner<E>(engine: &E, input_bytes: &[u8], output: &mut [u8]) -> Result<usize, DecodeError>
+ where
+ E: Engine + ?Sized,
+ {
+ engine
+ .internal_decode(
+ input_bytes,
+ output,
+ engine.internal_decoded_len_estimate(input_bytes.len()),
+ )
+ .map(|dm| dm.decoded_len)
+ }
+
+ inner(self, input.as_ref(), output)
}
}
diff --git a/vendor/base64/src/engine/tests.rs b/vendor/base64/src/engine/tests.rs
index 6430b35a9..b04800552 100644
--- a/vendor/base64/src/engine/tests.rs
+++ b/vendor/base64/src/engine/tests.rs
@@ -623,7 +623,7 @@ fn decode_padding_before_final_non_padding_char_error_invalid_byte<E: EngineWrap
let mut rng = seeded_rng();
// the different amounts of proper padding, w/ offset from end for the last non-padding char
- let suffixes = vec![("/w==", 2), ("iYu=", 1), ("zzzz", 0)];
+ let suffixes = [("/w==", 2), ("iYu=", 1), ("zzzz", 0)];
let prefix_quads_range = distributions::Uniform::from(0..=256);
@@ -869,7 +869,7 @@ fn decode_pad_mode_requires_canonical_accepts_canonical<E: EngineWrapper>(engine
fn decode_pad_mode_requires_canonical_rejects_non_canonical<E: EngineWrapper>(engine_wrapper: E) {
let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical);
- let suffixes = vec!["/w", "/w=", "iYU"];
+ let suffixes = ["/w", "/w=", "iYU"];
for num_prefix_quads in 0..256 {
for &suffix in suffixes.iter() {
let mut encoded = "AAAA".repeat(num_prefix_quads);
@@ -896,7 +896,7 @@ fn decode_pad_mode_requires_no_padding_accepts_no_padding<E: EngineWrapper>(engi
fn decode_pad_mode_requires_no_padding_rejects_any_padding<E: EngineWrapper>(engine_wrapper: E) {
let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone);
- let suffixes = vec!["/w=", "/w==", "iYU="];
+ let suffixes = ["/w=", "/w==", "iYU="];
for num_prefix_quads in 0..256 {
for &suffix in suffixes.iter() {
let mut encoded = "AAAA".repeat(num_prefix_quads);
diff --git a/vendor/base64/src/write/encoder_string_writer.rs b/vendor/base64/src/write/encoder_string_writer.rs
index 9394dc9bf..9c02bcde8 100644
--- a/vendor/base64/src/write/encoder_string_writer.rs
+++ b/vendor/base64/src/write/encoder_string_writer.rs
@@ -44,11 +44,6 @@ use std::io;
/// assert_eq!("base64: YXNkZg==", &buf);
/// ```
///
-/// # Panics
-///
-/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without
-/// error is invalid and will panic.
-///
/// # Performance
///
/// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain
@@ -144,6 +139,7 @@ mod tests {
engine::Engine, tests::random_engine, write::encoder_string_writer::EncoderStringWriter,
};
use rand::Rng;
+ use std::cmp;
use std::io::Write;
#[test]
@@ -158,9 +154,8 @@ mod tests {
orig_data.clear();
normal_encoded.clear();
- for _ in 0..size {
- orig_data.push(rng.gen());
- }
+ orig_data.resize(size, 0);
+ rng.fill(&mut orig_data[..]);
let engine = random_engine(&mut rng);
engine.encode_string(&orig_data, &mut normal_encoded);
@@ -175,4 +170,38 @@ mod tests {
assert_eq!(normal_encoded, stream_encoded);
}
}
+ #[test]
+ fn incremental_writes() {
+ let mut rng = rand::thread_rng();
+ let mut orig_data = Vec::<u8>::new();
+ let mut normal_encoded = String::new();
+
+ let size = 5_000;
+
+ for _ in 0..size {
+ orig_data.clear();
+ normal_encoded.clear();
+
+ orig_data.resize(size, 0);
+ rng.fill(&mut orig_data[..]);
+
+ let engine = random_engine(&mut rng);
+ engine.encode_string(&orig_data, &mut normal_encoded);
+
+ let mut stream_encoder = EncoderStringWriter::new(&engine);
+ // write small nibbles of data
+ let mut offset = 0;
+ while offset < size {
+ let nibble_size = cmp::min(rng.gen_range(0..=64), size - offset);
+ let len = stream_encoder
+ .write(&orig_data[offset..offset + nibble_size])
+ .unwrap();
+ offset += len;
+ }
+
+ let stream_encoded = stream_encoder.into_inner();
+
+ assert_eq!(normal_encoded, stream_encoded);
+ }
+ }
}
diff --git a/vendor/base64/src/write/encoder_tests.rs b/vendor/base64/src/write/encoder_tests.rs
index ce76d631e..1f1a1650a 100644
--- a/vendor/base64/src/write/encoder_tests.rs
+++ b/vendor/base64/src/write/encoder_tests.rs
@@ -358,7 +358,7 @@ fn retrying_writes_that_error_with_interrupted_works() {
Ok(_) => break,
Err(e) => match e.kind() {
io::ErrorKind::Interrupted => continue,
- _ => Err(e).unwrap(), // bail
+ _ => panic!("{:?}", e), // bail
},
}
}