summary | refs | log | tree | commit | diff | stats
path: root/vendor/base64/src/read/decoder.rs
diff options
context:
space:
mode:
author: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-30 03:57:31 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-30 03:57:31 +0000
commit: dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree: ab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/base64/src/read/decoder.rs
parent: Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
download: rustc-dc0db358abe19481e475e10c32149b53370f1a1c.tar.xz
          rustc-dc0db358abe19481e475e10c32149b53370f1a1c.zip
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/base64/src/read/decoder.rs')
-rw-r--r--  vendor/base64/src/read/decoder.rs | 65
1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/vendor/base64/src/read/decoder.rs b/vendor/base64/src/read/decoder.rs
index 4888c9c4e..b656ae3d2 100644
--- a/vendor/base64/src/read/decoder.rs
+++ b/vendor/base64/src/read/decoder.rs
@@ -1,4 +1,4 @@
-use crate::{engine::Engine, DecodeError};
+use crate::{engine::Engine, DecodeError, PAD_BYTE};
use std::{cmp, fmt, io};
// This should be large, but it has to fit on the stack.
@@ -46,13 +46,15 @@ pub struct DecoderReader<'e, E: Engine, R: io::Read> {
// Technically we only need to hold 2 bytes but then we'd need a separate temporary buffer to
// decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest
// into here, which seems like a lot of complexity for 1 extra byte of storage.
- decoded_buffer: [u8; 3],
+ decoded_buffer: [u8; DECODED_CHUNK_SIZE],
// index of start of decoded data
decoded_offset: usize,
// length of decoded data
decoded_len: usize,
// used to provide accurate offsets in errors
total_b64_decoded: usize,
+ // offset of previously seen padding, if any
+ padding_offset: Option<usize>,
}
impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> {
@@ -64,6 +66,7 @@ impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> {
.field("decoded_offset", &self.decoded_offset)
.field("decoded_len", &self.decoded_len)
.field("total_b64_decoded", &self.total_b64_decoded)
+ .field("padding_offset", &self.padding_offset)
.finish()
}
}
@@ -81,6 +84,7 @@ impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
decoded_offset: 0,
decoded_len: 0,
total_b64_decoded: 0,
+ padding_offset: None,
}
}
@@ -127,20 +131,28 @@ impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
/// caller's responsibility to choose the number of b64 bytes to decode correctly.
///
/// Returns a Result with the number of decoded bytes written to `buf`.
- fn decode_to_buf(&mut self, num_bytes: usize, buf: &mut [u8]) -> io::Result<usize> {
- debug_assert!(self.b64_len >= num_bytes);
+ fn decode_to_buf(&mut self, b64_len_to_decode: usize, buf: &mut [u8]) -> io::Result<usize> {
+ debug_assert!(self.b64_len >= b64_len_to_decode);
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
debug_assert!(!buf.is_empty());
- let decoded = self
+ let b64_to_decode = &self.b64_buffer[self.b64_offset..self.b64_offset + b64_len_to_decode];
+ let decode_metadata = self
.engine
.internal_decode(
- &self.b64_buffer[self.b64_offset..self.b64_offset + num_bytes],
+ b64_to_decode,
buf,
- self.engine.internal_decoded_len_estimate(num_bytes),
+ self.engine.internal_decoded_len_estimate(b64_len_to_decode),
)
.map_err(|e| match e {
DecodeError::InvalidByte(offset, byte) => {
+ // This can be incorrect, but not in a way that probably matters to anyone:
+ // if there was padding handled in a previous decode, and we are now getting
+ // InvalidByte due to more padding, we should arguably report InvalidByte with
+ // PAD_BYTE at the original padding position (`self.padding_offset`), but we
+ // don't have a good way to tie those two cases together, so instead we
+ // just report the invalid byte as if the previous padding, and its possibly
+ // related downgrade to a now invalid byte, didn't happen.
DecodeError::InvalidByte(self.total_b64_decoded + offset, byte)
}
DecodeError::InvalidLength => DecodeError::InvalidLength,
@@ -151,13 +163,27 @@ impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
})
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
- self.total_b64_decoded += num_bytes;
- self.b64_offset += num_bytes;
- self.b64_len -= num_bytes;
+ if let Some(offset) = self.padding_offset {
+ // we've already seen padding
+ if decode_metadata.decoded_len > 0 {
+ // we read more after already finding padding; report error at first padding byte
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ DecodeError::InvalidByte(offset, PAD_BYTE),
+ ));
+ }
+ }
+
+ self.padding_offset = self.padding_offset.or(decode_metadata
+ .padding_offset
+ .map(|offset| self.total_b64_decoded + offset));
+ self.total_b64_decoded += b64_len_to_decode;
+ self.b64_offset += b64_len_to_decode;
+ self.b64_len -= b64_len_to_decode;
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
- Ok(decoded)
+ Ok(decode_metadata.decoded_len)
}
/// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded
@@ -205,9 +231,9 @@ impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
self.decoded_offset < DECODED_CHUNK_SIZE
});
- // We shouldn't ever decode into here when we can't immediately write at least one byte into
- // the provided buf, so the effective length should only be 3 momentarily between when we
- // decode and when we copy into the target buffer.
+ // We shouldn't ever decode into decoded_buffer when we can't immediately write at least one
+ // byte into the provided buf, so the effective length should only be 3 momentarily between
+ // when we decode and when we copy into the target buffer.
debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE);
@@ -217,20 +243,15 @@ impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
} else {
let mut at_eof = false;
while self.b64_len < BASE64_CHUNK_SIZE {
- // Work around lack of copy_within, which is only present in 1.37
// Copy any bytes we have to the start of the buffer.
- // We know we have < 1 chunk, so we can use a tiny tmp buffer.
- let mut memmove_buf = [0_u8; BASE64_CHUNK_SIZE];
- memmove_buf[..self.b64_len].copy_from_slice(
- &self.b64_buffer[self.b64_offset..self.b64_offset + self.b64_len],
- );
- self.b64_buffer[0..self.b64_len].copy_from_slice(&memmove_buf[..self.b64_len]);
+ self.b64_buffer
+ .copy_within(self.b64_offset..self.b64_offset + self.b64_len, 0);
self.b64_offset = 0;
// then fill in more data
let read = self.read_from_delegate()?;
if read == 0 {
- // we never pass in an empty buf, so 0 => we've hit EOF
+ // we never read into an empty buf, so 0 => we've hit EOF
at_eof = true;
break;
}