Diffstat (limited to 'library/std/src/io/buffered')
-rw-r--r--  library/std/src/io/buffered/bufreader.rs         | 22
-rw-r--r--  library/std/src/io/buffered/bufreader/buffer.rs  | 27
-rw-r--r--  library/std/src/io/buffered/tests.rs             | 58
3 files changed, 79 insertions(+), 28 deletions(-)
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index f7fbaa9c2..4f339a18a 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -2,7 +2,7 @@ mod buffer;
use crate::fmt;
use crate::io::{
- self, BufRead, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
+ self, BorrowedCursor, BufRead, IoSliceMut, Read, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
};
use buffer::Buffer;
@@ -224,6 +224,14 @@ impl<R> BufReader<R> {
}
}
+// This is only used by a test which asserts that the initialization-tracking is correct.
+#[cfg(test)]
+impl<R> BufReader<R> {
+ pub fn initialized(&self) -> usize {
+ self.buf.initialized()
+ }
+}
+
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
@@ -266,21 +274,21 @@ impl<R: Read> Read for BufReader<R> {
Ok(nread)
}
- fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+ fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
- if self.buf.pos() == self.buf.filled() && buf.remaining() >= self.capacity() {
+ if self.buf.pos() == self.buf.filled() && cursor.capacity() >= self.capacity() {
self.discard_buffer();
- return self.inner.read_buf(buf);
+ return self.inner.read_buf(cursor);
}
- let prev = buf.filled_len();
+ let prev = cursor.written();
let mut rem = self.fill_buf()?;
- rem.read_buf(buf)?;
+ rem.read_buf(cursor.reborrow())?;
- self.consume(buf.filled_len() - prev); //slice impl of read_buf known to never unfill buf
+ self.consume(cursor.written() - prev); //slice impl of read_buf known to never unfill buf
Ok(())
}
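As a hedged illustration of the cursor-based API this hunk adopts (not part of the patch): a caller builds a `BorrowedBuf` over possibly-uninitialized storage and hands `read_buf` a fresh `BorrowedCursor` from `unfilled()` on each call. The function name `read_some` and the 64-byte buffer are hypothetical; `read_buf`/`BorrowedBuf` were nightly-only under the `read_buf` feature gate at the time of this change, so the gate name may differ on later toolchains.

    #![feature(read_buf)]

    use std::io::{BorrowedBuf, Read};
    use std::mem::MaybeUninit;

    fn read_some<R: Read>(reader: &mut R) -> std::io::Result<usize> {
        // Storage may stay uninitialized until the reader writes into it.
        let mut storage = [MaybeUninit::<u8>::uninit(); 64];
        let mut buf: BorrowedBuf<'_> = storage.as_mut_slice().into();
        // `unfilled()` yields a `BorrowedCursor` over the not-yet-filled tail.
        reader.read_buf(buf.unfilled())?;
        // Only the bytes the reader actually wrote are exposed as `&[u8]`.
        Ok(buf.filled().len())
    }
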
diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs
index 8ae01f3b0..e9e29d60c 100644
--- a/library/std/src/io/buffered/bufreader/buffer.rs
+++ b/library/std/src/io/buffered/bufreader/buffer.rs
@@ -9,7 +9,7 @@
/// that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
/// without encountering any runtime bounds checks.
use crate::cmp;
-use crate::io::{self, Read, ReadBuf};
+use crate::io::{self, BorrowedBuf, Read};
use crate::mem::MaybeUninit;
pub struct Buffer {
@@ -20,13 +20,19 @@ pub struct Buffer {
// Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
// initialized with bytes from a read.
filled: usize,
+ // This is the max number of bytes returned across all `fill_buf` calls. We track this so that we
+ // can accurately tell `read_buf` how many bytes of buf are initialized, to bypass as much of its
+ defensive initialization as possible. Note that while this is often the same as `filled`, it
+ // doesn't need to be. Calls to `fill_buf` are not required to actually fill the buffer, and
+ // omitting this is a huge perf regression for `Read` impls that do not.
+ initialized: usize,
}
impl Buffer {
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
let buf = Box::new_uninit_slice(capacity);
- Self { buf, pos: 0, filled: 0 }
+ Self { buf, pos: 0, filled: 0, initialized: 0 }
}
#[inline]
@@ -51,6 +57,12 @@ impl Buffer {
self.pos
}
+ // This is only used by a test which asserts that the initialization-tracking is correct.
+ #[cfg(test)]
+ pub fn initialized(&self) -> usize {
+ self.initialized
+ }
+
#[inline]
pub fn discard_buffer(&mut self) {
self.pos = 0;
@@ -93,12 +105,17 @@ impl Buffer {
if self.pos >= self.filled {
debug_assert!(self.pos == self.filled);
- let mut readbuf = ReadBuf::uninit(&mut self.buf);
+ let mut buf = BorrowedBuf::from(&mut *self.buf);
+ // SAFETY: the first `self.initialized` bytes will always have been initialized.
+ unsafe {
+ buf.set_init(self.initialized);
+ }
- reader.read_buf(&mut readbuf)?;
+ reader.read_buf(buf.unfilled())?;
- self.filled = readbuf.filled_len();
self.pos = 0;
+ self.filled = buf.len();
+ self.initialized = buf.init_len();
}
Ok(self.buffer())
}
diff --git a/library/std/src/io/buffered/tests.rs b/library/std/src/io/buffered/tests.rs
index fe45b1326..f4e688eb9 100644
--- a/library/std/src/io/buffered/tests.rs
+++ b/library/std/src/io/buffered/tests.rs
@@ -1,5 +1,7 @@
use crate::io::prelude::*;
-use crate::io::{self, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, ReadBuf, SeekFrom};
+use crate::io::{
+ self, BorrowedBuf, BufReader, BufWriter, ErrorKind, IoSlice, LineWriter, SeekFrom,
+};
use crate::mem::MaybeUninit;
use crate::panic;
use crate::sync::atomic::{AtomicUsize, Ordering};
@@ -61,48 +63,48 @@ fn test_buffered_reader_read_buf() {
let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
let mut reader = BufReader::with_capacity(2, inner);
- let mut buf = [MaybeUninit::uninit(); 3];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 3];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [5, 6, 7]);
assert_eq!(reader.buffer(), []);
- let mut buf = [MaybeUninit::uninit(); 2];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 2];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [0, 1]);
assert_eq!(reader.buffer(), []);
- let mut buf = [MaybeUninit::uninit(); 1];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 1];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [2]);
assert_eq!(reader.buffer(), [3]);
- let mut buf = [MaybeUninit::uninit(); 3];
- let mut buf = ReadBuf::uninit(&mut buf);
+ let buf: &mut [_] = &mut [MaybeUninit::uninit(); 3];
+ let mut buf: BorrowedBuf<'_> = buf.into();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [3]);
assert_eq!(reader.buffer(), []);
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
assert_eq!(buf.filled(), [3, 4]);
assert_eq!(reader.buffer(), []);
buf.clear();
- reader.read_buf(&mut buf).unwrap();
+ reader.read_buf(buf.unfilled()).unwrap();
- assert_eq!(buf.filled_len(), 0);
+ assert!(buf.filled().is_empty());
}
#[test]
@@ -1037,3 +1039,27 @@ fn single_formatted_write() {
writeln!(&mut writer, "{}, {}!", "hello", "world").unwrap();
assert_eq!(writer.get_ref().events, [RecordedEvent::Write("hello, world!\n".to_string())]);
}
+
+#[test]
+fn bufreader_full_initialize() {
+ struct OneByteReader;
+ impl Read for OneByteReader {
+ fn read(&mut self, buf: &mut [u8]) -> crate::io::Result<usize> {
+ if buf.len() > 0 {
+ buf[0] = 0;
+ Ok(1)
+ } else {
+ Ok(0)
+ }
+ }
+ }
+ let mut reader = BufReader::new(OneByteReader);
+ // Nothing is initialized yet.
+ assert_eq!(reader.initialized(), 0);
+
+ let buf = reader.fill_buf().unwrap();
+ // We read one byte...
+ assert_eq!(buf.len(), 1);
+ // But we initialized the whole buffer!
+ assert_eq!(reader.initialized(), reader.capacity());
+}
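
For contrast with `OneByteReader` above, here is a hedged sketch (the type `OneByteNoInit` is hypothetical, not part of the test suite) of a reader that overrides `read_buf` so only the byte it writes becomes initialized. The default `Read::read_buf`, which `OneByteReader` falls back to, zero-fills the entire remaining buffer before delegating to `read`, which is why the test above sees the whole capacity initialized; with an override like this, `reader.initialized()` after one `fill_buf` would be expected to equal 1 instead.

    #![feature(read_buf)]

    use std::io::{BorrowedCursor, Read};

    struct OneByteNoInit;

    impl Read for OneByteNoInit {
        fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
            if !buf.is_empty() {
                buf[0] = 0;
                Ok(1)
            } else {
                Ok(0)
            }
        }

        fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> std::io::Result<()> {
            // Write exactly one byte through the cursor; the rest of the
            // buffer is left untouched and therefore uninitialized.
            if cursor.capacity() > 0 {
                cursor.append(&[0]);
            }
            Ok(())
        }
    }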