author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit     9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree       3fcebf40ed70e581d776a8a4c65923e8ec20e026 /vendor/bytes/src
parent     Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/bytes/src')
-rw-r--r--  vendor/bytes/src/buf/buf_impl.rs      323
-rw-r--r--  vendor/bytes/src/buf/buf_mut.rs       428
-rw-r--r--  vendor/bytes/src/buf/chain.rs          26
-rw-r--r--  vendor/bytes/src/buf/iter.rs            4
-rw-r--r--  vendor/bytes/src/buf/take.rs           12
-rw-r--r--  vendor/bytes/src/buf/uninit_slice.rs   39
-rw-r--r--  vendor/bytes/src/bytes.rs             278
-rw-r--r--  vendor/bytes/src/bytes_mut.rs         349
-rw-r--r--  vendor/bytes/src/fmt/debug.rs           6
-rw-r--r--  vendor/bytes/src/lib.rs                 2
-rw-r--r--  vendor/bytes/src/loom.rs                4
11 files changed, 1334 insertions(+), 137 deletions(-)
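
The bulk of the diff below adds a native-endian (`*_ne`) variant for every endianness-specific getter on `Buf` and writer on `BufMut`, plus the matching forwarding impls in `deref_forward_buf`/`deref_forward_bufmut`. A minimal round-trip sketch of the new surface, assuming a bytes build that includes these methods (as vendored here):

use bytes::{Buf, BufMut};

fn main() {
    // Write with the new native-endian writer...
    let mut buf = Vec::new();
    buf.put_u32_ne(0x0809A0A1);

    // ...the wire layout depends on the host byte order.
    if cfg!(target_endian = "big") {
        assert_eq!(buf, b"\x08\x09\xA0\xA1");
    } else {
        assert_eq!(buf, b"\xA1\xA0\x09\x08");
    }

    // Read it back with the matching native-endian getter.
    let mut rd: &[u8] = &buf;
    assert_eq!(0x0809A0A1, rd.get_u32_ne());
}

On a little-endian host the `_ne` methods behave like the `_le` family, on a big-endian host like the `_be` defaults; the n-byte forms (`get_uint_ne`, `put_int_ne`, ...) dispatch on `cfg!(target_endian)` in the same way.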
diff --git a/vendor/bytes/src/buf/buf_impl.rs b/vendor/bytes/src/buf/buf_impl.rs index 16ad8a7ee..366cfc989 100644 --- a/vendor/bytes/src/buf/buf_impl.rs +++ b/vendor/bytes/src/buf/buf_impl.rs @@ -127,6 +127,9 @@ pub trait Buf { /// This function should never panic. Once the end of the buffer is reached, /// i.e., `Buf::remaining` returns 0, calls to `chunk()` should return an /// empty slice. + // The `chunk` method was previously called `bytes`. This alias makes the rename + // more easily discoverable. + #[cfg_attr(docsrs, doc(alias = "bytes"))] fn chunk(&self) -> &[u8]; /// Fills `dst` with potentially multiple slices starting at `self`'s @@ -157,6 +160,7 @@ pub trait Buf { /// /// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize { if dst.is_empty() { return 0; @@ -351,6 +355,29 @@ pub trait Buf { buf_get_impl!(self, u16::from_le_bytes); } + /// Gets an unsigned 16 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x08\x09 hello", + /// false => b"\x09\x08 hello", + /// }; + /// assert_eq!(0x0809, buf.get_u16_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u16_ne(&mut self) -> u16 { + buf_get_impl!(self, u16::from_ne_bytes); + } + /// Gets a signed 16 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 2. @@ -391,6 +418,29 @@ pub trait Buf { buf_get_impl!(self, i16::from_le_bytes); } + /// Gets a signed 16 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x08\x09 hello", + /// false => b"\x09\x08 hello", + /// }; + /// assert_eq!(0x0809, buf.get_i16_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i16_ne(&mut self) -> i16 { + buf_get_impl!(self, i16::from_ne_bytes); + } + /// Gets an unsigned 32 bit integer from `self` in the big-endian byte order. /// /// The current position is advanced by 4. @@ -431,6 +481,29 @@ pub trait Buf { buf_get_impl!(self, u32::from_le_bytes); } + /// Gets an unsigned 32 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x08\x09\xA0\xA1 hello", + /// false => b"\xA1\xA0\x09\x08 hello", + /// }; + /// assert_eq!(0x0809A0A1, buf.get_u32_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u32_ne(&mut self) -> u32 { + buf_get_impl!(self, u32::from_ne_bytes); + } + /// Gets a signed 32 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 4. @@ -471,6 +544,29 @@ pub trait Buf { buf_get_impl!(self, i32::from_le_bytes); } + /// Gets a signed 32 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 4. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x08\x09\xA0\xA1 hello", + /// false => b"\xA1\xA0\x09\x08 hello", + /// }; + /// assert_eq!(0x0809A0A1, buf.get_i32_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i32_ne(&mut self) -> i32 { + buf_get_impl!(self, i32::from_ne_bytes); + } + /// Gets an unsigned 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. @@ -511,6 +607,29 @@ pub trait Buf { buf_get_impl!(self, u64::from_le_bytes); } + /// Gets an unsigned 64 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", + /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x0102030405060708, buf.get_u64_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u64_ne(&mut self) -> u64 { + buf_get_impl!(self, u64::from_ne_bytes); + } + /// Gets a signed 64 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 8. @@ -551,6 +670,29 @@ pub trait Buf { buf_get_impl!(self, i64::from_le_bytes); } + /// Gets a signed 64 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello", + /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x0102030405060708, buf.get_i64_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i64_ne(&mut self) -> i64 { + buf_get_impl!(self, i64::from_ne_bytes); + } + /// Gets an unsigned 128 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 16. @@ -591,6 +733,29 @@ pub trait Buf { buf_get_impl!(self, u128::from_le_bytes); } + /// Gets an unsigned 128 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", + /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_u128_ne(&mut self) -> u128 { + buf_get_impl!(self, u128::from_ne_bytes); + } + /// Gets a signed 128 bit integer from `self` in big-endian byte order. /// /// The current position is advanced by 16. @@ -631,6 +796,29 @@ pub trait Buf { buf_get_impl!(self, i128::from_le_bytes); } + /// Gets a signed 128 bit integer from `self` in native-endian byte order. + /// + /// The current position is advanced by 16. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello", + /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_i128_ne(&mut self) -> i128 { + buf_get_impl!(self, i128::from_ne_bytes); + } + /// Gets an unsigned n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. @@ -671,6 +859,33 @@ pub trait Buf { buf_get_impl!(le => self, u64, nbytes); } + /// Gets an unsigned n-byte integer from `self` in native-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03 hello", + /// false => b"\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x010203, buf.get_uint_ne(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_uint_ne(&mut self, nbytes: usize) -> u64 { + if cfg!(target_endian = "big") { + self.get_uint(nbytes) + } else { + self.get_uint_le(nbytes) + } + } + /// Gets a signed n-byte integer from `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. @@ -711,6 +926,33 @@ pub trait Buf { buf_get_impl!(le => self, i64, nbytes); } + /// Gets a signed n-byte integer from `self` in native-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x01\x02\x03 hello", + /// false => b"\x03\x02\x01 hello", + /// }; + /// assert_eq!(0x010203, buf.get_int_ne(3)); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_int_ne(&mut self, nbytes: usize) -> i64 { + if cfg!(target_endian = "big") { + self.get_int(nbytes) + } else { + self.get_int_le(nbytes) + } + } + /// Gets an IEEE754 single-precision (4 bytes) floating point number from /// `self` in big-endian byte order. /// @@ -753,6 +995,30 @@ pub trait Buf { f32::from_bits(Self::get_u32_le(self)) } + /// Gets an IEEE754 single-precision (4 bytes) floating point number from + /// `self` in native-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x3F\x99\x99\x9A hello", + /// false => b"\x9A\x99\x99\x3F hello", + /// }; + /// assert_eq!(1.2f32, buf.get_f32_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f32_ne(&mut self) -> f32 { + f32::from_bits(Self::get_u32_ne(self)) + } + /// Gets an IEEE754 double-precision (8 bytes) floating point number from /// `self` in big-endian byte order. /// @@ -795,6 +1061,30 @@ pub trait Buf { f64::from_bits(Self::get_u64_le(self)) } + /// Gets an IEEE754 double-precision (8 bytes) floating point number from + /// `self` in native-endian byte order. + /// + /// The current position is advanced by 8. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::Buf; + /// + /// let mut buf: &[u8] = match cfg!(target_endian = "big") { + /// true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello", + /// false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello", + /// }; + /// assert_eq!(1.2f64, buf.get_f64_ne()); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining data in `self`. + fn get_f64_ne(&mut self) -> f64 { + f64::from_bits(Self::get_u64_ne(self)) + } + /// Consumes `len` bytes inside self and returns new instance of `Bytes` /// with this data. /// @@ -894,6 +1184,7 @@ pub trait Buf { /// assert_eq!(&dst[..11], &b"hello world"[..]); /// ``` #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn reader(self) -> Reader<Self> where Self: Sized, @@ -945,6 +1236,10 @@ macro_rules! deref_forward_buf { (**self).get_u16_le() } + fn get_u16_ne(&mut self) -> u16 { + (**self).get_u16_ne() + } + fn get_i16(&mut self) -> i16 { (**self).get_i16() } @@ -953,6 +1248,10 @@ macro_rules! deref_forward_buf { (**self).get_i16_le() } + fn get_i16_ne(&mut self) -> i16 { + (**self).get_i16_ne() + } + fn get_u32(&mut self) -> u32 { (**self).get_u32() } @@ -961,6 +1260,10 @@ macro_rules! deref_forward_buf { (**self).get_u32_le() } + fn get_u32_ne(&mut self) -> u32 { + (**self).get_u32_ne() + } + fn get_i32(&mut self) -> i32 { (**self).get_i32() } @@ -969,6 +1272,10 @@ macro_rules! deref_forward_buf { (**self).get_i32_le() } + fn get_i32_ne(&mut self) -> i32 { + (**self).get_i32_ne() + } + fn get_u64(&mut self) -> u64 { (**self).get_u64() } @@ -977,6 +1284,10 @@ macro_rules! deref_forward_buf { (**self).get_u64_le() } + fn get_u64_ne(&mut self) -> u64 { + (**self).get_u64_ne() + } + fn get_i64(&mut self) -> i64 { (**self).get_i64() } @@ -985,6 +1296,10 @@ macro_rules! deref_forward_buf { (**self).get_i64_le() } + fn get_i64_ne(&mut self) -> i64 { + (**self).get_i64_ne() + } + fn get_uint(&mut self, nbytes: usize) -> u64 { (**self).get_uint(nbytes) } @@ -993,6 +1308,10 @@ macro_rules! deref_forward_buf { (**self).get_uint_le(nbytes) } + fn get_uint_ne(&mut self, nbytes: usize) -> u64 { + (**self).get_uint_ne(nbytes) + } + fn get_int(&mut self, nbytes: usize) -> i64 { (**self).get_int(nbytes) } @@ -1001,6 +1320,10 @@ macro_rules! deref_forward_buf { (**self).get_int_le(nbytes) } + fn get_int_ne(&mut self, nbytes: usize) -> i64 { + (**self).get_int_ne(nbytes) + } + fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { (**self).copy_to_bytes(len) } diff --git a/vendor/bytes/src/buf/buf_mut.rs b/vendor/bytes/src/buf/buf_mut.rs index f736727b4..685fcc76b 100644 --- a/vendor/bytes/src/buf/buf_mut.rs +++ b/vendor/bytes/src/buf/buf_mut.rs @@ -33,6 +33,10 @@ pub unsafe trait BufMut { /// This value is greater than or equal to the length of the slice returned /// by `chunk_mut()`. /// + /// Writing to a `BufMut` may involve allocating more memory on the fly. + /// Implementations may fail before reaching the number of bytes indicated + /// by this method if they encounter an allocation failure. + /// /// # Examples /// /// ``` @@ -52,6 +56,10 @@ pub unsafe trait BufMut { /// Implementations of `remaining_mut` should ensure that the return value /// does not change unless a call is made to `advance_mut` or any other /// function that is documented to change the `BufMut`'s current position. + /// + /// # Note + /// + /// `remaining_mut` may return value smaller than actual available space. 
fn remaining_mut(&self) -> usize; /// Advance the internal cursor of the BufMut @@ -158,6 +166,12 @@ pub unsafe trait BufMut { /// `chunk_mut()` returning an empty slice implies that `remaining_mut()` will /// return 0 and `remaining_mut()` returning 0 implies that `chunk_mut()` will /// return an empty slice. + /// + /// This function may trigger an out-of-memory abort if it tries to allocate + /// memory and fails to do so. + // The `chunk_mut` method was previously called `bytes_mut`. This alias makes the + // rename more easily discoverable. + #[cfg_attr(docsrs, doc(alias = "bytes_mut"))] fn chunk_mut(&mut self) -> &mut UninitSlice; /// Transfer bytes into `self` from `src` and advance the cursor by the @@ -251,6 +265,37 @@ pub unsafe trait BufMut { } } + /// Put `cnt` bytes `val` into `self`. + /// + /// Logically equivalent to calling `self.put_u8(val)` `cnt` times, but may work faster. + /// + /// `self` must have at least `cnt` remaining capacity. + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut dst = [0; 6]; + /// + /// { + /// let mut buf = &mut dst[..]; + /// buf.put_bytes(b'a', 4); + /// + /// assert_eq!(2, buf.remaining_mut()); + /// } + /// + /// assert_eq!(b"aaaa\0\0", &dst); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_bytes(&mut self, val: u8, cnt: usize) { + for _ in 0..cnt { + self.put_u8(val); + } + } + /// Writes an unsigned 8 bit integer to `self`. /// /// The current position is advanced by 1. @@ -341,6 +386,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes an unsigned 16 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u16_ne(0x0809); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x08\x09"); + /// } else { + /// assert_eq!(buf, b"\x09\x08"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u16_ne(&mut self, n: u16) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes a signed 16 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 2. @@ -385,6 +456,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes a signed 16 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 2. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i16_ne(0x0809); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x08\x09"); + /// } else { + /// assert_eq!(buf, b"\x09\x08"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i16_ne(&mut self, n: i16) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes an unsigned 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. @@ -429,6 +526,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes an unsigned 32 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 4. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u32_ne(0x0809A0A1); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// } else { + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u32_ne(&mut self, n: u32) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes a signed 32 bit integer to `self` in big-endian byte order. /// /// The current position is advanced by 4. @@ -473,6 +596,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes a signed 32 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i32_ne(0x0809A0A1); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x08\x09\xA0\xA1"); + /// } else { + /// assert_eq!(buf, b"\xA1\xA0\x09\x08"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i32_ne(&mut self, n: i32) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes an unsigned 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. @@ -517,6 +666,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes an unsigned 64 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u64_ne(0x0102030405060708); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// } else { + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u64_ne(&mut self, n: u64) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes a signed 64 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 8. @@ -561,6 +736,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes a signed 64 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i64_ne(0x0102030405060708); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08"); + /// } else { + /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i64_ne(&mut self, n: i64) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes an unsigned 128 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 16. @@ -605,6 +806,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes an unsigned 128 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 16. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_u128_ne(0x01020304050607080910111213141516); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// } else { + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_u128_ne(&mut self, n: u128) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes a signed 128 bit integer to `self` in the big-endian byte order. /// /// The current position is advanced by 16. @@ -649,6 +876,32 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()) } + /// Writes a signed 128 bit integer to `self` in native-endian byte order. + /// + /// The current position is advanced by 16. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_i128_ne(0x01020304050607080910111213141516); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16"); + /// } else { + /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_i128_ne(&mut self, n: i128) { + self.put_slice(&n.to_ne_bytes()) + } + /// Writes an unsigned n-byte integer to `self` in big-endian byte order. /// /// The current position is advanced by `nbytes`. @@ -693,7 +946,7 @@ pub unsafe trait BufMut { self.put_slice(&n.to_le_bytes()[0..nbytes]); } - /// Writes a signed n-byte integer to `self` in big-endian byte order. + /// Writes an unsigned n-byte integer to `self` in the native-endian byte order. /// /// The current position is advanced by `nbytes`. /// @@ -703,19 +956,49 @@ pub unsafe trait BufMut { /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_int(0x010203, 3); - /// assert_eq!(buf, b"\x01\x02\x03"); + /// buf.put_uint_ne(0x010203, 3); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03"); + /// } else { + /// assert_eq!(buf, b"\x03\x02\x01"); + /// } /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in /// `self`. + fn put_uint_ne(&mut self, n: u64, nbytes: usize) { + if cfg!(target_endian = "big") { + self.put_uint(n, nbytes) + } else { + self.put_uint_le(n, nbytes) + } + } + + /// Writes low `nbytes` of a signed integer to `self` in big-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int(0x0504010203, 3); + /// assert_eq!(buf, b"\x01\x02\x03"); + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self` or if `nbytes` is greater than 8. fn put_int(&mut self, n: i64, nbytes: usize) { self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); } - /// Writes a signed n-byte integer to `self` in little-endian byte order. + /// Writes low `nbytes` of a signed integer to `self` in little-endian byte order. /// /// The current position is advanced by `nbytes`. 
/// @@ -725,18 +1008,48 @@ pub unsafe trait BufMut { /// use bytes::BufMut; /// /// let mut buf = vec![]; - /// buf.put_int_le(0x010203, 3); + /// buf.put_int_le(0x0504010203, 3); /// assert_eq!(buf, b"\x03\x02\x01"); /// ``` /// /// # Panics /// /// This function panics if there is not enough remaining capacity in - /// `self`. + /// `self` or if `nbytes` is greater than 8. fn put_int_le(&mut self, n: i64, nbytes: usize) { self.put_slice(&n.to_le_bytes()[0..nbytes]); } + /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order. + /// + /// The current position is advanced by `nbytes`. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_int_ne(0x010203, 3); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x01\x02\x03"); + /// } else { + /// assert_eq!(buf, b"\x03\x02\x01"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self` or if `nbytes` is greater than 8. + fn put_int_ne(&mut self, n: i64, nbytes: usize) { + if cfg!(target_endian = "big") { + self.put_int(n, nbytes) + } else { + self.put_int_le(n, nbytes) + } + } + /// Writes an IEEE754 single-precision (4 bytes) floating point number to /// `self` in big-endian byte order. /// @@ -783,6 +1096,33 @@ pub unsafe trait BufMut { self.put_u32_le(n.to_bits()); } + /// Writes an IEEE754 single-precision (4 bytes) floating point number to + /// `self` in native-endian byte order. + /// + /// The current position is advanced by 4. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f32_ne(1.2f32); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x3F\x99\x99\x9A"); + /// } else { + /// assert_eq!(buf, b"\x9A\x99\x99\x3F"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f32_ne(&mut self, n: f32) { + self.put_u32_ne(n.to_bits()); + } + /// Writes an IEEE754 double-precision (8 bytes) floating point number to /// `self` in big-endian byte order. /// @@ -829,6 +1169,33 @@ pub unsafe trait BufMut { self.put_u64_le(n.to_bits()); } + /// Writes an IEEE754 double-precision (8 bytes) floating point number to + /// `self` in native-endian byte order. + /// + /// The current position is advanced by 8. + /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut buf = vec![]; + /// buf.put_f64_ne(1.2f64); + /// if cfg!(target_endian = "big") { + /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33"); + /// } else { + /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F"); + /// } + /// ``` + /// + /// # Panics + /// + /// This function panics if there is not enough remaining capacity in + /// `self`. + fn put_f64_ne(&mut self, n: f64) { + self.put_u64_ne(n.to_bits()); + } + /// Creates an adaptor which can write at most `limit` bytes to `self`. /// /// # Examples @@ -872,6 +1239,7 @@ pub unsafe trait BufMut { /// assert_eq!(*buf, b"hello world"[..]); /// ``` #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] fn writer(self) -> Writer<Self> where Self: Sized, @@ -941,6 +1309,10 @@ macro_rules! deref_forward_bufmut { (**self).put_u16_le(n) } + fn put_u16_ne(&mut self, n: u16) { + (**self).put_u16_ne(n) + } + fn put_i16(&mut self, n: i16) { (**self).put_i16(n) } @@ -949,6 +1321,10 @@ macro_rules! 
deref_forward_bufmut { (**self).put_i16_le(n) } + fn put_i16_ne(&mut self, n: i16) { + (**self).put_i16_ne(n) + } + fn put_u32(&mut self, n: u32) { (**self).put_u32(n) } @@ -957,6 +1333,10 @@ macro_rules! deref_forward_bufmut { (**self).put_u32_le(n) } + fn put_u32_ne(&mut self, n: u32) { + (**self).put_u32_ne(n) + } + fn put_i32(&mut self, n: i32) { (**self).put_i32(n) } @@ -965,6 +1345,10 @@ macro_rules! deref_forward_bufmut { (**self).put_i32_le(n) } + fn put_i32_ne(&mut self, n: i32) { + (**self).put_i32_ne(n) + } + fn put_u64(&mut self, n: u64) { (**self).put_u64(n) } @@ -973,6 +1357,10 @@ macro_rules! deref_forward_bufmut { (**self).put_u64_le(n) } + fn put_u64_ne(&mut self, n: u64) { + (**self).put_u64_ne(n) + } + fn put_i64(&mut self, n: i64) { (**self).put_i64(n) } @@ -980,6 +1368,10 @@ macro_rules! deref_forward_bufmut { fn put_i64_le(&mut self, n: i64) { (**self).put_i64_le(n) } + + fn put_i64_ne(&mut self, n: i64) { + (**self).put_i64_ne(n) + } }; } @@ -1009,12 +1401,29 @@ unsafe impl BufMut for &mut [u8] { let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); *self = b; } + + #[inline] + fn put_slice(&mut self, src: &[u8]) { + self[..src.len()].copy_from_slice(src); + unsafe { + self.advance_mut(src.len()); + } + } + + fn put_bytes(&mut self, val: u8, cnt: usize) { + assert!(self.remaining_mut() >= cnt); + unsafe { + ptr::write_bytes(self.as_mut_ptr(), val, cnt); + self.advance_mut(cnt); + } + } } unsafe impl BufMut for Vec<u8> { #[inline] fn remaining_mut(&self) -> usize { - usize::MAX - self.len() + // A vector can never have more than isize::MAX bytes + core::isize::MAX as usize - self.len() } #[inline] @@ -1072,6 +1481,11 @@ unsafe impl BufMut for Vec<u8> { fn put_slice(&mut self, src: &[u8]) { self.extend_from_slice(src); } + + fn put_bytes(&mut self, val: u8, cnt: usize) { + let new_len = self.len().checked_add(cnt).unwrap(); + self.resize(new_len, val); + } } // The existence of this function makes the compiler catch if the BufMut diff --git a/vendor/bytes/src/buf/chain.rs b/vendor/bytes/src/buf/chain.rs index d68bc2d0e..78979a123 100644 --- a/vendor/bytes/src/buf/chain.rs +++ b/vendor/bytes/src/buf/chain.rs @@ -1,5 +1,5 @@ use crate::buf::{IntoIter, UninitSlice}; -use crate::{Buf, BufMut}; +use crate::{Buf, BufMut, Bytes}; #[cfg(feature = "std")] use std::io::IoSlice; @@ -135,7 +135,7 @@ where U: Buf, { fn remaining(&self) -> usize { - self.a.remaining() + self.b.remaining() + self.a.remaining().checked_add(self.b.remaining()).unwrap() } fn chunk(&self) -> &[u8] { @@ -170,6 +170,24 @@ where n += self.b.chunks_vectored(&mut dst[n..]); n } + + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + let a_rem = self.a.remaining(); + if a_rem >= len { + self.a.copy_to_bytes(len) + } else if a_rem == 0 { + self.b.copy_to_bytes(len) + } else { + assert!( + len - a_rem <= self.b.remaining(), + "`len` greater than remaining" + ); + let mut ret = crate::BytesMut::with_capacity(len); + ret.put(&mut self.a); + ret.put((&mut self.b).take(len - a_rem)); + ret.freeze() + } + } } unsafe impl<T, U> BufMut for Chain<T, U> @@ -178,7 +196,9 @@ where U: BufMut, { fn remaining_mut(&self) -> usize { - self.a.remaining_mut() + self.b.remaining_mut() + self.a + .remaining_mut() + .saturating_add(self.b.remaining_mut()) } fn chunk_mut(&mut self) -> &mut UninitSlice { diff --git a/vendor/bytes/src/buf/iter.rs b/vendor/bytes/src/buf/iter.rs index 8914a40e8..c694e3d41 100644 --- a/vendor/bytes/src/buf/iter.rs +++ b/vendor/bytes/src/buf/iter.rs @@ -2,8 +2,6 @@ use crate::Buf; /// Iterator 
over the bytes contained by the buffer. /// -/// This struct is created by the [`iter`] method on [`Buf`]. -/// /// # Examples /// /// Basic usage: @@ -43,7 +41,7 @@ impl<T> IntoIter<T> { /// assert_eq!(iter.next(), Some(b'c')); /// assert_eq!(iter.next(), None); /// ``` - pub(crate) fn new(inner: T) -> IntoIter<T> { + pub fn new(inner: T) -> IntoIter<T> { IntoIter { inner } } diff --git a/vendor/bytes/src/buf/take.rs b/vendor/bytes/src/buf/take.rs index 1747f6e83..d3cb10ab6 100644 --- a/vendor/bytes/src/buf/take.rs +++ b/vendor/bytes/src/buf/take.rs @@ -1,11 +1,11 @@ -use crate::Buf; +use crate::{Buf, Bytes}; use core::cmp; /// A `Buf` adapter which limits the bytes read from an underlying buffer. /// /// This struct is generally created by calling `take()` on `Buf`. See -/// documentation of [`take()`](trait.BufExt.html#method.take) for more details. +/// documentation of [`take()`](trait.Buf.html#method.take) for more details. #[derive(Debug)] pub struct Take<T> { inner: T, @@ -144,4 +144,12 @@ impl<T: Buf> Buf for Take<T> { self.inner.advance(cnt); self.limit -= cnt; } + + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + assert!(len <= self.remaining(), "`len` greater than remaining"); + + let r = self.inner.copy_to_bytes(len); + self.limit -= len; + r + } } diff --git a/vendor/bytes/src/buf/uninit_slice.rs b/vendor/bytes/src/buf/uninit_slice.rs index 73f4e8924..3161a147e 100644 --- a/vendor/bytes/src/buf/uninit_slice.rs +++ b/vendor/bytes/src/buf/uninit_slice.rs @@ -22,6 +22,10 @@ use core::ops::{ pub struct UninitSlice([MaybeUninit<u8>]); impl UninitSlice { + pub(crate) fn from_slice(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice { + unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) } + } + /// Create a `&mut UninitSlice` from a pointer and a length. /// /// # Safety @@ -40,10 +44,11 @@ impl UninitSlice { /// /// let slice = unsafe { UninitSlice::from_raw_parts_mut(ptr, len) }; /// ``` + #[inline] pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice { let maybe_init: &mut [MaybeUninit<u8>] = core::slice::from_raw_parts_mut(ptr as *mut _, len); - &mut *(maybe_init as *mut [MaybeUninit<u8>] as *mut UninitSlice) + Self::from_slice(maybe_init) } /// Write a single byte at the specified offset. @@ -64,6 +69,7 @@ impl UninitSlice { /// /// assert_eq!(b"boo", &data[..]); /// ``` + #[inline] pub fn write_byte(&mut self, index: usize, byte: u8) { assert!(index < self.len()); @@ -90,6 +96,7 @@ impl UninitSlice { /// /// assert_eq!(b"bar", &data[..]); /// ``` + #[inline] pub fn copy_from_slice(&mut self, src: &[u8]) { use core::ptr; @@ -116,10 +123,37 @@ impl UninitSlice { /// let mut slice = &mut data[..]; /// let ptr = BufMut::chunk_mut(&mut slice).as_mut_ptr(); /// ``` + #[inline] pub fn as_mut_ptr(&mut self) -> *mut u8 { self.0.as_mut_ptr() as *mut _ } + /// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer. + /// + /// # Safety + /// + /// The caller **must not** read from the referenced memory and **must not** write + /// **uninitialized** bytes to the slice either. This is because `BufMut` implementation + /// that created the `UninitSlice` knows which parts are initialized. Writing uninitalized + /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined + /// behavior. 
+ /// + /// # Examples + /// + /// ``` + /// use bytes::BufMut; + /// + /// let mut data = [0, 1, 2]; + /// let mut slice = &mut data[..]; + /// unsafe { + /// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut(); + /// }; + /// ``` + #[inline] + pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit<u8>] { + &mut *(self as *mut _ as *mut [MaybeUninit<u8>]) + } + /// Returns the number of bytes in the slice. /// /// # Examples @@ -133,6 +167,7 @@ impl UninitSlice { /// /// assert_eq!(len, 3); /// ``` + #[inline] pub fn len(&self) -> usize { self.0.len() } @@ -150,6 +185,7 @@ macro_rules! impl_index { impl Index<$t> for UninitSlice { type Output = UninitSlice; + #[inline] fn index(&self, index: $t) -> &UninitSlice { let maybe_uninit: &[MaybeUninit<u8>] = &self.0[index]; unsafe { &*(maybe_uninit as *const [MaybeUninit<u8>] as *const UninitSlice) } @@ -157,6 +193,7 @@ macro_rules! impl_index { } impl IndexMut<$t> for UninitSlice { + #[inline] fn index_mut(&mut self, index: $t) -> &mut UninitSlice { let maybe_uninit: &mut [MaybeUninit<u8>] = &mut self.0[index]; unsafe { &mut *(maybe_uninit as *mut [MaybeUninit<u8>] as *mut UninitSlice) } diff --git a/vendor/bytes/src/bytes.rs b/vendor/bytes/src/bytes.rs index b1b35ea83..0404a72db 100644 --- a/vendor/bytes/src/bytes.rs +++ b/vendor/bytes/src/bytes.rs @@ -2,12 +2,18 @@ use core::iter::FromIterator; use core::ops::{Deref, RangeBounds}; use core::{cmp, fmt, hash, mem, ptr, slice, usize}; -use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec}; +use alloc::{ + alloc::{dealloc, Layout}, + borrow::Borrow, + boxed::Box, + string::String, + vec::Vec, +}; use crate::buf::IntoIter; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; +use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use crate::Buf; /// A cheaply cloneable and sliceable chunk of contiguous memory. @@ -26,7 +32,7 @@ use crate::Buf; /// All `Bytes` implementations must fulfill the following requirements: /// - They are cheaply cloneable and thereby shareable between an unlimited amount /// of components, for example by modifying a reference count. -/// - Instances can be sliced to refer to a subset of the the original buffer. +/// - Instances can be sliced to refer to a subset of the original buffer. /// /// ``` /// use bytes::Bytes; @@ -55,7 +61,7 @@ use crate::Buf; /// # Sharing /// /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define -/// how sharing/cloneing is implemented in detail. +/// how sharing/cloning is implemented in detail. /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for /// cloning the backing storage in order to share it behind between multiple /// `Bytes` instances. @@ -65,7 +71,7 @@ use crate::Buf; /// /// For `Bytes` implementations which point to a reference counted shared storage /// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the -/// the reference count. +/// reference count. /// /// Due to this mechanism, multiple `Bytes` instances may point to the same /// shared memory region. 
@@ -78,18 +84,18 @@ use crate::Buf; /// /// ```text /// -/// Arc ptrs +---------+ -/// ________________________ / | Bytes 2 | -/// / +---------+ -/// / +-----------+ | | -/// |_________/ | Bytes 1 | | | -/// | +-----------+ | | +/// Arc ptrs ┌─────────┐ +/// ________________________ / │ Bytes 2 │ +/// / └─────────┘ +/// / ┌───────────┐ | | +/// |_________/ │ Bytes 1 │ | | +/// | └───────────┘ | | /// | | | ___/ data | tail /// | data | tail |/ | /// v v v v -/// +-----+---------------------------------+-----+ -/// | Arc | | | | | -/// +-----+---------------------------------+-----+ +/// ┌─────┬─────┬───────────┬───────────────┬─────┐ +/// │ Arc │ │ │ │ │ +/// └─────┴─────┴───────────┴───────────────┴─────┘ /// ``` pub struct Bytes { ptr: *const u8, @@ -103,6 +109,10 @@ pub(crate) struct Vtable { /// fn(data, ptr, len) pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes, /// fn(data, ptr, len) + /// + /// takes `Bytes` to value + pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>, + /// fn(data, ptr, len) pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), } @@ -121,7 +131,7 @@ impl Bytes { /// ``` #[inline] #[cfg(not(all(loom, test)))] - pub const fn new() -> Bytes { + pub const fn new() -> Self { // Make it a named const to work around // "unsizing casts are not allowed in const fn" const EMPTY: &[u8] = &[]; @@ -129,7 +139,7 @@ impl Bytes { } #[cfg(all(loom, test))] - pub fn new() -> Bytes { + pub fn new() -> Self { const EMPTY: &[u8] = &[]; Bytes::from_static(EMPTY) } @@ -149,7 +159,7 @@ impl Bytes { /// ``` #[inline] #[cfg(not(all(loom, test)))] - pub const fn from_static(bytes: &'static [u8]) -> Bytes { + pub const fn from_static(bytes: &'static [u8]) -> Self { Bytes { ptr: bytes.as_ptr(), len: bytes.len(), @@ -159,7 +169,7 @@ impl Bytes { } #[cfg(all(loom, test))] - pub fn from_static(bytes: &'static [u8]) -> Bytes { + pub fn from_static(bytes: &'static [u8]) -> Self { Bytes { ptr: bytes.as_ptr(), len: bytes.len(), @@ -179,7 +189,7 @@ impl Bytes { /// assert_eq!(b.len(), 5); /// ``` #[inline] - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.len } @@ -194,7 +204,7 @@ impl Bytes { /// assert!(b.is_empty()); /// ``` #[inline] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.len == 0 } @@ -225,7 +235,7 @@ impl Bytes { /// /// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing /// will panic. - pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes { + pub fn slice(&self, range: impl RangeBounds<usize>) -> Self { use core::ops::Bound; let len = self.len(); @@ -262,7 +272,7 @@ impl Bytes { let mut ret = self.clone(); ret.len = end - begin; - ret.ptr = unsafe { ret.ptr.offset(begin as isize) }; + ret.ptr = unsafe { ret.ptr.add(begin) }; ret } @@ -292,7 +302,7 @@ impl Bytes { /// /// Requires that the given `sub` slice is in fact contained within the /// `Bytes` buffer; otherwise this function will panic. - pub fn slice_ref(&self, subset: &[u8]) -> Bytes { + pub fn slice_ref(&self, subset: &[u8]) -> Self { // Empty slice and empty Bytes may have their pointers reset // so explicitly allow empty slice to be a subslice of any slice. 
if subset.is_empty() { @@ -308,15 +318,15 @@ impl Bytes { assert!( sub_p >= bytes_p, "subset pointer ({:p}) is smaller than self pointer ({:p})", - sub_p as *const u8, - bytes_p as *const u8, + subset.as_ptr(), + self.as_ptr(), ); assert!( sub_p + sub_len <= bytes_p + bytes_len, "subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})", - bytes_p as *const u8, + self.as_ptr(), bytes_len, - sub_p as *const u8, + subset.as_ptr(), sub_len, ); @@ -349,7 +359,7 @@ impl Bytes { /// /// Panics if `at > len`. #[must_use = "consider Bytes::truncate if you don't need the other half"] - pub fn split_off(&mut self, at: usize) -> Bytes { + pub fn split_off(&mut self, at: usize) -> Self { assert!( at <= self.len(), "split_off out of bounds: {:?} <= {:?}", @@ -398,7 +408,7 @@ impl Bytes { /// /// Panics if `at > len`. #[must_use = "consider Bytes::advance if you don't need the other half"] - pub fn split_to(&mut self, at: usize) -> Bytes { + pub fn split_to(&mut self, at: usize) -> Self { assert!( at <= self.len(), "split_to out of bounds: {:?} <= {:?}", @@ -501,7 +511,7 @@ impl Bytes { // should already be asserted, but debug assert for tests debug_assert!(self.len >= by, "internal: inc_start out of bounds"); self.len -= by; - self.ptr = self.ptr.offset(by as isize); + self.ptr = self.ptr.add(by); } } @@ -604,7 +614,7 @@ impl<'a> IntoIterator for &'a Bytes { type IntoIter = core::slice::Iter<'a, u8>; fn into_iter(self) -> Self::IntoIter { - self.as_slice().into_iter() + self.as_slice().iter() } } @@ -686,7 +696,7 @@ impl PartialOrd<Bytes> for str { impl PartialEq<Vec<u8>> for Bytes { fn eq(&self, other: &Vec<u8>) -> bool { - *self == &other[..] + *self == other[..] } } @@ -710,7 +720,7 @@ impl PartialOrd<Bytes> for Vec<u8> { impl PartialEq<String> for Bytes { fn eq(&self, other: &String) -> bool { - *self == &other[..] + *self == other[..] } } @@ -797,30 +807,64 @@ impl From<&'static str> for Bytes { impl From<Vec<u8>> for Bytes { fn from(vec: Vec<u8>) -> Bytes { - // into_boxed_slice doesn't return a heap allocation for empty vectors, + let mut vec = vec; + let ptr = vec.as_mut_ptr(); + let len = vec.len(); + let cap = vec.capacity(); + + // Avoid an extra allocation if possible. + if len == cap { + return Bytes::from(vec.into_boxed_slice()); + } + + let shared = Box::new(Shared { + buf: ptr, + cap, + ref_cnt: AtomicUsize::new(1), + }); + mem::forget(vec); + + let shared = Box::into_raw(shared); + // The pointer should be aligned, so this assert should + // always succeed. + debug_assert!( + 0 == (shared as usize & KIND_MASK), + "internal: Box<Shared> should have an aligned pointer", + ); + Bytes { + ptr, + len, + data: AtomicPtr::new(shared as _), + vtable: &SHARED_VTABLE, + } + } +} + +impl From<Box<[u8]>> for Bytes { + fn from(slice: Box<[u8]>) -> Bytes { + // Box<[u8]> doesn't contain a heap allocation for empty slices, // so the pointer isn't aligned enough for the KIND_VEC stashing to // work. 
- if vec.is_empty() { + if slice.is_empty() { return Bytes::new(); } - let slice = vec.into_boxed_slice(); let len = slice.len(); let ptr = Box::into_raw(slice) as *mut u8; if ptr as usize & 0x1 == 0 { - let data = ptr as usize | KIND_VEC; + let data = ptr_map(ptr, |addr| addr | KIND_VEC); Bytes { ptr, len, - data: AtomicPtr::new(data as *mut _), + data: AtomicPtr::new(data.cast()), vtable: &PROMOTABLE_EVEN_VTABLE, } } else { Bytes { ptr, len, - data: AtomicPtr::new(ptr as *mut _), + data: AtomicPtr::new(ptr.cast()), vtable: &PROMOTABLE_ODD_VTABLE, } } @@ -833,6 +877,13 @@ impl From<String> for Bytes { } } +impl From<Bytes> for Vec<u8> { + fn from(bytes: Bytes) -> Vec<u8> { + let bytes = mem::ManuallyDrop::new(bytes); + unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } + } +} + // ===== impl Vtable ===== impl fmt::Debug for Vtable { @@ -848,6 +899,7 @@ impl fmt::Debug for Vtable { const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, + to_vec: static_to_vec, drop: static_drop, }; @@ -856,6 +908,11 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes { Bytes::from_static(slice) } +unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { + let slice = slice::from_raw_parts(ptr, len); + slice.to_vec() +} + unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { // nothing to drop for &'static [u8] } @@ -864,11 +921,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, + to_vec: promotable_even_to_vec, drop: promotable_even_drop, }; static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, + to_vec: promotable_odd_to_vec, drop: promotable_odd_drop, }; @@ -877,25 +936,57 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { - shallow_clone_arc(shared as _, ptr, len) + shallow_clone_arc(shared.cast(), ptr, len) } else { debug_assert_eq!(kind, KIND_VEC); - let buf = (shared as usize & !KIND_MASK) as *mut u8; + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); shallow_clone_vec(data, shared, buf, ptr, len) } } +unsafe fn promotable_to_vec( + data: &AtomicPtr<()>, + ptr: *const u8, + len: usize, + f: fn(*mut ()) -> *mut u8, +) -> Vec<u8> { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared_to_vec_impl(shared.cast(), ptr, len) + } else { + // If Bytes holds a Vec, then the offset must be 0. 
+ debug_assert_eq!(kind, KIND_VEC); + + let buf = f(shared); + + let cap = (ptr as usize - buf as usize) + len; + + // Copy back buffer + ptr::copy(ptr, buf, len); + + Vec::from_raw_parts(buf, len, cap) + } +} + +unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { + promotable_to_vec(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + }) +} + unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { - release_shared(shared as *mut Shared); + release_shared(shared.cast()); } else { debug_assert_eq!(kind, KIND_VEC); - let buf = (shared as usize & !KIND_MASK) as *mut u8; - drop(rebuild_boxed_slice(buf, ptr, len)); + let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK); + free_boxed_slice(buf, ptr, len); } }); } @@ -908,38 +999,49 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) shallow_clone_arc(shared as _, ptr, len) } else { debug_assert_eq!(kind, KIND_VEC); - shallow_clone_vec(data, shared, shared as *mut u8, ptr, len) + shallow_clone_vec(data, shared, shared.cast(), ptr, len) } } +unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { + promotable_to_vec(data, ptr, len, |shared| shared.cast()) +} + unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; let kind = shared as usize & KIND_MASK; if kind == KIND_ARC { - release_shared(shared as *mut Shared); + release_shared(shared.cast()); } else { debug_assert_eq!(kind, KIND_VEC); - drop(rebuild_boxed_slice(shared as *mut u8, ptr, len)); + free_boxed_slice(shared.cast(), ptr, len); } }); } -unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> { +unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { let cap = (offset as usize - buf as usize) + len; - Box::from_raw(slice::from_raw_parts_mut(buf, cap)) + dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) } // ===== impl SharedVtable ===== struct Shared { - // holds vec for drop, but otherwise doesnt access it - _vec: Vec<u8>, + // Holds arguments to dealloc upon Drop, but otherwise doesn't use them + buf: *mut u8, + cap: usize, ref_cnt: AtomicUsize, } +impl Drop for Shared { + fn drop(&mut self) { + unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) } + } +} + // Assert that the alignment of `Shared` is divisible by 2. // This is a necessary invariant since we depend on allocating `Shared` a // shared object to implicitly carry the `KIND_ARC` flag in its pointer. @@ -948,6 +1050,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, + to_vec: shared_to_vec, drop: shared_drop, }; @@ -960,9 +1063,42 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte shallow_clone_arc(shared as _, ptr, len) } +unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> { + // Check that the ref_cnt is 1 (unique). + // + // If it is unique, then it is set to 0 with AcqRel fence for the same + // reason in release_shared. + // + // Otherwise, we take the other branch and call release_shared. 
+ if (*shared) + .ref_cnt + .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + let buf = (*shared).buf; + let cap = (*shared).cap; + + // Deallocate Shared + drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>)); + + // Copy back buffer + ptr::copy(ptr, buf, len); + + Vec::from_raw_parts(buf, len, cap) + } else { + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + v + } +} + +unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { + shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) +} + unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { - release_shared(*shared as *mut Shared); + release_shared(shared.cast()); }); } @@ -1000,9 +1136,9 @@ unsafe fn shallow_clone_vec( // updated and since the buffer hasn't been promoted to an // `Arc`, those three fields still are the components of the // vector. - let vec = rebuild_boxed_slice(buf, offset, len).into_vec(); let shared = Box::new(Shared { - _vec: vec, + buf, + cap: (offset as usize - buf as usize) + len, // Initialize refcount to 2. One for this reference, and one // for the new clone that will be returned from // `shallow_clone`. @@ -1076,10 +1212,40 @@ unsafe fn release_shared(ptr: *mut Shared) { // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - atomic::fence(Ordering::Acquire); + // + // Thread sanitizer does not support atomic fences. Use an atomic load + // instead. + (*ptr).ref_cnt.load(Ordering::Acquire); // Drop the data - Box::from_raw(ptr); + drop(Box::from_raw(ptr)); +} + +// Ideally we would always use this version of `ptr_map` since it is strict +// provenance compatible, but it results in worse codegen. We will however still +// use it on miri because it gives better diagnostics for people who test bytes +// code with miri. +// +// See https://github.com/tokio-rs/bytes/pull/545 for more info. +#[cfg(miri)] +fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 +where + F: FnOnce(usize) -> usize, +{ + let old_addr = ptr as usize; + let new_addr = f(old_addr); + let diff = new_addr.wrapping_sub(old_addr); + ptr.wrapping_add(diff) +} + +#[cfg(not(miri))] +fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8 +where + F: FnOnce(usize) -> usize, +{ + let old_addr = ptr as usize; + let new_addr = f(old_addr); + new_addr as *mut u8 } // compile-fails diff --git a/vendor/bytes/src/bytes_mut.rs b/vendor/bytes/src/bytes_mut.rs index 61c0460ca..70613b224 100644 --- a/vendor/bytes/src/bytes_mut.rs +++ b/vendor/bytes/src/bytes_mut.rs @@ -1,5 +1,5 @@ use core::iter::{FromIterator, Iterator}; -use core::mem::{self, ManuallyDrop}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::ptr::{self, NonNull}; use core::{cmp, fmt, hash, isize, slice, usize}; @@ -8,6 +8,7 @@ use alloc::{ borrow::{Borrow, BorrowMut}, boxed::Box, string::String, + vec, vec::Vec, }; @@ -15,7 +16,7 @@ use crate::buf::{IntoIter, UninitSlice}; use crate::bytes::Vtable; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; -use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; +use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use crate::{Buf, BufMut, Bytes}; /// A unique reference to a contiguous slice of memory. 
@@ -252,12 +253,28 @@ impl BytesMut { let ptr = self.ptr.as_ptr(); let len = self.len; - let data = AtomicPtr::new(self.data as _); + let data = AtomicPtr::new(self.data.cast()); mem::forget(self); unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } } } + /// Creates a new `BytesMut`, which is initialized with zero. + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// let zeros = BytesMut::zeroed(42); + /// + /// assert_eq!(zeros.len(), 42); + /// zeros.into_iter().for_each(|x| assert_eq!(x, 0)); + /// ``` + pub fn zeroed(len: usize) -> BytesMut { + BytesMut::from_vec(vec![0; len]) + } + /// Splits the bytes into two at the given index. /// /// Afterwards `self` contains elements `[0, at)`, and the returned @@ -380,6 +397,8 @@ impl BytesMut { /// If `len` is greater than the buffer's current length, this has no /// effect. /// + /// Existing underlying capacity is preserved. + /// /// The [`split_off`] method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// @@ -402,7 +421,7 @@ impl BytesMut { } } - /// Clears the buffer, removing all data. + /// Clears the buffer, removing all data. Existing capacity is preserved. /// /// # Examples /// @@ -492,11 +511,20 @@ impl BytesMut { /// reallocations. A call to `reserve` may result in an allocation. /// /// Before allocating new buffer space, the function will attempt to reclaim - /// space in the existing buffer. If the current handle references a small - /// view in the original buffer and all other handles have been dropped, - /// and the requested capacity is less than or equal to the existing - /// buffer's capacity, then the current view will be copied to the front of - /// the buffer and the handle will take ownership of the full buffer. + /// space in the existing buffer. If the current handle references a view + /// into a larger original buffer, and all other handles referencing part + /// of the same original buffer have been dropped, then the current view + /// can be copied/shifted to the front of the buffer and the handle can take + /// ownership of the full buffer, provided that the full buffer is large + /// enough to fit the requested additional capacity. + /// + /// This optimization will only happen if shifting the data from the current + /// view to the front of the buffer is not too expensive in terms of the + /// (amortized) time required. The precise condition is subject to change; + /// as of now, the length of the data being shifted needs to be at least as + /// large as the distance that it's shifted by. If the current view is empty + /// and the original buffer is large enough to fit the requested additional + /// capacity, then reallocations will never happen. /// /// # Examples /// @@ -560,17 +588,34 @@ impl BytesMut { // space. // // Otherwise, since backed by a vector, use `Vec::reserve` + // + // We need to make sure that this optimization does not kill the + // amortized runtimes of BytesMut's operations. unsafe { let (off, prev) = self.get_vec_pos(); // Only reuse space if we can satisfy the requested additional space. - if self.capacity() - self.len() + off >= additional { - // There's space - reuse it + // + // Also check if the value of `off` suggests that enough bytes + // have been read to account for the overhead of shifting all + // the data (in an amortized analysis). + // Hence the condition `off >= self.len()`. 
+ // + // This condition also already implies that the buffer is going + // to be (at least) half-empty in the end; so we do not break + // the (amortized) runtime with future resizes of the underlying + // `Vec`. + // + // [For more details check issue #524, and PR #525.] + if self.capacity() - self.len() + off >= additional && off >= self.len() { + // There's enough space, and it's not too much overhead: + // reuse the space! // // Just move the pointer back to the start after copying // data back. let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); - ptr::copy(self.ptr.as_ptr(), base_ptr, self.len); + // Since `off >= self.len()`, the two regions don't overlap. + ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); self.ptr = vptr(base_ptr); self.set_vec_pos(0, prev); @@ -578,13 +623,14 @@ impl BytesMut { // can gain capacity back. self.cap += off; } else { - // No space - allocate more + // Not enough space, or reusing might be too much overhead: + // allocate more space! let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); v.reserve(additional); // Update the info - self.ptr = vptr(v.as_mut_ptr().offset(off as isize)); + self.ptr = vptr(v.as_mut_ptr().add(off)); self.len = v.len() - off; self.cap = v.capacity() - off; } @@ -594,7 +640,7 @@ impl BytesMut { } debug_assert_eq!(kind, KIND_ARC); - let shared: *mut Shared = self.data as _; + let shared: *mut Shared = self.data; // Reserving involves abandoning the currently shared buffer and // allocating a new vector with the requested capacity. @@ -617,29 +663,65 @@ impl BytesMut { // sure that the vector has enough capacity. let v = &mut (*shared).vec; - if v.capacity() >= new_cap { - // The capacity is sufficient, reclaim the buffer - let ptr = v.as_mut_ptr(); + let v_capacity = v.capacity(); + let ptr = v.as_mut_ptr(); + + let offset = offset_from(self.ptr.as_ptr(), ptr); - ptr::copy(self.ptr.as_ptr(), ptr, len); + // Compare the condition in the `kind == KIND_VEC` case above + // for more details. + if v_capacity >= new_cap + offset { + self.cap = new_cap; + // no copy is necessary + } else if v_capacity >= new_cap && offset >= len { + // The capacity is sufficient, and copying is not too much + // overhead: reclaim the buffer! + + // `offset >= len` means: no overlap + ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len); self.ptr = vptr(ptr); self.cap = v.capacity(); + } else { + // calculate offset + let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize); - return; - } + // new_cap is calculated in terms of `BytesMut`, not the underlying + // `Vec`, so it does not take the offset into account. + // + // Thus we have to manually add it here. + new_cap = new_cap.checked_add(off).expect("overflow"); - // The vector capacity is not sufficient. The reserve request is - // asking for more than the initial buffer capacity. Allocate more - // than requested if `new_cap` is not much bigger than the current - // capacity. - // - // There are some situations, using `reserve_exact` that the - // buffer capacity could be below `original_capacity`, so do a - // check. - let double = v.capacity().checked_shl(1).unwrap_or(new_cap); + // The vector capacity is not sufficient. The reserve request is + // asking for more than the initial buffer capacity. Allocate more + // than requested if `new_cap` is not much bigger than the current + // capacity. 
+                //
+                // There are some situations, when using `reserve_exact`, in
+                // which the buffer capacity could be below `original_capacity`,
+                // so do a check.
+                let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
 
-            new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
+                new_cap = cmp::max(double, new_cap);
+
+                // No space - allocate more
+                //
+                // The length field of `Shared::vec` is not used by the `BytesMut`;
+                // instead we use the `len` field in the `BytesMut` itself. However,
+                // `Vec::reserve` doesn't guarantee that data stored in the unused
+                // capacity of the vector is copied over to the new allocation, so
+                // we need to ensure that we don't have any data we care about in
+                // the unused capacity before calling `reserve`.
+                debug_assert!(off + len <= v.capacity());
+                v.set_len(off + len);
+                v.reserve(new_cap - v.len());
+
+                // Update the info
+                self.ptr = vptr(v.as_mut_ptr().add(off));
+                self.cap = v.capacity() - off;
+            }
+
+            return;
         } else {
             new_cap = cmp::max(new_cap, original_capacity);
         }
@@ -657,7 +739,7 @@ impl BytesMut {
         // Update self
         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
-        self.data = data as _;
+        self.data = invalid_ptr(data);
         self.ptr = vptr(v.as_mut_ptr());
         self.len = v.len();
         self.cap = v.capacity();
@@ -684,11 +766,11 @@ impl BytesMut {
         self.reserve(cnt);
 
         unsafe {
-            let dst = self.uninit_slice();
+            let dst = self.spare_capacity_mut();
             // Reserved above
             debug_assert!(dst.len() >= cnt);
 
-            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
+            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
         }
 
         unsafe {
@@ -698,10 +780,11 @@ impl BytesMut {
 
     /// Absorbs a `BytesMut` that was previously split off.
     ///
-    /// If the two `BytesMut` objects were previously contiguous, i.e., if
-    /// `other` was created by calling `split_off` on this `BytesMut`, then
-    /// this is an `O(1)` operation that just decreases a reference
-    /// count and sets a few indices. Otherwise this method degenerates to
+    /// If the two `BytesMut` objects were previously contiguous and not mutated
+    /// in a way that causes re-allocation, i.e., if `other` was created by
+    /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+    /// that just decreases a reference count and sets a few indices.
+    /// Otherwise this method degenerates to
     /// `self.extend_from_slice(other.as_ref())`.
    ///
    /// # Examples
@@ -752,7 +835,7 @@ impl BytesMut {
             ptr,
             len,
             cap,
-            data: data as *mut _,
+            data: invalid_ptr(data),
         }
     }
 
@@ -799,7 +882,7 @@ impl BytesMut {
         // Updating the start of the view means setting `ptr` to point to the
         // new start and updating the `len` field to reflect the new length
         // of the view.
-        self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));
+        self.ptr = vptr(self.ptr.as_ptr().add(start));
 
         if self.len >= start {
             self.len -= start;
@@ -819,11 +902,11 @@ impl BytesMut {
     }
 
     fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
-        if other.is_empty() {
+        if other.capacity() == 0 {
             return Ok(());
         }
 
-        let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
+        let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
         if ptr == other.ptr.as_ptr()
             && self.kind() == KIND_ARC
             && other.kind() == KIND_ARC
@@ -873,7 +956,7 @@ impl BytesMut {
         // always succeed.
         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
 
-        self.data = shared as _;
+        self.data = shared;
     }
 
     /// Makes an exact shallow clone of `self`.
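For readers tracking the new reclamation rule in `reserve` above, here is a small usage sketch (an editor's illustration; the sizes are arbitrary) of the `off >= self.len()` condition in its easiest case: an empty view, where the original allocation can be reclaimed without copying or reallocating.

```
use bytes::{Buf, BufMut, BytesMut};

fn main() {
    // Fill a 16-byte buffer completely, then consume all of it.
    let mut buf = BytesMut::with_capacity(16);
    buf.put_slice(b"0123456789abcdef");
    buf.advance(16);
    assert!(buf.is_empty());

    // The view is empty and sits 16 bytes into the original allocation,
    // so `off >= self.len()` holds trivially and `reserve` can reclaim
    // the full buffer instead of allocating a new one.
    buf.reserve(16);
    assert!(buf.capacity() >= 16);
}
```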
@@ -906,16 +989,45 @@ impl BytesMut { debug_assert_eq!(self.kind(), KIND_VEC); debug_assert!(pos <= MAX_VEC_POS); - self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _; + self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)); } + /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`. + /// + /// The returned slice can be used to fill the buffer with data (e.g. by + /// reading from a file) before marking the data as initialized using the + /// [`set_len`] method. + /// + /// [`set_len`]: BytesMut::set_len + /// + /// # Examples + /// + /// ``` + /// use bytes::BytesMut; + /// + /// // Allocate buffer big enough for 10 bytes. + /// let mut buf = BytesMut::with_capacity(10); + /// + /// // Fill in the first 3 elements. + /// let uninit = buf.spare_capacity_mut(); + /// uninit[0].write(0); + /// uninit[1].write(1); + /// uninit[2].write(2); + /// + /// // Mark the first 3 bytes of the buffer as being initialized. + /// unsafe { + /// buf.set_len(3); + /// } + /// + /// assert_eq!(&buf[..], &[0, 1, 2]); + /// ``` #[inline] - fn uninit_slice(&mut self) -> &mut UninitSlice { + pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] { unsafe { - let ptr = self.ptr.as_ptr().offset(self.len as isize); + let ptr = self.ptr.as_ptr().add(self.len); let len = self.cap - self.len; - UninitSlice::from_raw_parts_mut(ptr, len) + slice::from_raw_parts_mut(ptr.cast(), len) } } } @@ -932,7 +1044,7 @@ impl Drop for BytesMut { let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); } } else if kind == KIND_ARC { - unsafe { release_shared(self.data as _) }; + unsafe { release_shared(self.data) }; } } } @@ -989,7 +1101,7 @@ unsafe impl BufMut for BytesMut { if self.capacity() == self.len() { self.reserve(64); } - self.uninit_slice() + UninitSlice::from_slice(self.spare_capacity_mut()) } // Specialize these methods so they can skip checking `remaining_mut` @@ -1010,6 +1122,19 @@ unsafe impl BufMut for BytesMut { fn put_slice(&mut self, src: &[u8]) { self.extend_from_slice(src); } + + fn put_bytes(&mut self, val: u8, cnt: usize) { + self.reserve(cnt); + unsafe { + let dst = self.spare_capacity_mut(); + // Reserved above + debug_assert!(dst.len() >= cnt); + + ptr::write_bytes(dst.as_mut_ptr(), val, cnt); + + self.advance_mut(cnt); + } + } } impl AsRef<[u8]> for BytesMut { @@ -1146,7 +1271,7 @@ impl<'a> IntoIterator for &'a BytesMut { type IntoIter = core::slice::Iter<'a, u8>; fn into_iter(self) -> Self::IntoIter { - self.as_ref().into_iter() + self.as_ref().iter() } } @@ -1175,7 +1300,18 @@ impl<'a> Extend<&'a u8> for BytesMut { where T: IntoIterator<Item = &'a u8>, { - self.extend(iter.into_iter().map(|b| *b)) + self.extend(iter.into_iter().copied()) + } +} + +impl Extend<Bytes> for BytesMut { + fn extend<T>(&mut self, iter: T) + where + T: IntoIterator<Item = Bytes>, + { + for bytes in iter { + self.extend_from_slice(&bytes) + } } } @@ -1187,7 +1323,7 @@ impl FromIterator<u8> for BytesMut { impl<'a> FromIterator<&'a u8> for BytesMut { fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self { - BytesMut::from_iter(into_iter.into_iter().map(|b| *b)) + BytesMut::from_iter(into_iter.into_iter().copied()) } } @@ -1228,10 +1364,13 @@ unsafe fn release_shared(ptr: *mut Shared) { // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - atomic::fence(Ordering::Acquire); + // + // Thread sanitizer does not support atomic fences. 
Use an atomic load
+    // instead.
+    (*ptr).ref_count.load(Ordering::Acquire);
 
     // Drop the data
-    Box::from_raw(ptr);
+    drop(Box::from_raw(ptr));
 }
 
 impl Shared {
@@ -1250,6 +1389,7 @@ impl Shared {
     }
 }
 
+#[inline]
 fn original_capacity_to_repr(cap: usize) -> usize {
     let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
     cmp::min(
@@ -1376,7 +1516,7 @@ impl PartialOrd<BytesMut> for str {
 
 impl PartialEq<Vec<u8>> for BytesMut {
     fn eq(&self, other: &Vec<u8>) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -1400,7 +1540,7 @@ impl PartialOrd<BytesMut> for Vec<u8> {
 
 impl PartialEq<String> for BytesMut {
     fn eq(&self, other: &String) -> bool {
-        *self == &other[..]
+        *self == other[..]
     }
 }
 
@@ -1466,16 +1606,55 @@ impl PartialOrd<BytesMut> for &str {
 
 impl PartialEq<BytesMut> for Bytes {
     fn eq(&self, other: &BytesMut) -> bool {
-        &other[..] == &self[..]
+        other[..] == self[..]
     }
 }
 
 impl PartialEq<Bytes> for BytesMut {
     fn eq(&self, other: &Bytes) -> bool {
-        &other[..] == &self[..]
+        other[..] == self[..]
     }
 }
 
+impl From<BytesMut> for Vec<u8> {
+    fn from(mut bytes: BytesMut) -> Self {
+        let kind = bytes.kind();
+
+        let mut vec = if kind == KIND_VEC {
+            unsafe {
+                let (off, _) = bytes.get_vec_pos();
+                rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+            }
+        } else if kind == KIND_ARC {
+            let shared = bytes.data as *mut Shared;
+
+            if unsafe { (*shared).is_unique() } {
+                let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+                unsafe { release_shared(shared) };
+
+                vec
+            } else {
+                return bytes.deref().to_vec();
+            }
+        } else {
+            return bytes.deref().to_vec();
+        };
+
+        let len = bytes.len;
+
+        unsafe {
+            ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+            vec.set_len(len);
+        }
+
+        mem::forget(bytes);
+
+        vec
+    }
+}
+
+#[inline]
 fn vptr(ptr: *mut u8) -> NonNull<u8> {
     if cfg!(debug_assertions) {
         NonNull::new(ptr).expect("Vec pointer should be non-null")
@@ -1484,6 +1663,35 @@ fn vptr(ptr: *mut u8) -> NonNull<u8> {
     }
 }
 
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but that cast fails on Miri when
+/// strict provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+    debug_assert_eq!(ptr as usize, addr);
+    ptr.cast::<T>()
+}
+
+/// Precondition: `dst >= original`
+///
+/// This function is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But since the minimum supported Rust version is 1.39 and `offset_from`
+/// was only stabilized in 1.47, we cannot use it.
+#[inline] +fn offset_from(dst: *mut u8, original: *mut u8) -> usize { + debug_assert!(dst >= original); + + dst as usize - original as usize +} + unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> { let ptr = ptr.offset(-(off as isize)); len += off; @@ -1496,6 +1704,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, + to_vec: shared_v_to_vec, drop: shared_v_drop, }; @@ -1503,10 +1712,32 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By let shared = data.load(Ordering::Relaxed) as *mut Shared; increment_shared(shared); - let data = AtomicPtr::new(shared as _); + let data = AtomicPtr::new(shared as *mut ()); Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } +unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { + let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); + + if (*shared).is_unique() { + let shared = &mut *shared; + + // Drop shared + let mut vec = mem::replace(&mut shared.vec, Vec::new()); + release_shared(shared); + + // Copy back buffer + ptr::copy(ptr, vec.as_mut_ptr(), len); + vec.set_len(len); + + vec + } else { + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + v + } +} + unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { release_shared(*shared as *mut Shared); diff --git a/vendor/bytes/src/fmt/debug.rs b/vendor/bytes/src/fmt/debug.rs index a8545514e..83de695dd 100644 --- a/vendor/bytes/src/fmt/debug.rs +++ b/vendor/bytes/src/fmt/debug.rs @@ -25,7 +25,7 @@ impl Debug for BytesRef<'_> { } else if b == b'\0' { write!(f, "\\0")?; // ASCII printable - } else if b >= 0x20 && b < 0x7f { + } else if (0x20..0x7f).contains(&b) { write!(f, "{}", b as char)?; } else { write!(f, "\\x{:02x}", b)?; @@ -38,12 +38,12 @@ impl Debug for BytesRef<'_> { impl Debug for Bytes { fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(&self.as_ref()), f) + Debug::fmt(&BytesRef(self.as_ref()), f) } } impl Debug for BytesMut { fn fmt(&self, f: &mut Formatter<'_>) -> Result { - Debug::fmt(&BytesRef(&self.as_ref()), f) + Debug::fmt(&BytesRef(self.as_ref()), f) } } diff --git a/vendor/bytes/src/lib.rs b/vendor/bytes/src/lib.rs index dd8cc9661..af436b316 100644 --- a/vendor/bytes/src/lib.rs +++ b/vendor/bytes/src/lib.rs @@ -3,8 +3,8 @@ no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) ))] -#![doc(html_root_url = "https://docs.rs/bytes/1.0.1")] #![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] //! Provides abstractions for working with bytes. //! diff --git a/vendor/bytes/src/loom.rs b/vendor/bytes/src/loom.rs index 1cae8812e..9e6b2d5e2 100644 --- a/vendor/bytes/src/loom.rs +++ b/vendor/bytes/src/loom.rs @@ -1,7 +1,7 @@ #[cfg(not(all(test, loom)))] pub(crate) mod sync { pub(crate) mod atomic { - pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; + pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; pub(crate) trait AtomicMut<T> { fn with_mut<F, R>(&mut self, f: F) -> R @@ -23,7 +23,7 @@ pub(crate) mod sync { #[cfg(all(test, loom))] pub(crate) mod sync { pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; + pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; pub(crate) trait AtomicMut<T> {} } |
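Finally, a brief usage sketch (an editor's illustration, assuming a build of this vendored version of the crate) exercising two of the additions in this patch: the `Extend<Bytes>` impl on `BytesMut` and the `From<BytesMut> for Vec<u8>` conversion.

```
use bytes::{Bytes, BytesMut};

fn main() {
    // `Extend<Bytes>` appends each chunk via `extend_from_slice`.
    let mut buf = BytesMut::new();
    buf.extend(vec![Bytes::from_static(b"hello"), Bytes::from_static(b", world")]);

    // When the handle is unique, the conversion hands back the underlying
    // allocation instead of copying the bytes.
    let v: Vec<u8> = buf.into();
    assert_eq!(v, b"hello, world".to_vec());
}
```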