author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483  /third_party/rust/bytes/src/bytes_mut.rs
parent    Initial commit. (diff)
Adding upstream version 124.0.1. (tag: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/bytes/src/bytes_mut.rs')
-rw-r--r--  third_party/rust/bytes/src/bytes_mut.rs | 1812
1 file changed, 1812 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/bytes/src/bytes_mut.rs b/third_party/rust/bytes/src/bytes_mut.rs
new file mode 100644
index 0000000000..70613b2248
--- /dev/null
+++ b/third_party/rust/bytes/src/bytes_mut.rs
@@ -0,0 +1,1812 @@
+use core::iter::{FromIterator, Iterator};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::{cmp, fmt, hash, isize, slice, usize};
+
+use alloc::{
+ borrow::{Borrow, BorrowMut},
+ boxed::Box,
+ string::String,
+ vec,
+ vec::Vec,
+};
+
+use crate::buf::{IntoIter, UninitSlice};
+use crate::bytes::Vtable;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};
+
+/// A unique reference to a contiguous slice of memory.
+///
+/// `BytesMut` represents a unique view into a potentially shared memory region.
+/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
+/// mutate the memory.
+///
+/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
+/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
+/// same `buf` overlaps with its slice. That guarantee means that a write lock
+/// is not required.
+///
+/// # Growth
+///
+/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
+/// necessary. However, explicitly reserving the required space up-front before
+/// a series of inserts will be more efficient.
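+///
+/// A minimal sketch of reserving up front (the capacity `reserve` actually
+/// picks may exceed what is requested):
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::new();
+/// buf.reserve(128); // one allocation up front
+/// for i in 0..128u8 {
+///     buf.put_u8(i); // stays within the reserved capacity
+/// }
+/// assert!(buf.capacity() >= 128);
+/// ```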
+///
+/// # Examples
+///
+/// ```
+/// use bytes::{BytesMut, BufMut};
+///
+/// let mut buf = BytesMut::with_capacity(64);
+///
+/// buf.put_u8(b'h');
+/// buf.put_u8(b'e');
+/// buf.put(&b"llo"[..]);
+///
+/// assert_eq!(&buf[..], b"hello");
+///
+/// // Freeze the buffer so that it can be shared
+/// let a = buf.freeze();
+///
+/// // This does not allocate, instead `b` points to the same memory.
+/// let b = a.clone();
+///
+/// assert_eq!(&a[..], b"hello");
+/// assert_eq!(&b[..], b"hello");
+/// ```
+pub struct BytesMut {
+ // Pointer to the start of this handle's view into the buffer.
+ ptr: NonNull<u8>,
+ // Number of initialized bytes in the view.
+ len: usize,
+ // Total bytes the view may use, starting at `ptr` (`len <= cap`).
+ cap: usize,
+ // Tagged field: either a pointer to the `Shared` state (KIND_ARC) or
+ // packed vec-offset / original-capacity bits (KIND_VEC); see `kind()`.
+ data: *mut Shared,
+}
+
+// Thread-safe reference-counted container for the shared storage. This is
+// mostly the same as `alloc::sync::Arc` but without the weak counter. The
+// ref counting fns are based on the ones found in `std`.
+//
+// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
+// up making the overall code simpler and easier to reason about. This is due to
+// some of the logic around setting `Inner::arc` and other ways the `arc` field
+// is used. Using `Arc` ended up requiring a number of funky transmutes and
+// other shenanigans to make it work.
+struct Shared {
+ vec: Vec<u8>,
+ original_capacity_repr: usize,
+ ref_count: AtomicUsize,
+}
+
+// Buffer storage strategy flags.
+const KIND_ARC: usize = 0b0;
+const KIND_VEC: usize = 0b1;
+const KIND_MASK: usize = 0b1;
+
+// The max original capacity value. Any `BytesMut` allocated with a greater
+// initial capacity will default to this.
+const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
+// The original capacity algorithm will not take effect unless the originally
+// allocated capacity was at least 1 KiB in size.
+const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
+// The original capacity is stored in powers of 2 starting at 1 KiB up to a
+// max of 64 KiB. Representing it as such requires only 3 bits of storage.
+const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
+const ORIGINAL_CAPACITY_OFFSET: usize = 2;
+
+// When the storage is in the `Vec` representation, the pointer can be advanced
+// by at most this value. This is because the storage available to track the
+// offset is the pointer width minus the number of KIND bits and
+// ORIGINAL_CAPACITY bits.
+const VEC_POS_OFFSET: usize = 5;
+const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
+const NOT_VEC_POS_MASK: usize = 0b11111;
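+
+// In the `Vec` representation, `data` is not a real pointer: from the low
+// bits up it packs the KIND flag (bit 0), the original capacity repr
+// (bits 2..=4), and the vec offset (bits 5..). A sketch of how the field is
+// assembled, mirroring `from_vec` and `set_vec_pos` below:
+//
+//   data = (offset << VEC_POS_OFFSET)
+//       | (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET)
+//       | KIND_VEC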
+
+#[cfg(target_pointer_width = "64")]
+const PTR_WIDTH: usize = 64;
+#[cfg(target_pointer_width = "32")]
+const PTR_WIDTH: usize = 32;
+
+/*
+ *
+ * ===== BytesMut =====
+ *
+ */
+
+impl BytesMut {
+ /// Creates a new `BytesMut` with the specified capacity.
+ ///
+ /// The returned `BytesMut` will be able to hold at least `capacity` bytes
+ /// without reallocating.
+ ///
+ /// It is important to note that this function does not specify the length
+ /// of the returned `BytesMut`, but only the capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut bytes = BytesMut::with_capacity(64);
+ ///
+ /// // `bytes` contains no data, even though there is capacity
+ /// assert_eq!(bytes.len(), 0);
+ ///
+ /// bytes.put(&b"hello world"[..]);
+ ///
+ /// assert_eq!(&bytes[..], b"hello world");
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> BytesMut {
+ BytesMut::from_vec(Vec::with_capacity(capacity))
+ }
+
+ /// Creates a new `BytesMut` with default capacity.
+ ///
+ /// The resulting object has length 0 and unspecified capacity.
+ /// This function does not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut bytes = BytesMut::new();
+ ///
+ /// assert_eq!(0, bytes.len());
+ ///
+ /// bytes.reserve(2);
+ /// bytes.put_slice(b"xy");
+ ///
+ /// assert_eq!(&b"xy"[..], &bytes[..]);
+ /// ```
+ #[inline]
+ pub fn new() -> BytesMut {
+ BytesMut::with_capacity(0)
+ }
+
+ /// Returns the number of bytes contained in this `BytesMut`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::from(&b"hello"[..]);
+ /// assert_eq!(b.len(), 5);
+ /// ```
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns true if the `BytesMut` has a length of 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::with_capacity(64);
+ /// assert!(b.is_empty());
+ /// ```
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Returns the number of bytes the `BytesMut` can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let b = BytesMut::with_capacity(64);
+ /// assert_eq!(b.capacity(), 64);
+ /// ```
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.cap
+ }
+
+ /// Converts `self` into an immutable `Bytes`.
+ ///
+ /// The conversion is zero cost and is used to indicate that the slice
+ /// referenced by the handle will no longer be mutated. Once the conversion
+ /// is done, the handle can be cloned and shared across threads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ /// use std::thread;
+ ///
+ /// let mut b = BytesMut::with_capacity(64);
+ /// b.put(&b"hello world"[..]);
+ /// let b1 = b.freeze();
+ /// let b2 = b1.clone();
+ ///
+ /// let th = thread::spawn(move || {
+ /// assert_eq!(&b1[..], b"hello world");
+ /// });
+ ///
+ /// assert_eq!(&b2[..], b"hello world");
+ /// th.join().unwrap();
+ /// ```
+ #[inline]
+ pub fn freeze(mut self) -> Bytes {
+ if self.kind() == KIND_VEC {
+ // Just re-use `Bytes` internal Vec vtable
+ unsafe {
+ let (off, _) = self.get_vec_pos();
+ let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+ mem::forget(self);
+ let mut b: Bytes = vec.into();
+ b.advance(off);
+ b
+ }
+ } else {
+ debug_assert_eq!(self.kind(), KIND_ARC);
+
+ let ptr = self.ptr.as_ptr();
+ let len = self.len;
+ let data = AtomicPtr::new(self.data.cast());
+ mem::forget(self);
+ unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
+ }
+ }
+
+ /// Creates a new `BytesMut` of length `len`, with every byte initialized
+ /// to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let zeros = BytesMut::zeroed(42);
+ ///
+ /// assert_eq!(zeros.len(), 42);
+ /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+ /// ```
+ pub fn zeroed(len: usize) -> BytesMut {
+ BytesMut::from_vec(vec![0; len])
+ }
+
+ /// Splits the bytes into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[0, at)`, and the returned
+ /// `BytesMut` contains elements `[at, capacity)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count
+ /// and sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut a = BytesMut::from(&b"hello world"[..]);
+ /// let mut b = a.split_off(5);
+ ///
+ /// a[0] = b'j';
+ /// b[0] = b'!';
+ ///
+ /// assert_eq!(&a[..], b"jello");
+ /// assert_eq!(&b[..], b"!world");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > capacity`.
+ #[must_use = "consider BytesMut::truncate if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> BytesMut {
+ assert!(
+ at <= self.capacity(),
+ "split_off out of bounds: {:?} <= {:?}",
+ at,
+ self.capacity(),
+ );
+ unsafe {
+ let mut other = self.shallow_clone();
+ other.set_start(at);
+ self.set_end(at);
+ other
+ }
+ }
+
+ /// Removes the bytes from the current view, returning them in a new
+ /// `BytesMut` handle.
+ ///
+ /// Afterwards, `self` will be empty, but will retain any additional
+ /// capacity that it had before the operation. This is identical to
+ /// `self.split_to(self.len())`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut buf = BytesMut::with_capacity(1024);
+ /// buf.put(&b"hello world"[..]);
+ ///
+ /// let other = buf.split();
+ ///
+ /// assert!(buf.is_empty());
+ /// assert_eq!(1013, buf.capacity());
+ ///
+ /// assert_eq!(other, b"hello world"[..]);
+ /// ```
+ #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"]
+ pub fn split(&mut self) -> BytesMut {
+ let len = self.len();
+ self.split_to(len)
+ }
+
+ /// Splits the buffer into two at the given index.
+ ///
+ /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
+ /// contains elements `[0, at)`.
+ ///
+ /// This is an `O(1)` operation that just increases the reference count and
+ /// sets a few indices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut a = BytesMut::from(&b"hello world"[..]);
+ /// let mut b = a.split_to(5);
+ ///
+ /// a[0] = b'!';
+ /// b[0] = b'j';
+ ///
+ /// assert_eq!(&a[..], b"!world");
+ /// assert_eq!(&b[..], b"jello");
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ #[must_use = "consider BytesMut::advance if you don't need the other half"]
+ pub fn split_to(&mut self, at: usize) -> BytesMut {
+ assert!(
+ at <= self.len(),
+ "split_to out of bounds: {:?} <= {:?}",
+ at,
+ self.len(),
+ );
+
+ unsafe {
+ let mut other = self.shallow_clone();
+ other.set_end(at);
+ self.set_start(at);
+ other
+ }
+ }
+
+ /// Shortens the buffer, keeping the first `len` bytes and dropping the
+ /// rest.
+ ///
+ /// If `len` is greater than the buffer's current length, this has no
+ /// effect.
+ ///
+ /// Existing underlying capacity is preserved.
+ ///
+ /// The [`split_off`] method can emulate `truncate`, but this causes the
+ /// excess bytes to be returned instead of dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello world"[..]);
+ /// buf.truncate(5);
+ /// assert_eq!(buf, b"hello"[..]);
+ /// ```
+ ///
+ /// [`split_off`]: #method.split_off
+ pub fn truncate(&mut self, len: usize) {
+ if len <= self.len() {
+ unsafe {
+ self.set_len(len);
+ }
+ }
+ }
+
+ /// Clears the buffer, removing all data. Existing capacity is preserved.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello world"[..]);
+ /// buf.clear();
+ /// assert!(buf.is_empty());
+ /// ```
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ /// Resizes the buffer so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the buffer is extended by the
+ /// difference with each additional byte set to `value`. If `new_len` is
+ /// less than `len`, the buffer is simply truncated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::new();
+ ///
+ /// buf.resize(3, 0x1);
+ /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
+ ///
+ /// buf.resize(2, 0x2);
+ /// assert_eq!(&buf[..], &[0x1, 0x1]);
+ ///
+ /// buf.resize(4, 0x3);
+ /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
+ /// ```
+ pub fn resize(&mut self, new_len: usize, value: u8) {
+ let len = self.len();
+ if new_len > len {
+ let additional = new_len - len;
+ self.reserve(additional);
+ unsafe {
+ let dst = self.chunk_mut().as_mut_ptr();
+ ptr::write_bytes(dst, value, additional);
+ self.set_len(new_len);
+ }
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Sets the length of the buffer.
+ ///
+ /// This will explicitly set the size of the buffer without actually
+ /// modifying the data, so it is up to the caller to ensure that the data
+ /// has been initialized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut b = BytesMut::from(&b"hello world"[..]);
+ ///
+ /// unsafe {
+ /// b.set_len(5);
+ /// }
+ ///
+ /// assert_eq!(&b[..], b"hello");
+ ///
+ /// unsafe {
+ /// b.set_len(11);
+ /// }
+ ///
+ /// assert_eq!(&b[..], b"hello world");
+ /// ```
+ #[inline]
+ pub unsafe fn set_len(&mut self, len: usize) {
+ debug_assert!(len <= self.cap, "set_len out of bounds");
+ self.len = len;
+ }
+
+ /// Reserves capacity for at least `additional` more bytes to be inserted
+ /// into the given `BytesMut`.
+ ///
+ /// More than `additional` bytes may be reserved in order to avoid frequent
+ /// reallocations. A call to `reserve` may result in an allocation.
+ ///
+ /// Before allocating new buffer space, the function will attempt to reclaim
+ /// space in the existing buffer. If the current handle references a view
+ /// into a larger original buffer, and all other handles referencing part
+ /// of the same original buffer have been dropped, then the current view
+ /// can be copied/shifted to the front of the buffer and the handle can take
+ /// ownership of the full buffer, provided that the full buffer is large
+ /// enough to fit the requested additional capacity.
+ ///
+ /// This optimization will only happen if shifting the data from the current
+ /// view to the front of the buffer is not too expensive in terms of the
+ /// (amortized) time required. The precise condition is subject to change;
+ /// as of now, the length of the data being shifted needs to be at least as
+ /// large as the distance that it's shifted by. If the current view is empty
+ /// and the original buffer is large enough to fit the requested additional
+ /// capacity, then reallocations will never happen.
+ ///
+ /// # Examples
+ ///
+ /// In the following example, a new buffer is allocated.
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::from(&b"hello"[..]);
+ /// buf.reserve(64);
+ /// assert!(buf.capacity() >= 69);
+ /// ```
+ ///
+ /// In the following example, the existing buffer is reclaimed.
+ ///
+ /// ```
+ /// use bytes::{BytesMut, BufMut};
+ ///
+ /// let mut buf = BytesMut::with_capacity(128);
+ /// buf.put(&[0; 64][..]);
+ ///
+ /// let ptr = buf.as_ptr();
+ /// let other = buf.split();
+ ///
+ /// assert!(buf.is_empty());
+ /// assert_eq!(buf.capacity(), 64);
+ ///
+ /// drop(other);
+ /// buf.reserve(128);
+ ///
+ /// assert_eq!(buf.capacity(), 128);
+ /// assert_eq!(buf.as_ptr(), ptr);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ let len = self.len();
+ let rem = self.capacity() - len;
+
+ if additional <= rem {
+ // The handle can already store at least `additional` more bytes, so
+ // there is no further work needed to be done.
+ return;
+ }
+
+ self.reserve_inner(additional);
+ }
+
+ // In a separate function to allow the short circuits in `reserve` to
+ // be inlined. This significantly helps performance.
+ fn reserve_inner(&mut self, additional: usize) {
+ let len = self.len();
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ // If there's enough free space before the start of the buffer, then
+ // just copy the data backwards and reuse the already-allocated
+ // space.
+ //
+ // Otherwise, since backed by a vector, use `Vec::reserve`
+ //
+ // We need to make sure that this optimization does not kill the
+ // amortized runtimes of BytesMut's operations.
+ unsafe {
+ let (off, prev) = self.get_vec_pos();
+
+ // Only reuse space if we can satisfy the requested additional space.
+ //
+ // Also check if the value of `off` suggests that enough bytes
+ // have been read to account for the overhead of shifting all
+ // the data (in an amortized analysis).
+ // Hence the condition `off >= self.len()`.
+ //
+ // This condition also already implies that the buffer is going
+ // to be (at least) half-empty in the end; so we do not break
+ // the (amortized) runtime with future resizes of the underlying
+ // `Vec`.
+ //
+ // [For more details check issue #524, and PR #525.]
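+ //
+ // Illustrative numbers (not from the source): with len = 10,
+ // off = 40 and cap = 100, shifting the view to the front leaves
+ // 100 - 10 + 40 = 130 spare bytes, and off (40) >= len (10), so
+ // the copy is cheap enough to amortize.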
+ if self.capacity() - self.len() + off >= additional && off >= self.len() {
+ // There's enough space, and it's not too much overhead:
+ // reuse the space!
+ //
+ // Just move the pointer back to the start after copying
+ // data back.
+ let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
+ // Since `off >= self.len()`, the two regions don't overlap.
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
+ self.ptr = vptr(base_ptr);
+ self.set_vec_pos(0, prev);
+
+ // Length stays constant, but since we moved backwards we
+ // can gain capacity back.
+ self.cap += off;
+ } else {
+ // Not enough space, or reusing might be too much overhead:
+ // allocate more space!
+ let mut v =
+ ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+ v.reserve(additional);
+
+ // Update the info
+ self.ptr = vptr(v.as_mut_ptr().add(off));
+ self.len = v.len() - off;
+ self.cap = v.capacity() - off;
+ }
+
+ return;
+ }
+ }
+
+ debug_assert_eq!(kind, KIND_ARC);
+ let shared: *mut Shared = self.data;
+
+ // Reserving involves abandoning the currently shared buffer and
+ // allocating a new vector with the requested capacity.
+ //
+ // Compute the new capacity
+ let mut new_cap = len.checked_add(additional).expect("overflow");
+
+ let original_capacity;
+ let original_capacity_repr;
+
+ unsafe {
+ original_capacity_repr = (*shared).original_capacity_repr;
+ original_capacity = original_capacity_from_repr(original_capacity_repr);
+
+ // First, try to reclaim the buffer. This is possible if the current
+ // handle is the only outstanding handle pointing to the buffer.
+ if (*shared).is_unique() {
+ // This is the only handle to the buffer. It can be reclaimed.
+ // However, before doing the work of copying data, check to make
+ // sure that the vector has enough capacity.
+ let v = &mut (*shared).vec;
+
+ let v_capacity = v.capacity();
+ let ptr = v.as_mut_ptr();
+
+ let offset = offset_from(self.ptr.as_ptr(), ptr);
+
+ // Compare the condition in the `kind == KIND_VEC` case above
+ // for more details.
+ if v_capacity >= new_cap + offset {
+ self.cap = new_cap;
+ // no copy is necessary
+ } else if v_capacity >= new_cap && offset >= len {
+ // The capacity is sufficient, and copying is not too much
+ // overhead: reclaim the buffer!
+
+ // `offset >= len` means: no overlap
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
+
+ self.ptr = vptr(ptr);
+ self.cap = v.capacity();
+ } else {
+ // calculate offset
+ let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
+
+ // new_cap is calculated in terms of `BytesMut`, not the underlying
+ // `Vec`, so it does not take the offset into account.
+ //
+ // Thus we have to manually add it here.
+ new_cap = new_cap.checked_add(off).expect("overflow");
+
+ // The vector capacity is not sufficient. The reserve request is
+ // asking for more than the initial buffer capacity. Allocate more
+ // than requested if `new_cap` is not much bigger than the current
+ // capacity.
+ //
+ // In some situations, e.g. when using `reserve_exact`, the
+ // buffer capacity could be below `original_capacity`, so do a
+ // check.
+ let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+ new_cap = cmp::max(double, new_cap);
+
+ // No space - allocate more
+ //
+ // The length field of `Shared::vec` is not used by the `BytesMut`;
+ // instead we use the `len` field in the `BytesMut` itself. However,
+ // when calling `reserve`, it doesn't guarantee that data stored in
+ // the unused capacity of the vector is copied over to the new
+ // allocation, so we need to ensure that we don't have any data we
+ // care about in the unused capacity before calling `reserve`.
+ debug_assert!(off + len <= v.capacity());
+ v.set_len(off + len);
+ v.reserve(new_cap - v.len());
+
+ // Update the info
+ self.ptr = vptr(v.as_mut_ptr().add(off));
+ self.cap = v.capacity() - off;
+ }
+
+ return;
+ } else {
+ new_cap = cmp::max(new_cap, original_capacity);
+ }
+ }
+
+ // Create a new vector to store the data
+ let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
+
+ // Copy the bytes
+ v.extend_from_slice(self.as_ref());
+
+ // Release the shared handle. This must be done *after* the bytes are
+ // copied.
+ unsafe { release_shared(shared) };
+
+ // Update self
+ let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+ self.data = invalid_ptr(data);
+ self.ptr = vptr(v.as_mut_ptr());
+ self.len = v.len();
+ self.cap = v.capacity();
+ }
+
+ /// Appends given bytes to this `BytesMut`.
+ ///
+ /// If this `BytesMut` object does not have enough capacity, it is resized
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::with_capacity(0);
+ /// buf.extend_from_slice(b"aaabbb");
+ /// buf.extend_from_slice(b"cccddd");
+ ///
+ /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+ /// ```
+ pub fn extend_from_slice(&mut self, extend: &[u8]) {
+ let cnt = extend.len();
+ self.reserve(cnt);
+
+ unsafe {
+ let dst = self.spare_capacity_mut();
+ // Reserved above
+ debug_assert!(dst.len() >= cnt);
+
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
+ }
+
+ unsafe {
+ self.advance_mut(cnt);
+ }
+ }
+
+ /// Absorbs a `BytesMut` that was previously split off.
+ ///
+ /// If the two `BytesMut` objects were previously contiguous and not mutated
+ /// in a way that causes re-allocation, i.e., if `other` was created by
+ /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+ /// that just decreases a reference count and sets a few indices.
+ /// Otherwise this method degenerates to
+ /// `self.extend_from_slice(other.as_ref())`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let mut buf = BytesMut::with_capacity(64);
+ /// buf.extend_from_slice(b"aaabbbcccddd");
+ ///
+ /// let split = buf.split_off(6);
+ /// assert_eq!(b"aaabbb", &buf[..]);
+ /// assert_eq!(b"cccddd", &split[..]);
+ ///
+ /// buf.unsplit(split);
+ /// assert_eq!(b"aaabbbcccddd", &buf[..]);
+ /// ```
+ pub fn unsplit(&mut self, other: BytesMut) {
+ if self.is_empty() {
+ *self = other;
+ return;
+ }
+
+ if let Err(other) = self.try_unsplit(other) {
+ self.extend_from_slice(other.as_ref());
+ }
+ }
+
+ // private
+
+ // For now, use a `Vec` to manage the memory for us, but we may want to
+ // change that in the future to some alternate allocator strategy.
+ //
+ // Thus, we don't expose an easy way to construct from a `Vec` since an
+ // internal change could make a simple pattern (`BytesMut::from(vec)`)
+ // suddenly a lot more expensive.
+ #[inline]
+ pub(crate) fn from_vec(mut vec: Vec<u8>) -> BytesMut {
+ let ptr = vptr(vec.as_mut_ptr());
+ let len = vec.len();
+ let cap = vec.capacity();
+ mem::forget(vec);
+
+ let original_capacity_repr = original_capacity_to_repr(cap);
+ let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
+
+ BytesMut {
+ ptr,
+ len,
+ cap,
+ data: invalid_ptr(data),
+ }
+ }
+
+ #[inline]
+ fn as_slice(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
+ }
+
+ #[inline]
+ fn as_slice_mut(&mut self) -> &mut [u8] {
+ unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
+ }
+
+ unsafe fn set_start(&mut self, start: usize) {
+ // Setting the start to 0 is a no-op, so return early if this is the
+ // case.
+ if start == 0 {
+ return;
+ }
+
+ debug_assert!(start <= self.cap, "internal: set_start out of bounds");
+
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ // Setting the start when in vec representation is a little more
+ // complicated. First, we have to track how far ahead the
+ // "start" of the byte buffer is from the beginning of the vec. We
+ // also have to ensure that we don't exceed the maximum shift.
+ let (mut pos, prev) = self.get_vec_pos();
+ pos += start;
+
+ if pos <= MAX_VEC_POS {
+ self.set_vec_pos(pos, prev);
+ } else {
+ // The repr must be upgraded to ARC. This will never happen
+ // on 64 bit systems and will only happen on 32 bit systems
+ // when shifting past 134,217,727 bytes. As such, we don't
+ // worry too much about performance here.
+ self.promote_to_shared(/*ref_count = */ 1);
+ }
+ }
+
+ // Updating the start of the view is setting `ptr` to point to the
+ // new start and updating the `len` field to reflect the new length
+ // of the view.
+ self.ptr = vptr(self.ptr.as_ptr().add(start));
+
+ if self.len >= start {
+ self.len -= start;
+ } else {
+ self.len = 0;
+ }
+
+ self.cap -= start;
+ }
+
+ unsafe fn set_end(&mut self, end: usize) {
+ debug_assert_eq!(self.kind(), KIND_ARC);
+ assert!(end <= self.cap, "set_end out of bounds");
+
+ self.cap = end;
+ self.len = cmp::min(self.len, end);
+ }
+
+ fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
+ if other.capacity() == 0 {
+ return Ok(());
+ }
+
+ let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
+ if ptr == other.ptr.as_ptr()
+ && self.kind() == KIND_ARC
+ && other.kind() == KIND_ARC
+ && self.data == other.data
+ {
+ // Contiguous blocks, just combine directly
+ self.len += other.len;
+ self.cap += other.cap;
+ Ok(())
+ } else {
+ Err(other)
+ }
+ }
+
+ #[inline]
+ fn kind(&self) -> usize {
+ self.data as usize & KIND_MASK
+ }
+
+ unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+ debug_assert!(ref_cnt == 1 || ref_cnt == 2);
+
+ let original_capacity_repr =
+ (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
+
+ // The vec offset cannot be concurrently mutated, so there
+ // should be no danger in reading it.
+ let off = (self.data as usize) >> VEC_POS_OFFSET;
+
+ // First, allocate a new `Shared` instance containing the
+ // `Vec` fields. It's important to note that `ptr`, `len`,
+ // and `cap` cannot be mutated without having `&mut self`.
+ // This means that these fields will not be concurrently
+ // updated and since the buffer hasn't been promoted to an
+ // `Arc`, those three fields still are the components of the
+ // vector.
+ let shared = Box::new(Shared {
+ vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
+ original_capacity_repr,
+ ref_count: AtomicUsize::new(ref_cnt),
+ });
+
+ let shared = Box::into_raw(shared);
+
+ // The pointer should be aligned, so this assert should
+ // always succeed.
+ debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
+
+ self.data = shared;
+ }
+
+ /// Makes an exact shallow clone of `self`.
+ ///
+ /// The kind of `self` doesn't matter, but this is unsafe
+ /// because the clone will have the same offsets. You must
+ /// ensure that the value returned to the user doesn't allow
+ /// two views into the same range.
+ #[inline]
+ unsafe fn shallow_clone(&mut self) -> BytesMut {
+ if self.kind() == KIND_ARC {
+ increment_shared(self.data);
+ ptr::read(self)
+ } else {
+ self.promote_to_shared(/*ref_count = */ 2);
+ ptr::read(self)
+ }
+ }
+
+ #[inline]
+ unsafe fn get_vec_pos(&mut self) -> (usize, usize) {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+
+ let prev = self.data as usize;
+ (prev >> VEC_POS_OFFSET, prev)
+ }
+
+ #[inline]
+ unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) {
+ debug_assert_eq!(self.kind(), KIND_VEC);
+ debug_assert!(pos <= MAX_VEC_POS);
+
+ self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
+ }
+
+ /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
+ ///
+ /// The returned slice can be used to fill the buffer with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: BytesMut::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// // Allocate buffer big enough for 10 bytes.
+ /// let mut buf = BytesMut::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = buf.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 bytes of the buffer as being initialized.
+ /// unsafe {
+ /// buf.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&buf[..], &[0, 1, 2]);
+ /// ```
+ #[inline]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ unsafe {
+ let ptr = self.ptr.as_ptr().add(self.len);
+ let len = self.cap - self.len;
+
+ slice::from_raw_parts_mut(ptr.cast(), len)
+ }
+ }
+}
+
+impl Drop for BytesMut {
+ fn drop(&mut self) {
+ let kind = self.kind();
+
+ if kind == KIND_VEC {
+ unsafe {
+ let (off, _) = self.get_vec_pos();
+
+ // Vector storage, free the vector
+ let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
+ }
+ } else if kind == KIND_ARC {
+ unsafe { release_shared(self.data) };
+ }
+ }
+}
+
+impl Buf for BytesMut {
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn chunk(&self) -> &[u8] {
+ self.as_slice()
+ }
+
+ #[inline]
+ fn advance(&mut self, cnt: usize) {
+ assert!(
+ cnt <= self.remaining(),
+ "cannot advance past `remaining`: {:?} <= {:?}",
+ cnt,
+ self.remaining(),
+ );
+ unsafe {
+ self.set_start(cnt);
+ }
+ }
+
+ fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
+ self.split_to(len).freeze()
+ }
+}
+
+unsafe impl BufMut for BytesMut {
+ #[inline]
+ fn remaining_mut(&self) -> usize {
+ usize::MAX - self.len()
+ }
+
+ #[inline]
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ let new_len = self.len() + cnt;
+ assert!(
+ new_len <= self.cap,
+ "new_len = {}; capacity = {}",
+ new_len,
+ self.cap
+ );
+ self.len = new_len;
+ }
+
+ #[inline]
+ fn chunk_mut(&mut self) -> &mut UninitSlice {
+ if self.capacity() == self.len() {
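+ // Presumably so callers always get a non-empty chunk to write
+ // into: grow by a small default amount when the buffer is full.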
+ self.reserve(64);
+ }
+ UninitSlice::from_slice(self.spare_capacity_mut())
+ }
+
+ // Specialize these methods so they can skip checking `remaining_mut`
+ // and `advance_mut`.
+
+ fn put<T: crate::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
+ while src.has_remaining() {
+ let s = src.chunk();
+ let l = s.len();
+ self.extend_from_slice(s);
+ src.advance(l);
+ }
+ }
+
+ fn put_slice(&mut self, src: &[u8]) {
+ self.extend_from_slice(src);
+ }
+
+ fn put_bytes(&mut self, val: u8, cnt: usize) {
+ self.reserve(cnt);
+ unsafe {
+ let dst = self.spare_capacity_mut();
+ // Reserved above
+ debug_assert!(dst.len() >= cnt);
+
+ ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
+
+ self.advance_mut(cnt);
+ }
+ }
+}
+
+impl AsRef<[u8]> for BytesMut {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+impl Deref for BytesMut {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ self.as_ref()
+ }
+}
+
+impl AsMut<[u8]> for BytesMut {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [u8] {
+ self.as_slice_mut()
+ }
+}
+
+impl DerefMut for BytesMut {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [u8] {
+ self.as_mut()
+ }
+}
+
+impl<'a> From<&'a [u8]> for BytesMut {
+ fn from(src: &'a [u8]) -> BytesMut {
+ BytesMut::from_vec(src.to_vec())
+ }
+}
+
+impl<'a> From<&'a str> for BytesMut {
+ fn from(src: &'a str) -> BytesMut {
+ BytesMut::from(src.as_bytes())
+ }
+}
+
+impl From<BytesMut> for Bytes {
+ fn from(src: BytesMut) -> Bytes {
+ src.freeze()
+ }
+}
+
+impl PartialEq for BytesMut {
+ fn eq(&self, other: &BytesMut) -> bool {
+ self.as_slice() == other.as_slice()
+ }
+}
+
+impl PartialOrd for BytesMut {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ self.as_slice().partial_cmp(other.as_slice())
+ }
+}
+
+impl Ord for BytesMut {
+ fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
+ self.as_slice().cmp(other.as_slice())
+ }
+}
+
+impl Eq for BytesMut {}
+
+impl Default for BytesMut {
+ #[inline]
+ fn default() -> BytesMut {
+ BytesMut::new()
+ }
+}
+
+impl hash::Hash for BytesMut {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
+ let s: &[u8] = self.as_ref();
+ s.hash(state);
+ }
+}
+
+impl Borrow<[u8]> for BytesMut {
+ fn borrow(&self) -> &[u8] {
+ self.as_ref()
+ }
+}
+
+impl BorrowMut<[u8]> for BytesMut {
+ fn borrow_mut(&mut self) -> &mut [u8] {
+ self.as_mut()
+ }
+}
+
+impl fmt::Write for BytesMut {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ if self.remaining_mut() >= s.len() {
+ self.put_slice(s.as_bytes());
+ Ok(())
+ } else {
+ Err(fmt::Error)
+ }
+ }
+
+ #[inline]
+ fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
+ fmt::write(self, args)
+ }
+}
+
+impl Clone for BytesMut {
+ fn clone(&self) -> BytesMut {
+ BytesMut::from(&self[..])
+ }
+}
+
+impl IntoIterator for BytesMut {
+ type Item = u8;
+ type IntoIter = IntoIter<BytesMut>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self)
+ }
+}
+
+impl<'a> IntoIterator for &'a BytesMut {
+ type Item = &'a u8;
+ type IntoIter = core::slice::Iter<'a, u8>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_ref().iter()
+ }
+}
+
+impl Extend<u8> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = u8>,
+ {
+ let iter = iter.into_iter();
+
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower);
+
+ // TODO: optimize
+ // 1. If self.kind() == KIND_VEC, use Vec::extend
+ // 2. Make `reserve` inline-able
+ for b in iter {
+ self.reserve(1);
+ self.put_u8(b);
+ }
+ }
+}
+
+impl<'a> Extend<&'a u8> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = &'a u8>,
+ {
+ self.extend(iter.into_iter().copied())
+ }
+}
+
+impl Extend<Bytes> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = Bytes>,
+ {
+ for bytes in iter {
+ self.extend_from_slice(&bytes)
+ }
+ }
+}
+
+impl FromIterator<u8> for BytesMut {
+ fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
+ BytesMut::from_vec(Vec::from_iter(into_iter))
+ }
+}
+
+impl<'a> FromIterator<&'a u8> for BytesMut {
+ fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
+ BytesMut::from_iter(into_iter.into_iter().copied())
+ }
+}
+
+/*
+ *
+ * ===== Inner =====
+ *
+ */
+
+unsafe fn increment_shared(ptr: *mut Shared) {
+ let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
+
+ if old_size > isize::MAX as usize {
+ crate::abort();
+ }
+}
+
+unsafe fn release_shared(ptr: *mut Shared) {
+ // `Shared` storage... follow the drop steps from Arc.
+ if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
+ return;
+ }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_count.load(Ordering::Acquire);
+
+ // Drop the data
+ drop(Box::from_raw(ptr));
+}
+
+impl Shared {
+ fn is_unique(&self) -> bool {
+ // The goal is to check if the current handle is the only handle
+ // that currently has access to the buffer. This is done by
+ // checking if the `ref_count` is currently 1.
+ //
+ // The `Acquire` ordering synchronizes with the `Release` as
+ // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
+ // operation guarantees that any mutations done in other threads
+ // are ordered before the `ref_count` is decremented. As such,
+ // this `Acquire` will guarantee that those mutations are
+ // visible to the current thread.
+ self.ref_count.load(Ordering::Acquire) == 1
+ }
+}
+
+#[inline]
+fn original_capacity_to_repr(cap: usize) -> usize {
+ let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
+ cmp::min(
+ width,
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+ )
+}
+
+fn original_capacity_from_repr(repr: usize) -> usize {
+ if repr == 0 {
+ return 0;
+ }
+
+ 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
+}
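+
+// Worked values, consistent with the constants above: capacities below
+// 1 KiB map to repr 0 (restored as 0); 1 KiB maps to repr 1 (restored as
+// 1 KiB); 64 KiB and above saturate at repr 7 (restored as 64 KiB).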
+
+/*
+#[test]
+fn test_original_capacity_to_repr() {
+ assert_eq!(original_capacity_to_repr(0), 0);
+
+ let max_width = 32;
+
+ for width in 1..(max_width + 1) {
+ let cap = 1 << width - 1;
+
+ let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
+ 0
+ } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
+ width - MIN_ORIGINAL_CAPACITY_WIDTH
+ } else {
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
+ };
+
+ assert_eq!(original_capacity_to_repr(cap), expected);
+
+ if width > 1 {
+ assert_eq!(original_capacity_to_repr(cap + 1), expected);
+ }
+
+ // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
+ if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
+ assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap + 76), expected);
+ } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
+ assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
+ assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
+ }
+ }
+}
+
+#[test]
+fn test_original_capacity_from_repr() {
+ assert_eq!(0, original_capacity_from_repr(0));
+
+ let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
+
+ assert_eq!(min_cap, original_capacity_from_repr(1));
+ assert_eq!(min_cap * 2, original_capacity_from_repr(2));
+ assert_eq!(min_cap * 4, original_capacity_from_repr(3));
+ assert_eq!(min_cap * 8, original_capacity_from_repr(4));
+ assert_eq!(min_cap * 16, original_capacity_from_repr(5));
+ assert_eq!(min_cap * 32, original_capacity_from_repr(6));
+ assert_eq!(min_cap * 64, original_capacity_from_repr(7));
+}
+*/
+
+unsafe impl Send for BytesMut {}
+unsafe impl Sync for BytesMut {}
+
+/*
+ *
+ * ===== PartialEq / PartialOrd =====
+ *
+ */
+
+impl PartialEq<[u8]> for BytesMut {
+ fn eq(&self, other: &[u8]) -> bool {
+ &**self == other
+ }
+}
+
+impl PartialOrd<[u8]> for BytesMut {
+ fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other)
+ }
+}
+
+impl PartialEq<BytesMut> for [u8] {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for [u8] {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<str> for BytesMut {
+ fn eq(&self, other: &str) -> bool {
+ &**self == other.as_bytes()
+ }
+}
+
+impl PartialOrd<str> for BytesMut {
+ fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<BytesMut> for str {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for str {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl PartialEq<Vec<u8>> for BytesMut {
+ fn eq(&self, other: &Vec<u8>) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<Vec<u8>> for BytesMut {
+ fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(&other[..])
+ }
+}
+
+impl PartialEq<BytesMut> for Vec<u8> {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for Vec<u8> {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ other.partial_cmp(self)
+ }
+}
+
+impl PartialEq<String> for BytesMut {
+ fn eq(&self, other: &String) -> bool {
+ *self == other[..]
+ }
+}
+
+impl PartialOrd<String> for BytesMut {
+ fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
+ (**self).partial_cmp(other.as_bytes())
+ }
+}
+
+impl PartialEq<BytesMut> for String {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for String {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
+ }
+}
+
+impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
+where
+ BytesMut: PartialEq<T>,
+{
+ fn eq(&self, other: &&'a T) -> bool {
+ *self == **other
+ }
+}
+
+impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
+where
+ BytesMut: PartialOrd<T>,
+{
+ fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
+ self.partial_cmp(*other)
+ }
+}
+
+impl PartialEq<BytesMut> for &[u8] {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for &[u8] {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
+ }
+}
+
+impl PartialEq<BytesMut> for &str {
+ fn eq(&self, other: &BytesMut) -> bool {
+ *other == *self
+ }
+}
+
+impl PartialOrd<BytesMut> for &str {
+ fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
+ other.partial_cmp(self)
+ }
+}
+
+impl PartialEq<BytesMut> for Bytes {
+ fn eq(&self, other: &BytesMut) -> bool {
+ other[..] == self[..]
+ }
+}
+
+impl PartialEq<Bytes> for BytesMut {
+ fn eq(&self, other: &Bytes) -> bool {
+ other[..] == self[..]
+ }
+}
+
+impl From<BytesMut> for Vec<u8> {
+ fn from(mut bytes: BytesMut) -> Self {
+ let kind = bytes.kind();
+
+ let mut vec = if kind == KIND_VEC {
+ unsafe {
+ let (off, _) = bytes.get_vec_pos();
+ rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+ }
+ } else if kind == KIND_ARC {
+ let shared = bytes.data as *mut Shared;
+
+ if unsafe { (*shared).is_unique() } {
+ let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+ unsafe { release_shared(shared) };
+
+ vec
+ } else {
+ return bytes.deref().to_vec();
+ }
+ } else {
+ return bytes.deref().to_vec();
+ };
+
+ let len = bytes.len;
+
+ unsafe {
+ ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+ vec.set_len(len);
+ }
+
+ mem::forget(bytes);
+
+ vec
+ }
+}
+
+#[inline]
+fn vptr(ptr: *mut u8) -> NonNull<u8> {
+ if cfg!(debug_assertions) {
+ NonNull::new(ptr).expect("Vec pointer should be non-null")
+ } else {
+ unsafe { NonNull::new_unchecked(ptr) }
+ }
+}
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but that cast fails under Miri when
+/// strict provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+ let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+ debug_assert_eq!(ptr as usize, addr);
+ ptr.cast::<T>()
+}
+
+/// Precondition: `dst >= original`
+///
+/// This function is equivalent to:
+///
+/// ```rust,ignore
+/// dst.offset_from(original) as usize
+/// ```
+///
+/// But because the minimum supported Rust version is 1.39 and
+/// `offset_from` was only stabilized in 1.47, we cannot use it.
+#[inline]
+fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
+ debug_assert!(dst >= original);
+
+ dst as usize - original as usize
+}
+
+unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
+ let ptr = ptr.offset(-(off as isize));
+ len += off;
+ cap += off;
+
+ Vec::from_raw_parts(ptr, len, cap)
+}
+
+// ===== impl SharedVtable =====
+
+static SHARED_VTABLE: Vtable = Vtable {
+ clone: shared_v_clone,
+ to_vec: shared_v_to_vec,
+ drop: shared_v_drop,
+};
+
+unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
+ let shared = data.load(Ordering::Relaxed) as *mut Shared;
+ increment_shared(shared);
+
+ let data = AtomicPtr::new(shared as *mut ());
+ Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
+}
+
+unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+ if (*shared).is_unique() {
+ let shared = &mut *shared;
+
+ // Drop shared
+ let mut vec = mem::replace(&mut shared.vec, Vec::new());
+ release_shared(shared);
+
+ // Copy back buffer
+ ptr::copy(ptr, vec.as_mut_ptr(), len);
+ vec.set_len(len);
+
+ vec
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
+unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
+ data.with_mut(|shared| {
+ release_shared(*shared as *mut Shared);
+ });
+}
+
+// compile-fails
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split_to(6);
+/// }
+/// ```
+fn _split_to_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split_off(6);
+/// }
+/// ```
+fn _split_off_must_use() {}
+
+/// ```compile_fail
+/// use bytes::BytesMut;
+/// #[deny(unused_must_use)]
+/// {
+/// let mut b1 = BytesMut::from("hello world");
+/// b1.split();
+/// }
+/// ```
+fn _split_must_use() {}
+
+// fuzz tests
+#[cfg(all(test, loom))]
+mod fuzz {
+ use loom::sync::Arc;
+ use loom::thread;
+
+ use super::BytesMut;
+ use crate::Bytes;
+
+ #[test]
+ fn bytes_mut_cloning_frozen() {
+ loom::model(|| {
+ let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
+ let addr = a.as_ptr() as usize;
+
+ // test that `Bytes::clone` is `Sync` by putting it in an `Arc`
+ let a1 = Arc::new(a);
+ let a2 = a1.clone();
+
+ let t1 = thread::spawn(move || {
+ let b: Bytes = (*a1).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ let t2 = thread::spawn(move || {
+ let b: Bytes = (*a2).clone();
+ assert_eq!(b.as_ptr() as usize, addr);
+ });
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+ });
+ }
+}