Diffstat (limited to 'third_party/rust/crossbeam-queue/src')
 third_party/rust/crossbeam-queue/src/array_queue.rs | 425 +++++++++++++++++++
 third_party/rust/crossbeam-queue/src/err.rs         |  46 ++
 third_party/rust/crossbeam-queue/src/lib.rs         |  22 +
 third_party/rust/crossbeam-queue/src/seg_queue.rs   | 481 ++++++++++++++++++++
 4 files changed, 974 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/crossbeam-queue/src/array_queue.rs b/third_party/rust/crossbeam-queue/src/array_queue.rs
new file mode 100644
index 0000000000..7ce939c890
--- /dev/null
+++ b/third_party/rust/crossbeam-queue/src/array_queue.rs
@@ -0,0 +1,425 @@
+//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
+//!
+//! Source:
+//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+//!
+//! Copyright & License:
+//! - Copyright (c) 2010-2011 Dmitry Vyukov
+//! - Simplified BSD License and Apache License, Version 2.0
+//! - http://www.1024cores.net/home/code-license
+
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::{self, AtomicUsize, Ordering};
+
+use crossbeam_utils::{Backoff, CachePadded};
+
+use err::{PopError, PushError};
+
+/// A slot in a queue.
+struct Slot<T> {
+ /// The current stamp.
+ ///
+    /// If the stamp equals the tail, this slot will be next written to. If it equals the head,
+    /// this slot will be next read from.
+ stamp: AtomicUsize,
+
+ /// The value in this slot.
+ value: UnsafeCell<T>,
+}
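+
+// A lifecycle sketch (illustrative, not part of the upstream source): slot `i` of a
+// fresh queue starts with `stamp == i`, which equals the tail, so the next push
+// writes into it and bumps the stamp; the pop that later reads the value bumps it
+// again by one lap, marking the slot writable on the next pass over the buffer:
+//
+//     slot.stamp.store(tail + 1, Ordering::Release);                    // after a push
+//     slot.stamp.store(head.wrapping_add(one_lap), Ordering::Release);  // after a pop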
+
+/// A bounded multi-producer multi-consumer queue.
+///
+/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed
+/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an
+/// element into a full queue will fail. Having a buffer allocated upfront makes this queue a bit
+/// faster than [`SegQueue`].
+///
+/// [`SegQueue`]: struct.SegQueue.html
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_queue::{ArrayQueue, PushError};
+///
+/// let q = ArrayQueue::new(2);
+///
+/// assert_eq!(q.push('a'), Ok(()));
+/// assert_eq!(q.push('b'), Ok(()));
+/// assert_eq!(q.push('c'), Err(PushError('c')));
+/// assert_eq!(q.pop(), Ok('a'));
+/// ```
+pub struct ArrayQueue<T> {
+ /// The head of the queue.
+ ///
+ /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
+ /// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
+ ///
+ /// Elements are popped from the head of the queue.
+ head: CachePadded<AtomicUsize>,
+
+ /// The tail of the queue.
+ ///
+ /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
+ /// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
+ ///
+ /// Elements are pushed into the tail of the queue.
+ tail: CachePadded<AtomicUsize>,
+
+ /// The buffer holding slots.
+ buffer: *mut Slot<T>,
+
+ /// The queue capacity.
+ cap: usize,
+
+ /// A stamp with the value of `{ lap: 1, index: 0 }`.
+ one_lap: usize,
+
+ /// Indicates that dropping an `ArrayQueue<T>` may drop elements of type `T`.
+ _marker: PhantomData<T>,
+}
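+
+// A minimal sketch (illustrative, not part of the upstream source) of how a stamp
+// packs an index and a lap into a single `usize`, given that `one_lap` is a power
+// of two; `push` and `pop` below use exactly this decomposition:
+//
+//     let index = stamp & (one_lap - 1);   // lower bits: position in the buffer
+//     let lap   = stamp & !(one_lap - 1);  // upper bits: how many times we wrapped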
+
+unsafe impl<T: Send> Sync for ArrayQueue<T> {}
+unsafe impl<T: Send> Send for ArrayQueue<T> {}
+
+impl<T> ArrayQueue<T> {
+ /// Creates a new bounded queue with the given capacity.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the capacity is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::ArrayQueue;
+ ///
+ /// let q = ArrayQueue::<i32>::new(100);
+ /// ```
+ pub fn new(cap: usize) -> ArrayQueue<T> {
+ assert!(cap > 0, "capacity must be non-zero");
+
+ // Head is initialized to `{ lap: 0, index: 0 }`.
+ // Tail is initialized to `{ lap: 0, index: 0 }`.
+ let head = 0;
+ let tail = 0;
+
+ // Allocate a buffer of `cap` slots.
+ let buffer = {
+ let mut v = Vec::<Slot<T>>::with_capacity(cap);
+ let ptr = v.as_mut_ptr();
+ mem::forget(v);
+ ptr
+ };
+
+ // Initialize stamps in the slots.
+ for i in 0..cap {
+ unsafe {
+ // Set the stamp to `{ lap: 0, index: i }`.
+ let slot = buffer.add(i);
+ ptr::write(&mut (*slot).stamp, AtomicUsize::new(i));
+ }
+ }
+
+ // One lap is the smallest power of two greater than `cap`.
+ let one_lap = (cap + 1).next_power_of_two();
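+        // For example (illustrative values): with `cap = 100`, `one_lap` is 128, so
+        // the low 7 bits of a stamp hold the index and the remaining bits hold the lap.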
+
+ ArrayQueue {
+ buffer,
+ cap,
+ one_lap,
+ head: CachePadded::new(AtomicUsize::new(head)),
+ tail: CachePadded::new(AtomicUsize::new(tail)),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Attempts to push an element into the queue.
+ ///
+ /// If the queue is full, the element is returned back as an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::{ArrayQueue, PushError};
+ ///
+ /// let q = ArrayQueue::new(1);
+ ///
+ /// assert_eq!(q.push(10), Ok(()));
+ /// assert_eq!(q.push(20), Err(PushError(20)));
+ /// ```
+ pub fn push(&self, value: T) -> Result<(), PushError<T>> {
+ let backoff = Backoff::new();
+ let mut tail = self.tail.load(Ordering::Relaxed);
+
+ loop {
+ // Deconstruct the tail.
+ let index = tail & (self.one_lap - 1);
+ let lap = tail & !(self.one_lap - 1);
+
+ // Inspect the corresponding slot.
+ let slot = unsafe { &*self.buffer.add(index) };
+ let stamp = slot.stamp.load(Ordering::Acquire);
+
+ // If the tail and the stamp match, we may attempt to push.
+ if tail == stamp {
+ let new_tail = if index + 1 < self.cap {
+ // Same lap, incremented index.
+ // Set to `{ lap: lap, index: index + 1 }`.
+ tail + 1
+ } else {
+ // One lap forward, index wraps around to zero.
+ // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
+ lap.wrapping_add(self.one_lap)
+ };
+
+ // Try moving the tail.
+ match self
+ .tail
+ .compare_exchange_weak(tail, new_tail, Ordering::SeqCst, Ordering::Relaxed)
+ {
+ Ok(_) => {
+ // Write the value into the slot and update the stamp.
+ unsafe { slot.value.get().write(value); }
+ slot.stamp.store(tail + 1, Ordering::Release);
+ return Ok(());
+ }
+ Err(t) => {
+ tail = t;
+ backoff.spin();
+ }
+ }
+ } else if stamp.wrapping_add(self.one_lap) == tail + 1 {
+ atomic::fence(Ordering::SeqCst);
+ let head = self.head.load(Ordering::Relaxed);
+
+ // If the head lags one lap behind the tail as well...
+ if head.wrapping_add(self.one_lap) == tail {
+ // ...then the queue is full.
+ return Err(PushError(value));
+ }
+
+ backoff.spin();
+ tail = self.tail.load(Ordering::Relaxed);
+ } else {
+ // Snooze because we need to wait for the stamp to get updated.
+ backoff.snooze();
+ tail = self.tail.load(Ordering::Relaxed);
+ }
+ }
+ }
+
+ /// Attempts to pop an element from the queue.
+ ///
+ /// If the queue is empty, an error is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::{ArrayQueue, PopError};
+ ///
+ /// let q = ArrayQueue::new(1);
+ /// assert_eq!(q.push(10), Ok(()));
+ ///
+ /// assert_eq!(q.pop(), Ok(10));
+ /// assert_eq!(q.pop(), Err(PopError));
+ /// ```
+ pub fn pop(&self) -> Result<T, PopError> {
+ let backoff = Backoff::new();
+ let mut head = self.head.load(Ordering::Relaxed);
+
+ loop {
+ // Deconstruct the head.
+ let index = head & (self.one_lap - 1);
+ let lap = head & !(self.one_lap - 1);
+
+ // Inspect the corresponding slot.
+ let slot = unsafe { &*self.buffer.add(index) };
+ let stamp = slot.stamp.load(Ordering::Acquire);
+
+            // If the stamp is ahead of the head by 1, we may attempt to pop.
+ if head + 1 == stamp {
+ let new = if index + 1 < self.cap {
+ // Same lap, incremented index.
+ // Set to `{ lap: lap, index: index + 1 }`.
+ head + 1
+ } else {
+ // One lap forward, index wraps around to zero.
+ // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
+ lap.wrapping_add(self.one_lap)
+ };
+
+ // Try moving the head.
+ match self
+ .head
+ .compare_exchange_weak(head, new, Ordering::SeqCst, Ordering::Relaxed)
+ {
+ Ok(_) => {
+ // Read the value from the slot and update the stamp.
+ let msg = unsafe { slot.value.get().read() };
+ slot.stamp.store(head.wrapping_add(self.one_lap), Ordering::Release);
+ return Ok(msg);
+ }
+ Err(h) => {
+ head = h;
+ backoff.spin();
+ }
+ }
+ } else if stamp == head {
+ atomic::fence(Ordering::SeqCst);
+ let tail = self.tail.load(Ordering::Relaxed);
+
+                // If the tail equals the head, that means the queue is empty.
+ if tail == head {
+ return Err(PopError);
+ }
+
+ backoff.spin();
+ head = self.head.load(Ordering::Relaxed);
+ } else {
+ // Snooze because we need to wait for the stamp to get updated.
+ backoff.snooze();
+ head = self.head.load(Ordering::Relaxed);
+ }
+ }
+ }
+
+ /// Returns the capacity of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use crossbeam_queue::ArrayQueue;
+ ///
+ /// let q = ArrayQueue::<i32>::new(100);
+ ///
+ /// assert_eq!(q.capacity(), 100);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.cap
+ }
+
+ /// Returns `true` if the queue is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use crossbeam_queue::ArrayQueue;
+ ///
+ /// let q = ArrayQueue::new(100);
+ ///
+ /// assert!(q.is_empty());
+ /// q.push(1).unwrap();
+ /// assert!(!q.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ let head = self.head.load(Ordering::SeqCst);
+ let tail = self.tail.load(Ordering::SeqCst);
+
+        // Is the head equal to the tail?
+        //
+        // Note: If the head changes just before we load the tail, that means there was a moment
+        // when the queue was not empty, so it is safe to just return `false`.
+ tail == head
+ }
+
+ /// Returns `true` if the queue is full.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use crossbeam_queue::ArrayQueue;
+ ///
+ /// let q = ArrayQueue::new(1);
+ ///
+ /// assert!(!q.is_full());
+ /// q.push(1).unwrap();
+ /// assert!(q.is_full());
+ /// ```
+ pub fn is_full(&self) -> bool {
+ let tail = self.tail.load(Ordering::SeqCst);
+ let head = self.head.load(Ordering::SeqCst);
+
+ // Is the head lagging one lap behind tail?
+ //
+ // Note: If the tail changes just before we load the head, that means there was a moment
+ // when the queue was not full, so it is safe to just return `false`.
+ head.wrapping_add(self.one_lap) == tail
+ }
+
+ /// Returns the number of elements in the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use crossbeam_queue::ArrayQueue;
+ ///
+ /// let q = ArrayQueue::new(100);
+ /// assert_eq!(q.len(), 0);
+ ///
+ /// q.push(10).unwrap();
+ /// assert_eq!(q.len(), 1);
+ ///
+ /// q.push(20).unwrap();
+ /// assert_eq!(q.len(), 2);
+ /// ```
+ pub fn len(&self) -> usize {
+ loop {
+ // Load the tail, then load the head.
+ let tail = self.tail.load(Ordering::SeqCst);
+ let head = self.head.load(Ordering::SeqCst);
+
+ // If the tail didn't change, we've got consistent values to work with.
+ if self.tail.load(Ordering::SeqCst) == tail {
+ let hix = head & (self.one_lap - 1);
+ let tix = tail & (self.one_lap - 1);
+
+ return if hix < tix {
+ tix - hix
+ } else if hix > tix {
+ self.cap - hix + tix
+ } else if tail == head {
+ 0
+ } else {
+ self.cap
+ };
+ }
+ }
+ }
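+
+    // A worked example (illustrative, not part of the upstream source): with `cap = 100`
+    // and `one_lap = 128`, a head of `{ lap: 0, index: 90 }` and a tail of
+    // `{ lap: 1, index: 10 }` give `hix = 90` and `tix = 10`, so `len()` returns
+    // `cap - hix + tix = 100 - 90 + 10 = 20`.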
+}
+
+impl<T> Drop for ArrayQueue<T> {
+ fn drop(&mut self) {
+ // Get the index of the head.
+ let hix = self.head.load(Ordering::Relaxed) & (self.one_lap - 1);
+
+ // Loop over all slots that hold a message and drop them.
+ for i in 0..self.len() {
+ // Compute the index of the next slot holding a message.
+ let index = if hix + i < self.cap {
+ hix + i
+ } else {
+ hix + i - self.cap
+ };
+
+ unsafe {
+ self.buffer.add(index).drop_in_place();
+ }
+ }
+
+ // Finally, deallocate the buffer, but don't run any destructors.
+ unsafe {
+ Vec::from_raw_parts(self.buffer, 0, self.cap);
+ }
+ }
+}
+
+impl<T> fmt::Debug for ArrayQueue<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("ArrayQueue { .. }")
+ }
+}
diff --git a/third_party/rust/crossbeam-queue/src/err.rs b/third_party/rust/crossbeam-queue/src/err.rs
new file mode 100644
index 0000000000..fcc84bb28b
--- /dev/null
+++ b/third_party/rust/crossbeam-queue/src/err.rs
@@ -0,0 +1,46 @@
+use std::error;
+use std::fmt;
+
+/// Error which occurs when popping from an empty queue.
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub struct PopError;
+
+impl fmt::Debug for PopError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "PopError".fmt(f)
+ }
+}
+
+impl fmt::Display for PopError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "popping from an empty queue".fmt(f)
+ }
+}
+
+impl error::Error for PopError {
+ fn description(&self) -> &str {
+ "popping from an empty queue"
+ }
+}
+
+/// Error which occurs when pushing into a full queue.
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub struct PushError<T>(pub T);
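+
+// A usage sketch (illustrative; `q` and `value` are placeholders): because the
+// rejected element is carried inside the error, it can be recovered by destructuring:
+//
+//     match q.push(value) {
+//         Ok(()) => { /* enqueued */ }
+//         Err(PushError(value)) => { /* queue was full; `value` is handed back */ }
+//     }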
+
+impl<T> fmt::Debug for PushError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "PushError(..)".fmt(f)
+ }
+}
+
+impl<T> fmt::Display for PushError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "pushing into a full queue".fmt(f)
+ }
+}
+
+impl<T: Send> error::Error for PushError<T> {
+ fn description(&self) -> &str {
+ "pushing into a full queue"
+ }
+}
diff --git a/third_party/rust/crossbeam-queue/src/lib.rs b/third_party/rust/crossbeam-queue/src/lib.rs
new file mode 100644
index 0000000000..17131d9e98
--- /dev/null
+++ b/third_party/rust/crossbeam-queue/src/lib.rs
@@ -0,0 +1,22 @@
+//! Concurrent queues.
+//!
+//! This crate provides concurrent queues that can be shared among threads:
+//!
+//! * [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.
+//! * [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.
+//!
+//! [`ArrayQueue`]: struct.ArrayQueue.html
+//! [`SegQueue`]: struct.SegQueue.html
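+//!
+//! A small combined example (a sketch based on the per-type examples in this crate):
+//!
+//! ```
+//! use crossbeam_queue::{ArrayQueue, SegQueue};
+//!
+//! // Bounded queue: pushes fail once the capacity is reached.
+//! let bounded = ArrayQueue::new(2);
+//! assert!(bounded.push(1).is_ok());
+//! assert!(bounded.push(2).is_ok());
+//! assert!(bounded.push(3).is_err());
+//!
+//! // Unbounded queue: pushes always succeed.
+//! let unbounded = SegQueue::new();
+//! unbounded.push(1);
+//! assert_eq!(unbounded.pop(), Ok(1));
+//! ```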
+
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+
+extern crate crossbeam_utils;
+
+mod array_queue;
+mod err;
+mod seg_queue;
+
+pub use self::array_queue::ArrayQueue;
+pub use self::seg_queue::SegQueue;
+pub use self::err::{PopError, PushError};
diff --git a/third_party/rust/crossbeam-queue/src/seg_queue.rs b/third_party/rust/crossbeam-queue/src/seg_queue.rs
new file mode 100644
index 0000000000..d9783edafb
--- /dev/null
+++ b/third_party/rust/crossbeam-queue/src/seg_queue.rs
@@ -0,0 +1,481 @@
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop};
+use std::ptr;
+use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+
+use crossbeam_utils::{Backoff, CachePadded};
+
+use err::PopError;
+
+// Bits indicating the state of a slot:
+// * If a value has been written into the slot, `WRITE` is set.
+// * If a value has been read from the slot, `READ` is set.
+// * If the block is being destroyed, `DESTROY` is set.
+const WRITE: usize = 1;
+const READ: usize = 2;
+const DESTROY: usize = 4;
+
+// Each block covers one "lap" of indices.
+const LAP: usize = 32;
+// The maximum number of values a block can hold.
+const BLOCK_CAP: usize = LAP - 1;
+// How many lower bits are reserved for metadata.
+const SHIFT: usize = 1;
+// Indicates that the block is not the last one.
+const HAS_NEXT: usize = 1;
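+
+// A minimal sketch (illustrative, not part of the upstream source) of how `push` and
+// `pop` below decode an index: the low `SHIFT` bits are reserved for metadata (on the
+// head index, the `HAS_NEXT` bit records that the current block is not the last one),
+// and the remaining bits count slots, wrapping into a new block every `LAP` slots:
+//
+//     let offset   = (index >> SHIFT) % LAP;  // slot offset within the current block
+//     let has_next = index & HAS_NEXT != 0;   // meaningful on the head index only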
+
+/// A slot in a block.
+struct Slot<T> {
+ /// The value.
+ value: UnsafeCell<ManuallyDrop<T>>,
+
+ /// The state of the slot.
+ state: AtomicUsize,
+}
+
+impl<T> Slot<T> {
+ /// Waits until a value is written into the slot.
+ fn wait_write(&self) {
+ let backoff = Backoff::new();
+ while self.state.load(Ordering::Acquire) & WRITE == 0 {
+ backoff.snooze();
+ }
+ }
+}
+
+/// A block in a linked list.
+///
+/// Each block in the list can hold up to `BLOCK_CAP` values.
+struct Block<T> {
+ /// The next block in the linked list.
+ next: AtomicPtr<Block<T>>,
+
+ /// Slots for values.
+ slots: [Slot<T>; BLOCK_CAP],
+}
+
+impl<T> Block<T> {
+    /// Creates an empty block.
+ fn new() -> Block<T> {
+ unsafe { mem::zeroed() }
+ }
+
+ /// Waits until the next pointer is set.
+ fn wait_next(&self) -> *mut Block<T> {
+ let backoff = Backoff::new();
+ loop {
+ let next = self.next.load(Ordering::Acquire);
+ if !next.is_null() {
+ return next;
+ }
+ backoff.snooze();
+ }
+ }
+
+ /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
+ unsafe fn destroy(this: *mut Block<T>, start: usize) {
+ // It is not necessary to set the `DESTROY` bit in the last slot because that slot has
+ // begun destruction of the block.
+ for i in start..BLOCK_CAP - 1 {
+ let slot = (*this).slots.get_unchecked(i);
+
+ // Mark the `DESTROY` bit if a thread is still using the slot.
+ if slot.state.load(Ordering::Acquire) & READ == 0
+ && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
+ {
+ // If a thread is still using the slot, it will continue destruction of the block.
+ return;
+ }
+ }
+
+ // No thread is using the block, now it is safe to destroy it.
+ drop(Box::from_raw(this));
+ }
+}
+
+/// A position in a queue.
+struct Position<T> {
+ /// The index in the queue.
+ index: AtomicUsize,
+
+ /// The block in the linked list.
+ block: AtomicPtr<Block<T>>,
+}
+
+/// An unbounded multi-producer multi-consumer queue.
+///
+/// This queue is implemented as a linked list of segments, where each segment is a small buffer
+/// that can hold a handful of elements. There is no limit to how many elements can be in the queue
+/// at a time. However, since segments need to be dynamically allocated as elements get pushed,
+/// this queue is somewhat slower than [`ArrayQueue`].
+///
+/// [`ArrayQueue`]: struct.ArrayQueue.html
+///
+/// # Examples
+///
+/// ```
+/// use crossbeam_queue::{PopError, SegQueue};
+///
+/// let q = SegQueue::new();
+///
+/// q.push('a');
+/// q.push('b');
+///
+/// assert_eq!(q.pop(), Ok('a'));
+/// assert_eq!(q.pop(), Ok('b'));
+/// assert_eq!(q.pop(), Err(PopError));
+/// ```
+pub struct SegQueue<T> {
+ /// The head of the queue.
+ head: CachePadded<Position<T>>,
+
+ /// The tail of the queue.
+ tail: CachePadded<Position<T>>,
+
+ /// Indicates that dropping a `SegQueue<T>` may drop values of type `T`.
+ _marker: PhantomData<T>,
+}
+
+unsafe impl<T: Send> Send for SegQueue<T> {}
+unsafe impl<T: Send> Sync for SegQueue<T> {}
+
+impl<T> SegQueue<T> {
+ /// Creates a new unbounded queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::SegQueue;
+ ///
+ /// let q = SegQueue::<i32>::new();
+ /// ```
+ pub fn new() -> SegQueue<T> {
+ SegQueue {
+ head: CachePadded::new(Position {
+ block: AtomicPtr::new(ptr::null_mut()),
+ index: AtomicUsize::new(0),
+ }),
+ tail: CachePadded::new(Position {
+ block: AtomicPtr::new(ptr::null_mut()),
+ index: AtomicUsize::new(0),
+ }),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Pushes an element into the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::SegQueue;
+ ///
+ /// let q = SegQueue::new();
+ ///
+ /// q.push(10);
+ /// q.push(20);
+ /// ```
+ pub fn push(&self, value: T) {
+ let backoff = Backoff::new();
+ let mut tail = self.tail.index.load(Ordering::Acquire);
+ let mut block = self.tail.block.load(Ordering::Acquire);
+ let mut next_block = None;
+
+ loop {
+ // Calculate the offset of the index into the block.
+ let offset = (tail >> SHIFT) % LAP;
+
+ // If we reached the end of the block, wait until the next one is installed.
+ if offset == BLOCK_CAP {
+ backoff.snooze();
+ tail = self.tail.index.load(Ordering::Acquire);
+ block = self.tail.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ // If we're going to have to install the next block, allocate it in advance in order to
+ // make the wait for other threads as short as possible.
+ if offset + 1 == BLOCK_CAP && next_block.is_none() {
+ next_block = Some(Box::new(Block::<T>::new()));
+ }
+
+ // If this is the first push operation, we need to allocate the first block.
+ if block.is_null() {
+ let new = Box::into_raw(Box::new(Block::<T>::new()));
+
+ if self.tail.block.compare_and_swap(block, new, Ordering::Release) == block {
+ self.head.block.store(new, Ordering::Release);
+ block = new;
+ } else {
+ next_block = unsafe { Some(Box::from_raw(new)) };
+ tail = self.tail.index.load(Ordering::Acquire);
+ block = self.tail.block.load(Ordering::Acquire);
+ continue;
+ }
+ }
+
+ let new_tail = tail + (1 << SHIFT);
+
+ // Try advancing the tail forward.
+ match self.tail.index
+ .compare_exchange_weak(
+ tail,
+ new_tail,
+ Ordering::SeqCst,
+ Ordering::Acquire,
+ )
+ {
+ Ok(_) => unsafe {
+ // If we've reached the end of the block, install the next one.
+ if offset + 1 == BLOCK_CAP {
+ let next_block = Box::into_raw(next_block.unwrap());
+ let next_index = new_tail.wrapping_add(1 << SHIFT);
+
+ self.tail.block.store(next_block, Ordering::Release);
+ self.tail.index.store(next_index, Ordering::Release);
+ (*block).next.store(next_block, Ordering::Release);
+ }
+
+ // Write the value into the slot.
+ let slot = (*block).slots.get_unchecked(offset);
+ slot.value.get().write(ManuallyDrop::new(value));
+ slot.state.fetch_or(WRITE, Ordering::Release);
+
+ return;
+ }
+ Err(t) => {
+ tail = t;
+ block = self.tail.block.load(Ordering::Acquire);
+ backoff.spin();
+ }
+ }
+ }
+ }
+
+ /// Pops an element from the queue.
+ ///
+ /// If the queue is empty, an error is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::{PopError, SegQueue};
+ ///
+ /// let q = SegQueue::new();
+ ///
+ /// q.push(10);
+ /// assert_eq!(q.pop(), Ok(10));
+ /// assert_eq!(q.pop(), Err(PopError));
+ /// ```
+ pub fn pop(&self) -> Result<T, PopError> {
+ let backoff = Backoff::new();
+ let mut head = self.head.index.load(Ordering::Acquire);
+ let mut block = self.head.block.load(Ordering::Acquire);
+
+ loop {
+ // Calculate the offset of the index into the block.
+ let offset = (head >> SHIFT) % LAP;
+
+ // If we reached the end of the block, wait until the next one is installed.
+ if offset == BLOCK_CAP {
+ backoff.snooze();
+ head = self.head.index.load(Ordering::Acquire);
+ block = self.head.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ let mut new_head = head + (1 << SHIFT);
+
+ if new_head & HAS_NEXT == 0 {
+ atomic::fence(Ordering::SeqCst);
+ let tail = self.tail.index.load(Ordering::Relaxed);
+
+ // If the tail equals the head, that means the queue is empty.
+ if head >> SHIFT == tail >> SHIFT {
+ return Err(PopError);
+ }
+
+ // If head and tail are not in the same block, set `HAS_NEXT` in head.
+ if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
+ new_head |= HAS_NEXT;
+ }
+ }
+
+ // The block can be null here only if the first push operation is in progress. In that
+ // case, just wait until it gets initialized.
+ if block.is_null() {
+ backoff.snooze();
+ head = self.head.index.load(Ordering::Acquire);
+ block = self.head.block.load(Ordering::Acquire);
+ continue;
+ }
+
+ // Try moving the head index forward.
+ match self.head.index
+ .compare_exchange_weak(
+ head,
+ new_head,
+ Ordering::SeqCst,
+ Ordering::Acquire,
+ )
+ {
+ Ok(_) => unsafe {
+ // If we've reached the end of the block, move to the next one.
+ if offset + 1 == BLOCK_CAP {
+ let next = (*block).wait_next();
+ let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT);
+ if !(*next).next.load(Ordering::Relaxed).is_null() {
+ next_index |= HAS_NEXT;
+ }
+
+ self.head.block.store(next, Ordering::Release);
+ self.head.index.store(next_index, Ordering::Release);
+ }
+
+ // Read the value.
+ let slot = (*block).slots.get_unchecked(offset);
+ slot.wait_write();
+ let m = slot.value.get().read();
+ let value = ManuallyDrop::into_inner(m);
+
+ // Destroy the block if we've reached the end, or if another thread wanted to
+ // destroy but couldn't because we were busy reading from the slot.
+ if offset + 1 == BLOCK_CAP {
+ Block::destroy(block, 0);
+ } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
+ Block::destroy(block, offset + 1);
+ }
+
+ return Ok(value);
+ }
+ Err(h) => {
+ head = h;
+ block = self.head.block.load(Ordering::Acquire);
+ backoff.spin();
+ }
+ }
+ }
+ }
+
+ /// Returns `true` if the queue is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_queue::SegQueue;
+ ///
+ /// let q = SegQueue::new();
+ ///
+ /// assert!(q.is_empty());
+ /// q.push(1);
+ /// assert!(!q.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ let head = self.head.index.load(Ordering::SeqCst);
+ let tail = self.tail.index.load(Ordering::SeqCst);
+ head >> SHIFT == tail >> SHIFT
+ }
+
+ /// Returns the number of elements in the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use crossbeam_queue::SegQueue;
+ ///
+ /// let q = SegQueue::new();
+ /// assert_eq!(q.len(), 0);
+ ///
+ /// q.push(10);
+ /// assert_eq!(q.len(), 1);
+ ///
+ /// q.push(20);
+ /// assert_eq!(q.len(), 2);
+ /// ```
+ pub fn len(&self) -> usize {
+ loop {
+ // Load the tail index, then load the head index.
+ let mut tail = self.tail.index.load(Ordering::SeqCst);
+ let mut head = self.head.index.load(Ordering::SeqCst);
+
+ // If the tail index didn't change, we've got consistent indices to work with.
+ if self.tail.index.load(Ordering::SeqCst) == tail {
+ // Erase the lower bits.
+ tail &= !((1 << SHIFT) - 1);
+ head &= !((1 << SHIFT) - 1);
+
+ // Rotate indices so that head falls into the first block.
+ let lap = (head >> SHIFT) / LAP;
+ tail = tail.wrapping_sub((lap * LAP) << SHIFT);
+ head = head.wrapping_sub((lap * LAP) << SHIFT);
+
+ // Remove the lower bits.
+ tail >>= SHIFT;
+ head >>= SHIFT;
+
+ // Fix up indices if they fall onto block ends.
+ if head == BLOCK_CAP {
+ head = 0;
+ tail -= LAP;
+ }
+ if tail == BLOCK_CAP {
+ tail += 1;
+ }
+
+ // Return the difference minus the number of blocks between tail and head.
+ return tail - head - tail / LAP;
+ }
+ }
+ }
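+
+    // A worked example (illustrative, not part of the upstream source): with `head = 0`
+    // and 40 values pushed, the normalized indices are `head = 0` and `tail = 41`,
+    // because the last slot index of the first block is never used, so `len()`
+    // returns `41 - 0 - 41 / LAP = 40`.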
+}
+
+impl<T> Drop for SegQueue<T> {
+ fn drop(&mut self) {
+ let mut head = self.head.index.load(Ordering::Relaxed);
+ let mut tail = self.tail.index.load(Ordering::Relaxed);
+ let mut block = self.head.block.load(Ordering::Relaxed);
+
+ // Erase the lower bits.
+ head &= !((1 << SHIFT) - 1);
+ tail &= !((1 << SHIFT) - 1);
+
+ unsafe {
+ // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.
+ while head != tail {
+ let offset = (head >> SHIFT) % LAP;
+
+ if offset < BLOCK_CAP {
+ // Drop the value in the slot.
+ let slot = (*block).slots.get_unchecked(offset);
+ ManuallyDrop::drop(&mut *(*slot).value.get());
+ } else {
+ // Deallocate the block and move to the next one.
+ let next = (*block).next.load(Ordering::Relaxed);
+ drop(Box::from_raw(block));
+ block = next;
+ }
+
+ head = head.wrapping_add(1 << SHIFT);
+ }
+
+ // Deallocate the last remaining block.
+ if !block.is_null() {
+ drop(Box::from_raw(block));
+ }
+ }
+ }
+}
+
+impl<T> fmt::Debug for SegQueue<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad("SegQueue { .. }")
+ }
+}
+
+impl<T> Default for SegQueue<T> {
+ fn default() -> SegQueue<T> {
+ SegQueue::new()
+ }
+}