author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/tokio/src/time/driver
parent    Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/tokio/src/time/driver')
-rw-r--r--  third_party/rust/tokio/src/time/driver/atomic_stack.rs   124
-rw-r--r--  third_party/rust/tokio/src/time/driver/entry.rs          345
-rw-r--r--  third_party/rust/tokio/src/time/driver/handle.rs          38
-rw-r--r--  third_party/rust/tokio/src/time/driver/mod.rs            391
-rw-r--r--  third_party/rust/tokio/src/time/driver/registration.rs   53
-rw-r--r--  third_party/rust/tokio/src/time/driver/stack.rs          121
-rw-r--r--  third_party/rust/tokio/src/time/driver/tests/mod.rs      55
7 files changed, 1127 insertions, 0 deletions
diff --git a/third_party/rust/tokio/src/time/driver/atomic_stack.rs b/third_party/rust/tokio/src/time/driver/atomic_stack.rs
new file mode 100644
index 0000000000..7e5a83fa52
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/atomic_stack.rs
@@ -0,0 +1,124 @@
+use crate::time::driver::Entry;
+use crate::time::Error;
+
+use std::ptr;
+use std::sync::atomic::AtomicPtr;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::Arc;
+
+/// A stack of `Entry` nodes
+#[derive(Debug)]
+pub(crate) struct AtomicStack {
+ /// Stack head
+ head: AtomicPtr<Entry>,
+}
+
+/// Entries that were removed from the stack
+#[derive(Debug)]
+pub(crate) struct AtomicStackEntries {
+ ptr: *mut Entry,
+}
+
+/// Used to indicate that the timer has shut down.
+const SHUTDOWN: *mut Entry = 1 as *mut _;
+
+impl AtomicStack {
+ pub(crate) fn new() -> AtomicStack {
+ AtomicStack {
+ head: AtomicPtr::new(ptr::null_mut()),
+ }
+ }
+
+ /// Pushes an entry onto the stack.
+ ///
+ /// Returns `Ok(true)` if the entry was pushed, `Ok(false)` if the entry is
+ /// already on the stack, and `Err` if the timer has shut down.
+ pub(crate) fn push(&self, entry: &Arc<Entry>) -> Result<bool, Error> {
+ // First, set the queued bit on the entry
+ let queued = entry.queued.fetch_or(true, SeqCst);
+
+ if queued {
+ // Already queued, nothing more to do
+ return Ok(false);
+ }
+
+ let ptr = Arc::into_raw(entry.clone()) as *mut _;
+
+ let mut curr = self.head.load(SeqCst);
+
+ loop {
+ if curr == SHUTDOWN {
+ // Don't leak the entry node
+ let _ = unsafe { Arc::from_raw(ptr) };
+
+ return Err(Error::shutdown());
+ }
+
+ // Update the `next` pointer. This is safe because setting the queued
+ // bit is a "lock" on this field.
+ unsafe {
+ *(entry.next_atomic.get()) = curr;
+ }
+
+ let actual = self.head.compare_and_swap(curr, ptr, SeqCst);
+
+ if actual == curr {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ Ok(true)
+ }
+
+ /// Takes all entries from the stack
+ pub(crate) fn take(&self) -> AtomicStackEntries {
+ let ptr = self.head.swap(ptr::null_mut(), SeqCst);
+ AtomicStackEntries { ptr }
+ }
+
+ /// Drains all remaining nodes in the stack and prevents any new nodes from
+ /// being pushed onto the stack.
+ pub(crate) fn shutdown(&self) {
+ // Shutdown the processing queue
+ let ptr = self.head.swap(SHUTDOWN, SeqCst);
+
+ // Let the drop fn of `AtomicStackEntries` handle draining the stack
+ drop(AtomicStackEntries { ptr });
+ }
+}
+
+// ===== impl AtomicStackEntries =====
+
+impl Iterator for AtomicStackEntries {
+ type Item = Arc<Entry>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.ptr.is_null() {
+ return None;
+ }
+
+ // Convert the pointer to an `Arc<Entry>`
+ let entry = unsafe { Arc::from_raw(self.ptr) };
+
+ // Update `self.ptr` to point to the next element of the stack
+ self.ptr = unsafe { *entry.next_atomic.get() };
+
+ // Unset the queued flag
+ let res = entry.queued.fetch_and(false, SeqCst);
+ debug_assert!(res);
+
+ // Return the entry
+ Some(entry)
+ }
+}
+
+impl Drop for AtomicStackEntries {
+ fn drop(&mut self) {
+ for entry in self {
+ // Flag the entry as errored
+ entry.error();
+ }
+ }
+}
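Aside: the push/take shape above is the classic Treiber-stack pattern. Below is a minimal standalone sketch of the same pattern — illustrative only, not part of the patch — with Box-backed u32 nodes standing in for Arc<Entry>, and compare_exchange in place of the deprecated compare_and_swap:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

// Box-backed node; the real `AtomicStack` embeds the `next` pointer in
// `Entry` and stores `Arc<Entry>` instead.
struct Node {
    value: u32,
    next: *mut Node,
}

// Sentinel head value marking the stack as shut down, mirroring
// `SHUTDOWN: *mut Entry = 1 as *mut _` above.
const SHUTDOWN: *mut Node = 1 as *mut _;

struct Stack {
    head: AtomicPtr<Node>,
}

impl Stack {
    fn new() -> Stack {
        Stack { head: AtomicPtr::new(ptr::null_mut()) }
    }

    // Same CAS-retry shape as `AtomicStack::push`.
    fn push(&self, value: u32) -> Result<(), &'static str> {
        let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
        let mut curr = self.head.load(SeqCst);

        loop {
            if curr == SHUTDOWN {
                // Don't leak the node.
                drop(unsafe { Box::from_raw(node) });
                return Err("shutdown");
            }

            // Point the new node at the current head...
            unsafe { (*node).next = curr };

            // ...and try to swing the head to the new node.
            match self.head.compare_exchange(curr, node, SeqCst, SeqCst) {
                Ok(_) => return Ok(()),
                Err(actual) => curr = actual,
            }
        }
    }

    // Detach the whole list in one swap, like `AtomicStack::take`.
    fn take(&self) -> *mut Node {
        self.head.swap(ptr::null_mut(), SeqCst)
    }
}

fn main() {
    let stack = Stack::new();
    stack.push(1).unwrap();
    stack.push(2).unwrap();

    // Walk and free the detached list (LIFO order: 2, then 1).
    let mut curr = stack.take();
    while !curr.is_null() {
        let node = unsafe { Box::from_raw(curr) };
        println!("popped {}", node.value);
        curr = node.next;
    }
}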
diff --git a/third_party/rust/tokio/src/time/driver/entry.rs b/third_party/rust/tokio/src/time/driver/entry.rs
new file mode 100644
index 0000000000..20cc824019
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/entry.rs
@@ -0,0 +1,345 @@
+use crate::loom::sync::atomic::AtomicU64;
+use crate::sync::AtomicWaker;
+use crate::time::driver::{Handle, Inner};
+use crate::time::{Duration, Error, Instant};
+
+use std::cell::UnsafeCell;
+use std::ptr;
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Weak};
+use std::task::{self, Poll};
+use std::u64;
+
+/// Internal state shared between a `Delay` instance and the timer.
+///
+/// This struct is used as a node in two intrusive data structures:
+///
+/// * An atomic stack used to signal to the timer thread that the entry state
+/// has changed. The timer thread will observe the entry on this stack and
+/// perform any actions as necessary.
+///
+/// * A doubly linked list used **only** by the timer thread. Each slot in the
+/// timer wheel is a head pointer to the list of entries that must be
+/// processed during that timer tick.
+#[derive(Debug)]
+pub(crate) struct Entry {
+ /// Only accessed from `Registration`.
+ time: CachePadded<UnsafeCell<Time>>,
+
+ /// Timer internals. Using a weak pointer allows the timer to shut down
+ /// without all `Delay` instances having completed.
+ ///
+ /// When `None`, the entry has not yet been linked with a timer instance.
+ inner: Weak<Inner>,
+
+ /// Tracks the entry state. This value contains the following information:
+ ///
+ /// * The deadline at which the entry must be "fired".
+ /// * A flag indicating if the entry has already been fired.
+ /// * Whether or not the entry transitioned to the error state.
+ ///
+ /// When an `Entry` is created, `state` is initialized to the instant at
+ /// which the entry must be fired. When a timer is reset to a different
+ /// instant, this value is changed.
+ state: AtomicU64,
+
+ /// Task to notify once the deadline is reached.
+ waker: AtomicWaker,
+
+ /// True when the entry is queued in the "process" stack. This flag is set
+ /// before the entry is pushed and unset after it is popped.
+ ///
+ /// TODO: This could possibly be rolled up into `state`.
+ pub(super) queued: AtomicBool,
+
+ /// Next entry in the "process" linked list.
+ ///
+ /// Access to this field is coordinated by the `queued` flag.
+ ///
+ /// Represents a strong Arc ref.
+ pub(super) next_atomic: UnsafeCell<*mut Entry>,
+
+ /// When the entry expires, relative to the `start` of the timer
+ /// (Inner::start). This is only used by the timer.
+ ///
+ /// A `Delay` instance can be reset to a different deadline by the thread
+ /// that owns the `Delay` instance. In this case, the timer thread will not
+ /// immediately know that this has happened. The timer thread must know the
+ /// last deadline that it saw as it uses this value to locate the entry in
+ /// its wheel.
+ ///
+ /// Once the timer thread observes that the instant has changed, it updates
+ /// the wheel and sets this value. The idea is that this value eventually
+ /// converges to the value of `state` as the timer thread makes updates.
+ when: UnsafeCell<Option<u64>>,
+
+ /// Next entry in the State's linked list.
+ ///
+ /// This is only accessed by the timer
+ pub(super) next_stack: UnsafeCell<Option<Arc<Entry>>>,
+
+ /// Previous entry in the State's linked list.
+ ///
+ /// This is only accessed by the timer and is used to unlink a canceled
+ /// entry.
+ ///
+ /// This is a weak reference.
+ pub(super) prev_stack: UnsafeCell<*const Entry>,
+}
+
+/// Stores the info for `Delay`.
+#[derive(Debug)]
+pub(crate) struct Time {
+ pub(crate) deadline: Instant,
+ pub(crate) duration: Duration,
+}
+
+/// Flag indicating a timer entry has elapsed
+const ELAPSED: u64 = 1 << 63;
+
+/// Flag indicating a timer entry has reached an error state
+const ERROR: u64 = u64::MAX;
+
+// ===== impl Entry =====
+
+impl Entry {
+ pub(crate) fn new(handle: &Handle, deadline: Instant, duration: Duration) -> Arc<Entry> {
+ let inner = handle.inner().unwrap();
+ let entry: Entry;
+
+ // Increment the number of active timeouts
+ if inner.increment().is_err() {
+ entry = Entry::new2(deadline, duration, Weak::new(), ERROR)
+ } else {
+ let when = inner.normalize_deadline(deadline);
+ let state = if when <= inner.elapsed() {
+ ELAPSED
+ } else {
+ when
+ };
+ entry = Entry::new2(deadline, duration, Arc::downgrade(&inner), state);
+ }
+
+ let entry = Arc::new(entry);
+ if inner.queue(&entry).is_err() {
+ entry.error();
+ }
+
+ entry
+ }
+
+ /// Only called by `Registration`
+ pub(crate) fn time_ref(&self) -> &Time {
+ unsafe { &*self.time.0.get() }
+ }
+
+ /// Only called by `Registration`
+ #[allow(clippy::mut_from_ref)] // https://github.com/rust-lang/rust-clippy/issues/4281
+ pub(crate) unsafe fn time_mut(&self) -> &mut Time {
+ &mut *self.time.0.get()
+ }
+
+ /// The current entry state as known by the timer. This is not the value of
+ /// `state`, but lets the timer know how to converge its state to `state`.
+ pub(crate) fn when_internal(&self) -> Option<u64> {
+ unsafe { *self.when.get() }
+ }
+
+ pub(crate) fn set_when_internal(&self, when: Option<u64>) {
+ unsafe {
+ *self.when.get() = when;
+ }
+ }
+
+ /// Called by `Timer` to load the current value of `state` for processing
+ pub(crate) fn load_state(&self) -> Option<u64> {
+ let state = self.state.load(SeqCst);
+
+ if is_elapsed(state) {
+ None
+ } else {
+ Some(state)
+ }
+ }
+
+ pub(crate) fn is_elapsed(&self) -> bool {
+ let state = self.state.load(SeqCst);
+ is_elapsed(state)
+ }
+
+ pub(crate) fn fire(&self, when: u64) {
+ let mut curr = self.state.load(SeqCst);
+
+ loop {
+ if is_elapsed(curr) || curr > when {
+ return;
+ }
+
+ let next = ELAPSED | curr;
+ let actual = self.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ self.waker.wake();
+ }
+
+ pub(crate) fn error(&self) {
+ // Only transition to the error state if not currently elapsed
+ let mut curr = self.state.load(SeqCst);
+
+ loop {
+ if is_elapsed(curr) {
+ return;
+ }
+
+ let next = ERROR;
+
+ let actual = self.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ self.waker.wake();
+ }
+
+ pub(crate) fn cancel(entry: &Arc<Entry>) {
+ let state = entry.state.fetch_or(ELAPSED, SeqCst);
+
+ if is_elapsed(state) {
+ // Nothing more to do
+ return;
+ }
+
+ // If registered with a timer instance, try to upgrade the Arc.
+ let inner = match entry.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ let _ = inner.queue(entry);
+ }
+
+ pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {
+ let mut curr = self.state.load(SeqCst);
+
+ if is_elapsed(curr) {
+ return Poll::Ready(if curr == ERROR {
+ Err(Error::shutdown())
+ } else {
+ Ok(())
+ });
+ }
+
+ self.waker.register_by_ref(cx.waker());
+
+ curr = self.state.load(SeqCst);
+
+ if is_elapsed(curr) {
+ return Poll::Ready(if curr == ERROR {
+ Err(Error::shutdown())
+ } else {
+ Ok(())
+ });
+ }
+
+ Poll::Pending
+ }
+
+ /// Only called by `Registration`
+ pub(crate) fn reset(entry: &mut Arc<Entry>) {
+ let inner = match entry.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ let deadline = entry.time_ref().deadline;
+ let when = inner.normalize_deadline(deadline);
+ let elapsed = inner.elapsed();
+
+ let mut curr = entry.state.load(SeqCst);
+ let mut notify;
+
+ loop {
+ // In these two cases, there is no work to do when resetting the
+ // timer. If the `Entry` is in an error state, then it cannot be
+ // used anymore. If resetting the entry to the current value, then
+ // the reset is a noop.
+ if curr == ERROR || curr == when {
+ return;
+ }
+
+ let next;
+
+ if when <= elapsed {
+ next = ELAPSED;
+ notify = !is_elapsed(curr);
+ } else {
+ next = when;
+ notify = true;
+ }
+
+ let actual = entry.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ if notify {
+ let _ = inner.queue(entry);
+ }
+ }
+
+ fn new2(deadline: Instant, duration: Duration, inner: Weak<Inner>, state: u64) -> Self {
+ Self {
+ time: CachePadded(UnsafeCell::new(Time { deadline, duration })),
+ inner,
+ waker: AtomicWaker::new(),
+ state: AtomicU64::new(state),
+ queued: AtomicBool::new(false),
+ next_atomic: UnsafeCell::new(ptr::null_mut()),
+ when: UnsafeCell::new(None),
+ next_stack: UnsafeCell::new(None),
+ prev_stack: UnsafeCell::new(ptr::null_mut()),
+ }
+ }
+
+ fn upgrade_inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+fn is_elapsed(state: u64) -> bool {
+ state & ELAPSED == ELAPSED
+}
+
+impl Drop for Entry {
+ fn drop(&mut self) {
+ let inner = match self.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ inner.decrement();
+ }
+}
+
+unsafe impl Send for Entry {}
+unsafe impl Sync for Entry {}
+
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+#[derive(Debug)]
+struct CachePadded<T>(T);
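Aside: the `state` word packs the deadline and the flags above into a single AtomicU64 — bit 63 is ELAPSED, and ERROR is all ones, so an errored entry also reads as elapsed. A self-contained sketch of the `fire` transition (illustrative only; compare_exchange stands in for the compare_and_swap used above):

use std::sync::atomic::{AtomicU64, Ordering::SeqCst};

const ELAPSED: u64 = 1 << 63;
const ERROR: u64 = u64::MAX;

fn is_elapsed(state: u64) -> bool {
    state & ELAPSED == ELAPSED
}

// Same shape as `Entry::fire`: transition only if the stored deadline is
// at or before `when` and the entry has not already fired or errored.
// (The real code then wakes the stored waker.)
fn fire(state: &AtomicU64, when: u64) -> bool {
    let mut curr = state.load(SeqCst);
    loop {
        if is_elapsed(curr) || curr > when {
            return false;
        }
        match state.compare_exchange(curr, ELAPSED | curr, SeqCst, SeqCst) {
            Ok(_) => return true,
            Err(actual) => curr = actual,
        }
    }
}

fn main() {
    let state = AtomicU64::new(500); // deadline: tick 500
    assert!(!fire(&state, 499));     // too early, no transition
    assert!(fire(&state, 500));      // fires; ELAPSED bit now set
    assert!(is_elapsed(state.load(SeqCst)));
    assert!(is_elapsed(ERROR));      // ERROR is all ones, so it reads as elapsed too
}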
diff --git a/third_party/rust/tokio/src/time/driver/handle.rs b/third_party/rust/tokio/src/time/driver/handle.rs
new file mode 100644
index 0000000000..38b1761c8e
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/handle.rs
@@ -0,0 +1,38 @@
+use crate::runtime::context;
+use crate::time::driver::Inner;
+use std::fmt;
+use std::sync::{Arc, Weak};
+
+/// Handle to time driver instance.
+#[derive(Clone)]
+pub(crate) struct Handle {
+ inner: Weak<Inner>,
+}
+
+impl Handle {
+ /// Creates a new timer `Handle` from a shared `Inner` timer state.
+ pub(crate) fn new(inner: Weak<Inner>) -> Self {
+ Handle { inner }
+ }
+
+ /// Tries to get a handle to the current timer.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is no current timer set.
+ pub(crate) fn current() -> Self {
+ context::time_handle()
+ .expect("there is no timer running, must be called from the context of Tokio runtime")
+ }
+
+ /// Tries to return a strong ref to the inner
+ pub(crate) fn inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+impl fmt::Debug for Handle {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Handle")
+ }
+}
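Aside: the handle holds only a Weak<Inner>, so outstanding handles never keep a dropped driver alive, and a failed upgrade is how callers observe that the timer is gone. A minimal illustration of that ownership split (type names here are stand-ins, not the real driver types):

use std::sync::{Arc, Weak};

struct Inner; // stand-in for the driver's shared state

#[derive(Clone)]
struct Handle {
    inner: Weak<Inner>,
}

impl Handle {
    fn inner(&self) -> Option<Arc<Inner>> {
        self.inner.upgrade()
    }
}

fn main() {
    let driver_state = Arc::new(Inner);
    let handle = Handle { inner: Arc::downgrade(&driver_state) };

    // While the driver lives, the handle can reach it.
    assert!(handle.inner().is_some());

    // Dropping the driver's Arc shuts the timer down; upgrades now fail,
    // which is how `Entry::new` detects a dead timer.
    drop(driver_state);
    assert!(handle.inner().is_none());
}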
diff --git a/third_party/rust/tokio/src/time/driver/mod.rs b/third_party/rust/tokio/src/time/driver/mod.rs
new file mode 100644
index 0000000000..4616816f3f
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/mod.rs
@@ -0,0 +1,391 @@
+//! Time driver
+
+mod atomic_stack;
+use self::atomic_stack::AtomicStack;
+
+mod entry;
+pub(super) use self::entry::Entry;
+
+mod handle;
+pub(crate) use self::handle::Handle;
+
+mod registration;
+pub(crate) use self::registration::Registration;
+
+mod stack;
+use self::stack::Stack;
+
+use crate::loom::sync::atomic::{AtomicU64, AtomicUsize};
+use crate::park::{Park, Unpark};
+use crate::time::{wheel, Error};
+use crate::time::{Clock, Duration, Instant};
+
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
+
+use std::sync::Arc;
+use std::usize;
+use std::{cmp, fmt};
+
+/// Time implementation that drives [`Delay`], [`Interval`], and [`Timeout`].
+///
+/// A `Driver` instance tracks the state necessary for managing time and
+/// notifying the [`Delay`] instances once their deadlines are reached.
+///
+/// It is expected that a single instance manages many individual [`Delay`]
+/// instances. The `Driver` implementation is thread-safe and, as such, is able
+/// to handle callers from across threads.
+///
+/// After creating the `Driver` instance, the caller must drive it by
+/// repeatedly calling [`park`]; the driver performs no work otherwise.
+///
+/// The driver has a resolution of one millisecond. Any duration that falls
+/// between milliseconds is rounded up to the next millisecond.
+///
+/// When an instance is dropped, any outstanding [`Delay`] instance that has not
+/// elapsed will be notified with an error. At this point, calling `poll` on the
+/// [`Delay`] instance will result in `Err` being returned.
+///
+/// # Implementation
+///
+/// The time driver is based on the [paper by Varghese and Lauck][paper].
+///
+/// A hashed timing wheel is a vector of slots, where each slot handles a time
+/// slice. As time progresses, the timer walks over the slot for the current
+/// instant, and processes each entry for that slot. When the timer reaches the
+/// end of the wheel, it starts again at the beginning.
+///
+/// The implementation maintains six wheels arranged in a set of levels. As the
+/// levels go up, the slots of the associated wheel represent larger intervals
+/// of time. At each level, the wheel has 64 slots. Each slot covers a range
+/// of time equal to the entire wheel one level below it. At level zero, each
+/// slot represents one millisecond of time.
+///
+/// The wheels are:
+///
+/// * Level 0: 64 x 1 millisecond slots.
+/// * Level 1: 64 x 64 millisecond slots.
+/// * Level 2: 64 x ~4 second slots.
+/// * Level 3: 64 x ~4 minute slots.
+/// * Level 4: 64 x ~4 hour slots.
+/// * Level 5: 64 x ~12 day slots.
+///
+/// When the timer processes entries at level zero, it will notify all the
+/// `Delay` instances as their deadlines have been reached. For all higher
+/// levels, all entries will be redistributed across the wheel at the next level
+ down. Eventually, as time progresses, the [`Delay`] instances will either
+ be canceled (dropped) or their associated entries will reach level zero
+ and be notified.
+#[derive(Debug)]
+pub(crate) struct Driver<T> {
+ /// Shared state
+ inner: Arc<Inner>,
+
+ /// Timer wheel
+ wheel: wheel::Wheel<Stack>,
+
+ /// Thread parker. The `Driver` park implementation delegates to this.
+ park: T,
+
+ /// Source of "now" instances
+ clock: Clock,
+}
+
+/// Timer state shared between `Driver`, `Handle`, and `Registration`.
+pub(crate) struct Inner {
+ /// The instant at which the timer started running.
+ start: Instant,
+
+ /// The last published timer `elapsed` value.
+ elapsed: AtomicU64,
+
+ /// Number of active timeouts
+ num: AtomicUsize,
+
+ /// Head of the "process" linked list.
+ process: AtomicStack,
+
+ /// Unparks the timer thread.
+ unpark: Box<dyn Unpark>,
+}
+
+/// Maximum number of timeouts the system can handle concurrently.
+const MAX_TIMEOUTS: usize = usize::MAX >> 1;
+
+// ===== impl Driver =====
+
+impl<T> Driver<T>
+where
+ T: Park,
+{
+ /// Creates a new `Driver` instance that uses `park` to block the current
+ /// thread and `now` to get the current `Instant`.
+ ///
+ /// Specifying the source of time is useful when testing.
+ pub(crate) fn new(park: T, clock: Clock) -> Driver<T> {
+ let unpark = Box::new(park.unpark());
+
+ Driver {
+ inner: Arc::new(Inner::new(clock.now(), unpark)),
+ wheel: wheel::Wheel::new(),
+ park,
+ clock,
+ }
+ }
+
+ /// Returns a handle to the timer.
+ ///
+ /// The `Handle` is how `Delay` instances are created. The `Delay` instances
+ /// can either be created directly or the `Handle` instance can be passed to
+ /// `with_default`, setting the timer as the default timer for the execution
+ /// context.
+ pub(crate) fn handle(&self) -> Handle {
+ Handle::new(Arc::downgrade(&self.inner))
+ }
+
+ /// Converts an `Expiration` to an `Instant`.
+ fn expiration_instant(&self, when: u64) -> Instant {
+ self.inner.start + Duration::from_millis(when)
+ }
+
+ /// Runs timer related logic
+ fn process(&mut self) {
+ let now = crate::time::ms(
+ self.clock.now() - self.inner.start,
+ crate::time::Round::Down,
+ );
+ let mut poll = wheel::Poll::new(now);
+
+ while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) {
+ let when = entry.when_internal().expect("invalid internal entry state");
+
+ // Fire the entry
+ entry.fire(when);
+
+ // Track that the entry has been fired
+ entry.set_when_internal(None);
+ }
+
+ // Update the elapsed cache
+ self.inner.elapsed.store(self.wheel.elapsed(), SeqCst);
+ }
+
+ /// Processes the entry queue
+ ///
+ /// This handles adding and canceling timeouts.
+ fn process_queue(&mut self) {
+ for entry in self.inner.process.take() {
+ match (entry.when_internal(), entry.load_state()) {
+ (None, None) => {
+ // Nothing to do
+ }
+ (Some(_), None) => {
+ // Remove the entry
+ self.clear_entry(&entry);
+ }
+ (None, Some(when)) => {
+ // Queue the entry
+ self.add_entry(entry, when);
+ }
+ (Some(_), Some(next)) => {
+ self.clear_entry(&entry);
+ self.add_entry(entry, next);
+ }
+ }
+ }
+ }
+
+ fn clear_entry(&mut self, entry: &Arc<Entry>) {
+ self.wheel.remove(entry, &mut ());
+ entry.set_when_internal(None);
+ }
+
+ /// Adds the entry to the wheel, firing it immediately if its deadline has
+ /// already elapsed.
+ fn add_entry(&mut self, entry: Arc<Entry>, when: u64) {
+ use crate::time::wheel::InsertError;
+
+ entry.set_when_internal(Some(when));
+
+ match self.wheel.insert(when, entry, &mut ()) {
+ Ok(_) => {}
+ Err((entry, InsertError::Elapsed)) => {
+ // The entry's deadline has elapsed, so fire it and update the
+ // internal state accordingly.
+ entry.set_when_internal(None);
+ entry.fire(when);
+ }
+ Err((entry, InsertError::Invalid)) => {
+ // The entry's deadline is invalid, so error it and update the
+ // internal state accordingly.
+ entry.set_when_internal(None);
+ entry.error();
+ }
+ }
+ }
+}
+
+impl<T> Park for Driver<T>
+where
+ T: Park,
+{
+ type Unpark = T::Unpark;
+ type Error = T::Error;
+
+ fn unpark(&self) -> Self::Unpark {
+ self.park.unpark()
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ self.process_queue();
+
+ match self.wheel.poll_at() {
+ Some(when) => {
+ let now = self.clock.now();
+ let deadline = self.expiration_instant(when);
+
+ if deadline > now {
+ let dur = deadline - now;
+
+ if self.clock.is_paused() {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ self.clock.advance(dur);
+ } else {
+ self.park.park_timeout(dur)?;
+ }
+ } else {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ }
+ }
+ None => {
+ self.park.park()?;
+ }
+ }
+
+ self.process();
+
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ self.process_queue();
+
+ match self.wheel.poll_at() {
+ Some(when) => {
+ let now = self.clock.now();
+ let deadline = self.expiration_instant(when);
+
+ if deadline > now {
+ let duration = cmp::min(deadline - now, duration);
+
+ if self.clock.is_paused() {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ self.clock.advance(duration);
+ } else {
+ self.park.park_timeout(duration)?;
+ }
+ } else {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ }
+ }
+ None => {
+ self.park.park_timeout(duration)?;
+ }
+ }
+
+ self.process();
+
+ Ok(())
+ }
+}
+
+impl<T> Drop for Driver<T> {
+ fn drop(&mut self) {
+ use std::u64;
+
+ // Shutdown the stack of entries to process, preventing any new entries
+ // from being pushed.
+ self.inner.process.shutdown();
+
+ // Clear the wheel; polling at u64::MAX drains every remaining entry
+ let mut poll = wheel::Poll::new(u64::MAX);
+
+ while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) {
+ entry.error();
+ }
+ }
+}
+
+// ===== impl Inner =====
+
+impl Inner {
+ fn new(start: Instant, unpark: Box<dyn Unpark>) -> Inner {
+ Inner {
+ num: AtomicUsize::new(0),
+ elapsed: AtomicU64::new(0),
+ process: AtomicStack::new(),
+ start,
+ unpark,
+ }
+ }
+
+ fn elapsed(&self) -> u64 {
+ self.elapsed.load(SeqCst)
+ }
+
+ #[cfg(all(test, loom))]
+ fn num(&self, ordering: std::sync::atomic::Ordering) -> usize {
+ self.num.load(ordering)
+ }
+
+ /// Increments the number of active timeouts
+ fn increment(&self) -> Result<(), Error> {
+ let mut curr = self.num.load(Relaxed);
+ loop {
+ if curr == MAX_TIMEOUTS {
+ return Err(Error::at_capacity());
+ }
+
+ match self
+ .num
+ .compare_exchange_weak(curr, curr + 1, Release, Relaxed)
+ {
+ Ok(_) => return Ok(()),
+ Err(next) => curr = next,
+ }
+ }
+ }
+
+ /// Decrements the number of active timeouts
+ fn decrement(&self) {
+ let prev = self.num.fetch_sub(1, Acquire);
+ debug_assert!(prev <= MAX_TIMEOUTS);
+ }
+
+ fn queue(&self, entry: &Arc<Entry>) -> Result<(), Error> {
+ if self.process.push(entry)? {
+ // The timer is notified so that it can process the timeout
+ self.unpark.unpark();
+ }
+
+ Ok(())
+ }
+
+ fn normalize_deadline(&self, deadline: Instant) -> u64 {
+ if deadline < self.start {
+ return 0;
+ }
+
+ crate::time::ms(deadline - self.start, crate::time::Round::Up)
+ }
+}
+
+impl fmt::Debug for Inner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Inner").finish()
+ }
+}
+
+#[cfg(all(test, loom))]
+mod tests;
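Aside: to sanity-check the level spans quoted in the module docs above, a slot at level l covers 64^l milliseconds and a full level spans 64 such slots. A quick back-of-the-envelope program (illustrative only):

fn main() {
    // Slot span at level `l` is 64^l ms; a full level spans 64 slots.
    for level in 0..6u32 {
        let slot_ms = 64u64.pow(level);
        let wheel_ms = slot_ms * 64;
        println!("level {}: slot = {} ms, full wheel = {} ms", level, slot_ms, wheel_ms);
    }
    // level 0: slot = 1 ms            wheel = 64 ms
    // level 2: slot = 4096 ms (~4 s)  wheel ≈ 4.4 min
    // level 5: slot ≈ 12.4 days       wheel ≈ 2.2 years
}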
diff --git a/third_party/rust/tokio/src/time/driver/registration.rs b/third_party/rust/tokio/src/time/driver/registration.rs
new file mode 100644
index 0000000000..b77357e735
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/registration.rs
@@ -0,0 +1,53 @@
+use crate::time::driver::{Entry, Handle};
+use crate::time::{Duration, Error, Instant};
+
+use std::sync::Arc;
+use std::task::{self, Poll};
+
+/// Registration with a timer.
+///
+/// The association between a `Delay` instance and a timer is done lazily in
+/// `poll`.
+#[derive(Debug)]
+pub(crate) struct Registration {
+ entry: Arc<Entry>,
+}
+
+impl Registration {
+ pub(crate) fn new(deadline: Instant, duration: Duration) -> Registration {
+ let handle = Handle::current();
+
+ Registration {
+ entry: Entry::new(&handle, deadline, duration),
+ }
+ }
+
+ pub(crate) fn deadline(&self) -> Instant {
+ self.entry.time_ref().deadline
+ }
+
+ pub(crate) fn reset(&mut self, deadline: Instant) {
+ unsafe {
+ self.entry.time_mut().deadline = deadline;
+ }
+
+ Entry::reset(&mut self.entry);
+ }
+
+ pub(crate) fn is_elapsed(&self) -> bool {
+ self.entry.is_elapsed()
+ }
+
+ pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ self.entry.poll_elapsed(cx)
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ Entry::cancel(&self.entry);
+ }
+}
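Aside: poll_elapsed charges the task's cooperative budget (via ready! and coop::poll_proceed) before consulting the timer, so a task that polls many already-fired timers still yields periodically. A toy model of that gating — the real macro and budget live in tokio internals; these stand-ins are illustrative:

use std::task::Poll;

// Minimal stand-in for the `ready!` macro: return early with
// `Poll::Pending` unless the inner poll is ready.
macro_rules! ready {
    ($e:expr) => {
        match $e {
            Poll::Ready(v) => v,
            Poll::Pending => return Poll::Pending,
        }
    };
}

// Toy version of `coop::poll_proceed`: spend one unit of budget,
// yielding once it is exhausted.
fn poll_budget(remaining: &mut u32) -> Poll<()> {
    if *remaining == 0 {
        Poll::Pending
    } else {
        *remaining -= 1;
        Poll::Ready(())
    }
}

// Budget is checked first, exactly like `Registration::poll_elapsed`.
fn poll_elapsed(budget: &mut u32, timer_done: bool) -> Poll<Result<(), ()>> {
    ready!(poll_budget(budget));
    if timer_done { Poll::Ready(Ok(())) } else { Poll::Pending }
}

fn main() {
    let mut budget = 1;
    assert_eq!(poll_elapsed(&mut budget, true), Poll::Ready(Ok(())));
    // Budget exhausted: the task yields even though the timer has fired.
    assert_eq!(poll_elapsed(&mut budget, true), Poll::Pending);
}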
diff --git a/third_party/rust/tokio/src/time/driver/stack.rs b/third_party/rust/tokio/src/time/driver/stack.rs
new file mode 100644
index 0000000000..3e2924f265
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/stack.rs
@@ -0,0 +1,121 @@
+use crate::time::driver::Entry;
+use crate::time::wheel;
+
+use std::ptr;
+use std::sync::Arc;
+
+/// A doubly linked stack
+#[derive(Debug)]
+pub(crate) struct Stack {
+ head: Option<Arc<Entry>>,
+}
+
+impl Default for Stack {
+ fn default() -> Stack {
+ Stack { head: None }
+ }
+}
+
+impl wheel::Stack for Stack {
+ type Owned = Arc<Entry>;
+ type Borrowed = Entry;
+ type Store = ();
+
+ fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ fn push(&mut self, entry: Self::Owned, _: &mut Self::Store) {
+ // Get a pointer to the entry to use for the prev link
+ let ptr: *const Entry = &*entry as *const _;
+
+ // Remove the old head entry
+ let old = self.head.take();
+
+ unsafe {
+ // Ensure the entry is not already in a stack.
+ debug_assert!((*entry.next_stack.get()).is_none());
+ debug_assert!((*entry.prev_stack.get()).is_null());
+
+ if let Some(ref entry) = old.as_ref() {
+ debug_assert!({
+ // The head is not already set to the entry
+ ptr != &***entry as *const _
+ });
+
+ // Set the previous link on the old head
+ *entry.prev_stack.get() = ptr;
+ }
+
+ // Set this entry's next pointer
+ *entry.next_stack.get() = old;
+ }
+
+ // Update the head pointer
+ self.head = Some(entry);
+ }
+
+ /// Pops an item from the stack
+ fn pop(&mut self, _: &mut ()) -> Option<Arc<Entry>> {
+ let entry = self.head.take();
+
+ unsafe {
+ if let Some(entry) = entry.as_ref() {
+ self.head = (*entry.next_stack.get()).take();
+
+ if let Some(entry) = self.head.as_ref() {
+ *entry.prev_stack.get() = ptr::null();
+ }
+
+ *entry.prev_stack.get() = ptr::null();
+ }
+ }
+
+ entry
+ }
+
+ fn remove(&mut self, entry: &Entry, _: &mut ()) {
+ unsafe {
+ // Ensure that the entry is in fact contained by the stack
+ debug_assert!({
+ // This walks the full linked list even if an entry is found.
+ let mut next = self.head.as_ref();
+ let mut contains = false;
+
+ while let Some(n) = next {
+ if entry as *const _ == &**n as *const _ {
+ debug_assert!(!contains);
+ contains = true;
+ }
+
+ next = (*n.next_stack.get()).as_ref();
+ }
+
+ contains
+ });
+
+ // Unlink `entry` from the next node
+ let next = (*entry.next_stack.get()).take();
+
+ if let Some(next) = next.as_ref() {
+ (*next.prev_stack.get()) = *entry.prev_stack.get();
+ }
+
+ // Unlink `entry` from the prev node
+
+ if let Some(prev) = (*entry.prev_stack.get()).as_ref() {
+ *prev.next_stack.get() = next;
+ } else {
+ // It is the head
+ self.head = next;
+ }
+
+ // Unset the prev pointer
+ *entry.prev_stack.get() = ptr::null();
+ }
+ }
+
+ fn when(item: &Entry, _: &()) -> u64 {
+ item.when_internal().expect("invalid internal state")
+ }
+}
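Aside: because every entry carries both prev_stack and next_stack links, remove can unlink a cancelled entry in O(1) without walking the slot list (the containment walk above is debug-only). The same discipline, sketched safely with Vec indices instead of raw pointers:

// Index-based sketch of the doubly linked discipline: each node knows
// its neighbours, so unlinking never requires a list traversal.
struct Node {
    prev: Option<usize>,
    next: Option<usize>,
}

struct Stack {
    head: Option<usize>,
    nodes: Vec<Node>,
}

impl Stack {
    fn push(&mut self, idx: usize) {
        self.nodes[idx].prev = None;
        self.nodes[idx].next = self.head;
        if let Some(old) = self.head {
            self.nodes[old].prev = Some(idx);
        }
        self.head = Some(idx);
    }

    fn remove(&mut self, idx: usize) {
        let (prev, next) = (self.nodes[idx].prev, self.nodes[idx].next);
        match prev {
            Some(p) => self.nodes[p].next = next,
            None => self.head = next, // it was the head
        }
        if let Some(n) = next {
            self.nodes[n].prev = prev;
        }
        self.nodes[idx].prev = None;
        self.nodes[idx].next = None;
    }
}

fn main() {
    let mut s = Stack {
        head: None,
        nodes: (0..3).map(|_| Node { prev: None, next: None }).collect(),
    };
    s.push(0);
    s.push(1);
    s.push(2);   // list: 2 -> 1 -> 0
    s.remove(1); // unlink the middle node in O(1): 2 -> 0
    assert_eq!(s.head, Some(2));
    assert_eq!(s.nodes[2].next, Some(0));
    assert_eq!(s.nodes[0].prev, Some(2));
}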
diff --git a/third_party/rust/tokio/src/time/driver/tests/mod.rs b/third_party/rust/tokio/src/time/driver/tests/mod.rs
new file mode 100644
index 0000000000..88ff5525da
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/tests/mod.rs
@@ -0,0 +1,55 @@
+use crate::park::Unpark;
+use crate::time::driver::Inner;
+use crate::time::Instant;
+
+use loom::thread;
+
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+struct MockUnpark;
+
+impl Unpark for MockUnpark {
+ fn unpark(&self) {}
+}
+
+#[test]
+fn balanced_incr_and_decr() {
+ const OPS: usize = 5;
+
+ fn incr(inner: Arc<Inner>) {
+ for _ in 0..OPS {
+ inner.increment().expect("increment should not have failed");
+ thread::yield_now();
+ }
+ }
+
+ fn decr(inner: Arc<Inner>) {
+ let mut ops_performed = 0;
+ while ops_performed < OPS {
+ if inner.num(Ordering::Relaxed) > 0 {
+ ops_performed += 1;
+ inner.decrement();
+ }
+ thread::yield_now();
+ }
+ }
+
+ loom::model(|| {
+ let unpark = Box::new(MockUnpark);
+ let instant = Instant::now();
+
+ let inner = Arc::new(Inner::new(instant, unpark));
+
+ let incr_inner = inner.clone();
+ let decr_inner = inner.clone();
+
+ let incr_handle = thread::spawn(move || incr(incr_inner));
+ let decr_handle = thread::spawn(move || decr(decr_inner));
+
+ incr_handle.join().expect("should never fail");
+ decr_handle.join().expect("should never fail");
+
+ assert_eq!(inner.num(Ordering::SeqCst), 0);
+ })
+}