author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/tokio/src
parent     Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/tokio/src')
274 files changed, 47932 insertions, 0 deletions
diff --git a/third_party/rust/tokio/src/coop.rs b/third_party/rust/tokio/src/coop.rs new file mode 100644 index 0000000000..1d62459166 --- /dev/null +++ b/third_party/rust/tokio/src/coop.rs @@ -0,0 +1,379 @@ +//! Opt-in yield points for improved cooperative scheduling. +//! +//! A single call to [`poll`] on a top-level task may potentially do a lot of work before it +//! returns `Poll::Pending`. If a task runs for a long period of time without yielding back to the +//! executor, it can starve other tasks waiting on that executor to execute them, or drive +//! underlying resources. Since Rust does not have a runtime, it is difficult to forcibly preempt a +//! long-running task. Instead, this module provides an opt-in mechanism for futures to collaborate +//! with the executor to avoid starvation. +//! +//! Consider a future like this one: +//! +//! ``` +//! # use tokio::stream::{Stream, StreamExt}; +//! async fn drop_all<I: Stream + Unpin>(mut input: I) { +//! while let Some(_) = input.next().await {} +//! } +//! ``` +//! +//! It may look harmless, but consider what happens under heavy load if the input stream is +//! _always_ ready. If we spawn `drop_all`, the task will never yield, and will starve other tasks +//! and resources on the same executor. With opt-in yield points, this problem is alleviated: +//! +//! ```ignore +//! # use tokio::stream::{Stream, StreamExt}; +//! async fn drop_all<I: Stream + Unpin>(mut input: I) { +//! while let Some(_) = input.next().await { +//! tokio::coop::proceed().await; +//! } +//! } +//! ``` +//! +//! The `proceed` future will coordinate with the executor to make sure that every so often control +//! is yielded back to the executor so it can run other tasks. +//! +//! # Placing yield points +//! +//! Voluntary yield points should be placed _after_ at least some work has been done. If they are +//! not, a future sufficiently deep in the task hierarchy may end up _never_ getting to run because +//! of the number of yield points that inevitably appear before it is reached. In general, you will +//! want yield points to only appear in "leaf" futures -- those that do not themselves poll other +//! futures. By doing this, you avoid double-counting each iteration of the outer future against +//! the cooperating budget. +//! +//! [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#tymethod.poll + +// NOTE: The doctests in this module are ignored since the whole module is (currently) private. + +use std::cell::Cell; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Constant used to determine how much "work" a task is allowed to do without yielding. +/// +/// The value itself is chosen somewhat arbitrarily. It needs to be high enough to amortize wakeup +/// and scheduling costs, but low enough that we do not starve other tasks for too long. The value +/// also needs to be high enough that particularly deep tasks are able to do at least some useful +/// work at all. +/// +/// Note that as more yield points are added in the ecosystem, this value will probably also have +/// to be raised. +const BUDGET: usize = 128; + +/// Constant used to determine if budgeting has been disabled. +const UNCONSTRAINED: usize = usize::max_value(); + +thread_local! { + static HITS: Cell<usize> = Cell::new(UNCONSTRAINED); +} + +/// Run the given closure with a cooperative task budget. +/// +/// Enabling budgeting when it is already enabled is a no-op. 
+#[inline(always)] +pub(crate) fn budget<F, R>(f: F) -> R +where + F: FnOnce() -> R, +{ + HITS.with(move |hits| { + if hits.get() != UNCONSTRAINED { + // We are already being budgeted. + // + // Arguably this should be an error, but it can happen "correctly" + // such as with block_on + LocalSet, so we make it a no-op. + return f(); + } + + struct Guard<'a>(&'a Cell<usize>); + impl<'a> Drop for Guard<'a> { + fn drop(&mut self) { + self.0.set(UNCONSTRAINED); + } + } + + hits.set(BUDGET); + let _guard = Guard(hits); + f() + }) +} + +cfg_rt_threaded! { + #[inline(always)] + pub(crate) fn has_budget_remaining() -> bool { + HITS.with(|hits| hits.get() > 0) + } +} + +cfg_blocking_impl! { + /// Forcibly remove the budgeting constraints early. + pub(crate) fn stop() { + HITS.with(|hits| { + hits.set(UNCONSTRAINED); + }); + } +} + +/// Invoke `f` with a subset of the remaining budget. +/// +/// This is useful if you have sub-futures that you need to poll, but that you want to restrict +/// from using up your entire budget. For example, imagine the following future: +/// +/// ```rust +/// # use std::{future::Future, pin::Pin, task::{Context, Poll}}; +/// use futures::stream::FuturesUnordered; +/// struct MyFuture<F1, F2> { +/// big: FuturesUnordered<F1>, +/// small: F2, +/// } +/// +/// use tokio::stream::Stream; +/// impl<F1, F2> Future for MyFuture<F1, F2> +/// where F1: Future, F2: Future +/// # , F1: Unpin, F2: Unpin +/// { +/// type Output = F2::Output; +/// +/// // fn poll(...) +/// # fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F2::Output> { +/// # let this = &mut *self; +/// let mut big = // something to pin self.big +/// # Pin::new(&mut this.big); +/// let small = // something to pin self.small +/// # Pin::new(&mut this.small); +/// +/// // see if any of the big futures have finished +/// while let Some(e) = futures::ready!(big.as_mut().poll_next(cx)) { +/// // do something with e +/// # let _ = e; +/// } +/// +/// // see if the small future has finished +/// small.poll(cx) +/// } +/// # } +/// ``` +/// +/// It could be that every time `poll` gets called, `big` ends up spending the entire budget, and +/// `small` never gets polled. That would be sad. If you want to stick up for the little future, +/// that's what `limit` is for. It lets you portion out a smaller part of the yield budget to a +/// particular segment of your code. In the code above, you would write +/// +/// ```rust,ignore +/// # use std::{future::Future, pin::Pin, task::{Context, Poll}}; +/// # use futures::stream::FuturesUnordered; +/// # struct MyFuture<F1, F2> { +/// # big: FuturesUnordered<F1>, +/// # small: F2, +/// # } +/// # +/// # use tokio::stream::Stream; +/// # impl<F1, F2> Future for MyFuture<F1, F2> +/// # where F1: Future, F2: Future +/// # , F1: Unpin, F2: Unpin +/// # { +/// # type Output = F2::Output; +/// # fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F2::Output> { +/// # let this = &mut *self; +/// # let mut big = Pin::new(&mut this.big); +/// # let small = Pin::new(&mut this.small); +/// # +/// // see if any of the big futures have finished +/// while let Some(e) = futures::ready!(tokio::coop::limit(64, || big.as_mut().poll_next(cx))) { +/// # // do something with e +/// # let _ = e; +/// # } +/// # small.poll(cx) +/// # } +/// # } +/// ``` +/// +/// Now, even if `big` spends its entire budget, `small` will likely be left with some budget left +/// to also do useful work. 
In particular, if the remaining budget was `N` at the start of `poll`, +/// `small` will have at least a budget of `N - 64`. It may be more if `big` did not spend its +/// entire budget. +/// +/// Note that you cannot _increase_ your budget by calling `limit`. The budget provided to the code +/// inside the closure is the _minimum_ of the _current_ budget and the bound. +/// +#[allow(unreachable_pub, dead_code)] +pub fn limit<R>(bound: usize, f: impl FnOnce() -> R) -> R { + HITS.with(|hits| { + let budget = hits.get(); + // `limit` cannot _increase_ the remaining budget + let bound = std::cmp::min(budget, bound); + // When f() exits, how much should we add to what is left? + let floor = budget.saturating_sub(bound); + // Make sure we restore the remaining budget even on panic + struct RestoreBudget<'a>(&'a Cell<usize>, usize); + impl<'a> Drop for RestoreBudget<'a> { + fn drop(&mut self) { + let left = self.0.get(); + self.0.set(self.1 + left); + } + } + // Time to restrict! + hits.set(bound); + let _restore = RestoreBudget(&hits, floor); + f() + }) +} + +/// Returns `Poll::Pending` if the current task has exceeded its budget and should yield. +#[allow(unreachable_pub, dead_code)] +#[inline] +pub fn poll_proceed(cx: &mut Context<'_>) -> Poll<()> { + HITS.with(|hits| { + let n = hits.get(); + if n == UNCONSTRAINED { + // opted out of budgeting + Poll::Ready(()) + } else if n == 0 { + cx.waker().wake_by_ref(); + Poll::Pending + } else { + hits.set(n.saturating_sub(1)); + Poll::Ready(()) + } + }) +} + +/// Resolves immediately unless the current task has already exceeded its budget. +/// +/// This should be placed after at least some work has been done. Otherwise a future sufficiently +/// deep in the task hierarchy may end up never getting to run because of the number of yield +/// points that inevitably appear before it is even reached. For example: +/// +/// ```ignore +/// # use tokio::stream::{Stream, StreamExt}; +/// async fn drop_all<I: Stream + Unpin>(mut input: I) { +/// while let Some(_) = input.next().await { +/// tokio::coop::proceed().await; +/// } +/// } +/// ``` +#[allow(unreachable_pub, dead_code)] +#[inline] +pub async fn proceed() { + use crate::future::poll_fn; + poll_fn(|cx| poll_proceed(cx)).await; +} + +pin_project_lite::pin_project! { + /// A future that cooperatively yields to the task scheduler when polling, + /// if the task's budget is exhausted. + /// + /// Internally, this is simply a future combinator which calls + /// [`poll_proceed`] in its `poll` implementation before polling the wrapped + /// future. + /// + /// # Examples + /// + /// ```rust,ignore + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::coop::CoopFutureExt; + /// + /// async { /* ... */ } + /// .cooperate() + /// .await; + /// # } + /// ``` + /// + /// [`poll_proceed`]: fn@poll_proceed + #[derive(Debug)] + #[allow(unreachable_pub, dead_code)] + pub struct CoopFuture<F> { + #[pin] + future: F, + } +} + +impl<F: Future> Future for CoopFuture<F> { + type Output = F::Output; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + ready!(poll_proceed(cx)); + self.project().future.poll(cx) + } +} + +impl<F: Future> CoopFuture<F> { + /// Returns a new `CoopFuture` wrapping the given future. + /// + #[allow(unreachable_pub, dead_code)] + pub fn new(future: F) -> Self { + Self { future } + } +} + +// Currently only used by `tokio::sync`; and if we make this combinator public, +// it should probably be on the `FutureExt` trait instead. +cfg_sync!
{ + /// Extension trait providing the `Future::cooperate` method. + /// + /// Note: if/when the co-op API becomes public, this method should probably be + /// provided by `FutureExt`, instead. + pub(crate) trait CoopFutureExt: Future { + /// Wrap `self` to cooperatively yield to the scheduler when polling, if the + /// task's budget is exhausted. + fn cooperate(self) -> CoopFuture<Self> + where + Self: Sized, + { + CoopFuture::new(self) + } + } + + impl<F> CoopFutureExt for F where F: Future {} +} + +#[cfg(all(test, not(loom)))] +mod test { + use super::*; + + fn get() -> usize { + HITS.with(|hits| hits.get()) + } + + #[test] + fn budgeting() { + use tokio_test::*; + + assert_eq!(get(), UNCONSTRAINED); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), UNCONSTRAINED); + budget(|| { + assert_eq!(get(), BUDGET); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), BUDGET - 1); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), BUDGET - 2); + }); + assert_eq!(get(), UNCONSTRAINED); + + budget(|| { + limit(3, || { + assert_eq!(get(), 3); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), 2); + limit(4, || { + assert_eq!(get(), 2); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), 1); + }); + assert_eq!(get(), 1); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), 0); + assert_pending!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), 0); + assert_pending!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), 0); + }); + assert_eq!(get(), BUDGET - 3); + assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx))); + assert_eq!(get(), BUDGET - 4); + assert_ready!(task::spawn(proceed()).poll()); + assert_eq!(get(), BUDGET - 5); + }); + } +} diff --git a/third_party/rust/tokio/src/fs/canonicalize.rs b/third_party/rust/tokio/src/fs/canonicalize.rs new file mode 100644 index 0000000000..403662685c --- /dev/null +++ b/third_party/rust/tokio/src/fs/canonicalize.rs @@ -0,0 +1,51 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::{Path, PathBuf}; + +/// Returns the canonical, absolute form of a path with all intermediate +/// components normalized and symbolic links resolved. +/// +/// This is an async version of [`std::fs::canonicalize`][std] +/// +/// [std]: std::fs::canonicalize +/// +/// # Platform-specific behavior +/// +/// This function currently corresponds to the `realpath` function on Unix +/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows. +/// Note that, this [may change in the future][changes]. +/// +/// On Windows, this converts the path to use [extended length path][path] +/// syntax, which allows your program to use longer path names, but means you +/// can only join backslash-delimited paths to it, and it may be incompatible +/// with other applications (if passed to the application on the command-line, +/// or written to a file another application may read). +/// +/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior +/// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * `path` does not exist. +/// * A non-final component in `path` is not a directory.
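A note on the budgeting mechanism in `coop.rs` above: `budget`, `poll_proceed`, and the `HITS` thread-local are all private to the crate at this point, so the scheme cannot be exercised directly from user code. The following standalone sketch re-implements the core idea under stated assumptions: `BUDGET_LEFT` and `usize::MAX` stand in for `HITS` and `UNCONSTRAINED`, a budget of 2 stands in for 128, and the no-op waker comes from the `futures` crate.

```rust
use std::cell::Cell;
use std::task::{Context, Poll};

// Stand-in for tokio's private thread-local `HITS`; `usize::MAX` plays the
// role of `UNCONSTRAINED` (budgeting disabled).
thread_local! {
    static BUDGET_LEFT: Cell<usize> = Cell::new(usize::MAX);
}

// Mirrors `poll_proceed`: spend one unit of budget, or yield once it hits 0.
fn poll_proceed(cx: &mut Context<'_>) -> Poll<()> {
    BUDGET_LEFT.with(|b| match b.get() {
        usize::MAX => Poll::Ready(()), // opted out of budgeting
        0 => {
            cx.waker().wake_by_ref(); // reschedule the task before yielding
            Poll::Pending
        }
        n => {
            b.set(n - 1);
            Poll::Ready(())
        }
    })
}

fn main() {
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);

    assert!(poll_proceed(&mut cx).is_ready()); // unconstrained: always ready

    BUDGET_LEFT.with(|b| b.set(2)); // what `budget()` does, with 2 instead of 128
    assert!(poll_proceed(&mut cx).is_ready());
    assert!(poll_proceed(&mut cx).is_ready());
    assert!(poll_proceed(&mut cx).is_pending()); // budget exhausted: yield
}
```

When the budget hits zero, `poll_proceed` wakes the task before returning `Pending`, precisely so the scheduler re-queues it instead of parking it forever.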
+/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// use std::io; +/// +/// #[tokio::main] +/// async fn main() -> io::Result<()> { +/// let path = fs::canonicalize("../a/../foo.txt").await?; +/// Ok(()) +/// } +/// ``` +pub async fn canonicalize(path: impl AsRef<Path>) -> io::Result<PathBuf> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::canonicalize(path)).await +} diff --git a/third_party/rust/tokio/src/fs/copy.rs b/third_party/rust/tokio/src/fs/copy.rs new file mode 100644 index 0000000000..d4d4d29c85 --- /dev/null +++ b/third_party/rust/tokio/src/fs/copy.rs @@ -0,0 +1,24 @@ +use crate::fs::asyncify; +use std::path::Path; + +/// Copies the contents of one file to another. This function will also copy the permission bits of the original file to the destination file. +/// This function will overwrite the contents of `to`. +/// +/// This is the async equivalent of `std::fs::copy`. +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// # async fn dox() -> std::io::Result<()> { +/// fs::copy("foo.txt", "bar.txt").await?; +/// # Ok(()) +/// # } +/// ``` + +pub async fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64, std::io::Error> { + let from = from.as_ref().to_owned(); + let to = to.as_ref().to_owned(); + asyncify(|| std::fs::copy(from, to)).await +} diff --git a/third_party/rust/tokio/src/fs/create_dir.rs b/third_party/rust/tokio/src/fs/create_dir.rs new file mode 100644 index 0000000000..e03b04dc4b --- /dev/null +++ b/third_party/rust/tokio/src/fs/create_dir.rs @@ -0,0 +1,52 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Creates a new, empty directory at the provided path +/// +/// This is an async version of [`std::fs::create_dir`][std] +/// +/// [std]: std::fs::create_dir +/// +/// # Platform-specific behavior +/// +/// This function currently corresponds to the `mkdir` function on Unix +/// and the `CreateDirectory` function on Windows. +/// Note that, this [may change in the future][changes]. +/// +/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior +/// +/// **NOTE**: If a parent of the given path doesn't exist, this function will +/// return an error. To create a directory and all its missing parents at the +/// same time, use the [`create_dir_all`] function. +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * User lacks permissions to create directory at `path`. +/// * A parent of the given path doesn't exist. (To create a directory and all +/// its missing parents at the same time, use the [`create_dir_all`] +/// function.) +/// * `path` already exists.
+/// +/// [`create_dir_all`]: super::create_dir_all() +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// use std::io; +/// +/// #[tokio::main] +/// async fn main() -> io::Result<()> { +/// fs::create_dir("/some/dir").await?; +/// Ok(()) +/// } +/// ``` +pub async fn create_dir(path: impl AsRef<Path>) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::create_dir(path)).await +} diff --git a/third_party/rust/tokio/src/fs/create_dir_all.rs b/third_party/rust/tokio/src/fs/create_dir_all.rs new file mode 100644 index 0000000000..21f0c82d11 --- /dev/null +++ b/third_party/rust/tokio/src/fs/create_dir_all.rs @@ -0,0 +1,53 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Recursively creates a directory and all of its parent components if they +/// are missing. +/// +/// This is an async version of [`std::fs::create_dir_all`][std] +/// +/// [std]: std::fs::create_dir_all +/// +/// # Platform-specific behavior +/// +/// This function currently corresponds to the `mkdir` function on Unix +/// and the `CreateDirectory` function on Windows. +/// Note that, this [may change in the future][changes]. +/// +/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * If any directory in the path specified by `path` does not already exist +/// and it could not be created otherwise. The specific error conditions for +/// when a directory is being created (after it is determined to not exist) are +/// outlined by [`fs::create_dir`]. +/// +/// Notable exception is made for situations where any of the directories +/// specified in the `path` could not be created as it was being created concurrently. +/// Such cases are considered to be successful. That is, calling `create_dir_all` +/// concurrently from multiple threads or processes is guaranteed not to fail +/// due to a race condition with itself. +/// +/// [`fs::create_dir`]: std::fs::create_dir +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// #[tokio::main] +/// async fn main() -> std::io::Result<()> { +/// fs::create_dir_all("/some/dir").await?; +/// Ok(()) +/// } +/// ``` +pub async fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::create_dir_all(path)).await +} diff --git a/third_party/rust/tokio/src/fs/file.rs b/third_party/rust/tokio/src/fs/file.rs new file mode 100644 index 0000000000..a1f22fc9b6 --- /dev/null +++ b/third_party/rust/tokio/src/fs/file.rs @@ -0,0 +1,739 @@ +//! Types for working with [`File`]. +//! +//! [`File`]: File + +use self::State::*; +use crate::fs::{asyncify, sys}; +use crate::io::blocking::Buf; +use crate::io::{AsyncRead, AsyncSeek, AsyncWrite}; + +use std::fmt; +use std::fs::{Metadata, Permissions}; +use std::future::Future; +use std::io::{self, Seek, SeekFrom}; +use std::path::Path; +use std::pin::Pin; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; +use std::task::Poll::*; + +/// A reference to an open file on the filesystem. +/// +/// This is a specialized version of [`std::fs::File`][std] for usage from the +/// Tokio runtime. +/// +/// An instance of a `File` can be read and/or written depending on what options +/// it was opened with. Files also implement Seek to alter the logical cursor +/// that the file contains internally. 
+/// +/// Files are automatically closed when they go out of scope. +/// +/// [std]: std::fs::File +/// +/// # Examples +/// +/// Create a new file and asynchronously write bytes to it: +/// +/// ```no_run +/// use tokio::fs::File; +/// use tokio::prelude::*; +/// +/// # async fn dox() -> std::io::Result<()> { +/// let mut file = File::create("foo.txt").await?; +/// file.write_all(b"hello, world!").await?; +/// # Ok(()) +/// # } +/// ``` +/// +/// Read the contents of a file into a buffer +/// +/// ```no_run +/// use tokio::fs::File; +/// use tokio::prelude::*; +/// +/// # async fn dox() -> std::io::Result<()> { +/// let mut file = File::open("foo.txt").await?; +/// +/// let mut contents = vec![]; +/// file.read_to_end(&mut contents).await?; +/// +/// println!("len = {}", contents.len()); +/// # Ok(()) +/// # } +/// ``` +pub struct File { + std: Arc<sys::File>, + state: State, + + /// Errors from writes/flushes are returned in write/flush calls. If a write + /// error is observed while performing a read, it is saved until the next + /// write / flush call. + last_write_err: Option<io::ErrorKind>, +} + +#[derive(Debug)] +enum State { + Idle(Option<Buf>), + Busy(sys::Blocking<(Operation, Buf)>), +} + +#[derive(Debug)] +enum Operation { + Read(io::Result<usize>), + Write(io::Result<()>), + Seek(io::Result<u64>), +} + +impl File { + /// Attempts to open a file in read-only mode. + /// + /// See [`OpenOptions`] for more details. + /// + /// [`OpenOptions`]: super::OpenOptions + /// + /// # Errors + /// + /// This function will return an error if called from outside of the Tokio + /// runtime or if path does not already exist. Other errors may also be + /// returned according to OpenOptions::open. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::open("foo.txt").await?; + /// + /// let mut contents = vec![]; + /// file.read_to_end(&mut contents).await?; + /// + /// println!("len = {}", contents.len()); + /// # Ok(()) + /// # } + /// ``` + pub async fn open(path: impl AsRef<Path>) -> io::Result<File> { + let path = path.as_ref().to_owned(); + let std = asyncify(|| sys::File::open(path)).await?; + + Ok(File::from_std(std)) + } + + /// Opens a file in write-only mode. + /// + /// This function will create a file if it does not exist, and will truncate + /// it if it does. + /// + /// See [`OpenOptions`] for more details. + /// + /// [`OpenOptions`]: super::OpenOptions + /// + /// # Errors + /// + /// Results in an error if called from outside of the Tokio runtime or if + /// the underlying [`create`] call results in an error. + /// + /// [`create`]: std::fs::File::create + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// file.write_all(b"hello, world!").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create(path: impl AsRef<Path>) -> io::Result<File> { + let path = path.as_ref().to_owned(); + let std_file = asyncify(move || sys::File::create(path)).await?; + Ok(File::from_std(std_file)) + } + + /// Converts a [`std::fs::File`][std] to a [`tokio::fs::File`][file]. + /// + /// [std]: std::fs::File + /// [file]: File + /// + /// # Examples + /// + /// ```no_run + /// // This line could block. It is not recommended to do this on the Tokio + /// // runtime. 
+ /// let std_file = std::fs::File::open("foo.txt").unwrap(); + /// let file = tokio::fs::File::from_std(std_file); + /// ``` + pub fn from_std(std: sys::File) -> File { + File { + std: Arc::new(std), + state: State::Idle(Some(Buf::with_capacity(0))), + last_write_err: None, + } + } + + /// Seeks to an offset, in bytes, in a stream. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// use std::io::SeekFrom; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::open("foo.txt").await?; + /// file.seek(SeekFrom::Start(6)).await?; + /// + /// let mut contents = vec![0u8; 10]; + /// file.read_exact(&mut contents).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn seek(&mut self, mut pos: SeekFrom) -> io::Result<u64> { + self.complete_inflight().await; + + let mut buf = match self.state { + Idle(ref mut buf_cell) => buf_cell.take().unwrap(), + _ => unreachable!(), + }; + + // Factor in any unread data from the buf + if !buf.is_empty() { + let n = buf.discard_read(); + + if let SeekFrom::Current(ref mut offset) = pos { + *offset += n; + } + } + + let std = self.std.clone(); + + // Start the operation + self.state = Busy(sys::run(move || { + let res = (&*std).seek(pos); + (Operation::Seek(res), buf) + })); + + let (op, buf) = match self.state { + Idle(_) => unreachable!(), + Busy(ref mut rx) => rx.await.unwrap(), + }; + + self.state = Idle(Some(buf)); + + match op { + Operation::Seek(res) => res, + _ => unreachable!(), + } + } + + /// Attempts to sync all OS-internal metadata to disk. + /// + /// This function will attempt to ensure that all in-core data reaches the + /// filesystem before returning. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// file.write_all(b"hello, world!").await?; + /// file.sync_all().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn sync_all(&mut self) -> io::Result<()> { + self.complete_inflight().await; + + let std = self.std.clone(); + asyncify(move || std.sync_all()).await + } + + /// This function is similar to `sync_all`, except that it may not + /// synchronize file metadata to the filesystem. + /// + /// This is intended for use cases that must synchronize content, but don't + /// need the metadata on disk. The goal of this method is to reduce disk + /// operations. + /// + /// Note that some platforms may simply implement this in terms of `sync_all`. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// file.write_all(b"hello, world!").await?; + /// file.sync_data().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn sync_data(&mut self) -> io::Result<()> { + self.complete_inflight().await; + + let std = self.std.clone(); + asyncify(move || std.sync_data()).await + } + + /// Truncates or extends the underlying file, updating the size of this file to become size. + /// + /// If the size is less than the current file's size, then the file will be + /// shrunk. If it is greater than the current file's size, then the file + /// will be extended to size and have all of the intermediate data filled in + /// with 0s. + /// + /// # Errors + /// + /// This function will return an error if the file is not opened for + /// writing. 
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// file.write_all(b"hello, world!").await?; + /// file.set_len(10).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn set_len(&mut self, size: u64) -> io::Result<()> { + self.complete_inflight().await; + + let mut buf = match self.state { + Idle(ref mut buf_cell) => buf_cell.take().unwrap(), + _ => unreachable!(), + }; + + let seek = if !buf.is_empty() { + Some(SeekFrom::Current(buf.discard_read())) + } else { + None + }; + + let std = self.std.clone(); + + self.state = Busy(sys::run(move || { + let res = if let Some(seek) = seek { + (&*std).seek(seek).and_then(|_| std.set_len(size)) + } else { + std.set_len(size) + } + .map(|_| 0); // the value is discarded later + + // Return the result as a seek + (Operation::Seek(res), buf) + })); + + let (op, buf) = match self.state { + Idle(_) => unreachable!(), + Busy(ref mut rx) => rx.await?, + }; + + self.state = Idle(Some(buf)); + + match op { + Operation::Seek(res) => res.map(|_| ()), + _ => unreachable!(), + } + } + + /// Queries metadata about the underlying file. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let file = File::open("foo.txt").await?; + /// let metadata = file.metadata().await?; + /// + /// println!("{:?}", metadata); + /// # Ok(()) + /// # } + /// ``` + pub async fn metadata(&self) -> io::Result<Metadata> { + let std = self.std.clone(); + asyncify(move || std.metadata()).await + } + + /// Create a new `File` instance that shares the same underlying file handle + /// as the existing `File` instance. Reads, writes, and seeks will affect both + /// File instances simultaneously. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let file = File::open("foo.txt").await?; + /// let file_clone = file.try_clone().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn try_clone(&self) -> io::Result<File> { + let std = self.std.clone(); + let std_file = asyncify(move || std.try_clone()).await?; + Ok(File::from_std(std_file)) + } + + /// Destructures `File` into a [`std::fs::File`][std]. This function is + /// async to allow any in-flight operations to complete. + /// + /// Use `File::try_into_std` to attempt conversion immediately. + /// + /// [std]: std::fs::File + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let tokio_file = File::open("foo.txt").await?; + /// let std_file = tokio_file.into_std().await; + /// # Ok(()) + /// # } + /// ``` + pub async fn into_std(mut self) -> sys::File { + self.complete_inflight().await; + Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed") + } + + /// Tries to immediately destructure `File` into a [`std::fs::File`][std]. + /// + /// [std]: std::fs::File + /// + /// # Errors + /// + /// This function will return an error containing the file if some + /// operation is in-flight. 
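One way to read the pair of conversions above: `into_std` awaits completion of any in-flight blocking operation, while `try_into_std` fails fast and hands the `File` back. A short sketch combining the two, per the signatures in this diff (the file name is arbitrary):

```rust
use tokio::fs::File;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let file = File::open("foo.txt").await?;

    // Prefer the non-blocking conversion; fall back to awaiting in-flight work.
    let std_file = match file.try_into_std() {
        Ok(std_file) => std_file,
        Err(tokio_file) => tokio_file.into_std().await,
    };

    println!("converted: {:?}", std_file);
    Ok(())
}
```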
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let tokio_file = File::open("foo.txt").await?; + /// let std_file = tokio_file.try_into_std().unwrap(); + /// # Ok(()) + /// # } + /// ``` + pub fn try_into_std(mut self) -> Result<sys::File, Self> { + match Arc::try_unwrap(self.std) { + Ok(file) => Ok(file), + Err(std_file_arc) => { + self.std = std_file_arc; + Err(self) + } + } + } + + /// Changes the permissions on the underlying file. + /// + /// # Platform-specific behavior + /// + /// This function currently corresponds to the `fchmod` function on Unix and + /// the `SetFileInformationByHandle` function on Windows. Note that, this + /// [may change in the future][changes]. + /// + /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior + /// + /// # Errors + /// + /// This function will return an error if the user lacks permission to change + /// attributes on the underlying file. It may also return an error in other + /// OS-specific unspecified cases. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let file = File::open("foo.txt").await?; + /// let mut perms = file.metadata().await?.permissions(); + /// perms.set_readonly(true); + /// file.set_permissions(perms).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> { + let std = self.std.clone(); + asyncify(move || std.set_permissions(perm)).await + } + + async fn complete_inflight(&mut self) { + use crate::future::poll_fn; + + if let Err(e) = poll_fn(|cx| Pin::new(&mut *self).poll_flush(cx)).await { + self.last_write_err = Some(e.kind()); + } + } +} + +impl AsyncRead for File { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + dst: &mut [u8], + ) -> Poll<io::Result<usize>> { + loop { + match self.state { + Idle(ref mut buf_cell) => { + let mut buf = buf_cell.take().unwrap(); + + if !buf.is_empty() { + let n = buf.copy_to(dst); + *buf_cell = Some(buf); + return Ready(Ok(n)); + } + + buf.ensure_capacity_for(dst); + let std = self.std.clone(); + + self.state = Busy(sys::run(move || { + let res = buf.read_from(&mut &*std); + (Operation::Read(res), buf) + })); + } + Busy(ref mut rx) => { + let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?; + + match op { + Operation::Read(Ok(_)) => { + let n = buf.copy_to(dst); + self.state = Idle(Some(buf)); + return Ready(Ok(n)); + } + Operation::Read(Err(e)) => { + assert!(buf.is_empty()); + + self.state = Idle(Some(buf)); + return Ready(Err(e)); + } + Operation::Write(Ok(_)) => { + assert!(buf.is_empty()); + self.state = Idle(Some(buf)); + continue; + } + Operation::Write(Err(e)) => { + assert!(self.last_write_err.is_none()); + self.last_write_err = Some(e.kind()); + self.state = Idle(Some(buf)); + } + Operation::Seek(_) => { + assert!(buf.is_empty()); + self.state = Idle(Some(buf)); + continue; + } + } + } + } + } + } +} + +impl AsyncSeek for File { + fn start_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut pos: SeekFrom, + ) -> Poll<io::Result<()>> { + loop { + match self.state { + Idle(ref mut buf_cell) => { + let mut buf = buf_cell.take().unwrap(); + + // Factor in any unread data from the buf + if !buf.is_empty() { + let n = buf.discard_read(); + + if let SeekFrom::Current(ref mut offset) = pos { + *offset += n; + } + } + + let std = self.std.clone(); + + self.state = Busy(sys::run(move || { + let res =
(&*std).seek(pos); + (Operation::Seek(res), buf) + })); + + return Ready(Ok(())); + } + Busy(ref mut rx) => { + let (op, buf) = ready!(Pin::new(rx).poll(cx))?; + self.state = Idle(Some(buf)); + + match op { + Operation::Read(_) => {} + Operation::Write(Err(e)) => { + assert!(self.last_write_err.is_none()); + self.last_write_err = Some(e.kind()); + } + Operation::Write(_) => {} + Operation::Seek(_) => {} + } + } + } + } + } + + fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { + loop { + match self.state { + Idle(_) => panic!("must call start_seek before calling poll_complete"), + Busy(ref mut rx) => { + let (op, buf) = ready!(Pin::new(rx).poll(cx))?; + self.state = Idle(Some(buf)); + + match op { + Operation::Read(_) => {} + Operation::Write(Err(e)) => { + assert!(self.last_write_err.is_none()); + self.last_write_err = Some(e.kind()); + } + Operation::Write(_) => {} + Operation::Seek(res) => return Ready(res), + } + } + } + } + } +} + +impl AsyncWrite for File { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + src: &[u8], + ) -> Poll<io::Result<usize>> { + if let Some(e) = self.last_write_err.take() { + return Ready(Err(e.into())); + } + + loop { + match self.state { + Idle(ref mut buf_cell) => { + let mut buf = buf_cell.take().unwrap(); + + let seek = if !buf.is_empty() { + Some(SeekFrom::Current(buf.discard_read())) + } else { + None + }; + + let n = buf.copy_from(src); + let std = self.std.clone(); + + self.state = Busy(sys::run(move || { + let res = if let Some(seek) = seek { + (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std)) + } else { + buf.write_to(&mut &*std) + }; + + (Operation::Write(res), buf) + })); + + return Ready(Ok(n)); + } + Busy(ref mut rx) => { + let (op, buf) = ready!(Pin::new(rx).poll(cx))?; + self.state = Idle(Some(buf)); + + match op { + Operation::Read(_) => { + // We don't care about the result here. The fact + // that the cursor has advanced will be reflected in + // the next iteration of the loop + continue; + } + Operation::Write(res) => { + // If the previous write was successful, continue. + // Otherwise, error. 
+ res?; + continue; + } + Operation::Seek(_) => { + // Ignore the seek + continue; + } + } + } + } + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + if let Some(e) = self.last_write_err.take() { + return Ready(Err(e.into())); + } + + let (op, buf) = match self.state { + Idle(_) => return Ready(Ok(())), + Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?, + }; + + // The buffer is not used here + self.state = Idle(Some(buf)); + + match op { + Operation::Read(_) => Ready(Ok(())), + Operation::Write(res) => Ready(res), + Operation::Seek(_) => Ready(Ok(())), + } + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + Poll::Ready(Ok(())) + } +} + +impl From<sys::File> for File { + fn from(std: sys::File) -> Self { + Self::from_std(std) + } +} + +impl fmt::Debug for File { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("tokio::fs::File") + .field("std", &self.std) + .finish() + } +} + +#[cfg(unix)] +impl std::os::unix::io::AsRawFd for File { + fn as_raw_fd(&self) -> std::os::unix::io::RawFd { + self.std.as_raw_fd() + } +} + +#[cfg(windows)] +impl std::os::windows::io::AsRawHandle for File { + fn as_raw_handle(&self) -> std::os::windows::io::RawHandle { + self.std.as_raw_handle() + } +} diff --git a/third_party/rust/tokio/src/fs/hard_link.rs b/third_party/rust/tokio/src/fs/hard_link.rs new file mode 100644 index 0000000000..50cc17d286 --- /dev/null +++ b/third_party/rust/tokio/src/fs/hard_link.rs @@ -0,0 +1,46 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Creates a new hard link on the filesystem. +/// +/// This is an async version of [`std::fs::hard_link`][std] +/// +/// [std]: std::fs::hard_link +/// +/// The `dst` path will be a link pointing to the `src` path. Note that systems +/// often require these two paths to both be located on the same filesystem. +/// +/// # Platform-specific behavior +/// +/// This function currently corresponds to the `link` function on Unix +/// and the `CreateHardLink` function on Windows. +/// Note that, this [may change in the future][changes]. +/// +/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * The `src` path is not a file or doesn't exist. +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// #[tokio::main] +/// async fn main() -> std::io::Result<()> { +/// fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt +/// Ok(()) +/// } +/// ``` +pub async fn hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> { + let src = src.as_ref().to_owned(); + let dst = dst.as_ref().to_owned(); + + asyncify(move || std::fs::hard_link(src, dst)).await +} diff --git a/third_party/rust/tokio/src/fs/metadata.rs b/third_party/rust/tokio/src/fs/metadata.rs new file mode 100644 index 0000000000..ff9cded79a --- /dev/null +++ b/third_party/rust/tokio/src/fs/metadata.rs @@ -0,0 +1,47 @@ +use crate::fs::asyncify; + +use std::fs::Metadata; +use std::io; +use std::path::Path; + +/// Given a path, queries the file system to get information about a file, +/// directory, etc. +/// +/// This is an async version of [`std::fs::metadata`][std] +/// +/// This function will traverse symbolic links to query information about the +/// destination file. 
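Stepping back to `File` above: its `AsyncRead`, `AsyncWrite`, and `AsyncSeek` impls all revolve around the same two-state machine, where `Idle` parks the reusable buffer and `Busy` holds the handle of an in-flight blocking-pool operation. Below is a stripped-down sketch of that shape, not tokio's actual code: it uses the public `spawn_blocking` instead of the private `sys::run`, a plain `Vec<u8>` instead of the internal `Buf`, and a placeholder body for the blocking work.

```rust
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::task::JoinHandle;

type Buf = Vec<u8>; // stand-in for tokio's internal `io::blocking::Buf`

enum State {
    // Between operations, the reusable buffer is parked here.
    Idle(Option<Buf>),
    // While an operation runs on the blocking pool, the pool task owns the buffer.
    Busy(JoinHandle<(io::Result<usize>, Buf)>),
}

impl State {
    // One step of the machine: start a blocking op if idle, otherwise try to
    // harvest the in-flight one. Mirrors the `loop`/`match` in `File::poll_read`.
    fn poll_step(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        loop {
            match *self {
                State::Idle(ref mut buf_cell) => {
                    let buf = buf_cell.take().expect("buffer present when idle");
                    *self = State::Busy(tokio::task::spawn_blocking(move || {
                        // A real implementation would read into / write from `buf` here.
                        let n = buf.len();
                        (Ok(n), buf)
                    }));
                }
                State::Busy(ref mut rx) => {
                    let (res, buf) = match Pin::new(rx).poll(cx) {
                        Poll::Pending => return Poll::Pending,
                        Poll::Ready(Ok(pair)) => pair,
                        Poll::Ready(Err(join_err)) => {
                            return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, join_err)));
                        }
                    };
                    *self = State::Idle(Some(buf)); // park the buffer for reuse
                    return Poll::Ready(res);
                }
            }
        }
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let mut state = State::Idle(Some(vec![0u8; 8]));
    let n = futures::future::poll_fn(|cx| state.poll_step(cx)).await?;
    println!("blocking operation reported {} bytes", n);
    Ok(())
}
```

Parking the buffer in `Idle` is the design point: the allocation survives across operations, and ownership moves wholesale to the blocking task rather than being shared across threads.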
+/// +/// # Platform-specific behavior +/// +/// This function currently corresponds to the `stat` function on Unix and the +/// `GetFileAttributesEx` function on Windows. Note that, this [may change in +/// the future][changes]. +/// +/// [std]: std::fs::metadata +/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior +/// +/// # Errors +/// +/// This function will return an error in the following situations, but is not +/// limited to just these cases: +/// +/// * The user lacks permissions to perform `metadata` call on `path`. +/// * `path` does not exist. +/// +/// # Examples +/// +/// ```rust,no_run +/// use tokio::fs; +/// +/// #[tokio::main] +/// async fn main() -> std::io::Result<()> { +/// let attr = fs::metadata("/some/file/path.txt").await?; +/// // inspect attr ... +/// Ok(()) +/// } +/// ``` +pub async fn metadata(path: impl AsRef<Path>) -> io::Result<Metadata> { + let path = path.as_ref().to_owned(); + asyncify(|| std::fs::metadata(path)).await +} diff --git a/third_party/rust/tokio/src/fs/mod.rs b/third_party/rust/tokio/src/fs/mod.rs new file mode 100644 index 0000000000..3eb0376463 --- /dev/null +++ b/third_party/rust/tokio/src/fs/mod.rs @@ -0,0 +1,109 @@ +#![cfg(not(loom))] + +//! Asynchronous file and standard stream adaptation. +//! +//! This module contains utility methods and adapter types for input/output to +//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and +//! filesystem manipulation, for use within (and only within) a Tokio runtime. +//! +//! Tasks run by *worker* threads should not block, as this could delay +//! servicing reactor events. Portable filesystem operations are blocking, +//! however. This module offers adapters which use a `blocking` annotation +//! to inform the runtime that a blocking operation is required. When +//! necessary, this allows the runtime to convert the current thread from a +//! *worker* to a *backup* thread, where blocking is acceptable. +//! +//! ## Usage +//! +//! Where possible, users should prefer the provided asynchronous-specific +//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll` +//! type. Adaptions also extend to traits like `std::io::Read` where methods +//! return `std::io::Result`. Be warned that these adapted methods may return +//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted +//! to a *backup* thread immediately. +//! +//! 
[`AsyncRead`]: https://docs.rs/tokio-io/0.1/tokio_io/trait.AsyncRead.html + +mod canonicalize; +pub use self::canonicalize::canonicalize; + +mod create_dir; +pub use self::create_dir::create_dir; + +mod create_dir_all; +pub use self::create_dir_all::create_dir_all; + +mod file; +pub use self::file::File; + +mod hard_link; +pub use self::hard_link::hard_link; + +mod metadata; +pub use self::metadata::metadata; + +mod open_options; +pub use self::open_options::OpenOptions; + +pub mod os; + +mod read; +pub use self::read::read; + +mod read_dir; +pub use self::read_dir::{read_dir, DirEntry, ReadDir}; + +mod read_link; +pub use self::read_link::read_link; + +mod read_to_string; +pub use self::read_to_string::read_to_string; + +mod remove_dir; +pub use self::remove_dir::remove_dir; + +mod remove_dir_all; +pub use self::remove_dir_all::remove_dir_all; + +mod remove_file; +pub use self::remove_file::remove_file; + +mod rename; +pub use self::rename::rename; + +mod set_permissions; +pub use self::set_permissions::set_permissions; + +mod symlink_metadata; +pub use self::symlink_metadata::symlink_metadata; + +mod write; +pub use self::write::write; + +mod copy; +pub use self::copy::copy; + +use std::io; + +pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T> +where + F: FnOnce() -> io::Result<T> + Send + 'static, + T: Send + 'static, +{ + match sys::run(f).await { + Ok(res) => res, + Err(_) => Err(io::Error::new( + io::ErrorKind::Other, + "background task failed", + )), + } +} + +/// Types in this module can be mocked out in tests. +mod sys { + pub(crate) use std::fs::File; + + // TODO: don't rename + pub(crate) use crate::runtime::spawn_blocking as run; + pub(crate) use crate::task::JoinHandle as Blocking; +} diff --git a/third_party/rust/tokio/src/fs/open_options.rs b/third_party/rust/tokio/src/fs/open_options.rs new file mode 100644 index 0000000000..3210f4b7b5 --- /dev/null +++ b/third_party/rust/tokio/src/fs/open_options.rs @@ -0,0 +1,397 @@ +use crate::fs::{asyncify, File}; + +use std::io; +use std::path::Path; + +/// Options and flags which can be used to configure how a file is opened. +/// +/// This builder exposes the ability to configure how a [`File`] is opened and +/// what operations are permitted on the open file. The [`File::open`] and +/// [`File::create`] methods are aliases for commonly used options using this +/// builder. +/// +/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], +/// then chain calls to methods to set each option, then call [`open`], passing +/// the path of the file you're trying to open. This will give you a +/// [`io::Result`][result] with a [`File`] inside that you can further operate +/// on. +/// +/// This is a specialized version of [`std::fs::OpenOptions`] for usage from +/// the Tokio runtime. +/// +/// `From<std::fs::OpenOptions>` is implemented for more advanced configuration +/// than the methods provided here. 
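The `asyncify` helper in `fs/mod.rs` above is the backbone of this whole module: each wrapper takes ownership of its arguments (hence the repeated `as_ref().to_owned()`) so the closure can be `'static`, then runs the synchronous `std::fs` call on the blocking pool. Here is a sketch of the same shape built on the public `spawn_blocking` (essentially what the private `sys::run` alias resolves to); `canonicalize_like` is a hypothetical name:

```rust
use std::io;
use std::path::{Path, PathBuf};

// Same shape as the private `asyncify` + `canonicalize` pair above, but built
// on the public `tokio::task::spawn_blocking` API.
async fn canonicalize_like(path: impl AsRef<Path>) -> io::Result<PathBuf> {
    // Own the path: the closure must be 'static to move to the blocking pool.
    let path = path.as_ref().to_owned();
    match tokio::task::spawn_blocking(move || std::fs::canonicalize(path)).await {
        Ok(res) => res,
        // A panicked/cancelled pool task surfaces as a generic I/O error,
        // matching the "background task failed" message above.
        Err(_) => Err(io::Error::new(io::ErrorKind::Other, "background task failed")),
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    println!("{}", canonicalize_like(".").await?.display());
    Ok(())
}
```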
+/// +/// [`new`]: OpenOptions::new +/// [`open`]: OpenOptions::open +/// [result]: std::io::Result +/// [`File`]: File +/// [`File::open`]: File::open +/// [`File::create`]: File::create +/// [`std::fs::OpenOptions`]: std::fs::OpenOptions +/// +/// # Examples +/// +/// Opening a file to read: +/// +/// ```no_run +/// use tokio::fs::OpenOptions; +/// use std::io; +/// +/// #[tokio::main] +/// async fn main() -> io::Result<()> { +/// let file = OpenOptions::new() +/// .read(true) +/// .open("foo.txt") +/// .await?; +/// +/// Ok(()) +/// } +/// ``` +/// +/// Opening a file for both reading and writing, as well as creating it if it +/// doesn't exist: +/// +/// ```no_run +/// use tokio::fs::OpenOptions; +/// use std::io; +/// +/// #[tokio::main] +/// async fn main() -> io::Result<()> { +/// let file = OpenOptions::new() +/// .read(true) +/// .write(true) +/// .create(true) +/// .open("foo.txt") +/// .await?; +/// +/// Ok(()) +/// } +/// ``` +#[derive(Clone, Debug)] +pub struct OpenOptions(std::fs::OpenOptions); + +impl OpenOptions { + /// Creates a blank new set of options ready for configuration. + /// + /// All options are initially set to `false`. + /// + /// This is an async version of [`std::fs::OpenOptions::new`][std] + /// + /// [std]: std::fs::OpenOptions::new + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// + /// let mut options = OpenOptions::new(); + /// let future = options.read(true).open("foo.txt"); + /// ``` + pub fn new() -> OpenOptions { + OpenOptions(std::fs::OpenOptions::new()) + } + + /// Sets the option for read access. + /// + /// This option, when true, will indicate that the file should be + /// `read`-able if opened. + /// + /// This is an async version of [`std::fs::OpenOptions::read`][std] + /// + /// [std]: std::fs::OpenOptions::read + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .read(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn read(&mut self, read: bool) -> &mut OpenOptions { + self.0.read(read); + self + } + + /// Sets the option for write access. + /// + /// This option, when true, will indicate that the file should be + /// `write`-able if opened. + /// + /// This is an async version of [`std::fs::OpenOptions::write`][std] + /// + /// [std]: std::fs::OpenOptions::write + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .write(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn write(&mut self, write: bool) -> &mut OpenOptions { + self.0.write(write); + self + } + + /// Sets the option for the append mode. + /// + /// This option, when true, means that writes will append to a file instead + /// of overwriting previous contents. Note that setting + /// `.write(true).append(true)` has the same effect as setting only + /// `.append(true)`. + /// + /// For most filesystems, the operating system guarantees that all writes are + /// atomic: no writes get mangled because another process writes at the same + /// time. + /// + /// One maybe obvious note when using append-mode: make sure that all data + /// that belongs together is written to the file in one operation. 
This + /// can be done by concatenating strings before passing them to [`write()`], + /// or using a buffered writer (with a buffer of adequate size), + /// and calling [`flush()`] when the message is complete. + /// + /// If a file is opened with both read and append access, beware that after + /// opening, and after every write, the position for reading may be set at the + /// end of the file. So, before writing, save the current position (using + /// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read. + /// + /// This is an async version of [`std::fs::OpenOptions::append`][std] + /// + /// [std]: std::fs::OpenOptions::append + /// + /// ## Note + /// + /// This function doesn't create the file if it doesn't exist. Use the [`create`] + /// method to do so. + /// + /// [`write()`]: crate::io::AsyncWriteExt::write + /// [`flush()`]: crate::io::AsyncWriteExt::flush + /// [`seek`]: crate::io::AsyncSeekExt::seek + /// [`SeekFrom`]: std::io::SeekFrom + /// [`Current`]: std::io::SeekFrom::Current + /// [`create`]: OpenOptions::create + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .append(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn append(&mut self, append: bool) -> &mut OpenOptions { + self.0.append(append); + self + } + + /// Sets the option for truncating a previous file. + /// + /// If a file is successfully opened with this option set it will truncate + /// the file to 0 length if it already exists. + /// + /// The file must be opened with write access for truncate to work. + /// + /// This is an async version of [`std::fs::OpenOptions::truncate`][std] + /// + /// [std]: std::fs::OpenOptions::truncate + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .write(true) + /// .truncate(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions { + self.0.truncate(truncate); + self + } + + /// Sets the option for creating a new file. + /// + /// This option indicates whether a new file will be created if the file + /// does not yet already exist. + /// + /// In order for the file to be created, [`write`] or [`append`] access must + /// be used. + /// + /// This is an async version of [`std::fs::OpenOptions::create`][std] + /// + /// [std]: std::fs::OpenOptions::create + /// [`write`]: OpenOptions::write + /// [`append`]: OpenOptions::append + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .write(true) + /// .create(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn create(&mut self, create: bool) -> &mut OpenOptions { + self.0.create(create); + self + } + + /// Sets the option to always create a new file. + /// + /// This option indicates whether a new file will be created. No file is + /// allowed to exist at the target location, also no (dangling) symlink. + /// + /// This option is useful because it is atomic. 
Otherwise between checking + /// whether a file exists and creating a new one, the file may have been + /// created by another process (a TOCTOU race condition / attack). + /// + /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are + /// ignored. + /// + /// The file must be opened with write or append access in order to create a + /// new file. + /// + /// This is an async version of [`std::fs::OpenOptions::create_new`][std] + /// + /// [std]: std::fs::OpenOptions::create_new + /// [`.create()`]: OpenOptions::create + /// [`.truncate()`]: OpenOptions::truncate + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new() + /// .write(true) + /// .create_new(true) + /// .open("foo.txt") + /// .await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions { + self.0.create_new(create_new); + self + } + + /// Opens a file at `path` with the options specified by `self`. + /// + /// This is an async version of [`std::fs::OpenOptions::open`][std] + /// + /// [std]: std::fs::OpenOptions::open + /// + /// # Errors + /// + /// This function will return an error under a number of different + /// circumstances. Some of these error conditions are listed here, together + /// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of + /// the compatibility contract of the function, especially the `Other` kind + /// might change to more specific kinds in the future. + /// + /// * [`NotFound`]: The specified file does not exist and neither `create` + /// or `create_new` is set. + /// * [`NotFound`]: One of the directory components of the file path does + /// not exist. + /// * [`PermissionDenied`]: The user lacks permission to get the specified + /// access rights for the file. + /// * [`PermissionDenied`]: The user lacks permission to open one of the + /// directory components of the specified path. + /// * [`AlreadyExists`]: `create_new` was specified and the file already + /// exists. + /// * [`InvalidInput`]: Invalid combinations of open options (truncate + /// without write access, no access mode set, etc.). + /// * [`Other`]: One of the directory components of the specified file path + /// was not, in fact, a directory. + /// * [`Other`]: Filesystem-level errors: full disk, write permission + /// requested on a read-only file system, exceeded disk quota, too many + /// open files, too long filename, too many symbolic links in the + /// specified path (Unix-like systems only), etc. 
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::OpenOptions; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let file = OpenOptions::new().open("foo.txt").await?; + /// Ok(()) + /// } + /// ``` + /// + /// [`ErrorKind`]: std::io::ErrorKind + /// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists + /// [`InvalidInput`]: std::io::ErrorKind::InvalidInput + /// [`NotFound`]: std::io::ErrorKind::NotFound + /// [`Other`]: std::io::ErrorKind::Other + /// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied + pub async fn open(&self, path: impl AsRef<Path>) -> io::Result<File> { + let path = path.as_ref().to_owned(); + let opts = self.0.clone(); + + let std = asyncify(move || opts.open(path)).await?; + Ok(File::from_std(std)) + } +} + +impl From<std::fs::OpenOptions> for OpenOptions { + fn from(options: std::fs::OpenOptions) -> OpenOptions { + OpenOptions(options) + } +} + +impl Default for OpenOptions { + fn default() -> Self { + Self::new() + } +} diff --git a/third_party/rust/tokio/src/fs/os/mod.rs b/third_party/rust/tokio/src/fs/os/mod.rs new file mode 100644 index 0000000000..f4b8bfb617 --- /dev/null +++ b/third_party/rust/tokio/src/fs/os/mod.rs @@ -0,0 +1,7 @@ +//! OS-specific functionality. + +#[cfg(unix)] +pub mod unix; + +#[cfg(windows)] +pub mod windows; diff --git a/third_party/rust/tokio/src/fs/os/unix/mod.rs b/third_party/rust/tokio/src/fs/os/unix/mod.rs new file mode 100644 index 0000000000..3b0bec38bd --- /dev/null +++ b/third_party/rust/tokio/src/fs/os/unix/mod.rs @@ -0,0 +1,4 @@ +//! Unix-specific extensions to primitives in the `tokio_fs` module. + +mod symlink; +pub use self::symlink::symlink; diff --git a/third_party/rust/tokio/src/fs/os/unix/symlink.rs b/third_party/rust/tokio/src/fs/os/unix/symlink.rs new file mode 100644 index 0000000000..22ece7250f --- /dev/null +++ b/third_party/rust/tokio/src/fs/os/unix/symlink.rs @@ -0,0 +1,18 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Creates a new symbolic link on the filesystem. +/// +/// The `dst` path will be a symbolic link pointing to the `src` path. +/// +/// This is an async version of [`std::os::unix::fs::symlink`][std] +/// +/// [std]: std::os::unix::fs::symlink +pub async fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> { + let src = src.as_ref().to_owned(); + let dst = dst.as_ref().to_owned(); + + asyncify(move || std::os::unix::fs::symlink(src, dst)).await +} diff --git a/third_party/rust/tokio/src/fs/os/windows/mod.rs b/third_party/rust/tokio/src/fs/os/windows/mod.rs new file mode 100644 index 0000000000..42eb7bdb92 --- /dev/null +++ b/third_party/rust/tokio/src/fs/os/windows/mod.rs @@ -0,0 +1,7 @@ +//! Windows-specific extensions for the primitives in the `tokio_fs` module. + +mod symlink_dir; +pub use self::symlink_dir::symlink_dir; + +mod symlink_file; +pub use self::symlink_file::symlink_file; diff --git a/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs b/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs new file mode 100644 index 0000000000..736e762b48 --- /dev/null +++ b/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs @@ -0,0 +1,19 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Creates a new directory symlink on the filesystem. +/// +/// The `dst` path will be a directory symbolic link pointing to the `src` +/// path. 
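Returning to `OpenOptions` for a moment: the `From<std::fs::OpenOptions>` impl above is what enables the "more advanced configuration" its docs mention, since platform-specific knobs are not re-exported by the wrapper in this version. A Unix-only sketch (the file name is arbitrary):

```rust
use std::os::unix::fs::OpenOptionsExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Configure std-only options first...
    let mut std_opts = std::fs::OpenOptions::new();
    std_opts.write(true).create(true).mode(0o600); // `mode` is std-only

    // ...then convert into the async wrapper and open on the blocking pool.
    let file = tokio::fs::OpenOptions::from(std_opts).open("private.txt").await?;
    let _ = file;
    Ok(())
}
```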
+///
+/// This is an async version of [`std::os::windows::fs::symlink_dir`][std]
+///
+/// [std]: std::os::windows::fs::symlink_dir
+pub async fn symlink_dir(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+    let src = src.as_ref().to_owned();
+    let dst = dst.as_ref().to_owned();
+
+    asyncify(move || std::os::windows::fs::symlink_dir(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs b/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs
new file mode 100644
index 0000000000..07d8e60419
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs
@@ -0,0 +1,19 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new file symbolic link on the filesystem.
+///
+/// The `dst` path will be a file symbolic link pointing to the `src`
+/// path.
+///
+/// This is an async version of [`std::os::windows::fs::symlink_file`][std]
+///
+/// [std]: std::os::windows::fs::symlink_file
+pub async fn symlink_file(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+    let src = src.as_ref().to_owned();
+    let dst = dst.as_ref().to_owned();
+
+    asyncify(move || std::os::windows::fs::symlink_file(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/read.rs b/third_party/rust/tokio/src/fs/read.rs
new file mode 100644
index 0000000000..2d80eb5bd3
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/read.rs
@@ -0,0 +1,47 @@
+use crate::fs::asyncify;
+
+use std::{io, path::Path};
+
+/// Reads the entire contents of a file into a bytes vector.
+///
+/// This is an async version of [`std::fs::read`][std]
+///
+/// [std]: std::fs::read
+///
+/// This is a convenience function for using [`File::open`] and [`read_to_end`]
+/// with fewer imports and without an intermediate variable. It pre-allocates a
+/// buffer based on the file size when available, so it is generally faster than
+/// reading into a vector created with `Vec::new()`.
+///
+/// [`File::open`]: super::File::open
+/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end
+///
+/// # Errors
+///
+/// This function will return an error if `path` does not already exist.
+/// Other errors may also be returned according to [`OpenOptions::open`].
+///
+/// [`OpenOptions::open`]: super::OpenOptions::open
+///
+/// It will also return an error if, while reading, it encounters an error of a
+/// kind other than [`ErrorKind::Interrupted`].
+/// +/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// use std::net::SocketAddr; +/// +/// #[tokio::main] +/// async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> { +/// let contents = fs::read("address.txt").await?; +/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?; +/// Ok(()) +/// } +/// ``` +pub async fn read(path: impl AsRef<Path>) -> io::Result<Vec<u8>> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::read(path)).await +} diff --git a/third_party/rust/tokio/src/fs/read_dir.rs b/third_party/rust/tokio/src/fs/read_dir.rs new file mode 100644 index 0000000000..f9b16c66c5 --- /dev/null +++ b/third_party/rust/tokio/src/fs/read_dir.rs @@ -0,0 +1,244 @@ +use crate::fs::{asyncify, sys}; + +use std::ffi::OsString; +use std::fs::{FileType, Metadata}; +use std::future::Future; +use std::io; +#[cfg(unix)] +use std::os::unix::fs::DirEntryExt; +use std::path::{Path, PathBuf}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + +/// Returns a stream over the entries within a directory. +/// +/// This is an async version of [`std::fs::read_dir`](std::fs::read_dir) +pub async fn read_dir(path: impl AsRef<Path>) -> io::Result<ReadDir> { + let path = path.as_ref().to_owned(); + let std = asyncify(|| std::fs::read_dir(path)).await?; + + Ok(ReadDir(State::Idle(Some(std)))) +} + +/// Stream of the entries in a directory. +/// +/// This stream is returned from the [`read_dir`] function of this module and +/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] +/// information like the entry's path and possibly other metadata can be +/// learned. +/// +/// # Errors +/// +/// This [`Stream`] will return an [`Err`] if there's some sort of intermittent +/// IO error during iteration. +/// +/// [`read_dir`]: read_dir +/// [`DirEntry`]: DirEntry +/// [`Stream`]: crate::stream::Stream +/// [`Err`]: std::result::Result::Err +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct ReadDir(State); + +#[derive(Debug)] +enum State { + Idle(Option<std::fs::ReadDir>), + Pending(sys::Blocking<(Option<io::Result<std::fs::DirEntry>>, std::fs::ReadDir)>), +} + +impl ReadDir { + /// Returns the next entry in the directory stream. + pub async fn next_entry(&mut self) -> io::Result<Option<DirEntry>> { + use crate::future::poll_fn; + poll_fn(|cx| self.poll_next_entry(cx)).await + } + + #[doc(hidden)] + pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<Option<DirEntry>>> { + loop { + match self.0 { + State::Idle(ref mut std) => { + let mut std = std.take().unwrap(); + + self.0 = State::Pending(sys::run(move || { + let ret = std.next(); + (ret, std) + })); + } + State::Pending(ref mut rx) => { + let (ret, std) = ready!(Pin::new(rx).poll(cx))?; + self.0 = State::Idle(Some(std)); + + let ret = match ret { + Some(Ok(std)) => Ok(Some(DirEntry(Arc::new(std)))), + Some(Err(e)) => Err(e), + None => Ok(None), + }; + + return Poll::Ready(ret); + } + } + } + } +} + +#[cfg(feature = "stream")] +impl crate::stream::Stream for ReadDir { + type Item = io::Result<DirEntry>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + Poll::Ready(match ready!(self.poll_next_entry(cx)) { + Ok(Some(entry)) => Some(Ok(entry)), + Ok(None) => None, + Err(err) => Some(Err(err)), + }) + } +} + +/// Entries returned by the [`ReadDir`] stream. 
+/// +/// [`ReadDir`]: struct@ReadDir +/// +/// This is a specialized version of [`std::fs::DirEntry`] for usage from the +/// Tokio runtime. +/// +/// An instance of `DirEntry` represents an entry inside of a directory on the +/// filesystem. Each entry can be inspected via methods to learn about the full +/// path or possibly other metadata through per-platform extension traits. +#[derive(Debug)] +pub struct DirEntry(Arc<std::fs::DirEntry>); + +impl DirEntry { + /// Returns the full path to the file that this entry represents. + /// + /// The full path is created by joining the original path to `read_dir` + /// with the filename of this entry. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut entries = fs::read_dir(".").await?; + /// + /// while let Some(entry) = entries.next_entry().await? { + /// println!("{:?}", entry.path()); + /// } + /// # Ok(()) + /// # } + /// ``` + /// + /// This prints output like: + /// + /// ```text + /// "./whatever.txt" + /// "./foo.html" + /// "./hello_world.rs" + /// ``` + /// + /// The exact text, of course, depends on what files you have in `.`. + pub fn path(&self) -> PathBuf { + self.0.path() + } + + /// Returns the bare file name of this directory entry without any other + /// leading path component. + /// + /// # Examples + /// + /// ``` + /// use tokio::fs; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut entries = fs::read_dir(".").await?; + /// + /// while let Some(entry) = entries.next_entry().await? { + /// println!("{:?}", entry.file_name()); + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn file_name(&self) -> OsString { + self.0.file_name() + } + + /// Returns the metadata for the file that this entry points at. + /// + /// This function will not traverse symlinks if this entry points at a + /// symlink. + /// + /// # Platform-specific behavior + /// + /// On Windows this function is cheap to call (no extra system calls + /// needed), but on Unix platforms this function is the equivalent of + /// calling `symlink_metadata` on the path. + /// + /// # Examples + /// + /// ``` + /// use tokio::fs; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut entries = fs::read_dir(".").await?; + /// + /// while let Some(entry) = entries.next_entry().await? { + /// if let Ok(metadata) = entry.metadata().await { + /// // Now let's show our entry's permissions! + /// println!("{:?}: {:?}", entry.path(), metadata.permissions()); + /// } else { + /// println!("Couldn't get file type for {:?}", entry.path()); + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn metadata(&self) -> io::Result<Metadata> { + let std = self.0.clone(); + asyncify(move || std.metadata()).await + } + + /// Returns the file type for the file that this entry points at. + /// + /// This function will not traverse symlinks if this entry points at a + /// symlink. + /// + /// # Platform-specific behavior + /// + /// On Windows and most Unix platforms this function is free (no extra + /// system calls needed), but some Unix platforms may require the equivalent + /// call to `symlink_metadata` to learn about the target file type. + /// + /// # Examples + /// + /// ``` + /// use tokio::fs; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut entries = fs::read_dir(".").await?; + /// + /// while let Some(entry) = entries.next_entry().await? { + /// if let Ok(file_type) = entry.file_type().await { + /// // Now let's show our entry's file type! 
+ /// println!("{:?}: {:?}", entry.path(), file_type); + /// } else { + /// println!("Couldn't get file type for {:?}", entry.path()); + /// } + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn file_type(&self) -> io::Result<FileType> { + let std = self.0.clone(); + asyncify(move || std.file_type()).await + } +} + +#[cfg(unix)] +impl DirEntryExt for DirEntry { + fn ino(&self) -> u64 { + self.0.ino() + } +} diff --git a/third_party/rust/tokio/src/fs/read_link.rs b/third_party/rust/tokio/src/fs/read_link.rs new file mode 100644 index 0000000000..6c48c5e156 --- /dev/null +++ b/third_party/rust/tokio/src/fs/read_link.rs @@ -0,0 +1,14 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::{Path, PathBuf}; + +/// Reads a symbolic link, returning the file that the link points to. +/// +/// This is an async version of [`std::fs::read_link`][std] +/// +/// [std]: std::fs::read_link +pub async fn read_link(path: impl AsRef<Path>) -> io::Result<PathBuf> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::read_link(path)).await +} diff --git a/third_party/rust/tokio/src/fs/read_to_string.rs b/third_party/rust/tokio/src/fs/read_to_string.rs new file mode 100644 index 0000000000..c743bb4ddc --- /dev/null +++ b/third_party/rust/tokio/src/fs/read_to_string.rs @@ -0,0 +1,24 @@ +use crate::fs::asyncify; + +use std::{io, path::Path}; + +/// Creates a future which will open a file for reading and read the entire +/// contents into a string and return said string. +/// +/// This is the async equivalent of `std::fs::read_to_string`. +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// # async fn dox() -> std::io::Result<()> { +/// let contents = fs::read_to_string("foo.txt").await?; +/// println!("foo.txt contains {} bytes", contents.len()); +/// # Ok(()) +/// # } +/// ``` +pub async fn read_to_string(path: impl AsRef<Path>) -> io::Result<String> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::read_to_string(path)).await +} diff --git a/third_party/rust/tokio/src/fs/remove_dir.rs b/third_party/rust/tokio/src/fs/remove_dir.rs new file mode 100644 index 0000000000..6e7cbd08f6 --- /dev/null +++ b/third_party/rust/tokio/src/fs/remove_dir.rs @@ -0,0 +1,12 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Removes an existing, empty directory. +/// +/// This is an async version of [`std::fs::remove_dir`](std::fs::remove_dir) +pub async fn remove_dir(path: impl AsRef<Path>) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::remove_dir(path)).await +} diff --git a/third_party/rust/tokio/src/fs/remove_dir_all.rs b/third_party/rust/tokio/src/fs/remove_dir_all.rs new file mode 100644 index 0000000000..3b2b2e0453 --- /dev/null +++ b/third_party/rust/tokio/src/fs/remove_dir_all.rs @@ -0,0 +1,14 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Removes a directory at this path, after removing all its contents. Use carefully! 
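+///
+/// For example (the directory name is hypothetical):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// fs::remove_dir_all("./scratch").await?;
+/// # Ok(())
+/// # }
+/// ```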
+/// +/// This is an async version of [`std::fs::remove_dir_all`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.remove_dir_all.html +pub async fn remove_dir_all(path: impl AsRef<Path>) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::remove_dir_all(path)).await +} diff --git a/third_party/rust/tokio/src/fs/remove_file.rs b/third_party/rust/tokio/src/fs/remove_file.rs new file mode 100644 index 0000000000..d22a5bfc88 --- /dev/null +++ b/third_party/rust/tokio/src/fs/remove_file.rs @@ -0,0 +1,18 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Removes a file from the filesystem. +/// +/// Note that there is no guarantee that the file is immediately deleted (e.g. +/// depending on platform, other open file descriptors may prevent immediate +/// removal). +/// +/// This is an async version of [`std::fs::remove_file`][std] +/// +/// [std]: std::fs::remove_file +pub async fn remove_file(path: impl AsRef<Path>) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(move || std::fs::remove_file(path)).await +} diff --git a/third_party/rust/tokio/src/fs/rename.rs b/third_party/rust/tokio/src/fs/rename.rs new file mode 100644 index 0000000000..4f980821d2 --- /dev/null +++ b/third_party/rust/tokio/src/fs/rename.rs @@ -0,0 +1,17 @@ +use crate::fs::asyncify; + +use std::io; +use std::path::Path; + +/// Renames a file or directory to a new name, replacing the original file if +/// `to` already exists. +/// +/// This will not work if the new name is on a different mount point. +/// +/// This is an async version of [`std::fs::rename`](std::fs::rename) +pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> { + let from = from.as_ref().to_owned(); + let to = to.as_ref().to_owned(); + + asyncify(move || std::fs::rename(from, to)).await +} diff --git a/third_party/rust/tokio/src/fs/set_permissions.rs b/third_party/rust/tokio/src/fs/set_permissions.rs new file mode 100644 index 0000000000..b6249d13f0 --- /dev/null +++ b/third_party/rust/tokio/src/fs/set_permissions.rs @@ -0,0 +1,15 @@ +use crate::fs::asyncify; + +use std::fs::Permissions; +use std::io; +use std::path::Path; + +/// Changes the permissions found on a file or a directory. +/// +/// This is an async version of [`std::fs::set_permissions`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.set_permissions.html +pub async fn set_permissions(path: impl AsRef<Path>, perm: Permissions) -> io::Result<()> { + let path = path.as_ref().to_owned(); + asyncify(|| std::fs::set_permissions(path, perm)).await +} diff --git a/third_party/rust/tokio/src/fs/symlink_metadata.rs b/third_party/rust/tokio/src/fs/symlink_metadata.rs new file mode 100644 index 0000000000..682b43a70e --- /dev/null +++ b/third_party/rust/tokio/src/fs/symlink_metadata.rs @@ -0,0 +1,15 @@ +use crate::fs::asyncify; + +use std::fs::Metadata; +use std::io; +use std::path::Path; + +/// Queries the file system metadata for a path. 
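+///
+/// Unlike `metadata`, this function does not follow symlinks, so it can be
+/// used to inspect a link itself (a sketch; the path is hypothetical):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let meta = fs::symlink_metadata("foo.txt").await?;
+/// println!("is symlink: {}", meta.file_type().is_symlink());
+/// # Ok(())
+/// # }
+/// ```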
+/// +/// This is an async version of [`std::fs::symlink_metadata`][std] +/// +/// [std]: https://doc.rust-lang.org/std/fs/fn.symlink_metadata.html +pub async fn symlink_metadata(path: impl AsRef<Path>) -> io::Result<Metadata> { + let path = path.as_ref().to_owned(); + asyncify(|| std::fs::symlink_metadata(path)).await +} diff --git a/third_party/rust/tokio/src/fs/write.rs b/third_party/rust/tokio/src/fs/write.rs new file mode 100644 index 0000000000..0114cab8a8 --- /dev/null +++ b/third_party/rust/tokio/src/fs/write.rs @@ -0,0 +1,25 @@ +use crate::fs::asyncify; + +use std::{io, path::Path}; + +/// Creates a future that will open a file for writing and write the entire +/// contents of `contents` to it. +/// +/// This is the async equivalent of `std::fs::write`. +/// +/// # Examples +/// +/// ```no_run +/// use tokio::fs; +/// +/// # async fn dox() -> std::io::Result<()> { +/// fs::write("foo.txt", b"Hello world!").await?; +/// # Ok(()) +/// # } +/// ``` +pub async fn write<C: AsRef<[u8]> + Unpin>(path: impl AsRef<Path>, contents: C) -> io::Result<()> { + let path = path.as_ref().to_owned(); + let contents = contents.as_ref().to_owned(); + + asyncify(move || std::fs::write(path, contents)).await +} diff --git a/third_party/rust/tokio/src/future/maybe_done.rs b/third_party/rust/tokio/src/future/maybe_done.rs new file mode 100644 index 0000000000..1e083ad7fd --- /dev/null +++ b/third_party/rust/tokio/src/future/maybe_done.rs @@ -0,0 +1,76 @@ +//! Definition of the MaybeDone combinator + +use std::future::Future; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// A future that may have completed. +#[derive(Debug)] +pub enum MaybeDone<Fut: Future> { + /// A not-yet-completed future + Future(Fut), + /// The output of the completed future + Done(Fut::Output), + /// The empty variant after the result of a [`MaybeDone`] has been + /// taken using the [`take_output`](MaybeDone::take_output) method. + Gone, +} + +// Safe because we never generate `Pin<&mut Fut::Output>` +impl<Fut: Future + Unpin> Unpin for MaybeDone<Fut> {} + +/// Wraps a future into a `MaybeDone` +pub fn maybe_done<Fut: Future>(future: Fut) -> MaybeDone<Fut> { + MaybeDone::Future(future) +} + +impl<Fut: Future> MaybeDone<Fut> { + /// Returns an [`Option`] containing a mutable reference to the output of the future. + /// The output of this method will be [`Some`] if and only if the inner + /// future has been completed and [`take_output`](MaybeDone::take_output) + /// has not yet been called. + pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> { + unsafe { + let this = self.get_unchecked_mut(); + match this { + MaybeDone::Done(res) => Some(res), + _ => None, + } + } + } + + /// Attempts to take the output of a `MaybeDone` without driving it + /// towards completion. 
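+    ///
+    /// A sketch of the intended flow (this type is crate-internal here, so
+    /// the example is illustrative only):
+    ///
+    /// ```ignore
+    /// # async fn dox() {
+    /// let mut fut = Box::pin(maybe_done(async { 5 }));
+    /// fut.as_mut().await; // drive to completion; the output is stored
+    /// assert_eq!(fut.as_mut().take_output(), Some(5));
+    /// assert_eq!(fut.as_mut().take_output(), None); // now `Gone`
+    /// # }
+    /// ```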
+    #[inline]
+    pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Output> {
+        unsafe {
+            let this = self.get_unchecked_mut();
+            match this {
+                MaybeDone::Done(_) => {}
+                MaybeDone::Future(_) | MaybeDone::Gone => return None,
+            };
+            if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) {
+                Some(output)
+            } else {
+                unreachable!()
+            }
+        }
+    }
+}
+
+impl<Fut: Future> Future for MaybeDone<Fut> {
+    type Output = ();
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let res = unsafe {
+            match self.as_mut().get_unchecked_mut() {
+                MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)),
+                MaybeDone::Done(_) => return Poll::Ready(()),
+                MaybeDone::Gone => panic!("MaybeDone polled after value taken"),
+            }
+        };
+        self.set(MaybeDone::Done(res));
+        Poll::Ready(())
+    }
+}
diff --git a/third_party/rust/tokio/src/future/mod.rs b/third_party/rust/tokio/src/future/mod.rs
new file mode 100644
index 0000000000..770753f319
--- /dev/null
+++ b/third_party/rust/tokio/src/future/mod.rs
@@ -0,0 +1,15 @@
+#![allow(unused_imports, dead_code)]
+
+//! Asynchronous values.
+
+mod maybe_done;
+pub use maybe_done::{maybe_done, MaybeDone};
+
+mod poll_fn;
+pub use poll_fn::poll_fn;
+
+mod ready;
+pub(crate) use ready::{ok, Ready};
+
+mod try_join;
+pub(crate) use try_join::try_join3;
diff --git a/third_party/rust/tokio/src/future/pending.rs b/third_party/rust/tokio/src/future/pending.rs
new file mode 100644
index 0000000000..287e836fd3
--- /dev/null
+++ b/third_party/rust/tokio/src/future/pending.rs
@@ -0,0 +1,44 @@
+use std::future::Future;
+use std::marker;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`pending()`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+struct Pending<T> {
+    _data: marker::PhantomData<T>,
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// The returned future will forever return [`Poll::Pending`].
+///
+/// # Examples
+///
+/// ```ignore
+/// use tokio::future;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     future::pending().await;
+///     unreachable!();
+/// }
+/// ```
+pub async fn pending() -> ! {
+    Pending {
+        _data: marker::PhantomData,
+    }
+    .await
+}
+
+impl<T> Future for Pending<T> {
+    type Output = T;
+
+    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<T> {
+        Poll::Pending
+    }
+}
+
+impl<T> Unpin for Pending<T> {}
diff --git a/third_party/rust/tokio/src/future/poll_fn.rs b/third_party/rust/tokio/src/future/poll_fn.rs
new file mode 100644
index 0000000000..9b3d1370ea
--- /dev/null
+++ b/third_party/rust/tokio/src/future/poll_fn.rs
@@ -0,0 +1,38 @@
+//! Definition of the `PollFn` adapter combinator
+
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`poll_fn`] function.
+pub struct PollFn<F> {
+    f: F,
+}
+
+impl<F> Unpin for PollFn<F> {}
+
+/// Creates a new future wrapping around a function returning [`Poll`].
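+///
+/// A minimal usage sketch (this is a crate-internal helper, so the example
+/// is illustrative only):
+///
+/// ```ignore
+/// use std::task::Poll;
+///
+/// # async fn dox() {
+/// let answer = poll_fn(|_cx| Poll::Ready(42)).await;
+/// assert_eq!(answer, 42);
+/// # }
+/// ```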
+pub fn poll_fn<T, F>(f: F) -> PollFn<F> +where + F: FnMut(&mut Context<'_>) -> Poll<T>, +{ + PollFn { f } +} + +impl<F> fmt::Debug for PollFn<F> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PollFn").finish() + } +} + +impl<T, F> Future for PollFn<F> +where + F: FnMut(&mut Context<'_>) -> Poll<T>, +{ + type Output = T; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> { + (&mut self.f)(cx) + } +} diff --git a/third_party/rust/tokio/src/future/ready.rs b/third_party/rust/tokio/src/future/ready.rs new file mode 100644 index 0000000000..d74f999e5d --- /dev/null +++ b/third_party/rust/tokio/src/future/ready.rs @@ -0,0 +1,27 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Future for the [`ready`](ready()) function. +/// +/// `pub` in order to use the future as an associated type in a sealed trait. +#[derive(Debug)] +// Used as an associated type in a "sealed" trait. +#[allow(unreachable_pub)] +pub struct Ready<T>(Option<T>); + +impl<T> Unpin for Ready<T> {} + +impl<T> Future for Ready<T> { + type Output = T; + + #[inline] + fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> { + Poll::Ready(self.0.take().unwrap()) + } +} + +/// Creates a future that is immediately ready with a success value. +pub(crate) fn ok<T, E>(t: T) -> Ready<Result<T, E>> { + Ready(Some(Ok(t))) +} diff --git a/third_party/rust/tokio/src/future/try_join.rs b/third_party/rust/tokio/src/future/try_join.rs new file mode 100644 index 0000000000..5bd80dc89a --- /dev/null +++ b/third_party/rust/tokio/src/future/try_join.rs @@ -0,0 +1,82 @@ +use crate::future::{maybe_done, MaybeDone}; + +use pin_project_lite::pin_project; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pub(crate) fn try_join3<T1, F1, T2, F2, T3, F3, E>( + future1: F1, + future2: F2, + future3: F3, +) -> TryJoin3<F1, F2, F3> +where + F1: Future<Output = Result<T1, E>>, + F2: Future<Output = Result<T2, E>>, + F3: Future<Output = Result<T3, E>>, +{ + TryJoin3 { + future1: maybe_done(future1), + future2: maybe_done(future2), + future3: maybe_done(future3), + } +} + +pin_project! 
{ + pub(crate) struct TryJoin3<F1, F2, F3> + where + F1: Future, + F2: Future, + F3: Future, + { + #[pin] + future1: MaybeDone<F1>, + #[pin] + future2: MaybeDone<F2>, + #[pin] + future3: MaybeDone<F3>, + } +} + +impl<T1, F1, T2, F2, T3, F3, E> Future for TryJoin3<F1, F2, F3> +where + F1: Future<Output = Result<T1, E>>, + F2: Future<Output = Result<T2, E>>, + F3: Future<Output = Result<T3, E>>, +{ + type Output = Result<(T1, T2, T3), E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let mut all_done = true; + + let mut me = self.project(); + + if me.future1.as_mut().poll(cx).is_pending() { + all_done = false; + } else if me.future1.as_mut().output_mut().unwrap().is_err() { + return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap())); + } + + if me.future2.as_mut().poll(cx).is_pending() { + all_done = false; + } else if me.future2.as_mut().output_mut().unwrap().is_err() { + return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap())); + } + + if me.future3.as_mut().poll(cx).is_pending() { + all_done = false; + } else if me.future3.as_mut().output_mut().unwrap().is_err() { + return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap())); + } + + if all_done { + Poll::Ready(Ok(( + me.future1.take_output().unwrap().ok().unwrap(), + me.future2.take_output().unwrap().ok().unwrap(), + me.future3.take_output().unwrap().ok().unwrap(), + ))) + } else { + Poll::Pending + } + } +} diff --git a/third_party/rust/tokio/src/io/async_buf_read.rs b/third_party/rust/tokio/src/io/async_buf_read.rs new file mode 100644 index 0000000000..1ab73cd9b7 --- /dev/null +++ b/third_party/rust/tokio/src/io/async_buf_read.rs @@ -0,0 +1,115 @@ +use crate::io::AsyncRead; + +use std::io; +use std::ops::DerefMut; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Reads bytes asynchronously. +/// +/// This trait inherits from [`std::io::BufRead`] and indicates that an I/O object is +/// **non-blocking**. All non-blocking I/O objects must return an error when +/// bytes are unavailable instead of blocking the current thread. +/// +/// Utilities for working with `AsyncBufRead` values are provided by +/// [`AsyncBufReadExt`]. +/// +/// [`std::io::BufRead`]: std::io::BufRead +/// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt +pub trait AsyncBufRead: AsyncRead { + /// Attempts to return the contents of the internal buffer, filling it with more data + /// from the inner reader if it is empty. + /// + /// On success, returns `Poll::Ready(Ok(buf))`. + /// + /// If no data is available for reading, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// readable or is closed. + /// + /// This function is a lower-level call. It needs to be paired with the + /// [`consume`] method to function properly. When calling this + /// method, none of the contents will be "read" in the sense that later + /// calling [`poll_read`] may return the same contents. As such, [`consume`] must + /// be called with the number of bytes that are consumed from this buffer to + /// ensure that the bytes are never returned twice. + /// + /// An empty buffer returned indicates that the stream has reached EOF. 
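+    ///
+    /// Most code uses the higher-level `AsyncBufReadExt` utilities instead of
+    /// calling this method directly; a sketch (assumes tokio's `io-util`
+    /// feature is enabled):
+    ///
+    /// ```ignore
+    /// use tokio::io::AsyncBufReadExt;
+    ///
+    /// # async fn dox<R: tokio::io::AsyncBufRead + Unpin>(mut rd: R) -> std::io::Result<()> {
+    /// let mut line = String::new();
+    /// rd.read_line(&mut line).await?;
+    /// # Ok(())
+    /// # }
+    /// ```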
+ /// + /// [`poll_read`]: AsyncRead::poll_read + /// [`consume`]: AsyncBufRead::consume + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>>; + + /// Tells this buffer that `amt` bytes have been consumed from the buffer, + /// so they should no longer be returned in calls to [`poll_read`]. + /// + /// This function is a lower-level call. It needs to be paired with the + /// [`poll_fill_buf`] method to function properly. This function does + /// not perform any I/O, it simply informs this object that some amount of + /// its buffer, returned from [`poll_fill_buf`], has been consumed and should + /// no longer be returned. As such, this function may do odd things if + /// [`poll_fill_buf`] isn't called before calling it. + /// + /// The `amt` must be `<=` the number of bytes in the buffer returned by + /// [`poll_fill_buf`]. + /// + /// [`poll_read`]: AsyncRead::poll_read + /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf + fn consume(self: Pin<&mut Self>, amt: usize); +} + +macro_rules! deref_async_buf_read { + () => { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) + -> Poll<io::Result<&[u8]>> + { + Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) + } + + fn consume(mut self: Pin<&mut Self>, amt: usize) { + Pin::new(&mut **self).consume(amt) + } + } +} + +impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for Box<T> { + deref_async_buf_read!(); +} + +impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for &mut T { + deref_async_buf_read!(); +} + +impl<P> AsyncBufRead for Pin<P> +where + P: DerefMut + Unpin, + P::Target: AsyncBufRead, +{ + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + self.get_mut().as_mut().poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.get_mut().as_mut().consume(amt) + } +} + +impl AsyncBufRead for &[u8] { + fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + Poll::Ready(Ok(*self)) + } + + fn consume(mut self: Pin<&mut Self>, amt: usize) { + *self = &self[amt..]; + } +} + +impl<T: AsRef<[u8]> + Unpin> AsyncBufRead for io::Cursor<T> { + fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + Poll::Ready(io::BufRead::fill_buf(self.get_mut())) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + io::BufRead::consume(self.get_mut(), amt) + } +} diff --git a/third_party/rust/tokio/src/io/async_read.rs b/third_party/rust/tokio/src/io/async_read.rs new file mode 100644 index 0000000000..de08d65810 --- /dev/null +++ b/third_party/rust/tokio/src/io/async_read.rs @@ -0,0 +1,203 @@ +use bytes::BufMut; +use std::io; +use std::mem::MaybeUninit; +use std::ops::DerefMut; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Reads bytes from a source. +/// +/// This trait is analogous to the [`std::io::Read`] trait, but integrates with +/// the asynchronous task system. In particular, the [`poll_read`] method, +/// unlike [`Read::read`], will automatically queue the current task for wakeup +/// and return if data is not yet available, rather than blocking the calling +/// thread. +/// +/// Specifically, this means that the `poll_read` function will return one of +/// the following: +/// +/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately read +/// and placed into the output buffer, where `n` == 0 implies that EOF has +/// been reached. +/// +/// * `Poll::Pending` means that no data was read into the buffer +/// provided. 
The I/O object is not currently readable but may become readable +/// in the future. Most importantly, **the current future's task is scheduled +/// to get unparked when the object is readable**. This means that like +/// `Future::poll` you'll receive a notification when the I/O object is +/// readable again. +/// +/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the +/// underlying object. +/// +/// This trait importantly means that the `read` method only works in the +/// context of a future's task. The object may panic if used outside of a task. +/// +/// Utilities for working with `AsyncRead` values are provided by +/// [`AsyncReadExt`]. +/// +/// [`poll_read`]: AsyncRead::poll_read +/// [`std::io::Read`]: std::io::Read +/// [`Read::read`]: std::io::Read::read +/// [`AsyncReadExt`]: crate::io::AsyncReadExt +pub trait AsyncRead { + /// Prepares an uninitialized buffer to be safe to pass to `read`. Returns + /// `true` if the supplied buffer was zeroed out. + /// + /// While it would be highly unusual, implementations of [`io::Read`] are + /// able to read data from the buffer passed as an argument. Because of + /// this, the buffer passed to [`io::Read`] must be initialized memory. In + /// situations where large numbers of buffers are used, constantly having to + /// zero out buffers can be expensive. + /// + /// This function does any necessary work to prepare an uninitialized buffer + /// to be safe to pass to `read`. If `read` guarantees to never attempt to + /// read data out of the supplied buffer, then `prepare_uninitialized_buffer` + /// doesn't need to do any work. + /// + /// If this function returns `true`, then the memory has been zeroed out. + /// This allows implementations of `AsyncRead` which are composed of + /// multiple subimplementations to efficiently implement + /// `prepare_uninitialized_buffer`. + /// + /// This function isn't actually `unsafe` to call but `unsafe` to implement. + /// The implementer must ensure that either the whole `buf` has been zeroed + /// or `poll_read_buf()` overwrites the buffer without reading it and returns + /// correct value. + /// + /// This function is called from [`poll_read_buf`]. + /// + /// # Safety + /// + /// Implementations that return `false` must never read from data slices + /// that they did not write to. + /// + /// [`io::Read`]: std::io::Read + /// [`poll_read_buf`]: #method.poll_read_buf + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + for x in buf { + *x.as_mut_ptr() = 0; + } + + true + } + + /// Attempts to read from the `AsyncRead` into `buf`. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. + /// + /// If no data is available for reading, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker()`) to receive a notification when the object becomes + /// readable or is closed. + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>>; + + /// Pulls some bytes from this source into the specified `BufMut`, returning + /// how many bytes were read. + /// + /// The `buf` provided will have bytes read into it and the internal cursor + /// will be advanced if any bytes were read. Note that this method typically + /// will not reallocate the buffer provided. 
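+    ///
+    /// A sketch of driving this method from async code, using a
+    /// `poll_fn`-style helper (e.g. the one in the `futures` crate, an
+    /// assumption here) together with a `bytes::BytesMut` buffer:
+    ///
+    /// ```ignore
+    /// use bytes::BytesMut;
+    /// use futures::future::poll_fn;
+    /// use std::pin::Pin;
+    /// use tokio::io::AsyncRead;
+    ///
+    /// # async fn dox<R: AsyncRead + Unpin>(mut rd: R) -> std::io::Result<()> {
+    /// let mut buf = BytesMut::with_capacity(4096);
+    /// let n = poll_fn(|cx| Pin::new(&mut rd).poll_read_buf(cx, &mut buf)).await?;
+    /// println!("read {} bytes", n);
+    /// # Ok(())
+    /// # }
+    /// ```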
+ fn poll_read_buf<B: BufMut>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<io::Result<usize>> + where + Self: Sized, + { + if !buf.has_remaining_mut() { + return Poll::Ready(Ok(0)); + } + + unsafe { + let n = { + let b = buf.bytes_mut(); + + self.prepare_uninitialized_buffer(b); + + // Convert to `&mut [u8]` + let b = &mut *(b as *mut [MaybeUninit<u8>] as *mut [u8]); + + let n = ready!(self.poll_read(cx, b))?; + assert!(n <= b.len(), "Bad AsyncRead implementation, more bytes were reported as read than the buffer can hold"); + n + }; + + buf.advance_mut(n); + Poll::Ready(Ok(n)) + } + } +} + +macro_rules! deref_async_read { + () => { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + (**self).prepare_uninitialized_buffer(buf) + } + + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) + -> Poll<io::Result<usize>> + { + Pin::new(&mut **self).poll_read(cx, buf) + } + } +} + +impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for Box<T> { + deref_async_read!(); +} + +impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for &mut T { + deref_async_read!(); +} + +impl<P> AsyncRead for Pin<P> +where + P: DerefMut + Unpin, + P::Target: AsyncRead, +{ + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + (**self).prepare_uninitialized_buffer(buf) + } + + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + self.get_mut().as_mut().poll_read(cx, buf) + } +} + +impl AsyncRead for &[u8] { + unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool { + false + } + + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Read::read(self.get_mut(), buf)) + } +} + +impl<T: AsRef<[u8]> + Unpin> AsyncRead for io::Cursor<T> { + unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool { + false + } + + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Read::read(self.get_mut(), buf)) + } +} diff --git a/third_party/rust/tokio/src/io/async_seek.rs b/third_party/rust/tokio/src/io/async_seek.rs new file mode 100644 index 0000000000..0be9c90d56 --- /dev/null +++ b/third_party/rust/tokio/src/io/async_seek.rs @@ -0,0 +1,104 @@ +use std::io::{self, SeekFrom}; +use std::ops::DerefMut; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Seek bytes asynchronously. +/// +/// This trait is analogous to the [`std::io::Seek`] trait, but integrates +/// with the asynchronous task system. In particular, the `start_seek` +/// method, unlike [`Seek::seek`], will not block the calling thread. +/// +/// Utilities for working with `AsyncSeek` values are provided by +/// [`AsyncSeekExt`]. +/// +/// [`std::io::Seek`]: std::io::Seek +/// [`Seek::seek`]: std::io::Seek::seek() +/// [`AsyncSeekExt`]: crate::io::AsyncSeekExt +pub trait AsyncSeek { + /// Attempts to seek to an offset, in bytes, in a stream. + /// + /// A seek beyond the end of a stream is allowed, but behavior is defined + /// by the implementation. + /// + /// If this function returns successfully, then the job has been submitted. + /// To find out when it completes, call `poll_complete`. + fn start_seek( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + position: SeekFrom, + ) -> Poll<io::Result<()>>; + + /// Waits for a seek operation to complete. 
+ /// + /// If the seek operation completed successfully, + /// this method returns the new position from the start of the stream. + /// That position can be used later with [`SeekFrom::Start`]. + /// + /// # Errors + /// + /// Seeking to a negative offset is considered an error. + /// + /// # Panics + /// + /// Calling this method without calling `start_seek` first is an error. + fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>>; +} + +macro_rules! deref_async_seek { + () => { + fn start_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll<io::Result<()>> { + Pin::new(&mut **self).start_seek(cx, pos) + } + + fn poll_complete( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<io::Result<u64>> { + Pin::new(&mut **self).poll_complete(cx) + } + } +} + +impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for Box<T> { + deref_async_seek!(); +} + +impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for &mut T { + deref_async_seek!(); +} + +impl<P> AsyncSeek for Pin<P> +where + P: DerefMut + Unpin, + P::Target: AsyncSeek, +{ + fn start_seek( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll<io::Result<()>> { + self.get_mut().as_mut().start_seek(cx, pos) + } + + fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> { + self.get_mut().as_mut().poll_complete(cx) + } +} + +impl<T: AsRef<[u8]> + Unpin> AsyncSeek for io::Cursor<T> { + fn start_seek( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll<io::Result<()>> { + Poll::Ready(io::Seek::seek(&mut *self, pos).map(drop)) + } + fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> { + Poll::Ready(Ok(self.get_mut().position())) + } +} diff --git a/third_party/rust/tokio/src/io/async_write.rs b/third_party/rust/tokio/src/io/async_write.rs new file mode 100644 index 0000000000..0bfed056ef --- /dev/null +++ b/third_party/rust/tokio/src/io/async_write.rs @@ -0,0 +1,291 @@ +use bytes::Buf; +use std::io; +use std::ops::DerefMut; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Writes bytes asynchronously. +/// +/// The trait inherits from [`std::io::Write`] and indicates that an I/O object is +/// **nonblocking**. All non-blocking I/O objects must return an error when +/// bytes cannot be written instead of blocking the current thread. +/// +/// Specifically, this means that the [`poll_write`] function will return one of +/// the following: +/// +/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately +/// written. +/// +/// * `Poll::Pending` means that no data was written from the buffer +/// provided. The I/O object is not currently writable but may become writable +/// in the future. Most importantly, **the current future's task is scheduled +/// to get unparked when the object is writable**. This means that like +/// `Future::poll` you'll receive a notification when the I/O object is +/// writable again. +/// +/// * `Poll::Ready(Err(e))` for other errors are standard I/O errors coming from the +/// underlying object. +/// +/// This trait importantly means that the [`write`][stdwrite] method only works in +/// the context of a future's task. The object may panic if used outside of a task. 
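+///
+/// In application code, the [`AsyncWriteExt`] utilities are the usual entry
+/// point rather than the `poll_*` methods; a sketch:
+///
+/// ```ignore
+/// use tokio::io::AsyncWriteExt;
+///
+/// # async fn dox<W: tokio::io::AsyncWrite + Unpin>(mut wr: W) -> std::io::Result<()> {
+/// wr.write_all(b"hello").await?;
+/// wr.flush().await?;
+/// # Ok(())
+/// # }
+/// ```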
+///
+/// Note that this trait also represents that the [`Write::flush`][stdflush] method
+/// works very similarly to the `write` method, notably that `Ok(())` means that the
+/// writer has successfully been flushed, a "would block" error means that the
+/// current task is ready to receive a notification when flushing can make more
+/// progress, and otherwise normal errors can happen as well.
+///
+/// Utilities for working with `AsyncWrite` values are provided by
+/// [`AsyncWriteExt`].
+///
+/// [`std::io::Write`]: std::io::Write
+/// [`poll_write`]: AsyncWrite::poll_write()
+/// [stdwrite]: std::io::Write::write()
+/// [stdflush]: std::io::Write::flush()
+/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt
+pub trait AsyncWrite {
+    /// Attempts to write bytes from `buf` into the object.
+    ///
+    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
+    ///
+    /// If the object is not ready for writing, the method returns
+    /// `Poll::Pending` and arranges for the current task (via
+    /// `cx.waker()`) to receive a notification when the object becomes
+    /// writable or is closed.
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<Result<usize, io::Error>>;
+
+    /// Attempts to flush the object, ensuring that any buffered data reach
+    /// their destination.
+    ///
+    /// On success, returns `Poll::Ready(Ok(()))`.
+    ///
+    /// If flushing cannot immediately complete, this method returns
+    /// `Poll::Pending` and arranges for the current task (via
+    /// `cx.waker()`) to receive a notification when the object can make
+    /// progress towards flushing.
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>>;
+
+    /// Initiates or attempts to shut down this writer, returning success when
+    /// the I/O connection has completely shut down.
+    ///
+    /// This method is intended to be used for asynchronous shutdown of I/O
+    /// connections. For example, this is suitable for implementing shutdown of a
+    /// TLS connection or calling `TcpStream::shutdown` on a proxied connection.
+    /// Protocols sometimes need to flush out final pieces of data or otherwise
+    /// perform a graceful shutdown handshake, reading/writing more data as
+    /// appropriate. This method is the hook for such protocols to implement the
+    /// graceful shutdown logic.
+    ///
+    /// This `shutdown` method is required by implementers of the
+    /// `AsyncWrite` trait. Wrappers typically just want to proxy this call
+    /// through to the wrapped type, and base types will typically implement
+    /// shutdown logic here or just return `Ok(().into())`. Note that if you're
+    /// wrapping an underlying `AsyncWrite`, a call to `shutdown` implies that
+    /// transitively the entire stream has been shut down. After your wrapper's
+    /// shutdown logic has been executed you should shut down the underlying
+    /// stream.
+    ///
+    /// Invocation of a `shutdown` implies an invocation of `flush`. Once this
+    /// method returns `Ready` it implies that a flush successfully happened
+    /// before the shutdown happened. That is, callers don't need to call
+    /// `flush` before calling `shutdown`; they can rely on `shutdown` to write
+    /// out any pending buffered data.
+    ///
+    /// # Return value
+    ///
+    /// This function returns a `Poll<io::Result<()>>` classified as such:
+    ///
+    /// * `Poll::Ready(Ok(()))` - indicates that the connection was
+    ///   successfully shut down and is now safe to deallocate/drop/close
+    ///   resources associated with it.
This method means that the current task + /// will no longer receive any notifications due to this method and the + /// I/O object itself is likely no longer usable. + /// + /// * `Poll::Pending` - indicates that shutdown is initiated but could + /// not complete just yet. This may mean that more I/O needs to happen to + /// continue this shutdown operation. The current task is scheduled to + /// receive a notification when it's otherwise ready to continue the + /// shutdown operation. When woken up this method should be called again. + /// + /// * `Poll::Ready(Err(e))` - indicates a fatal error has happened with shutdown, + /// indicating that the shutdown operation did not complete successfully. + /// This typically means that the I/O object is no longer usable. + /// + /// # Errors + /// + /// This function can return normal I/O errors through `Err`, described + /// above. Additionally this method may also render the underlying + /// `Write::write` method no longer usable (e.g. will return errors in the + /// future). It's recommended that once `shutdown` is called the + /// `write` method is no longer called. + /// + /// # Panics + /// + /// This function will panic if not called within the context of a future's + /// task. + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>>; + + /// Writes a `Buf` into this value, returning how many bytes were written. + /// + /// Note that this method will advance the `buf` provided automatically by + /// the number of bytes written. + fn poll_write_buf<B: Buf>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<Result<usize, io::Error>> + where + Self: Sized, + { + if !buf.has_remaining() { + return Poll::Ready(Ok(0)); + } + + let n = ready!(self.poll_write(cx, buf.bytes()))?; + buf.advance(n); + Poll::Ready(Ok(n)) + } +} + +macro_rules! 
deref_async_write { + () => { + fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) + -> Poll<io::Result<usize>> + { + Pin::new(&mut **self).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Pin::new(&mut **self).poll_shutdown(cx) + } + } +} + +impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> { + deref_async_write!(); +} + +impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T { + deref_async_write!(); +} + +impl<P> AsyncWrite for Pin<P> +where + P: DerefMut + Unpin, + P::Target: AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.get_mut().as_mut().poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.get_mut().as_mut().poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.get_mut().as_mut().poll_shutdown(cx) + } +} + +impl AsyncWrite for Vec<u8> { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.get_mut().extend_from_slice(buf); + Poll::Ready(Ok(buf.len())) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(Ok(())) + } +} + +impl AsyncWrite for io::Cursor<&mut [u8]> { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Write::write(&mut *self, buf)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(io::Write::flush(&mut *self)) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.poll_flush(cx) + } +} + +impl AsyncWrite for io::Cursor<&mut Vec<u8>> { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Write::write(&mut *self, buf)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(io::Write::flush(&mut *self)) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.poll_flush(cx) + } +} + +impl AsyncWrite for io::Cursor<Vec<u8>> { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Write::write(&mut *self, buf)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(io::Write::flush(&mut *self)) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.poll_flush(cx) + } +} + +impl AsyncWrite for io::Cursor<Box<[u8]>> { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(io::Write::write(&mut *self, buf)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(io::Write::flush(&mut *self)) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.poll_flush(cx) + } +} diff --git 
a/third_party/rust/tokio/src/io/blocking.rs b/third_party/rust/tokio/src/io/blocking.rs new file mode 100644 index 0000000000..2491039a3f --- /dev/null +++ b/third_party/rust/tokio/src/io/blocking.rs @@ -0,0 +1,279 @@ +use crate::io::sys; +use crate::io::{AsyncRead, AsyncWrite}; + +use std::cmp; +use std::future::Future; +use std::io; +use std::io::prelude::*; +use std::pin::Pin; +use std::task::Poll::*; +use std::task::{Context, Poll}; + +use self::State::*; + +/// `T` should not implement _both_ Read and Write. +#[derive(Debug)] +pub(crate) struct Blocking<T> { + inner: Option<T>, + state: State<T>, + /// `true` if the lower IO layer needs flushing + need_flush: bool, +} + +#[derive(Debug)] +pub(crate) struct Buf { + buf: Vec<u8>, + pos: usize, +} + +pub(crate) const MAX_BUF: usize = 16 * 1024; + +#[derive(Debug)] +enum State<T> { + Idle(Option<Buf>), + Busy(sys::Blocking<(io::Result<usize>, Buf, T)>), +} + +cfg_io_std! { + impl<T> Blocking<T> { + pub(crate) fn new(inner: T) -> Blocking<T> { + Blocking { + inner: Some(inner), + state: State::Idle(Some(Buf::with_capacity(0))), + need_flush: false, + } + } + } +} + +impl<T> AsyncRead for Blocking<T> +where + T: Read + Unpin + Send + 'static, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + dst: &mut [u8], + ) -> Poll<io::Result<usize>> { + loop { + match self.state { + Idle(ref mut buf_cell) => { + let mut buf = buf_cell.take().unwrap(); + + if !buf.is_empty() { + let n = buf.copy_to(dst); + *buf_cell = Some(buf); + return Ready(Ok(n)); + } + + buf.ensure_capacity_for(dst); + let mut inner = self.inner.take().unwrap(); + + self.state = Busy(sys::run(move || { + let res = buf.read_from(&mut inner); + (res, buf, inner) + })); + } + Busy(ref mut rx) => { + let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?; + self.inner = Some(inner); + + match res { + Ok(_) => { + let n = buf.copy_to(dst); + self.state = Idle(Some(buf)); + return Ready(Ok(n)); + } + Err(e) => { + assert!(buf.is_empty()); + + self.state = Idle(Some(buf)); + return Ready(Err(e)); + } + } + } + } + } + } +} + +impl<T> AsyncWrite for Blocking<T> +where + T: Write + Unpin + Send + 'static, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + src: &[u8], + ) -> Poll<io::Result<usize>> { + loop { + match self.state { + Idle(ref mut buf_cell) => { + let mut buf = buf_cell.take().unwrap(); + + assert!(buf.is_empty()); + + let n = buf.copy_from(src); + let mut inner = self.inner.take().unwrap(); + + self.state = Busy(sys::run(move || { + let n = buf.len(); + let res = buf.write_to(&mut inner).map(|_| n); + + (res, buf, inner) + })); + self.need_flush = true; + + return Ready(Ok(n)); + } + Busy(ref mut rx) => { + let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; + self.state = Idle(Some(buf)); + self.inner = Some(inner); + + // If error, return + res?; + } + } + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + loop { + let need_flush = self.need_flush; + match self.state { + // The buffer is not used here + Idle(ref mut buf_cell) => { + if need_flush { + let buf = buf_cell.take().unwrap(); + let mut inner = self.inner.take().unwrap(); + + self.state = Busy(sys::run(move || { + let res = inner.flush().map(|_| 0); + (res, buf, inner) + })); + + self.need_flush = false; + } else { + return Ready(Ok(())); + } + } + Busy(ref mut rx) => { + let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?; + self.state = Idle(Some(buf)); + self.inner = Some(inner); + + // If error, 
return
+                    res?;
+                }
+            }
+        }
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+        Poll::Ready(Ok(()))
+    }
+}
+
+/// Repeats operations that are interrupted
+macro_rules! uninterruptibly {
+    ($e:expr) => {{
+        loop {
+            match $e {
+                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+                res => break res,
+            }
+        }
+    }};
+}
+
+impl Buf {
+    pub(crate) fn with_capacity(n: usize) -> Buf {
+        Buf {
+            buf: Vec::with_capacity(n),
+            pos: 0,
+        }
+    }
+
+    pub(crate) fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub(crate) fn len(&self) -> usize {
+        self.buf.len() - self.pos
+    }
+
+    pub(crate) fn copy_to(&mut self, dst: &mut [u8]) -> usize {
+        let n = cmp::min(self.len(), dst.len());
+        dst[..n].copy_from_slice(&self.bytes()[..n]);
+        self.pos += n;
+
+        if self.pos == self.buf.len() {
+            self.buf.truncate(0);
+            self.pos = 0;
+        }
+
+        n
+    }
+
+    pub(crate) fn copy_from(&mut self, src: &[u8]) -> usize {
+        assert!(self.is_empty());
+
+        let n = cmp::min(src.len(), MAX_BUF);
+
+        self.buf.extend_from_slice(&src[..n]);
+        n
+    }
+
+    pub(crate) fn bytes(&self) -> &[u8] {
+        &self.buf[self.pos..]
+    }
+
+    pub(crate) fn ensure_capacity_for(&mut self, bytes: &[u8]) {
+        assert!(self.is_empty());
+
+        let len = cmp::min(bytes.len(), MAX_BUF);
+
+        if self.buf.len() < len {
+            self.buf.reserve(len - self.buf.len());
+        }
+
+        unsafe {
+            self.buf.set_len(len);
+        }
+    }
+
+    pub(crate) fn read_from<T: Read>(&mut self, rd: &mut T) -> io::Result<usize> {
+        let res = uninterruptibly!(rd.read(&mut self.buf));
+
+        if let Ok(n) = res {
+            self.buf.truncate(n);
+        } else {
+            self.buf.clear();
+        }
+
+        assert_eq!(self.pos, 0);
+
+        res
+    }
+
+    pub(crate) fn write_to<T: Write>(&mut self, wr: &mut T) -> io::Result<()> {
+        assert_eq!(self.pos, 0);
+
+        // `write_all` already ignores interrupts
+        let res = wr.write_all(&self.buf);
+        self.buf.clear();
+        res
+    }
+}
+
+cfg_fs! {
+    impl Buf {
+        pub(crate) fn discard_read(&mut self) -> i64 {
+            let ret = -(self.bytes().len() as i64);
+            self.pos = 0;
+            self.buf.truncate(0);
+            ret
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/io/driver/mod.rs b/third_party/rust/tokio/src/io/driver/mod.rs
new file mode 100644
index 0000000000..d8535d9ab2
--- /dev/null
+++ b/third_party/rust/tokio/src/io/driver/mod.rs
@@ -0,0 +1,396 @@
+pub(crate) mod platform;
+
+mod scheduled_io;
+pub(crate) use scheduled_io::ScheduledIo; // pub(crate) for tests
+
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::park::{Park, Unpark};
+use crate::runtime::context;
+use crate::util::slab::{Address, Slab};
+
+use mio::event::Evented;
+use std::fmt;
+use std::io;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Weak};
+use std::task::Waker;
+use std::time::Duration;
+
+/// I/O driver, backed by Mio
+pub(crate) struct Driver {
+    /// Reuse the `mio::Events` value across calls to poll.
+    events: mio::Events,
+
+    /// State shared between the reactor and the handles.
+    inner: Arc<Inner>,
+
+    _wakeup_registration: mio::Registration,
+}
+
+/// A reference to an I/O driver
+#[derive(Clone)]
+pub(crate) struct Handle {
+    inner: Weak<Inner>,
+}
+
+pub(super) struct Inner {
+    /// The underlying system event queue.
+    io: mio::Poll,
+
+    /// Dispatch slabs for I/O and futures events
+    pub(super) io_dispatch: Slab<ScheduledIo>,
+
+    /// The number of sources in `io_dispatch`.
+ n_sources: AtomicUsize, + + /// Used to wake up the reactor from a call to `turn` + wakeup: mio::SetReadiness, +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy)] +pub(super) enum Direction { + Read, + Write, +} + +const TOKEN_WAKEUP: mio::Token = mio::Token(Address::NULL); + +fn _assert_kinds() { + fn _assert<T: Send + Sync>() {} + + _assert::<Handle>(); +} + +// ===== impl Driver ===== + +impl Driver { + /// Creates a new event loop, returning any error that happened during the + /// creation. + pub(crate) fn new() -> io::Result<Driver> { + let io = mio::Poll::new()?; + let wakeup_pair = mio::Registration::new2(); + + io.register( + &wakeup_pair.0, + TOKEN_WAKEUP, + mio::Ready::readable(), + mio::PollOpt::level(), + )?; + + Ok(Driver { + events: mio::Events::with_capacity(1024), + _wakeup_registration: wakeup_pair.0, + inner: Arc::new(Inner { + io, + io_dispatch: Slab::new(), + n_sources: AtomicUsize::new(0), + wakeup: wakeup_pair.1, + }), + }) + } + + /// Returns a handle to this event loop which can be sent across threads + /// and can be used as a proxy to the event loop itself. + /// + /// Handles are cloneable and clones always refer to the same event loop. + /// This handle is typically passed into functions that create I/O objects + /// to bind them to this event loop. + pub(crate) fn handle(&self) -> Handle { + Handle { + inner: Arc::downgrade(&self.inner), + } + } + + fn turn(&mut self, max_wait: Option<Duration>) -> io::Result<()> { + // Block waiting for an event to happen, peeling out how many events + // happened. + match self.inner.io.poll(&mut self.events, max_wait) { + Ok(_) => {} + Err(e) => return Err(e), + } + + // Process all the events that came in, dispatching appropriately + + for event in self.events.iter() { + let token = event.token(); + + if token == TOKEN_WAKEUP { + self.inner + .wakeup + .set_readiness(mio::Ready::empty()) + .unwrap(); + } else { + self.dispatch(token, event.readiness()); + } + } + + Ok(()) + } + + fn dispatch(&self, token: mio::Token, ready: mio::Ready) { + let mut rd = None; + let mut wr = None; + + let address = Address::from_usize(token.0); + + let io = match self.inner.io_dispatch.get(address) { + Some(io) => io, + None => return, + }; + + if io + .set_readiness(address, |curr| curr | ready.as_usize()) + .is_err() + { + // token no longer valid! + return; + } + + if ready.is_writable() || platform::is_hup(ready) || platform::is_error(ready) { + wr = io.writer.take_waker(); + } + + if !(ready & (!mio::Ready::writable())).is_empty() { + rd = io.reader.take_waker(); + } + + if let Some(w) = rd { + w.wake(); + } + + if let Some(w) = wr { + w.wake(); + } + } +} + +impl Park for Driver { + type Unpark = Handle; + type Error = io::Error; + + fn unpark(&self) -> Self::Unpark { + self.handle() + } + + fn park(&mut self) -> io::Result<()> { + self.turn(None)?; + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> io::Result<()> { + self.turn(Some(duration))?; + Ok(()) + } +} + +impl fmt::Debug for Driver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Driver") + } +} + +// ===== impl Handle ===== + +impl Handle { + /// Returns a handle to the current reactor + /// + /// # Panics + /// + /// This function panics if there is no current reactor set. 
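+    ///
+    /// A sketch of the expected calling context (crate-internal API):
+    ///
+    /// ```ignore
+    /// // Must execute inside a Tokio runtime with the I/O driver enabled:
+    /// let handle = Handle::current();
+    /// ```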
+ pub(super) fn current() -> Self { + context::io_handle() + .expect("there is no reactor running, must be called from the context of Tokio runtime") + } + + /// Forces a reactor blocked in a call to `turn` to wakeup, or otherwise + /// makes the next call to `turn` return immediately. + /// + /// This method is intended to be used in situations where a notification + /// needs to otherwise be sent to the main reactor. If the reactor is + /// currently blocked inside of `turn` then it will wake up and soon return + /// after this method has been called. If the reactor is not currently + /// blocked in `turn`, then the next call to `turn` will not block and + /// return immediately. + fn wakeup(&self) { + if let Some(inner) = self.inner() { + inner.wakeup.set_readiness(mio::Ready::readable()).unwrap(); + } + } + + pub(super) fn inner(&self) -> Option<Arc<Inner>> { + self.inner.upgrade() + } +} + +impl Unpark for Handle { + fn unpark(&self) { + self.wakeup(); + } +} + +impl fmt::Debug for Handle { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Handle") + } +} + +// ===== impl Inner ===== + +impl Inner { + /// Registers an I/O resource with the reactor. + /// + /// The registration token is returned. + pub(super) fn add_source(&self, source: &dyn Evented) -> io::Result<Address> { + let address = self.io_dispatch.alloc().ok_or_else(|| { + io::Error::new( + io::ErrorKind::Other, + "reactor at max registered I/O resources", + ) + })?; + + self.n_sources.fetch_add(1, SeqCst); + + self.io.register( + source, + mio::Token(address.to_usize()), + mio::Ready::all(), + mio::PollOpt::edge(), + )?; + + Ok(address) + } + + /// Deregisters an I/O resource from the reactor. + pub(super) fn deregister_source(&self, source: &dyn Evented) -> io::Result<()> { + self.io.deregister(source) + } + + pub(super) fn drop_source(&self, address: Address) { + self.io_dispatch.remove(address); + self.n_sources.fetch_sub(1, SeqCst); + } + + /// Registers interest in the I/O resource associated with `token`. + pub(super) fn register(&self, token: Address, dir: Direction, w: Waker) { + let sched = self + .io_dispatch + .get(token) + .unwrap_or_else(|| panic!("IO resource for token {:?} does not exist!", token)); + + let waker = match dir { + Direction::Read => &sched.reader, + Direction::Write => &sched.writer, + }; + + waker.register(w); + } +} + +impl Direction { + pub(super) fn mask(self) -> mio::Ready { + match self { + Direction::Read => { + // Everything except writable is signaled through read. + mio::Ready::all() - mio::Ready::writable() + } + Direction::Write => mio::Ready::writable() | platform::hup() | platform::error(), + } + } +} + +#[cfg(all(test, loom))] +mod tests { + use super::*; + use loom::thread; + + // No-op `Evented` impl just so we can have something to pass to `add_source`. 
+ struct NotEvented; + + impl Evented for NotEvented { + fn register( + &self, + _: &mio::Poll, + _: mio::Token, + _: mio::Ready, + _: mio::PollOpt, + ) -> io::Result<()> { + Ok(()) + } + + fn reregister( + &self, + _: &mio::Poll, + _: mio::Token, + _: mio::Ready, + _: mio::PollOpt, + ) -> io::Result<()> { + Ok(()) + } + + fn deregister(&self, _: &mio::Poll) -> io::Result<()> { + Ok(()) + } + } + + #[test] + fn tokens_unique_when_dropped() { + loom::model(|| { + let reactor = Driver::new().unwrap(); + let inner = reactor.inner; + let inner2 = inner.clone(); + + let token_1 = inner.add_source(&NotEvented).unwrap(); + let thread = thread::spawn(move || { + inner2.drop_source(token_1); + }); + + let token_2 = inner.add_source(&NotEvented).unwrap(); + thread.join().unwrap(); + + assert!(token_1 != token_2); + }) + } + + #[test] + fn tokens_unique_when_dropped_on_full_page() { + loom::model(|| { + let reactor = Driver::new().unwrap(); + let inner = reactor.inner; + let inner2 = inner.clone(); + // add sources to fill up the first page so that the dropped index + // may be reused. + for _ in 0..31 { + inner.add_source(&NotEvented).unwrap(); + } + + let token_1 = inner.add_source(&NotEvented).unwrap(); + let thread = thread::spawn(move || { + inner2.drop_source(token_1); + }); + + let token_2 = inner.add_source(&NotEvented).unwrap(); + thread.join().unwrap(); + + assert!(token_1 != token_2); + }) + } + + #[test] + fn tokens_unique_concurrent_add() { + loom::model(|| { + let reactor = Driver::new().unwrap(); + let inner = reactor.inner; + let inner2 = inner.clone(); + + let thread = thread::spawn(move || { + let token_2 = inner2.add_source(&NotEvented).unwrap(); + token_2 + }); + + let token_1 = inner.add_source(&NotEvented).unwrap(); + let token_2 = thread.join().unwrap(); + + assert!(token_1 != token_2); + }) + } +} diff --git a/third_party/rust/tokio/src/io/driver/platform.rs b/third_party/rust/tokio/src/io/driver/platform.rs new file mode 100644 index 0000000000..6b27988ce6 --- /dev/null +++ b/third_party/rust/tokio/src/io/driver/platform.rs @@ -0,0 +1,44 @@ +pub(crate) use self::sys::*; + +#[cfg(unix)] +mod sys { + use mio::unix::UnixReady; + use mio::Ready; + + pub(crate) fn hup() -> Ready { + UnixReady::hup().into() + } + + pub(crate) fn is_hup(ready: Ready) -> bool { + UnixReady::from(ready).is_hup() + } + + pub(crate) fn error() -> Ready { + UnixReady::error().into() + } + + pub(crate) fn is_error(ready: Ready) -> bool { + UnixReady::from(ready).is_error() + } +} + +#[cfg(windows)] +mod sys { + use mio::Ready; + + pub(crate) fn hup() -> Ready { + Ready::empty() + } + + pub(crate) fn is_hup(_: Ready) -> bool { + false + } + + pub(crate) fn error() -> Ready { + Ready::empty() + } + + pub(crate) fn is_error(_: Ready) -> bool { + false + } +} diff --git a/third_party/rust/tokio/src/io/driver/scheduled_io.rs b/third_party/rust/tokio/src/io/driver/scheduled_io.rs new file mode 100644 index 0000000000..7f6446e3f5 --- /dev/null +++ b/third_party/rust/tokio/src/io/driver/scheduled_io.rs @@ -0,0 +1,141 @@ +use crate::loom::future::AtomicWaker; +use crate::loom::sync::atomic::AtomicUsize; +use crate::util::bit; +use crate::util::slab::{Address, Entry, Generation}; + +use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst}; + +#[derive(Debug)] +pub(crate) struct ScheduledIo { + readiness: AtomicUsize, + pub(crate) reader: AtomicWaker, + pub(crate) writer: AtomicWaker, +} + +const PACK: bit::Pack = bit::Pack::most_significant(Generation::WIDTH); + +impl Entry for ScheduledIo { + fn 
generation(&self) -> Generation { + unpack_generation(self.readiness.load(SeqCst)) + } + + fn reset(&self, generation: Generation) -> bool { + let mut current = self.readiness.load(Acquire); + + loop { + if unpack_generation(current) != generation { + return false; + } + + let next = PACK.pack(generation.next().to_usize(), 0); + + match self + .readiness + .compare_exchange(current, next, AcqRel, Acquire) + { + Ok(_) => break, + Err(actual) => current = actual, + } + } + + drop(self.reader.take_waker()); + drop(self.writer.take_waker()); + + true + } +} + +impl Default for ScheduledIo { + fn default() -> ScheduledIo { + ScheduledIo { + readiness: AtomicUsize::new(0), + reader: AtomicWaker::new(), + writer: AtomicWaker::new(), + } + } +} + +impl ScheduledIo { + #[cfg(all(test, loom))] + /// Returns the current readiness value of this `ScheduledIo`, if the + /// provided `token` is still a valid access. + /// + /// # Returns + /// + /// If the given token's generation no longer matches the `ScheduledIo`'s + /// generation, then the corresponding IO resource has been removed and + /// replaced with a new resource. In that case, this method returns `None`. + /// Otherwise, this returns the current readiness. + pub(crate) fn get_readiness(&self, address: Address) -> Option<usize> { + let ready = self.readiness.load(Acquire); + + if unpack_generation(ready) != address.generation() { + return None; + } + + Some(ready & !PACK.mask()) + } + + /// Sets the readiness on this `ScheduledIo` by invoking the given closure on + /// the current value, returning the previous readiness value. + /// + /// # Arguments + /// - `token`: the token for this `ScheduledIo`. + /// - `f`: a closure returning a new readiness value given the previous + /// readiness. + /// + /// # Returns + /// + /// If the given token's generation no longer matches the `ScheduledIo`'s + /// generation, then the corresponding IO resource has been removed and + /// replaced with a new resource. In that case, this method returns `Err`. + /// Otherwise, this returns the previous readiness. + pub(crate) fn set_readiness( + &self, + address: Address, + f: impl Fn(usize) -> usize, + ) -> Result<usize, ()> { + let generation = address.generation(); + + let mut current = self.readiness.load(Acquire); + + loop { + // Check that the generation for this access is still the current + // one. + if unpack_generation(current) != generation { + return Err(()); + } + // Mask out the generation bits so that the modifying function + // doesn't see them. + let current_readiness = current & mio::Ready::all().as_usize(); + let new = f(current_readiness); + + debug_assert!( + new <= !PACK.max_value(), + "new readiness value would overwrite generation bits!" + ); + + match self.readiness.compare_exchange( + current, + PACK.pack(generation.to_usize(), new), + AcqRel, + Acquire, + ) { + Ok(_) => return Ok(current), + // we lost the race, retry! + Err(actual) => current = actual, + } + } + } +} + +impl Drop for ScheduledIo { + fn drop(&mut self) { + self.writer.wake(); + self.reader.wake(); + } +} + +fn unpack_generation(src: usize) -> Generation { + Generation::new(PACK.unpack(src)) +} diff --git a/third_party/rust/tokio/src/io/mod.rs b/third_party/rust/tokio/src/io/mod.rs new file mode 100644 index 0000000000..29d8bc5554 --- /dev/null +++ b/third_party/rust/tokio/src/io/mod.rs @@ -0,0 +1,229 @@ +#![cfg_attr(loom, allow(dead_code, unreachable_pub))] + +//! Traits, helpers, and type definitions for asynchronous I/O functionality. +//! +//! 
This module is the asynchronous version of `std::io`. Primarily, it +//! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous +//! versions of the [`Read`] and [`Write`] traits in the standard library. +//! +//! # AsyncRead and AsyncWrite +//! +//! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and +//! [`AsyncWrite`] provide the most general interface for reading and writing +//! input and output. Unlike the standard library's traits, however, they are +//! _asynchronous_ — meaning that reading from or writing to a `tokio::io` +//! type will _yield_ to the Tokio scheduler when IO is not ready, rather than +//! blocking. This allows other tasks to run while waiting on IO. +//! +//! Another difference is that [`AsyncRead`] and [`AsyncWrite`] only contain +//! core methods needed to provide asynchronous reading and writing +//! functionality. Instead, utility methods are defined in the [`AsyncReadExt`] +//! and [`AsyncWriteExt`] extension traits. These traits are automatically +//! implemented for all values that implement [`AsyncRead`] and [`AsyncWrite`] +//! respectively. +//! +//! End users will rarely interact directly with [`AsyncRead`] and +//! [`AsyncWrite`]. Instead, they will use the async functions defined in the +//! extension traits. Library authors are expected to implement [`AsyncRead`] +//! and [`AsyncWrite`] in order to provide types that behave like byte streams. +//! +//! Even with these differences, Tokio's [`AsyncRead`] and [`AsyncWrite`] traits +//! can be used in almost exactly the same manner as the standard library's +//! `Read` and `Write`. Most types in the standard library that implement `Read` +//! and `Write` have asynchronous equivalents in `tokio` that implement +//! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`]. +//! +//! For example, the standard library documentation introduces `Read` by +//! [demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We +//! can do the same with [`tokio::fs::File`][`File`]: +//! +//! ```no_run +//! use tokio::io::{self, AsyncReadExt}; +//! use tokio::fs::File; +//! +//! #[tokio::main] +//! async fn main() -> io::Result<()> { +//! let mut f = File::open("foo.txt").await?; +//! let mut buffer = [0; 10]; +//! +//! // read up to 10 bytes +//! let n = f.read(&mut buffer).await?; +//! +//! println!("The bytes: {:?}", &buffer[..n]); +//! Ok(()) +//! } +//! ``` +//! +//! [`File`]: crate::fs::File +//! [`TcpStream`]: crate::net::TcpStream +//! [`std::fs::File`]: std::fs::File +//! [std_example]: https://doc.rust-lang.org/std/io/index.html#read-and-write +//! +//! ## Buffered Readers and Writers +//! +//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be +//! making near-constant calls to the operating system. To help with this, +//! `std::io` comes with [support for _buffered_ readers and writers][stdbuf], +//! and therefore, `tokio::io` does as well. +//! +//! Tokio provides an async version of the [`std::io::BufRead`] trait, +//! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which +//! wrap readers and writers. These wrappers use a buffer, reducing the number +//! of calls and providing nicer methods for accessing exactly what you want. +//! +//! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add +//! extra methods to any async reader: +//! +//! ```no_run +//! use tokio::io::{self, BufReader, AsyncBufReadExt}; +//! use tokio::fs::File; +//! +//! #[tokio::main] +//! 
async fn main() -> io::Result<()> { +//! let f = File::open("foo.txt").await?; +//! let mut reader = BufReader::new(f); +//! let mut buffer = String::new(); +//! +//! // read a line into buffer +//! reader.read_line(&mut buffer).await?; +//! +//! println!("{}", buffer); +//! Ok(()) +//! } +//! ``` +//! +//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call +//! to [`write`](crate::io::AsyncWriteExt::write): +//! +//! ```no_run +//! use tokio::io::{self, BufWriter, AsyncWriteExt}; +//! use tokio::fs::File; +//! +//! #[tokio::main] +//! async fn main() -> io::Result<()> { +//! let f = File::create("foo.txt").await?; +//! { +//! let mut writer = BufWriter::new(f); +//! +//! // write a byte to the buffer +//! writer.write(&[42u8]).await?; +//! +//! } // the buffer is flushed once writer goes out of scope +//! +//! Ok(()) +//! } +//! ``` +//! +//! [stdbuf]: https://doc.rust-lang.org/std/io/index.html#bufreader-and-bufwriter +//! [`std::io::BufRead`]: std::io::BufRead +//! [`AsyncBufRead`]: crate::io::AsyncBufRead +//! [`BufReader`]: crate::io::BufReader +//! [`BufWriter`]: crate::io::BufWriter +//! +//! ## Implementing AsyncRead and AsyncWrite +//! +//! Because they are traits, we can implement `AsyncRead` and `AsyncWrite` for +//! our own types, as well. Note that these traits must only be implemented for +//! non-blocking I/O types that integrate with the futures type system. In +//! other words, these types must never block the thread, and instead the +//! current task is notified when the I/O resource is ready. +//! +//! # Standard input and output +//! +//! Tokio provides asynchronous APIs to standard [input], [output], and [error]. +//! These APIs are very similar to the ones provided by `std`, but they also +//! implement [`AsyncRead`] and [`AsyncWrite`]. +//! +//! Note that the standard input / output APIs **must** be used from the +//! context of the Tokio runtime, as they require Tokio-specific features to +//! function. Calling these functions outside of a Tokio runtime will panic. +//! +//! [input]: fn@stdin +//! [output]: fn@stdout +//! [error]: fn@stderr +//! +//! # `std` re-exports +//! +//! Additionally, [`Error`], [`ErrorKind`], and [`Result`] are re-exported +//! from `std::io` for ease of use. +//! +//! [`AsyncRead`]: trait@AsyncRead +//! [`AsyncWrite`]: trait@AsyncWrite +//! [`Error`]: struct@Error +//! [`ErrorKind`]: enum@ErrorKind +//! [`Result`]: type@Result +//! [`Read`]: std::io::Read +//! [`Write`]: std::io::Write +cfg_io_blocking! { + pub(crate) mod blocking; +} + +mod async_buf_read; +pub use self::async_buf_read::AsyncBufRead; + +mod async_read; +pub use self::async_read::AsyncRead; + +mod async_seek; +pub use self::async_seek::AsyncSeek; + +mod async_write; +pub use self::async_write::AsyncWrite; + +cfg_io_driver! { + pub(crate) mod driver; + + mod poll_evented; + pub use poll_evented::PollEvented; + + mod registration; + pub use registration::Registration; +} + +cfg_io_std! { + mod stderr; + pub use stderr::{stderr, Stderr}; + + mod stdin; + pub use stdin::{stdin, Stdin}; + + mod stdout; + pub use stdout::{stdout, Stdout}; +} + +cfg_io_util! { + mod split; + pub use split::{split, ReadHalf, WriteHalf}; + + pub(crate) mod seek; + pub use self::seek::Seek; + + pub(crate) mod util; + pub use util::{ + copy, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt, + BufReader, BufStream, BufWriter, Copy, Empty, Lines, Repeat, Sink, Split, Take, + }; + + cfg_stream! 
{
+        pub use util::{stream_reader, StreamReader};
+    }
+
+    // Re-export io::Error so that users don't have to deal with conflicts when
+    // `use`ing `tokio::io` and `std::io`.
+    pub use std::io::{Error, ErrorKind, Result};
+}
+
+cfg_not_io_util! {
+    cfg_process! {
+        pub(crate) mod util;
+    }
+}
+
+cfg_io_blocking! {
+    /// Types in this module can be mocked out in tests.
+    mod sys {
+        // TODO: don't rename
+        pub(crate) use crate::runtime::spawn_blocking as run;
+        pub(crate) use crate::task::JoinHandle as Blocking;
+    }
+}
diff --git a/third_party/rust/tokio/src/io/poll_evented.rs b/third_party/rust/tokio/src/io/poll_evented.rs
new file mode 100644
index 0000000000..298e6e58cf
--- /dev/null
+++ b/third_party/rust/tokio/src/io/poll_evented.rs
@@ -0,0 +1,423 @@
+use crate::io::driver::platform;
+use crate::io::{AsyncRead, AsyncWrite, Registration};
+
+use mio::event::Evented;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::Relaxed;
+use std::task::{Context, Poll};
+
+cfg_io_driver! {
+    /// Associates an I/O resource that implements the [`std::io::Read`] and/or
+    /// [`std::io::Write`] traits with the reactor that drives it.
+    ///
+    /// `PollEvented` uses [`Registration`] internally to take a type that
+    /// implements [`mio::Evented`] as well as [`std::io::Read`] and/or
+    /// [`std::io::Write`] and associate it with a reactor that will drive it.
+    ///
+    /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
+    /// used from within the future's execution model. As such, the
+    /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
+    /// implementations using the underlying I/O resource as well as readiness
+    /// events provided by the reactor.
+    ///
+    /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
+    /// `Sync`), the caller must ensure that there are at most two tasks that
+    /// use a `PollEvented` instance concurrently. One for reading and one for
+    /// writing. While violating this requirement is "safe" from a Rust memory
+    /// model point of view, it will result in unexpected behavior in the form
+    /// of lost notifications and tasks hanging.
+    ///
+    /// ## Readiness events
+    ///
+    /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
+    /// this type also supports access to the underlying readiness event stream.
+    /// While similar in function to what [`Registration`] provides, the
+    /// semantics are a bit different.
+    ///
+    /// Two functions are provided to access the readiness events:
+    /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
+    /// current readiness state of the `PollEvented` instance. If
+    /// [`poll_read_ready`] indicates read readiness, immediately calling
+    /// [`poll_read_ready`] again will also indicate read readiness.
+    ///
+    /// When the operation is attempted and is unable to succeed due to the I/O
+    /// resource not being ready, the caller must call [`clear_read_ready`] or
+    /// [`clear_write_ready`]. This clears the readiness state until a new
+    /// readiness event is received.
+    ///
+    /// This allows the caller to implement additional functions. For example,
+    /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
+    /// [`clear_read_ready`].
+ /// + /// ```rust + /// use tokio::io::PollEvented; + /// + /// use futures::ready; + /// use mio::Ready; + /// use mio::net::{TcpStream, TcpListener}; + /// use std::io; + /// use std::task::{Context, Poll}; + /// + /// struct MyListener { + /// poll_evented: PollEvented<TcpListener>, + /// } + /// + /// impl MyListener { + /// pub fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<Result<TcpStream, io::Error>> { + /// let ready = Ready::readable(); + /// + /// ready!(self.poll_evented.poll_read_ready(cx, ready))?; + /// + /// match self.poll_evented.get_ref().accept() { + /// Ok((socket, _)) => Poll::Ready(Ok(socket)), + /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + /// self.poll_evented.clear_read_ready(cx, ready)?; + /// Poll::Pending + /// } + /// Err(e) => Poll::Ready(Err(e)), + /// } + /// } + /// } + /// ``` + /// + /// ## Platform-specific events + /// + /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. + /// These events are included as part of the read readiness event stream. The + /// write readiness event stream is only for `Ready::writable()` events. + /// + /// [`std::io::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html + /// [`std::io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html + /// [`AsyncRead`]: ../io/trait.AsyncRead.html + /// [`AsyncWrite`]: ../io/trait.AsyncWrite.html + /// [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html + /// [`Registration`]: struct@Registration + /// [`TcpListener`]: ../net/struct.TcpListener.html + /// [`clear_read_ready`]: #method.clear_read_ready + /// [`clear_write_ready`]: #method.clear_write_ready + /// [`poll_read_ready`]: #method.poll_read_ready + /// [`poll_write_ready`]: #method.poll_write_ready + pub struct PollEvented<E: Evented> { + io: Option<E>, + inner: Inner, + } +} + +struct Inner { + registration: Registration, + + /// Currently visible read readiness + read_readiness: AtomicUsize, + + /// Currently visible write readiness + write_readiness: AtomicUsize, +} + +// ===== impl PollEvented ===== + +macro_rules! poll_ready { + ($me:expr, $mask:expr, $cache:ident, $take:ident, $poll:expr) => {{ + // Load cached & encoded readiness. + let mut cached = $me.inner.$cache.load(Relaxed); + let mask = $mask | platform::hup() | platform::error(); + + // See if the current readiness matches any bits. + let mut ret = mio::Ready::from_usize(cached) & $mask; + + if ret.is_empty() { + // Readiness does not match, consume the registration's readiness + // stream. This happens in a loop to ensure that the stream gets + // drained. + loop { + let ready = match $poll? { + Poll::Ready(v) => v, + Poll::Pending => return Poll::Pending, + }; + cached |= ready.as_usize(); + + // Update the cache store + $me.inner.$cache.store(cached, Relaxed); + + ret |= ready & mask; + + if !ret.is_empty() { + return Poll::Ready(Ok(ret)); + } + } + } else { + // Check what's new with the registration stream. This will not + // request to be notified + if let Some(ready) = $me.inner.registration.$take()? { + cached |= ready.as_usize(); + $me.inner.$cache.store(cached, Relaxed); + } + + Poll::Ready(Ok(mio::Ready::from_usize(cached))) + } + }}; +} + +impl<E> PollEvented<E> +where + E: Evented, +{ + /// Creates a new `PollEvented` associated with the default reactor. + /// + /// # Panics + /// + /// This function panics if thread-local runtime is not set. 
+    ///
+    /// The runtime is usually set implicitly when this function is called
+    /// from a future driven by a tokio runtime, otherwise the runtime can be
+    /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+    /// function.
+    pub fn new(io: E) -> io::Result<Self> {
+        let registration = Registration::new(&io)?;
+        Ok(Self {
+            io: Some(io),
+            inner: Inner {
+                registration,
+                read_readiness: AtomicUsize::new(0),
+                write_readiness: AtomicUsize::new(0),
+            },
+        })
+    }
+
+    /// Returns a shared reference to the underlying I/O object this readiness
+    /// stream is wrapping.
+    pub fn get_ref(&self) -> &E {
+        self.io.as_ref().unwrap()
+    }
+
+    /// Returns a mutable reference to the underlying I/O object this readiness
+    /// stream is wrapping.
+    pub fn get_mut(&mut self) -> &mut E {
+        self.io.as_mut().unwrap()
+    }
+
+    /// Consumes self, returning the inner I/O object.
+    ///
+    /// This function will deregister the I/O resource from the reactor before
+    /// returning. If the deregistration operation fails, an error is returned.
+    ///
+    /// Note that deregistering does not guarantee that the I/O resource can be
+    /// registered with a different reactor. Some I/O resource types can only be
+    /// associated with a single reactor instance for their lifetime.
+    pub fn into_inner(mut self) -> io::Result<E> {
+        let io = self.io.take().unwrap();
+        self.inner.registration.deregister(&io)?;
+        Ok(io)
+    }
+
+    /// Checks the I/O resource's read readiness state.
+    ///
+    /// The mask argument allows specifying what readiness to notify on. This
+    /// can be any value, including platform specific readiness, **except**
+    /// `writable`. HUP is always implicitly included on platforms that support
+    /// it.
+    ///
+    /// If the resource is not ready for a read then `Poll::Pending` is returned
+    /// and the current task is notified once a new event is received.
+    ///
+    /// The I/O resource will remain in a read-ready state until readiness is
+    /// cleared by calling [`clear_read_ready`].
+    ///
+    /// [`clear_read_ready`]: #method.clear_read_ready
+    ///
+    /// # Panics
+    ///
+    /// This function panics if:
+    ///
+    /// * `mask` includes writable.
+    /// * called from outside of a task context.
+    pub fn poll_read_ready(
+        &self,
+        cx: &mut Context<'_>,
+        mask: mio::Ready,
+    ) -> Poll<io::Result<mio::Ready>> {
+        assert!(!mask.is_writable(), "cannot poll for write readiness");
+        poll_ready!(
+            self,
+            mask,
+            read_readiness,
+            take_read_ready,
+            self.inner.registration.poll_read_ready(cx)
+        )
+    }
+
+    /// Clears the I/O resource's read readiness state and registers the current
+    /// task to be notified once a read readiness event is received.
+    ///
+    /// After calling this function, `poll_read_ready` will return
+    /// `Poll::Pending` until a new read readiness event has been received.
+    ///
+    /// The `ready` argument specifies the readiness bits to clear. This may not
+    /// include `writable` or `hup`.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if:
+    ///
+    /// * `ready` includes writable or HUP
+    /// * called from outside of a task context.
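+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of the intended "attempt the operation, clear
+    /// readiness on `WouldBlock`" pattern. This is an illustration rather
+    /// than a verbatim part of this crate: the `poll_evented` field wrapping
+    /// a readable `mio` type is hypothetical, and `ready!` is the macro from
+    /// the `futures` crate used in the listener example above:
+    ///
+    /// ```ignore
+    /// fn poll_read(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
+    ///     let ready = mio::Ready::readable();
+    ///
+    ///     // Wait until the reactor reports read readiness.
+    ///     ready!(self.poll_evented.poll_read_ready(cx, ready))?;
+    ///
+    ///     match self.poll_evented.get_mut().read(buf) {
+    ///         // Spurious readiness: clear it and wait for the next event.
+    ///         Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+    ///             self.poll_evented.clear_read_ready(cx, ready)?;
+    ///             Poll::Pending
+    ///         }
+    ///         res => Poll::Ready(res),
+    ///     }
+    /// }
+    /// ```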
+    pub fn clear_read_ready(&self, cx: &mut Context<'_>, ready: mio::Ready) -> io::Result<()> {
+        // Cannot clear write readiness
+        assert!(!ready.is_writable(), "cannot clear write readiness");
+        assert!(!platform::is_hup(ready), "cannot clear HUP readiness");
+
+        self.inner
+            .read_readiness
+            .fetch_and(!ready.as_usize(), Relaxed);
+
+        if self.poll_read_ready(cx, ready)?.is_ready() {
+            // Notify the current task
+            cx.waker().wake_by_ref();
+        }
+
+        Ok(())
+    }
+
+    /// Checks the I/O resource's write readiness state.
+    ///
+    /// This always checks for writable readiness and also checks for HUP
+    /// readiness on platforms that support it.
+    ///
+    /// If the resource is not ready for a write then `Poll::Pending` is
+    /// returned and the current task is notified once a new event is received.
+    ///
+    /// The I/O resource will remain in a write-ready state until readiness is
+    /// cleared by calling [`clear_write_ready`].
+    ///
+    /// [`clear_write_ready`]: #method.clear_write_ready
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if called from outside of a task context.
+    pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
+        poll_ready!(
+            self,
+            mio::Ready::writable(),
+            write_readiness,
+            take_write_ready,
+            self.inner.registration.poll_write_ready(cx)
+        )
+    }
+
+    /// Clears the I/O resource's write readiness state and registers the current
+    /// task to be notified once a write readiness event is received.
+    ///
+    /// This only clears writable readiness. HUP (on platforms that support HUP)
+    /// cannot be cleared as it is a final state.
+    ///
+    /// After calling this function, `poll_write_ready` will return
+    /// `Poll::Pending` until a new write readiness event has been received.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if called from outside of a task context.
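+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of the write-side counterpart, mirroring the
+    /// `AsyncWrite` implementation further down in this file. Again an
+    /// illustration, not a verbatim part of this crate; the `poll_evented`
+    /// field wrapping a writable `mio` type is hypothetical:
+    ///
+    /// ```ignore
+    /// fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+    ///     // Wait until the resource reports write readiness.
+    ///     ready!(self.poll_evented.poll_write_ready(cx))?;
+    ///
+    ///     match self.poll_evented.get_mut().flush() {
+    ///         // Spurious readiness: clear it and wait for the next event.
+    ///         Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+    ///             self.poll_evented.clear_write_ready(cx)?;
+    ///             Poll::Pending
+    ///         }
+    ///         res => Poll::Ready(res),
+    ///     }
+    /// }
+    /// ```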
+ pub fn clear_write_ready(&self, cx: &mut Context<'_>) -> io::Result<()> { + let ready = mio::Ready::writable(); + + self.inner + .write_readiness + .fetch_and(!ready.as_usize(), Relaxed); + + if self.poll_write_ready(cx)?.is_ready() { + // Notify the current task + cx.waker().wake_by_ref(); + } + + Ok(()) + } +} + +// ===== Read / Write impls ===== + +impl<E> AsyncRead for PollEvented<E> +where + E: Evented + Read + Unpin, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + ready!(self.poll_read_ready(cx, mio::Ready::readable()))?; + + let r = (*self).get_mut().read(buf); + + if is_wouldblock(&r) { + self.clear_read_ready(cx, mio::Ready::readable())?; + return Poll::Pending; + } + + Poll::Ready(r) + } +} + +impl<E> AsyncWrite for PollEvented<E> +where + E: Evented + Write + Unpin, +{ + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + ready!(self.poll_write_ready(cx))?; + + let r = (*self).get_mut().write(buf); + + if is_wouldblock(&r) { + self.clear_write_ready(cx)?; + return Poll::Pending; + } + + Poll::Ready(r) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + ready!(self.poll_write_ready(cx))?; + + let r = (*self).get_mut().flush(); + + if is_wouldblock(&r) { + self.clear_write_ready(cx)?; + return Poll::Pending; + } + + Poll::Ready(r) + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(Ok(())) + } +} + +fn is_wouldblock<T>(r: &io::Result<T>) -> bool { + match *r { + Ok(_) => false, + Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, + } +} + +impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PollEvented").field("io", &self.io).finish() + } +} + +impl<E: Evented> Drop for PollEvented<E> { + fn drop(&mut self) { + if let Some(io) = self.io.take() { + // Ignore errors + let _ = self.inner.registration.deregister(&io); + } + } +} diff --git a/third_party/rust/tokio/src/io/registration.rs b/third_party/rust/tokio/src/io/registration.rs new file mode 100644 index 0000000000..4df11999f5 --- /dev/null +++ b/third_party/rust/tokio/src/io/registration.rs @@ -0,0 +1,299 @@ +use crate::io::driver::{platform, Direction, Handle}; +use crate::util::slab::Address; + +use mio::{self, Evented}; +use std::io; +use std::task::{Context, Poll}; + +cfg_io_driver! { + /// Associates an I/O resource with the reactor instance that drives it. + /// + /// A registration represents an I/O resource registered with a Reactor such + /// that it will receive task notifications on readiness. This is the lowest + /// level API for integrating with a reactor. + /// + /// The association between an I/O resource is made by calling [`new`]. Once + /// the association is established, it remains established until the + /// registration instance is dropped. + /// + /// A registration instance represents two separate readiness streams. One + /// for the read readiness and one for write readiness. These streams are + /// independent and can be consumed from separate tasks. + /// + /// **Note**: while `Registration` is `Sync`, the caller must ensure that + /// there are at most two tasks that use a registration instance + /// concurrently. One task for [`poll_read_ready`] and one task for + /// [`poll_write_ready`]. 
While violating this requirement is "safe" from a
+    /// Rust memory safety point of view, it will result in unexpected behavior
+    /// in the form of lost notifications and tasks hanging.
+    ///
+    /// ## Platform-specific events
+    ///
+    /// `Registration` also allows receiving platform-specific `mio::Ready`
+    /// events. These events are included as part of the read readiness event
+    /// stream. The write readiness event stream is only for `Ready::writable()`
+    /// events.
+    ///
+    /// [`new`]: #method.new
+    /// [`poll_read_ready`]: #method.poll_read_ready
+    /// [`poll_write_ready`]: #method.poll_write_ready
+    #[derive(Debug)]
+    pub struct Registration {
+        handle: Handle,
+        address: Address,
+    }
+}
+
+// ===== impl Registration =====
+
+impl Registration {
+    /// Registers the I/O resource with the default reactor.
+    ///
+    /// # Return
+    ///
+    /// - `Ok` if the registration happened successfully
+    /// - `Err` if an error was encountered during registration
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the thread-local runtime is not set.
+    ///
+    /// The runtime is usually set implicitly when this function is called
+    /// from a future driven by a tokio runtime, otherwise the runtime can be
+    /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+    /// function.
+    pub fn new<T>(io: &T) -> io::Result<Registration>
+    where
+        T: Evented,
+    {
+        let handle = Handle::current();
+        let address = if let Some(inner) = handle.inner() {
+            inner.add_source(io)?
+        } else {
+            return Err(io::Error::new(
+                io::ErrorKind::Other,
+                "failed to find event loop",
+            ));
+        };
+
+        Ok(Registration { handle, address })
+    }
+
+    /// Deregisters the I/O resource from the reactor it is associated with.
+    ///
+    /// This function must be called before the I/O resource associated with the
+    /// registration is dropped.
+    ///
+    /// Note that deregistering does not guarantee that the I/O resource can be
+    /// registered with a different reactor. Some I/O resource types can only be
+    /// associated with a single reactor instance for their lifetime.
+    ///
+    /// # Return
+    ///
+    /// If the deregistration was successful, `Ok` is returned. Any calls to
+    /// `Reactor::turn` that happen after a successful call to `deregister` will
+    /// no longer result in notifications getting sent for this registration.
+    ///
+    /// `Err` is returned if an error is encountered.
+    pub fn deregister<T>(&mut self, io: &T) -> io::Result<()>
+    where
+        T: Evented,
+    {
+        let inner = match self.handle.inner() {
+            Some(inner) => inner,
+            None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")),
+        };
+        inner.deregister_source(io)
+    }
+
+    /// Polls for events on the I/O resource's read readiness stream.
+    ///
+    /// If the I/O resource receives a new read readiness event since the last
+    /// call to `poll_read_ready`, it is returned. If it has not, the current
+    /// task is notified once a new event is received.
+    ///
+    /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned,
+    /// the function will always return `Ready(HUP)`. This should be treated as
+    /// the end of the readiness stream.
+    ///
+    /// Ensure that [`register`] has been called first.
+    ///
+    /// # Return value
+    ///
+    /// There are several possible return values:
+    ///
+    /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received
+    ///   a new readiness event. The readiness value is included.
+    ///
+    /// * `Poll::Pending` means that no new readiness events have been received
+    ///   since the last call to `poll_read_ready`.
+ /// + /// * `Poll::Ready(Err(err))` means that the registration has encountered an + /// error. This error either represents a permanent internal error **or** + /// the fact that [`register`] was not called first. + /// + /// [`register`]: #method.register + /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> { + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + let v = self.poll_ready(Direction::Read, Some(cx))?; + match v { + Some(v) => Poll::Ready(Ok(v)), + None => Poll::Pending, + } + } + + /// Consume any pending read readiness event. + /// + /// This function is identical to [`poll_read_ready`] **except** that it + /// will not notify the current task when a new event is received. As such, + /// it is safe to call this function from outside of a task context. + /// + /// [`poll_read_ready`]: #method.poll_read_ready + pub fn take_read_ready(&self) -> io::Result<Option<mio::Ready>> { + self.poll_ready(Direction::Read, None) + } + + /// Polls for events on the I/O resource's write readiness stream. + /// + /// If the I/O resource receives a new write readiness event since the last + /// call to `poll_write_ready`, it is returned. If it has not, the current + /// task is notified once a new event is received. + /// + /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, + /// the function will always return `Ready(HUP)`. This should be treated as + /// the end of the readiness stream. + /// + /// Ensure that [`register`] has been called first. + /// + /// # Return value + /// + /// There are several possible return values: + /// + /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received + /// a new readiness event. The readiness value is included. + /// + /// * `Poll::Pending` means that no new readiness events have been received + /// since the last call to `poll_write_ready`. + /// + /// * `Poll::Ready(Err(err))` means that the registration has encountered an + /// error. This error either represents a permanent internal error **or** + /// the fact that [`register`] was not called first. + /// + /// [`register`]: #method.register + /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered + /// + /// # Panics + /// + /// This function will panic if called from outside of a task context. + pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> { + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + let v = self.poll_ready(Direction::Write, Some(cx))?; + match v { + Some(v) => Poll::Ready(Ok(v)), + None => Poll::Pending, + } + } + + /// Consumes any pending write readiness event. + /// + /// This function is identical to [`poll_write_ready`] **except** that it + /// will not notify the current task when a new event is received. As such, + /// it is safe to call this function from outside of a task context. + /// + /// [`poll_write_ready`]: #method.poll_write_ready + pub fn take_write_ready(&self) -> io::Result<Option<mio::Ready>> { + self.poll_ready(Direction::Write, None) + } + + /// Polls for events on the I/O resource's `direction` readiness stream. + /// + /// If called with a task context, notify the task when a new event is + /// received. 
+    fn poll_ready(
+        &self,
+        direction: Direction,
+        cx: Option<&mut Context<'_>>,
+    ) -> io::Result<Option<mio::Ready>> {
+        let inner = match self.handle.inner() {
+            Some(inner) => inner,
+            None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")),
+        };
+
+        // If the task should be notified about new events, ensure that it has
+        // been registered
+        if let Some(ref cx) = cx {
+            inner.register(self.address, direction, cx.waker().clone())
+        }
+
+        let mask = direction.mask();
+        let mask_no_hup = (mask - platform::hup() - platform::error()).as_usize();
+
+        let sched = inner.io_dispatch.get(self.address).unwrap();
+
+        // This consumes the current readiness state **except** for HUP and
+        // error. HUP and error are excluded because a) they are final states
+        // and never transition out and b) both the read AND the write
+        // directions need to be able to observe these states.
+        //
+        // # Platform-specific behavior
+        //
+        // HUP and error readiness are platform-specific. On epoll platforms,
+        // HUP has specific conditions that must be met by both peers of a
+        // connection in order to be triggered.
+        //
+        // On epoll platforms, `EPOLLERR` is signaled through
+        // `UnixReady::error()` and is important to be observable by both read
+        // AND write. A specific case in which `EPOLLERR` occurs is when the
+        // read end of a pipe is closed. When this occurs, a peer blocked by
+        // writing to the pipe should be notified.
+        let curr_ready = sched
+            .set_readiness(self.address, |curr| curr & (!mask_no_hup))
+            .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address));
+
+        let mut ready = mask & mio::Ready::from_usize(curr_ready);
+
+        if ready.is_empty() {
+            if let Some(cx) = cx {
+                // Update the task info
+                match direction {
+                    Direction::Read => sched.reader.register_by_ref(cx.waker()),
+                    Direction::Write => sched.writer.register_by_ref(cx.waker()),
+                }
+
+                // Try again
+                let curr_ready = sched
+                    .set_readiness(self.address, |curr| curr & (!mask_no_hup))
+                    .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address));
+                ready = mask & mio::Ready::from_usize(curr_ready);
+            }
+        }
+
+        if ready.is_empty() {
+            Ok(None)
+        } else {
+            Ok(Some(ready))
+        }
+    }
+}
+
+unsafe impl Send for Registration {}
+unsafe impl Sync for Registration {}
+
+impl Drop for Registration {
+    fn drop(&mut self) {
+        let inner = match self.handle.inner() {
+            Some(inner) => inner,
+            None => return,
+        };
+        inner.drop_source(self.address);
+    }
+}
diff --git a/third_party/rust/tokio/src/io/seek.rs b/third_party/rust/tokio/src/io/seek.rs
new file mode 100644
index 0000000000..e3b5bf6b6f
--- /dev/null
+++ b/third_party/rust/tokio/src/io/seek.rs
@@ -0,0 +1,56 @@
+use crate::io::AsyncSeek;
+use std::future::Future;
+use std::io::{self, SeekFrom};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method.
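+///
+/// # Examples
+///
+/// The future is normally created and awaited through
+/// [`AsyncSeekExt::seek`](crate::io::AsyncSeekExt::seek). A brief sketch
+/// (assuming a `foo.txt` file exists):
+///
+/// ```no_run
+/// use tokio::fs::File;
+/// use tokio::io::AsyncSeekExt;
+///
+/// use std::io::SeekFrom;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+///     let mut file = File::open("foo.txt").await?;
+///
+///     // Move the cursor six bytes past the start of the file.
+///     let pos = file.seek(SeekFrom::Start(6)).await?;
+///     assert_eq!(pos, 6);
+///     Ok(())
+/// }
+/// ```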
+#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Seek<'a, S: ?Sized> { + seek: &'a mut S, + pos: Option<SeekFrom>, +} + +pub(crate) fn seek<S>(seek: &mut S, pos: SeekFrom) -> Seek<'_, S> +where + S: AsyncSeek + ?Sized + Unpin, +{ + Seek { + seek, + pos: Some(pos), + } +} + +impl<S> Future for Seek<'_, S> +where + S: AsyncSeek + ?Sized + Unpin, +{ + type Output = io::Result<u64>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = &mut *self; + match me.pos { + Some(pos) => match Pin::new(&mut me.seek).start_seek(cx, pos) { + Poll::Ready(Ok(())) => { + me.pos = None; + Pin::new(&mut me.seek).poll_complete(cx) + } + Poll::Ready(Err(e)) => Poll::Ready(Err(e)), + Poll::Pending => Poll::Pending, + }, + None => Pin::new(&mut me.seek).poll_complete(cx), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<Seek<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/split.rs b/third_party/rust/tokio/src/io/split.rs new file mode 100644 index 0000000000..134b937a5f --- /dev/null +++ b/third_party/rust/tokio/src/io/split.rs @@ -0,0 +1,195 @@ +//! Split a single value implementing `AsyncRead + AsyncWrite` into separate +//! `AsyncRead` and `AsyncWrite` handles. +//! +//! To restore this read/write object from its `split::ReadHalf` and +//! `split::WriteHalf` use `unsplit`. + +use crate::io::{AsyncRead, AsyncWrite}; + +use bytes::{Buf, BufMut}; +use std::cell::UnsafeCell; +use std::fmt; +use std::io; +use std::pin::Pin; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::{Acquire, Release}; +use std::sync::Arc; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// The readable half of a value returned from [`split`](split()). + pub struct ReadHalf<T> { + inner: Arc<Inner<T>>, + } + + /// The writable half of a value returned from [`split`](split()). + pub struct WriteHalf<T> { + inner: Arc<Inner<T>>, + } + + /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate + /// `AsyncRead` and `AsyncWrite` handles. + /// + /// To restore this read/write object from its `ReadHalf` and + /// `WriteHalf` use [`unsplit`](ReadHalf::unsplit()). + pub fn split<T>(stream: T) -> (ReadHalf<T>, WriteHalf<T>) + where + T: AsyncRead + AsyncWrite, + { + let inner = Arc::new(Inner { + locked: AtomicBool::new(false), + stream: UnsafeCell::new(stream), + }); + + let rd = ReadHalf { + inner: inner.clone(), + }; + + let wr = WriteHalf { inner }; + + (rd, wr) + } +} + +struct Inner<T> { + locked: AtomicBool, + stream: UnsafeCell<T>, +} + +struct Guard<'a, T> { + inner: &'a Inner<T>, +} + +impl<T> ReadHalf<T> { + /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same + /// stream. + pub fn is_pair_of(&self, other: &WriteHalf<T>) -> bool { + other.is_pair_of(&self) + } + + /// Reunites with a previously split `WriteHalf`. + /// + /// # Panics + /// + /// If this `ReadHalf` and the given `WriteHalf` do not originate from the + /// same `split` operation this method will panic. + /// This can be checked ahead of time by comparing the stream ID + /// of the two halves. 
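+    ///
+    /// # Examples
+    ///
+    /// A brief sketch (assuming a connected [`TcpStream`]; any value
+    /// implementing `AsyncRead + AsyncWrite` can be split the same way):
+    ///
+    /// ```ignore
+    /// use tokio::io::split;
+    /// use tokio::net::TcpStream;
+    ///
+    /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+    ///
+    /// // Hand each half to a separate task...
+    /// let (rd, wr) = split(stream);
+    ///
+    /// // ...and later reunite them into the original stream.
+    /// let stream = rd.unsplit(wr);
+    /// ```
+    ///
+    /// [`TcpStream`]: crate::net::TcpStream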
+ pub fn unsplit(self, wr: WriteHalf<T>) -> T { + if self.is_pair_of(&wr) { + drop(wr); + + let inner = Arc::try_unwrap(self.inner) + .ok() + .expect("`Arc::try_unwrap` failed"); + + inner.stream.into_inner() + } else { + panic!("Unrelated `split::Write` passed to `split::Read::unsplit`.") + } + } +} + +impl<T> WriteHalf<T> { + /// Check if this `WriteHalf` and some `ReadHalf` were split from the same + /// stream. + pub fn is_pair_of(&self, other: &ReadHalf<T>) -> bool { + Arc::ptr_eq(&self.inner, &other.inner) + } +} + +impl<T: AsyncRead> AsyncRead for ReadHalf<T> { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_read(cx, buf) + } + + fn poll_read_buf<B: BufMut>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<io::Result<usize>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_read_buf(cx, buf) + } +} + +impl<T: AsyncWrite> AsyncWrite for WriteHalf<T> { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<Result<usize, io::Error>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_shutdown(cx) + } + + fn poll_write_buf<B: Buf>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<Result<usize, io::Error>> { + let mut inner = ready!(self.inner.poll_lock(cx)); + inner.stream_pin().poll_write_buf(cx, buf) + } +} + +impl<T> Inner<T> { + fn poll_lock(&self, cx: &mut Context<'_>) -> Poll<Guard<'_, T>> { + if !self.locked.compare_and_swap(false, true, Acquire) { + Poll::Ready(Guard { inner: self }) + } else { + // Spin... but investigate a better strategy + + std::thread::yield_now(); + cx.waker().wake_by_ref(); + + Poll::Pending + } + } +} + +impl<T> Guard<'_, T> { + fn stream_pin(&mut self) -> Pin<&mut T> { + // safety: the stream is pinned in `Arc` and the `Guard` ensures mutual + // exclusion. + unsafe { Pin::new_unchecked(&mut *self.inner.stream.get()) } + } +} + +impl<T> Drop for Guard<'_, T> { + fn drop(&mut self) { + self.inner.locked.store(false, Release); + } +} + +unsafe impl<T: Send> Send for ReadHalf<T> {} +unsafe impl<T: Send> Send for WriteHalf<T> {} +unsafe impl<T: Sync> Sync for ReadHalf<T> {} +unsafe impl<T: Sync> Sync for WriteHalf<T> {} + +impl<T: fmt::Debug> fmt::Debug for ReadHalf<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("split::ReadHalf").finish() + } +} + +impl<T: fmt::Debug> fmt::Debug for WriteHalf<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("split::WriteHalf").finish() + } +} diff --git a/third_party/rust/tokio/src/io/stderr.rs b/third_party/rust/tokio/src/io/stderr.rs new file mode 100644 index 0000000000..99607dc604 --- /dev/null +++ b/third_party/rust/tokio/src/io/stderr.rs @@ -0,0 +1,108 @@ +use crate::io::blocking::Blocking; +use crate::io::AsyncWrite; + +use std::io; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +cfg_io_std! { + /// A handle to the standard error stream of a process. 
+    ///
+    /// Concurrent writes to stderr must be executed with care: Only individual
+    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+    /// you should be aware that writes using [`write_all`] are not guaranteed
+    /// to occur as a single write, so multiple threads writing data with
+    /// [`write_all`] may result in interleaved output.
+    ///
+    /// Created by the [`stderr`] function.
+    ///
+    /// [`stderr`]: stderr()
+    /// [`AsyncWrite`]: AsyncWrite
+    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::io::{self, AsyncWriteExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stderr = io::stderr();
+    ///     stderr.write_all(b"Print some error here.").await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    #[derive(Debug)]
+    pub struct Stderr {
+        std: Blocking<std::io::Stderr>,
+    }
+
+    /// Constructs a new handle to the standard error of the current process.
+    ///
+    /// The returned handle allows writing to standard error from within the
+    /// Tokio runtime.
+    ///
+    /// Concurrent writes to stderr must be executed with care: Only individual
+    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+    /// you should be aware that writes using [`write_all`] are not guaranteed
+    /// to occur as a single write, so multiple threads writing data with
+    /// [`write_all`] may result in interleaved output.
+    ///
+    /// [`AsyncWrite`]: AsyncWrite
+    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::io::{self, AsyncWriteExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stderr = io::stderr();
+    ///     stderr.write_all(b"Print some error here.").await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn stderr() -> Stderr {
+        let std = io::stderr();
+        Stderr {
+            std: Blocking::new(std),
+        }
+    }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stderr {
+    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+        std::io::stderr().as_raw_fd()
+    }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stderr {
+    fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+        std::io::stderr().as_raw_handle()
+    }
+}
+
+impl AsyncWrite for Stderr {
+    fn poll_write(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.std).poll_write(cx, buf)
+    }
+
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+        Pin::new(&mut self.std).poll_flush(cx)
+    }
+
+    fn poll_shutdown(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Result<(), io::Error>> {
+        Pin::new(&mut self.std).poll_shutdown(cx)
+    }
+}
diff --git a/third_party/rust/tokio/src/io/stdin.rs b/third_party/rust/tokio/src/io/stdin.rs
new file mode 100644
index 0000000000..214c4d0564
--- /dev/null
+++ b/third_party/rust/tokio/src/io/stdin.rs
@@ -0,0 +1,70 @@
+use crate::io::blocking::Blocking;
+use crate::io::AsyncRead;
+
+use std::io;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+cfg_io_std! {
+    /// A handle to the standard input stream of a process.
+    ///
+    /// The handle implements the [`AsyncRead`] trait, but beware that concurrent
+    /// reads of `Stdin` must be executed with care.
+    ///
+    /// As an additional caveat, reading from the handle may block the calling
+    /// future indefinitely if there is not enough data available. This makes this
+    /// handle unsuitable for use in any circumstance where immediate reaction to
+    /// available data is required, e.g. interactive use or when implementing a
+    /// subprocess driven by requests on the standard input.
+    ///
+    /// Created by the [`stdin`] function.
+    ///
+    /// [`stdin`]: fn@stdin
+    /// [`AsyncRead`]: trait@AsyncRead
+    #[derive(Debug)]
+    pub struct Stdin {
+        std: Blocking<std::io::Stdin>,
+    }
+
+    /// Constructs a new handle to the standard input of the current process.
+    ///
+    /// The returned handle allows reading from standard input from within the
+    /// Tokio runtime.
+    ///
+    /// As an additional caveat, reading from the handle may block the calling
+    /// future indefinitely if there is not enough data available. This makes this
+    /// handle unsuitable for use in any circumstance where immediate reaction to
+    /// available data is required, e.g. interactive use or when implementing a
+    /// subprocess driven by requests on the standard input.
+    pub fn stdin() -> Stdin {
+        let std = io::stdin();
+        Stdin {
+            std: Blocking::new(std),
+        }
+    }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stdin {
+    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+        std::io::stdin().as_raw_fd()
+    }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stdin {
+    fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+        std::io::stdin().as_raw_handle()
+    }
+}
+
+impl AsyncRead for Stdin {
+    fn poll_read(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut [u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.std).poll_read(cx, buf)
+    }
+}
diff --git a/third_party/rust/tokio/src/io/stdout.rs b/third_party/rust/tokio/src/io/stdout.rs
new file mode 100644
index 0000000000..5377993a46
--- /dev/null
+++ b/third_party/rust/tokio/src/io/stdout.rs
@@ -0,0 +1,108 @@
+use crate::io::blocking::Blocking;
+use crate::io::AsyncWrite;
+
+use std::io;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+cfg_io_std! {
+    /// A handle to the standard output stream of a process.
+    ///
+    /// Concurrent writes to stdout must be executed with care: Only individual
+    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+    /// you should be aware that writes using [`write_all`] are not guaranteed
+    /// to occur as a single write, so multiple threads writing data with
+    /// [`write_all`] may result in interleaved output.
+    ///
+    /// Created by the [`stdout`] function.
+    ///
+    /// [`stdout`]: stdout()
+    /// [`AsyncWrite`]: AsyncWrite
+    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::io::{self, AsyncWriteExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stdout = io::stdout();
+    ///     stdout.write_all(b"Hello world!").await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    #[derive(Debug)]
+    pub struct Stdout {
+        std: Blocking<std::io::Stdout>,
+    }
+
+    /// Constructs a new handle to the standard output of the current process.
+    ///
+    /// The returned handle allows writing to standard out from within the
+    /// Tokio runtime.
+    ///
+    /// Concurrent writes to stdout must be executed with care: Only individual
+    /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+    /// you should be aware that writes using [`write_all`] are not guaranteed
+    /// to occur as a single write, so multiple threads writing data with
+    /// [`write_all`] may result in interleaved output.
+    ///
+    /// [`AsyncWrite`]: AsyncWrite
+    /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::io::{self, AsyncWriteExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stdout = io::stdout();
+    ///     stdout.write_all(b"Hello world!").await?;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn stdout() -> Stdout {
+        let std = io::stdout();
+        Stdout {
+            std: Blocking::new(std),
+        }
+    }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stdout {
+    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+        std::io::stdout().as_raw_fd()
+    }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stdout {
+    fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+        std::io::stdout().as_raw_handle()
+    }
+}
+
+impl AsyncWrite for Stdout {
+    fn poll_write(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.std).poll_write(cx, buf)
+    }
+
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+        Pin::new(&mut self.std).poll_flush(cx)
+    }
+
+    fn poll_shutdown(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Result<(), io::Error>> {
+        Pin::new(&mut self.std).poll_shutdown(cx)
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs b/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs
new file mode 100644
index 0000000000..1bfab90220
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs
@@ -0,0 +1,258 @@
+use crate::io::util::lines::{lines, Lines};
+use crate::io::util::read_line::{read_line, ReadLine};
+use crate::io::util::read_until::{read_until, ReadUntil};
+use crate::io::util::split::{split, Split};
+use crate::io::AsyncBufRead;
+
+cfg_io_util! {
+    /// An extension trait which adds utility methods to [`AsyncBufRead`] types.
+    ///
+    /// [`AsyncBufRead`]: crate::io::AsyncBufRead
+    pub trait AsyncBufReadExt: AsyncBufRead {
+        /// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached.
+        ///
+        /// Equivalent to:
+        ///
+        /// ```ignore
+        /// async fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize>;
+        /// ```
+        ///
+        /// This function will read bytes from the underlying stream until the
+        /// delimiter or EOF is found. Once found, all bytes up to, and including,
+        /// the delimiter (if found) will be appended to `buf`.
+        ///
+        /// If successful, this function will return the total number of bytes read.
+        ///
+        /// # Errors
+        ///
+        /// This function will ignore all instances of [`ErrorKind::Interrupted`] and
+        /// will otherwise return any errors returned by [`fill_buf`].
+        ///
+        /// If an I/O error is encountered then all bytes read so far will be
+        /// present in `buf` and its length will have been adjusted appropriately.
+        ///
+        /// [`fill_buf`]: AsyncBufRead::poll_fill_buf
+        /// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
+        ///
+        /// # Examples
+        ///
+        /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`.
In + /// this example, we use [`Cursor`] to read all the bytes in a byte slice + /// in hyphen delimited segments: + /// + /// [`Cursor`]: std::io::Cursor + /// + /// ``` + /// use tokio::io::AsyncBufReadExt; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut cursor = Cursor::new(b"lorem-ipsum"); + /// let mut buf = vec![]; + /// + /// // cursor is at 'l' + /// let num_bytes = cursor.read_until(b'-', &mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// + /// assert_eq!(num_bytes, 6); + /// assert_eq!(buf, b"lorem-"); + /// buf.clear(); + /// + /// // cursor is at 'i' + /// let num_bytes = cursor.read_until(b'-', &mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// + /// assert_eq!(num_bytes, 5); + /// assert_eq!(buf, b"ipsum"); + /// buf.clear(); + /// + /// // cursor is at EOF + /// let num_bytes = cursor.read_until(b'-', &mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// assert_eq!(num_bytes, 0); + /// assert_eq!(buf, b""); + /// } + /// ``` + fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec<u8>) -> ReadUntil<'a, Self> + where + Self: Unpin, + { + read_until(self, byte, buf) + } + + /// Reads all bytes until a newline (the 0xA byte) is reached, and appends + /// them to the provided buffer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_line(&mut self, buf: &mut String) -> io::Result<usize>; + /// ``` + /// + /// This function will read bytes from the underlying stream until the + /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes + /// up to, and including, the delimiter (if found) will be appended to + /// `buf`. + /// + /// If successful, this function will return the total number of bytes read. + /// + /// If this function returns `Ok(0)`, the stream has reached EOF. + /// + /// # Errors + /// + /// This function has the same error semantics as [`read_until`] and will + /// also return an error if the read bytes are not valid UTF-8. If an I/O + /// error is encountered then `buf` may contain some bytes already read in + /// the event that all data read so far was valid UTF-8. + /// + /// [`read_until`]: AsyncBufReadExt::read_until + /// + /// # Examples + /// + /// [`std::io::Cursor`][`Cursor`] is a type that implements + /// `AsyncBufRead`.
In this example, we use [`Cursor`] to read all the + /// lines in a byte slice: + /// + /// [`Cursor`]: std::io::Cursor + /// + /// ``` + /// use tokio::io::AsyncBufReadExt; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut cursor = Cursor::new(b"foo\nbar"); + /// let mut buf = String::new(); + /// + /// // cursor is at 'f' + /// let num_bytes = cursor.read_line(&mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// + /// assert_eq!(num_bytes, 4); + /// assert_eq!(buf, "foo\n"); + /// buf.clear(); + /// + /// // cursor is at 'b' + /// let num_bytes = cursor.read_line(&mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// + /// assert_eq!(num_bytes, 3); + /// assert_eq!(buf, "bar"); + /// buf.clear(); + /// + /// // cursor is at EOF + /// let num_bytes = cursor.read_line(&mut buf) + /// .await + /// .expect("reading from cursor won't fail"); + /// + /// assert_eq!(num_bytes, 0); + /// assert_eq!(buf, ""); + /// } + /// ``` + fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self> + where + Self: Unpin, + { + read_line(self, buf) + } + + /// Returns a stream of the contents of this reader split on the byte + /// `byte`. + /// + /// This method is the asynchronous equivalent to + /// [`BufRead::split`](std::io::BufRead::split). + /// + /// The stream returned from this function will yield instances of + /// [`io::Result`]`<`[`Vec<u8>`]`>`. Each vector returned will *not* have + /// the delimiter byte at the end. + /// + /// [`io::Result`]: std::io::Result + /// [`Vec<u8>`]: std::vec::Vec + /// + /// # Errors + /// + /// Each item of the stream has the same error semantics as + /// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until). + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::AsyncBufRead; + /// use tokio::io::AsyncBufReadExt; + /// + /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { + /// let mut segments = my_buf_read.split(b'f'); + /// + /// while let Some(segment) = segments.next_segment().await? { + /// println!("length = {}", segment.len()) + /// } + /// # Ok(()) + /// # } + /// ``` + fn split(self, byte: u8) -> Split<Self> + where + Self: Sized + Unpin, + { + split(self, byte) + } + + /// Returns a stream over the lines of this reader. + /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines). + /// + /// The stream returned from this function will yield instances of + /// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline + /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. + /// + /// [`io::Result`]: std::io::Result + /// [`String`]: String + /// + /// # Errors + /// + /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`]. + /// + /// # Examples + /// + /// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In + /// this example, we use [`Cursor`] to iterate over all the lines in a byte + /// slice. 
+ /// + /// [`Cursor`]: std::io::Cursor + /// + /// ``` + /// use tokio::io::AsyncBufReadExt; + /// use tokio::stream::StreamExt; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() { + /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor"); + /// + /// let mut lines = cursor.lines().map(|res| res.unwrap()); + /// + /// assert_eq!(lines.next().await, Some(String::from("lorem"))); + /// assert_eq!(lines.next().await, Some(String::from("ipsum"))); + /// assert_eq!(lines.next().await, Some(String::from("dolor"))); + /// assert_eq!(lines.next().await, None); + /// } + /// ``` + /// + /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line + fn lines(self) -> Lines<Self> + where + Self: Sized, + { + lines(self) + } + } +} + +impl<R: AsyncBufRead + ?Sized> AsyncBufReadExt for R {} diff --git a/third_party/rust/tokio/src/io/util/async_read_ext.rs b/third_party/rust/tokio/src/io/util/async_read_ext.rs new file mode 100644 index 0000000000..d4402db621 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/async_read_ext.rs @@ -0,0 +1,807 @@ +use crate::io::util::chain::{chain, Chain}; +use crate::io::util::read::{read, Read}; +use crate::io::util::read_buf::{read_buf, ReadBuf}; +use crate::io::util::read_exact::{read_exact, ReadExact}; +use crate::io::util::read_int::{ReadI128, ReadI16, ReadI32, ReadI64, ReadI8}; +use crate::io::util::read_int::{ReadU128, ReadU16, ReadU32, ReadU64, ReadU8}; +use crate::io::util::read_to_end::{read_to_end, ReadToEnd}; +use crate::io::util::read_to_string::{read_to_string, ReadToString}; +use crate::io::util::take::{take, Take}; +use crate::io::AsyncRead; + +use bytes::BufMut; + +cfg_io_util! { + /// Defines numeric reader + macro_rules! read_impl { + ( + $( + $(#[$outer:meta])* + fn $name:ident(&mut self) -> $($fut:ident)*; + )* + ) => { + $( + $(#[$outer])* + fn $name<'a>(&'a mut self) -> $($fut)*<&'a mut Self> where Self: Unpin { + $($fut)*::new(self) + } + )* + } + } + + /// Reads bytes from a source. + /// + /// Implemented as an extension trait, adding utility methods to all + /// [`AsyncRead`] types. Callers will tend to import this trait instead of + /// [`AsyncRead`]. + /// + /// As a convenience, this trait may be imported using the [`prelude`]: + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = [0; 10]; + /// + /// // The `read` method is defined by this trait. + /// let n = f.read(&mut buffer[..]).await?; + /// + /// Ok(()) + /// } + /// ``` + /// + /// See [module][crate::io] documentation for more details. + /// + /// [`AsyncRead`]: AsyncRead + /// [`prelude`]: crate::prelude + pub trait AsyncReadExt: AsyncRead { + /// Creates a new `AsyncRead` instance that chains this stream with + /// `next`. + /// + /// The returned `AsyncRead` instance will first read all bytes from this object + /// until EOF is encountered. Afterwards the output is equivalent to the + /// output of `next`. + /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `AsyncRead`: + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::{self, AsyncReadExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let f1 = File::open("foo.txt").await?; + /// let f2 = File::open("bar.txt").await?; + /// + /// let mut handle = f1.chain(f2); + /// let mut buffer = String::new(); + /// + /// // read the value into a String.
We could use any AsyncRead + /// // method here; this is just one example. + /// handle.read_to_string(&mut buffer).await?; + /// Ok(()) + /// } + /// ``` + fn chain<R>(self, next: R) -> Chain<Self, R> + where + Self: Sized, + R: AsyncRead, + { + chain(self, next) + } + + /// Pulls some bytes from this source into the specified buffer, + /// returning how many bytes were read. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>; + /// ``` + /// + /// This function does not provide any guarantees about whether it + /// completes immediately or asynchronously. + /// + /// If the return value of this method is `Ok(n)`, then it must be + /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates + /// that the buffer `buf` has been filled in with `n` bytes of data from + /// this source. If `n` is `0`, then it can indicate one of two + /// scenarios: + /// + /// 1. This reader has reached its "end of file" and will likely no longer + /// be able to produce bytes. Note that this does not mean that the + /// reader will *always* no longer be able to produce bytes. + /// 2. The buffer specified was 0 bytes in length. + /// + /// No guarantees are provided about the contents of `buf` when this + /// function is called; implementations cannot rely on any property of the + /// contents of `buf` being `true`. It is recommended that *implementations* + /// only write data to `buf` instead of reading its contents. + /// + /// Correspondingly, however, *callers* of this method may not assume + /// any guarantees about how the implementation uses `buf`. It is + /// possible that the code that's supposed to write to the buffer might + /// also read from it. It is your responsibility to make sure that `buf` + /// is initialized before calling `read`. + /// + /// # Errors + /// + /// If this function encounters any form of I/O or other error, an error + /// variant will be returned. If an error is returned then it must be + /// guaranteed that no bytes were read. + /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `Read`: + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::{self, AsyncReadExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = [0; 10]; + /// + /// // read up to 10 bytes + /// let n = f.read(&mut buffer[..]).await?; + /// + /// println!("The bytes: {:?}", &buffer[..n]); + /// Ok(()) + /// } + /// ``` + fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self> + where + Self: Unpin, + { + read(self, buf) + } + + /// Pulls some bytes from this source into the specified buffer, + /// advancing the buffer's internal cursor. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> io::Result<usize>; + /// ``` + /// + /// Usually, only a single `read` syscall is issued, even if there is + /// more space in the supplied buffer. + /// + /// This function does not provide any guarantees about whether it + /// completes immediately or asynchronously. + /// + /// # Return + /// + /// On a successful read, the number of read bytes is returned. If the + /// supplied buffer is not empty and the function returns `Ok(0)` then + /// the source has reached an "end-of-file" event. + /// + /// # Errors + /// + /// If this function encounters any form of I/O or other error, an error + /// variant will be returned.
If an error is returned then it must be + /// guaranteed that no bytes were read. + /// + /// # Examples + /// + /// [`File`] implements `Read` and [`BytesMut`] implements [`BufMut`]: + /// + /// [`File`]: crate::fs::File + /// [`BytesMut`]: bytes::BytesMut + /// [`BufMut`]: bytes::BufMut + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use bytes::BytesMut; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = BytesMut::with_capacity(10); + /// + /// assert!(buffer.is_empty()); + /// + /// // read up to 10 bytes, note that the return value is not needed + /// // to access the data that was read as `buffer`'s internal + /// // cursor is updated. + /// f.read_buf(&mut buffer).await?; + /// + /// println!("The bytes: {:?}", &buffer[..]); + /// Ok(()) + /// } + /// ``` + fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B> + where + Self: Sized, + B: BufMut, + { + read_buf(self, buf) + } + + /// Reads the exact number of bytes required to fill `buf`. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<usize>; + /// ``` + /// + /// This function reads as many bytes as necessary to completely fill + /// the specified buffer `buf`. + /// + /// No guarantees are provided about the contents of `buf` when this + /// function is called, implementations cannot rely on any property of + /// the contents of `buf` being `true`. It is recommended that + /// implementations only write data to `buf` instead of reading its + /// contents. + /// + /// # Errors + /// + /// If the operation encounters an "end of file" before completely + /// filling the buffer, it returns an error of the kind + /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified + /// in this case. + /// + /// If any other read error is encountered then the operation + /// immediately returns. The contents of `buf` are unspecified in this + /// case. + /// + /// If this operation returns an error, it is unspecified how many bytes + /// it has read, but it will never read more than would be necessary to + /// completely fill the buffer. + /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `Read`: + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::io::{self, AsyncReadExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = [0; 10]; + /// + /// // read exactly 10 bytes + /// f.read_exact(&mut buffer).await?; + /// Ok(()) + /// } + /// ``` + /// + /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof + fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self> + where + Self: Unpin, + { + read_exact(self, buf) + } + + read_impl! { + /// Reads an unsigned 8 bit integer from the underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_u8(&mut self) -> io::Result<u8>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. 
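+ /// In particular, if the reader reaches EOF before the byte is read, an
+ /// [`ErrorKind::UnexpectedEof`](std::io::ErrorKind::UnexpectedEof) error is
+ /// returned.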
+ /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read unsigned 8 bit integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![2, 5]); + /// + /// assert_eq!(2, reader.read_u8().await?); + /// assert_eq!(5, reader.read_u8().await?); + /// + /// Ok(()) + /// } + /// ``` + fn read_u8(&mut self) -> ReadU8; + + /// Reads a signed 8 bit integer from the underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_i8(&mut self) -> io::Result<i8>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read signed 8 bit integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![0x02, 0xfb]); + /// + /// assert_eq!(2, reader.read_i8().await?); + /// assert_eq!(-5, reader.read_i8().await?); + /// + /// Ok(()) + /// } + /// ``` + fn read_i8(&mut self) -> ReadI8; + + /// Reads an unsigned 16-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_u16(&mut self) -> io::Result<u16>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read unsigned 16 bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![2, 5, 3, 0]); + /// + /// assert_eq!(517, reader.read_u16().await?); + /// assert_eq!(768, reader.read_u16().await?); + /// Ok(()) + /// } + /// ``` + fn read_u16(&mut self) -> ReadU16; + + /// Reads a signed 16-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_i16(&mut self) -> io::Result<i16>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read signed 16 bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); + /// + /// assert_eq!(193, reader.read_i16().await?); + /// assert_eq!(-132, reader.read_i16().await?); + /// Ok(()) + /// } + /// ``` + fn read_i16(&mut self) -> ReadI16; + + /// Reads an unsigned 32-bit integer in big-endian order from the + /// underlying reader.
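+ /// Big-endian ("network") order puts the most significant byte first; the
+ /// bytes `[0x00, 0x00, 0x01, 0x0b]`, for example, decode to `0x0000010b`,
+ /// i.e. `267`.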
+ /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_u32(&mut self) -> io::Result<u32>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read unsigned 32-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); + /// + /// assert_eq!(267, reader.read_u32().await?); + /// Ok(()) + /// } + /// ``` + fn read_u32(&mut self) -> ReadU32; + + /// Reads a signed 32-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_i32(&mut self) -> io::Result<i32>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read signed 32-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); + /// + /// assert_eq!(-34253, reader.read_i32().await?); + /// Ok(()) + /// } + /// ``` + fn read_i32(&mut self) -> ReadI32; + + /// Reads an unsigned 64-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_u64(&mut self) -> io::Result<u64>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read unsigned 64-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![ + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 + /// ]); + /// + /// assert_eq!(918733457491587, reader.read_u64().await?); + /// Ok(()) + /// } + /// ``` + fn read_u64(&mut self) -> ReadU64; + + /// Reads a signed 64-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_i64(&mut self) -> io::Result<i64>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read signed 64-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); + /// + /// assert_eq!(i64::min_value(), reader.read_i64().await?); + /// Ok(()) + /// } + /// ``` + fn read_i64(&mut self) -> ReadI64; + + /// Reads an unsigned 128-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_u128(&mut self) -> io::Result<u128>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read unsigned 128-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![ + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 + /// ]); + /// + /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?); + /// Ok(()) + /// } + /// ``` + fn read_u128(&mut self) -> ReadU128; + + /// Reads a signed 128-bit integer in big-endian order from the + /// underlying reader. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_i128(&mut self) -> io::Result<i128>; + /// ``` + /// + /// It is recommended to use a buffered reader to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncReadExt::read_exact`]. + /// + /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact + /// + /// # Examples + /// + /// Read signed 128-bit big-endian integers from an `AsyncRead`: + /// + /// ```rust + /// use tokio::io::{self, AsyncReadExt}; + /// + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut reader = Cursor::new(vec![ + /// 0x80, 0, 0, 0, 0, 0, 0, 0, + /// 0, 0, 0, 0, 0, 0, 0, 0 + /// ]); + /// + /// assert_eq!(i128::min_value(), reader.read_i128().await?); + /// Ok(()) + /// } + /// ``` + fn read_i128(&mut self) -> ReadI128; + } + + /// Reads all bytes until EOF in this source, placing them into `buf`. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize>; + /// ``` + /// + /// All bytes read from this source will be appended to the specified + /// buffer `buf`. This function will continuously call [`read()`] to + /// append more data to `buf` until [`read()`][read] returns `Ok(0)`. + /// + /// If successful, the total number of bytes read is returned. + /// + /// # Errors + /// + /// If a read error is encountered then the `read_to_end` operation + /// immediately completes. Any bytes which have already been read will + /// be appended to `buf`.
+ /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `Read`: + /// + /// ```no_run + /// use tokio::io::{self, AsyncReadExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = Vec::new(); + /// + /// // read the whole file + /// f.read_to_end(&mut buffer).await?; + /// Ok(()) + /// } + /// ``` + /// + /// (See also the [`tokio::fs::read`] convenience function for reading from a + /// file.) + /// + /// [`tokio::fs::read`]: crate::fs::read::read + fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, Self> + where + Self: Unpin, + { + read_to_end(self, buf) + } + + /// Reads all bytes until EOF in this source, appending them to `buf`. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize>; + /// ``` + /// + /// If successful, the number of bytes which were read and appended to + /// `buf` is returned. + /// + /// # Errors + /// + /// If the data in this stream is *not* valid UTF-8 then an error is + /// returned and `buf` is unchanged. + /// + /// See [`read_to_end`][AsyncReadExt::read_to_end] for other error semantics. + /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `Read`: + /// + /// ```no_run + /// use tokio::io::{self, AsyncReadExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut f = File::open("foo.txt").await?; + /// let mut buffer = String::new(); + /// + /// f.read_to_string(&mut buffer).await?; + /// Ok(()) + /// } + /// ``` + /// + /// (See also the [`crate::fs::read_to_string`] convenience function for + /// reading from a file.) + /// + /// [`crate::fs::read_to_string`]: crate::fs::read_to_string::read_to_string + fn read_to_string<'a>(&'a mut self, dst: &'a mut String) -> ReadToString<'a, Self> + where + Self: Unpin, + { + read_to_string(self, dst) + } + + /// Creates an adaptor which reads at most `limit` bytes from it. + /// + /// This function returns a new instance of `AsyncRead` which will read + /// at most `limit` bytes, after which it will always return EOF + /// (`Ok(0)`). Any read errors will not count towards the number of + /// bytes read and future calls to [`read()`][read] may succeed. + /// + /// # Examples + /// + /// [`File`][crate::fs::File]s implement `Read`: + /// + /// ```no_run + /// use tokio::io::{self, AsyncReadExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let f = File::open("foo.txt").await?; + /// let mut buffer = [0; 5]; + /// + /// // read at most five bytes + /// let mut handle = f.take(5); + /// + /// handle.read(&mut buffer).await?; + /// Ok(()) + /// } + /// ``` + fn take(self, limit: u64) -> Take<Self> + where + Self: Sized, + { + take(self, limit) + } + } +} + +impl<R: AsyncRead + ?Sized> AsyncReadExt for R {} diff --git a/third_party/rust/tokio/src/io/util/async_seek_ext.rs b/third_party/rust/tokio/src/io/util/async_seek_ext.rs new file mode 100644 index 0000000000..c7243c7f3e --- /dev/null +++ b/third_party/rust/tokio/src/io/util/async_seek_ext.rs @@ -0,0 +1,60 @@ +use crate::io::seek::{seek, Seek}; +use crate::io::AsyncSeek; +use std::io::SeekFrom; + +/// An extension trait which adds utility methods to `AsyncSeek` types. 
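+ ///
+ /// A blanket implementation (see the bottom of this file) covers every type
+ /// that implements [`AsyncSeek`], so importing the trait is all that is
+ /// needed to call [`seek`](AsyncSeekExt::seek) on such a type.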
+/// +/// # Examples +/// +/// ``` +/// use std::io::{Cursor, SeekFrom}; +/// use tokio::prelude::*; +/// +/// #[tokio::main] +/// async fn main() -> io::Result<()> { +/// let mut cursor = Cursor::new(b"abcdefg"); +/// +/// // the `seek` method is defined by this trait +/// cursor.seek(SeekFrom::Start(3)).await?; +/// +/// let mut buf = [0; 1]; +/// let n = cursor.read(&mut buf).await?; +/// assert_eq!(n, 1); +/// assert_eq!(buf, [b'd']); +/// +/// Ok(()) +/// } +/// ``` +pub trait AsyncSeekExt: AsyncSeek { + /// Creates a future which will seek an IO object, and then yield the + /// new position in the object and the object itself. + /// + /// In the case of an error the buffer and the object will be discarded, with + /// the error yielded. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::fs::File; + /// use tokio::prelude::*; + /// + /// use std::io::SeekFrom; + /// + /// # async fn dox() -> std::io::Result<()> { + /// let mut file = File::open("foo.txt").await?; + /// file.seek(SeekFrom::Start(6)).await?; + /// + /// let mut contents = vec![0u8; 10]; + /// file.read_exact(&mut contents).await?; + /// # Ok(()) + /// # } + /// ``` + fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self> + where + Self: Unpin, + { + seek(self, pos) + } +} + +impl<S: AsyncSeek + ?Sized> AsyncSeekExt for S {} diff --git a/third_party/rust/tokio/src/io/util/async_write_ext.rs b/third_party/rust/tokio/src/io/util/async_write_ext.rs new file mode 100644 index 0000000000..377f4ecaf8 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/async_write_ext.rs @@ -0,0 +1,689 @@ +use crate::io::util::flush::{flush, Flush}; +use crate::io::util::shutdown::{shutdown, Shutdown}; +use crate::io::util::write::{write, Write}; +use crate::io::util::write_all::{write_all, WriteAll}; +use crate::io::util::write_buf::{write_buf, WriteBuf}; +use crate::io::util::write_int::{WriteI128, WriteI16, WriteI32, WriteI64, WriteI8}; +use crate::io::util::write_int::{WriteU128, WriteU16, WriteU32, WriteU64, WriteU8}; +use crate::io::AsyncWrite; + +use bytes::Buf; + +cfg_io_util! { + /// Defines numeric writer + macro_rules! write_impl { + ( + $( + $(#[$outer:meta])* + fn $name:ident(&mut self, n: $ty:ty) -> $($fut:ident)*; + )* + ) => { + $( + $(#[$outer])* + fn $name<'a>(&'a mut self, n: $ty) -> $($fut)*<&'a mut Self> where Self: Unpin { + $($fut)*::new(self, n) + } + )* + } + } + + /// Writes bytes to a sink. + /// + /// Implemented as an extention trait, adding utility methods to all + /// [`AsyncWrite`] types. Callers will tend to import this trait instead of + /// [`AsyncWrite`]. + /// + /// As a convenience, this trait may be imported using the [`prelude`]: + /// + /// ```no_run + /// use tokio::prelude::*; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let data = b"some bytes"; + /// + /// let mut pos = 0; + /// let mut buffer = File::create("foo.txt").await?; + /// + /// while pos < data.len() { + /// let bytes_written = buffer.write(&data[pos..]).await?; + /// pos += bytes_written; + /// } + /// + /// Ok(()) + /// } + /// ``` + /// + /// See [module][crate::io] documentation for more details. + /// + /// [`AsyncWrite`]: AsyncWrite + /// [`prelude`]: crate::prelude + pub trait AsyncWriteExt: AsyncWrite { + /// Writes a buffer into this writer, returning how many bytes were + /// written. 
+ /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write(&mut self, buf: &[u8]) -> io::Result<usize>; + /// ``` + /// + /// This function will attempt to write the entire contents of `buf`, but + /// the entire write may not succeed, or the write may also generate an + /// error. A call to `write` represents *at most one* attempt to write to + /// any wrapped object. + /// + /// # Return + /// + /// If the return value is `Ok(n)` then it must be guaranteed that `n <= + /// buf.len()`. A return value of `0` typically means that the + /// underlying object is no longer able to accept bytes and will likely + /// not be able to in the future as well, or that the buffer provided is + /// empty. + /// + /// # Errors + /// + /// Each call to `write` may generate an I/O error indicating that the + /// operation could not be completed. If an error is returned then no bytes + /// in the buffer were written to this writer. + /// + /// It is **not** considered an error if the entire buffer could not be + /// written to this writer. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::io::{self, AsyncWriteExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// + /// // Writes some prefix of the byte string, not necessarily all of it. + /// file.write(b"some bytes").await?; + /// Ok(()) + /// } + /// ``` + fn write<'a>(&'a mut self, src: &'a [u8]) -> Write<'a, Self> + where + Self: Unpin, + { + write(self, src) + } + + /// Writes a buffer into this writer, advancing the buffer's internal + /// cursor. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_buf<B: Buf>(&mut self, buf: &mut B) -> io::Result<usize>; + /// ``` + /// + /// This function will attempt to write the entire contents of `buf`, but + /// the entire write may not succeed, or the write may also generate an + /// error. After the operation completes, the buffer's + /// internal cursor is advanced by the number of bytes written. A + /// subsequent call to `write_buf` using the **same** `buf` value will + /// resume from the point that the first call to `write_buf` completed. + /// A call to `write` represents *at most one* attempt to write to any + /// wrapped object. + /// + /// # Return + /// + /// If the return value is `Ok(n)` then it must be guaranteed that `n <= + /// buf.len()`. A return value of `0` typically means that the + /// underlying object is no longer able to accept bytes and will likely + /// not be able to in the future as well, or that the buffer provided is + /// empty. + /// + /// # Errors + /// + /// Each call to `write` may generate an I/O error indicating that the + /// operation could not be completed. If an error is returned then no bytes + /// in the buffer were written to this writer. + /// + /// It is **not** considered an error if the entire buffer could not be + /// written to this writer. + /// + /// # Examples + /// + /// [`File`] implements `Read` and [`Cursor<&[u8]>`] implements [`Buf`]: + /// + /// [`File`]: crate::fs::File + /// [`Buf`]: bytes::Buf + /// + /// ```no_run + /// use tokio::io::{self, AsyncWriteExt}; + /// use tokio::fs::File; + /// + /// use bytes::Buf; + /// use std::io::Cursor; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut file = File::create("foo.txt").await?; + /// let mut buffer = Cursor::new(b"data to write"); + /// + /// // Loop until the entire contents of the buffer are written to + /// // the file. 
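+ /// // (`write_buf` advances the cursor after each successful
+ /// // write, so `has_remaining()` eventually returns `false`.)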
+ /// while buffer.has_remaining() { + /// // Writes some prefix of the byte string, not necessarily + /// // all of it. + /// file.write_buf(&mut buffer).await?; + /// } + /// + /// Ok(()) + /// } + /// ``` + fn write_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteBuf<'a, Self, B> + where + Self: Sized, + B: Buf, + { + write_buf(self, src) + } + + /// Attempts to write an entire buffer into this writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_all(&mut self, buf: &[u8]) -> io::Result<()>; + /// ``` + /// + /// This method will continuously call [`write`] until there is no more data + /// to be written. This method will not return until the entire buffer + /// has been successfully written or an error occurs. The first + /// error generated from this method will be returned. + /// + /// # Errors + /// + /// This function will return the first error that [`write`] returns. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::io::{self, AsyncWriteExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut buffer = File::create("foo.txt").await?; + /// + /// buffer.write_all(b"some bytes").await?; + /// Ok(()) + /// } + /// ``` + /// + /// [`write`]: AsyncWriteExt::write + fn write_all<'a>(&'a mut self, src: &'a [u8]) -> WriteAll<'a, Self> + where + Self: Unpin, + { + write_all(self, src) + } + + write_impl! { + /// Writes an unsigned 8-bit integer to the underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_u8(&mut self, n: u8) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write unsigned 8 bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_u8(2).await?; + /// writer.write_u8(5).await?; + /// + /// assert_eq!(writer, b"\x02\x05"); + /// Ok(()) + /// } + /// ``` + fn write_u8(&mut self, n: u8) -> WriteU8; + + /// Writes a signed 8-bit integer to the underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_i8(&mut self, n: i8) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write signed 8 bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_i8(2).await?; + /// writer.write_i8(-5).await?; + /// + /// assert_eq!(writer, b"\x02\xfb"); + /// Ok(()) + /// } + /// ``` + fn write_i8(&mut self, n: i8) -> WriteI8; + + /// Writes an unsigned 16-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_u16(&mut self, n: u16) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls.
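+ ///
+ /// In big-endian order the most significant byte is written first: `517`
+ /// is `0x0205`, so it is encoded as the bytes `[0x02, 0x05]`.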
+ /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write unsigned 16-bit integers to a `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_u16(517).await?; + /// writer.write_u16(768).await?; + /// + /// assert_eq!(writer, b"\x02\x05\x03\x00"); + /// Ok(()) + /// } + /// ``` + fn write_u16(&mut self, n: u16) -> WriteU16; + + /// Writes a signed 16-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_i16(&mut self, n: i16) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write signed 16-bit integers to a `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_i16(193).await?; + /// writer.write_i16(-132).await?; + /// + /// assert_eq!(writer, b"\x00\xc1\xff\x7c"); + /// Ok(()) + /// } + /// ``` + fn write_i16(&mut self, n: i16) -> WriteI16; + + /// Writes an unsigned 32-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_u32(&mut self, n: u32) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write unsigned 32-bit integers to a `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_u32(267).await?; + /// writer.write_u32(1205419366).await?; + /// + /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); + /// Ok(()) + /// } + /// ``` + fn write_u32(&mut self, n: u32) -> WriteU32; + + /// Writes a signed 32-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_i32(&mut self, n: i32) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. 
+ /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write signed 32-bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_i32(267).await?; + /// writer.write_i32(1205419366).await?; + /// + /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); + /// Ok(()) + /// } + /// ``` + fn write_i32(&mut self, n: i32) -> WriteI32; + + /// Writes an unsigned 64-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_u64(&mut self, n: u64) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write unsigned 64-bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_u64(918733457491587).await?; + /// writer.write_u64(143).await?; + /// + /// assert_eq!(writer, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f"); + /// Ok(()) + /// } + /// ``` + fn write_u64(&mut self, n: u64) -> WriteU64; + + /// Writes a signed 64-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_i64(&mut self, n: i64) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write signed 64-bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_i64(i64::min_value()).await?; + /// writer.write_i64(i64::max_value()).await?; + /// + /// assert_eq!(writer, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff"); + /// Ok(()) + /// } + /// ``` + fn write_i64(&mut self, n: i64) -> WriteI64; + + /// Writes an unsigned 128-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_u128(&mut self, n: u128) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write unsigned 128-bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_u128(16947640962301618749969007319746179).await?; + /// + /// assert_eq!(writer, vec![ + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 + /// ]); + /// Ok(()) + /// } + /// ``` + fn write_u128(&mut self, n: u128) -> WriteU128; + + /// Writes a signed 128-bit integer in big-endian order to the + /// underlying writer. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn write_i128(&mut self, n: i128) -> io::Result<()>; + /// ``` + /// + /// It is recommended to use a buffered writer to avoid excessive + /// syscalls. + /// + /// # Errors + /// + /// This method returns the same errors as [`AsyncWriteExt::write_all`]. + /// + /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all + /// + /// # Examples + /// + /// Write signed 128-bit integers to an `AsyncWrite`: + /// + /// ```rust + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut writer = Vec::new(); + /// + /// writer.write_i128(i128::min_value()).await?; + /// + /// assert_eq!(writer, vec![ + /// 0x80, 0, 0, 0, 0, 0, 0, 0, + /// 0, 0, 0, 0, 0, 0, 0, 0 + /// ]); + /// Ok(()) + /// } + /// ``` + fn write_i128(&mut self, n: i128) -> WriteI128; + } + + /// Flushes this output stream, ensuring that all intermediately buffered + /// contents reach their destination. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn flush(&mut self) -> io::Result<()>; + /// ``` + /// + /// # Errors + /// + /// It is considered an error if not all bytes could be written due to + /// I/O errors or EOF being reached. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::io::{self, BufWriter, AsyncWriteExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let f = File::create("foo.txt").await?; + /// let mut buffer = BufWriter::new(f); + /// + /// buffer.write_all(b"some bytes").await?; + /// buffer.flush().await?; + /// Ok(()) + /// } + /// ``` + fn flush(&mut self) -> Flush<'_, Self> + where + Self: Unpin, + { + flush(self) + } + + /// Shuts down the output stream, ensuring that the value can be dropped + /// cleanly. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn shutdown(&mut self) -> io::Result<()>; + /// ``` + /// + /// Similar to [`flush`], all intermediately buffered data is written to the + /// underlying stream. Once the operation completes, the caller should + /// no longer attempt to write to the stream. For example, the + /// `TcpStream` implementation will issue a `shutdown(Write)` syscall.
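+ ///
+ /// For example, to signal end-of-stream on the write half of a TCP
+ /// connection (a sketch; the address is illustrative):
+ ///
+ /// ```no_run
+ /// use tokio::io::AsyncWriteExt;
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.write_all(b"request").await?;
+ ///
+ /// // Flushes pending data and sends a FIN; the peer sees EOF on read.
+ /// // The fully qualified form is used because `TcpStream` also has an
+ /// // inherent `shutdown(std::net::Shutdown)` method.
+ /// AsyncWriteExt::shutdown(&mut stream).await?;
+ /// # Ok(())
+ /// # }
+ /// ```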
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::io::{self, BufWriter, AsyncWriteExt}; + /// use tokio::fs::File; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let f = File::create("foo.txt").await?; + /// let mut buffer = BufWriter::new(f); + /// + /// buffer.write_all(b"some bytes").await?; + /// buffer.shutdown().await?; + /// Ok(()) + /// } + /// ``` + fn shutdown(&mut self) -> Shutdown<'_, Self> + where + Self: Unpin, + { + shutdown(self) + } + } +} + +impl<W: AsyncWrite + ?Sized> AsyncWriteExt for W {} diff --git a/third_party/rust/tokio/src/io/util/buf_reader.rs b/third_party/rust/tokio/src/io/util/buf_reader.rs new file mode 100644 index 0000000000..0177c0e344 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/buf_reader.rs @@ -0,0 +1,194 @@ +use crate::io::util::DEFAULT_BUF_SIZE; +use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; + +use pin_project_lite::pin_project; +use std::io::{self, Read}; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{cmp, fmt}; + +pin_project! { + /// The `BufReader` struct adds buffering to any reader. + /// + /// It can be excessively inefficient to work directly with an [`AsyncRead`] + /// instance. A `BufReader` performs large, infrequent reads on the underlying + /// [`AsyncRead`] and maintains an in-memory buffer of the results. + /// + /// `BufReader` can improve the speed of programs that make *small* and + /// *repeated* read calls to the same file or network socket. It does not + /// help when reading very large amounts at once, or reading just one or a few + /// times. It also provides no advantage when reading from a source that is + /// already in memory, like a `Vec<u8>`. + /// + /// When the `BufReader` is dropped, the contents of its buffer will be + /// discarded. Creating multiple instances of a `BufReader` on the same + /// stream can cause data loss. + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct BufReader<R> { + #[pin] + pub(super) inner: R, + pub(super) buf: Box<[u8]>, + pub(super) pos: usize, + pub(super) cap: usize, + } +} + +impl<R: AsyncRead> BufReader<R> { + /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB, + /// but may change in the future. + pub fn new(inner: R) -> Self { + Self::with_capacity(DEFAULT_BUF_SIZE, inner) + } + + /// Creates a new `BufReader` with the specified buffer capacity. + pub fn with_capacity(capacity: usize, inner: R) -> Self { + unsafe { + let mut buffer = Vec::with_capacity(capacity); + buffer.set_len(capacity); + + { + // Convert to MaybeUninit + let b = &mut *(&mut buffer[..] as *mut [u8] as *mut [MaybeUninit<u8>]); + inner.prepare_uninitialized_buffer(b); + } + Self { + inner, + buf: buffer.into_boxed_slice(), + pos: 0, + cap: 0, + } + } + } + + /// Gets a reference to the underlying reader. + /// + /// It is inadvisable to directly read from the underlying reader. + pub fn get_ref(&self) -> &R { + &self.inner + } + + /// Gets a mutable reference to the underlying reader. + /// + /// It is inadvisable to directly read from the underlying reader. + pub fn get_mut(&mut self) -> &mut R { + &mut self.inner + } + + /// Gets a pinned mutable reference to the underlying reader. + /// + /// It is inadvisable to directly read from the underlying reader. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { + self.project().inner + } + + /// Consumes this `BufReader`, returning the underlying reader.
+ /// + /// Note that any leftover data in the internal buffer is lost. + pub fn into_inner(self) -> R { + self.inner + } + + /// Returns a reference to the internally buffered data. + /// + /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. + pub fn buffer(&self) -> &[u8] { + &self.buf[self.pos..self.cap] + } + + /// Invalidates all data in the internal buffer. + #[inline] + fn discard_buffer(self: Pin<&mut Self>) { + let me = self.project(); + *me.pos = 0; + *me.cap = 0; + } +} + +impl<R: AsyncRead> AsyncRead for BufReader<R> { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + // If we don't have any buffered data and we're doing a massive read + // (larger than our internal buffer), bypass our internal buffer + // entirely. + if self.pos == self.cap && buf.len() >= self.buf.len() { + let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf)); + self.discard_buffer(); + return Poll::Ready(res); + } + let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?; + let nread = rem.read(buf)?; + self.consume(nread); + Poll::Ready(Ok(nread)) + } + + // we can't skip unconditionally because of the large buffer case in read. + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } +} + +impl<R: AsyncRead> AsyncBufRead for BufReader<R> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + let me = self.project(); + + // If we've reached the end of our internal buffer then we need to fetch + // some more data from the underlying reader. + // Branch using `>=` instead of the more correct `==` + // to tell the compiler that the pos..cap slice is always valid. + if *me.pos >= *me.cap { + debug_assert!(*me.pos == *me.cap); + *me.cap = ready!(me.inner.poll_read(cx, me.buf))?; + *me.pos = 0; + } + Poll::Ready(Ok(&me.buf[*me.pos..*me.cap])) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + let me = self.project(); + *me.pos = cmp::min(*me.pos + amt, *me.cap); + } +} + +impl<R: AsyncRead + AsyncWrite> AsyncWrite for BufReader<R> { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.get_pin_mut().poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.get_pin_mut().poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.get_pin_mut().poll_shutdown(cx) + } +} + +impl<R: fmt::Debug> fmt::Debug for BufReader<R> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BufReader") + .field("reader", &self.inner) + .field( + "buffer", + &format_args!("{}/{}", self.cap - self.pos, self.buf.len()), + ) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<BufReader<()>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/buf_stream.rs b/third_party/rust/tokio/src/io/util/buf_stream.rs new file mode 100644 index 0000000000..a56a4517fa --- /dev/null +++ b/third_party/rust/tokio/src/io/util/buf_stream.rs @@ -0,0 +1,169 @@ +use crate::io::util::{BufReader, BufWriter}; +use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; + +use pin_project_lite::pin_project; +use std::io; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! 
{ + /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output. + /// + /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`] + /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall + /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`] + /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps + /// one in the other so that both directions are buffered. See their documentation for details. + #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct BufStream<RW> { + #[pin] + inner: BufReader<BufWriter<RW>>, + } +} + +impl<RW: AsyncRead + AsyncWrite> BufStream<RW> { + /// Wraps a type in both [`BufWriter`] and [`BufReader`]. + /// + /// See the documentation for those types and [`BufStream`] for details. + pub fn new(stream: RW) -> BufStream<RW> { + BufStream { + inner: BufReader::new(BufWriter::new(stream)), + } + } + + /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`] + /// capacity. + /// + /// See the documentation for those types and [`BufStream`] for details. + pub fn with_capacity( + reader_capacity: usize, + writer_capacity: usize, + stream: RW, + ) -> BufStream<RW> { + BufStream { + inner: BufReader::with_capacity( + reader_capacity, + BufWriter::with_capacity(writer_capacity, stream), + ), + } + } + + /// Gets a reference to the underlying I/O object. + /// + /// It is inadvisable to directly read from the underlying I/O object. + pub fn get_ref(&self) -> &RW { + self.inner.get_ref().get_ref() + } + + /// Gets a mutable reference to the underlying I/O object. + /// + /// It is inadvisable to directly read from the underlying I/O object. + pub fn get_mut(&mut self) -> &mut RW { + self.inner.get_mut().get_mut() + } + + /// Gets a pinned mutable reference to the underlying I/O object. + /// + /// It is inadvisable to directly read from the underlying I/O object. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> { + self.project().inner.get_pin_mut().get_pin_mut() + } + + /// Consumes this `BufStream`, returning the underlying I/O object. + /// + /// Note that any leftover data in the internal buffer is lost. 
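+ /// To avoid losing buffered writes, flush first (a sketch; `buffered`
+ /// is a placeholder name for any `BufStream` value):
+ ///
+ /// ```ignore
+ /// buffered.flush().await?;
+ /// let stream = buffered.into_inner();
+ /// ```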
+ pub fn into_inner(self) -> RW { + self.inner.into_inner().into_inner() + } +} + +impl<RW> From<BufReader<BufWriter<RW>>> for BufStream<RW> { + fn from(b: BufReader<BufWriter<RW>>) -> Self { + BufStream { inner: b } + } +} + +impl<RW> From<BufWriter<BufReader<RW>>> for BufStream<RW> { + fn from(b: BufWriter<BufReader<RW>>) -> Self { + // we need to "invert" the reader and writer + let BufWriter { + inner: + BufReader { + inner, + buf: rbuf, + pos, + cap, + }, + buf: wbuf, + written, + } = b; + + BufStream { + inner: BufReader { + inner: BufWriter { + inner, + buf: wbuf, + written, + }, + buf: rbuf, + pos, + cap, + }, + } + } +} + +impl<RW: AsyncRead + AsyncWrite> AsyncWrite for BufStream<RW> { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.project().inner.poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.project().inner.poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + self.project().inner.poll_shutdown(cx) + } +} + +impl<RW: AsyncRead + AsyncWrite> AsyncRead for BufStream<RW> { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + self.project().inner.poll_read(cx, buf) + } + + // we can't skip unconditionally because of the large buffer case in read. + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } +} + +impl<RW: AsyncRead + AsyncWrite> AsyncBufRead for BufStream<RW> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + self.project().inner.poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.project().inner.consume(amt) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<BufStream<()>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/buf_writer.rs b/third_party/rust/tokio/src/io/util/buf_writer.rs new file mode 100644 index 0000000000..efd053ebac --- /dev/null +++ b/third_party/rust/tokio/src/io/util/buf_writer.rs @@ -0,0 +1,192 @@ +use crate::io::util::DEFAULT_BUF_SIZE; +use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite}; + +use pin_project_lite::pin_project; +use std::fmt; +use std::io::{self, Write}; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Wraps a writer and buffers its output. + /// + /// It can be excessively inefficient to work directly with something that + /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and + /// writes it to an underlying writer in large, infrequent batches. + /// + /// `BufWriter` can improve the speed of programs that make *small* and + /// *repeated* write calls to the same file or network socket. It does not + /// help when writing very large amounts at once, or writing just one or a few + /// times. It also provides no advantage when writing to a destination that is + /// in memory, like a `Vec<u8>`. + /// + /// When the `BufWriter` is dropped, the contents of its buffer will be + /// discarded. Creating multiple instances of a `BufWriter` on the same + /// stream can cause data loss. If you need to write out the contents of its + /// buffer, you must manually call flush before the writer is dropped. 
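+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of that pattern (any `AsyncWrite` will do for the
+    /// underlying writer):
+    ///
+    /// ```
+    /// use tokio::io::{AsyncWrite, AsyncWriteExt, BufWriter};
+    ///
+    /// # async fn dox(writer: impl AsyncWrite + Unpin) -> std::io::Result<()> {
+    /// let mut buffered = BufWriter::new(writer);
+    /// buffered.write_all(b"hello").await?; // lands in the in-memory buffer
+    /// buffered.flush().await?; // actually reaches the underlying writer
+    /// # Ok(())
+    /// # }
+    /// ```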
+ /// + /// [`AsyncWrite`]: AsyncWrite + /// [`flush`]: super::AsyncWriteExt::flush + /// + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct BufWriter<W> { + #[pin] + pub(super) inner: W, + pub(super) buf: Vec<u8>, + pub(super) written: usize, + } +} + +impl<W: AsyncWrite> BufWriter<W> { + /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB, + /// but may change in the future. + pub fn new(inner: W) -> Self { + Self::with_capacity(DEFAULT_BUF_SIZE, inner) + } + + /// Creates a new `BufWriter` with the specified buffer capacity. + pub fn with_capacity(cap: usize, inner: W) -> Self { + Self { + inner, + buf: Vec::with_capacity(cap), + written: 0, + } + } + + fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + let mut me = self.project(); + + let len = me.buf.len(); + let mut ret = Ok(()); + while *me.written < len { + match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) { + Ok(0) => { + ret = Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to write the buffered data", + )); + break; + } + Ok(n) => *me.written += n, + Err(e) => { + ret = Err(e); + break; + } + } + } + if *me.written > 0 { + me.buf.drain(..*me.written); + } + *me.written = 0; + Poll::Ready(ret) + } + + /// Gets a reference to the underlying writer. + pub fn get_ref(&self) -> &W { + &self.inner + } + + /// Gets a mutable reference to the underlying writer. + /// + /// It is inadvisable to directly write to the underlying writer. + pub fn get_mut(&mut self) -> &mut W { + &mut self.inner + } + + /// Gets a pinned mutable reference to the underlying writer. + /// + /// It is inadvisable to directly write to the underlying writer. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> { + self.project().inner + } + + /// Consumes this `BufWriter`, returning the underlying writer. + /// + /// Note that any leftover data in the internal buffer is lost. + pub fn into_inner(self) -> W { + self.inner + } + + /// Returns a reference to the internally buffered data. + pub fn buffer(&self) -> &[u8] { + &self.buf + } +} + +impl<W: AsyncWrite> AsyncWrite for BufWriter<W> { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + if self.buf.len() + buf.len() > self.buf.capacity() { + ready!(self.as_mut().flush_buf(cx))?; + } + + let me = self.project(); + if buf.len() >= me.buf.capacity() { + me.inner.poll_write(cx, buf) + } else { + Poll::Ready(me.buf.write(buf)) + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + ready!(self.as_mut().flush_buf(cx))?; + self.get_pin_mut().poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + ready!(self.as_mut().flush_buf(cx))?; + self.get_pin_mut().poll_shutdown(cx) + } +} + +impl<W: AsyncWrite + AsyncRead> AsyncRead for BufWriter<W> { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + self.get_pin_mut().poll_read(cx, buf) + } + + // we can't skip unconditionally because of the large buffer case in read. 
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + self.get_ref().prepare_uninitialized_buffer(buf) + } +} + +impl<W: AsyncWrite + AsyncBufRead> AsyncBufRead for BufWriter<W> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + self.get_pin_mut().poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.get_pin_mut().consume(amt) + } +} + +impl<W: fmt::Debug> fmt::Debug for BufWriter<W> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BufWriter") + .field("writer", &self.inner) + .field( + "buffer", + &format_args!("{}/{}", self.buf.len(), self.buf.capacity()), + ) + .field("written", &self.written) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<BufWriter<()>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/chain.rs b/third_party/rust/tokio/src/io/util/chain.rs new file mode 100644 index 0000000000..bc76af341d --- /dev/null +++ b/third_party/rust/tokio/src/io/util/chain.rs @@ -0,0 +1,141 @@ +use crate::io::{AsyncBufRead, AsyncRead}; + +use pin_project_lite::pin_project; +use std::fmt; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Stream for the [`chain`](super::AsyncReadExt::chain) method. + #[must_use = "streams do nothing unless polled"] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct Chain<T, U> { + #[pin] + first: T, + #[pin] + second: U, + done_first: bool, + } +} + +pub(super) fn chain<T, U>(first: T, second: U) -> Chain<T, U> +where + T: AsyncRead, + U: AsyncRead, +{ + Chain { + first, + second, + done_first: false, + } +} + +impl<T, U> Chain<T, U> +where + T: AsyncRead, + U: AsyncRead, +{ + /// Gets references to the underlying readers in this `Chain`. + pub fn get_ref(&self) -> (&T, &U) { + (&self.first, &self.second) + } + + /// Gets mutable references to the underlying readers in this `Chain`. + /// + /// Care should be taken to avoid modifying the internal I/O state of the + /// underlying readers as doing so may corrupt the internal state of this + /// `Chain`. + pub fn get_mut(&mut self) -> (&mut T, &mut U) { + (&mut self.first, &mut self.second) + } + + /// Gets pinned mutable references to the underlying readers in this `Chain`. + /// + /// Care should be taken to avoid modifying the internal I/O state of the + /// underlying readers as doing so may corrupt the internal state of this + /// `Chain`. + pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) { + let me = self.project(); + (me.first, me.second) + } + + /// Consumes the `Chain`, returning the wrapped readers. + pub fn into_inner(self) -> (T, U) { + (self.first, self.second) + } +} + +impl<T, U> fmt::Debug for Chain<T, U> +where + T: fmt::Debug, + U: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Chain") + .field("t", &self.first) + .field("u", &self.second) + .finish() + } +} + +impl<T, U> AsyncRead for Chain<T, U> +where + T: AsyncRead, + U: AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + let me = self.project(); + + if !*me.done_first { + match ready!(me.first.poll_read(cx, buf)?) 
{
+                0 if !buf.is_empty() => *me.done_first = true,
+                n => return Poll::Ready(Ok(n)),
+            }
+        }
+        me.second.poll_read(cx, buf)
+    }
+}
+
+impl<T, U> AsyncBufRead for Chain<T, U>
+where
+    T: AsyncBufRead,
+    U: AsyncBufRead,
+{
+    fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+        let me = self.project();
+
+        if !*me.done_first {
+            match ready!(me.first.poll_fill_buf(cx)?) {
+                buf if buf.is_empty() => {
+                    *me.done_first = true;
+                }
+                buf => return Poll::Ready(Ok(buf)),
+            }
+        }
+        me.second.poll_fill_buf(cx)
+    }
+
+    fn consume(self: Pin<&mut Self>, amt: usize) {
+        let me = self.project();
+        if !*me.done_first {
+            me.first.consume(amt)
+        } else {
+            me.second.consume(amt)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn assert_unpin() {
+        crate::is_unpin::<Chain<(), ()>>();
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/copy.rs b/third_party/rust/tokio/src/io/util/copy.rs
new file mode 100644
index 0000000000..8e0058c1c2
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/copy.rs
@@ -0,0 +1,135 @@
+use crate::io::{AsyncRead, AsyncWrite};
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+    /// A future that asynchronously copies the entire contents of a reader into a
+    /// writer.
+    ///
+    /// This struct is generally created by calling [`copy`][copy]. Please
+    /// see the documentation of `copy()` for more details.
+    ///
+    /// [copy]: copy()
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct Copy<'a, R: ?Sized, W: ?Sized> {
+        reader: &'a mut R,
+        read_done: bool,
+        writer: &'a mut W,
+        pos: usize,
+        cap: usize,
+        amt: u64,
+        buf: Box<[u8]>,
+    }
+
+    /// Asynchronously copies the entire contents of a reader into a writer.
+    ///
+    /// This function returns a future that will continuously read data from
+    /// `reader` and then write it into `writer` in a streaming fashion until
+    /// `reader` returns EOF.
+    ///
+    /// On success, the total number of bytes that were copied from `reader` to
+    /// `writer` is returned.
+    ///
+    /// This is an asynchronous version of [`std::io::copy`][std].
+    ///
+    /// [std]: std::io::copy
+    ///
+    /// # Errors
+    ///
+    /// The returned future will return an error immediately if any call to
+    /// `poll_read` or `poll_write` returns an error.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::io;
+    ///
+    /// # async fn dox() -> std::io::Result<()> {
+    /// let mut reader: &[u8] = b"hello";
+    /// let mut writer: Vec<u8> = vec![];
+    ///
+    /// io::copy(&mut reader, &mut writer).await?;
+    ///
+    /// assert_eq!(&b"hello"[..], &writer[..]);
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> Copy<'a, R, W>
+    where
+        R: AsyncRead + Unpin + ?Sized,
+        W: AsyncWrite + Unpin + ?Sized,
+    {
+        Copy {
+            reader,
+            read_done: false,
+            writer,
+            amt: 0,
+            pos: 0,
+            cap: 0,
+            buf: Box::new([0; 2048]),
+        }
+    }
+}
+
+impl<R, W> Future for Copy<'_, R, W>
+where
+    R: AsyncRead + Unpin + ?Sized,
+    W: AsyncWrite + Unpin + ?Sized,
+{
+    type Output = io::Result<u64>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+        loop {
+            // If our buffer is empty, then we need to read some data to
+            // continue.
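+            // Each pass through this loop performs at most one read; the
+            // `while` below then drains `pos..cap` into the writer before the
+            // next refill, and the final flush happens only after EOF.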
+ if self.pos == self.cap && !self.read_done { + let me = &mut *self; + let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf))?; + if n == 0 { + self.read_done = true; + } else { + self.pos = 0; + self.cap = n; + } + } + + // If our buffer has some data, let's write it out! + while self.pos < self.cap { + let me = &mut *self; + let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, &me.buf[me.pos..me.cap]))?; + if i == 0 { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::WriteZero, + "write zero byte into writer", + ))); + } else { + self.pos += i; + self.amt += i as u64; + } + } + + // If we've written all the data and we've seen EOF, flush out the + // data and finish the transfer. + if self.pos == self.cap && self.read_done { + let me = &mut *self; + ready!(Pin::new(&mut *me.writer).poll_flush(cx))?; + return Poll::Ready(Ok(self.amt)); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<Copy<'_, PhantomPinned, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/empty.rs b/third_party/rust/tokio/src/io/util/empty.rs new file mode 100644 index 0000000000..121102c78f --- /dev/null +++ b/third_party/rust/tokio/src/io/util/empty.rs @@ -0,0 +1,84 @@ +use crate::io::{AsyncBufRead, AsyncRead}; + +use std::fmt; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// An async reader which is always at EOF. + /// + /// This struct is generally created by calling [`empty`]. Please see + /// the documentation of [`empty()`][`empty`] for more details. + /// + /// This is an asynchronous version of [`std::io::empty`][std]. + /// + /// [`empty`]: fn@empty + /// [std]: std::io::empty + pub struct Empty { + _p: (), + } + + /// Creates a new empty async reader. + /// + /// All reads from the returned reader will return `Poll::Ready(Ok(0))`. + /// + /// This is an asynchronous version of [`std::io::empty`][std]. + /// + /// [std]: std::io::empty + /// + /// # Examples + /// + /// A slightly sad example of not reading anything into a buffer: + /// + /// ``` + /// use tokio::io::{self, AsyncReadExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut buffer = String::new(); + /// io::empty().read_to_string(&mut buffer).await.unwrap(); + /// assert!(buffer.is_empty()); + /// } + /// ``` + pub fn empty() -> Empty { + Empty { _p: () } + } +} + +impl AsyncRead for Empty { + #[inline] + fn poll_read( + self: Pin<&mut Self>, + _: &mut Context<'_>, + _: &mut [u8], + ) -> Poll<io::Result<usize>> { + Poll::Ready(Ok(0)) + } +} + +impl AsyncBufRead for Empty { + #[inline] + fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + Poll::Ready(Ok(&[])) + } + + #[inline] + fn consume(self: Pin<&mut Self>, _: usize) {} +} + +impl fmt::Debug for Empty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Empty { .. }") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<Empty>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/flush.rs b/third_party/rust/tokio/src/io/util/flush.rs new file mode 100644 index 0000000000..1465f30448 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/flush.rs @@ -0,0 +1,47 @@ +use crate::io::AsyncWrite; + +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// A future used to fully flush an I/O object. 
+ /// + /// Created by the [`AsyncWriteExt::flush`] function. + #[derive(Debug)] + pub struct Flush<'a, A: ?Sized> { + a: &'a mut A, + } +} + +/// Creates a future which will entirely flush an I/O object. +pub(super) fn flush<A>(a: &mut A) -> Flush<'_, A> +where + A: AsyncWrite + Unpin + ?Sized, +{ + Flush { a } +} + +impl<A> Future for Flush<'_, A> +where + A: AsyncWrite + Unpin + ?Sized, +{ + type Output = io::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = &mut *self; + Pin::new(&mut *me.a).poll_flush(cx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<Flush<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/lines.rs b/third_party/rust/tokio/src/io/util/lines.rs new file mode 100644 index 0000000000..f0e75de4b1 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/lines.rs @@ -0,0 +1,114 @@ +use crate::io::util::read_line::read_line_internal; +use crate::io::AsyncBufRead; + +use pin_project_lite::pin_project; +use std::io; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Stream for the [`lines`](crate::io::AsyncBufReadExt::lines) method. + #[derive(Debug)] + #[must_use = "streams do nothing unless polled"] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct Lines<R> { + #[pin] + reader: R, + buf: String, + bytes: Vec<u8>, + read: usize, + } +} + +pub(crate) fn lines<R>(reader: R) -> Lines<R> +where + R: AsyncBufRead, +{ + Lines { + reader, + buf: String::new(), + bytes: Vec::new(), + read: 0, + } +} + +impl<R> Lines<R> +where + R: AsyncBufRead + Unpin, +{ + /// Returns the next line in the stream. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::AsyncBufRead; + /// use tokio::io::AsyncBufReadExt; + /// + /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { + /// let mut lines = my_buf_read.lines(); + /// + /// while let Some(line) = lines.next_line().await? 
{
+    ///     println!("length = {}", line.len())
+    /// }
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn next_line(&mut self) -> io::Result<Option<String>> {
+        use crate::future::poll_fn;
+
+        poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await
+    }
+}
+
+impl<R> Lines<R>
+where
+    R: AsyncBufRead,
+{
+    #[doc(hidden)]
+    pub fn poll_next_line(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<Option<String>>> {
+        let me = self.project();
+
+        let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?;
+
+        if n == 0 && me.buf.is_empty() {
+            return Poll::Ready(Ok(None));
+        }
+
+        if me.buf.ends_with('\n') {
+            me.buf.pop();
+
+            if me.buf.ends_with('\r') {
+                me.buf.pop();
+            }
+        }
+
+        Poll::Ready(Ok(Some(mem::replace(me.buf, String::new()))))
+    }
+}
+
+#[cfg(feature = "stream")]
+impl<R: AsyncBufRead> crate::stream::Stream for Lines<R> {
+    type Item = io::Result<String>;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        Poll::Ready(match ready!(self.poll_next_line(cx)) {
+            Ok(Some(line)) => Some(Ok(line)),
+            Ok(None) => None,
+            Err(err) => Some(Err(err)),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn assert_unpin() {
+        crate::is_unpin::<Lines<()>>();
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/mod.rs b/third_party/rust/tokio/src/io/util/mod.rs
new file mode 100644
index 0000000000..c4754abf05
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/mod.rs
@@ -0,0 +1,88 @@
+#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+
+cfg_io_util! {
+    mod async_buf_read_ext;
+    pub use async_buf_read_ext::AsyncBufReadExt;
+
+    mod async_read_ext;
+    pub use async_read_ext::AsyncReadExt;
+
+    mod async_seek_ext;
+    pub use async_seek_ext::AsyncSeekExt;
+
+    mod async_write_ext;
+    pub use async_write_ext::AsyncWriteExt;
+
+    mod buf_reader;
+    pub use buf_reader::BufReader;
+
+    mod buf_stream;
+    pub use buf_stream::BufStream;
+
+    mod buf_writer;
+    pub use buf_writer::BufWriter;
+
+    mod chain;
+
+    mod copy;
+    pub use copy::{copy, Copy};
+
+    mod empty;
+    pub use empty::{empty, Empty};
+
+    mod flush;
+
+    mod lines;
+    pub use lines::Lines;
+
+    mod read;
+    mod read_buf;
+    mod read_exact;
+    mod read_int;
+    mod read_line;
+
+    mod read_to_end;
+    cfg_process! {
+        pub(crate) use read_to_end::read_to_end;
+    }
+
+    mod read_to_string;
+    mod read_until;
+
+    mod repeat;
+    pub use repeat::{repeat, Repeat};
+
+    mod shutdown;
+
+    mod sink;
+    pub use sink::{sink, Sink};
+
+    mod split;
+    pub use split::Split;
+
+    cfg_stream! {
+        mod stream_reader;
+        pub use stream_reader::{stream_reader, StreamReader};
+    }
+
+    mod take;
+    pub use take::Take;
+
+    mod write;
+    mod write_all;
+    mod write_buf;
+    mod write_int;
+
+
+    // used by `BufReader` and `BufWriter`
+    // https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1
+    const DEFAULT_BUF_SIZE: usize = 8 * 1024;
+}
+
+cfg_not_io_util! {
+    cfg_process! {
+        mod read_to_end;
+        // Used by process
+        pub(crate) use read_to_end::read_to_end;
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/read.rs b/third_party/rust/tokio/src/io/util/read.rs
new file mode 100644
index 0000000000..a8ca370ea8
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read.rs
@@ -0,0 +1,55 @@
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::io;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Tries to read some bytes directly into the given `buf` in an asynchronous
+/// manner, returning a future type.
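+///
+/// This is the future behind the public [`read`](crate::io::AsyncReadExt::read)
+/// method; a minimal usage sketch:
+///
+/// ```
+/// use tokio::io::AsyncReadExt;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let mut reader: &[u8] = b"hello";
+/// let mut buf = [0u8; 4];
+/// let n = reader.read(&mut buf).await?;
+/// assert!(n <= buf.len());
+/// # Ok(())
+/// # }
+/// ```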
+///
+/// The returned future will resolve to the number of bytes read once the read
+/// operation is completed.
+pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R>
+where
+    R: AsyncRead + Unpin + ?Sized,
+{
+    Read { reader, buf }
+}
+
+cfg_io_util! {
+    /// A future which can be used to easily read the available number of bytes
+    /// to fill a buffer.
+    ///
+    /// Created by the [`read`] function.
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct Read<'a, R: ?Sized> {
+        reader: &'a mut R,
+        buf: &'a mut [u8],
+    }
+}
+
+impl<R> Future for Read<'_, R>
+where
+    R: AsyncRead + Unpin + ?Sized,
+{
+    type Output = io::Result<usize>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+        let me = &mut *self;
+        Pin::new(&mut *me.reader).poll_read(cx, me.buf)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn assert_unpin() {
+        use std::marker::PhantomPinned;
+        crate::is_unpin::<Read<'_, PhantomPinned>>();
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_buf.rs b/third_party/rust/tokio/src/io/util/read_buf.rs
new file mode 100644
index 0000000000..550499b933
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_buf.rs
@@ -0,0 +1,41 @@
+use crate::io::AsyncRead;
+
+use bytes::BufMut;
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B>
+where
+    R: AsyncRead,
+    B: BufMut,
+{
+    ReadBuf { reader, buf }
+}
+
+cfg_io_util! {
+    /// Future returned by [`read_buf`](AsyncReadExt::read_buf).
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct ReadBuf<'a, R, B> {
+        reader: &'a mut R,
+        buf: &'a mut B,
+    }
+}
+
+impl<R, B> Future for ReadBuf<'_, R, B>
+where
+    R: AsyncRead,
+    B: BufMut,
+{
+    type Output = io::Result<usize>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+        // safety: no data is moved from self
+        unsafe {
+            let me = self.get_unchecked_mut();
+            Pin::new_unchecked(&mut *me.reader).poll_read_buf(cx, &mut me.buf)
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_exact.rs b/third_party/rust/tokio/src/io/util/read_exact.rs
new file mode 100644
index 0000000000..d6983c9953
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_exact.rs
@@ -0,0 +1,76 @@
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::io;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// A future which can be used to easily read exactly enough bytes to fill
+/// a buffer.
+///
+/// Created by [`AsyncRead::read_exact`].
+pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A>
+where
+    A: AsyncRead + Unpin + ?Sized,
+{
+    ReadExact {
+        reader,
+        buf,
+        pos: 0,
+    }
+}
+
+cfg_io_util! {
+    /// Creates a future which will read exactly enough bytes to fill `buf`,
+    /// returning an error if EOF is hit sooner. On success, the number of
+    /// bytes read is returned.
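+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch via the [`read_exact`](crate::io::AsyncReadExt::read_exact)
+    /// method, which returns this future:
+    ///
+    /// ```
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox() -> std::io::Result<()> {
+    /// let mut reader: &[u8] = b"hello";
+    /// let mut buf = [0u8; 5];
+    /// reader.read_exact(&mut buf).await?;
+    /// assert_eq!(&buf, b"hello");
+    /// # Ok(())
+    /// # }
+    /// ```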
+    #[derive(Debug)]
+    #[must_use = "futures do nothing unless you `.await` or poll them"]
+    pub struct ReadExact<'a, A: ?Sized> {
+        reader: &'a mut A,
+        buf: &'a mut [u8],
+        pos: usize,
+    }
+}
+
+fn eof() -> io::Error {
+    io::Error::new(io::ErrorKind::UnexpectedEof, "early eof")
+}
+
+impl<A> Future for ReadExact<'_, A>
+where
+    A: AsyncRead + Unpin + ?Sized,
+{
+    type Output = io::Result<usize>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+        loop {
+            // If the buffer is not yet full, read some more data into it.
+            if self.pos < self.buf.len() {
+                let me = &mut *self;
+                let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf[me.pos..]))?;
+                me.pos += n;
+                if n == 0 {
+                    return Err(eof()).into();
+                }
+            }
+
+            if self.pos >= self.buf.len() {
+                return Poll::Ready(Ok(self.pos));
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn assert_unpin() {
+        use std::marker::PhantomPinned;
+        crate::is_unpin::<ReadExact<'_, PhantomPinned>>();
+    }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_int.rs b/third_party/rust/tokio/src/io/util/read_int.rs
new file mode 100644
index 0000000000..9dc4402f88
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_int.rs
@@ -0,0 +1,123 @@
+use crate::io::AsyncRead;
+
+use bytes::Buf;
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::io;
+use std::io::ErrorKind::UnexpectedEof;
+use std::mem::size_of;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+macro_rules! reader {
+    ($name:ident, $ty:ty, $reader:ident) => {
+        reader!($name, $ty, $reader, size_of::<$ty>());
+    };
+    ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => {
+        pin_project! {
+            #[doc(hidden)]
+            pub struct $name<R> {
+                #[pin]
+                src: R,
+                buf: [u8; $bytes],
+                read: u8,
+            }
+        }
+
+        impl<R> $name<R> {
+            pub(crate) fn new(src: R) -> Self {
+                $name {
+                    src,
+                    buf: [0; $bytes],
+                    read: 0,
+                }
+            }
+        }
+
+        impl<R> Future for $name<R>
+        where
+            R: AsyncRead,
+        {
+            type Output = io::Result<$ty>;
+
+            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+                let mut me = self.project();
+
+                if *me.read == $bytes as u8 {
+                    return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..])));
+                }
+
+                while *me.read < $bytes as u8 {
+                    *me.read += match me
+                        .src
+                        .as_mut()
+                        .poll_read(cx, &mut me.buf[*me.read as usize..])
+                    {
+                        Poll::Pending => return Poll::Pending,
+                        Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
+                        Poll::Ready(Ok(0)) => {
+                            return Poll::Ready(Err(UnexpectedEof.into()));
+                        }
+                        Poll::Ready(Ok(n)) => n as u8,
+                    };
+                }
+
+                let num = Buf::$reader(&mut &me.buf[..]);
+
+                Poll::Ready(Ok(num))
+            }
+        }
+    };
+}
+
+macro_rules! reader8 {
+    ($name:ident, $ty:ty) => {
+        pin_project! 
{ + /// Future returned from `read_u8` + #[doc(hidden)] + pub struct $name<R> { + #[pin] + reader: R, + } + } + + impl<R> $name<R> { + pub(crate) fn new(reader: R) -> $name<R> { + $name { reader } + } + } + + impl<R> Future for $name<R> + where + R: AsyncRead, + { + type Output = io::Result<$ty>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = self.project(); + + let mut buf = [0; 1]; + match me.reader.poll_read(cx, &mut buf[..]) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), + Poll::Ready(Ok(0)) => Poll::Ready(Err(UnexpectedEof.into())), + Poll::Ready(Ok(1)) => Poll::Ready(Ok(buf[0] as $ty)), + Poll::Ready(Ok(_)) => unreachable!(), + } + } + } + }; +} + +reader8!(ReadU8, u8); +reader8!(ReadI8, i8); + +reader!(ReadU16, u16, get_u16); +reader!(ReadU32, u32, get_u32); +reader!(ReadU64, u64, get_u64); +reader!(ReadU128, u128, get_u128); + +reader!(ReadI16, i16, get_i16); +reader!(ReadI32, i32, get_i32); +reader!(ReadI64, i64, get_i64); +reader!(ReadI128, i128, get_i128); diff --git a/third_party/rust/tokio/src/io/util/read_line.rs b/third_party/rust/tokio/src/io/util/read_line.rs new file mode 100644 index 0000000000..c5ee597486 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/read_line.rs @@ -0,0 +1,82 @@ +use crate::io::util::read_until::read_until_internal; +use crate::io::AsyncBufRead; + +use std::future::Future; +use std::io; +use std::mem; +use std::pin::Pin; +use std::str; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct ReadLine<'a, R: ?Sized> { + reader: &'a mut R, + buf: &'a mut String, + bytes: Vec<u8>, + read: usize, + } +} + +pub(crate) fn read_line<'a, R>(reader: &'a mut R, buf: &'a mut String) -> ReadLine<'a, R> +where + R: AsyncBufRead + ?Sized + Unpin, +{ + ReadLine { + reader, + bytes: unsafe { mem::replace(buf.as_mut_vec(), Vec::new()) }, + buf, + read: 0, + } +} + +pub(super) fn read_line_internal<R: AsyncBufRead + ?Sized>( + reader: Pin<&mut R>, + cx: &mut Context<'_>, + buf: &mut String, + bytes: &mut Vec<u8>, + read: &mut usize, +) -> Poll<io::Result<usize>> { + let ret = ready!(read_until_internal(reader, cx, b'\n', bytes, read)); + if str::from_utf8(&bytes).is_err() { + Poll::Ready(ret.and_then(|_| { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "stream did not contain valid UTF-8", + )) + })) + } else { + debug_assert!(buf.is_empty()); + debug_assert_eq!(*read, 0); + // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`. 
+ mem::swap(unsafe { buf.as_mut_vec() }, bytes); + Poll::Ready(ret) + } +} + +impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadLine<'_, R> { + type Output = io::Result<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let Self { + reader, + buf, + bytes, + read, + } = &mut *self; + read_line_internal(Pin::new(reader), cx, buf, bytes, read) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<ReadLine<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/read_to_end.rs b/third_party/rust/tokio/src/io/util/read_to_end.rs new file mode 100644 index 0000000000..a2cd99bed0 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/read_to_end.rs @@ -0,0 +1,113 @@ +use crate::io::AsyncRead; + +use std::future::Future; +use std::io; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; + +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] +pub struct ReadToEnd<'a, R: ?Sized> { + reader: &'a mut R, + buf: &'a mut Vec<u8>, + start_len: usize, +} + +pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, R> +where + R: AsyncRead + Unpin + ?Sized, +{ + let start_len = buf.len(); + ReadToEnd { + reader, + buf, + start_len, + } +} + +struct Guard<'a> { + buf: &'a mut Vec<u8>, + len: usize, +} + +impl Drop for Guard<'_> { + fn drop(&mut self) { + unsafe { + self.buf.set_len(self.len); + } + } +} + +// This uses an adaptive system to extend the vector when it fills. We want to +// avoid paying to allocate and zero a huge chunk of memory if the reader only +// has 4 bytes while still making large reads if the reader does have a ton +// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every +// time is 4,500 times (!) slower than this if the reader has a very small +// amount of data to return. +// +// Because we're extending the buffer with uninitialized data for trusted +// readers, we need to make sure to truncate that if any of this panics. +pub(super) fn read_to_end_internal<R: AsyncRead + ?Sized>( + mut rd: Pin<&mut R>, + cx: &mut Context<'_>, + buf: &mut Vec<u8>, + start_len: usize, +) -> Poll<io::Result<usize>> { + let mut g = Guard { + len: buf.len(), + buf, + }; + let ret; + loop { + if g.len == g.buf.len() { + unsafe { + g.buf.reserve(32); + let capacity = g.buf.capacity(); + g.buf.set_len(capacity); + + let b = &mut *(&mut g.buf[g.len..] 
as *mut [u8] as *mut [MaybeUninit<u8>]); + + rd.prepare_uninitialized_buffer(b); + } + } + + match ready!(rd.as_mut().poll_read(cx, &mut g.buf[g.len..])) { + Ok(0) => { + ret = Poll::Ready(Ok(g.len - start_len)); + break; + } + Ok(n) => g.len += n, + Err(e) => { + ret = Poll::Ready(Err(e)); + break; + } + } + } + + ret +} + +impl<A> Future for ReadToEnd<'_, A> +where + A: AsyncRead + ?Sized + Unpin, +{ + type Output = io::Result<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let this = &mut *self; + read_to_end_internal(Pin::new(&mut this.reader), cx, this.buf, this.start_len) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<ReadToEnd<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/read_to_string.rs b/third_party/rust/tokio/src/io/util/read_to_string.rs new file mode 100644 index 0000000000..e77d836dee --- /dev/null +++ b/third_party/rust/tokio/src/io/util/read_to_string.rs @@ -0,0 +1,83 @@ +use crate::io::util::read_to_end::read_to_end_internal; +use crate::io::AsyncRead; + +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{io, mem, str}; + +cfg_io_util! { + /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct ReadToString<'a, R: ?Sized> { + reader: &'a mut R, + buf: &'a mut String, + bytes: Vec<u8>, + start_len: usize, + } +} + +pub(crate) fn read_to_string<'a, R>(reader: &'a mut R, buf: &'a mut String) -> ReadToString<'a, R> +where + R: AsyncRead + ?Sized + Unpin, +{ + let start_len = buf.len(); + ReadToString { + reader, + bytes: unsafe { mem::replace(buf.as_mut_vec(), Vec::new()) }, + buf, + start_len, + } +} + +fn read_to_string_internal<R: AsyncRead + ?Sized>( + reader: Pin<&mut R>, + cx: &mut Context<'_>, + buf: &mut String, + bytes: &mut Vec<u8>, + start_len: usize, +) -> Poll<io::Result<usize>> { + let ret = ready!(read_to_end_internal(reader, cx, bytes, start_len)); + if str::from_utf8(&bytes).is_err() { + Poll::Ready(ret.and_then(|_| { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "stream did not contain valid UTF-8", + )) + })) + } else { + debug_assert!(buf.is_empty()); + // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`. + mem::swap(unsafe { buf.as_mut_vec() }, bytes); + Poll::Ready(ret) + } +} + +impl<A> Future for ReadToString<'_, A> +where + A: AsyncRead + ?Sized + Unpin, +{ + type Output = io::Result<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let Self { + reader, + buf, + bytes, + start_len, + } = &mut *self; + read_to_string_internal(Pin::new(reader), cx, buf, bytes, *start_len) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<ReadToString<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/read_until.rs b/third_party/rust/tokio/src/io/util/read_until.rs new file mode 100644 index 0000000000..1adeda66f0 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/read_until.rs @@ -0,0 +1,86 @@ +use crate::io::AsyncBufRead; + +use std::future::Future; +use std::io; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method. 
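+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch via [`read_until`](crate::io::AsyncBufReadExt::read_until),
+    /// which returns this future; note that the delimiter is included in the buffer:
+    ///
+    /// ```
+    /// use tokio::io::AsyncBufReadExt;
+    ///
+    /// # async fn dox() -> std::io::Result<()> {
+    /// let mut reader: &[u8] = b"key:value";
+    /// let mut buf = Vec::new();
+    /// let n = reader.read_until(b':', &mut buf).await?;
+    /// assert_eq!(n, 4);
+    /// assert_eq!(&buf[..], &b"key:"[..]);
+    /// # Ok(())
+    /// # }
+    /// ```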
+ #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct ReadUntil<'a, R: ?Sized> { + reader: &'a mut R, + byte: u8, + buf: &'a mut Vec<u8>, + read: usize, + } +} + +pub(crate) fn read_until<'a, R>( + reader: &'a mut R, + byte: u8, + buf: &'a mut Vec<u8>, +) -> ReadUntil<'a, R> +where + R: AsyncBufRead + ?Sized + Unpin, +{ + ReadUntil { + reader, + byte, + buf, + read: 0, + } +} + +pub(super) fn read_until_internal<R: AsyncBufRead + ?Sized>( + mut reader: Pin<&mut R>, + cx: &mut Context<'_>, + byte: u8, + buf: &mut Vec<u8>, + read: &mut usize, +) -> Poll<io::Result<usize>> { + loop { + let (done, used) = { + let available = ready!(reader.as_mut().poll_fill_buf(cx))?; + if let Some(i) = memchr::memchr(byte, available) { + buf.extend_from_slice(&available[..=i]); + (true, i + 1) + } else { + buf.extend_from_slice(available); + (false, available.len()) + } + }; + reader.as_mut().consume(used); + *read += used; + if done || used == 0 { + return Poll::Ready(Ok(mem::replace(read, 0))); + } + } +} + +impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadUntil<'_, R> { + type Output = io::Result<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let Self { + reader, + byte, + buf, + read, + } = &mut *self; + read_until_internal(Pin::new(reader), cx, *byte, buf, read) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<ReadUntil<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/repeat.rs b/third_party/rust/tokio/src/io/util/repeat.rs new file mode 100644 index 0000000000..6b9067e853 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/repeat.rs @@ -0,0 +1,71 @@ +use crate::io::AsyncRead; + +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// An async reader which yields one byte over and over and over and over and + /// over and... + /// + /// This struct is generally created by calling [`repeat`][repeat]. Please + /// see the documentation of `repeat()` for more details. + /// + /// This is an asynchronous version of [`std::io::Repeat`][std]. + /// + /// [repeat]: fn@repeat + /// [std]: std::io::Repeat + #[derive(Debug)] + pub struct Repeat { + byte: u8, + } + + /// Creates an instance of an async reader that infinitely repeats one byte. + /// + /// All reads from this reader will succeed by filling the specified buffer with + /// the given byte. + /// + /// This is an asynchronous version of [`std::io::repeat`][std]. 
+ /// + /// [std]: std::io::repeat + /// + /// # Examples + /// + /// ``` + /// use tokio::io::{self, AsyncReadExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut buffer = [0; 3]; + /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap(); + /// assert_eq!(buffer, [0b101, 0b101, 0b101]); + /// } + /// ``` + pub fn repeat(byte: u8) -> Repeat { + Repeat { byte } + } +} + +impl AsyncRead for Repeat { + #[inline] + fn poll_read( + self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + for byte in &mut *buf { + *byte = self.byte; + } + Poll::Ready(Ok(buf.len())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<Repeat>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/shutdown.rs b/third_party/rust/tokio/src/io/util/shutdown.rs new file mode 100644 index 0000000000..f24e288541 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/shutdown.rs @@ -0,0 +1,47 @@ +use crate::io::AsyncWrite; + +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// A future used to shutdown an I/O object. + /// + /// Created by the [`AsyncWriteExt::shutdown`] function. + #[derive(Debug)] + pub struct Shutdown<'a, A: ?Sized> { + a: &'a mut A, + } +} + +/// Creates a future which will shutdown an I/O object. +pub(super) fn shutdown<A>(a: &mut A) -> Shutdown<'_, A> +where + A: AsyncWrite + Unpin + ?Sized, +{ + Shutdown { a } +} + +impl<A> Future for Shutdown<'_, A> +where + A: AsyncWrite + Unpin + ?Sized, +{ + type Output = io::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = &mut *self; + Pin::new(&mut *me.a).poll_shutdown(cx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<Shutdown<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/sink.rs b/third_party/rust/tokio/src/io/util/sink.rs new file mode 100644 index 0000000000..05ee773fa3 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/sink.rs @@ -0,0 +1,87 @@ +use crate::io::AsyncWrite; + +use std::fmt; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// An async writer which will move data into the void. + /// + /// This struct is generally created by calling [`sink`][sink]. Please + /// see the documentation of `sink()` for more details. + /// + /// This is an asynchronous version of [`std::io::Sink`][std]. + /// + /// [sink]: sink() + /// [std]: std::io::Sink + pub struct Sink { + _p: (), + } + + /// Creates an instance of an async writer which will successfully consume all + /// data. + /// + /// All calls to [`poll_write`] on the returned instance will return + /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be + /// inspected. + /// + /// This is an asynchronous version of [`std::io::sink`][std]. 
+ /// + /// [`poll_write`]: crate::io::AsyncWrite::poll_write() + /// [std]: std::io::sink + /// + /// # Examples + /// + /// ``` + /// use tokio::io::{self, AsyncWriteExt}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let buffer = vec![1, 2, 3, 5, 8]; + /// let num_bytes = io::sink().write(&buffer).await?; + /// assert_eq!(num_bytes, 5); + /// Ok(()) + /// } + /// ``` + pub fn sink() -> Sink { + Sink { _p: () } + } +} + +impl AsyncWrite for Sink { + #[inline] + fn poll_write( + self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll<Result<usize, io::Error>> { + Poll::Ready(Ok(buf.len())) + } + + #[inline] + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + Poll::Ready(Ok(())) + } + + #[inline] + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> { + Poll::Ready(Ok(())) + } +} + +impl fmt::Debug for Sink { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Sink { .. }") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<Sink>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/split.rs b/third_party/rust/tokio/src/io/util/split.rs new file mode 100644 index 0000000000..f1ed2fd89d --- /dev/null +++ b/third_party/rust/tokio/src/io/util/split.rs @@ -0,0 +1,112 @@ +use crate::io::util::read_until::read_until_internal; +use crate::io::AsyncBufRead; + +use pin_project_lite::pin_project; +use std::io; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Stream for the [`split`](crate::io::AsyncBufReadExt::split) method. + #[derive(Debug)] + #[must_use = "streams do nothing unless polled"] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct Split<R> { + #[pin] + reader: R, + buf: Vec<u8>, + delim: u8, + read: usize, + } +} + +pub(crate) fn split<R>(reader: R, delim: u8) -> Split<R> +where + R: AsyncBufRead, +{ + Split { + reader, + buf: Vec::new(), + delim, + read: 0, + } +} + +impl<R> Split<R> +where + R: AsyncBufRead + Unpin, +{ + /// Returns the next segment in the stream. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::AsyncBufRead; + /// use tokio::io::AsyncBufReadExt; + /// + /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> { + /// let mut segments = my_buf_read.split(b'f'); + /// + /// while let Some(segment) = segments.next_segment().await? 
{ + /// println!("length = {}", segment.len()) + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn next_segment(&mut self) -> io::Result<Option<Vec<u8>>> { + use crate::future::poll_fn; + + poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await + } +} + +impl<R> Split<R> +where + R: AsyncBufRead, +{ + #[doc(hidden)] + pub fn poll_next_segment( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<io::Result<Option<Vec<u8>>>> { + let me = self.project(); + + let n = ready!(read_until_internal( + me.reader, cx, *me.delim, me.buf, me.read, + ))?; + + if n == 0 && me.buf.is_empty() { + return Poll::Ready(Ok(None)); + } + + if me.buf.last() == Some(me.delim) { + me.buf.pop(); + } + + Poll::Ready(Ok(Some(mem::replace(me.buf, Vec::new())))) + } +} + +#[cfg(feature = "stream")] +impl<R: AsyncBufRead> crate::stream::Stream for Split<R> { + type Item = io::Result<Vec<u8>>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + Poll::Ready(match ready!(self.poll_next_segment(cx)) { + Ok(Some(segment)) => Some(Ok(segment)), + Ok(None) => None, + Err(err) => Some(Err(err)), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<Split<()>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/stream_reader.rs b/third_party/rust/tokio/src/io/util/stream_reader.rs new file mode 100644 index 0000000000..b98f8bdfc2 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/stream_reader.rs @@ -0,0 +1,184 @@ +use crate::io::{AsyncBufRead, AsyncRead}; +use crate::stream::Stream; +use bytes::{Buf, BufMut}; +use pin_project_lite::pin_project; +use std::io; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Convert a stream of byte chunks into an [`AsyncRead`]. + /// + /// This type is usually created using the [`stream_reader`] function. + /// + /// [`AsyncRead`]: crate::io::AsyncRead + /// [`stream_reader`]: crate::io::stream_reader + #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(feature = "stream")))] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct StreamReader<S, B> { + #[pin] + inner: S, + chunk: Option<B>, + } +} + +/// Convert a stream of byte chunks into an [`AsyncRead`](crate::io::AsyncRead). +/// +/// # Example +/// +/// ``` +/// use bytes::Bytes; +/// use tokio::io::{stream_reader, AsyncReadExt}; +/// # #[tokio::main] +/// # async fn main() -> std::io::Result<()> { +/// +/// // Create a stream from an iterator. +/// let stream = tokio::stream::iter(vec![ +/// Ok(Bytes::from_static(&[0, 1, 2, 3])), +/// Ok(Bytes::from_static(&[4, 5, 6, 7])), +/// Ok(Bytes::from_static(&[8, 9, 10, 11])), +/// ]); +/// +/// // Convert it to an AsyncRead. +/// let mut read = stream_reader(stream); +/// +/// // Read five bytes from the stream. +/// let mut buf = [0; 5]; +/// read.read_exact(&mut buf).await?; +/// assert_eq!(buf, [0, 1, 2, 3, 4]); +/// +/// // Read the rest of the current chunk. +/// assert_eq!(read.read(&mut buf).await?, 3); +/// assert_eq!(&buf[..3], [5, 6, 7]); +/// +/// // Read the next chunk. +/// assert_eq!(read.read(&mut buf).await?, 4); +/// assert_eq!(&buf[..4], [8, 9, 10, 11]); +/// +/// // We have now reached the end. 
+/// assert_eq!(read.read(&mut buf).await?, 0); +/// +/// # Ok(()) +/// # } +/// ``` +#[cfg_attr(docsrs, doc(cfg(feature = "stream")))] +#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] +pub fn stream_reader<S, B>(stream: S) -> StreamReader<S, B> +where + S: Stream<Item = Result<B, io::Error>>, + B: Buf, +{ + StreamReader::new(stream) +} + +impl<S, B> StreamReader<S, B> +where + S: Stream<Item = Result<B, io::Error>>, + B: Buf, +{ + /// Convert the provided stream into an `AsyncRead`. + fn new(stream: S) -> Self { + Self { + inner: stream, + chunk: None, + } + } + /// Do we have a chunk and is it non-empty? + fn has_chunk(self: Pin<&mut Self>) -> bool { + if let Some(chunk) = self.project().chunk { + chunk.remaining() > 0 + } else { + false + } + } +} + +impl<S, B> AsyncRead for StreamReader<S, B> +where + S: Stream<Item = Result<B, io::Error>>, + B: Buf, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + if buf.is_empty() { + return Poll::Ready(Ok(0)); + } + + let inner_buf = match self.as_mut().poll_fill_buf(cx) { + Poll::Ready(Ok(buf)) => buf, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }; + let len = std::cmp::min(inner_buf.len(), buf.len()); + (&mut buf[..len]).copy_from_slice(&inner_buf[..len]); + + self.consume(len); + Poll::Ready(Ok(len)) + } + fn poll_read_buf<BM: BufMut>( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut BM, + ) -> Poll<io::Result<usize>> + where + Self: Sized, + { + if !buf.has_remaining_mut() { + return Poll::Ready(Ok(0)); + } + + let inner_buf = match self.as_mut().poll_fill_buf(cx) { + Poll::Ready(Ok(buf)) => buf, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Pending => return Poll::Pending, + }; + let len = std::cmp::min(inner_buf.len(), buf.remaining_mut()); + buf.put_slice(&inner_buf[..len]); + + self.consume(len); + Poll::Ready(Ok(len)) + } + unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool { + false + } +} + +impl<S, B> AsyncBufRead for StreamReader<S, B> +where + S: Stream<Item = Result<B, io::Error>>, + B: Buf, +{ + fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + loop { + if self.as_mut().has_chunk() { + // This unwrap is very sad, but it can't be avoided. + let buf = self.project().chunk.as_ref().unwrap().bytes(); + return Poll::Ready(Ok(buf)); + } else { + match self.as_mut().project().inner.poll_next(cx) { + Poll::Ready(Some(Ok(chunk))) => { + // Go around the loop in case the chunk is empty. + *self.as_mut().project().chunk = Some(chunk); + } + Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)), + Poll::Ready(None) => return Poll::Ready(Ok(&[])), + Poll::Pending => return Poll::Pending, + } + } + } + } + fn consume(self: Pin<&mut Self>, amt: usize) { + if amt > 0 { + self.project() + .chunk + .as_mut() + .expect("No chunk present") + .advance(amt); + } + } +} diff --git a/third_party/rust/tokio/src/io/util/take.rs b/third_party/rust/tokio/src/io/util/take.rs new file mode 100644 index 0000000000..5d6bd90aa3 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/take.rs @@ -0,0 +1,131 @@ +use crate::io::{AsyncBufRead, AsyncRead}; + +use pin_project_lite::pin_project; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{cmp, io}; + +pin_project! { + /// Stream for the [`take`](super::AsyncReadExt::take) method. 
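+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch using [`take`](super::AsyncReadExt::take) to cap how
+    /// many bytes may be read:
+    ///
+    /// ```
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox() -> std::io::Result<()> {
+    /// let reader: &[u8] = b"hello world";
+    /// let mut take = reader.take(5);
+    /// let mut buf = Vec::new();
+    /// take.read_to_end(&mut buf).await?;
+    /// assert_eq!(&buf[..], &b"hello"[..]);
+    /// # Ok(())
+    /// # }
+    /// ```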
+ #[derive(Debug)] + #[must_use = "streams do nothing unless you `.await` or poll them"] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + pub struct Take<R> { + #[pin] + inner: R, + // Add '_' to avoid conflicts with `limit` method. + limit_: u64, + } +} + +pub(super) fn take<R: AsyncRead>(inner: R, limit: u64) -> Take<R> { + Take { + inner, + limit_: limit, + } +} + +impl<R: AsyncRead> Take<R> { + /// Returns the remaining number of bytes that can be + /// read before this instance will return EOF. + /// + /// # Note + /// + /// This instance may reach `EOF` after reading fewer bytes than indicated by + /// this method if the underlying [`AsyncRead`] instance reaches EOF. + pub fn limit(&self) -> u64 { + self.limit_ + } + + /// Sets the number of bytes that can be read before this instance will + /// return EOF. This is the same as constructing a new `Take` instance, so + /// the amount of bytes read and the previous limit value don't matter when + /// calling this method. + pub fn set_limit(&mut self, limit: u64) { + self.limit_ = limit + } + + /// Gets a reference to the underlying reader. + pub fn get_ref(&self) -> &R { + &self.inner + } + + /// Gets a mutable reference to the underlying reader. + /// + /// Care should be taken to avoid modifying the internal I/O state of the + /// underlying reader as doing so may corrupt the internal limit of this + /// `Take`. + pub fn get_mut(&mut self) -> &mut R { + &mut self.inner + } + + /// Gets a pinned mutable reference to the underlying reader. + /// + /// Care should be taken to avoid modifying the internal I/O state of the + /// underlying reader as doing so may corrupt the internal limit of this + /// `Take`. + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> { + self.project().inner + } + + /// Consumes the `Take`, returning the wrapped reader. 
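+    ///
+    /// # Examples
+    ///
+    /// A small sketch: read the capped prefix, then recover the reader to
+    /// continue with the remaining bytes:
+    ///
+    /// ```
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox() -> std::io::Result<()> {
+    /// let reader: &[u8] = b"hello world";
+    /// let mut take = reader.take(6);
+    /// let mut prefix = Vec::new();
+    /// take.read_to_end(&mut prefix).await?;
+    /// assert_eq!(&prefix[..], &b"hello "[..]);
+    ///
+    /// let mut rest = take.into_inner();
+    /// let mut out = Vec::new();
+    /// rest.read_to_end(&mut out).await?;
+    /// assert_eq!(&out[..], &b"world"[..]);
+    /// # Ok(())
+    /// # }
+    /// ```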
+ pub fn into_inner(self) -> R { + self.inner + } +} + +impl<R: AsyncRead> AsyncRead for Take<R> { + unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool { + self.inner.prepare_uninitialized_buffer(buf) + } + + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<Result<usize, io::Error>> { + if self.limit_ == 0 { + return Poll::Ready(Ok(0)); + } + + let me = self.project(); + let max = std::cmp::min(buf.len() as u64, *me.limit_) as usize; + let n = ready!(me.inner.poll_read(cx, &mut buf[..max]))?; + *me.limit_ -= n as u64; + Poll::Ready(Ok(n)) + } +} + +impl<R: AsyncBufRead> AsyncBufRead for Take<R> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { + let me = self.project(); + + // Don't call into inner reader at all at EOF because it may still block + if *me.limit_ == 0 { + return Poll::Ready(Ok(&[])); + } + + let buf = ready!(me.inner.poll_fill_buf(cx)?); + let cap = cmp::min(buf.len() as u64, *me.limit_) as usize; + Poll::Ready(Ok(&buf[..cap])) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + let me = self.project(); + // Don't let callers reset the limit by passing an overlarge value + let amt = cmp::min(amt as u64, *me.limit_) as usize; + *me.limit_ -= amt as u64; + me.inner.consume(amt); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + crate::is_unpin::<Take<()>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/write.rs b/third_party/rust/tokio/src/io/util/write.rs new file mode 100644 index 0000000000..433a421d34 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/write.rs @@ -0,0 +1,37 @@ +use crate::io::AsyncWrite; + +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// A future to write some of the buffer to an `AsyncWrite`. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct Write<'a, W: ?Sized> { + writer: &'a mut W, + buf: &'a [u8], + } +} + +/// Tries to write some bytes from the given `buf` to the writer in an +/// asynchronous manner, returning a future. +pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W> +where + W: AsyncWrite + Unpin + ?Sized, +{ + Write { writer, buf } +} + +impl<W> Future for Write<'_, W> +where + W: AsyncWrite + Unpin + ?Sized, +{ + type Output = io::Result<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { + let me = &mut *self; + Pin::new(&mut *me.writer).poll_write(cx, me.buf) + } +} diff --git a/third_party/rust/tokio/src/io/util/write_all.rs b/third_party/rust/tokio/src/io/util/write_all.rs new file mode 100644 index 0000000000..898006c56c --- /dev/null +++ b/third_party/rust/tokio/src/io/util/write_all.rs @@ -0,0 +1,57 @@ +use crate::io::AsyncWrite; + +use std::future::Future; +use std::io; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! 
{ + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct WriteAll<'a, W: ?Sized> { + writer: &'a mut W, + buf: &'a [u8], + } +} + +pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W> +where + W: AsyncWrite + Unpin + ?Sized, +{ + WriteAll { writer, buf } +} + +impl<W> Future for WriteAll<'_, W> +where + W: AsyncWrite + Unpin + ?Sized, +{ + type Output = io::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + let me = &mut *self; + while !me.buf.is_empty() { + let n = ready!(Pin::new(&mut me.writer).poll_write(cx, me.buf))?; + { + let (_, rest) = mem::replace(&mut me.buf, &[]).split_at(n); + me.buf = rest; + } + if n == 0 { + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); + } + } + + Poll::Ready(Ok(())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn assert_unpin() { + use std::marker::PhantomPinned; + crate::is_unpin::<WriteAll<'_, PhantomPinned>>(); + } +} diff --git a/third_party/rust/tokio/src/io/util/write_buf.rs b/third_party/rust/tokio/src/io/util/write_buf.rs new file mode 100644 index 0000000000..e49282fe0c --- /dev/null +++ b/third_party/rust/tokio/src/io/util/write_buf.rs @@ -0,0 +1,43 @@ +use crate::io::AsyncWrite; + +use bytes::Buf; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +cfg_io_util! { + /// A future to write some of the buffer to an `AsyncWrite`. + #[derive(Debug)] + #[must_use = "futures do nothing unless you `.await` or poll them"] + pub struct WriteBuf<'a, W, B> { + writer: &'a mut W, + buf: &'a mut B, + } +} + +/// Tries to write some bytes from the given `buf` to the writer in an +/// asynchronous manner, returning a future. +pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B> +where + W: AsyncWrite, + B: Buf, +{ + WriteBuf { writer, buf } +} + +impl<W, B> Future for WriteBuf<'_, W, B> +where + W: AsyncWrite, + B: Buf, +{ + type Output = io::Result<usize>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> { + // safety: no data is moved from self + unsafe { + let me = self.get_unchecked_mut(); + Pin::new_unchecked(&mut *me.writer).poll_write_buf(cx, &mut me.buf) + } + } +} diff --git a/third_party/rust/tokio/src/io/util/write_int.rs b/third_party/rust/tokio/src/io/util/write_int.rs new file mode 100644 index 0000000000..672c35f076 --- /dev/null +++ b/third_party/rust/tokio/src/io/util/write_int.rs @@ -0,0 +1,122 @@ +use crate::io::AsyncWrite; + +use bytes::BufMut; +use pin_project_lite::pin_project; +use std::future::Future; +use std::io; +use std::mem::size_of; +use std::pin::Pin; +use std::task::{Context, Poll}; + +macro_rules! writer { + ($name:ident, $ty:ty, $writer:ident) => { + writer!($name, $ty, $writer, size_of::<$ty>()); + }; + ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => { + pin_project! 
{ + #[doc(hidden)] + pub struct $name<W> { + #[pin] + dst: W, + buf: [u8; $bytes], + written: u8, + } + } + + impl<W> $name<W> { + pub(crate) fn new(w: W, value: $ty) -> Self { + let mut writer = $name { + buf: [0; $bytes], + written: 0, + dst: w, + }; + BufMut::$writer(&mut &mut writer.buf[..], value); + writer + } + } + + impl<W> Future for $name<W> + where + W: AsyncWrite, + { + type Output = io::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let mut me = self.project(); + + if *me.written == $bytes as u8 { + return Poll::Ready(Ok(())); + } + + while *me.written < $bytes as u8 { + *me.written += match me + .dst + .as_mut() + .poll_write(cx, &me.buf[*me.written as usize..]) + { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), + Poll::Ready(Ok(0)) => { + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); + } + Poll::Ready(Ok(n)) => n as u8, + }; + } + Poll::Ready(Ok(())) + } + } + }; +} + +macro_rules! writer8 { + ($name:ident, $ty:ty) => { + pin_project! { + #[doc(hidden)] + pub struct $name<W> { + #[pin] + dst: W, + byte: $ty, + } + } + + impl<W> $name<W> { + pub(crate) fn new(dst: W, byte: $ty) -> Self { + Self { dst, byte } + } + } + + impl<W> Future for $name<W> + where + W: AsyncWrite, + { + type Output = io::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = self.project(); + + let buf = [*me.byte as u8]; + + match me.dst.poll_write(cx, &buf[..]) { + Poll::Pending => Poll::Pending, + Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), + Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())), + Poll::Ready(Ok(1)) => Poll::Ready(Ok(())), + Poll::Ready(Ok(_)) => unreachable!(), + } + } + } + }; +} + +writer8!(WriteU8, u8); +writer8!(WriteI8, i8); + +writer!(WriteU16, u16, put_u16); +writer!(WriteU32, u32, put_u32); +writer!(WriteU64, u64, put_u64); +writer!(WriteU128, u128, put_u128); + +writer!(WriteI16, i16, put_i16); +writer!(WriteI32, i32, put_i32); +writer!(WriteI64, i64, put_i64); +writer!(WriteI128, i128, put_i128); diff --git a/third_party/rust/tokio/src/lib.rs b/third_party/rust/tokio/src/lib.rs new file mode 100644 index 0000000000..8172f40a3b --- /dev/null +++ b/third_party/rust/tokio/src/lib.rs @@ -0,0 +1,390 @@ +#![doc(html_root_url = "https://docs.rs/tokio/0.2.18")] +#![allow( + clippy::cognitive_complexity, + clippy::large_enum_variant, + clippy::needless_doctest_main +)] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + unreachable_pub +)] +#![deny(intra_doc_link_resolution_failure)] +#![doc(test( + no_crate_inject, + attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) +))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +//! A runtime for writing reliable, asynchronous, and slim applications. +//! +//! Tokio is an event-driven, non-blocking I/O platform for writing asynchronous +//! applications with the Rust programming language. At a high level, it +//! provides a few major components: +//! +//! * Tools for [working with asynchronous tasks][tasks], including +//! [synchronization primitives and channels][sync] and [timeouts, delays, and +//! intervals][time]. +//! * APIs for [performing asynchronous I/O][io], including [TCP and UDP][net] sockets, +//! [filesystem][fs] operations, and [process] and [signal] management. +//! * A [runtime] for executing asynchronous code, including a task scheduler, +//! 
an I/O driver backed by the operating system's event queue (epoll, kqueue,
+//!   IOCP, etc.), and a high-performance timer.
+//!
+//! Guide-level documentation is found on the [website].
+//!
+//! [tasks]: #working-with-tasks
+//! [sync]: crate::sync
+//! [time]: crate::time
+//! [io]: #asynchronous-io
+//! [net]: crate::net
+//! [fs]: crate::fs
+//! [process]: crate::process
+//! [signal]: crate::signal
+//! [runtime]: crate::runtime
+//! [website]: https://tokio.rs/docs/overview/
+//!
+//! # A Tour of Tokio
+//!
+//! Tokio consists of a number of modules that provide a range of functionality
+//! essential for implementing asynchronous applications in Rust. In this
+//! section, we will take a brief tour of Tokio, summarizing the major APIs and
+//! their uses.
+//!
+//! The easiest way to get started is to enable all features. Do this by
+//! enabling the `full` feature flag:
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["full"] }
+//! ```
+//!
+//! ## Feature flags
+//!
+//! Tokio uses a set of [feature flags] to reduce the amount of compiled code.
+//! It is possible to enable only certain features. By default, Tokio does not
+//! enable any features but allows one to enable a subset for their use case.
+//! Below is a list of the available feature flags. Above each function, struct,
+//! and trait, the documentation also lists the feature flags required to use
+//! that item. If you are new to Tokio it is recommended that you use the `full`
+//! feature flag, which will enable all public APIs. Beware though that this
+//! will pull in many extra dependencies that you may not need.
+//!
+//! - `full`: Enables all Tokio public API features listed below.
+//! - `rt-core`: Enables `tokio::spawn` and the basic (single-threaded) scheduler.
+//! - `rt-threaded`: Enables the heavier, multi-threaded, work-stealing scheduler.
+//! - `rt-util`: Enables non-scheduler utilities.
+//! - `io-driver`: Enables the `mio` based IO driver.
+//! - `io-util`: Enables the IO based `Ext` traits.
+//! - `io-std`: Enables `Stdout`, `Stdin` and `Stderr` types.
+//! - `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and `UdpSocket`.
+//! - `tcp`: Enables all `tokio::net::tcp` types.
+//! - `udp`: Enables all `tokio::net::udp` types.
+//! - `uds`: Enables all `tokio::net::unix` types.
+//! - `time`: Enables `tokio::time` types and allows the schedulers to enable
+//!   the built-in timer.
+//! - `process`: Enables `tokio::process` types.
+//! - `macros`: Enables `#[tokio::main]` and `#[tokio::test]` macros.
+//! - `sync`: Enables all `tokio::sync` types.
+//! - `stream`: Enables optional `Stream` implementations for types within Tokio.
+//! - `signal`: Enables all `tokio::signal` types.
+//! - `fs`: Enables `tokio::fs` types.
+//! - `dns`: Enables async `tokio::net::ToSocketAddrs`.
+//! - `test-util`: Enables testing-based infrastructure for the Tokio runtime.
+//! - `blocking`: Enables `block_in_place` and `spawn_blocking`.
+//!
+//! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are
+//! always available._
+//!
+//! ### Internal features
+//!
+//! These features do not expose any new API, but influence internal
+//! implementation aspects of Tokio, and can pull in additional
+//! dependencies. They are not included in `full`:
+//!
+//! - `parking_lot`: As a potential optimization, use the _parking_lot_ crate's
+//!   synchronization primitives internally. MSRV may increase according to the
+//!   _parking_lot_ release in use.
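+//!
+//! For instance, a sketch of opting into the `parking_lot` optimization from
+//! `Cargo.toml` (it must be requested explicitly, since it is not part of
+//! `full`):
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["full", "parking_lot"] }
+//! ```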
+//!
+//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
+//!
+//! ### Authoring applications
+//!
+//! Tokio is great for writing applications and most users in this case shouldn't
+//! worry too much about what features they should pick. If you're unsure, we suggest
+//! going with `full` to ensure that you don't run into any roadblocks while you're
+//! building your application.
+//!
+//! #### Example
+//!
+//! This example shows the quickest way to get started with Tokio.
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["full"] }
+//! ```
+//!
+//! ### Authoring libraries
+//!
+//! As a library author your goal should be to provide the lightest weight crate
+//! that is based on Tokio. To achieve this you should ensure that you only enable
+//! the features you need. This allows users to pick up your crate without having
+//! to enable unnecessary features.
+//!
+//! #### Example
+//!
+//! This example shows how you may want to enable features for a library that just
+//! needs to call `tokio::spawn` and use a `TcpStream`.
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["rt-core", "tcp"] }
+//! ```
+//!
+//! ## Working With Tasks
+//!
+//! Asynchronous programs in Rust are based around lightweight, non-blocking
+//! units of execution called [_tasks_][tasks]. The [`tokio::task`] module provides
+//! important tools for working with tasks:
+//!
+//! * The [`spawn`] function and [`JoinHandle`] type, for scheduling a new task
+//!   on the Tokio runtime and awaiting the output of a spawned task, respectively,
+//! * Functions for [running blocking operations][blocking] in an asynchronous
+//!   task context.
+//!
+//! The [`tokio::task`] module is present only when the "rt-core" feature flag
+//! is enabled.
+//!
+//! [tasks]: task/index.html#what-are-tasks
+//! [`tokio::task`]: crate::task
+//! [`spawn`]: crate::task::spawn()
+//! [`JoinHandle`]: crate::task::JoinHandle
+//! [blocking]: task/index.html#blocking-and-yielding
+//!
+//! The [`tokio::sync`] module contains synchronization primitives to use when
+//! needing to communicate or share data. These include:
+//!
+//! * channels ([`oneshot`], [`mpsc`], and [`watch`]), for sending values
+//!   between tasks,
+//! * a non-blocking [`Mutex`], for controlling access to a shared, mutable
+//!   value,
+//! * an asynchronous [`Barrier`] type, for multiple tasks to synchronize before
+//!   beginning a computation.
+//!
+//! The `tokio::sync` module is present only when the "sync" feature flag is
+//! enabled.
+//!
+//! [`tokio::sync`]: crate::sync
+//! [`Mutex`]: crate::sync::Mutex
+//! [`Barrier`]: crate::sync::Barrier
+//! [`oneshot`]: crate::sync::oneshot
+//! [`mpsc`]: crate::sync::mpsc
+//! [`watch`]: crate::sync::watch
+//!
+//! The [`tokio::time`] module provides utilities for tracking time and
+//! scheduling work. This includes functions for setting [timeouts][timeout] for
+//! tasks, [delaying][delay] work to run in the future, or [repeating an operation at an
+//! interval][interval].
+//!
+//! In order to use `tokio::time`, the "time" feature flag must be enabled.
+//!
+//! [`tokio::time`]: crate::time
+//! [delay]: crate::time::delay_for()
+//! [interval]: crate::time::interval()
+//! [timeout]: crate::time::timeout()
+//!
+//! Finally, Tokio provides a _runtime_ for executing asynchronous tasks. Most
+//! applications can use the [`#[tokio::main]`][main] macro to run their code on
+//! the Tokio runtime.
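+//!
+//! A minimal sketch of this entry point (requires the "macros" feature plus a
+//! scheduler via "rt-core" or "rt-threaded"):
+//!
+//! ```
+//! #[tokio::main]
+//! async fn main() {
+//!     println!("Hello from the Tokio runtime!");
+//! }
+//! ```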
+//! In use-cases where manual control over the runtime is
+//! required, the [`tokio::runtime`] module provides APIs for configuring and
+//! managing runtimes.
+//!
+//! Using the runtime requires the "rt-core" or "rt-threaded" feature flags, to
+//! enable the basic [single-threaded scheduler][rt-core] and the [thread-pool
+//! scheduler][rt-threaded], respectively. See the [`runtime` module
+//! documentation][rt-features] for details. In addition, the "macros" feature
+//! flag enables the `#[tokio::main]` and `#[tokio::test]` attributes.
+//!
+//! [main]: attr.main.html
+//! [`tokio::runtime`]: crate::runtime
+//! [`Builder`]: crate::runtime::Builder
+//! [`Runtime`]: crate::runtime::Runtime
+//! [rt-core]: runtime/index.html#basic-scheduler
+//! [rt-threaded]: runtime/index.html#threaded-scheduler
+//! [rt-features]: runtime/index.html#runtime-scheduler
+//!
+//! ## Asynchronous IO
+//!
+//! As well as scheduling and running tasks, Tokio provides everything you need
+//! to perform input and output asynchronously.
+//!
+//! The [`tokio::io`] module provides Tokio's asynchronous core I/O primitives,
+//! the [`AsyncRead`], [`AsyncWrite`], and [`AsyncBufRead`] traits. In addition,
+//! when the "io-util" feature flag is enabled, it also provides combinators and
+//! functions for working with these traits, forming an asynchronous
+//! counterpart to [`std::io`]. When the "io-driver" feature flag is enabled, it
+//! also provides utilities for library authors implementing I/O resources.
+//!
+//! Tokio also includes APIs for performing various kinds of I/O and interacting
+//! with the operating system asynchronously. These include:
+//!
+//! * [`tokio::net`], which contains non-blocking versions of [TCP], [UDP], and
+//!   [Unix Domain Sockets][UDS] (enabled by the "net" feature flag),
+//! * [`tokio::fs`], similar to [`std::fs`] but for performing filesystem I/O
+//!   asynchronously (enabled by the "fs" feature flag),
+//! * [`tokio::signal`], for asynchronously handling Unix and Windows OS signals
+//!   (enabled by the "signal" feature flag),
+//! * [`tokio::process`], for spawning and managing child processes (enabled by
+//!   the "process" feature flag).
+//!
+//! [`tokio::io`]: crate::io
+//! [`AsyncRead`]: crate::io::AsyncRead
+//! [`AsyncWrite`]: crate::io::AsyncWrite
+//! [`AsyncBufRead`]: crate::io::AsyncBufRead
+//! [`std::io`]: std::io
+//! [`tokio::net`]: crate::net
+//! [TCP]: crate::net::tcp
+//! [UDP]: crate::net::udp
+//! [UDS]: crate::net::unix
+//! [`tokio::fs`]: crate::fs
+//! [`std::fs`]: std::fs
+//! [`tokio::signal`]: crate::signal
+//! [`tokio::process`]: crate::process
+//!
+//! # Examples
+//!
+//! A simple TCP echo server:
+//!
+//! ```no_run
+//! use tokio::net::TcpListener;
+//! use tokio::prelude::*;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//!     let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+//!
+//!     loop {
+//!         let (mut socket, _) = listener.accept().await?;
+//!
+//!         tokio::spawn(async move {
+//!             let mut buf = [0; 1024];
+//!
+//!             // In a loop, read data from the socket and write the data back.
+//!             loop {
+//!                 let n = match socket.read(&mut buf).await {
+//!                     // socket closed
+//!                     Ok(n) if n == 0 => return,
+//!                     Ok(n) => n,
+//!                     Err(e) => {
+//!                         eprintln!("failed to read from socket; err = {:?}", e);
+//!                         return;
+//!                     }
+//!                 };
+//!
+//!                 // Write the data back
+//!                 if let Err(e) = socket.write_all(&buf[0..n]).await {
+//!                     eprintln!("failed to write to socket; err = {:?}", e);
+//!
return; +//! } +//! } +//! }); +//! } +//! } +//! ``` + +// Includes re-exports used by macros. +// +// This module is not intended to be part of the public API. In general, any +// `doc(hidden)` code is not part of Tokio's public and stable API. +#[macro_use] +#[doc(hidden)] +pub mod macros; + +cfg_fs! { + pub mod fs; +} + +#[doc(hidden)] +pub mod future; + +pub mod io; +pub mod net; + +mod loom; +mod park; + +pub mod prelude; + +cfg_process! { + pub mod process; +} + +pub mod runtime; + +pub(crate) mod coop; + +cfg_signal! { + pub mod signal; +} + +cfg_stream! { + pub mod stream; +} + +cfg_sync! { + pub mod sync; +} +cfg_not_sync! { + mod sync; +} + +cfg_rt_core! { + pub mod task; + pub use task::spawn; +} + +cfg_time! { + pub mod time; +} + +mod util; + +cfg_macros! { + /// Implementation detail of the `select!` macro. This macro is **not** + /// intended to be used as part of the public API and is permitted to + /// change. + #[doc(hidden)] + pub use tokio_macros::select_priv_declare_output_enum; + + doc_rt_core! { + cfg_rt_threaded! { + // This is the docs.rs case (with all features) so make sure macros + // is included in doc(cfg). + + #[cfg(not(test))] // Work around for rust-lang/rust#62127 + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + pub use tokio_macros::main_threaded as main; + + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + pub use tokio_macros::test_threaded as test; + } + + cfg_not_rt_threaded! { + #[cfg(not(test))] // Work around for rust-lang/rust#62127 + pub use tokio_macros::main_basic as main; + pub use tokio_macros::test_basic as test; + } + } + + // Maintains old behavior + cfg_not_rt_core! { + #[cfg(not(test))] + pub use tokio_macros::main; + pub use tokio_macros::test; + } +} + +// TODO: rm +#[cfg(feature = "io-util")] +#[cfg(test)] +fn is_unpin<T: Unpin>() {} diff --git a/third_party/rust/tokio/src/loom/mocked.rs b/third_party/rust/tokio/src/loom/mocked.rs new file mode 100644 index 0000000000..7891395225 --- /dev/null +++ b/third_party/rust/tokio/src/loom/mocked.rs @@ -0,0 +1,13 @@ +pub(crate) use loom::*; + +pub(crate) mod rand { + pub(crate) fn seed() -> u64 { + 1 + } +} + +pub(crate) mod sys { + pub(crate) fn num_cpus() -> usize { + 2 + } +} diff --git a/third_party/rust/tokio/src/loom/mod.rs b/third_party/rust/tokio/src/loom/mod.rs new file mode 100644 index 0000000000..56a41f25a0 --- /dev/null +++ b/third_party/rust/tokio/src/loom/mod.rs @@ -0,0 +1,12 @@ +//! This module abstracts over `loom` and `std::sync` depending on whether we +//! are running tests or not. + +#[cfg(not(all(test, loom)))] +mod std; +#[cfg(not(all(test, loom)))] +pub(crate) use self::std::*; + +#[cfg(all(test, loom))] +mod mocked; +#[cfg(all(test, loom))] +pub(crate) use self::mocked::*; diff --git a/third_party/rust/tokio/src/loom/std/atomic_ptr.rs b/third_party/rust/tokio/src/loom/std/atomic_ptr.rs new file mode 100644 index 0000000000..eb8e47557a --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_ptr.rs @@ -0,0 +1,32 @@ +use std::fmt; +use std::ops::Deref; + +/// `AtomicPtr` providing an additional `load_unsync` function. 
+pub(crate) struct AtomicPtr<T> { + inner: std::sync::atomic::AtomicPtr<T>, +} + +impl<T> AtomicPtr<T> { + pub(crate) fn new(ptr: *mut T) -> AtomicPtr<T> { + let inner = std::sync::atomic::AtomicPtr::new(ptr); + AtomicPtr { inner } + } + + pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut *mut T) -> R) -> R { + f(self.inner.get_mut()) + } +} + +impl<T> Deref for AtomicPtr<T> { + type Target = std::sync::atomic::AtomicPtr<T>; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl<T> fmt::Debug for AtomicPtr<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(fmt) + } +} diff --git a/third_party/rust/tokio/src/loom/std/atomic_u16.rs b/third_party/rust/tokio/src/loom/std/atomic_u16.rs new file mode 100644 index 0000000000..70390972b4 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_u16.rs @@ -0,0 +1,44 @@ +use std::cell::UnsafeCell; +use std::fmt; +use std::ops::Deref; + +/// `AtomicU16` providing an additional `load_unsync` function. +pub(crate) struct AtomicU16 { + inner: UnsafeCell<std::sync::atomic::AtomicU16>, +} + +unsafe impl Send for AtomicU16 {} +unsafe impl Sync for AtomicU16 {} + +impl AtomicU16 { + pub(crate) fn new(val: u16) -> AtomicU16 { + let inner = UnsafeCell::new(std::sync::atomic::AtomicU16::new(val)); + AtomicU16 { inner } + } + + /// Performs an unsynchronized load. + /// + /// # Safety + /// + /// All mutations must have happened before the unsynchronized load. + /// Additionally, there must be no concurrent mutations. + pub(crate) unsafe fn unsync_load(&self) -> u16 { + *(*self.inner.get()).get_mut() + } +} + +impl Deref for AtomicU16 { + type Target = std::sync::atomic::AtomicU16; + + fn deref(&self) -> &Self::Target { + // safety: it is always safe to access `&self` fns on the inner value as + // we never perform unsafe mutations. + unsafe { &*self.inner.get() } + } +} + +impl fmt::Debug for AtomicU16 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(fmt) + } +} diff --git a/third_party/rust/tokio/src/loom/std/atomic_u32.rs b/third_party/rust/tokio/src/loom/std/atomic_u32.rs new file mode 100644 index 0000000000..6f786c519f --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_u32.rs @@ -0,0 +1,34 @@ +use std::cell::UnsafeCell; +use std::fmt; +use std::ops::Deref; + +/// `AtomicU32` providing an additional `load_unsync` function. +pub(crate) struct AtomicU32 { + inner: UnsafeCell<std::sync::atomic::AtomicU32>, +} + +unsafe impl Send for AtomicU32 {} +unsafe impl Sync for AtomicU32 {} + +impl AtomicU32 { + pub(crate) fn new(val: u32) -> AtomicU32 { + let inner = UnsafeCell::new(std::sync::atomic::AtomicU32::new(val)); + AtomicU32 { inner } + } +} + +impl Deref for AtomicU32 { + type Target = std::sync::atomic::AtomicU32; + + fn deref(&self) -> &Self::Target { + // safety: it is always safe to access `&self` fns on the inner value as + // we never perform unsafe mutations. + unsafe { &*self.inner.get() } + } +} + +impl fmt::Debug for AtomicU32 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(fmt) + } +} diff --git a/third_party/rust/tokio/src/loom/std/atomic_u64.rs b/third_party/rust/tokio/src/loom/std/atomic_u64.rs new file mode 100644 index 0000000000..206954fcc3 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_u64.rs @@ -0,0 +1,60 @@ +//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a +//! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a +//! `Mutex`. 
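+//!
+//! The mutex-based fallback only mirrors the subset of the `AtomicU64` API
+//! that Tokio itself uses (`load`, `store`, `fetch_or`, and
+//! `compare_and_swap`), as implemented below.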
+ +pub(crate) use self::imp::AtomicU64; + +// `AtomicU64` can only be used on targets with `target_has_atomic` is 64 or greater. +// Once `cfg_target_has_atomic` feature is stable, we can replace it with +// `#[cfg(target_has_atomic = "64")]`. +// Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target +#[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))] +mod imp { + pub(crate) use std::sync::atomic::AtomicU64; +} + +#[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))] +mod imp { + use std::sync::atomic::Ordering; + use std::sync::Mutex; + + #[derive(Debug)] + pub(crate) struct AtomicU64 { + inner: Mutex<u64>, + } + + impl AtomicU64 { + pub(crate) fn new(val: u64) -> AtomicU64 { + AtomicU64 { + inner: Mutex::new(val), + } + } + + pub(crate) fn load(&self, _: Ordering) -> u64 { + *self.inner.lock().unwrap() + } + + pub(crate) fn store(&self, val: u64, _: Ordering) { + *self.inner.lock().unwrap() = val; + } + + pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 { + let mut lock = self.inner.lock().unwrap(); + let prev = *lock; + *lock = prev | val; + prev + } + + pub(crate) fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 { + let mut lock = self.inner.lock().unwrap(); + let prev = *lock; + + if prev != old { + return prev; + } + + *lock = new; + prev + } + } +} diff --git a/third_party/rust/tokio/src/loom/std/atomic_u8.rs b/third_party/rust/tokio/src/loom/std/atomic_u8.rs new file mode 100644 index 0000000000..4fcd0df3d4 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_u8.rs @@ -0,0 +1,34 @@ +use std::cell::UnsafeCell; +use std::fmt; +use std::ops::Deref; + +/// `AtomicU8` providing an additional `load_unsync` function. +pub(crate) struct AtomicU8 { + inner: UnsafeCell<std::sync::atomic::AtomicU8>, +} + +unsafe impl Send for AtomicU8 {} +unsafe impl Sync for AtomicU8 {} + +impl AtomicU8 { + pub(crate) fn new(val: u8) -> AtomicU8 { + let inner = UnsafeCell::new(std::sync::atomic::AtomicU8::new(val)); + AtomicU8 { inner } + } +} + +impl Deref for AtomicU8 { + type Target = std::sync::atomic::AtomicU8; + + fn deref(&self) -> &Self::Target { + // safety: it is always safe to access `&self` fns on the inner value as + // we never perform unsafe mutations. + unsafe { &*self.inner.get() } + } +} + +impl fmt::Debug for AtomicU8 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.deref().fmt(fmt) + } +} diff --git a/third_party/rust/tokio/src/loom/std/atomic_usize.rs b/third_party/rust/tokio/src/loom/std/atomic_usize.rs new file mode 100644 index 0000000000..0fe998f1f9 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/atomic_usize.rs @@ -0,0 +1,56 @@ +use std::cell::UnsafeCell; +use std::fmt; +use std::ops; + +/// `AtomicUsize` providing an additional `load_unsync` function. +pub(crate) struct AtomicUsize { + inner: UnsafeCell<std::sync::atomic::AtomicUsize>, +} + +unsafe impl Send for AtomicUsize {} +unsafe impl Sync for AtomicUsize {} + +impl AtomicUsize { + pub(crate) fn new(val: usize) -> AtomicUsize { + let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val)); + AtomicUsize { inner } + } + + /// Performs an unsynchronized load. + /// + /// # Safety + /// + /// All mutations must have happened before the unsynchronized load. + /// Additionally, there must be no concurrent mutations. 
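+    ///
+    /// A sketch of the intended call pattern (illustrative only; this is an
+    /// internal API):
+    ///
+    /// ```ignore
+    /// let mut a = AtomicUsize::new(1);
+    /// a.with_mut(|v| *v = 2);             // unique access, no concurrency
+    /// let v = unsafe { a.unsync_load() }; // ok: all mutations happened before
+    /// assert_eq!(v, 2);
+    /// ```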
+ pub(crate) unsafe fn unsync_load(&self) -> usize { + *(*self.inner.get()).get_mut() + } + + pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R { + // safety: we have mutable access + f(unsafe { (*self.inner.get()).get_mut() }) + } +} + +impl ops::Deref for AtomicUsize { + type Target = std::sync::atomic::AtomicUsize; + + fn deref(&self) -> &Self::Target { + // safety: it is always safe to access `&self` fns on the inner value as + // we never perform unsafe mutations. + unsafe { &*self.inner.get() } + } +} + +impl ops::DerefMut for AtomicUsize { + fn deref_mut(&mut self) -> &mut Self::Target { + // safety: we hold `&mut self` + unsafe { &mut *self.inner.get() } + } +} + +impl fmt::Debug for AtomicUsize { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(fmt) + } +} diff --git a/third_party/rust/tokio/src/loom/std/mod.rs b/third_party/rust/tokio/src/loom/std/mod.rs new file mode 100644 index 0000000000..595bdf60ed --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/mod.rs @@ -0,0 +1,87 @@ +#![cfg_attr(any(not(feature = "full"), loom), allow(unused_imports, dead_code))] + +mod atomic_ptr; +mod atomic_u16; +mod atomic_u32; +mod atomic_u64; +mod atomic_u8; +mod atomic_usize; +mod unsafe_cell; + +pub(crate) mod cell { + pub(crate) use super::unsafe_cell::UnsafeCell; +} + +#[cfg(any(feature = "sync", feature = "io-driver"))] +pub(crate) mod future { + pub(crate) use crate::sync::AtomicWaker; +} + +pub(crate) mod rand { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hash, Hasher}; + use std::sync::atomic::AtomicU32; + use std::sync::atomic::Ordering::Relaxed; + + static COUNTER: AtomicU32 = AtomicU32::new(1); + + pub(crate) fn seed() -> u64 { + let rand_state = RandomState::new(); + + let mut hasher = rand_state.build_hasher(); + + // Hash some unique-ish data to generate some new state + COUNTER.fetch_add(1, Relaxed).hash(&mut hasher); + + // Get the seed + hasher.finish() + } +} + +pub(crate) mod sync { + pub(crate) use std::sync::Arc; + + #[cfg(feature = "parking_lot")] + mod pl_wrappers; + + // Below, make sure all the feature-influenced types are exported for + // internal use. Note however that some are not _currently_ named by + // consuming code. 
+ + #[cfg(feature = "parking_lot")] + #[allow(unused_imports)] + pub(crate) use pl_wrappers::{Condvar, Mutex}; + + #[cfg(feature = "parking_lot")] + #[allow(unused_imports)] + pub(crate) use parking_lot::{MutexGuard, WaitTimeoutResult}; + + #[cfg(not(feature = "parking_lot"))] + #[allow(unused_imports)] + pub(crate) use std::sync::{Condvar, Mutex, MutexGuard, WaitTimeoutResult}; + + pub(crate) mod atomic { + pub(crate) use crate::loom::std::atomic_ptr::AtomicPtr; + pub(crate) use crate::loom::std::atomic_u16::AtomicU16; + pub(crate) use crate::loom::std::atomic_u32::AtomicU32; + pub(crate) use crate::loom::std::atomic_u64::AtomicU64; + pub(crate) use crate::loom::std::atomic_u8::AtomicU8; + pub(crate) use crate::loom::std::atomic_usize::AtomicUsize; + + pub(crate) use std::sync::atomic::{spin_loop_hint, AtomicBool}; + } +} + +pub(crate) mod sys { + #[cfg(feature = "rt-threaded")] + pub(crate) fn num_cpus() -> usize { + usize::max(1, num_cpus::get()) + } + + #[cfg(not(feature = "rt-threaded"))] + pub(crate) fn num_cpus() -> usize { + 1 + } +} + +pub(crate) use std::thread; diff --git a/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs b/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs new file mode 100644 index 0000000000..3be8ba1c10 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs @@ -0,0 +1,79 @@ +//! A minimal adaption of the `parking_lot` synchronization primitives to the +//! equivalent `std::sync` types. +//! +//! This can be extended to additional types/methods as required. + +use std::sync::{LockResult, TryLockError, TryLockResult}; +use std::time::Duration; + +use parking_lot as pl; + +/// Adapter for `parking_lot::Mutex` to the `std::sync::Mutex` interface. +#[derive(Debug)] +pub(crate) struct Mutex<T: ?Sized>(pl::Mutex<T>); + +impl<T> Mutex<T> { + #[inline] + pub(crate) fn new(t: T) -> Mutex<T> { + Mutex(pl::Mutex::new(t)) + } + + #[inline] + pub(crate) fn lock(&self) -> LockResult<pl::MutexGuard<'_, T>> { + Ok(self.0.lock()) + } + + #[inline] + pub(crate) fn try_lock(&self) -> TryLockResult<pl::MutexGuard<'_, T>> { + match self.0.try_lock() { + Some(guard) => Ok(guard), + None => Err(TryLockError::WouldBlock), + } + } + + // Note: Additional methods `is_poisoned` and `into_inner`, can be + // provided here as needed. +} + +/// Adapter for `parking_lot::Condvar` to the `std::sync::Condvar` interface. +#[derive(Debug)] +pub(crate) struct Condvar(pl::Condvar); + +impl Condvar { + #[inline] + pub(crate) fn new() -> Condvar { + Condvar(pl::Condvar::new()) + } + + #[inline] + pub(crate) fn notify_one(&self) { + self.0.notify_one(); + } + + #[inline] + pub(crate) fn notify_all(&self) { + self.0.notify_all(); + } + + #[inline] + pub(crate) fn wait<'a, T>( + &self, + mut guard: pl::MutexGuard<'a, T>, + ) -> LockResult<pl::MutexGuard<'a, T>> { + self.0.wait(&mut guard); + Ok(guard) + } + + #[inline] + pub(crate) fn wait_timeout<'a, T>( + &self, + mut guard: pl::MutexGuard<'a, T>, + timeout: Duration, + ) -> LockResult<(pl::MutexGuard<'a, T>, pl::WaitTimeoutResult)> { + let wtr = self.0.wait_for(&mut guard, timeout); + Ok((guard, wtr)) + } + + // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`, + // `wait_until` can be provided here as needed. 
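+
+    // Illustrative call pattern (call sites keep the `std::sync` idioms):
+    //
+    //     let guard = mutex.lock().unwrap();
+    //     let (guard, result) = condvar.wait_timeout(guard, timeout).unwrap();
+    //
+    // The `unwrap`s never fail here: `parking_lot` primitives do not have
+    // poisoning, so these adapters always return `Ok`.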
+} diff --git a/third_party/rust/tokio/src/loom/std/unsafe_cell.rs b/third_party/rust/tokio/src/loom/std/unsafe_cell.rs new file mode 100644 index 0000000000..f2b03d8dc2 --- /dev/null +++ b/third_party/rust/tokio/src/loom/std/unsafe_cell.rs @@ -0,0 +1,16 @@ +#[derive(Debug)] +pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>); + +impl<T> UnsafeCell<T> { + pub(crate) fn new(data: T) -> UnsafeCell<T> { + UnsafeCell(std::cell::UnsafeCell::new(data)) + } + + pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R { + f(self.0.get()) + } + + pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R { + f(self.0.get()) + } +} diff --git a/third_party/rust/tokio/src/macros/cfg.rs b/third_party/rust/tokio/src/macros/cfg.rs new file mode 100644 index 0000000000..18beb1bdbb --- /dev/null +++ b/third_party/rust/tokio/src/macros/cfg.rs @@ -0,0 +1,322 @@ +#![allow(unused_macros)] + +macro_rules! cfg_resource_drivers { + ($($item:item)*) => { + $( + #[cfg(any(feature = "io-driver", feature = "time"))] + $item + )* + } +} + +macro_rules! cfg_blocking { + ($($item:item)*) => { + $( + #[cfg(feature = "blocking")] + #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))] + $item + )* + } +} + +/// Enables blocking API internals +macro_rules! cfg_blocking_impl { + ($($item:item)*) => { + $( + #[cfg(any( + feature = "blocking", + feature = "fs", + feature = "dns", + feature = "io-std", + feature = "rt-threaded", + ))] + $item + )* + } +} + +/// Enables blocking API internals +macro_rules! cfg_not_blocking_impl { + ($($item:item)*) => { + $( + #[cfg(not(any( + feature = "blocking", + feature = "fs", + feature = "dns", + feature = "io-std", + feature = "rt-threaded", + )))] + $item + )* + } +} + +/// Enables internal `AtomicWaker` impl +macro_rules! cfg_atomic_waker_impl { + ($($item:item)*) => { + $( + #[cfg(any( + feature = "io-driver", + feature = "time", + all(feature = "rt-core", feature = "rt-util") + ))] + #[cfg(not(loom))] + $item + )* + } +} + +macro_rules! cfg_dns { + ($($item:item)*) => { + $( + #[cfg(feature = "dns")] + #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] + $item + )* + } +} + +macro_rules! cfg_fs { + ($($item:item)*) => { + $( + #[cfg(feature = "fs")] + #[cfg_attr(docsrs, doc(cfg(feature = "fs")))] + $item + )* + } +} + +macro_rules! cfg_io_blocking { + ($($item:item)*) => { + $( #[cfg(any(feature = "io-std", feature = "fs"))] $item )* + } +} + +macro_rules! cfg_io_driver { + ($($item:item)*) => { + $( + #[cfg(feature = "io-driver")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-driver")))] + $item + )* + } +} + +macro_rules! cfg_not_io_driver { + ($($item:item)*) => { + $( + #[cfg(not(feature = "io-driver"))] + $item + )* + } +} + +macro_rules! cfg_io_std { + ($($item:item)*) => { + $( + #[cfg(feature = "io-std")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-std")))] + $item + )* + } +} + +macro_rules! cfg_io_util { + ($($item:item)*) => { + $( + #[cfg(feature = "io-util")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))] + $item + )* + } +} + +macro_rules! cfg_not_io_util { + ($($item:item)*) => { + $( #[cfg(not(feature = "io-util"))] $item )* + } +} + +macro_rules! cfg_loom { + ($($item:item)*) => { + $( #[cfg(loom)] $item )* + } +} + +macro_rules! cfg_not_loom { + ($($item:item)*) => { + $( #[cfg(not(loom))] $item )* + } +} + +macro_rules! cfg_macros { + ($($item:item)*) => { + $( + #[cfg(feature = "macros")] + #[cfg_attr(docsrs, doc(cfg(feature = "macros")))] + #[doc(inline)] + $item + )* + } +} + +macro_rules! 
cfg_process {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "process")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "process")))]
+            #[cfg(not(loom))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_signal {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "signal")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "signal")))]
+            #[cfg(not(loom))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_stream {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "stream")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_sync {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "sync")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "sync")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_not_sync {
+    ($($item:item)*) => {
+        $( #[cfg(not(feature = "sync"))] $item )*
+    }
+}
+
+macro_rules! cfg_rt_core {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "rt-core")]
+            $item
+        )*
+    }
+}
+
+macro_rules! doc_rt_core {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "rt-core")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "rt-core")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_not_rt_core {
+    ($($item:item)*) => {
+        $( #[cfg(not(feature = "rt-core"))] $item )*
+    }
+}
+
+macro_rules! cfg_rt_threaded {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "rt-threaded")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "rt-threaded")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_rt_util {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "rt-util")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "rt-util")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_not_rt_threaded {
+    ($($item:item)*) => {
+        $( #[cfg(not(feature = "rt-threaded"))] $item )*
+    }
+}
+
+macro_rules! cfg_tcp {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "tcp")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_test_util {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "test-util")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_not_test_util {
+    ($($item:item)*) => {
+        $( #[cfg(not(feature = "test-util"))] $item )*
+    }
+}
+
+macro_rules! cfg_time {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "time")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_not_time {
+    ($($item:item)*) => {
+        $( #[cfg(not(feature = "time"))] $item )*
+    }
+}
+
+macro_rules! cfg_udp {
+    ($($item:item)*) => {
+        $(
+            #[cfg(feature = "udp")]
+            #[cfg_attr(docsrs, doc(cfg(feature = "udp")))]
+            $item
+        )*
+    }
+}
+
+macro_rules! cfg_uds {
+    ($($item:item)*) => {
+        $(
+            #[cfg(all(unix, feature = "uds"))]
+            #[cfg_attr(docsrs, doc(cfg(feature = "uds")))]
+            $item
+        )*
+    }
+}
diff --git a/third_party/rust/tokio/src/macros/join.rs b/third_party/rust/tokio/src/macros/join.rs
new file mode 100644
index 0000000000..5f37af510d
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/join.rs
@@ -0,0 +1,119 @@
+/// Wait on multiple concurrent branches, returning when **all** branches
+/// complete.
+///
+/// The `join!` macro must be used inside of async functions, closures, and
+/// blocks.
+///
+/// The `join!` macro takes a list of async expressions and evaluates them
+/// concurrently on the same task. Each async expression evaluates to a future
+/// and the futures from each expression are multiplexed on the current task.
+///
+/// When working with async expressions returning `Result`, `join!` will wait
+/// for **all** branches to complete, regardless of whether any complete with
+/// `Err`. Use [`try_join!`] to return early when `Err` is encountered.
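+///
+/// For example (an illustrative sketch; the async functions are placeholders),
+/// both fallible branches below are driven to completion and both `Result`s
+/// are returned:
+///
+/// ```
+/// async fn fetch_a() -> Result<u32, ()> { Ok(1) }
+/// async fn fetch_b() -> Result<u32, ()> { Ok(2) }
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (a, b) = tokio::join!(fetch_a(), fetch_b());
+///
+///     // Both results are available, even if one had been `Err`.
+///     assert_eq!(a, Ok(1));
+///     assert_eq!(b, Ok(2));
+/// }
+/// ```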
+///
+/// [`try_join!`]: macro@try_join
+///
+/// # Notes
+///
+/// The supplied futures are stored inline and do not require allocating a
+/// `Vec`.
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `join!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// # Examples
+///
+/// Basic join with two branches
+///
+/// ```
+/// async fn do_stuff_async() {
+///     // async work
+/// }
+///
+/// async fn more_async_work() {
+///     // more here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (first, second) = tokio::join!(
+///         do_stuff_async(),
+///         more_async_work());
+///
+///     // do something with the values
+/// }
+/// ```
+#[macro_export]
+#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+macro_rules! join {
+    (@ {
+        // One `_` for each branch in the `join!` macro. This is not used once
+        // normalization is complete.
+        ( $($count:tt)* )
+
+        // Normalized join! branches
+        $( ( $($skip:tt)* ) $e:expr, )*
+
+    }) => {{
+        use $crate::macros::support::{maybe_done, poll_fn, Future, Pin};
+        use $crate::macros::support::Poll::{Ready, Pending};
+
+        // Safety: nothing must be moved out of `futures`. This is to satisfy
+        // the requirement of `Pin::new_unchecked` called below.
+        let mut futures = ( $( maybe_done($e), )* );
+
+        poll_fn(move |cx| {
+            let mut is_pending = false;
+
+            $(
+                // Extract the future for this branch from the tuple.
+                let ( $($skip,)* fut, .. ) = &mut futures;
+
+                // Safety: future is stored on the stack above
+                // and never moved.
+                let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                // Try polling
+                if fut.poll(cx).is_pending() {
+                    is_pending = true;
+                }
+            )*
+
+            if is_pending {
+                Pending
+            } else {
+                Ready(($({
+                    // Extract the future for this branch from the tuple.
+                    let ( $($skip,)* fut, .. ) = &mut futures;
+
+                    // Safety: future is stored on the stack above
+                    // and never moved.
+                    let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                    fut.take_output().expect("expected completed future")
+                },)*))
+            }
+        }).await
+    }};
+
+    // ===== Normalize =====
+
+    (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+        $crate::join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+    };
+
+    // ===== Entry point =====
+
+    ( $($e:expr),* $(,)?) => {
+        $crate::join!(@{ () } $($e,)*)
+    };
+}
diff --git a/third_party/rust/tokio/src/macros/loom.rs b/third_party/rust/tokio/src/macros/loom.rs
new file mode 100644
index 0000000000..d57d9fb0f7
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/loom.rs
@@ -0,0 +1,12 @@
+macro_rules! if_loom {
+    ($($t:tt)*) => {{
+        #[cfg(loom)]
+        const LOOM: bool = true;
+        #[cfg(not(loom))]
+        const LOOM: bool = false;
+
+        if LOOM {
+            $($t)*
+        }
+    }}
+}
diff --git a/third_party/rust/tokio/src/macros/mod.rs b/third_party/rust/tokio/src/macros/mod.rs
new file mode 100644
index 0000000000..2643c36018
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/mod.rs
@@ -0,0 +1,35 @@
+#![cfg_attr(not(feature = "full"), allow(unused_macros))]
+
+#[macro_use]
+mod cfg;
+
+#[macro_use]
+mod loom;
+
+#[macro_use]
+mod pin;
+
+#[macro_use]
+mod ready;
+
+#[macro_use]
+mod thread_local;
+
+#[macro_use]
+#[cfg(feature = "rt-core")]
+pub(crate) mod scoped_tls;
+
+cfg_macros!
{ + #[macro_use] + mod select; + + #[macro_use] + mod join; + + #[macro_use] + mod try_join; +} + +// Includes re-exports needed to implement macros +#[doc(hidden)] +pub mod support; diff --git a/third_party/rust/tokio/src/macros/pin.rs b/third_party/rust/tokio/src/macros/pin.rs new file mode 100644 index 0000000000..33d8499e10 --- /dev/null +++ b/third_party/rust/tokio/src/macros/pin.rs @@ -0,0 +1,144 @@ +/// Pins a value on the stack. +/// +/// Calls to `async fn` return anonymous [`Future`] values that are `!Unpin`. +/// These values must be pinned before they can be polled. Calling `.await` will +/// handle this, but consumes the future. If it is required to call `.await` on +/// a `&mut _` reference, the caller is responsible for pinning the future. +/// +/// Pinning may be done by allocating with [`Box::pin`] or by using the stack +/// with the `pin!` macro. +/// +/// The following will **fail to compile**: +/// +/// ```compile_fail +/// async fn my_async_fn() { +/// // async logic here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let mut future = my_async_fn(); +/// (&mut future).await; +/// } +/// ``` +/// +/// To make this work requires pinning: +/// +/// ``` +/// use tokio::pin; +/// +/// async fn my_async_fn() { +/// // async logic here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let future = my_async_fn(); +/// pin!(future); +/// +/// (&mut future).await; +/// } +/// ``` +/// +/// Pinning is useful when using `select!` and stream operators that require `T: +/// Stream + Unpin`. +/// +/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html +/// [`Box::pin`]: # +/// +/// # Usage +/// +/// The `pin!` macro takes **identifiers** as arguments. It does **not** work +/// with expressions. +/// +/// The following does not compile as an expression is passed to `pin!`. +/// +/// ```compile_fail +/// async fn my_async_fn() { +/// // async logic here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let mut future = pin!(my_async_fn()); +/// (&mut future).await; +/// } +/// ``` +/// +/// # Examples +/// +/// Using with select: +/// +/// ``` +/// use tokio::{pin, select}; +/// use tokio::stream::{self, StreamExt}; +/// +/// async fn my_async_fn() { +/// // async logic here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let mut stream = stream::iter(vec![1, 2, 3, 4]); +/// +/// let future = my_async_fn(); +/// pin!(future); +/// +/// loop { +/// select! { +/// _ = &mut future => { +/// // Stop looping `future` will be polled after completion +/// break; +/// } +/// Some(val) = stream.next() => { +/// println!("got value = {}", val); +/// } +/// } +/// } +/// } +/// ``` +/// +/// Because assigning to a variable followed by pinning is common, there is also +/// a variant of the macro that supports doing both in one go. +/// +/// ``` +/// use tokio::{pin, select}; +/// +/// async fn my_async_fn() { +/// // async logic here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// pin! { +/// let future1 = my_async_fn(); +/// let future2 = my_async_fn(); +/// } +/// +/// select! { +/// _ = &mut future1 => {} +/// _ = &mut future2 => {} +/// } +/// } +/// ``` +#[macro_export] +macro_rules! pin { + ($($x:ident),*) => { $( + // Move the value to ensure that it is owned + let mut $x = $x; + // Shadow the original binding so that it can't be directly accessed + // ever again. 
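+        // (If the original binding remained reachable, the value could be
+        // moved or overwritten after it was pinned, which would violate the
+        // invariant that `Pin` relies on.)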
+ #[allow(unused_mut)] + let mut $x = unsafe { + $crate::macros::support::Pin::new_unchecked(&mut $x) + }; + )* }; + ($( + let $x:ident = $init:expr; + )*) => { + $( + let $x = $init; + $crate::pin!($x); + )* + }; +} diff --git a/third_party/rust/tokio/src/macros/ready.rs b/third_party/rust/tokio/src/macros/ready.rs new file mode 100644 index 0000000000..1f48623b80 --- /dev/null +++ b/third_party/rust/tokio/src/macros/ready.rs @@ -0,0 +1,8 @@ +macro_rules! ready { + ($e:expr $(,)?) => { + match $e { + std::task::Poll::Ready(t) => t, + std::task::Poll::Pending => return std::task::Poll::Pending, + } + }; +} diff --git a/third_party/rust/tokio/src/macros/scoped_tls.rs b/third_party/rust/tokio/src/macros/scoped_tls.rs new file mode 100644 index 0000000000..666f382b28 --- /dev/null +++ b/third_party/rust/tokio/src/macros/scoped_tls.rs @@ -0,0 +1,80 @@ +use crate::loom::thread::LocalKey; + +use std::cell::Cell; +use std::marker; + +/// Set a reference as a thread-local +#[macro_export] +macro_rules! scoped_thread_local { + ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => ( + $(#[$attrs])* + $vis static $name: $crate::macros::scoped_tls::ScopedKey<$ty> + = $crate::macros::scoped_tls::ScopedKey { + inner: { + thread_local!(static FOO: ::std::cell::Cell<*const ()> = { + std::cell::Cell::new(::std::ptr::null()) + }); + &FOO + }, + _marker: ::std::marker::PhantomData, + }; + ) +} + +/// Type representing a thread local storage key corresponding to a reference +/// to the type parameter `T`. +pub(crate) struct ScopedKey<T> { + #[doc(hidden)] + pub(crate) inner: &'static LocalKey<Cell<*const ()>>, + #[doc(hidden)] + pub(crate) _marker: marker::PhantomData<T>, +} + +unsafe impl<T> Sync for ScopedKey<T> {} + +impl<T> ScopedKey<T> { + /// Inserts a value into this scoped thread local storage slot for a + /// duration of a closure. + pub(crate) fn set<F, R>(&'static self, t: &T, f: F) -> R + where + F: FnOnce() -> R, + { + struct Reset { + key: &'static LocalKey<Cell<*const ()>>, + val: *const (), + } + + impl Drop for Reset { + fn drop(&mut self) { + self.key.with(|c| c.set(self.val)); + } + } + + let prev = self.inner.with(|c| { + let prev = c.get(); + c.set(t as *const _ as *const ()); + prev + }); + + let _reset = Reset { + key: self.inner, + val: prev, + }; + + f() + } + + /// Gets a value out of this scoped variable. + pub(crate) fn with<F, R>(&'static self, f: F) -> R + where + F: FnOnce(Option<&T>) -> R, + { + let val = self.inner.with(|c| c.get()); + + if val.is_null() { + f(None) + } else { + unsafe { f(Some(&*(val as *const T))) } + } + } +} diff --git a/third_party/rust/tokio/src/macros/select.rs b/third_party/rust/tokio/src/macros/select.rs new file mode 100644 index 0000000000..51b6fcd608 --- /dev/null +++ b/third_party/rust/tokio/src/macros/select.rs @@ -0,0 +1,876 @@ +/// Wait on multiple concurrent branches, returning when the **first** branch +/// completes, cancelling the remaining branches. +/// +/// The `select!` macro must be used inside of async functions, closures, and +/// blocks. +/// +/// The `select!` macro accepts one or more branches with the following pattern: +/// +/// ```text +/// <pattern> = <async expression> (, if <precondition>)? 
=> <handler>,
+/// ```
+///
+/// Additionally, the `select!` macro may include a single, optional `else`
+/// branch, which evaluates if none of the other branches match their patterns:
+///
+/// ```text
+/// else <expression>
+/// ```
+///
+/// The macro aggregates all `<async expression>` expressions and runs them
+/// concurrently on the **current** task. Once the **first** expression
+/// completes with a value that matches its `<pattern>`, the `select!` macro
+/// returns the result of evaluating the completed branch's `<handler>`
+/// expression.
+///
+/// Additionally, each branch may include an optional `if` precondition. This
+/// precondition is evaluated **before** the `<async expression>`. If the
+/// precondition returns `false`, the branch is entirely disabled. This
+/// capability is useful when using `select!` within a loop.
+///
+/// The complete lifecycle of a `select!` expression is as follows:
+///
+/// 1. Evaluate all provided `<precondition>` expressions. If the precondition
+///    returns `false`, disable the branch for the remainder of the current call
+///    to `select!`. Re-entering `select!` due to a loop clears the "disabled"
+///    state.
+/// 2. Aggregate the `<async expression>`s from each branch, including the
+///    disabled ones. If the branch is disabled, `<async expression>` is still
+///    evaluated, but the resulting future is not polled.
+/// 3. Concurrently await on the results for all remaining `<async expression>`s.
+/// 4. Once an `<async expression>` returns a value, attempt to apply the value
+///    to the provided `<pattern>`. If the pattern matches, evaluate `<handler>`
+///    and return. If the pattern **does not** match, disable the current branch
+///    for the remainder of the current call to `select!`. Continue from step 3.
+/// 5. If **all** branches are disabled, evaluate the `else` expression. If none
+///    is provided, panic.
+///
+/// # Notes
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `select!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// ### Avoid racy `if` preconditions
+///
+/// Given that `if` preconditions are used to disable `select!` branches, some
+/// caution must be used to avoid missing values.
+///
+/// For example, here is **incorrect** usage of `delay` with `if`. The objective
+/// is to repeatedly run an asynchronous task for up to 50 milliseconds.
+/// However, there is a potential for the `delay` completion to be missed.
+///
+/// ```no_run
+/// use tokio::time::{self, Duration};
+///
+/// async fn some_async_work() {
+///     // do work
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let mut delay = time::delay_for(Duration::from_millis(50));
+///
+///     while !delay.is_elapsed() {
+///         tokio::select! {
+///             _ = &mut delay, if !delay.is_elapsed() => {
+///                 println!("operation timed out");
+///             }
+///             _ = some_async_work() => {
+///                 println!("operation completed");
+///             }
+///         }
+///     }
+/// }
+/// ```
+///
+/// In the above example, `delay.is_elapsed()` may return `true` even if
+/// `delay.poll()` never returned `Ready`.
This opens up a potential race +/// condition where `delay` expires between the `while !delay.is_elapsed()` +/// check and the call to `select!` resulting in the `some_async_work()` call to +/// run uninterrupted despite the delay having elapsed. +/// +/// One way to write the above example without the race would be: +/// +/// ``` +/// use tokio::time::{self, Duration}; +/// +/// async fn some_async_work() { +/// # time::delay_for(Duration::from_millis(10)).await; +/// // do work +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let mut delay = time::delay_for(Duration::from_millis(50)); +/// +/// loop { +/// tokio::select! { +/// _ = &mut delay => { +/// println!("operation timed out"); +/// break; +/// } +/// _ = some_async_work() => { +/// println!("operation completed"); +/// } +/// } +/// } +/// } +/// ``` +/// +/// ### Fairness +/// +/// `select!` randomly picks a branch to check first. This provides some level +/// of fairness when calling `select!` in a loop with branches that are always +/// ready. +/// +/// # Panics +/// +/// `select!` panics if all branches are disabled **and** there is no provided +/// `else` branch. A branch is disabled when the provided `if` precondition +/// returns `false` **or** when the pattern does not match the result of `<async +/// expression>. +/// +/// # Examples +/// +/// Basic select with two branches. +/// +/// ``` +/// async fn do_stuff_async() { +/// // async work +/// } +/// +/// async fn more_async_work() { +/// // more here +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// tokio::select! { +/// _ = do_stuff_async() => { +/// println!("do_stuff_async() completed first") +/// } +/// _ = more_async_work() => { +/// println!("more_async_work() completed first") +/// } +/// }; +/// } +/// ``` +/// +/// Basic stream selecting. +/// +/// ``` +/// use tokio::stream::{self, StreamExt}; +/// +/// #[tokio::main] +/// async fn main() { +/// let mut stream1 = stream::iter(vec![1, 2, 3]); +/// let mut stream2 = stream::iter(vec![4, 5, 6]); +/// +/// let next = tokio::select! { +/// v = stream1.next() => v.unwrap(), +/// v = stream2.next() => v.unwrap(), +/// }; +/// +/// assert!(next == 1 || next == 4); +/// } +/// ``` +/// +/// Collect the contents of two streams. In this example, we rely on pattern +/// matching and the fact that `stream::iter` is "fused", i.e. once the stream +/// is complete, all calls to `next()` return `None`. +/// +/// ``` +/// use tokio::stream::{self, StreamExt}; +/// +/// #[tokio::main] +/// async fn main() { +/// let mut stream1 = stream::iter(vec![1, 2, 3]); +/// let mut stream2 = stream::iter(vec![4, 5, 6]); +/// +/// let mut values = vec![]; +/// +/// loop { +/// tokio::select! { +/// Some(v) = stream1.next() => values.push(v), +/// Some(v) = stream2.next() => values.push(v), +/// else => break, +/// } +/// } +/// +/// values.sort(); +/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]); +/// } +/// ``` +/// +/// Using the same future in multiple `select!` expressions can be done by passing +/// a reference to the future. Doing so requires the future to be [`Unpin`]. A +/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning. +/// +/// [`Unpin`]: std::marker::Unpin +/// [`Box::pin`]: std::boxed::Box::pin +/// +/// Here, a stream is consumed for at most 1 second. 
+/// +/// ``` +/// use tokio::stream::{self, StreamExt}; +/// use tokio::time::{self, Duration}; +/// +/// #[tokio::main] +/// async fn main() { +/// let mut stream = stream::iter(vec![1, 2, 3]); +/// let mut delay = time::delay_for(Duration::from_secs(1)); +/// +/// loop { +/// tokio::select! { +/// maybe_v = stream.next() => { +/// if let Some(v) = maybe_v { +/// println!("got = {}", v); +/// } else { +/// break; +/// } +/// } +/// _ = &mut delay => { +/// println!("timeout"); +/// break; +/// } +/// } +/// } +/// } +/// ``` +/// +/// Joining two values using `select!`. +/// +/// ``` +/// use tokio::sync::oneshot; +/// +/// #[tokio::main] +/// async fn main() { +/// let (tx1, mut rx1) = oneshot::channel(); +/// let (tx2, mut rx2) = oneshot::channel(); +/// +/// tokio::spawn(async move { +/// tx1.send("first").unwrap(); +/// }); +/// +/// tokio::spawn(async move { +/// tx2.send("second").unwrap(); +/// }); +/// +/// let mut a = None; +/// let mut b = None; +/// +/// while a.is_none() || b.is_none() { +/// tokio::select! { +/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()), +/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()), +/// } +/// } +/// +/// let res = (a.unwrap(), b.unwrap()); +/// +/// assert_eq!(res.0, "first"); +/// assert_eq!(res.1, "second"); +/// } +/// ``` +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(feature = "macros")))] +macro_rules! select { + // Uses a declarative macro to do **most** of the work. While it is possible + // to implement fully with a declarative macro, a procedural macro is used + // to enable improved error messages. + // + // The macro is structured as a tt-muncher. All branches are processed and + // normalized. Once the input is normalized, it is passed to the top-most + // rule. When entering the macro, `@{ }` is inserted at the front. This is + // used to collect the normalized input. + // + // The macro only recurses once per branch. This allows using `select!` + // without requiring the user to increase the recursion limit. + + // All input is normalized, now transform. + (@ { + // One `_` for each branch in the `select!` macro. Passing this to + // `count!` converts $skip to an integer. + ( $($count:tt)* ) + + // Normalized select branches. `( $skip )` is a set of `_` characters. + // There is one `_` for each select branch **before** this one. Given + // that all input futures are stored in a tuple, $skip is useful for + // generating a pattern to reference the future for the current branch. + // $skip is also used as an argument to `count!`, returning the index of + // the current select branch. + $( ( $($skip:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+ + + // Fallback expression used when all select branches have been disabled. + ; $else:expr + + }) => {{ + // Enter a context where stable "function-like" proc macros can be used. + // + // This module is defined within a scope and should not leak out of this + // macro. + mod util { + // Generate an enum with one variant per select branch + $crate::select_priv_declare_output_enum!( ( $($count)* ) ); + } + + // `tokio::macros::support` is a public, but doc(hidden) module + // including a re-export of all types needed by this macro. + use $crate::macros::support::Future; + use $crate::macros::support::Pin; + use $crate::macros::support::Poll::{Ready, Pending}; + + const BRANCHES: u32 = $crate::count!( $($count)* ); + + let mut disabled: util::Mask = Default::default(); + + // First, invoke all the pre-conditions. 
For any that return false,
+        // set the appropriate bit in `disabled`.
+        $(
+            if !$c {
+                let mask = 1 << $crate::count!( $($skip)* );
+                disabled |= mask;
+            }
+        )*
+
+        // Create a scope to separate polling from handling the output. This
+        // adds borrow checker flexibility when using the macro.
+        let mut output = {
+            // Safety: Nothing must be moved out of `futures`. This is to
+            // satisfy the requirement of `Pin::new_unchecked` called below.
+            let mut futures = ( $( $fut , )+ );
+
+            $crate::macros::support::poll_fn(|cx| {
+                // Track if any branch returns pending. If no branch completes
+                // **or** returns pending, this implies that all branches are
+                // disabled.
+                let mut is_pending = false;
+
+                // Randomly generate a starting point. This makes `select!` a
+                // bit more fair and avoids always polling the first future.
+                let start = $crate::macros::support::thread_rng_n(BRANCHES);
+
+                for i in 0..BRANCHES {
+                    let branch = (start + i) % BRANCHES;
+
+                    match branch {
+                        $(
+                            $crate::count!( $($skip)* ) => {
+                                // First, if the future has previously been
+                                // disabled, do not poll it again. This is done
+                                // by checking the associated bit in the
+                                // `disabled` bit field.
+                                let mask = 1 << branch;
+
+                                if disabled & mask == mask {
+                                    // The future has been disabled.
+                                    continue;
+                                }
+
+                                // Extract the future for this branch from the
+                                // tuple
+                                let ( $($skip,)* fut, .. ) = &mut futures;
+
+                                // Safety: future is stored on the stack above
+                                // and never moved.
+                                let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                                // Try polling it
+                                let out = match fut.poll(cx) {
+                                    Ready(out) => out,
+                                    Pending => {
+                                        // Track that at least one future is
+                                        // still pending and continue polling.
+                                        is_pending = true;
+                                        continue;
+                                    }
+                                };
+
+                                // Disable the future from future polling.
+                                disabled |= mask;
+
+                                // The future returned a value, check if it
+                                // matches the specified pattern.
+                                #[allow(unused_variables)]
+                                match &out {
+                                    $bind => {}
+                                    _ => continue,
+                                }
+
+                                // The select is complete, return the value
+                                return Ready($crate::select_variant!(util::Out, ($($skip)*))(out));
+                            }
+                        )*
+                        _ => unreachable!("reaching this means there probably is an off by one bug"),
+                    }
+                }
+
+                if is_pending {
+                    Pending
+                } else {
+                    // All branches have been disabled.
+                    Ready(util::Out::Disabled)
+                }
+            }).await
+        };
+
+        match output {
+            $(
+                $crate::select_variant!(util::Out, ($($skip)*) ($bind)) => $handle,
+            )*
+            util::Out::Disabled => $else,
+            _ => unreachable!("failed to match bind"),
+        }
+    }};
+
+    // ===== Normalize =====
+
+    // These rules match a single `select!` branch and normalize it for
+    // processing by the first rule.
+
+    (@ { $($t:tt)* } ) => {
+        // No `else` branch
+        $crate::select!(@{ $($t)*; unreachable!() })
+    };
+    (@ { $($t:tt)* } else => $else:expr $(,)?)
=> { + $crate::select!(@{ $($t)*; $else }) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, }) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, }) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*) + }; + (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => { + $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*) + }; + + // ===== Entry point ===== + + ( $p:pat = $($t:tt)* ) => { + $crate::select!(@{ () } $p = $($t)*) + }; + () => { + compile_error!("select! requires at least one branch.") + }; +} + +// And here... we manually list out matches for up to 64 branches... I'm not +// happy about it either, but this is how we manage to use a declarative macro! + +#[macro_export] +#[doc(hidden)] +macro_rules! count { + () => { + 0 + }; + (_) => { + 1 + }; + (_ _) => { + 2 + }; + (_ _ _) => { + 3 + }; + (_ _ _ _) => { + 4 + }; + (_ _ _ _ _) => { + 5 + }; + (_ _ _ _ _ _) => { + 6 + }; + (_ _ _ _ _ _ _) => { + 7 + }; + (_ _ _ _ _ _ _ _) => { + 8 + }; + (_ _ _ _ _ _ _ _ _) => { + 9 + }; + (_ _ _ _ _ _ _ _ _ _) => { + 10 + }; + (_ _ _ _ _ _ _ _ _ _ _) => { + 11 + }; + (_ _ _ _ _ _ _ _ _ _ _ _) => { + 12 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _) => { + 13 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 14 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 15 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 16 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 17 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 18 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 19 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 20 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 21 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 22 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 23 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 24 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 25 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 26 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 27 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 28 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 29 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 30 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 31 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 32 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 33 + }; + (_ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 34 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 35 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 36 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 37 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 38 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 39 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 40 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 41 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 42 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 43 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 44 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 45 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 46 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 47 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 48 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 49 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 50 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 51 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 52 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 53 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 54 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 55 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 56 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 57 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 58 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 59 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 60 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 61 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 62 + }; + (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => { + 63 + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! 
select_variant { + ($($p:ident)::*, () $($t:tt)*) => { + $($p)::*::_0 $($t)* + }; + ($($p:ident)::*, (_) $($t:tt)*) => { + $($p)::*::_1 $($t)* + }; + ($($p:ident)::*, (_ _) $($t:tt)*) => { + $($p)::*::_2 $($t)* + }; + ($($p:ident)::*, (_ _ _) $($t:tt)*) => { + $($p)::*::_3 $($t)* + }; + ($($p:ident)::*, (_ _ _ _) $($t:tt)*) => { + $($p)::*::_4 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _) $($t:tt)*) => { + $($p)::*::_5 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_6 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_7 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_8 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_9 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_10 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_11 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_12 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_13 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_14 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_15 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_16 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_17 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_18 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_19 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_20 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_21 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_22 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_23 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_24 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_25 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_26 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_27 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_28 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_29 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_30 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_31 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_32 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_33 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_34 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _) $($t:tt)*) => { + $($p)::*::_35 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_36 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_37 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_38 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_39 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_40 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_41 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_42 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_43 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_44 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_45 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_46 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_47 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_48 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_49 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_50 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_51 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_52 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_53 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_54 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_55 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_56 $($t)* + }; + ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => { + $($p)::*::_57 $($t)* + }; + ($($p:ident)::*, (_ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_58 $($t)*
+    };
+    ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_59 $($t)*
+    };
+    ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_60 $($t)*
+    };
+    ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_61 $($t)*
+    };
+    ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_62 $($t)*
+    };
+    ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+        $($p)::*::_63 $($t)*
+    };
+}
diff --git a/third_party/rust/tokio/src/macros/support.rs b/third_party/rust/tokio/src/macros/support.rs
new file mode 100644
index 0000000000..fc1cdfcfa0
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/support.rs
@@ -0,0 +1,8 @@
+cfg_macros! {
+    pub use crate::future::{maybe_done, poll_fn};
+    pub use crate::util::thread_rng_n;
+}
+
+pub use std::future::Future;
+pub use std::pin::Pin;
+pub use std::task::Poll;
diff --git a/third_party/rust/tokio/src/macros/thread_local.rs b/third_party/rust/tokio/src/macros/thread_local.rs
new file mode 100644
index 0000000000..d848947350
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/thread_local.rs
@@ -0,0 +1,4 @@
+#[cfg(all(loom, test))]
+macro_rules! thread_local {
+    ($($tts:tt)+) => { loom::thread_local!{ $($tts)+ } }
+}
diff --git a/third_party/rust/tokio/src/macros/try_join.rs b/third_party/rust/tokio/src/macros/try_join.rs
new file mode 100644
index 0000000000..fa5850ef0e
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/try_join.rs
@@ -0,0 +1,132 @@
+/// Wait on multiple concurrent branches, returning when **all** branches
+/// complete with `Ok(_)` or on the first `Err(_)`.
+///
+/// The `try_join!` macro must be used inside of async functions, closures, and
+/// blocks.
+///
+/// Similar to [`join!`], the `try_join!` macro takes a list of async
+/// expressions and evaluates them concurrently on the same task. Each async
+/// expression evaluates to a future and the futures from each expression are
+/// multiplexed on the current task. The `try_join!` macro returns when **all**
+/// branches return with `Ok` or when the **first** branch returns with `Err`.
+///
+/// [`join!`]: macro@join
+///
+/// # Notes
+///
+/// The supplied futures are stored inline and do not require allocating a
+/// `Vec`.
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `try_join!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// # Examples
+///
+/// Basic try_join with two branches.
+///
+/// ```
+/// async fn do_stuff_async() -> Result<(), &'static str> {
+///     // async work
+/// # Ok(())
+/// }
+///
+/// async fn more_async_work() -> Result<(), &'static str> {
+///     // more here
+/// # Ok(())
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let res = tokio::try_join!(
+///         do_stuff_async(),
+///         more_async_work());
+///
+///     match res {
+///         Ok((first, second)) => {
+///             // do something with the values
+///         }
+///         Err(err) => {
+///             println!("processing failed; error = {}", err);
+///         }
+///     }
+/// }
+/// ```
+#[macro_export]
+#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+macro_rules! try_join {
+    (@ {
+        // One `_` for each branch in the `try_join!` macro. This is not used
+        // once normalization is complete.
+        ( $($count:tt)* )
+
+        // Normalized try_join! branches
+        $( ( $($skip:tt)* ) $e:expr, )*
+
+    }) => {{
+        use $crate::macros::support::{maybe_done, poll_fn, Future, Pin};
+        use $crate::macros::support::Poll::{Ready, Pending};
+
+        // Safety: nothing must be moved out of `futures`. This is to satisfy
+        // the requirement of `Pin::new_unchecked` called below.
+        let mut futures = ( $( maybe_done($e), )* );
+
+        poll_fn(move |cx| {
+            let mut is_pending = false;
+
+            $(
+                // Extract the future for this branch from the tuple.
+                let ( $($skip,)* fut, .. ) = &mut futures;
+
+                // Safety: future is stored on the stack above
+                // and never moved.
+                let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                // Try polling
+                if fut.as_mut().poll(cx).is_pending() {
+                    is_pending = true;
+                } else if fut.as_mut().output_mut().expect("expected completed future").is_err() {
+                    return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap()))
+                }
+            )*
+
+            if is_pending {
+                Pending
+            } else {
+                Ready(Ok(($({
+                    // Extract the future for this branch from the tuple.
+                    let ( $($skip,)* fut, .. ) = &mut futures;
+
+                    // Safety: future is stored on the stack above
+                    // and never moved.
+                    let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+                    fut
+                        .take_output()
+                        .expect("expected completed future")
+                        .ok()
+                        .expect("expected Ok(_)")
+                },)*)))
+            }
+        }).await
+    }};
+
+    // ===== Normalize =====
+
+    (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+        $crate::try_join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+    };
+
+    // ===== Entry point =====
+
+    ( $($e:expr),* $(,)?) => {
+        $crate::try_join!(@{ () } $($e,)*)
+    };
+}
diff --git a/third_party/rust/tokio/src/net/addr.rs b/third_party/rust/tokio/src/net/addr.rs
new file mode 100644
index 0000000000..343d4e21ff
--- /dev/null
+++ b/third_party/rust/tokio/src/net/addr.rs
@@ -0,0 +1,281 @@
+use crate::future;
+
+use std::io;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+
+/// Converts or resolves without blocking to one or more `SocketAddr` values.
+///
+/// # DNS
+///
+/// Implementations of `ToSocketAddrs` for string types require a DNS lookup.
+/// These implementations are only provided when Tokio is used with the
+/// **`dns`** feature flag.
+///
+/// # Calling
+///
+/// Currently, this trait is only used as an argument to Tokio functions that
+/// need to reference a target socket address. To perform a `SocketAddr`
+/// conversion directly, use [`lookup_host()`](super::lookup_host()).
+///
+/// This trait is sealed and is intended to be opaque. The details of the trait
+/// will change. Stabilization is pending enhancements to the Rust language.
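+///
+/// For illustration, a brief sketch of the argument shapes this trait accepts
+/// (`lookup_host` and the string conversions assume the **`dns`** feature):
+///
+/// ```no_run
+/// use tokio::net::lookup_host;
+/// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Already-resolved forms convert without a DNS lookup.
+/// let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
+/// let _ = lookup_host(addr).await?;
+/// let _ = lookup_host((IpAddr::V4(Ipv4Addr::LOCALHOST), 8080)).await?;
+///
+/// // String forms may resolve via DNS on the blocking pool.
+/// let _ = lookup_host("localhost:8080").await?;
+/// let _ = lookup_host(("localhost", 8080)).await?;
+/// # Ok(())
+/// # }
+/// ```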
+pub trait ToSocketAddrs: sealed::ToSocketAddrsPriv {} + +type ReadyFuture<T> = future::Ready<io::Result<T>>; + +// ===== impl &impl ToSocketAddrs ===== + +impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {} + +impl<T> sealed::ToSocketAddrsPriv for &T +where + T: sealed::ToSocketAddrsPriv + ?Sized, +{ + type Iter = T::Iter; + type Future = T::Future; + + fn to_socket_addrs(&self) -> Self::Future { + (**self).to_socket_addrs() + } +} + +// ===== impl SocketAddr ===== + +impl ToSocketAddrs for SocketAddr {} + +impl sealed::ToSocketAddrsPriv for SocketAddr { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + let iter = Some(*self).into_iter(); + future::ok(iter) + } +} + +// ===== impl SocketAddrV4 ===== + +impl ToSocketAddrs for SocketAddrV4 {} + +impl sealed::ToSocketAddrsPriv for SocketAddrV4 { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + SocketAddr::V4(*self).to_socket_addrs() + } +} + +// ===== impl SocketAddrV6 ===== + +impl ToSocketAddrs for SocketAddrV6 {} + +impl sealed::ToSocketAddrsPriv for SocketAddrV6 { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + SocketAddr::V6(*self).to_socket_addrs() + } +} + +// ===== impl (IpAddr, u16) ===== + +impl ToSocketAddrs for (IpAddr, u16) {} + +impl sealed::ToSocketAddrsPriv for (IpAddr, u16) { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + let iter = Some(SocketAddr::from(*self)).into_iter(); + future::ok(iter) + } +} + +// ===== impl (Ipv4Addr, u16) ===== + +impl ToSocketAddrs for (Ipv4Addr, u16) {} + +impl sealed::ToSocketAddrsPriv for (Ipv4Addr, u16) { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + let (ip, port) = *self; + SocketAddrV4::new(ip, port).to_socket_addrs() + } +} + +// ===== impl (Ipv6Addr, u16) ===== + +impl ToSocketAddrs for (Ipv6Addr, u16) {} + +impl sealed::ToSocketAddrsPriv for (Ipv6Addr, u16) { + type Iter = std::option::IntoIter<SocketAddr>; + type Future = ReadyFuture<Self::Iter>; + + fn to_socket_addrs(&self) -> Self::Future { + let (ip, port) = *self; + SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs() + } +} + +cfg_dns! 
{ + // ===== impl str ===== + + impl ToSocketAddrs for str {} + + impl sealed::ToSocketAddrsPriv for str { + type Iter = sealed::OneOrMore; + type Future = sealed::MaybeReady; + + fn to_socket_addrs(&self) -> Self::Future { + use crate::runtime::spawn_blocking; + use sealed::MaybeReady; + + // First check if the input parses as a socket address + let res: Result<SocketAddr, _> = self.parse(); + + if let Ok(addr) = res { + return MaybeReady::Ready(Some(addr)); + } + + // Run DNS lookup on the blocking pool + let s = self.to_owned(); + + MaybeReady::Blocking(spawn_blocking(move || { + std::net::ToSocketAddrs::to_socket_addrs(&s) + })) + } + } + + // ===== impl (&str, u16) ===== + + impl ToSocketAddrs for (&str, u16) {} + + impl sealed::ToSocketAddrsPriv for (&str, u16) { + type Iter = sealed::OneOrMore; + type Future = sealed::MaybeReady; + + fn to_socket_addrs(&self) -> Self::Future { + use crate::runtime::spawn_blocking; + use sealed::MaybeReady; + + let (host, port) = *self; + + // try to parse the host as a regular IP address first + if let Ok(addr) = host.parse::<Ipv4Addr>() { + let addr = SocketAddrV4::new(addr, port); + let addr = SocketAddr::V4(addr); + + return MaybeReady::Ready(Some(addr)); + } + + if let Ok(addr) = host.parse::<Ipv6Addr>() { + let addr = SocketAddrV6::new(addr, port, 0, 0); + let addr = SocketAddr::V6(addr); + + return MaybeReady::Ready(Some(addr)); + } + + let host = host.to_owned(); + + MaybeReady::Blocking(spawn_blocking(move || { + std::net::ToSocketAddrs::to_socket_addrs(&(&host[..], port)) + })) + } + } + + // ===== impl String ===== + + impl ToSocketAddrs for String {} + + impl sealed::ToSocketAddrsPriv for String { + type Iter = <str as sealed::ToSocketAddrsPriv>::Iter; + type Future = <str as sealed::ToSocketAddrsPriv>::Future; + + fn to_socket_addrs(&self) -> Self::Future { + (&self[..]).to_socket_addrs() + } + } +} + +pub(crate) mod sealed { + //! The contents of this trait are intended to remain private and __not__ + //! part of the `ToSocketAddrs` public API. The details will change over + //! time. + + use std::future::Future; + use std::io; + use std::net::SocketAddr; + + cfg_dns! { + use crate::task::JoinHandle; + + use std::option; + use std::pin::Pin; + use std::task::{Context, Poll}; + use std::vec; + } + + #[doc(hidden)] + pub trait ToSocketAddrsPriv { + type Iter: Iterator<Item = SocketAddr> + Send + 'static; + type Future: Future<Output = io::Result<Self::Iter>> + Send + 'static; + + fn to_socket_addrs(&self) -> Self::Future; + } + + cfg_dns! 
{ + #[doc(hidden)] + #[derive(Debug)] + pub enum MaybeReady { + Ready(Option<SocketAddr>), + Blocking(JoinHandle<io::Result<vec::IntoIter<SocketAddr>>>), + } + + #[doc(hidden)] + #[derive(Debug)] + pub enum OneOrMore { + One(option::IntoIter<SocketAddr>), + More(vec::IntoIter<SocketAddr>), + } + + impl Future for MaybeReady { + type Output = io::Result<OneOrMore>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + match *self { + MaybeReady::Ready(ref mut i) => { + let iter = OneOrMore::One(i.take().into_iter()); + Poll::Ready(Ok(iter)) + } + MaybeReady::Blocking(ref mut rx) => { + let res = ready!(Pin::new(rx).poll(cx))?.map(OneOrMore::More); + + Poll::Ready(res) + } + } + } + } + + impl Iterator for OneOrMore { + type Item = SocketAddr; + + fn next(&mut self) -> Option<Self::Item> { + match self { + OneOrMore::One(i) => i.next(), + OneOrMore::More(i) => i.next(), + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + match self { + OneOrMore::One(i) => i.size_hint(), + OneOrMore::More(i) => i.size_hint(), + } + } + } + } +} diff --git a/third_party/rust/tokio/src/net/lookup_host.rs b/third_party/rust/tokio/src/net/lookup_host.rs new file mode 100644 index 0000000000..3098b463e3 --- /dev/null +++ b/third_party/rust/tokio/src/net/lookup_host.rs @@ -0,0 +1,38 @@ +cfg_dns! { + use crate::net::addr::ToSocketAddrs; + + use std::io; + use std::net::SocketAddr; + + /// Performs a DNS resolution. + /// + /// The returned iterator may not actually yield any values depending on the + /// outcome of any resolution performed. + /// + /// This API is not intended to cover all DNS use cases. Anything beyond the + /// basic use case should be done with a specialized library. + /// + /// # Examples + /// + /// To resolve a DNS entry: + /// + /// ```no_run + /// use tokio::net; + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// for addr in net::lookup_host("localhost:3000").await? { + /// println!("socket address is {}", addr); + /// } + /// + /// Ok(()) + /// } + /// ``` + pub async fn lookup_host<T>(host: T) -> io::Result<impl Iterator<Item = SocketAddr>> + where + T: ToSocketAddrs + { + host.to_socket_addrs().await + } +} diff --git a/third_party/rust/tokio/src/net/mod.rs b/third_party/rust/tokio/src/net/mod.rs new file mode 100644 index 0000000000..eb24ac0ba5 --- /dev/null +++ b/third_party/rust/tokio/src/net/mod.rs @@ -0,0 +1,49 @@ +#![cfg(not(loom))] + +//! TCP/UDP/Unix bindings for `tokio`. +//! +//! This module contains the TCP/UDP/Unix networking types, similar to the standard +//! library, which can be used to implement networking protocols. +//! +//! # Organization +//! +//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP +//! * [`UdpSocket`] provides functionality for communication over UDP +//! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a +//! Unix Domain Stream Socket **(available on Unix only)** +//! * [`UnixDatagram`] provides functionality for communication +//! over Unix Domain Datagram Socket **(available on Unix only)** + +//! +//! [`TcpListener`]: TcpListener +//! [`TcpStream`]: TcpStream +//! [`UdpSocket`]: UdpSocket +//! [`UnixListener`]: UnixListener +//! [`UnixStream`]: UnixStream +//! [`UnixDatagram`]: UnixDatagram + +mod addr; +pub use addr::ToSocketAddrs; + +cfg_dns! { + mod lookup_host; + pub use lookup_host::lookup_host; +} + +cfg_tcp! 
{
+    pub mod tcp;
+    pub use tcp::listener::TcpListener;
+    pub use tcp::stream::TcpStream;
+}
+
+cfg_udp! {
+    pub mod udp;
+    pub use udp::socket::UdpSocket;
+}
+
+cfg_uds! {
+    pub mod unix;
+    pub use unix::datagram::UnixDatagram;
+    pub use unix::listener::UnixListener;
+    pub use unix::stream::UnixStream;
+}
diff --git a/third_party/rust/tokio/src/net/tcp/incoming.rs b/third_party/rust/tokio/src/net/tcp/incoming.rs
new file mode 100644
index 0000000000..062be1e9cf
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/incoming.rs
@@ -0,0 +1,42 @@
+use crate::net::tcp::{TcpListener, TcpStream};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Stream returned by the `TcpListener::incoming` function representing the
+/// stream of sockets received from a listener.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Incoming<'a> {
+    inner: &'a mut TcpListener,
+}
+
+impl Incoming<'_> {
+    pub(crate) fn new(listener: &mut TcpListener) -> Incoming<'_> {
+        Incoming { inner: listener }
+    }
+
+    /// Attempts to poll for a `TcpStream` by polling the inner `TcpListener`
+    /// to accept a connection.
+    ///
+    /// If the `TcpListener` isn't ready yet, `Poll::Pending` is returned and
+    /// the current task will be notified by a waker.
+    pub fn poll_accept(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<TcpStream>> {
+        let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+        Poll::Ready(Ok(socket))
+    }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for Incoming<'_> {
+    type Item = io::Result<TcpStream>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+        Poll::Ready(Some(Ok(socket)))
+    }
+}
diff --git a/third_party/rust/tokio/src/net/tcp/listener.rs b/third_party/rust/tokio/src/net/tcp/listener.rs
new file mode 100644
index 0000000000..cde22cb636
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/listener.rs
@@ -0,0 +1,441 @@
+use crate::future::poll_fn;
+use crate::io::PollEvented;
+use crate::net::tcp::{Incoming, TcpStream};
+use crate::net::ToSocketAddrs;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::net::{self, SocketAddr};
+use std::task::{Context, Poll};
+
+cfg_tcp! {
+    /// A TCP socket server, listening for connections.
+    ///
+    /// You can accept a new connection by using the
+    /// [`accept`](`TcpListener::accept`) method. Alternatively, `TcpListener`
+    /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows
+    /// you to use the listener in places that want a stream. The stream will
+    /// never return `None` and will also not yield the peer's `SocketAddr`
+    /// structure. Iterating over it is equivalent to calling `accept` in a
+    /// loop.
+    ///
+    /// # Errors
+    ///
+    /// Note that accepting a connection can lead to various errors and not all
+    /// of them are necessarily fatal ‒ for example having too many open file
+    /// descriptors or the other side closing the connection while it waits in
+    /// an accept queue. These would terminate the stream if not handled in any
+    /// way.
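+    ///
+    /// For illustration, a sketch of an accept loop that simply logs such
+    /// errors instead of letting them end the stream:
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpListener;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> std::io::Result<()> {
+    ///     let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+    ///
+    ///     loop {
+    ///         match listener.accept().await {
+    ///             Ok((_socket, addr)) => println!("new client: {:?}", addr),
+    ///             // A non-fatal error is logged and the loop keeps accepting.
+    ///             Err(e) => eprintln!("accept failed: {:?}", e),
+    ///         }
+    ///     }
+    /// }
+    /// ```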
+    ///
+    /// # Examples
+    ///
+    /// Using `accept`:
+    /// ```no_run
+    /// use tokio::net::TcpListener;
+    ///
+    /// use std::io;
+    ///
+    /// async fn process_socket<T>(socket: T) {
+    /// # drop(socket);
+    ///     // do work with socket here
+    /// }
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+    ///
+    ///     loop {
+    ///         let (socket, _) = listener.accept().await?;
+    ///         process_socket(socket).await;
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// Using `impl Stream`:
+    /// ```no_run
+    /// use tokio::{net::TcpListener, stream::StreamExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap();
+    ///     while let Some(stream) = listener.next().await {
+    ///         match stream {
+    ///             Ok(stream) => {
+    ///                 println!("new client!");
+    ///             }
+    ///             Err(e) => { /* connection failed */ }
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    pub struct TcpListener {
+        io: PollEvented<mio::net::TcpListener>,
+    }
+}
+
+impl TcpListener {
+    /// Creates a new TcpListener which will be bound to the specified address.
+    ///
+    /// The returned listener is ready for accepting connections.
+    ///
+    /// Binding with a port number of 0 will request that the OS assign a port
+    /// to this listener. The port allocated can be queried via the `local_addr`
+    /// method.
+    ///
+    /// The address type can be any implementor of the `ToSocketAddrs` trait.
+    ///
+    /// If `addr` yields multiple addresses, bind will be attempted with each of
+    /// the addresses until one succeeds and returns the listener. If none of
+    /// the addresses succeed in creating a listener, the error returned from
+    /// the last attempt (the last address) is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpListener;
+    ///
+    /// use std::io;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let listener = TcpListener::bind("127.0.0.1:0").await?;
+    ///
+    ///     // use the listener
+    ///
+    /// # let _ = listener;
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
+        let addrs = addr.to_socket_addrs().await?;
+
+        let mut last_err = None;
+
+        for addr in addrs {
+            match TcpListener::bind_addr(addr) {
+                Ok(listener) => return Ok(listener),
+                Err(e) => last_err = Some(e),
+            }
+        }
+
+        Err(last_err.unwrap_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "could not resolve to any address",
+            )
+        }))
+    }
+
+    fn bind_addr(addr: SocketAddr) -> io::Result<TcpListener> {
+        let listener = mio::net::TcpListener::bind(&addr)?;
+        TcpListener::new(listener)
+    }
+
+    /// Accepts a new incoming connection from this listener.
+    ///
+    /// This function will yield once a new TCP connection is established. When
+    /// established, the corresponding [`TcpStream`] and the remote peer's
+    /// address will be returned.
+    ///
+    /// [`TcpStream`]: ../struct.TcpStream.html
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpListener;
+    ///
+    /// use std::io;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+    ///
+    ///     match listener.accept().await {
+    ///         Ok((_socket, addr)) => println!("new client: {:?}", addr),
+    ///         Err(e) => println!("couldn't get client: {:?}", e),
+    ///     }
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
+        poll_fn(|cx| self.poll_accept(cx)).await
+    }
+
+    /// Attempts to accept a new incoming connection, returning the `TcpStream`
+    /// and the remote peer's `SocketAddr` once a connection is established.
+    ///
+    /// If the I/O resource isn't ready yet, `Poll::Pending` is returned and
+    /// the current task will be notified by a waker.
+    pub fn poll_accept(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<(TcpStream, SocketAddr)>> {
+        let (io, addr) = ready!(self.poll_accept_std(cx))?;
+
+        let io = mio::net::TcpStream::from_stream(io)?;
+        let io = TcpStream::new(io)?;
+
+        Poll::Ready(Ok((io, addr)))
+    }
+
+    fn poll_accept_std(
+        &mut self,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<(net::TcpStream, SocketAddr)>> {
+        ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+        match self.io.get_ref().accept_std() {
+            Ok(pair) => Poll::Ready(Ok(pair)),
+            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+                self.io.clear_read_ready(cx, mio::Ready::readable())?;
+                Poll::Pending
+            }
+            Err(e) => Poll::Ready(Err(e)),
+        }
+    }
+
+    /// Creates a new TCP listener from the standard library's TCP listener.
+    ///
+    /// This method can be used when the `Handle::tcp_listen` method isn't
+    /// sufficient, for example when some more configuration is needed before
+    /// the calls to `bind` and `listen`.
+    ///
+    /// This API is typically paired with the `net2` crate and the `TcpBuilder`
+    /// type to build up and customize a listener before it's shipped off to the
+    /// backing event loop. This allows configuration of options like
+    /// `SO_REUSEPORT`, binding to multiple addresses, etc.
+    ///
+    /// The `addr` argument here is one of the addresses that `listener` is
+    /// bound to and the listener will only be guaranteed to accept connections
+    /// of the same address type currently.
+    ///
+    /// The platform specific behavior of this function looks like:
+    ///
+    /// * On Unix, the socket is placed into nonblocking mode and connections
+    ///   can be accepted as normal
+    ///
+    /// * On Windows, the address is stored internally and all future accepts
+    ///   will only be for the same IP version as `addr` specified. That is, if
+    ///   `addr` is an IPv4 address then all sockets accepted will be IPv4 as
+    ///   well (same for IPv6).
+    ///
+    /// # Examples
+    ///
+    /// ```rust,no_run
+    /// use std::error::Error;
+    /// use tokio::net::TcpListener;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> Result<(), Box<dyn Error>> {
+    ///     let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?;
+    ///     let listener = TcpListener::from_std(std_listener)?;
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the thread-local runtime is not set.
+    ///
+    /// The runtime is usually set implicitly when this function is called
+    /// from a future driven by a tokio runtime, otherwise the runtime can be
+    /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+    /// function.
+ pub fn from_std(listener: net::TcpListener) -> io::Result<TcpListener> { + let io = mio::net::TcpListener::from_std(listener)?; + let io = PollEvented::new(io)?; + Ok(TcpListener { io }) + } + + fn new(listener: mio::net::TcpListener) -> io::Result<TcpListener> { + let io = PollEvented::new(listener)?; + Ok(TcpListener { io }) + } + + /// Returns the local address that this listener is bound to. + /// + /// This can be useful, for example, when binding to port 0 to figure out + /// which port was actually bound. + /// + /// # Examples + /// + /// ```rust,no_run + /// use tokio::net::TcpListener; + /// + /// use std::io; + /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let listener = TcpListener::bind("127.0.0.1:8080").await?; + /// + /// assert_eq!(listener.local_addr()?, + /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080))); + /// + /// Ok(()) + /// } + /// ``` + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Returns a stream over the connections being received on this listener. + /// + /// Note that `TcpListener` also directly implements `Stream`. + /// + /// The returned stream will never return `None` and will also not yield the + /// peer's `SocketAddr` structure. Iterating over it is equivalent to + /// calling accept in a loop. + /// + /// # Errors + /// + /// Note that accepting a connection can lead to various errors and not all + /// of them are necessarily fatal ‒ for example having too many open file + /// descriptors or the other side closing the connection while it waits in + /// an accept queue. These would terminate the stream if not handled in any + /// way. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::{net::TcpListener, stream::StreamExt}; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap(); + /// let mut incoming = listener.incoming(); + /// + /// while let Some(stream) = incoming.next().await { + /// match stream { + /// Ok(stream) => { + /// println!("new client!"); + /// } + /// Err(e) => { /* connection failed */ } + /// } + /// } + /// } + /// ``` + pub fn incoming(&mut self) -> Incoming<'_> { + Incoming::new(self) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: #method.set_ttl + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpListener; + /// + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let listener = TcpListener::bind("127.0.0.1:0").await?; + /// + /// listener.set_ttl(100).expect("could not set TTL"); + /// assert_eq!(listener.ttl()?, 100); + /// + /// Ok(()) + /// } + /// ``` + pub fn ttl(&self) -> io::Result<u32> { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. 
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpListener; + /// + /// use std::io; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let listener = TcpListener::bind("127.0.0.1:0").await?; + /// + /// listener.set_ttl(100).expect("could not set TTL"); + /// + /// Ok(()) + /// } + /// ``` + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } +} + +#[cfg(feature = "stream")] +impl crate::stream::Stream for TcpListener { + type Item = io::Result<TcpStream>; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<Option<Self::Item>> { + let (socket, _) = ready!(self.poll_accept(cx))?; + Poll::Ready(Some(Ok(socket))) + } +} + +impl TryFrom<TcpListener> for mio::net::TcpListener { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. + /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: TcpListener) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::TcpListener> for TcpListener { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`TcpListener::from_std(stream)`](TcpListener::from_std). + fn try_from(stream: net::TcpListener) -> Result<Self, Self::Error> { + Self::from_std(stream) + } +} + +impl fmt::Debug for TcpListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +#[cfg(unix)] +mod sys { + use super::TcpListener; + use std::os::unix::prelude::*; + + impl AsRawFd for TcpListener { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. + // + // use std::os::windows::prelude::*; + // use super::{TcpListener; + // + // impl AsRawHandle for TcpListener { + // fn as_raw_handle(&self) -> RawHandle { + // self.listener.io().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio/src/net/tcp/mod.rs b/third_party/rust/tokio/src/net/tcp/mod.rs new file mode 100644 index 0000000000..d5354b38d2 --- /dev/null +++ b/third_party/rust/tokio/src/net/tcp/mod.rs @@ -0,0 +1,13 @@ +//! TCP utility types + +pub(crate) mod listener; +pub(crate) use listener::TcpListener; + +mod incoming; +pub use incoming::Incoming; + +mod split; +pub use split::{ReadHalf, WriteHalf}; + +pub(crate) mod stream; +pub(crate) use stream::TcpStream; diff --git a/third_party/rust/tokio/src/net/tcp/split.rs b/third_party/rust/tokio/src/net/tcp/split.rs new file mode 100644 index 0000000000..cce50f6ab3 --- /dev/null +++ b/third_party/rust/tokio/src/net/tcp/split.rs @@ -0,0 +1,163 @@ +//! `TcpStream` split support. +//! +//! A `TcpStream` can be split into a `ReadHalf` and a +//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf` +//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`. +//! +//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized +//! split has no associated overhead and enforces all invariants at the type +//! level. 
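+//!
+//! A minimal sketch of using `split` (the address is just an example):
+//!
+//! ```no_run
+//! use tokio::net::TcpStream;
+//! use tokio::prelude::*;
+//!
+//! # async fn dox() -> std::io::Result<()> {
+//! let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+//! let (mut rd, mut wr) = stream.split();
+//!
+//! // The halves can be used independently: write on one, read on the other.
+//! wr.write_all(b"ping").await?;
+//!
+//! let mut buf = [0u8; 4];
+//! rd.read_exact(&mut buf).await?;
+//! # Ok(())
+//! # }
+//! ```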
+
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::net::TcpStream;
+
+use bytes::Buf;
+use std::io;
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Read half of a `TcpStream`.
+#[derive(Debug)]
+pub struct ReadHalf<'a>(&'a TcpStream);
+
+/// Write half of a `TcpStream`.
+///
+/// Note that in the `AsyncWrite` implementation of `WriteHalf`,
+/// `poll_shutdown` actually shuts down the TCP stream in the write direction.
+#[derive(Debug)]
+pub struct WriteHalf<'a>(&'a TcpStream);
+
+pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
+    (ReadHalf(&*stream), WriteHalf(&*stream))
+}
+
+impl ReadHalf<'_> {
+    /// Attempts to receive data on the socket, without removing that data from
+    /// the queue, registering the current task for wakeup if data is not yet
+    /// available.
+    ///
+    /// See the [`TcpStream::poll_peek`] documentation for more details.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::io;
+    /// use tokio::net::TcpStream;
+    ///
+    /// use futures::future::poll_fn;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stream = TcpStream::connect("127.0.0.1:8000").await?;
+    ///     let (mut read_half, _) = stream.split();
+    ///     let mut buf = [0; 10];
+    ///
+    ///     poll_fn(|cx| {
+    ///         read_half.poll_peek(cx, &mut buf)
+    ///     }).await?;
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// [`TcpStream::poll_peek`]: TcpStream::poll_peek
+    pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
+        self.0.poll_peek2(cx, buf)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected, without removing that data from the queue. On success,
+    /// returns the number of bytes peeked.
+    ///
+    /// See the [`TcpStream::peek`] documentation for more details.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpStream;
+    /// use tokio::prelude::*;
+    /// use std::error::Error;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> Result<(), Box<dyn Error>> {
+    ///     // Connect to a peer
+    ///     let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+    ///     let (mut read_half, _) = stream.split();
+    ///
+    ///     let mut b1 = [0; 10];
+    ///     let mut b2 = [0; 10];
+    ///
+    ///     // Peek at the data
+    ///     let n = read_half.peek(&mut b1).await?;
+    ///
+    ///     // Read the data
+    ///     assert_eq!(n, read_half.read(&mut b2[..n]).await?);
+    ///     assert_eq!(&b1[..n], &b2[..n]);
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// [`TcpStream::peek`]: TcpStream::peek
+    pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        poll_fn(|cx| self.poll_peek(cx, buf)).await
+    }
+}
+
+impl AsyncRead for ReadHalf<'_> {
+    unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+        false
+    }
+
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut [u8],
+    ) -> Poll<io::Result<usize>> {
+        self.0.poll_read_priv(cx, buf)
+    }
+}
+
+impl AsyncWrite for WriteHalf<'_> {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        self.0.poll_write_priv(cx, buf)
+    }
+
+    fn poll_write_buf<B: Buf>(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut B,
+    ) -> Poll<io::Result<usize>> {
+        self.0.poll_write_buf_priv(cx, buf)
+    }
+
+    #[inline]
+    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+        // tcp flush is a no-op
+        Poll::Ready(Ok(()))
+    }
+
+    // `poll_shutdown` on a write half shuts down the stream in the "write"
+    // direction.
+    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+        self.0.shutdown(Shutdown::Write).into()
+    }
+}
+
+impl AsRef<TcpStream> for ReadHalf<'_> {
+    fn as_ref(&self) -> &TcpStream {
+        self.0
+    }
+}
+
+impl AsRef<TcpStream> for WriteHalf<'_> {
+    fn as_ref(&self) -> &TcpStream {
+        self.0
+    }
+}
diff --git a/third_party/rust/tokio/src/net/tcp/stream.rs b/third_party/rust/tokio/src/net/tcp/stream.rs
new file mode 100644
index 0000000000..732c0ca381
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/stream.rs
@@ -0,0 +1,869 @@
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite, PollEvented};
+use crate::net::tcp::split::{split, ReadHalf, WriteHalf};
+use crate::net::ToSocketAddrs;
+
+use bytes::Buf;
+use iovec::IoVec;
+use std::convert::TryFrom;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::mem::MaybeUninit;
+use std::net::{self, Shutdown, SocketAddr};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+cfg_tcp! {
+    /// A TCP stream between a local and a remote socket.
+    ///
+    /// A TCP stream can either be created by connecting to an endpoint, via the
+    /// [`connect`] method, or by [accepting] a connection from a [listener].
+    ///
+    /// [`connect`]: method@TcpStream::connect
+    /// [accepting]: method@super::TcpListener::accept
+    /// [listener]: struct@super::TcpListener
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpStream;
+    /// use tokio::prelude::*;
+    /// use std::error::Error;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> Result<(), Box<dyn Error>> {
+    ///     // Connect to a peer
+    ///     let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+    ///
+    ///     // Write some data.
+    ///     stream.write_all(b"hello world!").await?;
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    pub struct TcpStream {
+        io: PollEvented<mio::net::TcpStream>,
+    }
+}
+
+impl TcpStream {
+    /// Opens a TCP connection to a remote host.
+    ///
+    /// `addr` is an address of the remote host. Anything which implements the
+    /// `ToSocketAddrs` trait can be supplied for the address.
+    ///
+    /// If `addr` yields multiple addresses, connect will be attempted with each
+    /// of the addresses until a connection is successful. If none of the
+    /// addresses result in a successful connection, the error returned from the
+    /// last connection attempt (the last address) is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use tokio::net::TcpStream;
+    /// use tokio::prelude::*;
+    /// use std::error::Error;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> Result<(), Box<dyn Error>> {
+    ///     // Connect to a peer
+    ///     let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+    ///
+    ///     // Write some data.
+    ///     stream.write_all(b"hello world!").await?;
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    pub async fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
+        let addrs = addr.to_socket_addrs().await?;
+
+        let mut last_err = None;
+
+        for addr in addrs {
+            match TcpStream::connect_addr(addr).await {
+                Ok(stream) => return Ok(stream),
+                Err(e) => last_err = Some(e),
+            }
+        }
+
+        Err(last_err.unwrap_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::InvalidInput,
+                "could not resolve to any address",
+            )
+        }))
+    }
+
+    /// Establishes a connection to the specified `addr`.
+    async fn connect_addr(addr: SocketAddr) -> io::Result<TcpStream> {
+        let sys = mio::net::TcpStream::connect(&addr)?;
+        let stream = TcpStream::new(sys)?;
+
+        // Once we've connected, wait for the stream to be writable as
+        // that's when the actual connection has been initiated. Once we're
+        // writable we check for `take_socket_error` to see if the connect
+        // actually hit an error or not.
+        //
+        // If all that succeeded then we ship everything on up.
+        poll_fn(|cx| stream.io.poll_write_ready(cx)).await?;
+
+        if let Some(e) = stream.io.get_ref().take_error()? {
+            return Err(e);
+        }
+
+        Ok(stream)
+    }
+
+    pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result<TcpStream> {
+        let io = PollEvented::new(connected)?;
+        Ok(TcpStream { io })
+    }
+
+    /// Creates a new `TcpStream` from a `std::net::TcpStream`.
+    ///
+    /// This function will convert a TCP stream created by the standard library
+    /// to a TCP stream ready to be used with the provided event loop handle.
+    ///
+    /// # Examples
+    ///
+    /// ```rust,no_run
+    /// use std::error::Error;
+    /// use tokio::net::TcpStream;
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> Result<(), Box<dyn Error>> {
+    ///     let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?;
+    ///     let stream = TcpStream::from_std(std_stream)?;
+    ///     Ok(())
+    /// }
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the thread-local runtime is not set.
+    ///
+    /// The runtime is usually set implicitly when this function is called
+    /// from a future driven by a tokio runtime, otherwise the runtime can be
+    /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+    /// function.
+    pub fn from_std(stream: net::TcpStream) -> io::Result<TcpStream> {
+        let io = mio::net::TcpStream::from_stream(stream)?;
+        let io = PollEvented::new(io)?;
+        Ok(TcpStream { io })
+    }
+
+    // Asynchronously connects a `TcpStream` that may have been built with a
+    // net2 `TcpBuilder`.
+ // + // This should be removed in favor of some in-crate TcpSocket builder API. + #[doc(hidden)] + pub async fn connect_std(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> { + let io = mio::net::TcpStream::connect_stream(stream, addr)?; + let io = PollEvented::new(io)?; + let stream = TcpStream { io }; + + // Once we've connected, wait for the stream to be writable as + // that's when the actual connection has been initiated. Once we're + // writable we check for `take_socket_error` to see if the connect + // actually hit an error or not. + // + // If all that succeeded then we ship everything on up. + poll_fn(|cx| stream.io.poll_write_ready(cx)).await?; + + if let Some(e) = stream.io.get_ref().take_error()? { + return Err(e); + } + + Ok(stream) + } + + /// Returns the local address that this stream is bound to. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.local_addr()?); + /// # Ok(()) + /// # } + /// ``` + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Returns the remote address that this stream is connected to. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.peer_addr()?); + /// # Ok(()) + /// # } + /// ``` + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().peer_addr() + } + + /// Attempts to receive data on the socket, without removing that data from + /// the queue, registering the current task for wakeup if data is not yet + /// available. + /// + /// # Return value + /// + /// The function returns: + /// + /// * `Poll::Pending` if data is not yet available. + /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked. + /// * `Poll::Ready(Err(e))` if an error is encountered. + /// + /// # Errors + /// + /// This function may encounter any standard I/O error except `WouldBlock`. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::io; + /// use tokio::net::TcpStream; + /// + /// use futures::future::poll_fn; + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?; + /// let mut buf = [0; 10]; + /// + /// poll_fn(|cx| { + /// stream.poll_peek(cx, &mut buf) + /// }).await?; + /// + /// Ok(()) + /// } + /// ``` + pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> { + self.poll_peek2(cx, buf) + } + + pub(super) fn poll_peek2( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().peek(buf) { + Ok(ret) => Poll::Ready(Ok(ret)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + Err(e) => Poll::Ready(Err(e)), + } + } + + /// Receives data on the socket from the remote address to which it is + /// connected, without removing that data from the queue. On success, + /// returns the number of bytes peeked. + /// + /// Successive calls return the same data. 
This is accomplished by passing + /// `MSG_PEEK` as a flag to the underlying recv system call. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// use tokio::prelude::*; + /// use std::error::Error; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box<dyn Error>> { + /// // Connect to a peer + /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// let mut b1 = [0; 10]; + /// let mut b2 = [0; 10]; + /// + /// // Peek at the data + /// let n = stream.peek(&mut b1).await?; + /// + /// // Read the data + /// assert_eq!(n, stream.read(&mut b2[..n]).await?); + /// assert_eq!(&b1[..n], &b2[..n]); + /// + /// Ok(()) + /// } + /// ``` + pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> { + poll_fn(|cx| self.poll_peek(cx, buf)).await + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O on the specified + /// portions to return immediately with an appropriate value (see the + /// documentation of `Shutdown`). + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// use std::error::Error; + /// use std::net::Shutdown; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box<dyn Error>> { + /// // Connect to a peer + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// // Shutdown the stream + /// stream.shutdown(Shutdown::Write)?; + /// + /// Ok(()) + /// } + /// ``` + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.io.get_ref().shutdown(how) + } + + /// Gets the value of the `TCP_NODELAY` option on this socket. + /// + /// For more information about this option, see [`set_nodelay`]. + /// + /// [`set_nodelay`]: TcpStream::set_nodelay + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.nodelay()?); + /// # Ok(()) + /// # } + /// ``` + pub fn nodelay(&self) -> io::Result<bool> { + self.io.get_ref().nodelay() + } + + /// Sets the value of the `TCP_NODELAY` option on this socket. + /// + /// If set, this option disables the Nagle algorithm. This means that + /// segments are always sent as soon as possible, even if there is only a + /// small amount of data. When not set, data is buffered until there is a + /// sufficient amount to send out, thereby avoiding the frequent sending of + /// small packets. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_nodelay(true)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + self.io.get_ref().set_nodelay(nodelay) + } + + /// Gets the value of the `SO_RCVBUF` option on this socket. + /// + /// For more information about this option, see [`set_recv_buffer_size`]. 
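[Editor's note: `set_nodelay` is the option most callers reach for here. A short hedged sketch of the latency/throughput trade-off it controls; the address is a placeholder.]

```rust
use tokio::net::TcpStream;
use tokio::prelude::*;

async fn send_now(addr: &str, msg: &[u8]) -> std::io::Result<()> {
    let mut stream = TcpStream::connect(addr).await?;
    // Disable Nagle's algorithm: small writes go out immediately
    // instead of being buffered until a full segment accumulates.
    stream.set_nodelay(true)?;
    stream.write_all(msg).await?;
    Ok(())
}
```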
+ /// + /// [`set_recv_buffer_size`]: TcpStream::set_recv_buffer_size + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.recv_buffer_size()?); + /// # Ok(()) + /// # } + /// ``` + pub fn recv_buffer_size(&self) -> io::Result<usize> { + self.io.get_ref().recv_buffer_size() + } + + /// Sets the value of the `SO_RCVBUF` option on this socket. + /// + /// Changes the size of the operating system's receive buffer associated + /// with the socket. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_recv_buffer_size(100)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_recv_buffer_size(size) + } + + /// Gets the value of the `SO_SNDBUF` option on this socket. + /// + /// For more information about this option, see [`set_send_buffer_size`]. + /// + /// [`set_send_buffer_size`]: TcpStream::set_send_buffer_size + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.send_buffer_size()?); + /// # Ok(()) + /// # } + /// ``` + pub fn send_buffer_size(&self) -> io::Result<usize> { + self.io.get_ref().send_buffer_size() + } + + /// Sets the value of the `SO_SNDBUF` option on this socket. + /// + /// Changes the size of the operating system's send buffer associated with + /// the socket. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_send_buffer_size(100)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { + self.io.get_ref().set_send_buffer_size(size) + } + + /// Returns whether keepalive messages are enabled on this socket, and if so + /// the duration of time between them. + /// + /// For more information about this option, see [`set_keepalive`]. + /// + /// [`set_keepalive`]: TcpStream::set_keepalive + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.keepalive()?); + /// # Ok(()) + /// # } + /// ``` + pub fn keepalive(&self) -> io::Result<Option<Duration>> { + self.io.get_ref().keepalive() + } + + /// Sets whether keepalive messages are enabled to be sent on this socket. + /// + /// On Unix, this option will set the `SO_KEEPALIVE` as well as the + /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform). + /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option. + /// + /// If `None` is specified then keepalive messages are disabled, otherwise + /// the duration specified will be the time to remain idle before sending a + /// TCP keepalive probe. + /// + /// Some platforms specify this value in seconds, so sub-second + /// specifications may be omitted. 
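[Editor's note: the buffer-size setters are requests to the kernel, not guarantees, so reading the value back is worthwhile. A sketch combining them with the keepalive setter documented next; 256 KiB and 60 seconds are arbitrary illustrative values.]

```rust
use std::time::Duration;
use tokio::net::TcpStream;

fn tune(stream: &TcpStream) -> std::io::Result<()> {
    // The kernel may round or clamp buffer sizes, so read the
    // value back to see what actually took effect.
    stream.set_recv_buffer_size(256 * 1024)?;
    stream.set_send_buffer_size(256 * 1024)?;
    println!("rcvbuf = {}", stream.recv_buffer_size()?);

    // Probe the peer after 60 idle seconds; `None` disables probes.
    stream.set_keepalive(Some(Duration::from_secs(60)))?;
    Ok(())
}
```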
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_keepalive(None)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> { + self.io.get_ref().set_keepalive(keepalive) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. + /// + /// [`set_ttl`]: TcpStream::set_ttl + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.ttl()?); + /// # Ok(()) + /// # } + /// ``` + pub fn ttl(&self) -> io::Result<u32> { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_ttl(123)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } + + /// Reads the linger duration for this socket by getting the `SO_LINGER` + /// option. + /// + /// For more information about this option, see [`set_linger`]. + /// + /// [`set_linger`]: TcpStream::set_linger + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// println!("{:?}", stream.linger()?); + /// # Ok(()) + /// # } + /// ``` + pub fn linger(&self) -> io::Result<Option<Duration>> { + self.io.get_ref().linger() + } + + /// Sets the linger duration of this socket by setting the `SO_LINGER` + /// option. + /// + /// This option controls the action taken when a stream has unsent messages + /// and the stream is closed. If `SO_LINGER` is set, the system + /// shall block the process until it can transmit the data or until the + /// time expires. + /// + /// If `SO_LINGER` is not specified, and the stream is closed, the system + /// handles the call in a way that allows the process to continue as quickly + /// as possible. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::TcpStream; + /// + /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { + /// let stream = TcpStream::connect("127.0.0.1:8080").await?; + /// + /// stream.set_linger(None)?; + /// # Ok(()) + /// # } + /// ``` + pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> { + self.io.get_ref().set_linger(dur) + } + + /// Splits a `TcpStream` into a read half and a write half, which can be used + /// to read and write the stream concurrently. + pub fn split(&mut self) -> (ReadHalf<'_>, WriteHalf<'_>) { + split(self) + } + + // == Poll IO functions that takes `&self` == + // + // They are not public because (taken from the doc of `PollEvented`): + // + // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the + // caller must ensure that there are at most two tasks that use a + // `PollEvented` instance concurrently. 
One for reading and one for writing. + // While violating this requirement is "safe" from a Rust memory model point + // of view, it will result in unexpected behavior in the form of lost + // notifications and tasks hanging. + + pub(crate) fn poll_read_priv( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().read(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + pub(super) fn poll_write_priv( + &self, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().write(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + pub(super) fn poll_write_buf_priv<B: Buf>( + &self, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<io::Result<usize>> { + use std::io::IoSlice; + + ready!(self.io.poll_write_ready(cx))?; + + // The `IoVec` (v0.1.x) type can't have a zero-length size, so create + // a dummy version from a 1-length slice which we'll overwrite with + // the `bytes_vectored` method. + static S: &[u8] = &[0]; + const MAX_BUFS: usize = 64; + + // IoSlice isn't Copy, so we must expand this manually ;_; + let mut slices: [IoSlice<'_>; MAX_BUFS] = [ + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + IoSlice::new(S), + ]; + let cnt = buf.bytes_vectored(&mut slices); + + let iovec = <&IoVec>::from(S); + let mut vecs = [iovec; MAX_BUFS]; + for i in 0..cnt { + vecs[i] = (*slices[i]).into(); + } + + match self.io.get_ref().write_bufs(&vecs[..cnt]) { + Ok(n) => { + buf.advance(n); + Poll::Ready(Ok(n)) + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +impl TryFrom<TcpStream> for mio::net::TcpStream { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. 
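[Editor's note: the borrowed `split` above is what makes the at-most-two-tasks contract easy to uphold: one half reads while the other writes. A minimal echo sketch using it.]

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

async fn echo(mut stream: TcpStream) -> std::io::Result<()> {
    // The halves borrow the stream, so reading and writing can be
    // driven concurrently without violating the PollEvented rule.
    let (mut rd, mut wr) = stream.split();
    let mut buf = [0u8; 1024];
    loop {
        let n = rd.read(&mut buf).await?;
        if n == 0 {
            return Ok(()); // peer closed its write side
        }
        wr.write_all(&buf[..n]).await?;
    }
}
```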
+ /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: TcpStream) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::TcpStream> for TcpStream { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`TcpStream::from_std(stream)`](TcpStream::from_std). + fn try_from(stream: net::TcpStream) -> Result<Self, Self::Error> { + Self::from_std(stream) + } +} + +// ===== impl Read / Write ===== + +impl AsyncRead for TcpStream { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool { + false + } + + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + self.poll_read_priv(cx, buf) + } +} + +impl AsyncWrite for TcpStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.poll_write_priv(cx, buf) + } + + fn poll_write_buf<B: Buf>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut B, + ) -> Poll<io::Result<usize>> { + self.poll_write_buf_priv(cx, buf) + } + + #[inline] + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + // tcp flush is a no-op + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + self.shutdown(std::net::Shutdown::Write)?; + Poll::Ready(Ok(())) + } +} + +impl fmt::Debug for TcpStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +#[cfg(unix)] +mod sys { + use super::TcpStream; + use std::os::unix::prelude::*; + + impl AsRawFd for TcpStream { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. + // + // use std::os::windows::prelude::*; + // use super::TcpStream; + // + // impl AsRawHandle for TcpStream { + // fn as_raw_handle(&self) -> RawHandle { + // self.io.get_ref().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio/src/net/udp/mod.rs b/third_party/rust/tokio/src/net/udp/mod.rs new file mode 100644 index 0000000000..d43121a1ca --- /dev/null +++ b/third_party/rust/tokio/src/net/udp/mod.rs @@ -0,0 +1,7 @@ +//! UDP utility types. + +pub(crate) mod socket; +pub(crate) use socket::UdpSocket; + +mod split; +pub use split::{RecvHalf, ReuniteError, SendHalf}; diff --git a/third_party/rust/tokio/src/net/udp/socket.rs b/third_party/rust/tokio/src/net/udp/socket.rs new file mode 100644 index 0000000000..faf1dca615 --- /dev/null +++ b/third_party/rust/tokio/src/net/udp/socket.rs @@ -0,0 +1,425 @@ +use crate::future::poll_fn; +use crate::io::PollEvented; +use crate::net::udp::split::{split, RecvHalf, SendHalf}; +use crate::net::ToSocketAddrs; + +use std::convert::TryFrom; +use std::fmt; +use std::io; +use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::task::{Context, Poll}; + +cfg_udp! { + /// A UDP socket + pub struct UdpSocket { + io: PollEvented<mio::net::UdpSocket>, + } +} + +impl UdpSocket { + /// This function will create a new UDP socket and attempt to bind it to + /// the `addr` provided. 
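[Editor's note: a short sketch of `bind` as documented above; port 0 is used so the OS picks a free port, and the address is otherwise a placeholder.]

```rust
use tokio::net::UdpSocket;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Binding to port 0 delegates port selection to the OS;
    // local_addr() reveals what was chosen.
    let socket = UdpSocket::bind("127.0.0.1:0").await?;
    println!("listening on {}", socket.local_addr()?);
    Ok(())
}
```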
+ pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> { + let addrs = addr.to_socket_addrs().await?; + let mut last_err = None; + + for addr in addrs { + match UdpSocket::bind_addr(addr) { + Ok(socket) => return Ok(socket), + Err(e) => last_err = Some(e), + } + } + + Err(last_err.unwrap_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + "could not resolve to any address", + ) + })) + } + + fn bind_addr(addr: SocketAddr) -> io::Result<UdpSocket> { + let sys = mio::net::UdpSocket::bind(&addr)?; + UdpSocket::new(sys) + } + + fn new(socket: mio::net::UdpSocket) -> io::Result<UdpSocket> { + let io = PollEvented::new(socket)?; + Ok(UdpSocket { io }) + } + + /// Creates a new `UdpSocket` from the previously bound socket provided. + /// + /// The socket given will be registered with the event loop that `handle` + /// is associated with. This function requires that `socket` has previously + /// been bound to an address to work correctly. + /// + /// This can be used in conjunction with net2's `UdpBuilder` interface to + /// configure a socket before it's handed off, such as setting options like + /// `reuse_address` or binding to multiple addresses. + /// + /// # Panics + /// + /// This function panics if thread-local runtime is not set. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. + pub fn from_std(socket: net::UdpSocket) -> io::Result<UdpSocket> { + let io = mio::net::UdpSocket::from_socket(socket)?; + let io = PollEvented::new(io)?; + Ok(UdpSocket { io }) + } + + /// Splits the `UdpSocket` into a receive half and a send half. The two parts + /// can be used to receive and send datagrams concurrently, even from two + /// different tasks. + pub fn split(self) -> (RecvHalf, SendHalf) { + split(self) + } + + /// Returns the local address that this socket is bound to. + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Connects the UDP socket setting the default destination for send() and + /// limiting packets that are read via recv from the address specified in + /// `addr`. + pub async fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> { + let addrs = addr.to_socket_addrs().await?; + let mut last_err = None; + + for addr in addrs { + match self.io.get_ref().connect(addr) { + Ok(_) => return Ok(()), + Err(e) => last_err = Some(e), + } + } + + Err(last_err.unwrap_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + "could not resolve to any address", + ) + })) + } + + /// Returns a future that sends data on the socket to the remote address to which it is connected. + /// On success, the future will resolve to the number of bytes written. + /// + /// The [`connect`] method will connect this socket to a remote address. The future + /// will resolve to an error if the socket is not connected. + /// + /// [`connect`]: #method.connect + pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> { + poll_fn(|cx| self.poll_send(cx, buf)).await + } + + // Poll IO functions that takes `&self` are provided for the split API. + // + // They are not public because (taken from the doc of `PollEvented`): + // + // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the + // caller must ensure that there are at most two tasks that use a + // `PollEvented` instance concurrently. 
One for reading and one for writing. + // While violating this requirement is "safe" from a Rust memory model point + // of view, it will result in unexpected behavior in the form of lost + // notifications and tasks hanging. + #[doc(hidden)] + pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().send(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Returns a future that receives a single datagram message on the socket from + /// the remote address to which it is connected. On success, the future will resolve + /// to the number of bytes read. + /// + /// The function must be called with valid byte array `buf` of sufficient size to + /// hold the message bytes. If a message is too long to fit in the supplied buffer, + /// excess bytes may be discarded. + /// + /// The [`connect`] method will connect this socket to a remote address. The future + /// will fail if the socket is not connected. + /// + /// [`connect`]: #method.connect + pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> { + poll_fn(|cx| self.poll_recv(cx, buf)).await + } + + #[doc(hidden)] + pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().recv(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Returns a future that sends data on the socket to the given address. + /// On success, the future will resolve to the number of bytes written. + /// + /// The future will resolve to an error if the IP version of the socket does + /// not match that of `target`. + pub async fn send_to<A: ToSocketAddrs>(&mut self, buf: &[u8], target: A) -> io::Result<usize> { + let mut addrs = target.to_socket_addrs().await?; + + match addrs.next() { + Some(target) => poll_fn(|cx| self.poll_send_to(cx, buf, &target)).await, + None => Err(io::Error::new( + io::ErrorKind::InvalidInput, + "no addresses to send data to", + )), + } + } + + // TODO: Public or not? + #[doc(hidden)] + pub fn poll_send_to( + &self, + cx: &mut Context<'_>, + buf: &[u8], + target: &SocketAddr, + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().send_to(buf, target) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Returns a future that receives a single datagram on the socket. On success, + /// the future resolves to the number of bytes read and the origin. + /// + /// The function must be called with valid byte array `buf` of sufficient size + /// to hold the message bytes. If a message is too long to fit in the supplied + /// buffer, excess bytes may be discarded. 
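[Editor's note: `connect`, `send`, and `recv` compose into the connected-socket pattern. A hedged sketch; both addresses are placeholders and a peer is assumed to answer.]

```rust
use tokio::net::UdpSocket;

async fn ping(local: &str, peer: &str) -> std::io::Result<usize> {
    let mut socket = UdpSocket::bind(local).await?;
    // After connect(), send()/recv() talk only to `peer`; datagrams
    // arriving from any other address are filtered out.
    socket.connect(peer).await?;
    socket.send(b"ping").await?;
    let mut buf = [0u8; 32];
    socket.recv(&mut buf).await
}
```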
+ pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + poll_fn(|cx| self.poll_recv_from(cx, buf)).await + } + + #[doc(hidden)] + pub fn poll_recv_from( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<Result<(usize, SocketAddr), io::Error>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().recv_from(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Gets the value of the `SO_BROADCAST` option for this socket. + /// + /// For more information about this option, see [`set_broadcast`]. + /// + /// [`set_broadcast`]: #method.set_broadcast + pub fn broadcast(&self) -> io::Result<bool> { + self.io.get_ref().broadcast() + } + + /// Sets the value of the `SO_BROADCAST` option for this socket. + /// + /// When enabled, this socket is allowed to send packets to a broadcast + /// address. + pub fn set_broadcast(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_broadcast(on) + } + + /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v4`]. + /// + /// [`set_multicast_loop_v4`]: #method.set_multicast_loop_v4 + pub fn multicast_loop_v4(&self) -> io::Result<bool> { + self.io.get_ref().multicast_loop_v4() + } + + /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket. + /// + /// If enabled, multicast packets will be looped back to the local socket. + /// + /// # Note + /// + /// This may not have any affect on IPv6 sockets. + pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_multicast_loop_v4(on) + } + + /// Gets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// For more information about this option, see [`set_multicast_ttl_v4`]. + /// + /// [`set_multicast_ttl_v4`]: #method.set_multicast_ttl_v4 + pub fn multicast_ttl_v4(&self) -> io::Result<u32> { + self.io.get_ref().multicast_ttl_v4() + } + + /// Sets the value of the `IP_MULTICAST_TTL` option for this socket. + /// + /// Indicates the time-to-live value of outgoing multicast packets for + /// this socket. The default value is 1 which means that multicast packets + /// don't leave the local network unless explicitly requested. + /// + /// # Note + /// + /// This may not have any affect on IPv6 sockets. + pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_multicast_ttl_v4(ttl) + } + + /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// For more information about this option, see [`set_multicast_loop_v6`]. + /// + /// [`set_multicast_loop_v6`]: #method.set_multicast_loop_v6 + pub fn multicast_loop_v6(&self) -> io::Result<bool> { + self.io.get_ref().multicast_loop_v6() + } + + /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket. + /// + /// Controls whether this socket sees the multicast packets it sends itself. + /// + /// # Note + /// + /// This may not have any affect on IPv4 sockets. + pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> { + self.io.get_ref().set_multicast_loop_v6(on) + } + + /// Gets the value of the `IP_TTL` option for this socket. + /// + /// For more information about this option, see [`set_ttl`]. 
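[Editor's note: on the unconnected side, `recv_from` and `send_to` pair naturally into an echo loop. A minimal sketch; 1500 bytes is just a typical MTU-sized buffer.]

```rust
use tokio::net::UdpSocket;

async fn echo_server(mut socket: UdpSocket) -> std::io::Result<()> {
    let mut buf = [0u8; 1500];
    loop {
        // recv_from reports the origin, which is exactly what
        // send_to needs in order to address the reply.
        let (n, origin) = socket.recv_from(&mut buf).await?;
        socket.send_to(&buf[..n], origin).await?;
    }
}
```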
+ /// + /// [`set_ttl`]: #method.set_ttl + pub fn ttl(&self) -> io::Result<u32> { + self.io.get_ref().ttl() + } + + /// Sets the value for the `IP_TTL` option on this socket. + /// + /// This value sets the time-to-live field that is used in every packet sent + /// from this socket. + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + self.io.get_ref().set_ttl(ttl) + } + + /// Executes an operation of the `IP_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// address of the local interface with which the system should join the + /// multicast group. If it's equal to `INADDR_ANY` then an appropriate + /// interface is chosen by the system. + pub fn join_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { + self.io.get_ref().join_multicast_v4(&multiaddr, &interface) + } + + /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type. + /// + /// This function specifies a new multicast group for this socket to join. + /// The address must be a valid multicast address, and `interface` is the + /// index of the interface to join/leave (or 0 to indicate any interface). + pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + self.io.get_ref().join_multicast_v6(multiaddr, interface) + } + + /// Executes an operation of the `IP_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see [`join_multicast_v4`]. + /// + /// [`join_multicast_v4`]: #method.join_multicast_v4 + pub fn leave_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> { + self.io.get_ref().leave_multicast_v4(&multiaddr, &interface) + } + + /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type. + /// + /// For more information about this option, see [`join_multicast_v6`]. + /// + /// [`join_multicast_v6`]: #method.join_multicast_v6 + pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> { + self.io.get_ref().leave_multicast_v6(multiaddr, interface) + } +} + +impl TryFrom<UdpSocket> for mio::net::UdpSocket { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. + /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: UdpSocket) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::UdpSocket> for UdpSocket { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`UdpSocket::from_std(stream)`](UdpSocket::from_std). + fn try_from(stream: net::UdpSocket) -> Result<Self, Self::Error> { + Self::from_std(stream) + } +} + +impl fmt::Debug for UdpSocket { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +#[cfg(all(unix))] +mod sys { + use super::UdpSocket; + use std::os::unix::prelude::*; + + impl AsRawFd for UdpSocket { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + // TODO: let's land these upstream with mio and then we can add them here. 
+ // + // use std::os::windows::prelude::*; + // use super::UdpSocket; + // + // impl AsRawHandle for UdpSocket { + // fn as_raw_handle(&self) -> RawHandle { + // self.io.get_ref().as_raw_handle() + // } + // } +} diff --git a/third_party/rust/tokio/src/net/udp/split.rs b/third_party/rust/tokio/src/net/udp/split.rs new file mode 100644 index 0000000000..55542cb631 --- /dev/null +++ b/third_party/rust/tokio/src/net/udp/split.rs @@ -0,0 +1,148 @@ +//! [`UdpSocket`](../struct.UdpSocket.html) split support. +//! +//! The [`split`](../struct.UdpSocket.html#method.split) method splits a +//! `UdpSocket` into a receive half and a send half, which can be used to +//! receive and send datagrams concurrently, even from two different tasks. +//! +//! The halves provide access to the underlying socket, implementing +//! `AsRef<UdpSocket>`. This allows you to call `UdpSocket` methods that takes +//! `&self`, e.g., to get local address, to get and set socket options, to join +//! or leave multicast groups, etc. +//! +//! The halves can be reunited to the original socket with their `reunite` +//! methods. + +use crate::future::poll_fn; +use crate::net::udp::UdpSocket; + +use std::error::Error; +use std::fmt; +use std::io; +use std::net::SocketAddr; +use std::sync::Arc; + +/// The send half after [`split`](super::UdpSocket::split). +/// +/// Use [`send_to`](#method.send_to) or [`send`](#method.send) to send +/// datagrams. +#[derive(Debug)] +pub struct SendHalf(Arc<UdpSocket>); + +/// The recv half after [`split`](super::UdpSocket::split). +/// +/// Use [`recv_from`](#method.recv_from) or [`recv`](#method.recv) to receive +/// datagrams. +#[derive(Debug)] +pub struct RecvHalf(Arc<UdpSocket>); + +pub(crate) fn split(socket: UdpSocket) -> (RecvHalf, SendHalf) { + let shared = Arc::new(socket); + let send = shared.clone(); + let recv = shared; + (RecvHalf(recv), SendHalf(send)) +} + +/// Error indicating two halves were not from the same socket, and thus could +/// not be `reunite`d. +#[derive(Debug)] +pub struct ReuniteError(pub SendHalf, pub RecvHalf); + +impl fmt::Display for ReuniteError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "tried to reunite halves that are not from the same socket" + ) + } +} + +impl Error for ReuniteError {} + +fn reunite(s: SendHalf, r: RecvHalf) -> Result<UdpSocket, ReuniteError> { + if Arc::ptr_eq(&s.0, &r.0) { + drop(r); + // Only two instances of the `Arc` are ever created, one for the + // receiver and one for the sender, and those `Arc`s are never exposed + // externally. And so when we drop one here, the other one must be the + // only remaining one. + Ok(Arc::try_unwrap(s.0).expect("udp: try_unwrap failed in reunite")) + } else { + Err(ReuniteError(s, r)) + } +} + +impl RecvHalf { + /// Attempts to put the two "halves" of a `UdpSocket` back together and + /// recover the original socket. Succeeds only if the two "halves" + /// originated from the same call to `UdpSocket::split`. + pub fn reunite(self, other: SendHalf) -> Result<UdpSocket, ReuniteError> { + reunite(other, self) + } + + /// Returns a future that receives a single datagram on the socket. On success, + /// the future resolves to the number of bytes read and the origin. + /// + /// The function must be called with valid byte array `buf` of sufficient size + /// to hold the message bytes. If a message is too long to fit in the supplied + /// buffer, excess bytes may be discarded. 
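[Editor's note: a sketch of the owned split and the `reunite` round trip described above; the socket sends to itself over loopback purely for illustration.]

```rust
use tokio::net::UdpSocket;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let socket = UdpSocket::bind("127.0.0.1:0").await?;
    let addr = socket.local_addr()?;
    let (mut recv_half, mut send_half) = socket.split();

    // Each half owns an Arc<UdpSocket>, so either may move into
    // its own task; here they stay local for brevity.
    send_half.send_to(b"hello", &addr).await?;
    let mut buf = [0u8; 16];
    let (n, _from) = recv_half.recv_from(&mut buf).await?;
    assert_eq!(&buf[..n], b"hello");

    // reunite() only succeeds for halves from the same split() call.
    let _socket = recv_half.reunite(send_half).expect("halves match");
    Ok(())
}
```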
+ pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + poll_fn(|cx| self.0.poll_recv_from(cx, buf)).await + } + + /// Returns a future that receives a single datagram message on the socket from + /// the remote address to which it is connected. On success, the future will resolve + /// to the number of bytes read. + /// + /// The function must be called with valid byte array `buf` of sufficient size to + /// hold the message bytes. If a message is too long to fit in the supplied buffer, + /// excess bytes may be discarded. + /// + /// The [`connect`] method will connect this socket to a remote address. The future + /// will fail if the socket is not connected. + /// + /// [`connect`]: super::UdpSocket::connect + pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> { + poll_fn(|cx| self.0.poll_recv(cx, buf)).await + } +} + +impl SendHalf { + /// Attempts to put the two "halves" of a `UdpSocket` back together and + /// recover the original socket. Succeeds only if the two "halves" + /// originated from the same call to `UdpSocket::split`. + pub fn reunite(self, other: RecvHalf) -> Result<UdpSocket, ReuniteError> { + reunite(self, other) + } + + /// Returns a future that sends data on the socket to the given address. + /// On success, the future will resolve to the number of bytes written. + /// + /// The future will resolve to an error if the IP version of the socket does + /// not match that of `target`. + pub async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> { + poll_fn(|cx| self.0.poll_send_to(cx, buf, target)).await + } + + /// Returns a future that sends data on the socket to the remote address to which it is connected. + /// On success, the future will resolve to the number of bytes written. + /// + /// The [`connect`] method will connect this socket to a remote address. The future + /// will resolve to an error if the socket is not connected. + /// + /// [`connect`]: super::UdpSocket::connect + pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> { + poll_fn(|cx| self.0.poll_send(cx, buf)).await + } +} + +impl AsRef<UdpSocket> for SendHalf { + fn as_ref(&self) -> &UdpSocket { + &self.0 + } +} + +impl AsRef<UdpSocket> for RecvHalf { + fn as_ref(&self) -> &UdpSocket { + &self.0 + } +} diff --git a/third_party/rust/tokio/src/net/unix/datagram.rs b/third_party/rust/tokio/src/net/unix/datagram.rs new file mode 100644 index 0000000000..ff0f4241d5 --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/datagram.rs @@ -0,0 +1,242 @@ +use crate::future::poll_fn; +use crate::io::PollEvented; + +use std::convert::TryFrom; +use std::fmt; +use std::io; +use std::net::Shutdown; +use std::os::unix::io::{AsRawFd, RawFd}; +use std::os::unix::net::{self, SocketAddr}; +use std::path::Path; +use std::task::{Context, Poll}; + +cfg_uds! { + /// An I/O object representing a Unix datagram socket. + pub struct UnixDatagram { + io: PollEvented<mio_uds::UnixDatagram>, + } +} + +impl UnixDatagram { + /// Creates a new `UnixDatagram` bound to the specified path. + pub fn bind<P>(path: P) -> io::Result<UnixDatagram> + where + P: AsRef<Path>, + { + let socket = mio_uds::UnixDatagram::bind(path)?; + UnixDatagram::new(socket) + } + + /// Creates an unnamed pair of connected sockets. + /// + /// This function will create a pair of interconnected Unix sockets for + /// communicating back and forth between one another. Each socket will + /// be associated with the default event loop's handle. 
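[Editor's note: since `pair` needs no filesystem path, it is the easiest way to exercise `UnixDatagram` (Unix-only, like everything in this module). A minimal sketch.]

```rust
use tokio::net::UnixDatagram;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // No path on disk: the pair is anonymous and interconnected.
    let (mut a, mut b) = UnixDatagram::pair()?;

    a.send(b"hello").await?;
    let mut buf = [0u8; 16];
    let n = b.recv(&mut buf).await?;
    assert_eq!(&buf[..n], b"hello");
    Ok(())
}
```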
+ pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { + let (a, b) = mio_uds::UnixDatagram::pair()?; + let a = UnixDatagram::new(a)?; + let b = UnixDatagram::new(b)?; + + Ok((a, b)) + } + + /// Consumes a `UnixDatagram` in the standard library and returns a + /// nonblocking `UnixDatagram` from this crate. + /// + /// The returned datagram will be associated with the given event loop + /// specified by `handle` and is ready to perform I/O. + /// + /// # Panics + /// + /// This function panics if thread-local runtime is not set. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. + pub fn from_std(datagram: net::UnixDatagram) -> io::Result<UnixDatagram> { + let socket = mio_uds::UnixDatagram::from_datagram(datagram)?; + let io = PollEvented::new(socket)?; + Ok(UnixDatagram { io }) + } + + fn new(socket: mio_uds::UnixDatagram) -> io::Result<UnixDatagram> { + let io = PollEvented::new(socket)?; + Ok(UnixDatagram { io }) + } + + /// Creates a new `UnixDatagram` which is not bound to any address. + pub fn unbound() -> io::Result<UnixDatagram> { + let socket = mio_uds::UnixDatagram::unbound()?; + UnixDatagram::new(socket) + } + + /// Connects the socket to the specified address. + /// + /// The `send` method may be used to send data to the specified address. + /// `recv` and `recv_from` will only receive data from that address. + pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { + self.io.get_ref().connect(path) + } + + /// Sends data on the socket to the socket's peer. + pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> { + poll_fn(|cx| self.poll_send_priv(cx, buf)).await + } + + // Poll IO functions that takes `&self` are provided for the split API. + // + // They are not public because (taken from the doc of `PollEvented`): + // + // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the + // caller must ensure that there are at most two tasks that use a + // `PollEvented` instance concurrently. One for reading and one for writing. + // While violating this requirement is "safe" from a Rust memory model point + // of view, it will result in unexpected behavior in the form of lost + // notifications and tasks hanging. + pub(crate) fn poll_send_priv( + &self, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().send(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Receives data from the socket. + pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> { + poll_fn(|cx| self.poll_recv_priv(cx, buf)).await + } + + pub(crate) fn poll_recv_priv( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().recv(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Sends data on the socket to the specified address. 
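[Editor's note: `unbound` plus `send_to` covers the fire-and-forget case where the sender never needs an address of its own. A sketch; the socket path is hypothetical.]

```rust
use tokio::net::UnixDatagram;

async fn notify(path: &str, msg: &[u8]) -> std::io::Result<usize> {
    // An unbound socket cannot receive replies addressed to it,
    // but it can still send to any bound peer.
    let mut socket = UnixDatagram::unbound()?;
    socket.send_to(msg, path).await
}
```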
+ pub async fn send_to<P>(&mut self, buf: &[u8], target: P) -> io::Result<usize> + where + P: AsRef<Path> + Unpin, + { + poll_fn(|cx| self.poll_send_to_priv(cx, buf, target.as_ref())).await + } + + pub(crate) fn poll_send_to_priv( + &self, + cx: &mut Context<'_>, + buf: &[u8], + target: &Path, + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().send_to(buf, target) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Receives data from the socket. + pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + poll_fn(|cx| self.poll_recv_from_priv(cx, buf)).await + } + + pub(crate) fn poll_recv_from_priv( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<Result<(usize, SocketAddr), io::Error>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().recv_from(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + /// Returns the local address that this socket is bound to. + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Returns the address of this socket's peer. + /// + /// The `connect` method will connect the socket to a peer. + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().peer_addr() + } + + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.io.get_ref().take_error() + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of `Shutdown`). + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.io.get_ref().shutdown(how) + } +} + +impl TryFrom<UnixDatagram> for mio_uds::UnixDatagram { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. + /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: UnixDatagram) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::UnixDatagram> for UnixDatagram { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`UnixDatagram::from_std(stream)`](UnixDatagram::from_std). 
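[Editor's note: on the receiving side, Unix datagram peers are often anonymous (for example, sockets created with `pair` or `unbound`), so the returned `SocketAddr` may carry no path. A hedged sketch.]

```rust
use tokio::net::UnixDatagram;

async fn serve(mut socket: UnixDatagram) -> std::io::Result<()> {
    let mut buf = [0u8; 1024];
    loop {
        let (n, addr) = socket.recv_from(&mut buf).await?;
        // `addr` is unnamed when the sender never bound to a path.
        println!("{} bytes from {:?}", n, addr);
    }
}
```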
+ fn try_from(stream: net::UnixDatagram) -> Result<Self, Self::Error> { + Self::from_std(stream) + } +} + +impl fmt::Debug for UnixDatagram { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +impl AsRawFd for UnixDatagram { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } +} diff --git a/third_party/rust/tokio/src/net/unix/incoming.rs b/third_party/rust/tokio/src/net/unix/incoming.rs new file mode 100644 index 0000000000..af49360435 --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/incoming.rs @@ -0,0 +1,42 @@ +use crate::net::unix::{UnixListener, UnixStream}; + +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Stream of listeners +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Incoming<'a> { + inner: &'a mut UnixListener, +} + +impl Incoming<'_> { + pub(crate) fn new(listener: &mut UnixListener) -> Incoming<'_> { + Incoming { inner: listener } + } + + /// Attempts to poll `UnixStream` by polling inner `UnixListener` to accept + /// connection. + /// + /// If `UnixListener` isn't ready yet, `Poll::Pending` is returned and + /// current task will be notified by a waker. Otherwise `Poll::Ready` with + /// `Result` containing `UnixStream` will be returned. + pub fn poll_accept( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<io::Result<UnixStream>> { + let (socket, _) = ready!(self.inner.poll_accept(cx))?; + Poll::Ready(Ok(socket)) + } +} + +#[cfg(feature = "stream")] +impl crate::stream::Stream for Incoming<'_> { + type Item = io::Result<UnixStream>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + let (socket, _) = ready!(self.inner.poll_accept(cx))?; + Poll::Ready(Some(Ok(socket))) + } +} diff --git a/third_party/rust/tokio/src/net/unix/listener.rs b/third_party/rust/tokio/src/net/unix/listener.rs new file mode 100644 index 0000000000..5acc1b7e82 --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/listener.rs @@ -0,0 +1,229 @@ +use crate::future::poll_fn; +use crate::io::PollEvented; +use crate::net::unix::{Incoming, UnixStream}; + +use mio::Ready; +use mio_uds; +use std::convert::TryFrom; +use std::fmt; +use std::io; +use std::os::unix::io::{AsRawFd, RawFd}; +use std::os::unix::net::{self, SocketAddr}; +use std::path::Path; +use std::task::{Context, Poll}; + +cfg_uds! { + /// A Unix socket which can accept connections from other Unix sockets. + /// + /// You can accept a new connection by using the [`accept`](`UnixListener::accept`) method. Alternatively `UnixListener` + /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows you to use the listener in places that want a + /// stream. The stream will never return `None` and will also not yield the peer's `SocketAddr` structure. Iterating over + /// it is equivalent to calling accept in a loop. + /// + /// # Errors + /// + /// Note that accepting a connection can lead to various errors and not all + /// of them are necessarily fatal ‒ for example having too many open file + /// descriptors or the other side closing the connection while it waits in + /// an accept queue. These would terminate the stream if not handled in any + /// way. 
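[Editor's note: the Errors paragraph above is worth making concrete, since treating every accept error as fatal would tear down the whole loop on something as mundane as running out of file descriptors. A sketch of the log-and-continue pattern.]

```rust
use tokio::net::UnixListener;
use tokio::stream::StreamExt;

async fn run(mut listener: UnixListener) {
    while let Some(conn) = listener.next().await {
        match conn {
            Ok(stream) => {
                tokio::spawn(async move {
                    // handle the connection here
                    let _ = stream;
                });
            }
            // e.g. too many open file descriptors, or the peer
            // hung up while queued: log it and keep accepting.
            Err(e) => eprintln!("accept failed: {}", e),
        }
    }
}
```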
+ /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::UnixListener; + /// use tokio::stream::StreamExt; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap(); + /// while let Some(stream) = listener.next().await { + /// match stream { + /// Ok(stream) => { + /// println!("new client!"); + /// } + /// Err(e) => { /* connection failed */ } + /// } + /// } + /// } + /// ``` + pub struct UnixListener { + io: PollEvented<mio_uds::UnixListener>, + } +} + +impl UnixListener { + /// Creates a new `UnixListener` bound to the specified path. + /// + /// # Panics + /// + /// This function panics if thread-local runtime is not set. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. + pub fn bind<P>(path: P) -> io::Result<UnixListener> + where + P: AsRef<Path>, + { + let listener = mio_uds::UnixListener::bind(path)?; + let io = PollEvented::new(listener)?; + Ok(UnixListener { io }) + } + + /// Consumes a `UnixListener` in the standard library and returns a + /// nonblocking `UnixListener` from this crate. + /// + /// The returned listener will be associated with the given event loop + /// specified by `handle` and is ready to perform I/O. + /// + /// # Panics + /// + /// This function panics if thread-local runtime is not set. + /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. + pub fn from_std(listener: net::UnixListener) -> io::Result<UnixListener> { + let listener = mio_uds::UnixListener::from_listener(listener)?; + let io = PollEvented::new(listener)?; + Ok(UnixListener { io }) + } + + /// Returns the local socket address of this listener. + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.io.get_ref().take_error() + } + + /// Accepts a new incoming connection to this listener. + pub async fn accept(&mut self) -> io::Result<(UnixStream, SocketAddr)> { + poll_fn(|cx| self.poll_accept(cx)).await + } + + pub(crate) fn poll_accept( + &mut self, + cx: &mut Context<'_>, + ) -> Poll<io::Result<(UnixStream, SocketAddr)>> { + let (io, addr) = ready!(self.poll_accept_std(cx))?; + + let io = mio_uds::UnixStream::from_stream(io)?; + Ok((UnixStream::new(io)?, addr)).into() + } + + fn poll_accept_std( + &mut self, + cx: &mut Context<'_>, + ) -> Poll<io::Result<(net::UnixStream, SocketAddr)>> { + ready!(self.io.poll_read_ready(cx, Ready::readable()))?; + + match self.io.get_ref().accept_std() { + Ok(None) => { + self.io.clear_read_ready(cx, Ready::readable())?; + Poll::Pending + } + Ok(Some((sock, addr))) => Ok((sock, addr)).into(), + Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, Ready::readable())?; + Poll::Pending + } + Err(err) => Err(err).into(), + } + } + + /// Returns a stream over the connections being received on this listener. + /// + /// Note that `UnixListener` also directly implements `Stream`. + /// + /// The returned stream will never return `None` and will also not yield the + /// peer's `SocketAddr` structure. 
Iterating over it is equivalent to + /// calling accept in a loop. + /// + /// # Errors + /// + /// Note that accepting a connection can lead to various errors and not all + /// of them are necessarily fatal ‒ for example having too many open file + /// descriptors or the other side closing the connection while it waits in + /// an accept queue. These would terminate the stream if not handled in any + /// way. + /// + /// # Examples + /// + /// ```no_run + /// use tokio::net::UnixListener; + /// use tokio::stream::StreamExt; + /// + /// #[tokio::main] + /// async fn main() { + /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap(); + /// let mut incoming = listener.incoming(); + /// + /// while let Some(stream) = incoming.next().await { + /// match stream { + /// Ok(stream) => { + /// println!("new client!"); + /// } + /// Err(e) => { /* connection failed */ } + /// } + /// } + /// } + /// ``` + pub fn incoming(&mut self) -> Incoming<'_> { + Incoming::new(self) + } +} + +#[cfg(feature = "stream")] +impl crate::stream::Stream for UnixListener { + type Item = io::Result<UnixStream>; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<Option<Self::Item>> { + let (socket, _) = ready!(self.poll_accept(cx))?; + Poll::Ready(Some(Ok(socket))) + } +} + +impl TryFrom<UnixListener> for mio_uds::UnixListener { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. + /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: UnixListener) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::UnixListener> for UnixListener { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`UnixListener::from_std(stream)`](UnixListener::from_std). + fn try_from(stream: net::UnixListener) -> io::Result<Self> { + Self::from_std(stream) + } +} + +impl fmt::Debug for UnixListener { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +impl AsRawFd for UnixListener { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } +} diff --git a/third_party/rust/tokio/src/net/unix/mod.rs b/third_party/rust/tokio/src/net/unix/mod.rs new file mode 100644 index 0000000000..ddba60d10a --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/mod.rs @@ -0,0 +1,18 @@ +//! Unix domain socket utility types + +pub(crate) mod datagram; + +mod incoming; +pub use incoming::Incoming; + +pub(crate) mod listener; +pub(crate) use listener::UnixListener; + +mod split; +pub use split::{ReadHalf, WriteHalf}; + +pub(crate) mod stream; +pub(crate) use stream::UnixStream; + +mod ucred; +pub use ucred::UCred; diff --git a/third_party/rust/tokio/src/net/unix/split.rs b/third_party/rust/tokio/src/net/unix/split.rs new file mode 100644 index 0000000000..9b9fa5ee1d --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/split.rs @@ -0,0 +1,74 @@ +//! `UnixStream` split support. +//! +//! A `UnixStream` can be split into a read half and a write half with +//! `UnixStream::split`. The read half implements `AsyncRead` while the write +//! half implements `AsyncWrite`. +//! +//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized +//! split has no associated overhead and enforces all invariants at the type +//! level. 
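[Editor's note: a sketch of this zero-overhead split in use. The request bytes and the peer's framing are made up; `shutdown` on the write half closes only the write direction, so the peer sees EOF and can reply in full.]

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;

async fn request(mut stream: UnixStream) -> std::io::Result<Vec<u8>> {
    let (mut rd, mut wr) = stream.split();

    wr.write_all(b"GET /\n").await?;
    // Half-close: the read half stays usable after this.
    wr.shutdown().await?;

    let mut reply = Vec::new();
    rd.read_to_end(&mut reply).await?;
    Ok(reply)
}
```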
+
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::net::UnixStream;
+
+use std::io;
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Read half of a `UnixStream`.
+#[derive(Debug)]
+pub struct ReadHalf<'a>(&'a UnixStream);
+
+/// Write half of a `UnixStream`.
+#[derive(Debug)]
+pub struct WriteHalf<'a>(&'a UnixStream);
+
+pub(crate) fn split(stream: &mut UnixStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
+    (ReadHalf(stream), WriteHalf(stream))
+}
+
+impl AsyncRead for ReadHalf<'_> {
+    unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+        false
+    }
+
+    fn poll_read(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &mut [u8],
+    ) -> Poll<io::Result<usize>> {
+        self.0.poll_read_priv(cx, buf)
+    }
+}
+
+impl AsyncWrite for WriteHalf<'_> {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        self.0.poll_write_priv(cx, buf)
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+        self.0.shutdown(Shutdown::Write).into()
+    }
+}
+
+impl AsRef<UnixStream> for ReadHalf<'_> {
+    fn as_ref(&self) -> &UnixStream {
+        self.0
+    }
+}
+
+impl AsRef<UnixStream> for WriteHalf<'_> {
+    fn as_ref(&self) -> &UnixStream {
+        self.0
+    }
+}
diff --git a/third_party/rust/tokio/src/net/unix/stream.rs b/third_party/rust/tokio/src/net/unix/stream.rs
new file mode 100644
index 0000000000..beae699962
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/stream.rs
@@ -0,0 +1,233 @@
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite, PollEvented};
+use crate::net::unix::split::{split, ReadHalf, WriteHalf};
+use crate::net::unix::ucred::{self, UCred};
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::{self, SocketAddr};
+use std::path::Path;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_uds! {
+    /// A structure representing a connected Unix socket.
+    ///
+    /// This socket can be connected directly with `UnixStream::connect` or accepted
+    /// from a listener with `UnixListener::incoming`. Additionally, a pair of
+    /// anonymous Unix sockets can be created with `UnixStream::pair`.
+    pub struct UnixStream {
+        io: PollEvented<mio_uds::UnixStream>,
+    }
+}
+
+impl UnixStream {
+    /// Connects to the socket named by `path`.
+    ///
+    /// This function will create a new Unix socket and connect to the path
+    /// specified, associating the returned stream with the default event loop's
+    /// handle.
+    pub async fn connect<P>(path: P) -> io::Result<UnixStream>
+    where
+        P: AsRef<Path>,
+    {
+        let stream = mio_uds::UnixStream::connect(path)?;
+        let stream = UnixStream::new(stream)?;
+
+        poll_fn(|cx| stream.io.poll_write_ready(cx)).await?;
+        Ok(stream)
+    }
+
+    /// Consumes a `UnixStream` in the standard library and returns a
+    /// nonblocking `UnixStream` from this crate.
+    ///
+    /// The returned stream will be associated with the current event loop
+    /// and is ready to perform I/O.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the thread-local runtime is not set.
+ /// + /// The runtime is usually set implicitly when this function is called + /// from a future driven by a tokio runtime, otherwise runtime can be set + /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function. + pub fn from_std(stream: net::UnixStream) -> io::Result<UnixStream> { + let stream = mio_uds::UnixStream::from_stream(stream)?; + let io = PollEvented::new(stream)?; + + Ok(UnixStream { io }) + } + + /// Creates an unnamed pair of connected sockets. + /// + /// This function will create a pair of interconnected Unix sockets for + /// communicating back and forth between one another. Each socket will + /// be associated with the default event loop's handle. + pub fn pair() -> io::Result<(UnixStream, UnixStream)> { + let (a, b) = mio_uds::UnixStream::pair()?; + let a = UnixStream::new(a)?; + let b = UnixStream::new(b)?; + + Ok((a, b)) + } + + pub(crate) fn new(stream: mio_uds::UnixStream) -> io::Result<UnixStream> { + let io = PollEvented::new(stream)?; + Ok(UnixStream { io }) + } + + /// Returns the socket address of the local half of this connection. + pub fn local_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().local_addr() + } + + /// Returns the socket address of the remote half of this connection. + pub fn peer_addr(&self) -> io::Result<SocketAddr> { + self.io.get_ref().peer_addr() + } + + /// Returns effective credentials of the process which called `connect` or `pair`. + pub fn peer_cred(&self) -> io::Result<UCred> { + ucred::get_peer_cred(self) + } + + /// Returns the value of the `SO_ERROR` option. + pub fn take_error(&self) -> io::Result<Option<io::Error>> { + self.io.get_ref().take_error() + } + + /// Shuts down the read, write, or both halves of this connection. + /// + /// This function will cause all pending and future I/O calls on the + /// specified portions to immediately return with an appropriate value + /// (see the documentation of `Shutdown`). + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + self.io.get_ref().shutdown(how) + } + + /// Split a `UnixStream` into a read half and a write half, which can be used + /// to read and write the stream concurrently. + pub fn split(&mut self) -> (ReadHalf<'_>, WriteHalf<'_>) { + split(self) + } +} + +impl TryFrom<UnixStream> for mio_uds::UnixStream { + type Error = io::Error; + + /// Consumes value, returning the mio I/O object. + /// + /// See [`PollEvented::into_inner`] for more details about + /// resource deregistration that happens during the call. + /// + /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner + fn try_from(value: UnixStream) -> Result<Self, Self::Error> { + value.io.into_inner() + } +} + +impl TryFrom<net::UnixStream> for UnixStream { + type Error = io::Error; + + /// Consumes stream, returning the tokio I/O object. + /// + /// This is equivalent to + /// [`UnixStream::from_std(stream)`](UnixStream::from_std). 
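+    ///
+    /// A minimal sketch of the conversion (the socket path is hypothetical):
+    ///
+    /// ```no_run
+    /// use std::convert::TryFrom;
+    /// use tokio::net::UnixStream;
+    ///
+    /// fn convert(std_stream: std::os::unix::net::UnixStream) -> std::io::Result<UnixStream> {
+    ///     // The conversion registers the stream with the reactor, so a
+    ///     // runtime must be set (see the panics note on `from_std`).
+    ///     UnixStream::try_from(std_stream)
+    /// }
+    /// ```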
+ fn try_from(stream: net::UnixStream) -> io::Result<Self> { + Self::from_std(stream) + } +} + +impl AsyncRead for UnixStream { + unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool { + false + } + + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + self.poll_read_priv(cx, buf) + } +} + +impl AsyncWrite for UnixStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + self.poll_write_priv(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { + self.shutdown(std::net::Shutdown::Write)?; + Poll::Ready(Ok(())) + } +} + +impl UnixStream { + // == Poll IO functions that takes `&self` == + // + // They are not public because (taken from the doc of `PollEvented`): + // + // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the + // caller must ensure that there are at most two tasks that use a + // `PollEvented` instance concurrently. One for reading and one for writing. + // While violating this requirement is "safe" from a Rust memory model point + // of view, it will result in unexpected behavior in the form of lost + // notifications and tasks hanging. + + pub(crate) fn poll_read_priv( + &self, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + + match self.io.get_ref().read(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_read_ready(cx, mio::Ready::readable())?; + Poll::Pending + } + x => Poll::Ready(x), + } + } + + pub(crate) fn poll_write_priv( + &self, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + ready!(self.io.poll_write_ready(cx))?; + + match self.io.get_ref().write(buf) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_write_ready(cx)?; + Poll::Pending + } + x => Poll::Ready(x), + } + } +} + +impl fmt::Debug for UnixStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.io.get_ref().fmt(f) + } +} + +impl AsRawFd for UnixStream { + fn as_raw_fd(&self) -> RawFd { + self.io.get_ref().as_raw_fd() + } +} diff --git a/third_party/rust/tokio/src/net/unix/ucred.rs b/third_party/rust/tokio/src/net/unix/ucred.rs new file mode 100644 index 0000000000..cdd77ea414 --- /dev/null +++ b/third_party/rust/tokio/src/net/unix/ucred.rs @@ -0,0 +1,151 @@ +use libc::{gid_t, uid_t}; + +/// Credentials of a process +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub struct UCred { + /// UID (user ID) of the process + pub uid: uid_t, + /// GID (group ID) of the process + pub gid: gid_t, +} + +#[cfg(any(target_os = "linux", target_os = "android"))] +pub(crate) use self::impl_linux::get_peer_cred; + +#[cfg(any( + target_os = "dragonfly", + target_os = "macos", + target_os = "ios", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" +))] +pub(crate) use self::impl_macos::get_peer_cred; + +#[cfg(any(target_os = "solaris"))] +pub(crate) use self::impl_solaris::get_peer_cred; + +#[cfg(any(target_os = "linux", target_os = "android"))] +pub(crate) mod impl_linux { + use crate::net::unix::UnixStream; + + use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED}; + use std::{io, mem}; + + use libc::ucred; + + pub(crate) fn get_peer_cred(sock: &UnixStream) 
-> io::Result<super::UCred> { + use std::os::unix::io::AsRawFd; + + unsafe { + let raw_fd = sock.as_raw_fd(); + + let mut ucred = ucred { + pid: 0, + uid: 0, + gid: 0, + }; + + let ucred_size = mem::size_of::<ucred>(); + + // These paranoid checks should be optimized-out + assert!(mem::size_of::<u32>() <= mem::size_of::<usize>()); + assert!(ucred_size <= u32::max_value() as usize); + + let mut ucred_size = ucred_size as socklen_t; + + let ret = getsockopt( + raw_fd, + SOL_SOCKET, + SO_PEERCRED, + &mut ucred as *mut ucred as *mut c_void, + &mut ucred_size, + ); + if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() { + Ok(super::UCred { + uid: ucred.uid, + gid: ucred.gid, + }) + } else { + Err(io::Error::last_os_error()) + } + } + } +} + +#[cfg(any( + target_os = "dragonfly", + target_os = "macos", + target_os = "ios", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd" +))] +pub(crate) mod impl_macos { + use crate::net::unix::UnixStream; + + use libc::getpeereid; + use std::io; + use std::mem::MaybeUninit; + use std::os::unix::io::AsRawFd; + + pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { + unsafe { + let raw_fd = sock.as_raw_fd(); + + let mut uid = MaybeUninit::uninit(); + let mut gid = MaybeUninit::uninit(); + + let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr()); + + if ret == 0 { + Ok(super::UCred { + uid: uid.assume_init(), + gid: gid.assume_init(), + }) + } else { + Err(io::Error::last_os_error()) + } + } + } +} + +#[cfg(any(target_os = "solaris"))] +pub(crate) mod impl_solaris { + use crate::net::unix::UnixStream; + use std::io; + use std::os::unix::io::AsRawFd; + use std::ptr; + + #[allow(non_camel_case_types)] + enum ucred_t {} + + extern "C" { + fn ucred_free(cred: *mut ucred_t); + fn ucred_geteuid(cred: *const ucred_t) -> super::uid_t; + fn ucred_getegid(cred: *const ucred_t) -> super::gid_t; + + fn getpeerucred(fd: std::os::raw::c_int, cred: *mut *mut ucred_t) -> std::os::raw::c_int; + } + + pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> { + unsafe { + let raw_fd = sock.as_raw_fd(); + + let mut cred = ptr::null_mut::<*mut ucred_t>() as *mut ucred_t; + + let ret = getpeerucred(raw_fd, &mut cred); + + if ret == 0 { + let uid = ucred_geteuid(cred); + let gid = ucred_getegid(cred); + + ucred_free(cred); + + Ok(super::UCred { uid, gid }) + } else { + Err(io::Error::last_os_error()) + } + } + } +} diff --git a/third_party/rust/tokio/src/park/either.rs b/third_party/rust/tokio/src/park/either.rs new file mode 100644 index 0000000000..67f1e17274 --- /dev/null +++ b/third_party/rust/tokio/src/park/either.rs @@ -0,0 +1,65 @@ +use crate::park::{Park, Unpark}; + +use std::fmt; +use std::time::Duration; + +pub(crate) enum Either<A, B> { + A(A), + B(B), +} + +impl<A, B> Park for Either<A, B> +where + A: Park, + B: Park, +{ + type Unpark = Either<A::Unpark, B::Unpark>; + type Error = Either<A::Error, B::Error>; + + fn unpark(&self) -> Self::Unpark { + match self { + Either::A(a) => Either::A(a.unpark()), + Either::B(b) => Either::B(b.unpark()), + } + } + + fn park(&mut self) -> Result<(), Self::Error> { + match self { + Either::A(a) => a.park().map_err(Either::A), + Either::B(b) => b.park().map_err(Either::B), + } + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + match self { + Either::A(a) => a.park_timeout(duration).map_err(Either::A), + Either::B(b) => b.park_timeout(duration).map_err(Either::B), + } + } +} + +impl<A, B> Unpark for Either<A, B> +where + 
A: Unpark, + B: Unpark, +{ + fn unpark(&self) { + match self { + Either::A(a) => a.unpark(), + Either::B(b) => b.unpark(), + } + } +} + +impl<A, B> fmt::Debug for Either<A, B> +where + A: fmt::Debug, + B: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Either::A(a) => a.fmt(fmt), + Either::B(b) => b.fmt(fmt), + } + } +} diff --git a/third_party/rust/tokio/src/park/mod.rs b/third_party/rust/tokio/src/park/mod.rs new file mode 100644 index 0000000000..a3e49bbede --- /dev/null +++ b/third_party/rust/tokio/src/park/mod.rs @@ -0,0 +1,118 @@ +//! Abstraction over blocking and unblocking the current thread. +//! +//! Provides an abstraction over blocking the current thread. This is similar to +//! the park / unpark constructs provided by `std` but made generic. This allows +//! embedding custom functionality to perform when the thread is blocked. +//! +//! A blocked `Park` instance is unblocked by calling `unpark` on its +//! `Unpark` handle. +//! +//! The `ParkThread` struct implements `Park` using `thread::park` to put the +//! thread to sleep. The Tokio reactor also implements park, but uses +//! `mio::Poll` to block the thread instead. +//! +//! The `Park` trait is composable. A timer implementation might decorate a +//! `Park` implementation by checking if any timeouts have elapsed after the +//! inner `Park` implementation unblocks. +//! +//! # Model +//! +//! Conceptually, each `Park` instance has an associated token, which is +//! initially not present: +//! +//! * The `park` method blocks the current thread unless or until the token is +//! available, at which point it atomically consumes the token. +//! * The `unpark` method atomically makes the token available if it wasn't +//! already. +//! +//! Some things to note: +//! +//! * If `unpark` is called before `park`, the next call to `park` will +//! **not** block the thread. +//! * **Spurious** wakeups are permitted, i.e., the `park` method may unblock +//! even if `unpark` was not called. +//! * `park_timeout` does the same as `park` but allows specifying a maximum +//! time to block the thread for. + +cfg_resource_drivers! { + mod either; + pub(crate) use self::either::Either; +} + +mod thread; +pub(crate) use self::thread::ParkThread; + +cfg_blocking_impl! { + pub(crate) use self::thread::{CachedParkThread, ParkError}; +} + +use std::sync::Arc; +use std::time::Duration; + +/// Block the current thread. +pub(crate) trait Park { + /// Unpark handle type for the `Park` implementation. + type Unpark: Unpark; + + /// Error returned by `park` + type Error; + + /// Gets a new `Unpark` handle associated with this `Park` instance. + fn unpark(&self) -> Self::Unpark; + + /// Blocks the current thread unless or until the token is available. + /// + /// A call to `park` does not guarantee that the thread will remain blocked + /// forever, and callers should be prepared for this possibility. This + /// function may wakeup spuriously for any reason. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. Refer to the documentation for the specific + /// `Park` implementation + fn park(&mut self) -> Result<(), Self::Error>; + + /// Parks the current thread for at most `duration`. + /// + /// This function is the same as `park` but allows specifying a maximum time + /// to block the thread for. + /// + /// Same as `park`, there is no guarantee that the thread will remain + /// blocked for any amount of time. 
Spurious wakeups are permitted for any + /// reason. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. Refer to the documentation for the specific + /// `Park` implementation + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>; +} + +/// Unblock a thread blocked by the associated `Park` instance. +pub(crate) trait Unpark: Sync + Send + 'static { + /// Unblocks a thread that is blocked by the associated `Park` handle. + /// + /// Calling `unpark` atomically makes available the unpark token, if it is + /// not already available. + /// + /// # Panics + /// + /// This function **should** not panic, but ultimately, panics are left as + /// an implementation detail. Refer to the documentation for the specific + /// `Unpark` implementation + fn unpark(&self); +} + +impl Unpark for Box<dyn Unpark> { + fn unpark(&self) { + (**self).unpark() + } +} + +impl Unpark for Arc<dyn Unpark> { + fn unpark(&self) { + (**self).unpark() + } +} diff --git a/third_party/rust/tokio/src/park/thread.rs b/third_party/rust/tokio/src/park/thread.rs new file mode 100644 index 0000000000..a8cdf1432b --- /dev/null +++ b/third_party/rust/tokio/src/park/thread.rs @@ -0,0 +1,317 @@ +use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::{Arc, Condvar, Mutex}; +use crate::park::{Park, Unpark}; + +use std::sync::atomic::Ordering::SeqCst; +use std::time::Duration; + +#[derive(Debug)] +pub(crate) struct ParkThread { + inner: Arc<Inner>, +} + +pub(crate) type ParkError = (); + +/// Unblocks a thread that was blocked by `ParkThread`. +#[derive(Clone, Debug)] +pub(crate) struct UnparkThread { + inner: Arc<Inner>, +} + +#[derive(Debug)] +struct Inner { + state: AtomicUsize, + mutex: Mutex<()>, + condvar: Condvar, +} + +const EMPTY: usize = 0; +const PARKED: usize = 1; +const NOTIFIED: usize = 2; + +thread_local! { + static CURRENT_PARKER: ParkThread = ParkThread::new(); +} + +// ==== impl ParkThread ==== + +impl ParkThread { + pub(crate) fn new() -> Self { + Self { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + mutex: Mutex::new(()), + condvar: Condvar::new(), + }), + } + } +} + +impl Park for ParkThread { + type Unpark = UnparkThread; + type Error = ParkError; + + fn unpark(&self) -> Self::Unpark { + let inner = self.inner.clone(); + UnparkThread { inner } + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.inner.park(); + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.inner.park_timeout(duration); + Ok(()) + } +} + +// ==== impl Inner ==== + +impl Inner { + /// Park the current thread for at most `dur`. + fn park(&self) { + // If we were previously notified then we consume this notification and + // return quickly. + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + return; + } + + // Otherwise we need to coordinate going to sleep + let mut m = self.mutex.lock().unwrap(); + + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read here, even though we know it will be `NOTIFIED`. + // This is because `unpark` may have been called again since we read + // `NOTIFIED` in the `compare_exchange` above. We must perform an + // acquire operation that synchronizes with that `unpark` to observe + // any writes it made before the call to unpark. To do that we must + // read from the write it made to `state`. 
+ let old = self.state.swap(EMPTY, SeqCst); + debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + + return; + } + Err(actual) => panic!("inconsistent park state; actual = {}", actual), + } + + loop { + m = self.condvar.wait(m).unwrap(); + + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + // got a notification + return; + } + + // spurious wakeup, go back to sleep + } + } + + fn park_timeout(&self, dur: Duration) { + // Like `park` above we have a fast path for an already-notified thread, + // and afterwards we start coordinating for a sleep. Return quickly. + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + return; + } + + if dur == Duration::from_millis(0) { + return; + } + + let m = self.mutex.lock().unwrap(); + + match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read again here, see `park`. + let old = self.state.swap(EMPTY, SeqCst); + debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + + return; + } + Err(actual) => panic!("inconsistent park_timeout state; actual = {}", actual), + } + + // Wait with a timeout, and if we spuriously wake up or otherwise wake up + // from a notification, we just want to unconditionally set the state back to + // empty, either consuming a notification or un-flagging ourselves as + // parked. + let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap(); + + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED => {} // got a notification, hurray! + PARKED => {} // no notification, alas + n => panic!("inconsistent park_timeout state: {}", n), + } + } + + fn unpark(&self) { + // To ensure the unparked thread will observe any writes we made before + // this call, we must perform a release operation that `park` can + // synchronize with. To do that we must write `NOTIFIED` even if `state` + // is already `NOTIFIED`. That is why this must be a swap rather than a + // compare-and-swap that returns if it reads `NOTIFIED` on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => return, // no one was waiting + NOTIFIED => return, // already unparked + PARKED => {} // gotta go wake someone up + _ => panic!("inconsistent state in unpark"), + } + + // There is a period between when the parked thread sets `state` to + // `PARKED` (or last checked `state` in the case of a spurious wake + // up) and when it actually waits on `cvar`. If we were to notify + // during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has + // `lock` locked at this stage so we can acquire `lock` to wait until + // it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the + // parked thread wakes it doesn't get woken only to have to wait for us + // to release `lock`. + drop(self.mutex.lock().unwrap()); + + self.condvar.notify_one() + } +} + +impl Default for ParkThread { + fn default() -> Self { + Self::new() + } +} + +// ===== impl UnparkThread ===== + +impl Unpark for UnparkThread { + fn unpark(&self) { + self.inner.unpark(); + } +} + +cfg_blocking_impl! { + use std::marker::PhantomData; + use std::rc::Rc; + + use std::mem; + use std::task::{RawWaker, RawWakerVTable, Waker}; + + /// Blocks the current thread using a condition variable. 
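+    ///
+    /// An illustrative sketch of the park/unpark token model described in
+    /// `park/mod.rs` (marked `ignore`: these types are crate-private):
+    ///
+    /// ```ignore
+    /// use crate::park::{Park, ParkThread, Unpark};
+    ///
+    /// let mut park = ParkThread::new();
+    /// let unpark = park.unpark();
+    ///
+    /// unpark.unpark();      // make the token available up front...
+    /// park.park().unwrap(); // ...so this consumes it and returns immediately
+    ///
+    /// // A blocked `park` is woken by `unpark` from another thread.
+    /// let unpark = park.unpark();
+    /// let t = std::thread::spawn(move || unpark.unpark());
+    /// park.park().unwrap();
+    /// t.join().unwrap();
+    /// ```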
+ #[derive(Debug)] + pub(crate) struct CachedParkThread { + _anchor: PhantomData<Rc<()>>, + } + + impl CachedParkThread { + /// Create a new `ParkThread` handle for the current thread. + /// + /// This type cannot be moved to other threads, so it should be created on + /// the thread that the caller intends to park. + pub(crate) fn new() -> CachedParkThread { + CachedParkThread { + _anchor: PhantomData, + } + } + + pub(crate) fn get_unpark(&self) -> Result<UnparkThread, ParkError> { + self.with_current(|park_thread| park_thread.unpark()) + } + + /// Get a reference to the `ParkThread` handle for this thread. + fn with_current<F, R>(&self, f: F) -> Result<R, ParkError> + where + F: FnOnce(&ParkThread) -> R, + { + CURRENT_PARKER.try_with(|inner| f(inner)) + .map_err(|_| ()) + } + } + + impl Park for CachedParkThread { + type Unpark = UnparkThread; + type Error = ParkError; + + fn unpark(&self) -> Self::Unpark { + self.get_unpark().unwrap() + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.with_current(|park_thread| park_thread.inner.park())?; + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.with_current(|park_thread| park_thread.inner.park_timeout(duration))?; + Ok(()) + } + } + + + impl UnparkThread { + pub(crate) fn into_waker(self) -> Waker { + unsafe { + let raw = unparker_to_raw_waker(self.inner); + Waker::from_raw(raw) + } + } + } + + impl Inner { + #[allow(clippy::wrong_self_convention)] + fn into_raw(this: Arc<Inner>) -> *const () { + Arc::into_raw(this) as *const () + } + + unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> { + Arc::from_raw(ptr as *const Inner) + } + } + + unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker { + RawWaker::new( + Inner::into_raw(unparker), + &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker), + ) + } + + unsafe fn clone(raw: *const ()) -> RawWaker { + let unparker = Inner::from_raw(raw); + + // Increment the ref count + mem::forget(unparker.clone()); + + unparker_to_raw_waker(unparker) + } + + unsafe fn drop_waker(raw: *const ()) { + let _ = Inner::from_raw(raw); + } + + unsafe fn wake(raw: *const ()) { + let unparker = Inner::from_raw(raw); + unparker.unpark(); + } + + unsafe fn wake_by_ref(raw: *const ()) { + let unparker = Inner::from_raw(raw); + unparker.unpark(); + + // We don't actually own a reference to the unparker + mem::forget(unparker); + } +} diff --git a/third_party/rust/tokio/src/prelude.rs b/third_party/rust/tokio/src/prelude.rs new file mode 100644 index 0000000000..1909f9da6a --- /dev/null +++ b/third_party/rust/tokio/src/prelude.rs @@ -0,0 +1,21 @@ +#![cfg(not(loom))] + +//! A "prelude" for users of the `tokio` crate. +//! +//! This prelude is similar to the standard library's prelude in that you'll +//! almost always want to import its entire contents, but unlike the standard +//! library's prelude you'll have to do so manually: +//! +//! ``` +//! # #![allow(warnings)] +//! use tokio::prelude::*; +//! ``` +//! +//! The prelude may grow over time as additional items see ubiquitous use. + +pub use crate::io::{self, AsyncBufRead, AsyncRead, AsyncWrite}; + +cfg_io_util! 
{
+    #[doc(no_inline)]
+    pub use crate::io::{AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _};
+}
diff --git a/third_party/rust/tokio/src/process/kill.rs b/third_party/rust/tokio/src/process/kill.rs
new file mode 100644
index 0000000000..a1f1652281
--- /dev/null
+++ b/third_party/rust/tokio/src/process/kill.rs
@@ -0,0 +1,13 @@
+use std::io;
+
+/// An interface for killing a running process.
+pub(crate) trait Kill {
+    /// Forcefully kills the process.
+    fn kill(&mut self) -> io::Result<()>;
+}
+
+impl<T: Kill> Kill for &mut T {
+    fn kill(&mut self) -> io::Result<()> {
+        (**self).kill()
+    }
+}
diff --git a/third_party/rust/tokio/src/process/mod.rs b/third_party/rust/tokio/src/process/mod.rs
new file mode 100644
index 0000000000..7231511235
--- /dev/null
+++ b/third_party/rust/tokio/src/process/mod.rs
@@ -0,0 +1,1078 @@
+//! An implementation of asynchronous process management for Tokio.
+//!
+//! This module provides a [`Command`] struct that imitates the interface of the
+//! [`std::process::Command`] type in the standard library, but provides asynchronous versions of
+//! functions that create processes. These functions (`spawn`, `status`, `output` and their
+//! variants) return "future aware" types that interoperate with Tokio. The asynchronous process
+//! support is provided through signal handling on Unix and system APIs on Windows.
+//!
+//! # Examples
+//!
+//! Here's an example program which will spawn `echo hello world` and then wait
+//! for it to complete.
+//!
+//! ```no_run
+//! use tokio::process::Command;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//!     // The usage is the same as with the standard library's `Command` type, however the value
+//!     // returned from `spawn` is a `Result` containing a `Future`.
+//!     let child = Command::new("echo").arg("hello").arg("world")
+//!                         .spawn();
+//!
+//!     // Make sure our child succeeded in spawning and process the result
+//!     let future = child.expect("failed to spawn");
+//!
+//!     // Await until the future (and the command) completes
+//!     let status = future.await?;
+//!     println!("the command exited with: {}", status);
+//!     Ok(())
+//! }
+//! ```
+//!
+//! Next, let's take a look at an example where we not only spawn `echo hello
+//! world` but we also capture its output.
+//!
+//! ```no_run
+//! use tokio::process::Command;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//!     // Like above, but use `output` which returns a future instead of
+//!     // immediately returning the `Child`.
+//!     let output = Command::new("echo").arg("hello").arg("world")
+//!                         .output();
+//!
+//!     let output = output.await?;
+//!
+//!     assert!(output.status.success());
+//!     assert_eq!(output.stdout, b"hello world\n");
+//!     Ok(())
+//! }
+//! ```
+//!
+//! We can also read input line by line.
+//!
+//! ```no_run
+//! use tokio::io::{BufReader, AsyncBufReadExt};
+//! use tokio::process::Command;
+//!
+//! use std::process::Stdio;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//!     let mut cmd = Command::new("cat");
+//!
+//!     // Specify that we want the command's standard output piped back to us.
+//!     // By default, standard input/output/error will be inherited from the
+//!     // current process (for example, this means that standard input will
+//!     // come from the keyboard and standard output/error will go directly to
+//!     // the terminal if this process is invoked from the command line).
+//!
cmd.stdout(Stdio::piped());
+//!
+//!     let mut child = cmd.spawn()
+//!         .expect("failed to spawn command");
+//!
+//!     let stdout = child.stdout.take()
+//!         .expect("child did not have a handle to stdout");
+//!
+//!     let mut reader = BufReader::new(stdout).lines();
+//!
+//!     // Ensure the child process is spawned in the runtime so it can
+//!     // make progress on its own while we wait for any output.
+//!     tokio::spawn(async {
+//!         let status = child.await
+//!             .expect("child process encountered an error");
+//!
+//!         println!("child status was: {}", status);
+//!     });
+//!
+//!     while let Some(line) = reader.next_line().await? {
+//!         println!("Line: {}", line);
+//!     }
+//!
+//!     Ok(())
+//! }
+//! ```
+//!
+//! # Caveats
+//!
+//! Similar to the behavior of the standard library, and unlike the futures
+//! paradigm of dropping-implies-cancellation, a spawned process will, by
+//! default, continue to execute even after the `Child` handle has been dropped.
+//!
+//! The `Command::kill_on_drop` method can be used to modify this behavior
+//! and kill the child process if the `Child` wrapper is dropped before it
+//! has exited.
+//!
+//! [`Command`]: crate::process::Command
+
+#[path = "unix/mod.rs"]
+#[cfg(unix)]
+mod imp;
+
+#[path = "windows.rs"]
+#[cfg(windows)]
+mod imp;
+
+mod kill;
+
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::process::kill::Kill;
+
+use std::ffi::OsStr;
+use std::future::Future;
+use std::io;
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+#[cfg(windows)]
+use std::os::windows::process::CommandExt;
+use std::path::Path;
+use std::pin::Pin;
+use std::process::{Command as StdCommand, ExitStatus, Output, Stdio};
+use std::task::Context;
+use std::task::Poll;
+
+/// This structure mimics the API of [`std::process::Command`] found in the standard library, but
+/// replaces functions that create a process with an asynchronous variant. The main provided
+/// asynchronous functions are [spawn](Command::spawn), [status](Command::status), and
+/// [output](Command::output).
+///
+/// `Command` uses asynchronous versions of some `std` types (for example [`Child`]).
+#[derive(Debug)]
+pub struct Command {
+    std: StdCommand,
+    kill_on_drop: bool,
+}
+
+pub(crate) struct SpawnedChild {
+    child: imp::Child,
+    stdin: Option<imp::ChildStdin>,
+    stdout: Option<imp::ChildStdout>,
+    stderr: Option<imp::ChildStderr>,
+}
+
+impl Command {
+    /// Constructs a new `Command` for launching the program at
+    /// path `program`, with the following default configuration:
+    ///
+    /// * No arguments to the program
+    /// * Inherit the current process's environment
+    /// * Inherit the current process's working directory
+    /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
+    ///
+    /// Builder methods are provided to change these defaults and
+    /// otherwise configure the process.
+    ///
+    /// If `program` is not an absolute path, the `PATH` will be searched in
+    /// an OS-defined way.
+    ///
+    /// The search path to be used may be controlled by setting the
+    /// `PATH` environment variable on the Command,
+    /// but this has some implementation limitations on Windows
+    /// (see issue rust-lang/rust#37519).
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    /// let command = Command::new("sh");
+    /// ```
+    pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
+        Self::from(StdCommand::new(program))
+    }
+
+    /// Adds an argument to pass to the program.
+    ///
+    /// Only one argument can be passed per use.
So instead of: + /// + /// ```no_run + /// tokio::process::Command::new("sh") + /// .arg("-C /path/to/repo"); + /// ``` + /// + /// usage would be: + /// + /// ```no_run + /// tokio::process::Command::new("sh") + /// .arg("-C") + /// .arg("/path/to/repo"); + /// ``` + /// + /// To pass multiple arguments see [`args`]. + /// + /// [`args`]: #method.args + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// + /// let command = Command::new("ls") + /// .arg("-l") + /// .arg("-a"); + /// ``` + pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command { + self.std.arg(arg); + self + } + + /// Adds multiple arguments to pass to the program. + /// + /// To pass a single argument see [`arg`]. + /// + /// [`arg`]: #method.arg + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// + /// let command = Command::new("ls") + /// .args(&["-l", "-a"]); + /// ``` + pub fn args<I, S>(&mut self, args: I) -> &mut Command + where + I: IntoIterator<Item = S>, + S: AsRef<OsStr>, + { + self.std.args(args); + self + } + + /// Inserts or updates an environment variable mapping. + /// + /// Note that environment variable names are case-insensitive (but case-preserving) on Windows, + /// and case-sensitive on all other platforms. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// + /// let command = Command::new("ls") + /// .env("PATH", "/bin"); + /// ``` + pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command + where + K: AsRef<OsStr>, + V: AsRef<OsStr>, + { + self.std.env(key, val); + self + } + + /// Adds or updates multiple environment variable mappings. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// use std::process::{Stdio}; + /// use std::env; + /// use std::collections::HashMap; + /// + /// let filtered_env : HashMap<String, String> = + /// env::vars().filter(|&(ref k, _)| + /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH" + /// ).collect(); + /// + /// let command = Command::new("printenv") + /// .stdin(Stdio::null()) + /// .stdout(Stdio::inherit()) + /// .env_clear() + /// .envs(&filtered_env); + /// ``` + pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command + where + I: IntoIterator<Item = (K, V)>, + K: AsRef<OsStr>, + V: AsRef<OsStr>, + { + self.std.envs(vars); + self + } + + /// Removes an environment variable mapping. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// + /// let command = Command::new("ls") + /// .env_remove("PATH"); + /// ``` + pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command { + self.std.env_remove(key); + self + } + + /// Clears the entire environment map for the child process. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ```no_run + /// use tokio::process::Command; + /// + /// let command = Command::new("ls") + /// .env_clear(); + /// ``` + pub fn env_clear(&mut self) -> &mut Command { + self.std.env_clear(); + self + } + + /// Sets the working directory for the child process. + /// + /// # Platform-specific behavior + /// + /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous + /// whether it should be interpreted relative to the parent's working + /// directory or relative to `current_dir`. 
The behavior in this case is
+    /// platform specific and unstable, and it's recommended to use
+    /// [`canonicalize`] to get an absolute program path instead.
+    ///
+    /// [`canonicalize`]: crate::fs::canonicalize()
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// let command = Command::new("ls")
+    ///         .current_dir("/bin");
+    /// ```
+    pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command {
+        self.std.current_dir(dir);
+        self
+    }
+
+    /// Sets configuration for the child process's standard input (stdin) handle.
+    ///
+    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+    /// defaults to [`piped`] when used with `output`.
+    ///
+    /// [`inherit`]: std::process::Stdio::inherit
+    /// [`piped`]: std::process::Stdio::piped
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use std::process::{Stdio};
+    /// use tokio::process::Command;
+    ///
+    /// let command = Command::new("ls")
+    ///         .stdin(Stdio::null());
+    /// ```
+    pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+        self.std.stdin(cfg);
+        self
+    }
+
+    /// Sets configuration for the child process's standard output (stdout) handle.
+    ///
+    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+    /// defaults to [`piped`] when used with `output`.
+    ///
+    /// [`inherit`]: std::process::Stdio::inherit
+    /// [`piped`]: std::process::Stdio::piped
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    /// use std::process::Stdio;
+    ///
+    /// let command = Command::new("ls")
+    ///         .stdout(Stdio::null());
+    /// ```
+    pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+        self.std.stdout(cfg);
+        self
+    }
+
+    /// Sets configuration for the child process's standard error (stderr) handle.
+    ///
+    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+    /// defaults to [`piped`] when used with `output`.
+    ///
+    /// [`inherit`]: std::process::Stdio::inherit
+    /// [`piped`]: std::process::Stdio::piped
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    /// use std::process::{Stdio};
+    ///
+    /// let command = Command::new("ls")
+    ///         .stderr(Stdio::null());
+    /// ```
+    pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+        self.std.stderr(cfg);
+        self
+    }
+
+    /// Controls whether a `kill` operation should be invoked on a spawned child
+    /// process when its corresponding `Child` handle is dropped.
+    ///
+    /// By default, this value is assumed to be `false`, meaning the next spawned
+    /// process will not be killed on drop, similar to the behavior of the standard
+    /// library.
+    pub fn kill_on_drop(&mut self, kill_on_drop: bool) -> &mut Command {
+        self.kill_on_drop = kill_on_drop;
+        self
+    }
+
+    /// Sets the [process creation flags][1] to be passed to `CreateProcess`.
+    ///
+    /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`.
+    ///
+    /// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
+    #[cfg(windows)]
+    pub fn creation_flags(&mut self, flags: u32) -> &mut Command {
+        self.std.creation_flags(flags);
+        self
+    }
+
+    /// Sets the child process's user ID. This translates to a
+    /// `setuid` call in the child process. Failure in the `setuid`
+    /// call will cause the spawn to fail.
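+    ///
+    /// A minimal sketch (the uid value here is arbitrary, and applying it
+    /// requires sufficient privileges at spawn time):
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// let command = Command::new("ls")
+    ///         .uid(1000);
+    /// ```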
+    #[cfg(unix)]
+    pub fn uid(&mut self, id: u32) -> &mut Command {
+        self.std.uid(id);
+        self
+    }
+
+    /// Similar to `uid` but sets the group ID of the child process. This has
+    /// the same semantics as the `uid` method.
+    #[cfg(unix)]
+    pub fn gid(&mut self, id: u32) -> &mut Command {
+        self.std.gid(id);
+        self
+    }
+
+    /// Schedules a closure to be run just before the `exec` function is
+    /// invoked.
+    ///
+    /// The closure is allowed to return an I/O error whose OS error code will
+    /// be communicated back to the parent and returned as an error from when
+    /// the spawn was requested.
+    ///
+    /// Multiple closures can be registered and they will be called in order of
+    /// their registration. If a closure returns `Err` then no further closures
+    /// will be called and the spawn operation will immediately return with a
+    /// failure.
+    ///
+    /// # Safety
+    ///
+    /// This closure will be run in the context of the child process after a
+    /// `fork`. This primarily means that any modifications made to memory on
+    /// behalf of this closure will **not** be visible to the parent process.
+    /// This is often a very constrained environment where normal operations
+    /// like `malloc` or acquiring a mutex are not guaranteed to work (due to
+    /// other threads perhaps still running when the `fork` was run).
+    ///
+    /// This also means that all resources such as file descriptors and
+    /// memory-mapped regions have been duplicated. It is your responsibility
+    /// to make sure that the closure does not violate library invariants by
+    /// making invalid use of these duplicates.
+    ///
+    /// When this closure is run, aspects such as the stdio file descriptors and
+    /// working directory have successfully been changed, so output to these
+    /// locations may not appear where intended.
+    #[cfg(unix)]
+    pub unsafe fn pre_exec<F>(&mut self, f: F) -> &mut Command
+    where
+        F: FnMut() -> io::Result<()> + Send + Sync + 'static,
+    {
+        self.std.pre_exec(f);
+        self
+    }
+
+    /// Executes the command as a child process, returning a handle to it.
+    ///
+    /// By default, stdin, stdout and stderr are inherited from the parent.
+    ///
+    /// This method will spawn the child process synchronously and return a
+    /// handle to a future-aware child process. The `Child` returned implements
+    /// `Future` itself to acquire the `ExitStatus` of the child, and otherwise
+    /// the `Child` has methods to acquire handles to the stdin, stdout, and
+    /// stderr streams.
+    ///
+    /// All I/O this child does will be associated with the current default
+    /// event loop.
+    ///
+    /// # Caveats
+    ///
+    /// Similar to the behavior of the standard library, and unlike the futures
+    /// paradigm of dropping-implies-cancellation, the spawned process will, by
+    /// default, continue to execute even after the `Child` handle has been dropped.
+    ///
+    /// The `Command::kill_on_drop` method can be used to modify this behavior
+    /// and kill the child process if the `Child` wrapper is dropped before it
+    /// has exited.
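+    ///
+    /// A sketch of opting in to the kill-on-drop behavior (the command here
+    /// is arbitrary):
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// let child = Command::new("sleep").arg("60")
+    ///     .kill_on_drop(true)
+    ///     .spawn()
+    ///     .expect("failed to spawn");
+    ///
+    /// // Dropping the handle now kills the process; by default it would
+    /// // keep running in the background.
+    /// drop(child);
+    /// ```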
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// async fn run_ls() -> std::process::ExitStatus {
+    ///     Command::new("ls")
+    ///         .spawn()
+    ///         .expect("ls command failed to start")
+    ///         .await
+    ///         .expect("ls command failed to run")
+    /// }
+    /// ```
+    pub fn spawn(&mut self) -> io::Result<Child> {
+        imp::spawn_child(&mut self.std).map(|spawned_child| Child {
+            child: ChildDropGuard {
+                inner: spawned_child.child,
+                kill_on_drop: self.kill_on_drop,
+            },
+            stdin: spawned_child.stdin.map(|inner| ChildStdin { inner }),
+            stdout: spawned_child.stdout.map(|inner| ChildStdout { inner }),
+            stderr: spawned_child.stderr.map(|inner| ChildStderr { inner }),
+        })
+    }
+
+    /// Executes the command as a child process, waiting for it to finish and
+    /// collecting its exit status.
+    ///
+    /// By default, stdin, stdout and stderr are inherited from the parent.
+    /// If any input/output handles are set to a pipe then they will be immediately
+    /// closed after the child is spawned.
+    ///
+    /// All I/O this child does will be associated with the current default
+    /// event loop.
+    ///
+    /// If this future is dropped before it resolves, then the child will be
+    /// killed, if it was spawned.
+    ///
+    /// # Errors
+    ///
+    /// This future will return an error if the child process cannot be spawned
+    /// or if there is an error while awaiting its status.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// async fn run_ls() -> std::process::ExitStatus {
+    ///     Command::new("ls")
+    ///         .status()
+    ///         .await
+    ///         .expect("ls command failed to run")
+    /// }
+    /// ```
+    pub fn status(&mut self) -> impl Future<Output = io::Result<ExitStatus>> {
+        let child = self.spawn();
+
+        async {
+            let mut child = child?;
+
+            // Ensure we close any stdio handles so we can't deadlock
+            // waiting on the child which may be waiting to read/write
+            // to a pipe we're holding.
+            child.stdin.take();
+            child.stdout.take();
+            child.stderr.take();
+
+            child.await
+        }
+    }
+
+    /// Executes the command as a child process, waiting for it to finish and
+    /// collecting all of its output.
+    ///
+    /// > **Note**: this method, unlike the standard library, will
+    /// > unconditionally configure the stdout/stderr handles to be pipes, even
+    /// > if they have been previously configured. If this is not desired then
+    /// > the `spawn` method should be used in combination with the
+    /// > `wait_with_output` method on child.
+    ///
+    /// This method will return a future representing the collection of the
+    /// child process's stdout/stderr. It will resolve to
+    /// the `Output` type in the standard library, containing `stdout` and
+    /// `stderr` as `Vec<u8>` along with an `ExitStatus` representing how the
+    /// process exited.
+    ///
+    /// All I/O this child does will be associated with the current default
+    /// event loop.
+    ///
+    /// If this future is dropped before it resolves, then the child will be
+    /// killed, if it was spawned.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    ///
+    /// async fn run_ls() {
+    ///     let output: std::process::Output = Command::new("ls")
+    ///         .output()
+    ///         .await
+    ///         .expect("ls command failed to run");
+    ///     println!("stderr of ls: {:?}", output.stderr);
+    /// }
+    /// ```
+    pub fn output(&mut self) -> impl Future<Output = io::Result<Output>> {
+        self.std.stdout(Stdio::piped());
+        self.std.stderr(Stdio::piped());
+
+        let child = self.spawn();
+
+        async { child?.wait_with_output().await }
+    }
+}
+
+impl From<StdCommand> for Command {
+    fn from(std: StdCommand) -> Command {
+        Command {
+            std,
+            kill_on_drop: false,
+        }
+    }
+}
+
+/// A drop guard which can ensure the child process is killed on drop if specified.
+#[derive(Debug)]
+struct ChildDropGuard<T: Kill> {
+    inner: T,
+    kill_on_drop: bool,
+}
+
+impl<T: Kill> Kill for ChildDropGuard<T> {
+    fn kill(&mut self) -> io::Result<()> {
+        let ret = self.inner.kill();
+
+        if ret.is_ok() {
+            self.kill_on_drop = false;
+        }
+
+        ret
+    }
+}
+
+impl<T: Kill> Drop for ChildDropGuard<T> {
+    fn drop(&mut self) {
+        if self.kill_on_drop {
+            drop(self.kill());
+        }
+    }
+}
+
+impl<T, E, F> Future for ChildDropGuard<F>
+where
+    F: Future<Output = Result<T, E>> + Kill + Unpin,
+{
+    type Output = Result<T, E>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Keep track of task budget
+        ready!(crate::coop::poll_proceed(cx));
+
+        let ret = Pin::new(&mut self.inner).poll(cx);
+
+        if let Poll::Ready(Ok(_)) = ret {
+            // Avoid the overhead of trying to kill a reaped process
+            self.kill_on_drop = false;
+        }
+
+        ret
+    }
+}
+
+/// Representation of a child process spawned onto an event loop.
+///
+/// This type is also a future which will yield the `ExitStatus` of the
+/// underlying child process. A `Child` here also provides access to information
+/// like the OS-assigned identifier and the stdio streams.
+///
+/// # Caveats
+///
+/// Similar to the behavior of the standard library, and unlike the futures
+/// paradigm of dropping-implies-cancellation, a spawned process will, by
+/// default, continue to execute even after the `Child` handle has been dropped.
+///
+/// The `Command::kill_on_drop` method can be used to modify this behavior
+/// and kill the child process if the `Child` wrapper is dropped before it
+/// has exited.
+#[must_use = "futures do nothing unless polled"]
+#[derive(Debug)]
+pub struct Child {
+    child: ChildDropGuard<imp::Child>,
+
+    /// The handle for writing to the child's standard input (stdin), if it has
+    /// been captured.
+    pub stdin: Option<ChildStdin>,
+
+    /// The handle for reading from the child's standard output (stdout), if it
+    /// has been captured.
+    pub stdout: Option<ChildStdout>,
+
+    /// The handle for reading from the child's standard error (stderr), if it
+    /// has been captured.
+    pub stderr: Option<ChildStderr>,
+}
+
+impl Child {
+    /// Returns the OS-assigned process identifier associated with this child.
+    pub fn id(&self) -> u32 {
+        self.child.inner.id()
+    }
+
+    /// Forces the child to exit.
+    ///
+    /// This is equivalent to sending a SIGKILL on unix platforms.
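+    ///
+    /// A sketch of enforcing a deadline (the command and the timeout are
+    /// arbitrary; polling `&mut child` relies on `Child` being `Unpin`):
+    ///
+    /// ```no_run
+    /// use tokio::process::Command;
+    /// use tokio::time::{timeout, Duration};
+    ///
+    /// async fn run_with_deadline() -> std::io::Result<()> {
+    ///     let mut child = Command::new("sleep").arg("10").spawn()?;
+    ///
+    ///     if timeout(Duration::from_secs(1), &mut child).await.is_err() {
+    ///         // The deadline elapsed before the child exited.
+    ///         child.kill()?;
+    ///     }
+    ///     Ok(())
+    /// }
+    /// ```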
+    pub fn kill(&mut self) -> io::Result<()> {
+        self.child.kill()
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "please use `child.stdin` instead")]
+    pub fn stdin(&mut self) -> &mut Option<ChildStdin> {
+        &mut self.stdin
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "please use `child.stdout` instead")]
+    pub fn stdout(&mut self) -> &mut Option<ChildStdout> {
+        &mut self.stdout
+    }
+
+    #[doc(hidden)]
+    #[deprecated(note = "please use `child.stderr` instead")]
+    pub fn stderr(&mut self) -> &mut Option<ChildStderr> {
+        &mut self.stderr
+    }
+
+    /// Returns a future that will resolve to an `Output`, containing the exit
+    /// status, stdout, and stderr of the child process.
+    ///
+    /// The returned future will simultaneously wait for the child to exit and
+    /// collect all remaining output on the stdout/stderr handles, returning an
+    /// `Output` instance.
+    ///
+    /// The stdin handle to the child process, if any, will be closed before
+    /// waiting. This helps avoid deadlock: it ensures that the child does not
+    /// block waiting for input from the parent, while the parent waits for the
+    /// child to exit.
+    ///
+    /// By default, stdin, stdout and stderr are inherited from the parent. In
+    /// order to capture the output into this `Output` it is necessary to create
+    /// new pipes between parent and child. Use `stdout(Stdio::piped())` or
+    /// `stderr(Stdio::piped())`, respectively, when creating a `Command`.
+    pub async fn wait_with_output(mut self) -> io::Result<Output> {
+        use crate::future::try_join3;
+
+        async fn read_to_end<A: AsyncRead + Unpin>(io: Option<A>) -> io::Result<Vec<u8>> {
+            let mut vec = Vec::new();
+            if let Some(mut io) = io {
+                crate::io::util::read_to_end(&mut io, &mut vec).await?;
+            }
+            Ok(vec)
+        }
+
+        drop(self.stdin.take());
+        let stdout_fut = read_to_end(self.stdout.take());
+        let stderr_fut = read_to_end(self.stderr.take());
+
+        let (status, stdout, stderr) = try_join3(self, stdout_fut, stderr_fut).await?;
+
+        Ok(Output {
+            status,
+            stdout,
+            stderr,
+        })
+    }
+}
+
+impl Future for Child {
+    type Output = io::Result<ExitStatus>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        Pin::new(&mut self.child).poll(cx)
+    }
+}
+
+/// The standard input stream for spawned children.
+///
+/// This type implements the `AsyncWrite` trait to pass data to the stdin
+/// handle of a child process asynchronously.
+#[derive(Debug)]
+pub struct ChildStdin {
+    inner: imp::ChildStdin,
+}
+
+/// The standard output stream for spawned children.
+///
+/// This type implements the `AsyncRead` trait to read data from the stdout
+/// handle of a child process asynchronously.
+#[derive(Debug)]
+pub struct ChildStdout {
+    inner: imp::ChildStdout,
+}
+
+/// The standard error stream for spawned children.
+///
+/// This type implements the `AsyncRead` trait to read data from the stderr
+/// handle of a child process asynchronously.
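+///
+/// A sketch of capturing stderr (the command and its flag are arbitrary):
+///
+/// ```no_run
+/// use std::process::Stdio;
+/// use tokio::io::AsyncReadExt;
+/// use tokio::process::Command;
+///
+/// async fn capture_stderr() -> std::io::Result<Vec<u8>> {
+///     let mut child = Command::new("ls").arg("--bogus")
+///         .stderr(Stdio::piped())
+///         .spawn()?;
+///
+///     let mut stderr = child.stderr.take().expect("stderr was piped");
+///     let mut buf = Vec::new();
+///     // `ChildStderr` implements `AsyncRead`, so the extension methods apply.
+///     stderr.read_to_end(&mut buf).await?;
+///     Ok(buf)
+/// }
+/// ```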
+#[derive(Debug)] +pub struct ChildStderr { + inner: imp::ChildStderr, +} + +impl AsyncWrite for ChildStdin { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll<io::Result<usize>> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } +} + +impl AsyncRead for ChildStdout { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl AsyncRead for ChildStderr { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll<io::Result<usize>> { + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +#[cfg(unix)] +mod sys { + use std::os::unix::io::{AsRawFd, RawFd}; + + use super::{ChildStderr, ChildStdin, ChildStdout}; + + impl AsRawFd for ChildStdin { + fn as_raw_fd(&self) -> RawFd { + self.inner.get_ref().as_raw_fd() + } + } + + impl AsRawFd for ChildStdout { + fn as_raw_fd(&self) -> RawFd { + self.inner.get_ref().as_raw_fd() + } + } + + impl AsRawFd for ChildStderr { + fn as_raw_fd(&self) -> RawFd { + self.inner.get_ref().as_raw_fd() + } + } +} + +#[cfg(windows)] +mod sys { + use std::os::windows::io::{AsRawHandle, RawHandle}; + + use super::{ChildStderr, ChildStdin, ChildStdout}; + + impl AsRawHandle for ChildStdin { + fn as_raw_handle(&self) -> RawHandle { + self.inner.get_ref().as_raw_handle() + } + } + + impl AsRawHandle for ChildStdout { + fn as_raw_handle(&self) -> RawHandle { + self.inner.get_ref().as_raw_handle() + } + } + + impl AsRawHandle for ChildStderr { + fn as_raw_handle(&self) -> RawHandle { + self.inner.get_ref().as_raw_handle() + } + } +} + +#[cfg(all(test, not(loom)))] +mod test { + use super::kill::Kill; + use super::ChildDropGuard; + + use futures::future::FutureExt; + use std::future::Future; + use std::io; + use std::pin::Pin; + use std::task::{Context, Poll}; + + struct Mock { + num_kills: usize, + num_polls: usize, + poll_result: Poll<Result<(), ()>>, + } + + impl Mock { + fn new() -> Self { + Self::with_result(Poll::Pending) + } + + fn with_result(result: Poll<Result<(), ()>>) -> Self { + Self { + num_kills: 0, + num_polls: 0, + poll_result: result, + } + } + } + + impl Kill for Mock { + fn kill(&mut self) -> io::Result<()> { + self.num_kills += 1; + Ok(()) + } + } + + impl Future for Mock { + type Output = Result<(), ()>; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> { + let inner = Pin::get_mut(self); + inner.num_polls += 1; + inner.poll_result + } + } + + #[test] + fn kills_on_drop_if_specified() { + let mut mock = Mock::new(); + + { + let guard = ChildDropGuard { + inner: &mut mock, + kill_on_drop: true, + }; + drop(guard); + } + + assert_eq!(1, mock.num_kills); + assert_eq!(0, mock.num_polls); + } + + #[test] + fn no_kill_on_drop_by_default() { + let mut mock = Mock::new(); + + { + let guard = ChildDropGuard { + inner: &mut mock, + kill_on_drop: false, + }; + drop(guard); + } + + assert_eq!(0, mock.num_kills); + assert_eq!(0, mock.num_polls); + } + + #[test] + fn no_kill_if_already_killed() { + let mut mock = Mock::new(); + + { + let mut guard = ChildDropGuard { + inner: &mut mock, + kill_on_drop: true, + }; + let _ = guard.kill(); + drop(guard); + } + 
+ assert_eq!(1, mock.num_kills); + assert_eq!(0, mock.num_polls); + } + + #[test] + fn no_kill_if_reaped() { + let mut mock_pending = Mock::with_result(Poll::Pending); + let mut mock_reaped = Mock::with_result(Poll::Ready(Ok(()))); + let mut mock_err = Mock::with_result(Poll::Ready(Err(()))); + + let waker = futures::task::noop_waker(); + let mut context = Context::from_waker(&waker); + { + let mut guard = ChildDropGuard { + inner: &mut mock_pending, + kill_on_drop: true, + }; + let _ = guard.poll_unpin(&mut context); + + let mut guard = ChildDropGuard { + inner: &mut mock_reaped, + kill_on_drop: true, + }; + let _ = guard.poll_unpin(&mut context); + + let mut guard = ChildDropGuard { + inner: &mut mock_err, + kill_on_drop: true, + }; + let _ = guard.poll_unpin(&mut context); + } + + assert_eq!(1, mock_pending.num_kills); + assert_eq!(1, mock_pending.num_polls); + + assert_eq!(0, mock_reaped.num_kills); + assert_eq!(1, mock_reaped.num_polls); + + assert_eq!(1, mock_err.num_kills); + assert_eq!(1, mock_err.num_polls); + } +} diff --git a/third_party/rust/tokio/src/process/unix/mod.rs b/third_party/rust/tokio/src/process/unix/mod.rs new file mode 100644 index 0000000000..c25d98974a --- /dev/null +++ b/third_party/rust/tokio/src/process/unix/mod.rs @@ -0,0 +1,227 @@ +//! Unix handling of child processes +//! +//! Right now the only "fancy" thing about this is how we implement the +//! `Future` implementation on `Child` to get the exit status. Unix offers +//! no way to register a child with epoll, and the only real way to get a +//! notification when a process exits is the SIGCHLD signal. +//! +//! Signal handling in general is *super* hairy and complicated, and it's even +//! more complicated here with the fact that signals are coalesced, so we may +//! not get a SIGCHLD-per-child. +//! +//! Our best approximation here is to check *all spawned processes* for all +//! SIGCHLD signals received. To do that we create a `Signal`, implemented in +//! the `tokio-net` crate, which is a stream over signals being received. +//! +//! Later when we poll the process's exit status we simply check to see if a +//! SIGCHLD has happened since we last checked, and while that returns "yes" we +//! keep trying. +//! +//! Note that this means that this isn't really scalable, but then again +//! processes in general aren't scalable (e.g. millions) so it shouldn't be that +//! bad in theory... + +mod orphan; +use orphan::{OrphanQueue, OrphanQueueImpl, Wait}; + +mod reap; +use reap::Reaper; + +use crate::io::PollEvented; +use crate::process::kill::Kill; +use crate::process::SpawnedChild; +use crate::signal::unix::{signal, Signal, SignalKind}; + +use mio::event::Evented; +use mio::unix::{EventedFd, UnixReady}; +use mio::{Poll as MioPoll, PollOpt, Ready, Token}; +use std::fmt; +use std::future::Future; +use std::io; +use std::os::unix::io::{AsRawFd, RawFd}; +use std::pin::Pin; +use std::process::ExitStatus; +use std::task::Context; +use std::task::Poll; + +impl Wait for std::process::Child { + fn id(&self) -> u32 { + self.id() + } + + fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + self.try_wait() + } +} + +impl Kill for std::process::Child { + fn kill(&mut self) -> io::Result<()> { + self.kill() + } +} + +lazy_static::lazy_static! 
{ + static ref ORPHAN_QUEUE: OrphanQueueImpl<std::process::Child> = OrphanQueueImpl::new(); +} + +struct GlobalOrphanQueue; + +impl fmt::Debug for GlobalOrphanQueue { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + ORPHAN_QUEUE.fmt(fmt) + } +} + +impl OrphanQueue<std::process::Child> for GlobalOrphanQueue { + fn push_orphan(&self, orphan: std::process::Child) { + ORPHAN_QUEUE.push_orphan(orphan) + } + + fn reap_orphans(&self) { + ORPHAN_QUEUE.reap_orphans() + } +} + +#[must_use = "futures do nothing unless polled"] +pub(crate) struct Child { + inner: Reaper<std::process::Child, GlobalOrphanQueue, Signal>, +} + +impl fmt::Debug for Child { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Child") + .field("pid", &self.inner.id()) + .finish() + } +} + +pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result<SpawnedChild> { + let mut child = cmd.spawn()?; + let stdin = stdio(child.stdin.take())?; + let stdout = stdio(child.stdout.take())?; + let stderr = stdio(child.stderr.take())?; + + let signal = signal(SignalKind::child())?; + + Ok(SpawnedChild { + child: Child { + inner: Reaper::new(child, GlobalOrphanQueue, signal), + }, + stdin, + stdout, + stderr, + }) +} + +impl Child { + pub(crate) fn id(&self) -> u32 { + self.inner.id() + } +} + +impl Kill for Child { + fn kill(&mut self) -> io::Result<()> { + self.inner.kill() + } +} + +impl Future for Child { + type Output = io::Result<ExitStatus>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + Pin::new(&mut self.inner).poll(cx) + } +} + +#[derive(Debug)] +pub(crate) struct Fd<T> { + inner: T, +} + +impl<T> io::Read for Fd<T> +where + T: io::Read, +{ + fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { + self.inner.read(bytes) + } +} + +impl<T> io::Write for Fd<T> +where + T: io::Write, +{ + fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { + self.inner.write(bytes) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl<T> AsRawFd for Fd<T> +where + T: AsRawFd, +{ + fn as_raw_fd(&self) -> RawFd { + self.inner.as_raw_fd() + } +} + +impl<T> Evented for Fd<T> +where + T: AsRawFd, +{ + fn register( + &self, + poll: &MioPoll, + token: Token, + interest: Ready, + opts: PollOpt, + ) -> io::Result<()> { + EventedFd(&self.as_raw_fd()).register(poll, token, interest | UnixReady::hup(), opts) + } + + fn reregister( + &self, + poll: &MioPoll, + token: Token, + interest: Ready, + opts: PollOpt, + ) -> io::Result<()> { + EventedFd(&self.as_raw_fd()).reregister(poll, token, interest | UnixReady::hup(), opts) + } + + fn deregister(&self, poll: &MioPoll) -> io::Result<()> { + EventedFd(&self.as_raw_fd()).deregister(poll) + } +} + +pub(crate) type ChildStdin = PollEvented<Fd<std::process::ChildStdin>>; +pub(crate) type ChildStdout = PollEvented<Fd<std::process::ChildStdout>>; +pub(crate) type ChildStderr = PollEvented<Fd<std::process::ChildStderr>>; + +fn stdio<T>(option: Option<T>) -> io::Result<Option<PollEvented<Fd<T>>>> +where + T: AsRawFd, +{ + let io = match option { + Some(io) => io, + None => return Ok(None), + }; + + // Set the fd to nonblocking before we pass it to the event loop + unsafe { + let fd = io.as_raw_fd(); + let r = libc::fcntl(fd, libc::F_GETFL); + if r == -1 { + return Err(io::Error::last_os_error()); + } + let r = libc::fcntl(fd, libc::F_SETFL, r | libc::O_NONBLOCK); + if r == -1 { + return Err(io::Error::last_os_error()); + } + } + Ok(Some(PollEvented::new(Fd { inner: io })?)) +} diff --git 
a/third_party/rust/tokio/src/process/unix/orphan.rs b/third_party/rust/tokio/src/process/unix/orphan.rs
new file mode 100644
index 0000000000..6c449a9093
--- /dev/null
+++ b/third_party/rust/tokio/src/process/unix/orphan.rs
@@ -0,0 +1,191 @@
+use std::io;
+use std::process::ExitStatus;
+use std::sync::Mutex;
+
+/// An interface for waiting on a process to exit.
+pub(crate) trait Wait {
+    /// Get the identifier for this process, for diagnostics.
+    fn id(&self) -> u32;
+    /// Try waiting for a process to exit in a non-blocking manner.
+    fn try_wait(&mut self) -> io::Result<Option<ExitStatus>>;
+}
+
+impl<T: Wait> Wait for &mut T {
+    fn id(&self) -> u32 {
+        (**self).id()
+    }
+
+    fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+        (**self).try_wait()
+    }
+}
+
+/// An interface for queueing up an orphaned process so that it can be reaped.
+pub(crate) trait OrphanQueue<T> {
+    /// Adds an orphan to the queue.
+    fn push_orphan(&self, orphan: T);
+    /// Attempts to reap every process in the queue, ignoring any errors and
+    /// enqueueing any orphans which have not yet exited.
+    fn reap_orphans(&self);
+}
+
+impl<T, O: OrphanQueue<T>> OrphanQueue<T> for &O {
+    fn push_orphan(&self, orphan: T) {
+        (**self).push_orphan(orphan);
+    }
+
+    fn reap_orphans(&self) {
+        (**self).reap_orphans()
+    }
+}
+
+/// An implementation of `OrphanQueue`.
+#[derive(Debug)]
+pub(crate) struct OrphanQueueImpl<T> {
+    queue: Mutex<Vec<T>>,
+}
+
+impl<T> OrphanQueueImpl<T> {
+    pub(crate) fn new() -> Self {
+        Self {
+            queue: Mutex::new(Vec::new()),
+        }
+    }
+
+    #[cfg(test)]
+    fn len(&self) -> usize {
+        self.queue.lock().unwrap().len()
+    }
+}
+
+impl<T: Wait> OrphanQueue<T> for OrphanQueueImpl<T> {
+    fn push_orphan(&self, orphan: T) {
+        self.queue.lock().unwrap().push(orphan)
+    }
+
+    fn reap_orphans(&self) {
+        let mut queue = self.queue.lock().unwrap();
+        let queue = &mut *queue;
+
+        let mut i = 0;
+        while i < queue.len() {
+            match queue[i].try_wait() {
+                Ok(Some(_)) => {}
+                Err(_) => {
+                    // TODO: bubble up the error somehow. Is this an internal bug?
+                    // Should we panic? Is it OK for this to be silently
+                    // dropped?
+                }
+                // Still not done yet
+                Ok(None) => {
+                    i += 1;
+                    continue;
+                }
+            }
+
+            queue.remove(i);
+        }
+    }
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+    use super::Wait;
+    use super::{OrphanQueue, OrphanQueueImpl};
+    use std::cell::Cell;
+    use std::io;
+    use std::os::unix::process::ExitStatusExt;
+    use std::process::ExitStatus;
+    use std::rc::Rc;
+
+    struct MockWait {
+        total_waits: Rc<Cell<usize>>,
+        num_wait_until_status: usize,
+        return_err: bool,
+    }
+
+    impl MockWait {
+        fn new(num_wait_until_status: usize) -> Self {
+            Self {
+                total_waits: Rc::new(Cell::new(0)),
+                num_wait_until_status,
+                return_err: false,
+            }
+        }
+
+        fn with_err() -> Self {
+            Self {
+                total_waits: Rc::new(Cell::new(0)),
+                num_wait_until_status: 0,
+                return_err: true,
+            }
+        }
+    }
+
+    impl Wait for MockWait {
+        fn id(&self) -> u32 {
+            42
+        }
+
+        fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+            let waits = self.total_waits.get();
+
+            let ret = if self.num_wait_until_status == waits {
+                if self.return_err {
+                    Err(io::Error::new(io::ErrorKind::Other, "mock err"))
+                } else {
+                    Ok(Some(ExitStatus::from_raw(0)))
+                }
+            } else {
+                Ok(None)
+            };
+
+            self.total_waits.set(waits + 1);
+            ret
+        }
+    }
+
+    #[test]
+    fn drain_attempts_a_single_reap_of_all_queued_orphans() {
+        let first_orphan = MockWait::new(0);
+        let second_orphan = MockWait::new(1);
+        let third_orphan = MockWait::new(2);
+        let fourth_orphan = MockWait::with_err();
+
+        let first_waits = first_orphan.total_waits.clone();
+        let second_waits = second_orphan.total_waits.clone();
+        let third_waits = third_orphan.total_waits.clone();
+        let fourth_waits = fourth_orphan.total_waits.clone();
+
+        let orphanage = OrphanQueueImpl::new();
+        orphanage.push_orphan(first_orphan);
+        orphanage.push_orphan(third_orphan);
+        orphanage.push_orphan(second_orphan);
+        orphanage.push_orphan(fourth_orphan);
+
+        assert_eq!(orphanage.len(), 4);
+
+        orphanage.reap_orphans();
+        assert_eq!(orphanage.len(), 2);
+        assert_eq!(first_waits.get(), 1);
+        assert_eq!(second_waits.get(), 1);
+        assert_eq!(third_waits.get(), 1);
+        assert_eq!(fourth_waits.get(), 1);
+
+        orphanage.reap_orphans();
+        assert_eq!(orphanage.len(), 1);
+        assert_eq!(first_waits.get(), 1);
+        assert_eq!(second_waits.get(), 2);
+        assert_eq!(third_waits.get(), 2);
+        assert_eq!(fourth_waits.get(), 1);
+
+        orphanage.reap_orphans();
+        assert_eq!(orphanage.len(), 0);
+        assert_eq!(first_waits.get(), 1);
+        assert_eq!(second_waits.get(), 2);
+        assert_eq!(third_waits.get(), 3);
+        assert_eq!(fourth_waits.get(), 1);
+
+        orphanage.reap_orphans(); // Safe to reap when empty
+    }
+}
diff --git a/third_party/rust/tokio/src/process/unix/reap.rs b/third_party/rust/tokio/src/process/unix/reap.rs
new file mode 100644
index 0000000000..8963805afe
--- /dev/null
+++ b/third_party/rust/tokio/src/process/unix/reap.rs
@@ -0,0 +1,342 @@
+use crate::process::imp::orphan::{OrphanQueue, Wait};
+use crate::process::kill::Kill;
+use crate::signal::unix::Signal;
+
+use std::future::Future;
+use std::io;
+use std::ops::Deref;
+use std::pin::Pin;
+use std::process::ExitStatus;
+use std::task::Context;
+use std::task::Poll;
+
+/// Orchestrates between registering interest for receiving signals when a
+/// child process has exited, and attempting to poll for process completion.
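The ordering the reaper relies on is subtle: it arms a wakeup for the next SIGCHLD *before* checking the child, so an exit that lands between a check and the next registration cannot be lost. A minimal standalone sketch of that polling order follows; the function and closure names are illustrative stand-ins, not Tokio's API.

    use std::task::{Context, Poll};

    // Race-free exit polling: arm the wakeup first, then check the child.
    fn poll_exit_sketch<S, C, E>(signal: &mut S, child: &mut C, cx: &mut Context<'_>) -> Poll<E>
    where
        S: FnMut(&mut Context<'_>) -> Poll<Option<()>>, // stand-in for Signal::poll_recv
        C: FnMut() -> Option<E>,                        // stand-in for try_wait
    {
        loop {
            // Arm the wakeup first, so an exit that arrives after the check
            // below still queues a (possibly spurious) wakeup for this task.
            let registered_interest = signal(cx).is_pending();

            if let Some(status) = child() {
                return Poll::Ready(status);
            }

            // Pending means the waker is armed and we can bail out; otherwise
            // a signal was consumed, so loop and check the child again.
            if registered_interest {
                return Poll::Pending;
            }
        }
    }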
+#[derive(Debug)] +pub(crate) struct Reaper<W, Q, S> +where + W: Wait + Unpin, + Q: OrphanQueue<W>, +{ + inner: Option<W>, + orphan_queue: Q, + signal: S, +} + +// Work around removal of `futures_core` dependency +pub(crate) trait Stream: Unpin { + fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>>; +} + +impl Stream for Signal { + fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { + Signal::poll_recv(self, cx) + } +} + +impl<W, Q, S> Deref for Reaper<W, Q, S> +where + W: Wait + Unpin, + Q: OrphanQueue<W>, +{ + type Target = W; + + fn deref(&self) -> &Self::Target { + self.inner() + } +} + +impl<W, Q, S> Reaper<W, Q, S> +where + W: Wait + Unpin, + Q: OrphanQueue<W>, +{ + pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self { + Self { + inner: Some(inner), + orphan_queue, + signal, + } + } + + fn inner(&self) -> &W { + self.inner.as_ref().expect("inner has gone away") + } + + fn inner_mut(&mut self) -> &mut W { + self.inner.as_mut().expect("inner has gone away") + } +} + +impl<W, Q, S> Future for Reaper<W, Q, S> +where + W: Wait + Unpin, + Q: OrphanQueue<W> + Unpin, + S: Stream, +{ + type Output = io::Result<ExitStatus>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + loop { + // If the child hasn't exited yet, then it's our responsibility to + // ensure the current task gets notified when it might be able to + // make progress. + // + // As described in `spawn` above, we just indicate that we can + // next make progress once a SIGCHLD is received. + // + // However, we will register for a notification on the next signal + // BEFORE we poll the child. Otherwise it is possible that the child + // can exit and the signal can arrive after we last polled the child, + // but before we've registered for a notification on the next signal + // (this can cause a deadlock if there are no more spawned children + // which can generate a different signal for us). A side effect of + // pre-registering for signal notifications is that when the child + // exits, we will have already registered for an additional + // notification we don't need to consume. If another signal arrives, + // this future's task will be notified/woken up again. Since the + // futures model allows for spurious wake ups this extra wakeup + // should not cause significant issues with parent futures. + let registered_interest = self.signal.poll_recv(cx).is_pending(); + + self.orphan_queue.reap_orphans(); + if let Some(status) = self.inner_mut().try_wait()? { + return Poll::Ready(Ok(status)); + } + + // If our attempt to poll for the next signal was not ready, then + // we've arranged for our task to get notified and we can bail out. + if registered_interest { + return Poll::Pending; + } else { + // Otherwise, if the signal stream delivered a signal to us, we + // won't get notified at the next signal, so we'll loop and try + // again. 
+ continue; + } + } + } +} + +impl<W, Q, S> Kill for Reaper<W, Q, S> +where + W: Kill + Wait + Unpin, + Q: OrphanQueue<W>, +{ + fn kill(&mut self) -> io::Result<()> { + self.inner_mut().kill() + } +} + +impl<W, Q, S> Drop for Reaper<W, Q, S> +where + W: Wait + Unpin, + Q: OrphanQueue<W>, +{ + fn drop(&mut self) { + if let Ok(Some(_)) = self.inner_mut().try_wait() { + return; + } + + let orphan = self.inner.take().unwrap(); + self.orphan_queue.push_orphan(orphan); + } +} + +#[cfg(all(test, not(loom)))] +mod test { + use super::*; + + use futures::future::FutureExt; + use std::cell::{Cell, RefCell}; + use std::os::unix::process::ExitStatusExt; + use std::process::ExitStatus; + use std::task::Context; + use std::task::Poll; + + #[derive(Debug)] + struct MockWait { + total_kills: usize, + total_waits: usize, + num_wait_until_status: usize, + status: ExitStatus, + } + + impl MockWait { + fn new(status: ExitStatus, num_wait_until_status: usize) -> Self { + Self { + total_kills: 0, + total_waits: 0, + num_wait_until_status, + status, + } + } + } + + impl Wait for MockWait { + fn id(&self) -> u32 { + 0 + } + + fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { + let ret = if self.num_wait_until_status == self.total_waits { + Some(self.status) + } else { + None + }; + + self.total_waits += 1; + Ok(ret) + } + } + + impl Kill for MockWait { + fn kill(&mut self) -> io::Result<()> { + self.total_kills += 1; + Ok(()) + } + } + + struct MockStream { + total_polls: usize, + values: Vec<Option<()>>, + } + + impl MockStream { + fn new(values: Vec<Option<()>>) -> Self { + Self { + total_polls: 0, + values, + } + } + } + + impl Stream for MockStream { + fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> { + self.total_polls += 1; + match self.values.remove(0) { + Some(()) => Poll::Ready(Some(())), + None => Poll::Pending, + } + } + } + + struct MockQueue<W> { + all_enqueued: RefCell<Vec<W>>, + total_reaps: Cell<usize>, + } + + impl<W> MockQueue<W> { + fn new() -> Self { + Self { + all_enqueued: RefCell::new(Vec::new()), + total_reaps: Cell::new(0), + } + } + } + + impl<W: Wait> OrphanQueue<W> for MockQueue<W> { + fn push_orphan(&self, orphan: W) { + self.all_enqueued.borrow_mut().push(orphan); + } + + fn reap_orphans(&self) { + self.total_reaps.set(self.total_reaps.get() + 1); + } + } + + #[test] + fn reaper() { + let exit = ExitStatus::from_raw(0); + let mock = MockWait::new(exit, 3); + let mut grim = Reaper::new( + mock, + MockQueue::new(), + MockStream::new(vec![None, Some(()), None, None, None]), + ); + + let waker = futures::task::noop_waker(); + let mut context = Context::from_waker(&waker); + + // Not yet exited, interest registered + assert!(grim.poll_unpin(&mut context).is_pending()); + assert_eq!(1, grim.signal.total_polls); + assert_eq!(1, grim.total_waits); + assert_eq!(1, grim.orphan_queue.total_reaps.get()); + assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); + + // Not yet exited, couldn't register interest the first time + // but managed to register interest the second time around + assert!(grim.poll_unpin(&mut context).is_pending()); + assert_eq!(3, grim.signal.total_polls); + assert_eq!(3, grim.total_waits); + assert_eq!(3, grim.orphan_queue.total_reaps.get()); + assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); + + // Exited + if let Poll::Ready(r) = grim.poll_unpin(&mut context) { + assert!(r.is_ok()); + let exit_code = r.unwrap(); + assert_eq!(exit_code, exit); + } else { + unreachable!(); + } + assert_eq!(4, grim.signal.total_polls); + 
assert_eq!(4, grim.total_waits); + assert_eq!(4, grim.orphan_queue.total_reaps.get()); + assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); + } + + #[test] + fn kill() { + let exit = ExitStatus::from_raw(0); + let mut grim = Reaper::new( + MockWait::new(exit, 0), + MockQueue::new(), + MockStream::new(vec![None]), + ); + + grim.kill().unwrap(); + assert_eq!(1, grim.total_kills); + assert_eq!(0, grim.orphan_queue.total_reaps.get()); + assert!(grim.orphan_queue.all_enqueued.borrow().is_empty()); + } + + #[test] + fn drop_reaps_if_possible() { + let exit = ExitStatus::from_raw(0); + let mut mock = MockWait::new(exit, 0); + + { + let queue = MockQueue::new(); + + let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); + + drop(grim); + + assert_eq!(0, queue.total_reaps.get()); + assert!(queue.all_enqueued.borrow().is_empty()); + } + + assert_eq!(1, mock.total_waits); + assert_eq!(0, mock.total_kills); + } + + #[test] + fn drop_enqueues_orphan_if_wait_fails() { + let exit = ExitStatus::from_raw(0); + let mut mock = MockWait::new(exit, 2); + + { + let queue = MockQueue::<&mut MockWait>::new(); + let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![])); + drop(grim); + + assert_eq!(0, queue.total_reaps.get()); + assert_eq!(1, queue.all_enqueued.borrow().len()); + } + + assert_eq!(1, mock.total_waits); + assert_eq!(0, mock.total_kills); + } +} diff --git a/third_party/rust/tokio/src/process/windows.rs b/third_party/rust/tokio/src/process/windows.rs new file mode 100644 index 0000000000..cbe2fa7596 --- /dev/null +++ b/third_party/rust/tokio/src/process/windows.rs @@ -0,0 +1,191 @@ +//! Windows asynchronous process handling. +//! +//! Like with Unix we don't actually have a way of registering a process with an +//! IOCP object. As a result we similarly need another mechanism for getting a +//! signal when a process has exited. For now this is implemented with the +//! `RegisterWaitForSingleObject` function in the kernel32.dll. +//! +//! This strategy is the same that libuv takes and essentially just queues up a +//! wait for the process in a kernel32-specific thread pool. Once the object is +//! notified (e.g. the process exits) then we have a callback that basically +//! just completes a `Oneshot`. +//! +//! The `poll_exit` implementation will attempt to wait for the process in a +//! nonblocking fashion, but failing that it'll fire off a +//! `RegisterWaitForSingleObject` and then wait on the other end of the oneshot +//! from then on out. 
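Before the real implementation, a portable sketch of that strategy may help. Here a plain `std::thread` stands in for the kernel32 wait-thread pool and an `mpsc` channel stands in for the oneshot; every name is illustrative, not Tokio's or Windows' API.

    use std::sync::mpsc;
    use std::thread;
    use std::time::Duration;

    // Queue a wait for the "process" on another thread and complete a
    // channel (the "oneshot") when it finishes.
    fn wait_async(fake_child: thread::JoinHandle<i32>) -> mpsc::Receiver<i32> {
        let (tx, rx) = mpsc::channel();
        thread::spawn(move || {
            let status = fake_child.join().unwrap();
            let _ = tx.send(status); // completes the "oneshot"
        });
        rx
    }

    fn main() {
        let child = thread::spawn(|| {
            thread::sleep(Duration::from_millis(10));
            0 // pretend exit status
        });
        let rx = wait_async(child);
        println!("exit status: {}", rx.recv().unwrap());
    }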
+ +use crate::io::PollEvented; +use crate::process::kill::Kill; +use crate::process::SpawnedChild; +use crate::sync::oneshot; + +use mio_named_pipes::NamedPipe; +use std::fmt; +use std::future::Future; +use std::io; +use std::os::windows::prelude::*; +use std::os::windows::process::ExitStatusExt; +use std::pin::Pin; +use std::process::{Child as StdChild, Command as StdCommand, ExitStatus}; +use std::ptr; +use std::task::Context; +use std::task::Poll; +use winapi::shared::minwindef::FALSE; +use winapi::shared::winerror::WAIT_TIMEOUT; +use winapi::um::handleapi::INVALID_HANDLE_VALUE; +use winapi::um::processthreadsapi::GetExitCodeProcess; +use winapi::um::synchapi::WaitForSingleObject; +use winapi::um::threadpoollegacyapiset::UnregisterWaitEx; +use winapi::um::winbase::{RegisterWaitForSingleObject, INFINITE, WAIT_OBJECT_0}; +use winapi::um::winnt::{BOOLEAN, HANDLE, PVOID, WT_EXECUTEINWAITTHREAD, WT_EXECUTEONLYONCE}; + +#[must_use = "futures do nothing unless polled"] +pub(crate) struct Child { + child: StdChild, + waiting: Option<Waiting>, +} + +impl fmt::Debug for Child { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Child") + .field("pid", &self.id()) + .field("child", &self.child) + .field("waiting", &"..") + .finish() + } +} + +struct Waiting { + rx: oneshot::Receiver<()>, + wait_object: HANDLE, + tx: *mut Option<oneshot::Sender<()>>, +} + +unsafe impl Sync for Waiting {} +unsafe impl Send for Waiting {} + +pub(crate) fn spawn_child(cmd: &mut StdCommand) -> io::Result<SpawnedChild> { + let mut child = cmd.spawn()?; + let stdin = stdio(child.stdin.take()); + let stdout = stdio(child.stdout.take()); + let stderr = stdio(child.stderr.take()); + + Ok(SpawnedChild { + child: Child { + child, + waiting: None, + }, + stdin, + stdout, + stderr, + }) +} + +impl Child { + pub(crate) fn id(&self) -> u32 { + self.child.id() + } +} + +impl Kill for Child { + fn kill(&mut self) -> io::Result<()> { + self.child.kill() + } +} + +impl Future for Child { + type Output = io::Result<ExitStatus>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let inner = Pin::get_mut(self); + loop { + if let Some(ref mut w) = inner.waiting { + match Pin::new(&mut w.rx).poll(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(_)) => panic!("should not be canceled"), + Poll::Pending => return Poll::Pending, + } + let status = try_wait(&inner.child)?.expect("not ready yet"); + return Poll::Ready(Ok(status)); + } + + if let Some(e) = try_wait(&inner.child)? 
{ + return Poll::Ready(Ok(e)); + } + let (tx, rx) = oneshot::channel(); + let ptr = Box::into_raw(Box::new(Some(tx))); + let mut wait_object = ptr::null_mut(); + let rc = unsafe { + RegisterWaitForSingleObject( + &mut wait_object, + inner.child.as_raw_handle(), + Some(callback), + ptr as *mut _, + INFINITE, + WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE, + ) + }; + if rc == 0 { + let err = io::Error::last_os_error(); + drop(unsafe { Box::from_raw(ptr) }); + return Poll::Ready(Err(err)); + } + inner.waiting = Some(Waiting { + rx, + wait_object, + tx: ptr, + }); + } + } +} + +impl Drop for Waiting { + fn drop(&mut self) { + unsafe { + let rc = UnregisterWaitEx(self.wait_object, INVALID_HANDLE_VALUE); + if rc == 0 { + panic!("failed to unregister: {}", io::Error::last_os_error()); + } + drop(Box::from_raw(self.tx)); + } + } +} + +unsafe extern "system" fn callback(ptr: PVOID, _timer_fired: BOOLEAN) { + let complete = &mut *(ptr as *mut Option<oneshot::Sender<()>>); + let _ = complete.take().unwrap().send(()); +} + +pub(crate) fn try_wait(child: &StdChild) -> io::Result<Option<ExitStatus>> { + unsafe { + match WaitForSingleObject(child.as_raw_handle(), 0) { + WAIT_OBJECT_0 => {} + WAIT_TIMEOUT => return Ok(None), + _ => return Err(io::Error::last_os_error()), + } + let mut status = 0; + let rc = GetExitCodeProcess(child.as_raw_handle(), &mut status); + if rc == FALSE { + Err(io::Error::last_os_error()) + } else { + Ok(Some(ExitStatus::from_raw(status))) + } + } +} + +pub(crate) type ChildStdin = PollEvented<NamedPipe>; +pub(crate) type ChildStdout = PollEvented<NamedPipe>; +pub(crate) type ChildStderr = PollEvented<NamedPipe>; + +fn stdio<T>(option: Option<T>) -> Option<PollEvented<NamedPipe>> +where + T: IntoRawHandle, +{ + let io = match option { + Some(io) => io, + None => return None, + }; + let pipe = unsafe { NamedPipe::from_raw_handle(io.into_raw_handle()) }; + PollEvented::new(pipe).ok() +} diff --git a/third_party/rust/tokio/src/runtime/basic_scheduler.rs b/third_party/rust/tokio/src/runtime/basic_scheduler.rs new file mode 100644 index 0000000000..301554280f --- /dev/null +++ b/third_party/rust/tokio/src/runtime/basic_scheduler.rs @@ -0,0 +1,326 @@ +use crate::park::{Park, Unpark}; +use crate::runtime; +use crate::runtime::task::{self, JoinHandle, Schedule, Task}; +use crate::util::linked_list::LinkedList; +use crate::util::{waker_ref, Wake}; + +use std::cell::RefCell; +use std::collections::VecDeque; +use std::fmt; +use std::future::Future; +use std::sync::{Arc, Mutex}; +use std::task::Poll::Ready; +use std::time::Duration; + +/// Executes tasks on the current thread +pub(crate) struct BasicScheduler<P> +where + P: Park, +{ + /// Scheduler run queue + /// + /// When the scheduler is executed, the queue is removed from `self` and + /// moved into `Context`. + /// + /// This indirection is to allow `BasicScheduler` to be `Send`. + tasks: Option<Tasks>, + + /// Sendable task spawner + spawner: Spawner, + + /// Current tick + tick: u8, + + /// Thread park handle + park: P, +} + +#[derive(Clone)] +pub(crate) struct Spawner { + shared: Arc<Shared>, +} + +struct Tasks { + /// Collection of all active tasks spawned onto this executor. + owned: LinkedList<Task<Arc<Shared>>>, + + /// Local run queue. + /// + /// Tasks notified from the current thread are pushed into this queue. + queue: VecDeque<task::Notified<Arc<Shared>>>, +} + +/// Scheduler state shared between threads. 
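The scheduler below keeps two run queues: a cheap `RefCell` queue for wakeups coming from the scheduler thread itself, and the mutex-protected `Shared` queue (plus an unpark) for wakeups from other threads. A minimal sketch of just the cross-thread half, under illustrative names:

    use std::collections::VecDeque;
    use std::sync::{Arc, Mutex};
    use std::thread;

    // Remote side of a two-queue scheduler: cross-thread pushes take a lock
    // and would also wake the scheduler thread; same-thread pushes (not
    // shown) can use an unsynchronized local queue.
    struct Remote<T> {
        queue: Mutex<VecDeque<T>>,
    }

    impl<T> Remote<T> {
        fn push(&self, task: T) {
            self.queue.lock().unwrap().push_back(task);
            // the real scheduler calls `unpark` here to wake the parked thread
        }

        fn pop(&self) -> Option<T> {
            self.queue.lock().unwrap().pop_front()
        }
    }

    fn main() {
        let remote = Arc::new(Remote {
            queue: Mutex::new(VecDeque::new()),
        });

        let r2 = Arc::clone(&remote);
        thread::spawn(move || r2.push("wake me")).join().unwrap();

        assert_eq!(remote.pop(), Some("wake me"));
    }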
+struct Shared {
+    /// Remote run queue
+    queue: Mutex<VecDeque<task::Notified<Arc<Shared>>>>,
+
+    /// Unpark the blocked thread
+    unpark: Box<dyn Unpark>,
+}
+
+/// Thread-local context
+struct Context {
+    /// Shared scheduler state
+    shared: Arc<Shared>,
+
+    /// Local queue
+    tasks: RefCell<Tasks>,
+}
+
+/// Initial queue capacity
+const INITIAL_CAPACITY: usize = 64;
+
+/// Max number of tasks to poll per tick.
+const MAX_TASKS_PER_TICK: usize = 61;
+
+/// How often to check the remote queue first
+const REMOTE_FIRST_INTERVAL: u8 = 31;
+
+// Tracks the current BasicScheduler
+scoped_thread_local!(static CURRENT: Context);
+
+impl<P> BasicScheduler<P>
+where
+    P: Park,
+{
+    pub(crate) fn new(park: P) -> BasicScheduler<P> {
+        let unpark = Box::new(park.unpark());
+
+        BasicScheduler {
+            tasks: Some(Tasks {
+                owned: LinkedList::new(),
+                queue: VecDeque::with_capacity(INITIAL_CAPACITY),
+            }),
+            spawner: Spawner {
+                shared: Arc::new(Shared {
+                    queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)),
+                    unpark: unpark as Box<dyn Unpark>,
+                }),
+            },
+            tick: 0,
+            park,
+        }
+    }
+
+    pub(crate) fn spawner(&self) -> &Spawner {
+        &self.spawner
+    }
+
+    /// Spawns a future onto the thread pool
+    pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+    where
+        F: Future + Send + 'static,
+        F::Output: Send + 'static,
+    {
+        self.spawner.spawn(future)
+    }
+
+    pub(crate) fn block_on<F>(&mut self, future: F) -> F::Output
+    where
+        F: Future,
+    {
+        enter(self, |scheduler, context| {
+            let _enter = runtime::enter();
+            let waker = waker_ref(&scheduler.spawner.shared);
+            let mut cx = std::task::Context::from_waker(&waker);
+
+            pin!(future);
+
+            'outer: loop {
+                if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) {
+                    return v;
+                }
+
+                for _ in 0..MAX_TASKS_PER_TICK {
+                    // Get and increment the current tick
+                    let tick = scheduler.tick;
+                    scheduler.tick = scheduler.tick.wrapping_add(1);
+
+                    let next = if tick % REMOTE_FIRST_INTERVAL == 0 {
+                        scheduler
+                            .spawner
+                            .pop()
+                            .or_else(|| context.tasks.borrow_mut().queue.pop_front())
+                    } else {
+                        context
+                            .tasks
+                            .borrow_mut()
+                            .queue
+                            .pop_front()
+                            .or_else(|| scheduler.spawner.pop())
+                    };
+
+                    match next {
+                        Some(task) => crate::coop::budget(|| task.run()),
+                        None => {
+                            // Park until the thread is signaled
+                            scheduler.park.park().ok().expect("failed to park");
+
+                            // Try polling the `block_on` future next
+                            continue 'outer;
+                        }
+                    }
+                }
+
+                // Yield to the park; this drives the timer and pulls any
+                // pending I/O events.
+                scheduler
+                    .park
+                    .park_timeout(Duration::from_millis(0))
+                    .ok()
+                    .expect("failed to park");
+            }
+        })
+    }
+}
+
+/// Enter the scheduler context. This sets the queue and other necessary
+/// scheduler state in the thread-local.
+fn enter<F, R, P>(scheduler: &mut BasicScheduler<P>, f: F) -> R
+where
+    F: FnOnce(&mut BasicScheduler<P>, &Context) -> R,
+    P: Park,
+{
+    // Ensures the run queue is placed back in the `BasicScheduler` instance
+    // once `block_on` returns.
+    struct Guard<'a, P: Park> {
+        context: Option<Context>,
+        scheduler: &'a mut BasicScheduler<P>,
+    }
+
+    impl<P: Park> Drop for Guard<'_, P> {
+        fn drop(&mut self) {
+            let Context { tasks, .. } = self.context.take().expect("context missing");
+            self.scheduler.tasks = Some(tasks.into_inner());
+        }
+    }
+
+    // Remove `tasks` from `self` and place it in a `Context`.
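+    // The `Option` round-trip below is what lets `BasicScheduler` be `Send`
+    // (see the `tasks` field docs): the run queue lives in `self.tasks` only
+    // while the scheduler is idle, moves into the thread-local `Context` for
+    // the duration of `f`, and is put back by `Guard::drop` even if `f`
+    // panics.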
+ let tasks = scheduler.tasks.take().expect("invalid state"); + + let guard = Guard { + context: Some(Context { + shared: scheduler.spawner.shared.clone(), + tasks: RefCell::new(tasks), + }), + scheduler, + }; + + let context = guard.context.as_ref().unwrap(); + let scheduler = &mut *guard.scheduler; + + CURRENT.set(context, || f(scheduler, context)) +} + +impl<P> Drop for BasicScheduler<P> +where + P: Park, +{ + fn drop(&mut self) { + enter(self, |scheduler, context| { + // Loop required here to ensure borrow is dropped between iterations + #[allow(clippy::while_let_loop)] + loop { + let task = match context.tasks.borrow_mut().owned.pop_back() { + Some(task) => task, + None => break, + }; + + task.shutdown(); + } + + // Drain local queue + for task in context.tasks.borrow_mut().queue.drain(..) { + task.shutdown(); + } + + // Drain remote queue + for task in scheduler.spawner.shared.queue.lock().unwrap().drain(..) { + task.shutdown(); + } + + assert!(context.tasks.borrow().owned.is_empty()); + }); + } +} + +impl<P: Park> fmt::Debug for BasicScheduler<P> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("BasicScheduler").finish() + } +} + +// ===== impl Spawner ===== + +impl Spawner { + /// Spawns a future onto the thread pool + pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output> + where + F: Future + Send + 'static, + F::Output: Send + 'static, + { + let (task, handle) = task::joinable(future); + self.shared.schedule(task); + handle + } + + fn pop(&self) -> Option<task::Notified<Arc<Shared>>> { + self.shared.queue.lock().unwrap().pop_front() + } +} + +impl fmt::Debug for Spawner { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Spawner").finish() + } +} + +// ===== impl Shared ===== + +impl Schedule for Arc<Shared> { + fn bind(task: Task<Self>) -> Arc<Shared> { + CURRENT.with(|maybe_cx| { + let cx = maybe_cx.expect("scheduler context missing"); + cx.tasks.borrow_mut().owned.push_front(task); + cx.shared.clone() + }) + } + + fn release(&self, task: &Task<Self>) -> Option<Task<Self>> { + use std::ptr::NonNull; + + CURRENT.with(|maybe_cx| { + let cx = maybe_cx.expect("scheduler context missing"); + + // safety: the task is inserted in the list in `bind`. + unsafe { + let ptr = NonNull::from(task.header()); + cx.tasks.borrow_mut().owned.remove(ptr) + } + }) + } + + fn schedule(&self, task: task::Notified<Self>) { + CURRENT.with(|maybe_cx| match maybe_cx { + Some(cx) if Arc::ptr_eq(self, &cx.shared) => { + cx.tasks.borrow_mut().queue.push_back(task); + } + _ => { + self.queue.lock().unwrap().push_back(task); + self.unpark.unpark(); + } + }); + } +} + +impl Wake for Shared { + fn wake(self: Arc<Self>) { + Wake::wake_by_ref(&self) + } + + /// Wake by reference + fn wake_by_ref(arc_self: &Arc<Self>) { + arc_self.unpark.unpark(); + } +} diff --git a/third_party/rust/tokio/src/runtime/blocking/mod.rs b/third_party/rust/tokio/src/runtime/blocking/mod.rs new file mode 100644 index 0000000000..5c808335cc --- /dev/null +++ b/third_party/rust/tokio/src/runtime/blocking/mod.rs @@ -0,0 +1,43 @@ +//! Abstracts out the APIs necessary to `Runtime` for integrating the blocking +//! pool. When the `blocking` feature flag is **not** enabled, these APIs are +//! shells. This isolates the complexity of dealing with conditional +//! compilation. + +cfg_blocking_impl! 
{ + mod pool; + pub(crate) use pool::{spawn_blocking, try_spawn_blocking, BlockingPool, Spawner}; + + mod schedule; + mod shutdown; + mod task; + + use crate::runtime::Builder; + + pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool { + BlockingPool::new(builder, thread_cap) + + } +} + +cfg_not_blocking_impl! { + use crate::runtime::Builder; + use std::time::Duration; + + #[derive(Debug, Clone)] + pub(crate) struct BlockingPool {} + + pub(crate) use BlockingPool as Spawner; + + pub(crate) fn create_blocking_pool(_builder: &Builder, _thread_cap: usize) -> BlockingPool { + BlockingPool {} + } + + impl BlockingPool { + pub(crate) fn spawner(&self) -> &BlockingPool { + self + } + + pub(crate) fn shutdown(&mut self, _duration: Option<Duration>) { + } + } +} diff --git a/third_party/rust/tokio/src/runtime/blocking/pool.rs b/third_party/rust/tokio/src/runtime/blocking/pool.rs new file mode 100644 index 0000000000..a3b208d171 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/blocking/pool.rs @@ -0,0 +1,307 @@ +//! Thread pool for blocking operations + +use crate::loom::sync::{Arc, Condvar, Mutex}; +use crate::loom::thread; +use crate::runtime::blocking::schedule::NoopSchedule; +use crate::runtime::blocking::shutdown; +use crate::runtime::blocking::task::BlockingTask; +use crate::runtime::task::{self, JoinHandle}; +use crate::runtime::{Builder, Callback, Handle}; + +use std::collections::VecDeque; +use std::fmt; +use std::time::Duration; + +pub(crate) struct BlockingPool { + spawner: Spawner, + shutdown_rx: shutdown::Receiver, +} + +#[derive(Clone)] +pub(crate) struct Spawner { + inner: Arc<Inner>, +} + +struct Inner { + /// State shared between worker threads + shared: Mutex<Shared>, + + /// Pool threads wait on this. + condvar: Condvar, + + /// Spawned threads use this name + thread_name: String, + + /// Spawned thread stack size + stack_size: Option<usize>, + + /// Call after a thread starts + after_start: Option<Callback>, + + /// Call before a thread stops + before_stop: Option<Callback>, + + thread_cap: usize, +} + +struct Shared { + queue: VecDeque<Task>, + num_th: usize, + num_idle: u32, + num_notify: u32, + shutdown: bool, + shutdown_tx: Option<shutdown::Sender>, +} + +type Task = task::Notified<NoopSchedule>; + +const KEEP_ALIVE: Duration = Duration::from_secs(10); + +/// Run the provided function on an executor dedicated to blocking operations. 
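For orientation, a hedged usage sketch of the function defined below, assuming the tokio 0.2-era public re-export `tokio::task::spawn_blocking` and a runtime with the blocking pool enabled; the file path and error handling are illustrative only.

    use tokio::task;

    // Offload a synchronous filesystem call onto the blocking pool so it
    // cannot stall the async worker threads; the closure runs on a pool
    // thread while the async task is suspended on the JoinHandle.
    async fn file_len(path: &'static str) -> std::io::Result<u64> {
        let meta = task::spawn_blocking(move || std::fs::metadata(path))
            .await
            .expect("blocking task panicked")?;
        Ok(meta.len())
    }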
+pub(crate) fn spawn_blocking<F, R>(func: F) -> JoinHandle<R> +where + F: FnOnce() -> R + Send + 'static, +{ + let rt = Handle::current(); + + let (task, handle) = task::joinable(BlockingTask::new(func)); + let _ = rt.blocking_spawner.spawn(task, &rt); + handle +} + +#[allow(dead_code)] +pub(crate) fn try_spawn_blocking<F, R>(func: F) -> Result<(), ()> +where + F: FnOnce() -> R + Send + 'static, +{ + let rt = Handle::current(); + + let (task, _handle) = task::joinable(BlockingTask::new(func)); + rt.blocking_spawner.spawn(task, &rt) +} + +// ===== impl BlockingPool ===== + +impl BlockingPool { + pub(crate) fn new(builder: &Builder, thread_cap: usize) -> BlockingPool { + let (shutdown_tx, shutdown_rx) = shutdown::channel(); + + BlockingPool { + spawner: Spawner { + inner: Arc::new(Inner { + shared: Mutex::new(Shared { + queue: VecDeque::new(), + num_th: 0, + num_idle: 0, + num_notify: 0, + shutdown: false, + shutdown_tx: Some(shutdown_tx), + }), + condvar: Condvar::new(), + thread_name: builder.thread_name.clone(), + stack_size: builder.thread_stack_size, + after_start: builder.after_start.clone(), + before_stop: builder.before_stop.clone(), + thread_cap, + }), + }, + shutdown_rx, + } + } + + pub(crate) fn spawner(&self) -> &Spawner { + &self.spawner + } + + pub(crate) fn shutdown(&mut self, timeout: Option<Duration>) { + let mut shared = self.spawner.inner.shared.lock().unwrap(); + + // The function can be called multiple times. First, by explicitly + // calling `shutdown` then by the drop handler calling `shutdown`. This + // prevents shutting down twice. + if shared.shutdown { + return; + } + + shared.shutdown = true; + shared.shutdown_tx = None; + self.spawner.inner.condvar.notify_all(); + + drop(shared); + + self.shutdown_rx.wait(timeout); + } +} + +impl Drop for BlockingPool { + fn drop(&mut self) { + self.shutdown(None); + } +} + +impl fmt::Debug for BlockingPool { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("BlockingPool").finish() + } +} + +// ===== impl Spawner ===== + +impl Spawner { + fn spawn(&self, task: Task, rt: &Handle) -> Result<(), ()> { + let shutdown_tx = { + let mut shared = self.inner.shared.lock().unwrap(); + + if shared.shutdown { + // Shutdown the task + task.shutdown(); + + // no need to even push this task; it would never get picked up + return Err(()); + } + + shared.queue.push_back(task); + + if shared.num_idle == 0 { + // No threads are able to process the task. + + if shared.num_th == self.inner.thread_cap { + // At max number of threads + None + } else { + shared.num_th += 1; + assert!(shared.shutdown_tx.is_some()); + shared.shutdown_tx.clone() + } + } else { + // Notify an idle worker thread. The notification counter + // is used to count the needed amount of notifications + // exactly. Thread libraries may generate spurious + // wakeups, this counter is used to keep us in a + // consistent state. 
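+                // A worked example of this counter discipline: with two idle
+                // workers and two back-to-back spawns, each spawn does
+                // num_idle -= 1, num_notify += 1, notify_one(). Even if the
+                // condvar wakes a worker spuriously in between, workers only
+                // treat a wakeup as real while num_notify > 0, so exactly two
+                // of them transition back to the BUSY loop.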
+                shared.num_idle -= 1;
+                shared.num_notify += 1;
+                self.inner.condvar.notify_one();
+                None
+            }
+        };
+
+        if let Some(shutdown_tx) = shutdown_tx {
+            self.spawn_thread(shutdown_tx, rt);
+        }
+
+        Ok(())
+    }
+
+    fn spawn_thread(&self, shutdown_tx: shutdown::Sender, rt: &Handle) {
+        let mut builder = thread::Builder::new().name(self.inner.thread_name.clone());
+
+        if let Some(stack_size) = self.inner.stack_size {
+            builder = builder.stack_size(stack_size);
+        }
+
+        let rt = rt.clone();
+
+        builder
+            .spawn(move || {
+                // Only the reference should be moved into the closure
+                let rt = &rt;
+                rt.enter(move || {
+                    rt.blocking_spawner.inner.run();
+                    drop(shutdown_tx);
+                })
+            })
+            .unwrap();
+    }
+}
+
+impl Inner {
+    fn run(&self) {
+        if let Some(f) = &self.after_start {
+            f()
+        }
+
+        let mut shared = self.shared.lock().unwrap();
+
+        'main: loop {
+            // BUSY
+            while let Some(task) = shared.queue.pop_front() {
+                drop(shared);
+                task.run();
+
+                shared = self.shared.lock().unwrap();
+            }
+
+            // IDLE
+            shared.num_idle += 1;
+
+            while !shared.shutdown {
+                let lock_result = self.condvar.wait_timeout(shared, KEEP_ALIVE).unwrap();
+
+                shared = lock_result.0;
+                let timeout_result = lock_result.1;
+
+                if shared.num_notify != 0 {
+                    // We have received a legitimate wakeup,
+                    // acknowledge it by decrementing the counter
+                    // and transition to the BUSY state.
+                    shared.num_notify -= 1;
+                    break;
+                }
+
+                // Even if the condvar "timed out", if the pool is entering the
+                // shutdown phase, we want to perform the cleanup logic.
+                if !shared.shutdown && timeout_result.timed_out() {
+                    break 'main;
+                }
+
+                // Spurious wakeup detected, go back to sleep.
+            }
+
+            if shared.shutdown {
+                // Drain the queue
+                while let Some(task) = shared.queue.pop_front() {
+                    drop(shared);
+                    task.shutdown();
+
+                    shared = self.shared.lock().unwrap();
+                }
+
+                // Work was produced, and we "took" it (by decrementing num_notify).
+                // This means that num_idle was decremented once for our wakeup.
+                // But, since we are exiting, we need to "undo" that, as we'll stay idle.
+                shared.num_idle += 1;
+                // NOTE: Technically we should also do num_notify++ and notify again,
+                // but since we're shutting down anyway, that won't be necessary.
+                break;
+            }
+        }
+
+        // Thread exit
+        shared.num_th -= 1;
+
+        // num_idle should now be tracked exactly; panic
+        // with a descriptive message if it is not the
+        // case.
+        shared.num_idle = shared
+            .num_idle
+            .checked_sub(1)
+            .expect("num_idle underflowed on thread exit");
+
+        if shared.shutdown && shared.num_th == 0 {
+            self.condvar.notify_one();
+        }
+
+        drop(shared);
+
+        if let Some(f) = &self.before_stop {
+            f()
+        }
+    }
+}
+
+impl fmt::Debug for Spawner {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("blocking::Spawner").finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/schedule.rs b/third_party/rust/tokio/src/runtime/blocking/schedule.rs
new file mode 100644
index 0000000000..e10778d530
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/schedule.rs
@@ -0,0 +1,24 @@
+use crate::runtime::task::{self, Task};
+
+/// `task::Schedule` implementation that does nothing. This is unique to the
+/// blocking scheduler as tasks scheduled are not really futures but blocking
+/// operations.
+///
+/// We avoid storing the task by forgetting it in `bind` and re-materializing it
+/// in `release`.
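+// Context for the no-op below: a blocking "task" (see blocking/task.rs)
+// completes on its very first poll, so it is never woken again and never
+// re-scheduled; that is why `schedule` can be `unreachable!()`.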
+pub(super) struct NoopSchedule; + +impl task::Schedule for NoopSchedule { + fn bind(_task: Task<Self>) -> NoopSchedule { + // Do nothing w/ the task + NoopSchedule + } + + fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> { + None + } + + fn schedule(&self, _task: task::Notified<Self>) { + unreachable!(); + } +} diff --git a/third_party/rust/tokio/src/runtime/blocking/shutdown.rs b/third_party/rust/tokio/src/runtime/blocking/shutdown.rs new file mode 100644 index 0000000000..5ee8af0fbc --- /dev/null +++ b/third_party/rust/tokio/src/runtime/blocking/shutdown.rs @@ -0,0 +1,58 @@ +//! A shutdown channel. +//! +//! Each worker holds the `Sender` half. When all the `Sender` halves are +//! dropped, the `Receiver` receives a notification. + +use crate::loom::sync::Arc; +use crate::sync::oneshot; + +use std::time::Duration; + +#[derive(Debug, Clone)] +pub(super) struct Sender { + tx: Arc<oneshot::Sender<()>>, +} + +#[derive(Debug)] +pub(super) struct Receiver { + rx: oneshot::Receiver<()>, +} + +pub(super) fn channel() -> (Sender, Receiver) { + let (tx, rx) = oneshot::channel(); + let tx = Sender { tx: Arc::new(tx) }; + let rx = Receiver { rx }; + + (tx, rx) +} + +impl Receiver { + /// Blocks the current thread until all `Sender` handles drop. + /// + /// If `timeout` is `Some`, the thread is blocked for **at most** `timeout` + /// duration. If `timeout` is `None`, then the thread is blocked until the + /// shutdown signal is received. + pub(crate) fn wait(&mut self, timeout: Option<Duration>) { + use crate::runtime::enter::{enter, try_enter}; + + let mut e = if std::thread::panicking() { + match try_enter() { + Some(enter) => enter, + _ => return, + } + } else { + enter() + }; + + // The oneshot completes with an Err + // + // If blocking fails to wait, this indicates a problem parking the + // current thread (usually, shutting down a runtime stored in a + // thread-local). + if let Some(timeout) = timeout { + let _ = e.block_on_timeout(&mut self.rx, timeout); + } else { + let _ = e.block_on(&mut self.rx); + } + } +} diff --git a/third_party/rust/tokio/src/runtime/blocking/task.rs b/third_party/rust/tokio/src/runtime/blocking/task.rs new file mode 100644 index 0000000000..f98b85494c --- /dev/null +++ b/third_party/rust/tokio/src/runtime/blocking/task.rs @@ -0,0 +1,40 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Converts a function to a future that completes on poll +pub(super) struct BlockingTask<T> { + func: Option<T>, +} + +impl<T> BlockingTask<T> { + /// Initializes a new blocking task from the given function + pub(super) fn new(func: T) -> BlockingTask<T> { + BlockingTask { func: Some(func) } + } +} + +impl<T, R> Future for BlockingTask<T> +where + T: FnOnce() -> R, +{ + type Output = R; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<R> { + let me = unsafe { self.get_unchecked_mut() }; + let func = me + .func + .take() + .expect("[internal exception] blocking task ran twice."); + + // This is a little subtle: + // For convenience, we'd like _every_ call tokio ever makes to Task::poll() to be budgeted + // using coop. However, the way things are currently modeled, even running a blocking task + // currently goes through Task::poll(), and so is subject to budgeting. That isn't really + // what we want; a blocking task may itself want to run tasks (it might be a Worker!), so + // we want it to start without any budgeting. 
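+        // In this vendored coop module, `stop()` appears to reset the
+        // thread's budget to the unconstrained state, so the closure below
+        // runs without yield-point accounting even though it enters through
+        // `Task::poll`.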
+ crate::coop::stop(); + + Poll::Ready(func()) + } +} diff --git a/third_party/rust/tokio/src/runtime/builder.rs b/third_party/rust/tokio/src/runtime/builder.rs new file mode 100644 index 0000000000..cfde998251 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/builder.rs @@ -0,0 +1,519 @@ +use crate::runtime::handle::Handle; +use crate::runtime::shell::Shell; +use crate::runtime::{blocking, io, time, Callback, Runtime, Spawner}; + +use std::fmt; +#[cfg(not(loom))] +use std::sync::Arc; + +/// Builds Tokio Runtime with custom configuration values. +/// +/// Methods can be chained in order to set the configuration values. The +/// Runtime is constructed by calling [`build`]. +/// +/// New instances of `Builder` are obtained via [`Builder::new`]. +/// +/// See function level documentation for details on the various configuration +/// settings. +/// +/// [`build`]: #method.build +/// [`Builder::new`]: #method.new +/// +/// # Examples +/// +/// ``` +/// use tokio::runtime::Builder; +/// +/// fn main() { +/// // build runtime +/// let runtime = Builder::new() +/// .threaded_scheduler() +/// .core_threads(4) +/// .thread_name("my-custom-name") +/// .thread_stack_size(3 * 1024 * 1024) +/// .build() +/// .unwrap(); +/// +/// // use runtime ... +/// } +/// ``` +pub struct Builder { + /// The task execution model to use. + kind: Kind, + + /// Whether or not to enable the I/O driver + enable_io: bool, + + /// Whether or not to enable the time driver + enable_time: bool, + + /// The number of worker threads, used by Runtime. + /// + /// Only used when not using the current-thread executor. + core_threads: Option<usize>, + + /// Cap on thread usage. + max_threads: usize, + + /// Name used for threads spawned by the runtime. + pub(super) thread_name: String, + + /// Stack size used for threads spawned by the runtime. + pub(super) thread_stack_size: Option<usize>, + + /// Callback to run after each thread starts. + pub(super) after_start: Option<Callback>, + + /// To run before each worker thread stops + pub(super) before_stop: Option<Callback>, +} + +#[derive(Debug, Clone, Copy)] +enum Kind { + Shell, + #[cfg(feature = "rt-core")] + Basic, + #[cfg(feature = "rt-threaded")] + ThreadPool, +} + +impl Builder { + /// Returns a new runtime builder initialized with default configuration + /// values. + /// + /// Configuration methods can be chained on the return value. + pub fn new() -> Builder { + Builder { + // No task execution by default + kind: Kind::Shell, + + // I/O defaults to "off" + enable_io: false, + + // Time defaults to "off" + enable_time: false, + + // Default to lazy auto-detection (one thread per CPU core) + core_threads: None, + + max_threads: 512, + + // Default thread name + thread_name: "tokio-runtime-worker".into(), + + // Do not set a stack size by default + thread_stack_size: None, + + // No worker thread callbacks + after_start: None, + before_stop: None, + } + } + + /// Enables both I/O and time drivers. + /// + /// Doing this is a shorthand for calling `enable_io` and `enable_time` + /// individually. If additional components are added to Tokio in the future, + /// `enable_all` will include these future components. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::runtime;
+    ///
+    /// let rt = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .enable_all()
+    ///     .build()
+    ///     .unwrap();
+    /// ```
+    pub fn enable_all(&mut self) -> &mut Self {
+        #[cfg(feature = "io-driver")]
+        self.enable_io();
+        #[cfg(feature = "time")]
+        self.enable_time();
+
+        self
+    }
+
+    #[deprecated(note = "In the future, this will be replaced by the `core_threads` method")]
+    /// Sets the maximum number of worker threads for the `Runtime`'s thread pool.
+    ///
+    /// This must be a number between 1 and 32,768, though it is advised to keep
+    /// this value on the smaller side.
+    ///
+    /// The default value is the number of cores available to the system.
+    pub fn num_threads(&mut self, val: usize) -> &mut Self {
+        self.core_threads = Some(val);
+        self
+    }
+
+    /// Sets the core number of worker threads for the `Runtime`'s thread pool.
+    ///
+    /// This should be a number between 1 and 32,768, though it is advised to keep
+    /// this value on the smaller side.
+    ///
+    /// The default value is the number of cores available to the system.
+    ///
+    /// These threads will always be active and running.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::runtime;
+    ///
+    /// let rt = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .core_threads(4)
+    ///     .build()
+    ///     .unwrap();
+    /// ```
+    pub fn core_threads(&mut self, val: usize) -> &mut Self {
+        assert_ne!(val, 0, "Core threads cannot be zero");
+        self.core_threads = Some(val);
+        self
+    }
+
+    /// Specifies the limit for threads spawned by the Runtime.
+    ///
+    /// This is the number of threads to be used by the Runtime, including
+    /// `core_threads`. Having `max_threads` less than `core_threads` results
+    /// in an invalid configuration when building a multi-threaded `Runtime`,
+    /// which will cause a panic.
+    ///
+    /// Similarly to `core_threads`, this number should be between 1 and 32,768.
+    ///
+    /// The default value is 512.
+    ///
+    /// When the multi-threaded runtime is not used, this will act as a limit
+    /// on additional threads.
+    ///
+    /// Otherwise, as `core_threads` are always active, it limits additional
+    /// threads (e.g. for blocking annotations) to `max_threads - core_threads`.
+    pub fn max_threads(&mut self, val: usize) -> &mut Self {
+        assert_ne!(val, 0, "Thread limit cannot be zero");
+        self.max_threads = val;
+        self
+    }
+
+    /// Sets the name of threads spawned by the `Runtime`'s thread pool.
+    ///
+    /// The default name is "tokio-runtime-worker".
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    ///
+    /// # pub fn main() {
+    /// let rt = runtime::Builder::new()
+    ///     .thread_name("my-pool")
+    ///     .build();
+    /// # }
+    /// ```
+    pub fn thread_name(&mut self, val: impl Into<String>) -> &mut Self {
+        self.thread_name = val.into();
+        self
+    }
+
+    /// Sets the stack size (in bytes) for worker threads.
+    ///
+    /// The actual stack size may be greater than this value if the platform
+    /// specifies a minimal stack size.
+    ///
+    /// The default stack size for spawned threads is 2 MiB, though this
+    /// particular stack size is subject to change in the future.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    ///
+    /// # pub fn main() {
+    /// let rt = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .thread_stack_size(32 * 1024)
+    ///     .build();
+    /// # }
+    /// ```
+    pub fn thread_stack_size(&mut self, val: usize) -> &mut Self {
+        self.thread_stack_size = Some(val);
+        self
+    }
+
+    /// Executes function `f` after each thread is started but before it starts
+    /// doing work.
+    ///
+    /// This is intended for bookkeeping and monitoring use cases.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    ///
+    /// # pub fn main() {
+    /// let runtime = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .on_thread_start(|| {
+    ///         println!("thread started");
+    ///     })
+    ///     .build();
+    /// # }
+    /// ```
+    #[cfg(not(loom))]
+    pub fn on_thread_start<F>(&mut self, f: F) -> &mut Self
+    where
+        F: Fn() + Send + Sync + 'static,
+    {
+        self.after_start = Some(Arc::new(f));
+        self
+    }
+
+    /// Executes function `f` before each thread stops.
+    ///
+    /// This is intended for bookkeeping and monitoring use cases.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use tokio::runtime;
+    ///
+    /// # pub fn main() {
+    /// let runtime = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .on_thread_stop(|| {
+    ///         println!("thread stopping");
+    ///     })
+    ///     .build();
+    /// # }
+    /// ```
+    #[cfg(not(loom))]
+    pub fn on_thread_stop<F>(&mut self, f: F) -> &mut Self
+    where
+        F: Fn() + Send + Sync + 'static,
+    {
+        self.before_stop = Some(Arc::new(f));
+        self
+    }
+
+    /// Creates the configured `Runtime`.
+    ///
+    /// The returned `Runtime` instance is ready to spawn tasks.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::runtime::Builder;
+    ///
+    /// let mut rt = Builder::new().build().unwrap();
+    ///
+    /// rt.block_on(async {
+    ///     println!("Hello from the Tokio runtime");
+    /// });
+    /// ```
+    pub fn build(&mut self) -> io::Result<Runtime> {
+        match self.kind {
+            Kind::Shell => self.build_shell_runtime(),
+            #[cfg(feature = "rt-core")]
+            Kind::Basic => self.build_basic_runtime(),
+            #[cfg(feature = "rt-threaded")]
+            Kind::ThreadPool => self.build_threaded_runtime(),
+        }
+    }
+
+    fn build_shell_runtime(&mut self) -> io::Result<Runtime> {
+        use crate::runtime::Kind;
+
+        let clock = time::create_clock();
+
+        // Create I/O driver
+        let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+        let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+
+        let spawner = Spawner::Shell;
+
+        let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+        let blocking_spawner = blocking_pool.spawner().clone();
+
+        Ok(Runtime {
+            kind: Kind::Shell(Shell::new(driver)),
+            handle: Handle {
+                spawner,
+                io_handle,
+                time_handle,
+                clock,
+                blocking_spawner,
+            },
+            blocking_pool,
+        })
+    }
+}
+
+cfg_io_driver! {
+    impl Builder {
+        /// Enables the I/O driver.
+        ///
+        /// Doing this enables using net, process, signal, and some I/O types on
+        /// the runtime.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime;
+        ///
+        /// let rt = runtime::Builder::new()
+        ///     .enable_io()
+        ///     .build()
+        ///     .unwrap();
+        /// ```
+        pub fn enable_io(&mut self) -> &mut Self {
+            self.enable_io = true;
+            self
+        }
+    }
+}
+
+cfg_time! {
+    impl Builder {
+        /// Enables the time driver.
+        ///
+        /// Doing this enables using `tokio::time` on the runtime.
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime;
+        ///
+        /// let rt = runtime::Builder::new()
+        ///     .enable_time()
+        ///     .build()
+        ///     .unwrap();
+        /// ```
+        pub fn enable_time(&mut self) -> &mut Self {
+            self.enable_time = true;
+            self
+        }
+    }
+}
+
+cfg_rt_core! {
+    impl Builder {
+        /// Sets runtime to use a simpler scheduler that runs all tasks on the current thread.
+        ///
+        /// The executor and all necessary drivers will be run on the current
+        /// thread during `block_on` calls.
+        ///
+        /// See also [the module level documentation][1], which has a section on scheduler
+        /// types.
+        ///
+        /// [1]: index.html#runtime-configurations
+        pub fn basic_scheduler(&mut self) -> &mut Self {
+            self.kind = Kind::Basic;
+            self
+        }
+
+        fn build_basic_runtime(&mut self) -> io::Result<Runtime> {
+            use crate::runtime::{BasicScheduler, Kind};
+
+            let clock = time::create_clock();
+
+            // Create I/O driver
+            let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+
+            let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+
+            // And now put a single-threaded scheduler on top of the timer. When
+            // there are no futures ready to do something, it'll let the timer or
+            // the reactor generate some new stimuli for the futures to continue
+            // making progress.
+            let scheduler = BasicScheduler::new(driver);
+            let spawner = Spawner::Basic(scheduler.spawner().clone());
+
+            // Blocking pool
+            let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+            let blocking_spawner = blocking_pool.spawner().clone();
+
+            Ok(Runtime {
+                kind: Kind::Basic(scheduler),
+                handle: Handle {
+                    spawner,
+                    io_handle,
+                    time_handle,
+                    clock,
+                    blocking_spawner,
+                },
+                blocking_pool,
+            })
+        }
+    }
+}
+
+cfg_rt_threaded! {
+    impl Builder {
+        /// Sets runtime to use a multi-threaded scheduler for executing tasks.
+        ///
+        /// See also [the module level documentation][1], which has a section on scheduler
+        /// types.
+        ///
+        /// [1]: index.html#runtime-configurations
+        pub fn threaded_scheduler(&mut self) -> &mut Self {
+            self.kind = Kind::ThreadPool;
+            self
+        }
+
+        fn build_threaded_runtime(&mut self) -> io::Result<Runtime> {
+            use crate::runtime::{Kind, ThreadPool};
+            use crate::runtime::park::Parker;
+
+            let core_threads = self.core_threads.unwrap_or_else(crate::loom::sys::num_cpus);
+            assert!(core_threads <= self.max_threads, "The number of core threads cannot exceed the max thread limit");
+
+            let clock = time::create_clock();
+
+            let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+            let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+            let (scheduler, launch) = ThreadPool::new(core_threads, Parker::new(driver));
+            let spawner = Spawner::ThreadPool(scheduler.spawner().clone());
+
+            // Create the blocking pool
+            let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+            let blocking_spawner = blocking_pool.spawner().clone();
+
+            // Create the runtime handle
+            let handle = Handle {
+                spawner,
+                io_handle,
+                time_handle,
+                clock,
+                blocking_spawner,
+            };
+
+            // Spawn the thread pool workers
+            handle.enter(|| launch.launch());
+
+            Ok(Runtime {
+                kind: Kind::ThreadPool(scheduler),
+                handle,
+                blocking_pool,
+            })
+        }
+    }
+}
+
+impl Default for Builder {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl fmt::Debug for Builder {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Builder")
+            .field("kind", &self.kind)
+            .field("core_threads", &self.core_threads)
+            .field("max_threads", &self.max_threads)
+            .field("thread_name", &self.thread_name)
+            .field("thread_stack_size", &self.thread_stack_size)
+            .field("after_start", &self.after_start.as_ref().map(|_| "..."))
+            .field("before_stop", &self.before_stop.as_ref().map(|_| "..."))
+            .finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/context.rs b/third_party/rust/tokio/src/runtime/context.rs
new file mode 100644
index 0000000000..4af2df23eb
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/context.rs
@@ -0,0 +1,73 @@
+//! Thread local runtime context
+use crate::runtime::Handle;
+
+use std::cell::RefCell;
+
+thread_local! {
+    static CONTEXT: RefCell<Option<Handle>> = RefCell::new(None)
+}
+
+pub(crate) fn current() -> Option<Handle> {
+    CONTEXT.with(|ctx| ctx.borrow().clone())
+}
+
+cfg_io_driver! {
+    pub(crate) fn io_handle() -> crate::runtime::io::Handle {
+        CONTEXT.with(|ctx| match *ctx.borrow() {
+            Some(ref ctx) => ctx.io_handle.clone(),
+            None => Default::default(),
+        })
+    }
+}
+
+cfg_time! {
+    pub(crate) fn time_handle() -> crate::runtime::time::Handle {
+        CONTEXT.with(|ctx| match *ctx.borrow() {
+            Some(ref ctx) => ctx.time_handle.clone(),
+            None => Default::default(),
+        })
+    }
+
+    cfg_test_util! {
+        pub(crate) fn clock() -> Option<crate::runtime::time::Clock> {
+            CONTEXT.with(|ctx| match *ctx.borrow() {
+                Some(ref ctx) => Some(ctx.clock.clone()),
+                None => None,
+            })
+        }
+    }
+}
+
+cfg_rt_core! {
+    pub(crate) fn spawn_handle() -> Option<crate::runtime::Spawner> {
+        CONTEXT.with(|ctx| match *ctx.borrow() {
+            Some(ref ctx) => Some(ctx.spawner.clone()),
+            None => None,
+        })
+    }
+}
+
+/// Sets this [`Handle`] as the current active [`Handle`].
+///
+/// [`Handle`]: struct@Handle
+pub(crate) fn enter<F, R>(new: Handle, f: F) -> R
+where
+    F: FnOnce() -> R,
+{
+    struct DropGuard(Option<Handle>);
+
+    impl Drop for DropGuard {
+        fn drop(&mut self) {
+            CONTEXT.with(|ctx| {
+                *ctx.borrow_mut() = self.0.take();
+            });
+        }
+    }
+
+    let _guard = CONTEXT.with(|ctx| {
+        let old = ctx.borrow_mut().replace(new);
+        DropGuard(old)
+    });
+
+    f()
+}
diff --git a/third_party/rust/tokio/src/runtime/enter.rs b/third_party/rust/tokio/src/runtime/enter.rs
new file mode 100644
index 0000000000..afdb67a3b7
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/enter.rs
@@ -0,0 +1,162 @@
+use std::cell::{Cell, RefCell};
+use std::fmt;
+use std::marker::PhantomData;
+
+thread_local!(static ENTERED: Cell<bool> = Cell::new(false));
+
+/// Represents an executor context.
+pub(crate) struct Enter {
+    _p: PhantomData<RefCell<()>>,
+}
+
+/// Marks the current thread as being within the dynamic extent of an
+/// executor.
+pub(crate) fn enter() -> Enter {
+    if let Some(enter) = try_enter() {
+        return enter;
+    }
+
+    panic!(
+        "Cannot start a runtime from within a runtime. This happens \
+         because a function (like `block_on`) attempted to block the \
+         current thread while the thread is being used to drive \
+         asynchronous tasks."
+    );
+}
+
+/// Tries to enter a runtime context, returns `None` if already in a runtime
+/// context.
+pub(crate) fn try_enter() -> Option<Enter> {
+    ENTERED.with(|c| {
+        if c.get() {
+            None
+        } else {
+            c.set(true);
+            Some(Enter { _p: PhantomData })
+        }
+    })
+}
+
+// Forces the current "entered" state to be cleared while the closure
+// is executed.
+//
+// # Warning
+//
+// This is hidden for a reason. Do not use without fully understanding
+// executors. Misusing can easily cause your program to deadlock.
+#[cfg(all(feature = "rt-threaded", feature = "blocking"))]
+pub(crate) fn exit<F: FnOnce() -> R, R>(f: F) -> R {
+    // Reset in case the closure panics
+    struct Reset;
+    impl Drop for Reset {
+        fn drop(&mut self) {
+            ENTERED.with(|c| {
+                c.set(true);
+            });
+        }
+    }
+
+    ENTERED.with(|c| {
+        debug_assert!(c.get());
+        c.set(false);
+    });
+
+    let reset = Reset;
+    let ret = f();
+    std::mem::forget(reset);
+
+    ENTERED.with(|c| {
+        assert!(!c.get(), "closure claimed permanent executor");
+        c.set(true);
+    });
+
+    ret
+}
+
+cfg_blocking_impl! {
+    use crate::park::ParkError;
+    use std::time::Duration;
+
+    impl Enter {
+        /// Blocks the thread on the specified future, returning the value with
+        /// which that future completes.
+        pub(crate) fn block_on<F>(&mut self, mut f: F) -> Result<F::Output, ParkError>
+        where
+            F: std::future::Future,
+        {
+            use crate::park::{CachedParkThread, Park};
+            use std::pin::Pin;
+            use std::task::Context;
+            use std::task::Poll::Ready;
+
+            let mut park = CachedParkThread::new();
+            let waker = park.get_unpark()?.into_waker();
+            let mut cx = Context::from_waker(&waker);
+
+            // `block_on` takes ownership of `f`. Once it is pinned here, the
+            // original `f` binding can no longer be accessed, making the
+            // pinning safe.
+            let mut f = unsafe { Pin::new_unchecked(&mut f) };
+
+            loop {
+                if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) {
+                    return Ok(v);
+                }
+
+                park.park()?;
+            }
+        }
+
+        /// Blocks the thread on the specified future for **at most** `timeout`.
+        ///
+        /// If the future completes before `timeout`, the result is returned. If
+        /// `timeout` elapses, then `Err` is returned.
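+        ///
+        /// A sketch of the call shape (crate-internal; `enter()` from this
+        /// module and `some_future` are assumptions):
+        ///
+        /// ```ignore
+        /// let mut e = enter();
+        /// match e.block_on_timeout(some_future, Duration::from_millis(50)) {
+        ///     Ok(output) => { /* completed within the timeout */ }
+        ///     Err(_) => { /* timed out (or the park failed) */ }
+        /// }
+        /// ```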
+ pub(crate) fn block_on_timeout<F>(&mut self, mut f: F, timeout: Duration) -> Result<F::Output, ParkError> + where + F: std::future::Future, + { + use crate::park::{CachedParkThread, Park}; + use std::pin::Pin; + use std::task::Context; + use std::task::Poll::Ready; + use std::time::Instant; + + let mut park = CachedParkThread::new(); + let waker = park.get_unpark()?.into_waker(); + let mut cx = Context::from_waker(&waker); + + // `block_on` takes ownership of `f`. Once it is pinned here, the original `f` binding can + // no longer be accessed, making the pinning safe. + let mut f = unsafe { Pin::new_unchecked(&mut f) }; + let when = Instant::now() + timeout; + + loop { + if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) { + return Ok(v); + } + + let now = Instant::now(); + + if now >= when { + return Err(()); + } + + park.park_timeout(when - now)?; + } + } + } +} + +impl fmt::Debug for Enter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Enter").finish() + } +} + +impl Drop for Enter { + fn drop(&mut self) { + ENTERED.with(|c| { + assert!(c.get()); + c.set(false); + }); + } +} diff --git a/third_party/rust/tokio/src/runtime/handle.rs b/third_party/rust/tokio/src/runtime/handle.rs new file mode 100644 index 0000000000..db53543e85 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/handle.rs @@ -0,0 +1,140 @@ +use crate::runtime::{blocking, context, io, time, Spawner}; +use std::{error, fmt}; + +cfg_rt_core! { + use crate::task::JoinHandle; + + use std::future::Future; +} + +/// Handle to the runtime. +/// +/// The handle is internally reference-counted and can be freely cloned. A handle can be +/// obtained using the [`Runtime::handle`] method. +/// +/// [`Runtime::handle`]: crate::runtime::Runtime::handle() +#[derive(Debug, Clone)] +pub struct Handle { + pub(super) spawner: Spawner, + + /// Handles to the I/O drivers + pub(super) io_handle: io::Handle, + + /// Handles to the time drivers + pub(super) time_handle: time::Handle, + + /// Source of `Instant::now()` + pub(super) clock: time::Clock, + + /// Blocking pool spawner + pub(super) blocking_spawner: blocking::Spawner, +} + +impl Handle { + /// Enter the runtime context. + pub fn enter<F, R>(&self, f: F) -> R + where + F: FnOnce() -> R, + { + context::enter(self.clone(), f) + } + + /// Returns a Handle view over the currently running Runtime + /// + /// # Panic + /// + /// This will panic if called outside the context of a Tokio runtime. + /// + /// # Examples + /// + /// This can be used to obtain the handle of the surrounding runtime from an async + /// block or function running on that runtime. + /// + /// ``` + /// # use tokio::runtime::Runtime; + /// # fn dox() { + /// # let rt = Runtime::new().unwrap(); + /// # rt.spawn(async { + /// use tokio::runtime::Handle; + /// + /// // Inside an async block or function. + /// let handle = Handle::current(); + /// handle.spawn(async { + /// println!("now running in the existing Runtime"); + /// }) + /// # }); + /// # } + /// ``` + pub fn current() -> Self { + context::current().expect("not currently running on the Tokio runtime.") + } + + /// Returns a Handle view over the currently running Runtime + /// + /// Returns an error if no Runtime has been started + /// + /// Contrary to `current`, this never panics + pub fn try_current() -> Result<Self, TryCurrentError> { + context::current().ok_or(TryCurrentError(())) + } +} + +cfg_rt_core! { + impl Handle { + /// Spawns a future onto the Tokio runtime. 
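+        ///
+        /// The returned `JoinHandle` resolves to the task's output once it
+        /// completes (an illustrative sketch; `handle` is assumed to be a
+        /// runtime `Handle`):
+        ///
+        /// ```ignore
+        /// let join = handle.spawn(async { 1 + 2 });
+        /// // Awaiting the handle (from async code on the runtime) yields
+        /// // `Ok(3)`, or `Err(JoinError)` if the task panicked or was
+        /// // cancelled.
+        /// ```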
+        ///
+        /// This spawns the given future onto the runtime's executor, usually a
+        /// thread pool. The thread pool is then responsible for polling the future
+        /// until it completes.
+        ///
+        /// See [module level][mod] documentation for more details.
+        ///
+        /// [mod]: index.html
+        ///
+        /// # Examples
+        ///
+        /// ```
+        /// use tokio::runtime::Runtime;
+        ///
+        /// # fn dox() {
+        /// // Create the runtime
+        /// let rt = Runtime::new().unwrap();
+        /// let handle = rt.handle();
+        ///
+        /// // Spawn a future onto the runtime
+        /// handle.spawn(async {
+        ///     println!("now running on a worker thread");
+        /// });
+        /// # }
+        /// ```
+        ///
+        /// # Panics
+        ///
+        /// This function panics if the spawn fails. Failure occurs if the executor
+        /// is currently at capacity and is unable to spawn a new future.
+        pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+        where
+            F: Future + Send + 'static,
+            F::Output: Send + 'static,
+        {
+            self.spawner.spawn(future)
+        }
+    }
+}
+
+/// Error returned by `try_current` when no Runtime has been started.
+pub struct TryCurrentError(());
+
+impl fmt::Debug for TryCurrentError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("TryCurrentError").finish()
+    }
+}
+
+impl fmt::Display for TryCurrentError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("no tokio Runtime has been initialized")
+    }
+}
+
+impl error::Error for TryCurrentError {}
diff --git a/third_party/rust/tokio/src/runtime/io.rs b/third_party/rust/tokio/src/runtime/io.rs
new file mode 100644
index 0000000000..6a0953af85
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/io.rs
@@ -0,0 +1,63 @@
+//! Abstracts out the APIs necessary to `Runtime` for integrating the I/O
+//! driver. When the `io-driver` feature flag is **not** enabled, these APIs
+//! are shells. This isolates the complexity of dealing with conditional
+//! compilation.
+
+/// Re-exported for convenience.
+pub(crate) use std::io::Result;
+
+pub(crate) use variant::*;
+
+#[cfg(feature = "io-driver")]
+mod variant {
+    use crate::io::driver;
+    use crate::park::{Either, ParkThread};
+
+    use std::io;
+
+    /// The driver value the runtime passes to the `timer` layer.
+    ///
+    /// When the `io-driver` feature is enabled, this is the "real" I/O driver
+    /// backed by Mio. Without the `io-driver` feature, this is a thread parker
+    /// backed by a condition variable.
+    pub(crate) type Driver = Either<driver::Driver, ParkThread>;
+
+    /// The handle the runtime stores for future use.
+    ///
+    /// When the `io-driver` feature is **not** enabled, this is `()`.
+ pub(crate) type Handle = Option<driver::Handle>; + + pub(crate) fn create_driver(enable: bool) -> io::Result<(Driver, Handle)> { + #[cfg(loom)] + assert!(!enable); + + if enable { + let driver = driver::Driver::new()?; + let handle = driver.handle(); + + Ok((Either::A(driver), Some(handle))) + } else { + let driver = ParkThread::new(); + Ok((Either::B(driver), None)) + } + } +} + +#[cfg(not(feature = "io-driver"))] +mod variant { + use crate::park::ParkThread; + + use std::io; + + /// I/O is not enabled, use a condition variable based parker + pub(crate) type Driver = ParkThread; + + /// There is no handle + pub(crate) type Handle = (); + + pub(crate) fn create_driver(_enable: bool) -> io::Result<(Driver, Handle)> { + let driver = ParkThread::new(); + + Ok((driver, ())) + } +} diff --git a/third_party/rust/tokio/src/runtime/mod.rs b/third_party/rust/tokio/src/runtime/mod.rs new file mode 100644 index 0000000000..36b2b442ee --- /dev/null +++ b/third_party/rust/tokio/src/runtime/mod.rs @@ -0,0 +1,494 @@ +//! The Tokio runtime. +//! +//! Unlike other Rust programs, asynchronous applications require +//! runtime support. In particular, the following runtime services are +//! necessary: +//! +//! * An **I/O event loop**, called the driver, which drives I/O resources and +//! dispatches I/O events to tasks that depend on them. +//! * A **scheduler** to execute [tasks] that use these I/O resources. +//! * A **timer** for scheduling work to run after a set period of time. +//! +//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing +//! them to be started, shut down, and configured together. However, most +//! applications won't need to use [`Runtime`] directly. Instead, they can +//! use the [`tokio::main`] attribute macro, which creates a [`Runtime`] under +//! the hood. +//! +//! # Usage +//! +//! Most applications will use the [`tokio::main`] attribute macro. +//! +//! ```no_run +//! use tokio::net::TcpListener; +//! use tokio::prelude::*; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), Box<dyn std::error::Error>> { +//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?; +//! +//! loop { +//! let (mut socket, _) = listener.accept().await?; +//! +//! tokio::spawn(async move { +//! let mut buf = [0; 1024]; +//! +//! // In a loop, read data from the socket and write the data back. +//! loop { +//! let n = match socket.read(&mut buf).await { +//! // socket closed +//! Ok(n) if n == 0 => return, +//! Ok(n) => n, +//! Err(e) => { +//! println!("failed to read from socket; err = {:?}", e); +//! return; +//! } +//! }; +//! +//! // Write the data back +//! if let Err(e) = socket.write_all(&buf[0..n]).await { +//! println!("failed to write to socket; err = {:?}", e); +//! return; +//! } +//! } +//! }); +//! } +//! } +//! ``` +//! +//! From within the context of the runtime, additional tasks are spawned using +//! the [`tokio::spawn`] function. Futures spawned using this function will be +//! executed on the same thread pool used by the [`Runtime`]. +//! +//! A [`Runtime`] instance can also be used directly. +//! +//! ```no_run +//! use tokio::net::TcpListener; +//! use tokio::prelude::*; +//! use tokio::runtime::Runtime; +//! +//! fn main() -> Result<(), Box<dyn std::error::Error>> { +//! // Create the runtime +//! let mut rt = Runtime::new()?; +//! +//! // Spawn the root task +//! rt.block_on(async { +//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?; +//! +//! loop { +//! let (mut socket, _) = listener.accept().await?; +//! +//! 
tokio::spawn(async move {
+//!                 let mut buf = [0; 1024];
+//!
+//!                 // In a loop, read data from the socket and write the data back.
+//!                 loop {
+//!                     let n = match socket.read(&mut buf).await {
+//!                         // socket closed
+//!                         Ok(n) if n == 0 => return,
+//!                         Ok(n) => n,
+//!                         Err(e) => {
+//!                             println!("failed to read from socket; err = {:?}", e);
+//!                             return;
+//!                         }
+//!                     };
+//!
+//!                     // Write the data back
+//!                     if let Err(e) = socket.write_all(&buf[0..n]).await {
+//!                         println!("failed to write to socket; err = {:?}", e);
+//!                         return;
+//!                     }
+//!                 }
+//!             });
+//!         }
+//!     })
+//! }
+//! ```
+//!
+//! ## Runtime Configurations
+//!
+//! Tokio provides multiple task scheduling strategies, suitable for different
+//! applications. The [runtime builder] or `#[tokio::main]` attribute may be
+//! used to select which scheduler to use.
+//!
+//! #### Basic Scheduler
+//!
+//! The basic scheduler provides a _single-threaded_ future executor. All tasks
+//! will be created and executed on the current thread. The basic scheduler
+//! requires the `rt-core` feature flag, and can be selected using the
+//! [`Builder::basic_scheduler`] method:
+//! ```
+//! use tokio::runtime;
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let basic_rt = runtime::Builder::new()
+//!     .basic_scheduler()
+//!     .build()?;
+//! # Ok(()) }
+//! ```
+//!
+//! If the `rt-core` feature is enabled and `rt-threaded` is not,
+//! [`Runtime::new`] will return a basic scheduler runtime by default.
+//!
+//! #### Threaded Scheduler
+//!
+//! The threaded scheduler executes futures on a _thread pool_, using a
+//! work-stealing strategy. By default, it will start a worker thread for each
+//! CPU core available on the system. This tends to be the ideal configuration
+//! for most applications. The threaded scheduler requires the `rt-threaded`
+//! feature flag, and can be selected using the [`Builder::threaded_scheduler`]
+//! method:
+//! ```
+//! use tokio::runtime;
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let threaded_rt = runtime::Builder::new()
+//!     .threaded_scheduler()
+//!     .build()?;
+//! # Ok(()) }
+//! ```
+//!
+//! If the `rt-threaded` feature flag is enabled, [`Runtime::new`] will return a
+//! threaded scheduler runtime by default.
+//!
+//! Most applications should use the threaded scheduler, except in some niche
+//! use-cases, such as when only a single thread is required.
+//!
+//! #### Resource drivers
+//!
+//! When configuring a runtime by hand, no resource drivers are enabled by
+//! default. In this case, attempting to use networking types or time types will
+//! fail. In order to enable these types, the resource drivers must be enabled.
+//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a
+//! shorthand, [`Builder::enable_all`] enables both resource drivers.
+//!
+//! ## Lifetime of spawned threads
+//!
+//! The runtime may spawn threads depending on its configuration and usage. The
+//! threaded scheduler spawns threads to schedule tasks, and calls to
+//! `spawn_blocking` spawn threads to run blocking operations.
+//!
+//! While the `Runtime` is active, threads may shut down after periods of being
+//! idle. Once `Runtime` is dropped, all runtime threads are forcibly shut down.
+//! Any tasks that have not yet completed will be dropped.
+//!
+//! [tasks]: crate::task
+//! [`Runtime`]: Runtime
+//! [`tokio::spawn`]: crate::spawn
+//! [`tokio::main`]: ../attr.main.html
+//! [runtime builder]: crate::runtime::Builder
+//!
[`Runtime::new`]: crate::runtime::Runtime::new +//! [`Builder::basic_scheduler`]: crate::runtime::Builder::basic_scheduler +//! [`Builder::threaded_scheduler`]: crate::runtime::Builder::threaded_scheduler +//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io +//! [`Builder::enable_time`]: crate::runtime::Builder::enable_time +//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all + +// At the top due to macros +#[cfg(test)] +#[macro_use] +mod tests; + +pub(crate) mod context; + +cfg_rt_core! { + mod basic_scheduler; + use basic_scheduler::BasicScheduler; + + pub(crate) mod task; +} + +mod blocking; +use blocking::BlockingPool; + +cfg_blocking_impl! { + #[allow(unused_imports)] + pub(crate) use blocking::{spawn_blocking, try_spawn_blocking}; +} + +mod builder; +pub use self::builder::Builder; + +pub(crate) mod enter; +use self::enter::enter; + +mod handle; +pub use self::handle::{Handle, TryCurrentError}; + +mod io; + +cfg_rt_threaded! { + mod park; + use park::Parker; +} + +mod shell; +use self::shell::Shell; + +mod spawner; +use self::spawner::Spawner; + +mod time; + +cfg_rt_threaded! { + mod queue; + + pub(crate) mod thread_pool; + use self::thread_pool::ThreadPool; +} + +cfg_rt_core! { + use crate::task::JoinHandle; +} + +use std::future::Future; +use std::time::Duration; + +/// The Tokio runtime. +/// +/// The runtime provides an I/O [driver], task scheduler, [timer], and blocking +/// pool, necessary for running asynchronous tasks. +/// +/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However, +/// most users will use the `#[tokio::main]` annotation on their entry point instead. +/// +/// See [module level][mod] documentation for more details. +/// +/// # Shutdown +/// +/// Shutting down the runtime is done by dropping the value. The current thread +/// will block until the shut down operation has completed. +/// +/// * Drain any scheduled work queues. +/// * Drop any futures that have not yet completed. +/// * Drop the reactor. +/// +/// Once the reactor has dropped, any outstanding I/O resources bound to +/// that reactor will no longer function. Calling any method on them will +/// result in an error. +/// +/// [driver]: crate::io::driver +/// [timer]: crate::time +/// [mod]: index.html +/// [`new`]: #method.new +/// [`Builder`]: struct@Builder +/// [`tokio::run`]: fn@run +#[derive(Debug)] +pub struct Runtime { + /// Task executor + kind: Kind, + + /// Handle to runtime, also contains driver handles + handle: Handle, + + /// Blocking pool handle, used to signal shutdown + blocking_pool: BlockingPool, +} + +/// The runtime executor is either a thread-pool or a current-thread executor. +#[derive(Debug)] +enum Kind { + /// Not able to execute concurrent tasks. This variant is mostly used to get + /// access to the driver handles. + Shell(Shell), + + /// Execute all tasks on the current-thread. + #[cfg(feature = "rt-core")] + Basic(BasicScheduler<time::Driver>), + + /// Execute tasks across multiple threads. + #[cfg(feature = "rt-threaded")] + ThreadPool(ThreadPool), +} + +/// After thread starts / before thread stops +type Callback = std::sync::Arc<dyn Fn() + Send + Sync>; + +impl Runtime { + /// Create a new runtime instance with default configuration values. + /// + /// This results in a scheduler, I/O driver, and time driver being + /// initialized. 
The type of scheduler used depends on what feature flags + /// are enabled: if the `rt-threaded` feature is enabled, the [threaded + /// scheduler] is used, while if only the `rt-core` feature is enabled, the + /// [basic scheduler] is used instead. + /// + /// If the threaded scheduler is selected, it will not spawn + /// any worker threads until it needs to, i.e. tasks are scheduled to run. + /// + /// Most applications will not need to call this function directly. Instead, + /// they will use the [`#[tokio::main]` attribute][main]. When more complex + /// configuration is necessary, the [runtime builder] may be used. + /// + /// See [module level][mod] documentation for more details. + /// + /// # Examples + /// + /// Creating a new `Runtime` with default configuration values. + /// + /// ``` + /// use tokio::runtime::Runtime; + /// + /// let rt = Runtime::new() + /// .unwrap(); + /// + /// // Use the runtime... + /// ``` + /// + /// [mod]: index.html + /// [main]: ../../tokio_macros/attr.main.html + /// [threaded scheduler]: index.html#threaded-scheduler + /// [basic scheduler]: index.html#basic-scheduler + /// [runtime builder]: crate::runtime::Builder + pub fn new() -> io::Result<Runtime> { + #[cfg(feature = "rt-threaded")] + let ret = Builder::new().threaded_scheduler().enable_all().build(); + + #[cfg(all(not(feature = "rt-threaded"), feature = "rt-core"))] + let ret = Builder::new().basic_scheduler().enable_all().build(); + + #[cfg(not(feature = "rt-core"))] + let ret = Builder::new().enable_all().build(); + + ret + } + + /// Spawn a future onto the Tokio runtime. + /// + /// This spawns the given future onto the runtime's executor, usually a + /// thread pool. The thread pool is then responsible for polling the future + /// until it completes. + /// + /// See [module level][mod] documentation for more details. + /// + /// [mod]: index.html + /// + /// # Examples + /// + /// ``` + /// use tokio::runtime::Runtime; + /// + /// # fn dox() { + /// // Create the runtime + /// let rt = Runtime::new().unwrap(); + /// + /// // Spawn a future onto the runtime + /// rt.spawn(async { + /// println!("now running on a worker thread"); + /// }); + /// # } + /// ``` + /// + /// # Panics + /// + /// This function panics if the spawn fails. Failure occurs if the executor + /// is currently at capacity and is unable to spawn a new future. + #[cfg(feature = "rt-core")] + pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output> + where + F: Future + Send + 'static, + F::Output: Send + 'static, + { + match &self.kind { + Kind::Shell(_) => panic!("task execution disabled"), + #[cfg(feature = "rt-threaded")] + Kind::ThreadPool(exec) => exec.spawn(future), + Kind::Basic(exec) => exec.spawn(future), + } + } + + /// Run a future to completion on the Tokio runtime. This is the runtime's + /// entry point. + /// + /// This runs the given future on the runtime, blocking until it is + /// complete, and yielding its resolved result. Any tasks or timers which + /// the future spawns internally will be executed on the runtime. + /// + /// This method should not be called from an asynchronous context. + /// + /// # Panics + /// + /// This function panics if the executor is at capacity, if the provided + /// future panics, or if called within an asynchronous execution context. 
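+    ///
+    /// # Examples
+    ///
+    /// A minimal illustration:
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    ///
+    /// // Create the runtime
+    /// let mut rt = Runtime::new().unwrap();
+    ///
+    /// // Execute the future, blocking the current thread until completion
+    /// let out = rt.block_on(async { 6 * 7 });
+    /// assert_eq!(out, 42);
+    /// ```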
+    pub fn block_on<F: Future>(&mut self, future: F) -> F::Output {
+        let kind = &mut self.kind;
+
+        self.handle.enter(|| match kind {
+            Kind::Shell(exec) => exec.block_on(future),
+            #[cfg(feature = "rt-core")]
+            Kind::Basic(exec) => exec.block_on(future),
+            #[cfg(feature = "rt-threaded")]
+            Kind::ThreadPool(exec) => exec.block_on(future),
+        })
+    }
+
+    /// Enter the runtime context.
+    pub fn enter<F, R>(&self, f: F) -> R
+    where
+        F: FnOnce() -> R,
+    {
+        self.handle.enter(f)
+    }
+
+    /// Return a handle to the runtime's spawner.
+    ///
+    /// The returned handle can be used to spawn tasks that run on this runtime, and can
+    /// be cloned to allow moving the `Handle` to other threads.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    ///
+    /// let rt = Runtime::new()
+    ///     .unwrap();
+    ///
+    /// let handle = rt.handle();
+    ///
+    /// handle.spawn(async { println!("hello"); });
+    /// ```
+    pub fn handle(&self) -> &Handle {
+        &self.handle
+    }
+
+    /// Shutdown the runtime, waiting for at most `duration` for all spawned
+    /// tasks to shut down.
+    ///
+    /// Usually, dropping a `Runtime` handle is sufficient as tasks are able to
+    /// shut down in a timely fashion. However, dropping a `Runtime` will wait
+    /// indefinitely for all tasks to terminate, and there are cases where a long
+    /// blocking task has been spawned which can block dropping `Runtime`.
+    ///
+    /// In this case, calling `shutdown_timeout` with an explicit wait timeout
+    /// can work. The `shutdown_timeout` will signal all tasks to shut down and
+    /// will wait for at most `duration` for all spawned tasks to terminate. If
+    /// `duration` elapses before all tasks are dropped, the function returns and
+    /// outstanding tasks are potentially leaked.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    /// use tokio::task;
+    ///
+    /// use std::thread;
+    /// use std::time::Duration;
+    ///
+    /// fn main() {
+    ///     let mut runtime = Runtime::new().unwrap();
+    ///
+    ///     runtime.block_on(async move {
+    ///         task::spawn_blocking(move || {
+    ///             thread::sleep(Duration::from_secs(10_000));
+    ///         });
+    ///     });
+    ///
+    ///     runtime.shutdown_timeout(Duration::from_millis(100));
+    /// }
+    /// ```
+    pub fn shutdown_timeout(self, duration: Duration) {
+        let Runtime {
+            mut blocking_pool, ..
+        } = self;
+        blocking_pool.shutdown(Some(duration));
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/park.rs b/third_party/rust/tokio/src/runtime/park.rs
new file mode 100644
index 0000000000..ee437d1d94
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/park.rs
@@ -0,0 +1,245 @@
+//! Parks the runtime.
+//!
+//! A combination of the various resource driver park handles.
+
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::{Arc, Condvar, Mutex};
+use crate::loom::thread;
+use crate::park::{Park, Unpark};
+use crate::runtime::time;
+use crate::util::TryLock;
+
+use std::sync::atomic::Ordering::SeqCst;
+use std::time::Duration;
+
+pub(crate) struct Parker {
+    inner: Arc<Inner>,
+}
+
+pub(crate) struct Unparker {
+    inner: Arc<Inner>,
+}
+
+struct Inner {
+    /// Avoids entering the park if possible
+    state: AtomicUsize,
+
+    /// Used to coordinate access to the driver / condvar
+    mutex: Mutex<()>,
+
+    /// Condvar to block on if the driver is unavailable.
+    condvar: Condvar,
+
+    /// Resource (I/O, time, ...)
driver + shared: Arc<Shared>, +} + +const EMPTY: usize = 0; +const PARKED_CONDVAR: usize = 1; +const PARKED_DRIVER: usize = 2; +const NOTIFIED: usize = 3; + +/// Shared across multiple Parker handles +struct Shared { + /// Shared driver. Only one thread at a time can use this + driver: TryLock<time::Driver>, + + /// Unpark handle + handle: <time::Driver as Park>::Unpark, +} + +impl Parker { + pub(crate) fn new(driver: time::Driver) -> Parker { + let handle = driver.unpark(); + + Parker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + mutex: Mutex::new(()), + condvar: Condvar::new(), + shared: Arc::new(Shared { + driver: TryLock::new(driver), + handle, + }), + }), + } + } +} + +impl Clone for Parker { + fn clone(&self) -> Parker { + Parker { + inner: Arc::new(Inner { + state: AtomicUsize::new(EMPTY), + mutex: Mutex::new(()), + condvar: Condvar::new(), + shared: self.inner.shared.clone(), + }), + } + } +} + +impl Park for Parker { + type Unpark = Unparker; + type Error = (); + + fn unpark(&self) -> Unparker { + Unparker { + inner: self.inner.clone(), + } + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.inner.park(); + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + // Only parking with zero is supported... + assert_eq!(duration, Duration::from_millis(0)); + + if let Some(mut driver) = self.inner.shared.driver.try_lock() { + driver.park_timeout(duration).map_err(|_| ()) + } else { + Ok(()) + } + } +} + +impl Unpark for Unparker { + fn unpark(&self) { + self.inner.unpark(); + } +} + +impl Inner { + /// Parks the current thread for at most `dur`. + fn park(&self) { + for _ in 0..3 { + // If we were previously notified then we consume this notification and + // return quickly. + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + return; + } + + thread::yield_now(); + } + + if let Some(mut driver) = self.shared.driver.try_lock() { + self.park_driver(&mut driver); + } else { + self.park_condvar(); + } + } + + fn park_condvar(&self) { + // Otherwise we need to coordinate going to sleep + let mut m = self.mutex.lock().unwrap(); + + match self + .state + .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) + { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read here, even though we know it will be `NOTIFIED`. + // This is because `unpark` may have been called again since we read + // `NOTIFIED` in the `compare_exchange` above. We must perform an + // acquire operation that synchronizes with that `unpark` to observe + // any writes it made before the call to unpark. To do that we must + // read from the write it made to `state`. + let old = self.state.swap(EMPTY, SeqCst); + debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + + return; + } + Err(actual) => panic!("inconsistent park state; actual = {}", actual), + } + + loop { + m = self.condvar.wait(m).unwrap(); + + if self + .state + .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) + .is_ok() + { + // got a notification + return; + } + + // spurious wakeup, go back to sleep + } + } + + fn park_driver(&self, driver: &mut time::Driver) { + match self + .state + .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) + { + Ok(_) => {} + Err(NOTIFIED) => { + // We must read here, even though we know it will be `NOTIFIED`. + // This is because `unpark` may have been called again since we read + // `NOTIFIED` in the `compare_exchange` above. 
We must perform an + // acquire operation that synchronizes with that `unpark` to observe + // any writes it made before the call to unpark. To do that we must + // read from the write it made to `state`. + let old = self.state.swap(EMPTY, SeqCst); + debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); + + return; + } + Err(actual) => panic!("inconsistent park state; actual = {}", actual), + } + + // TODO: don't unwrap + driver.park().unwrap(); + + match self.state.swap(EMPTY, SeqCst) { + NOTIFIED => {} // got a notification, hurray! + PARKED_DRIVER => {} // no notification, alas + n => panic!("inconsistent park_timeout state: {}", n), + } + } + + fn unpark(&self) { + // To ensure the unparked thread will observe any writes we made before + // this call, we must perform a release operation that `park` can + // synchronize with. To do that we must write `NOTIFIED` even if `state` + // is already `NOTIFIED`. That is why this must be a swap rather than a + // compare-and-swap that returns if it reads `NOTIFIED` on failure. + match self.state.swap(NOTIFIED, SeqCst) { + EMPTY => {} // no one was waiting + NOTIFIED => {} // already unparked + PARKED_CONDVAR => self.unpark_condvar(), + PARKED_DRIVER => self.unpark_driver(), + actual => panic!("inconsistent state in unpark; actual = {}", actual), + } + } + + fn unpark_condvar(&self) { + // There is a period between when the parked thread sets `state` to + // `PARKED` (or last checked `state` in the case of a spurious wake + // up) and when it actually waits on `cvar`. If we were to notify + // during this period it would be ignored and then when the parked + // thread went to sleep it would never wake up. Fortunately, it has + // `lock` locked at this stage so we can acquire `lock` to wait until + // it is ready to receive the notification. + // + // Releasing `lock` before the call to `notify_one` means that when the + // parked thread wakes it doesn't get woken only to have to wait for us + // to release `lock`. + drop(self.mutex.lock().unwrap()); + + self.condvar.notify_one() + } + + fn unpark_driver(&self) { + self.shared.handle.unpark(); + } +} diff --git a/third_party/rust/tokio/src/runtime/queue.rs b/third_party/rust/tokio/src/runtime/queue.rs new file mode 100644 index 0000000000..c654514bbc --- /dev/null +++ b/third_party/rust/tokio/src/runtime/queue.rs @@ -0,0 +1,630 @@ +//! Run-queue structures to support a work-stealing scheduler + +use crate::loom::cell::UnsafeCell; +use crate::loom::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize}; +use crate::loom::sync::{Arc, Mutex}; +use crate::runtime::task; + +use std::marker::PhantomData; +use std::mem::MaybeUninit; +use std::ptr::{self, NonNull}; +use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; + +/// Producer handle. May only be used from a single thread. +pub(super) struct Local<T: 'static> { + inner: Arc<Inner<T>>, +} + +/// Consumer handle. May be used from many threads. +pub(super) struct Steal<T: 'static>(Arc<Inner<T>>); + +/// Growable, MPMC queue used to inject new tasks into the scheduler and as an +/// overflow queue when the local, fixed-size, array queue overflows. +pub(super) struct Inject<T: 'static> { + /// Pointers to the head and tail of the queue + pointers: Mutex<Pointers>, + + /// Number of pending tasks in the queue. This helps prevent unnecessary + /// locking in the hot path. + len: AtomicUsize, + + _p: PhantomData<T>, +} + +pub(super) struct Inner<T: 'static> { + /// Concurrently updated by many threads. 
+    ///
+    /// Contains two `u16` values. The low `u16` is the "real" head of the
+    /// queue. The high `u16` is set by a stealer in the process of stealing
+    /// values. It represents the first value being stolen in the batch. `u16`
+    /// is used in order to distinguish between `head == tail` and
+    /// `head == tail - capacity`.
+    ///
+    /// When both `u16` values are the same, there is no active stealer.
+    ///
+    /// Tracking an in-progress stealer prevents a wrapping scenario.
+    head: AtomicU32,
+
+    /// Only updated by producer thread but read by many threads.
+    tail: AtomicU16,
+
+    /// Elements
+    buffer: Box<[UnsafeCell<MaybeUninit<task::Notified<T>>>]>,
+}
+
+struct Pointers {
+    /// True if the queue is closed
+    is_closed: bool,
+
+    /// Linked-list head
+    head: Option<NonNull<task::Header>>,
+
+    /// Linked-list tail
+    tail: Option<NonNull<task::Header>>,
+}
+
+unsafe impl<T> Send for Inner<T> {}
+unsafe impl<T> Sync for Inner<T> {}
+unsafe impl<T> Send for Inject<T> {}
+unsafe impl<T> Sync for Inject<T> {}
+
+#[cfg(not(loom))]
+const LOCAL_QUEUE_CAPACITY: usize = 256;
+
+// Shrink the size of the local queue when using loom. This shouldn't impact
+// logic, but allows loom to test more edge cases in a reasonable amount of
+// time.
+#[cfg(loom)]
+const LOCAL_QUEUE_CAPACITY: usize = 4;
+
+const MASK: usize = LOCAL_QUEUE_CAPACITY - 1;
+
+/// Create a new local run-queue
+pub(super) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
+    let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY);
+
+    for _ in 0..LOCAL_QUEUE_CAPACITY {
+        buffer.push(UnsafeCell::new(MaybeUninit::uninit()));
+    }
+
+    let inner = Arc::new(Inner {
+        head: AtomicU32::new(0),
+        tail: AtomicU16::new(0),
+        buffer: buffer.into(),
+    });
+
+    let local = Local {
+        inner: inner.clone(),
+    };
+
+    let remote = Steal(inner);
+
+    (remote, local)
+}
+
+impl<T> Local<T> {
+    /// Returns true if the queue has entries that can be stolen.
+    pub(super) fn is_stealable(&self) -> bool {
+        !self.inner.is_empty()
+    }
+
+    /// Pushes a task to the back of the local queue, skipping the LIFO slot.
+    pub(super) fn push_back(&mut self, mut task: task::Notified<T>, inject: &Inject<T>) {
+        let tail = loop {
+            let head = self.inner.head.load(Acquire);
+            let (steal, real) = unpack(head);
+
+            // safety: this is the **only** thread that updates this cell.
+            let tail = unsafe { self.inner.tail.unsync_load() };
+
+            if tail.wrapping_sub(steal) < LOCAL_QUEUE_CAPACITY as u16 {
+                // There is capacity for the task
+                break tail;
+            } else if steal != real {
+                // Concurrently stealing, this will free up capacity, so
+                // only push the new task onto the inject queue
+                inject.push(task);
+                return;
+            } else {
+                // Push the current task and half of the queue into the
+                // inject queue.
+                match self.push_overflow(task, real, tail, inject) {
+                    Ok(_) => return,
+                    // Lost the race, try again
+                    Err(v) => {
+                        task = v;
+                    }
+                }
+            }
+        };
+
+        // Map the position to a slot index.
+        let idx = tail as usize & MASK;
+
+        self.inner.buffer[idx].with_mut(|ptr| {
+            // Write the task to the slot
+            //
+            // Safety: There is only one producer and the above `if`
+            // condition ensures we don't touch a cell if there is a
+            // value, thus no consumer.
+            unsafe {
+                ptr::write((*ptr).as_mut_ptr(), task);
+            }
+        });
+
+        // Make the task available. Synchronizes with a load in
+        // `steal_into2`.
+        self.inner.tail.store(tail.wrapping_add(1), Release);
+    }
+
+    /// Moves a batch of tasks into the inject queue.
+    ///
+    /// This will temporarily make some of the tasks unavailable to stealers.
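+    ///
+    /// The head and tail cursors are wrapping `u16` indices, so occupancy
+    /// arithmetic stays correct across the numeric wrap point. An
+    /// illustrative check:
+    ///
+    /// ```
+    /// let head = u16::max_value() - 10;
+    /// let tail = 4u16; // tail has advanced past u16::MAX and wrapped
+    /// assert_eq!(tail.wrapping_sub(head), 15);
+    /// ```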
+ /// Once `push_overflow` is done, a notification is sent out, so if other + /// workers "missed" some of the tasks during a steal, they will get + /// another opportunity. + #[inline(never)] + fn push_overflow( + &mut self, + task: task::Notified<T>, + head: u16, + tail: u16, + inject: &Inject<T>, + ) -> Result<(), task::Notified<T>> { + const BATCH_LEN: usize = LOCAL_QUEUE_CAPACITY / 2 + 1; + + let n = (LOCAL_QUEUE_CAPACITY / 2) as u16; + assert_eq!( + tail.wrapping_sub(head) as usize, + LOCAL_QUEUE_CAPACITY, + "queue is not full; tail = {}; head = {}", + tail, + head + ); + + let prev = pack(head, head); + + // Claim a bunch of tasks + // + // We are claiming the tasks **before** reading them out of the buffer. + // This is safe because only the **current** thread is able to push new + // tasks. + // + // There isn't really any need for memory ordering... Relaxed would + // work. This is because all tasks are pushed into the queue from the + // current thread (or memory has been acquired if the local queue handle + // moved). + let actual = self.inner.head.compare_and_swap( + prev, + pack(head.wrapping_add(n), head.wrapping_add(n)), + Release, + ); + + if actual != prev { + // We failed to claim the tasks, losing the race. Return out of + // this function and try the full `push` routine again. The queue + // may not be full anymore. + return Err(task); + } + + // link the tasks + for i in 0..n { + let j = i + 1; + + let i_idx = i.wrapping_add(head) as usize & MASK; + let j_idx = j.wrapping_add(head) as usize & MASK; + + // Get the next pointer + let next = if j == n { + // The last task in the local queue being moved + task.header().into() + } else { + // safety: The above CAS prevents a stealer from accessing these + // tasks and we are the only producer. + self.inner.buffer[j_idx].with(|ptr| unsafe { + let value = (*ptr).as_ptr(); + (*value).header().into() + }) + }; + + // safety: the above CAS prevents a stealer from accessing these + // tasks and we are the only producer. + self.inner.buffer[i_idx].with_mut(|ptr| unsafe { + let ptr = (*ptr).as_ptr(); + (*ptr).header().queue_next.with_mut(|ptr| *ptr = Some(next)); + }); + } + + // safety: the above CAS prevents a stealer from accessing these tasks + // and we are the only producer. + let head = self.inner.buffer[head as usize & MASK] + .with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); + + // Push the tasks onto the inject queue + inject.push_batch(head, task, BATCH_LEN); + + Ok(()) + } + + /// Pops a task from the local queue. + pub(super) fn pop(&mut self) -> Option<task::Notified<T>> { + let mut head = self.inner.head.load(Acquire); + + let idx = loop { + let (steal, real) = unpack(head); + + // safety: this is the **only** thread that updates this cell. + let tail = unsafe { self.inner.tail.unsync_load() }; + + if real == tail { + // queue is empty + return None; + } + + let next_real = real.wrapping_add(1); + + // If `steal == real` there are no concurrent stealers. Both `steal` + // and `real` are updated. + let next = if steal == real { + pack(next_real, next_real) + } else { + assert_ne!(steal, next_real); + pack(steal, next_real) + }; + + // Attempt to claim a task. 
+            let res = self
+                .inner
+                .head
+                .compare_exchange(head, next, AcqRel, Acquire);
+
+            match res {
+                Ok(_) => break real as usize & MASK,
+                Err(actual) => head = actual,
+            }
+        };
+
+        Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() }))
+    }
+}
+
+impl<T> Steal<T> {
+    pub(super) fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    /// Steals half the tasks from self and places them into `dst`.
+    pub(super) fn steal_into(&self, dst: &mut Local<T>) -> Option<task::Notified<T>> {
+        // Safety: the caller is the only thread that mutates `dst.tail` and
+        // holds a mutable reference.
+        let dst_tail = unsafe { dst.inner.tail.unsync_load() };
+
+        // To the caller, `dst` may **look** empty but still have values
+        // contained in the buffer. If another thread is concurrently stealing
+        // from `dst` there may not be enough capacity to steal.
+        let (steal, _) = unpack(dst.inner.head.load(Acquire));
+
+        if dst_tail.wrapping_sub(steal) > LOCAL_QUEUE_CAPACITY as u16 / 2 {
+            // we *could* try to steal fewer tasks here, but for simplicity,
+            // we're just going to abort.
+            return None;
+        }
+
+        // Steal the tasks into `dst`'s buffer. This does not yet expose the
+        // tasks in `dst`.
+        let mut n = self.steal_into2(dst, dst_tail);
+
+        if n == 0 {
+            // No tasks were stolen
+            return None;
+        }
+
+        // We are returning a task here
+        n -= 1;
+
+        let ret_pos = dst_tail.wrapping_add(n);
+        let ret_idx = ret_pos as usize & MASK;
+
+        // safety: the value was written as part of `steal_into2` and not
+        // exposed to stealers, so no other thread can access it.
+        let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
+
+        if n == 0 {
+            // The `dst` queue is empty, but a single task was stolen
+            return Some(ret);
+        }
+
+        // Make the stolen items available to consumers
+        dst.inner.tail.store(dst_tail.wrapping_add(n), Release);
+
+        Some(ret)
+    }
+
+    // Steal tasks from `self`, placing them into `dst`. Returns the number of
+    // tasks that were stolen.
+    fn steal_into2(&self, dst: &mut Local<T>, dst_tail: u16) -> u16 {
+        let mut prev_packed = self.0.head.load(Acquire);
+        let mut next_packed;
+
+        let n = loop {
+            let (src_head_steal, src_head_real) = unpack(prev_packed);
+            let src_tail = self.0.tail.load(Acquire);
+
+            // If these two do not match, another thread is concurrently
+            // stealing from the queue.
+            if src_head_steal != src_head_real {
+                return 0;
+            }
+
+            // Number of available tasks to steal
+            let n = src_tail.wrapping_sub(src_head_real);
+            let n = n - n / 2;
+
+            if n == 0 {
+                // No tasks available to steal
+                return 0;
+            }
+
+            // Update the real head index to acquire the tasks.
+            let steal_to = src_head_real.wrapping_add(n);
+            assert_ne!(src_head_steal, steal_to);
+            next_packed = pack(src_head_steal, steal_to);
+
+            // Claim all those tasks. This is done by incrementing the "real"
+            // head but not the steal. By doing this, no other thread is able to
+            // steal from this queue until the current thread completes.
+ let res = self + .0 + .head + .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); + + match res { + Ok(_) => break n, + Err(actual) => prev_packed = actual, + } + }; + + assert!(n <= LOCAL_QUEUE_CAPACITY as u16 / 2, "actual = {}", n); + + let (first, _) = unpack(next_packed); + + // Take all the tasks + for i in 0..n { + // Compute the positions + let src_pos = first.wrapping_add(i); + let dst_pos = dst_tail.wrapping_add(i); + + // Map to slots + let src_idx = src_pos as usize & MASK; + let dst_idx = dst_pos as usize & MASK; + + // Read the task + // + // safety: We acquired the task with the atomic exchange above. + let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) }); + + // Write the task to the new slot + // + // safety: `dst` queue is empty and we are the only producer to + // this queue. + dst.inner.buffer[dst_idx] + .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) }); + } + + let mut prev_packed = next_packed; + + // Update `src_head_steal` to match `src_head_real` signalling that the + // stealing routine is complete. + loop { + let head = unpack(prev_packed).1; + next_packed = pack(head, head); + + let res = self + .0 + .head + .compare_exchange(prev_packed, next_packed, AcqRel, Acquire); + + match res { + Ok(_) => return n, + Err(actual) => { + let (actual_steal, actual_real) = unpack(actual); + + assert_ne!(actual_steal, actual_real); + + prev_packed = actual; + } + } + } + } +} + +impl<T> Clone for Steal<T> { + fn clone(&self) -> Steal<T> { + Steal(self.0.clone()) + } +} + +impl<T> Drop for Local<T> { + fn drop(&mut self) { + if !std::thread::panicking() { + assert!(self.pop().is_none(), "queue not empty"); + } + } +} + +impl<T> Inner<T> { + fn is_empty(&self) -> bool { + let (_, head) = unpack(self.head.load(Acquire)); + let tail = self.tail.load(Acquire); + + head == tail + } +} + +impl<T: 'static> Inject<T> { + pub(super) fn new() -> Inject<T> { + Inject { + pointers: Mutex::new(Pointers { + is_closed: false, + head: None, + tail: None, + }), + len: AtomicUsize::new(0), + _p: PhantomData, + } + } + + pub(super) fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Close the injection queue, returns `true` if the queue is open when the + /// transition is made. + pub(super) fn close(&self) -> bool { + let mut p = self.pointers.lock().unwrap(); + + if p.is_closed { + return false; + } + + p.is_closed = true; + true + } + + pub(super) fn is_closed(&self) -> bool { + self.pointers.lock().unwrap().is_closed + } + + pub(super) fn len(&self) -> usize { + self.len.load(Acquire) + } + + /// Pushes a value into the queue. + pub(super) fn push(&self, task: task::Notified<T>) { + // Acquire queue lock + let mut p = self.pointers.lock().unwrap(); + + if p.is_closed { + // Drop the mutex to avoid a potential deadlock when + // re-entering. 
+            drop(p);
+            drop(task);
+            return;
+        }
+
+        // safety: only mutated with the lock held
+        let len = unsafe { self.len.unsync_load() };
+        let task = task.into_raw();
+
+        // The next pointer should already be null
+        debug_assert!(get_next(task).is_none());
+
+        if let Some(tail) = p.tail {
+            set_next(tail, Some(task));
+        } else {
+            p.head = Some(task);
+        }
+
+        p.tail = Some(task);
+
+        self.len.store(len + 1, Release);
+    }
+
+    pub(super) fn push_batch(
+        &self,
+        batch_head: task::Notified<T>,
+        batch_tail: task::Notified<T>,
+        num: usize,
+    ) {
+        let batch_head = batch_head.into_raw();
+        let batch_tail = batch_tail.into_raw();
+
+        debug_assert!(get_next(batch_tail).is_none());
+
+        let mut p = self.pointers.lock().unwrap();
+
+        if let Some(tail) = p.tail {
+            set_next(tail, Some(batch_head));
+        } else {
+            p.head = Some(batch_head);
+        }
+
+        p.tail = Some(batch_tail);
+
+        // Increment the count.
+        //
+        // safety: All updates to the len atomic are guarded by the mutex. As
+        // such, a non-atomic load followed by a store is safe.
+        let len = unsafe { self.len.unsync_load() };
+
+        self.len.store(len + num, Release);
+    }
+
+    pub(super) fn pop(&self) -> Option<task::Notified<T>> {
+        // Fast path, if len == 0, then there are no values
+        if self.is_empty() {
+            return None;
+        }
+
+        let mut p = self.pointers.lock().unwrap();
+
+        // It is possible to hit null here if another thread popped the last
+        // task between us checking `len` and acquiring the lock.
+        let task = p.head?;
+
+        p.head = get_next(task);
+
+        if p.head.is_none() {
+            p.tail = None;
+        }
+
+        set_next(task, None);
+
+        // Decrement the count.
+        //
+        // safety: All updates to the len atomic are guarded by the mutex. As
+        // such, a non-atomic load followed by a store is safe.
+        self.len
+            .store(unsafe { self.len.unsync_load() } - 1, Release);
+
+        // safety: a `Notified` is pushed into the queue and now it is popped!
+        Some(unsafe { task::Notified::from_raw(task) })
+    }
+}
+
+impl<T: 'static> Drop for Inject<T> {
+    fn drop(&mut self) {
+        if !std::thread::panicking() {
+            assert!(self.pop().is_none(), "queue not empty");
+        }
+    }
+}
+
+fn get_next(header: NonNull<task::Header>) -> Option<NonNull<task::Header>> {
+    unsafe { header.as_ref().queue_next.with(|ptr| *ptr) }
+}
+
+fn set_next(header: NonNull<task::Header>, val: Option<NonNull<task::Header>>) {
+    unsafe {
+        header.as_ref().queue_next.with_mut(|ptr| *ptr = val);
+    }
+}
+
+/// Split the head value into the real head and the index a stealer is working
+/// on.
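+///
+/// The two cursors round-trip through a single `u32` (an illustrative check
+/// against the layout used by `pack` below):
+///
+/// ```
+/// # fn pack(steal: u16, real: u16) -> u32 { (real as u32) | ((steal as u32) << 16) }
+/// # fn unpack(n: u32) -> (u16, u16) { ((n >> 16) as u16, (n & u16::max_value() as u32) as u16) }
+/// assert_eq!(unpack(pack(7, 9)), (7, 9));
+/// assert_eq!(pack(7, 9), 9 | (7 << 16));
+/// ```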
+fn unpack(n: u32) -> (u16, u16) { + let real = n & u16::max_value() as u32; + let steal = n >> 16; + + (steal as u16, real as u16) +} + +/// Join the two head values +fn pack(steal: u16, real: u16) -> u32 { + (real as u32) | ((steal as u32) << 16) +} + +#[test] +fn test_local_queue_capacity() { + assert!(LOCAL_QUEUE_CAPACITY - 1 <= u8::max_value() as usize); +} diff --git a/third_party/rust/tokio/src/runtime/shell.rs b/third_party/rust/tokio/src/runtime/shell.rs new file mode 100644 index 0000000000..294f2a16d8 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/shell.rs @@ -0,0 +1,62 @@ +#![allow(clippy::redundant_clone)] + +use crate::park::{Park, Unpark}; +use crate::runtime::enter; +use crate::runtime::time; +use crate::util::{waker_ref, Wake}; + +use std::future::Future; +use std::sync::Arc; +use std::task::Context; +use std::task::Poll::Ready; + +#[derive(Debug)] +pub(super) struct Shell { + driver: time::Driver, + + /// TODO: don't store this + unpark: Arc<Handle>, +} + +#[derive(Debug)] +struct Handle(<time::Driver as Park>::Unpark); + +impl Shell { + pub(super) fn new(driver: time::Driver) -> Shell { + let unpark = Arc::new(Handle(driver.unpark())); + + Shell { driver, unpark } + } + + pub(super) fn block_on<F>(&mut self, f: F) -> F::Output + where + F: Future, + { + let _e = enter(); + + pin!(f); + + let waker = waker_ref(&self.unpark); + let mut cx = Context::from_waker(&waker); + + loop { + if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) { + return v; + } + + self.driver.park().unwrap(); + } + } +} + +impl Wake for Handle { + /// Wake by value + fn wake(self: Arc<Self>) { + Wake::wake_by_ref(&self); + } + + /// Wake by reference + fn wake_by_ref(arc_self: &Arc<Self>) { + arc_self.0.unpark(); + } +} diff --git a/third_party/rust/tokio/src/runtime/spawner.rs b/third_party/rust/tokio/src/runtime/spawner.rs new file mode 100644 index 0000000000..d136945cdc --- /dev/null +++ b/third_party/rust/tokio/src/runtime/spawner.rs @@ -0,0 +1,37 @@ +cfg_rt_core! { + use crate::runtime::basic_scheduler; + use crate::task::JoinHandle; + + use std::future::Future; +} + +cfg_rt_threaded! { + use crate::runtime::thread_pool; +} + +#[derive(Debug, Clone)] +pub(crate) enum Spawner { + Shell, + #[cfg(feature = "rt-core")] + Basic(basic_scheduler::Spawner), + #[cfg(feature = "rt-threaded")] + ThreadPool(thread_pool::Spawner), +} + +cfg_rt_core! { + impl Spawner { + pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output> + where + F: Future + Send + 'static, + F::Output: Send + 'static, + { + match self { + Spawner::Shell => panic!("spawning not enabled for runtime"), + #[cfg(feature = "rt-core")] + Spawner::Basic(spawner) => spawner.spawn(future), + #[cfg(feature = "rt-threaded")] + Spawner::ThreadPool(spawner) => spawner.spawn(future), + } + } + } +} diff --git a/third_party/rust/tokio/src/runtime/task/core.rs b/third_party/rust/tokio/src/runtime/task/core.rs new file mode 100644 index 0000000000..573b9f3c9c --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/core.rs @@ -0,0 +1,279 @@ +use crate::loom::cell::UnsafeCell; +use crate::runtime::task::raw::{self, Vtable}; +use crate::runtime::task::state::State; +use crate::runtime::task::waker::waker_ref; +use crate::runtime::task::{Notified, Schedule, Task}; +use crate::util::linked_list; + +use std::future::Future; +use std::pin::Pin; +use std::ptr::NonNull; +use std::task::{Context, Poll, Waker}; + +/// The task cell. Contains the components of the task. 
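+///
+/// An illustrative consequence of the `#[repr(C)]` layout (a sketch, not a
+/// real API): because `header` is the first field, a pointer to the cell is
+/// also a valid pointer to its header:
+///
+/// ```ignore
+/// let cell: *mut Cell<T, S> = /* the task allocation */;
+/// let header = cell as *mut Header; // sound only because Header is first
+/// ```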
+/// +/// It is critical for `Header` to be the first field as the task structure will +/// be referenced by both *mut Cell and *mut Header. +#[repr(C)] +pub(super) struct Cell<T: Future, S> { + /// Hot task state data + pub(super) header: Header, + + /// Either the future or output, depending on the execution stage. + pub(super) core: Core<T, S>, + + /// Cold data + pub(super) trailer: Trailer, +} + +/// The core of the task. +/// +/// Holds the future or output, depending on the stage of execution. +pub(super) struct Core<T: Future, S> { + /// Scheduler used to drive this future + pub(super) scheduler: UnsafeCell<Option<S>>, + + /// Either the future or the output + pub(super) stage: UnsafeCell<Stage<T>>, +} + +/// Crate public as this is also needed by the pool. +#[repr(C)] +pub(crate) struct Header { + /// Task state + pub(super) state: State, + + pub(crate) owned: UnsafeCell<linked_list::Pointers<Header>>, + + /// Pointer to next task, used with the injection queue + pub(crate) queue_next: UnsafeCell<Option<NonNull<Header>>>, + + /// Pointer to the next task in the transfer stack + pub(super) stack_next: UnsafeCell<Option<NonNull<Header>>>, + + /// Table of function pointers for executing actions on the task. + pub(super) vtable: &'static Vtable, +} + +unsafe impl Send for Header {} +unsafe impl Sync for Header {} + +/// Cold data is stored after the future. +pub(super) struct Trailer { + /// Consumer task waiting on completion of this task. + pub(super) waker: UnsafeCell<Option<Waker>>, +} + +/// Either the future or the output. +pub(super) enum Stage<T: Future> { + Running(T), + Finished(super::Result<T::Output>), + Consumed, +} + +impl<T: Future, S: Schedule> Cell<T, S> { + /// Allocates a new task cell, containing the header, trailer, and core + /// structures. + pub(super) fn new(future: T, state: State) -> Box<Cell<T, S>> { + Box::new(Cell { + header: Header { + state, + owned: UnsafeCell::new(linked_list::Pointers::new()), + queue_next: UnsafeCell::new(None), + stack_next: UnsafeCell::new(None), + vtable: raw::vtable::<T, S>(), + }, + core: Core { + scheduler: UnsafeCell::new(None), + stage: UnsafeCell::new(Stage::Running(future)), + }, + trailer: Trailer { + waker: UnsafeCell::new(None), + }, + }) + } +} + +impl<T: Future, S: Schedule> Core<T, S> { + /// If needed, bind a scheduler to the task. + /// + /// This only happens on the first poll. + pub(super) fn bind_scheduler(&self, task: Task<S>) { + use std::mem::ManuallyDrop; + + // TODO: it would be nice to not have to wrap with a ManuallyDrop + let task = ManuallyDrop::new(task); + + // This function may be called concurrently, but the __first__ time it + // is called, the caller has unique access to this field. All subsequent + // concurrent calls will be via the `Waker`, which will "happens after" + // the first poll. + // + // In other words, it is always safe to read the field and it is safe to + // write to the field when it is `None`. + if self.is_bound() { + return; + } + + // Bind the task to the scheduler + let scheduler = S::bind(ManuallyDrop::into_inner(task)); + + // Safety: As `scheduler` is not set, this is the first poll + self.scheduler.with_mut(|ptr| unsafe { + *ptr = Some(scheduler); + }); + } + + /// Returns true if the task is bound to a scheduler. + pub(super) fn is_bound(&self) -> bool { + // Safety: never called concurrently w/ a mutation. 
+        self.scheduler.with(|ptr| unsafe { (*ptr).is_some() })
+    }
+
+    /// Poll the future
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure it is safe to mutate the `state` field. This
+    /// requires ensuring mutual exclusion between any concurrent thread that
+    /// might modify the future or output field.
+    ///
+    /// The mutual exclusion is implemented by `Harness` and the `Lifecycle`
+    /// component of the task state.
+    ///
+    /// `self` must also be pinned. This is handled by storing the task on the
+    /// heap.
+    pub(super) fn poll(&self, header: &Header) -> Poll<T::Output> {
+        let res = {
+            self.stage.with_mut(|ptr| {
+                // Safety: The caller ensures mutual exclusion to the field.
+                let future = match unsafe { &mut *ptr } {
+                    Stage::Running(future) => future,
+                    _ => unreachable!("unexpected stage"),
+                };
+
+                // Safety: The caller ensures the future is pinned.
+                let future = unsafe { Pin::new_unchecked(future) };
+
+                // The waker passed into the `poll` function does not require a ref
+                // count increment.
+                let waker_ref = waker_ref::<T, S>(header);
+                let mut cx = Context::from_waker(&*waker_ref);
+
+                future.poll(&mut cx)
+            })
+        };
+
+        if res.is_ready() {
+            self.drop_future_or_output();
+        }
+
+        res
+    }
+
+    /// Drop the future
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure it is safe to mutate the `stage` field.
+    pub(super) fn drop_future_or_output(&self) {
+        self.stage.with_mut(|ptr| {
+            // Safety: The caller ensures mutual exclusion to the field.
+            unsafe { *ptr = Stage::Consumed };
+        });
+    }
+
+    /// Store the task output
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure it is safe to mutate the `stage` field.
+    pub(super) fn store_output(&self, output: super::Result<T::Output>) {
+        self.stage.with_mut(|ptr| {
+            // Safety: the caller ensures mutual exclusion to the field.
+            unsafe { *ptr = Stage::Finished(output) };
+        });
+    }
+
+    /// Take the task output
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure it is safe to mutate the `stage` field.
+    pub(super) fn take_output(&self) -> super::Result<T::Output> {
+        use std::mem;
+
+        self.stage.with_mut(|ptr| {
+            // Safety: the caller ensures mutual exclusion to the field.
+            match mem::replace(unsafe { &mut *ptr }, Stage::Consumed) {
+                Stage::Finished(output) => output,
+                _ => panic!("unexpected task state"),
+            }
+        })
+    }
+
+    /// Schedule the future for execution
+    pub(super) fn schedule(&self, task: Notified<S>) {
+        self.scheduler.with(|ptr| {
+            // Safety: Can only be called after initial `poll`, which is the
+            // only time the field is mutated.
+            match unsafe { &*ptr } {
+                Some(scheduler) => scheduler.schedule(task),
+                None => panic!("no scheduler set"),
+            }
+        });
+    }
+
+    /// Schedule the future for execution in the near future, yielding the
+    /// thread to other tasks.
+    pub(super) fn yield_now(&self, task: Notified<S>) {
+        self.scheduler.with(|ptr| {
+            // Safety: Can only be called after initial `poll`, which is the
+            // only time the field is mutated.
+            match unsafe { &*ptr } {
+                Some(scheduler) => scheduler.yield_now(task),
+                None => panic!("no scheduler set"),
+            }
+        });
+    }
+
+    /// Release the task
+    ///
+    /// If the `Scheduler` implementation is able to, it returns the `Task`
+    /// handle immediately. The caller of this function will batch a ref-dec
+    /// with a state change.
+ pub(super) fn release(&self, task: Task<S>) -> Option<Task<S>> { + use std::mem::ManuallyDrop; + + let task = ManuallyDrop::new(task); + + self.scheduler.with(|ptr| { + // Safety: Can only be called after initial `poll`, which is the + // only time the field is mutated. + match unsafe { &*ptr } { + Some(scheduler) => scheduler.release(&*task), + // Task was never polled + None => None, + } + }) + } +} + +cfg_rt_threaded! { + impl Header { + pub(crate) fn shutdown(&self) { + use crate::runtime::task::RawTask; + + let task = unsafe { RawTask::from_raw(self.into()) }; + task.shutdown(); + } + } +} + +#[test] +#[cfg(not(loom))] +fn header_lte_cache_line() { + use std::mem::size_of; + + assert!(size_of::<Header>() <= 8 * size_of::<*const ()>()); +} diff --git a/third_party/rust/tokio/src/runtime/task/error.rs b/third_party/rust/tokio/src/runtime/task/error.rs new file mode 100644 index 0000000000..d5f65a4981 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/error.rs @@ -0,0 +1,163 @@ +use std::any::Any; +use std::fmt; +use std::io; +use std::sync::Mutex; + +doc_rt_core! { + /// Task failed to execute to completion. + pub struct JoinError { + repr: Repr, + } +} + +enum Repr { + Cancelled, + Panic(Mutex<Box<dyn Any + Send + 'static>>), +} + +impl JoinError { + #[doc(hidden)] + #[deprecated] + pub fn cancelled() -> JoinError { + Self::cancelled2() + } + + pub(crate) fn cancelled2() -> JoinError { + JoinError { + repr: Repr::Cancelled, + } + } + + #[doc(hidden)] + #[deprecated] + pub fn panic(err: Box<dyn Any + Send + 'static>) -> JoinError { + Self::panic2(err) + } + + pub(crate) fn panic2(err: Box<dyn Any + Send + 'static>) -> JoinError { + JoinError { + repr: Repr::Panic(Mutex::new(err)), + } + } + + /// Returns true if the error was caused by the task being cancelled + pub fn is_cancelled(&self) -> bool { + match &self.repr { + Repr::Cancelled => true, + _ => false, + } + } + + /// Returns true if the error was caused by the task panicking + /// + /// # Examples + /// + /// ``` + /// use std::panic; + /// + /// #[tokio::main] + /// async fn main() { + /// let err = tokio::spawn(async { + /// panic!("boom"); + /// }).await.unwrap_err(); + /// + /// assert!(err.is_panic()); + /// } + /// ``` + pub fn is_panic(&self) -> bool { + match &self.repr { + Repr::Panic(_) => true, + _ => false, + } + } + + /// Consumes the join error, returning the object with which the task panicked. + /// + /// # Panics + /// + /// `into_panic()` panics if the `Error` does not represent the underlying + /// task terminating with a panic. Use `is_panic` to check the error reason + /// or `try_into_panic` for a variant that does not panic. + /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// #[tokio::main] + /// async fn main() { + /// let err = tokio::spawn(async { + /// panic!("boom"); + /// }).await.unwrap_err(); + /// + /// if err.is_panic() { + /// // Resume the panic on the main task + /// panic::resume_unwind(err.into_panic()); + /// } + /// } + /// ``` + pub fn into_panic(self) -> Box<dyn Any + Send + 'static> { + self.try_into_panic() + .expect("`JoinError` reason is not a panic.") + } + + /// Consumes the join error, returning the object with which the task + /// panicked if the task terminated due to a panic. Otherwise, `self` is + /// returned. 
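+    ///
+    /// (This is the non-panicking counterpart of `into_panic`.)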
+ /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// #[tokio::main] + /// async fn main() { + /// let err = tokio::spawn(async { + /// panic!("boom"); + /// }).await.unwrap_err(); + /// + /// if let Ok(reason) = err.try_into_panic() { + /// // Resume the panic on the main task + /// panic::resume_unwind(reason); + /// } + /// } + /// ``` + pub fn try_into_panic(self) -> Result<Box<dyn Any + Send + 'static>, JoinError> { + match self.repr { + Repr::Panic(p) => Ok(p.into_inner().expect("Extracting panic from mutex")), + _ => Err(self), + } + } +} + +impl fmt::Display for JoinError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.repr { + Repr::Cancelled => write!(fmt, "cancelled"), + Repr::Panic(_) => write!(fmt, "panic"), + } + } +} + +impl fmt::Debug for JoinError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.repr { + Repr::Cancelled => write!(fmt, "JoinError::Cancelled"), + Repr::Panic(_) => write!(fmt, "JoinError::Panic(...)"), + } + } +} + +impl std::error::Error for JoinError {} + +impl From<JoinError> for io::Error { + fn from(src: JoinError) -> io::Error { + io::Error::new( + io::ErrorKind::Other, + match src.repr { + Repr::Cancelled => "task was cancelled", + Repr::Panic(_) => "task panicked", + }, + ) + } +} diff --git a/third_party/rust/tokio/src/runtime/task/harness.rs b/third_party/rust/tokio/src/runtime/task/harness.rs new file mode 100644 index 0000000000..29b231ea88 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/harness.rs @@ -0,0 +1,372 @@ +use crate::runtime::task::core::{Cell, Core, Header, Trailer}; +use crate::runtime::task::state::Snapshot; +use crate::runtime::task::{JoinError, Notified, Schedule, Task}; + +use std::future::Future; +use std::mem; +use std::panic; +use std::ptr::NonNull; +use std::task::{Poll, Waker}; + +/// Typed raw task handle +pub(super) struct Harness<T: Future, S: 'static> { + cell: NonNull<Cell<T, S>>, +} + +impl<T, S> Harness<T, S> +where + T: Future, + S: 'static, +{ + pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> Harness<T, S> { + Harness { + cell: ptr.cast::<Cell<T, S>>(), + } + } + + fn header(&self) -> &Header { + unsafe { &self.cell.as_ref().header } + } + + fn trailer(&self) -> &Trailer { + unsafe { &self.cell.as_ref().trailer } + } + + fn core(&self) -> &Core<T, S> { + unsafe { &self.cell.as_ref().core } + } +} + +impl<T, S> Harness<T, S> +where + T: Future, + S: Schedule, +{ + /// Polls the inner future. + /// + /// All necessary state checks and transitions are performed. + /// + /// Panics raised while polling the future are handled. + pub(super) fn poll(self) { + // If this is the first time the task is polled, the task will be bound + // to the scheduler, in which case the task ref count must be + // incremented. + let ref_inc = !self.core().is_bound(); + + // Transition the task to the running state. + // + // A failure to transition here indicates the task has been cancelled + // while in the run queue pending execution. + let snapshot = match self.header().state.transition_to_running(ref_inc) { + Ok(snapshot) => snapshot, + Err(_) => { + // The task was shutdown while in the run queue. At this point, + // we just hold a ref counted reference. Drop it here. + self.drop_reference(); + return; + } + }; + + // Ensure the task is bound to a scheduler instance. If this is the + // first time polling the task, a scheduler instance is pulled from the + // local context and assigned to the task. 
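+        // (`Schedule::bind` runs at most once per task, on the worker thread
+        // performing the first poll.)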
+        //
+        // The scheduler maintains ownership of the task and responds to `wake`
+        // calls.
+        //
+        // The task reference count has been incremented.
+        self.core().bind_scheduler(self.to_task());
+
+        // The transition to `Running` done above ensures that a lock on the
+        // future has been obtained. This also ensures the `*mut T` pointer
+        // contains the future (as opposed to the output) and is initialized.
+
+        let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+            struct Guard<'a, T: Future, S: Schedule> {
+                core: &'a Core<T, S>,
+                polled: bool,
+            }
+
+            impl<T: Future, S: Schedule> Drop for Guard<'_, T, S> {
+                fn drop(&mut self) {
+                    if !self.polled {
+                        self.core.drop_future_or_output();
+                    }
+                }
+            }
+
+            let mut guard = Guard {
+                core: self.core(),
+                polled: false,
+            };
+
+            // If the task is cancelled, avoid polling it, instead signalling
+            // that it is complete.
+            if snapshot.is_cancelled() {
+                Poll::Ready(Err(JoinError::cancelled2()))
+            } else {
+                let res = guard.core.poll(self.header());
+
+                // prevent the guard from dropping the future
+                guard.polled = true;
+
+                res.map(Ok)
+            }
+        }));
+
+        match res {
+            Ok(Poll::Ready(out)) => {
+                self.complete(out, snapshot.is_join_interested());
+            }
+            Ok(Poll::Pending) => {
+                match self.header().state.transition_to_idle() {
+                    Ok(snapshot) => {
+                        if snapshot.is_notified() {
+                            // Signal yield
+                            self.core().yield_now(Notified(self.to_task()));
+                            // The ref-count was incremented as part of
+                            // `transition_to_idle`.
+                            self.drop_reference();
+                        }
+                    }
+                    Err(_) => self.cancel_task(),
+                }
+            }
+            Err(err) => {
+                self.complete(Err(JoinError::panic2(err)), snapshot.is_join_interested());
+            }
+        }
+    }
+
+    pub(super) fn dealloc(self) {
+        // Release the join waker, if there is one.
+        self.trailer().waker.with_mut(|_| ());
+
+        // Check causality
+        self.core().stage.with_mut(|_| {});
+        self.core().scheduler.with_mut(|_| {});
+
+        unsafe {
+            drop(Box::from_raw(self.cell.as_ptr()));
+        }
+    }
+
+    // ===== join handle =====
+
+    /// Read the task output into `dst`.
+    pub(super) fn try_read_output(self, dst: &mut Poll<super::Result<T::Output>>, waker: &Waker) {
+        // Load a snapshot of the current task state
+        let snapshot = self.header().state.load();
+
+        debug_assert!(snapshot.is_join_interested());
+
+        if !snapshot.is_complete() {
+            // The waker must be stored in the task struct.
+            let res = if snapshot.has_join_waker() {
+                // There already is a waker stored in the struct. If it matches
+                // the provided waker, then there is no further work to do.
+                // Otherwise, the waker must be swapped.
+                let will_wake = unsafe {
+                    // Safety: when `JOIN_INTEREST` is set, only `JOIN_HANDLE`
+                    // may mutate the `waker` field.
+                    self.trailer()
+                        .waker
+                        .with(|ptr| (*ptr).as_ref().unwrap().will_wake(waker))
+                };
+
+                if will_wake {
+                    // The task is not complete **and** the waker is up to date,
+                    // there is nothing further that needs to be done.
+                    return;
+                }
+
+                // Unset the `JOIN_WAKER` to gain mutable access to the `waker`
+                // field then update the field with the new join waker.
+                //
+                // This requires two atomic operations, unsetting the bit and
+                // then resetting it. If the task transitions to complete
+                // concurrently to either one of those operations, then setting
+                // the join waker fails and we proceed to reading the task
+                // output.
+                self.header()
+                    .state
+                    .unset_waker()
+                    .and_then(|snapshot| self.set_join_waker(waker.clone(), snapshot))
+            } else {
+                self.set_join_waker(waker.clone(), snapshot)
+            };
+
+            match res {
+                Ok(_) => return,
+                Err(snapshot) => {
+                    assert!(snapshot.is_complete());
+                }
+            }
+        }
+
+        *dst = Poll::Ready(self.core().take_output());
+    }
+
+    fn set_join_waker(&self, waker: Waker, snapshot: Snapshot) -> Result<Snapshot, Snapshot> {
+        assert!(snapshot.is_join_interested());
+        assert!(!snapshot.has_join_waker());
+
+        // Safety: Only the `JoinHandle` may set the `waker` field. When
+        // `JOIN_INTEREST` is **not** set, nothing else will touch the field.
+        unsafe {
+            self.trailer().waker.with_mut(|ptr| {
+                *ptr = Some(waker);
+            });
+        }
+
+        // Update the `JoinWaker` state accordingly
+        let res = self.header().state.set_join_waker();
+
+        // If the state could not be updated, then clear the join waker
+        if res.is_err() {
+            unsafe {
+                self.trailer().waker.with_mut(|ptr| {
+                    *ptr = None;
+                });
+            }
+        }
+
+        res
+    }
+
+    pub(super) fn drop_join_handle_slow(self) {
+        // Try to unset `JOIN_INTEREST`. This must be done as a first step in
+        // case the task concurrently completed.
+        if self.header().state.unset_join_interested().is_err() {
+            // It is our responsibility to drop the output. This is critical as
+            // the task output may not be `Send` and as such must remain with
+            // the scheduler or `JoinHandle`. i.e. if the output remains in the
+            // task structure until the task is deallocated, it may be dropped
+            // by a Waker on any arbitrary thread.
+            self.core().drop_future_or_output();
+        }
+
+        // Drop the `JoinHandle` reference, possibly deallocating the task
+        self.drop_reference();
+    }
+
+    // ===== waker behavior =====
+
+    pub(super) fn wake_by_val(self) {
+        self.wake_by_ref();
+        self.drop_reference();
+    }
+
+    pub(super) fn wake_by_ref(&self) {
+        if self.header().state.transition_to_notified() {
+            self.core().schedule(Notified(self.to_task()));
+        }
+    }
+
+    pub(super) fn drop_reference(self) {
+        if self.header().state.ref_dec() {
+            self.dealloc();
+        }
+    }
+
+    /// Forcibly shutdown the task
+    ///
+    /// Attempt to transition to `Running` in order to forcibly shutdown the
+    /// task. If the task is currently running or in a state of completion, then
+    /// there is nothing further to do. When the task completes running, it will
+    /// notice the `CANCELLED` bit and finalize the task.
+    pub(super) fn shutdown(self) {
+        if !self.header().state.transition_to_shutdown() {
+            // The task is concurrently running. No further work needed.
+            return;
+        }
+
+        // By transitioning the lifecycle to `Running`, we have permission to
+        // drop the future.
+        self.cancel_task();
+    }
+
+    // ====== internal ======
+
+    fn cancel_task(self) {
+        // Drop the future from a panic guard.
+        let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+            self.core().drop_future_or_output();
+        }));
+
+        if let Err(err) = res {
+            // Dropping the future panicked, complete the join
+            // handle with the panic to avoid dropping the panic
+            // on the ground.
+            self.complete(Err(JoinError::panic2(err)), true);
+        } else {
+            self.complete(Err(JoinError::cancelled2()), true);
+        }
+    }
+
+    fn complete(mut self, output: super::Result<T::Output>, is_join_interested: bool) {
+        if is_join_interested {
+            // Store the output. The future has already been dropped
+            //
+            // Safety: Mutual exclusion is obtained by having transitioned the task
+            // state -> Running
+            self.core().store_output(output);
+
+            // Transition to `Complete`, notifying the `JoinHandle` if necessary.
+            self.transition_to_complete();
+        }
+
+        // The task has completed execution and will no longer be scheduled.
+        //
+        // Attempts to batch a ref-dec with the state transition below.
+        let ref_dec = if self.core().is_bound() {
+            if let Some(task) = self.core().release(self.to_task()) {
+                mem::forget(task);
+                true
+            } else {
+                false
+            }
+        } else {
+            false
+        };
+
+        // This might deallocate
+        let snapshot = self
+            .header()
+            .state
+            .transition_to_terminal(!is_join_interested, ref_dec);
+
+        if snapshot.ref_count() == 0 {
+            self.dealloc()
+        }
+    }
+
+    /// Transitions the task's lifecycle to `Complete`. Notifies the
+    /// `JoinHandle` if it still has interest in the completion.
+    fn transition_to_complete(&mut self) {
+        // Transition the task's lifecycle to `Complete` and get a snapshot of
+        // the task's state.
+        let snapshot = self.header().state.transition_to_complete();
+
+        if !snapshot.is_join_interested() {
+            // The `JoinHandle` is not interested in the output of this task. It
+            // is our responsibility to drop the output.
+            self.core().drop_future_or_output();
+        } else if snapshot.has_join_waker() {
+            // Notify the join handle. The previous transition obtains the
+            // lock on the waker cell.
+            self.wake_join();
+        }
+    }
+
+    fn wake_join(&self) {
+        self.trailer().waker.with(|ptr| match unsafe { &*ptr } {
+            Some(waker) => waker.wake_by_ref(),
+            None => panic!("waker missing"),
+        });
+    }
+
+    fn to_task(&self) -> Task<S> {
+        unsafe { Task::from_raw(self.header().into()) }
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/join.rs b/third_party/rust/tokio/src/runtime/task/join.rs
new file mode 100644
index 0000000000..fdcc346e5c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/join.rs
@@ -0,0 +1,152 @@
+use crate::runtime::task::RawTask;
+
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+doc_rt_core! {
+    /// An owned permission to join on a task (await its termination).
+    ///
+    /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] for
+    /// a task rather than a thread.
+    ///
+    /// A `JoinHandle` *detaches* the associated task when it is dropped, which
+    /// means that there is no longer any handle to the task, and no way to `join`
+    /// on it.
+    ///
+    /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`]
+    /// functions.
+    ///
+    /// # Examples
+    ///
+    /// Creation from [`task::spawn`]:
+    ///
+    /// ```
+    /// use tokio::task;
+    ///
+    /// # async fn doc() {
+    /// let join_handle: task::JoinHandle<_> = task::spawn(async {
+    ///     // some work here
+    /// });
+    /// # }
+    /// ```
+    ///
+    /// Creation from [`task::spawn_blocking`]:
+    ///
+    /// ```
+    /// use tokio::task;
+    ///
+    /// # async fn doc() {
+    /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| {
+    ///     // some blocking work here
+    /// });
+    /// # }
+    /// ```
+    ///
+    /// Child being detached and outliving its parent:
+    ///
+    /// ```no_run
+    /// use tokio::task;
+    /// use tokio::time;
+    /// use std::time::Duration;
+    ///
+    /// # #[tokio::main] async fn main() {
+    /// let original_task = task::spawn(async {
+    ///     let _detached_task = task::spawn(async {
+    ///         // Here we sleep to make sure that the first task returns before
+    ///         // this one completes.
+    ///         time::delay_for(Duration::from_millis(10)).await;
+    ///         // This will be called, even though the JoinHandle is dropped.
+ /// println!("♫ Still alive ♫"); + /// }); + /// }); + /// + /// original_task.await.expect("The task being joined has panicked"); + /// println!("Original task is joined."); + /// + /// // We make sure that the new task has time to run, before the main + /// // task returns. + /// + /// time::delay_for(Duration::from_millis(1000)).await; + /// # } + /// ``` + /// + /// [`task::spawn`]: crate::task::spawn() + /// [`task::spawn_blocking`]: crate::task::spawn_blocking + /// [`std::thread::JoinHandle`]: std::thread::JoinHandle + pub struct JoinHandle<T> { + raw: Option<RawTask>, + _p: PhantomData<T>, + } +} + +unsafe impl<T: Send> Send for JoinHandle<T> {} +unsafe impl<T: Send> Sync for JoinHandle<T> {} + +impl<T> JoinHandle<T> { + pub(super) fn new(raw: RawTask) -> JoinHandle<T> { + JoinHandle { + raw: Some(raw), + _p: PhantomData, + } + } +} + +impl<T> Unpin for JoinHandle<T> {} + +impl<T> Future for JoinHandle<T> { + type Output = super::Result<T>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let mut ret = Poll::Pending; + + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + // Raw should always be set. If it is not, this is due to polling after + // completion + let raw = self + .raw + .as_ref() + .expect("polling after `JoinHandle` already completed"); + + // Try to read the task output. If the task is not yet complete, the + // waker is stored and is notified once the task does complete. + // + // The function must go via the vtable, which requires erasing generic + // types. To do this, the function "return" is placed on the stack + // **before** calling the function and is passed into the function using + // `*mut ()`. + // + // Safety: + // + // The type of `T` must match the task's output type. + unsafe { + raw.try_read_output(&mut ret as *mut _ as *mut (), cx.waker()); + } + + ret + } +} + +impl<T> Drop for JoinHandle<T> { + fn drop(&mut self) { + if let Some(raw) = self.raw.take() { + if raw.header().state.drop_join_handle_fast().is_ok() { + return; + } + + raw.drop_join_handle_slow(); + } + } +} + +impl<T> fmt::Debug for JoinHandle<T> +where + T: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("JoinHandle").finish() + } +} diff --git a/third_party/rust/tokio/src/runtime/task/mod.rs b/third_party/rust/tokio/src/runtime/task/mod.rs new file mode 100644 index 0000000000..17b5157e84 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/mod.rs @@ -0,0 +1,220 @@ +mod core; +use self::core::Cell; +pub(crate) use self::core::Header; + +mod error; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 +pub use self::error::JoinError; + +mod harness; +use self::harness::Harness; + +mod join; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 +pub use self::join::JoinHandle; + +mod raw; +use self::raw::RawTask; + +mod state; +use self::state::State; + +mod waker; + +cfg_rt_threaded! 
{
+    mod stack;
+    pub(crate) use self::stack::TransferStack;
+}
+
+use crate::util::linked_list;
+
+use std::future::Future;
+use std::marker::PhantomData;
+use std::ptr::NonNull;
+use std::{fmt, mem};
+
+/// An owned handle to the task, tracked by ref count
+#[repr(transparent)]
+pub(crate) struct Task<S: 'static> {
+    raw: RawTask,
+    _p: PhantomData<S>,
+}
+
+unsafe impl<S> Send for Task<S> {}
+unsafe impl<S> Sync for Task<S> {}
+
+/// A task was notified
+#[repr(transparent)]
+pub(crate) struct Notified<S: 'static>(Task<S>);
+
+unsafe impl<S: Schedule> Send for Notified<S> {}
+unsafe impl<S: Schedule> Sync for Notified<S> {}
+
+/// Task result sent back
+pub(crate) type Result<T> = std::result::Result<T, JoinError>;
+
+pub(crate) trait Schedule: Sync + Sized + 'static {
+    /// Bind a task to the executor.
+    ///
+    /// Guaranteed to be called from the thread that called `poll` on the task.
+    /// The returned `Schedule` instance is associated with the task and is used
+    /// as `&self` in the other methods on this trait.
+    fn bind(task: Task<Self>) -> Self;
+
+    /// The task has completed work and is ready to be released. The scheduler
+    /// is free to drop it whenever.
+    ///
+    /// If the scheduler can immediately release the task, it should return
+    /// it as part of the function. This enables the task module to batch
+    /// the ref-dec with other operations.
+    fn release(&self, task: &Task<Self>) -> Option<Task<Self>>;
+
+    /// Schedule the task
+    fn schedule(&self, task: Notified<Self>);
+
+    /// Schedule the task to run in the near future, yielding the thread to
+    /// other tasks.
+    fn yield_now(&self, task: Notified<Self>) {
+        self.schedule(task);
+    }
+}
+
+/// Create a new task with an associated join handle
+pub(crate) fn joinable<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
+where
+    T: Future + Send + 'static,
+    S: Schedule,
+{
+    let raw = RawTask::new::<_, S>(task);
+
+    let task = Task {
+        raw,
+        _p: PhantomData,
+    };
+
+    let join = JoinHandle::new(raw);
+
+    (Notified(task), join)
+}
+
+cfg_rt_util! {
+    /// Create a new `!Send` task with an associated join handle
+    pub(crate) unsafe fn joinable_local<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
+    where
+        T: Future + 'static,
+        S: Schedule,
+    {
+        let raw = RawTask::new::<_, S>(task);
+
+        let task = Task {
+            raw,
+            _p: PhantomData,
+        };
+
+        let join = JoinHandle::new(raw);
+
+        (Notified(task), join)
+    }
+}
+
+impl<S: 'static> Task<S> {
+    pub(crate) unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> {
+        Task {
+            raw: RawTask::from_raw(ptr),
+            _p: PhantomData,
+        }
+    }
+
+    pub(crate) fn header(&self) -> &Header {
+        self.raw.header()
+    }
+}
+
+cfg_rt_threaded! {
+    impl<S: 'static> Notified<S> {
+        pub(crate) unsafe fn from_raw(ptr: NonNull<Header>) -> Notified<S> {
+            Notified(Task::from_raw(ptr))
+        }
+
+        pub(crate) fn header(&self) -> &Header {
+            self.0.header()
+        }
+    }
+
+    impl<S: 'static> Task<S> {
+        pub(crate) fn into_raw(self) -> NonNull<Header> {
+            let ret = self.header().into();
+            mem::forget(self);
+            ret
+        }
+    }
+
+    impl<S: 'static> Notified<S> {
+        pub(crate) fn into_raw(self) -> NonNull<Header> {
+            self.0.into_raw()
+        }
+    }
+}
+
+impl<S: Schedule> Task<S> {
+    /// Pre-emptively cancel the task as part of the shutdown process.
+    pub(crate) fn shutdown(&self) {
+        self.raw.shutdown();
+    }
+}
+
+impl<S: Schedule> Notified<S> {
+    /// Run the task
+    pub(crate) fn run(self) {
+        self.0.raw.poll();
+        mem::forget(self);
+    }
+
+    /// Pre-emptively cancel the task as part of the shutdown process.
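+    ///
+    /// (Consumes the `Notified` handle; the task is cancelled rather than run.)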
+    pub(crate) fn shutdown(self) {
+        self.0.shutdown();
+    }
+}
+
+impl<S: 'static> Drop for Task<S> {
+    fn drop(&mut self) {
+        // Decrement the ref count
+        if self.header().state.ref_dec() {
+            // Deallocate if this is the final ref count
+            self.raw.dealloc();
+        }
+    }
+}
+
+impl<S> fmt::Debug for Task<S> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "Task({:p})", self.header())
+    }
+}
+
+impl<S> fmt::Debug for Notified<S> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "task::Notified({:p})", self.0.header())
+    }
+}
+
+/// # Safety
+///
+/// Tasks are pinned
+unsafe impl<S> linked_list::Link for Task<S> {
+    type Handle = Task<S>;
+    type Target = Header;
+
+    fn as_raw(handle: &Task<S>) -> NonNull<Header> {
+        handle.header().into()
+    }
+
+    unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> {
+        Task::from_raw(ptr)
+    }
+
+    unsafe fn pointers(target: NonNull<Header>) -> NonNull<linked_list::Pointers<Header>> {
+        // Not super great as it avoids some of loom's checking...
+        NonNull::from(target.as_ref().owned.with_mut(|ptr| &mut *ptr))
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/raw.rs b/third_party/rust/tokio/src/runtime/task/raw.rs
new file mode 100644
index 0000000000..cae56d037d
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/raw.rs
@@ -0,0 +1,131 @@
+use crate::runtime::task::{Cell, Harness, Header, Schedule, State};
+
+use std::future::Future;
+use std::ptr::NonNull;
+use std::task::{Poll, Waker};
+
+/// Raw task handle
+pub(super) struct RawTask {
+    ptr: NonNull<Header>,
+}
+
+pub(super) struct Vtable {
+    /// Poll the future
+    pub(super) poll: unsafe fn(NonNull<Header>),
+
+    /// Deallocate the memory
+    pub(super) dealloc: unsafe fn(NonNull<Header>),
+
+    /// Read the task output, if complete
+    pub(super) try_read_output: unsafe fn(NonNull<Header>, *mut (), &Waker),
+
+    /// The join handle has been dropped
+    pub(super) drop_join_handle_slow: unsafe fn(NonNull<Header>),
+
+    /// Scheduler is being shutdown
+    pub(super) shutdown: unsafe fn(NonNull<Header>),
+}
+
+/// Get the vtable for the requested `T` and `S` generics.
+pub(super) fn vtable<T: Future, S: Schedule>() -> &'static Vtable {
+    &Vtable {
+        poll: poll::<T, S>,
+        dealloc: dealloc::<T, S>,
+        try_read_output: try_read_output::<T, S>,
+        drop_join_handle_slow: drop_join_handle_slow::<T, S>,
+        shutdown: shutdown::<T, S>,
+    }
+}
+
+impl RawTask {
+    pub(super) fn new<T, S>(task: T) -> RawTask
+    where
+        T: Future,
+        S: Schedule,
+    {
+        let ptr = Box::into_raw(Cell::<_, S>::new(task, State::new()));
+        let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) };
+
+        RawTask { ptr }
+    }
+
+    pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> RawTask {
+        RawTask { ptr }
+    }
+
+    /// Returns a reference to the task's meta structure.
+    ///
+    /// Safe as `Header` is `Sync`.
+    pub(super) fn header(&self) -> &Header {
+        unsafe { self.ptr.as_ref() }
+    }
+
+    /// Safety: mutual exclusion is required to call this function.
+    pub(super) fn poll(self) {
+        let vtable = self.header().vtable;
+        unsafe { (vtable.poll)(self.ptr) }
+    }
+
+    pub(super) fn dealloc(self) {
+        let vtable = self.header().vtable;
+        unsafe {
+            (vtable.dealloc)(self.ptr);
+        }
+    }
+
+    /// Safety: `dst` must be a `*mut Poll<super::Result<T::Output>>` where `T`
+    /// is the future stored by the task.
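+    ///
+    /// The only caller is `JoinHandle::poll` in `join.rs`, which erases the
+    /// output slot roughly like this (a sketch of existing code, not an
+    /// additional API):
+    ///
+    /// ```ignore
+    /// let mut ret = Poll::Pending;
+    /// unsafe {
+    ///     raw.try_read_output(&mut ret as *mut _ as *mut (), cx.waker());
+    /// }
+    /// ```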
+ pub(super) unsafe fn try_read_output(self, dst: *mut (), waker: &Waker) { + let vtable = self.header().vtable; + (vtable.try_read_output)(self.ptr, dst, waker); + } + + pub(super) fn drop_join_handle_slow(self) { + let vtable = self.header().vtable; + unsafe { (vtable.drop_join_handle_slow)(self.ptr) } + } + + pub(super) fn shutdown(self) { + let vtable = self.header().vtable; + unsafe { (vtable.shutdown)(self.ptr) } + } +} + +impl Clone for RawTask { + fn clone(&self) -> Self { + RawTask { ptr: self.ptr } + } +} + +impl Copy for RawTask {} + +unsafe fn poll<T: Future, S: Schedule>(ptr: NonNull<Header>) { + let harness = Harness::<T, S>::from_raw(ptr); + harness.poll(); +} + +unsafe fn dealloc<T: Future, S: Schedule>(ptr: NonNull<Header>) { + let harness = Harness::<T, S>::from_raw(ptr); + harness.dealloc(); +} + +unsafe fn try_read_output<T: Future, S: Schedule>( + ptr: NonNull<Header>, + dst: *mut (), + waker: &Waker, +) { + let out = &mut *(dst as *mut Poll<super::Result<T::Output>>); + + let harness = Harness::<T, S>::from_raw(ptr); + harness.try_read_output(out, waker); +} + +unsafe fn drop_join_handle_slow<T: Future, S: Schedule>(ptr: NonNull<Header>) { + let harness = Harness::<T, S>::from_raw(ptr); + harness.drop_join_handle_slow() +} + +unsafe fn shutdown<T: Future, S: Schedule>(ptr: NonNull<Header>) { + let harness = Harness::<T, S>::from_raw(ptr); + harness.shutdown() +} diff --git a/third_party/rust/tokio/src/runtime/task/stack.rs b/third_party/rust/tokio/src/runtime/task/stack.rs new file mode 100644 index 0000000000..9dd8d3f43f --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/stack.rs @@ -0,0 +1,83 @@ +use crate::loom::sync::atomic::AtomicPtr; +use crate::runtime::task::{Header, Task}; + +use std::marker::PhantomData; +use std::ptr::{self, NonNull}; +use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; + +/// Concurrent stack of tasks, used to pass ownership of a task from one worker +/// to another. +pub(crate) struct TransferStack<T: 'static> { + head: AtomicPtr<Header>, + _p: PhantomData<T>, +} + +impl<T: 'static> TransferStack<T> { + pub(crate) fn new() -> TransferStack<T> { + TransferStack { + head: AtomicPtr::new(ptr::null_mut()), + _p: PhantomData, + } + } + + pub(crate) fn push(&self, task: Task<T>) { + let task = task.into_raw(); + + // We don't care about any memory associated w/ setting the `head` + // field, just the current value. + // + // The compare-exchange creates a release sequence. 
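+        // (Consumers synchronize with these pushes through the `Acquire` swap
+        // of `head` in `drain`, which makes the `stack_next` writes visible.)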
+        let mut curr = self.head.load(Relaxed);
+
+        loop {
+            unsafe {
+                task.as_ref()
+                    .stack_next
+                    .with_mut(|ptr| *ptr = NonNull::new(curr))
+            };
+
+            let res = self
+                .head
+                .compare_exchange(curr, task.as_ptr() as *mut _, Release, Relaxed);
+
+            match res {
+                Ok(_) => return,
+                Err(actual) => {
+                    curr = actual;
+                }
+            }
+        }
+    }
+
+    pub(crate) fn drain(&self) -> impl Iterator<Item = Task<T>> {
+        struct Iter<T: 'static>(Option<NonNull<Header>>, PhantomData<T>);
+
+        impl<T: 'static> Iterator for Iter<T> {
+            type Item = Task<T>;
+
+            fn next(&mut self) -> Option<Task<T>> {
+                let task = self.0?;
+
+                // Move the cursor forward
+                self.0 = unsafe { task.as_ref().stack_next.with(|ptr| *ptr) };
+
+                // Return the task
+                unsafe { Some(Task::from_raw(task)) }
+            }
+        }
+
+        impl<T: 'static> Drop for Iter<T> {
+            fn drop(&mut self) {
+                use std::process;
+
+                if self.0.is_some() {
+                    // we have bugs
+                    process::abort();
+                }
+            }
+        }
+
+        let ptr = self.head.swap(ptr::null_mut(), Acquire);
+        Iter(NonNull::new(ptr), PhantomData)
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/state.rs b/third_party/rust/tokio/src/runtime/task/state.rs
new file mode 100644
index 0000000000..21e90430db
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/state.rs
@@ -0,0 +1,446 @@
+use crate::loom::sync::atomic::AtomicUsize;
+
+use std::fmt;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
+use std::usize;
+
+pub(super) struct State {
+    val: AtomicUsize,
+}
+
+/// Current state value
+#[derive(Copy, Clone)]
+pub(super) struct Snapshot(usize);
+
+type UpdateResult = Result<Snapshot, Snapshot>;
+
+/// The task is currently being run.
+const RUNNING: usize = 0b0001;
+
+/// The task is complete.
+///
+/// Once this bit is set, it is never unset
+const COMPLETE: usize = 0b0010;
+
+/// Extracts the task's lifecycle value from the state
+const LIFECYCLE_MASK: usize = 0b11;
+
+/// Flag tracking if the task has been pushed into a run queue.
+const NOTIFIED: usize = 0b100;
+
+/// The join handle is still around
+const JOIN_INTEREST: usize = 0b1_000;
+
+/// A join handle waker has been set
+const JOIN_WAKER: usize = 0b10_000;
+
+/// The task has been forcibly cancelled.
+const CANCELLED: usize = 0b100_000;
+
+/// All bits
+const STATE_MASK: usize = LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED;
+
+/// Bits used by the ref count portion of the state.
+const REF_COUNT_MASK: usize = !STATE_MASK;
+
+/// Number of positions to shift the ref count
+const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize;
+
+/// One ref count
+const REF_ONE: usize = 1 << REF_COUNT_SHIFT;
+
+/// State a task is initialized with
+///
+/// A task is initialized with two references: one for the scheduler and one for
+/// the `JoinHandle`. As the task starts with a `JoinHandle`, `JOIN_INTEREST` is
+/// set. A new task is immediately pushed into the run queue for execution and
+/// starts with the `NOTIFIED` flag set.
+const INITIAL_STATE: usize = (REF_ONE * 2) | JOIN_INTEREST | NOTIFIED;
+
+/// All transitions are performed via RMW operations. This establishes an
+/// unambiguous modification order.
+impl State {
+    /// Return a task's initial state
+    pub(super) fn new() -> State {
+        // A task is initialized with two references: one for the scheduler
+        // and one for the `JoinHandle` (`INITIAL_STATE` above encodes the
+        // same pair). As the task starts with a `JoinHandle`, `JOIN_INTEREST` is
+        // set.
A new task is immediately pushed into the run queue for + // execution and starts with the `NOTIFIED` flag set. + State { + val: AtomicUsize::new(INITIAL_STATE), + } + } + + /// Loads the current state, establishes `Acquire` ordering. + pub(super) fn load(&self) -> Snapshot { + Snapshot(self.val.load(Acquire)) + } + + /// Attempt to transition the lifecycle to `Running`. + /// + /// If `ref_inc` is set, the reference count is also incremented. + /// + /// The `NOTIFIED` bit is always unset. + pub(super) fn transition_to_running(&self, ref_inc: bool) -> UpdateResult { + self.fetch_update(|curr| { + assert!(curr.is_notified()); + + let mut next = curr; + + if !next.is_idle() { + return None; + } + + if ref_inc { + next.ref_inc(); + } + + next.set_running(); + next.unset_notified(); + Some(next) + }) + } + + /// Transitions the task from `Running` -> `Idle`. + /// + /// Returns `Ok` if the transition to `Idle` is successful, `Err` otherwise. + /// In both cases, a snapshot of the state from **after** the transition is + /// returned. + /// + /// The transition to `Idle` fails if the task has been flagged to be + /// cancelled. + pub(super) fn transition_to_idle(&self) -> UpdateResult { + self.fetch_update(|curr| { + assert!(curr.is_running()); + + if curr.is_cancelled() { + return None; + } + + let mut next = curr; + next.unset_running(); + + if next.is_notified() { + // The caller needs to schedule the task. To do this, it needs a + // waker. The waker requires a ref count. + next.ref_inc(); + } + + Some(next) + }) + } + + /// Transitions the task from `Running` -> `Complete`. + pub(super) fn transition_to_complete(&self) -> Snapshot { + const DELTA: usize = RUNNING | COMPLETE; + + let prev = Snapshot(self.val.fetch_xor(DELTA, AcqRel)); + assert!(prev.is_running()); + assert!(!prev.is_complete()); + + Snapshot(prev.0 ^ DELTA) + } + + /// Transition from `Complete` -> `Terminal`, decrementing the reference + /// count by 1. + /// + /// When `ref_dec` is set, an additional ref count decrement is performed. + /// This is used to batch atomic ops when possible. + pub(super) fn transition_to_terminal(&self, complete: bool, ref_dec: bool) -> Snapshot { + self.fetch_update(|mut snapshot| { + if complete { + snapshot.set_complete(); + } else { + assert!(snapshot.is_complete()); + } + + // Decrement the primary handle + snapshot.ref_dec(); + + if ref_dec { + // Decrement a second time + snapshot.ref_dec(); + } + + Some(snapshot) + }) + .unwrap() + } + + /// Transitions the state to `NOTIFIED`. + /// + /// Returns `true` if the task needs to be submitted to the pool for + /// execution + pub(super) fn transition_to_notified(&self) -> bool { + let prev = Snapshot(self.val.fetch_or(NOTIFIED, AcqRel)); + prev.will_need_queueing() + } + + /// Set the `CANCELLED` bit and attempt to transition to `Running`. + /// + /// Returns `true` if the transition to `Running` succeeded. + pub(super) fn transition_to_shutdown(&self) -> bool { + let mut prev = Snapshot(0); + + let _ = self.fetch_update(|mut snapshot| { + prev = snapshot; + + if snapshot.is_idle() { + snapshot.set_running(); + + if snapshot.is_notified() { + // If the task is idle and notified, this indicates the task is + // in the run queue and is considered owned by the scheduler. + // The shutdown operation claims ownership of the task, which + // means we need to assign an additional ref-count to the task + // in the queue. 
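+                    // (After the increment, one reference belongs to the
+                    // queued `Notified` handle and one to the shutdown path.)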
+                    snapshot.ref_inc();
+                }
+            }
+
+            snapshot.set_cancelled();
+            Some(snapshot)
+        });
+
+        prev.is_idle()
+    }
+
+    /// Optimistically tries to swap the state assuming the join handle is
+    /// __immediately__ dropped on spawn
+    pub(super) fn drop_join_handle_fast(&self) -> Result<(), ()> {
+        use std::sync::atomic::Ordering::Relaxed;
+
+        // Relaxed is acceptable as if this function is called and succeeds,
+        // then nothing has been done w/ the join handle.
+        //
+        // The moment the join handle is used (polled), the `JOIN_WAKER` flag is
+        // set, at which point the CAS will fail.
+        //
+        // Given this, there is no risk if this operation is reordered.
+        self.val
+            .compare_exchange_weak(
+                INITIAL_STATE,
+                (INITIAL_STATE - REF_ONE) & !JOIN_INTEREST,
+                Release,
+                Relaxed,
+            )
+            .map(|_| ())
+            .map_err(|_| ())
+    }
+
+    /// Try to unset the JOIN_INTEREST flag.
+    ///
+    /// Returns `Ok` if the operation happens before the task transitions to a
+    /// completed state, `Err` otherwise.
+    pub(super) fn unset_join_interested(&self) -> UpdateResult {
+        self.fetch_update(|curr| {
+            assert!(curr.is_join_interested());
+
+            if curr.is_complete() {
+                return None;
+            }
+
+            let mut next = curr;
+            next.unset_join_interested();
+
+            Some(next)
+        })
+    }
+
+    /// Set the `JOIN_WAKER` bit.
+    ///
+    /// Returns `Ok` if the bit is set, `Err` otherwise. This operation fails if
+    /// the task has completed.
+    pub(super) fn set_join_waker(&self) -> UpdateResult {
+        self.fetch_update(|curr| {
+            assert!(curr.is_join_interested());
+            assert!(!curr.has_join_waker());
+
+            if curr.is_complete() {
+                return None;
+            }
+
+            let mut next = curr;
+            next.set_join_waker();
+
+            Some(next)
+        })
+    }
+
+    /// Unsets the `JOIN_WAKER` bit.
+    ///
+    /// Returns `Ok` if the bit has been unset, `Err` otherwise. This operation
+    /// fails if the task has completed.
+    pub(super) fn unset_waker(&self) -> UpdateResult {
+        self.fetch_update(|curr| {
+            assert!(curr.is_join_interested());
+            assert!(curr.has_join_waker());
+
+            if curr.is_complete() {
+                return None;
+            }
+
+            let mut next = curr;
+            next.unset_join_waker();
+
+            Some(next)
+        })
+    }
+
+    pub(super) fn ref_inc(&self) {
+        use std::process;
+        use std::sync::atomic::Ordering::Relaxed;
+
+        // Using a relaxed ordering is alright here, as knowledge of the
+        // original reference prevents other threads from erroneously deleting
+        // the object.
+        //
+        // As explained in the [Boost documentation][1], increasing the
+        // reference counter can always be done with `memory_order_relaxed`:
+        // new references to an object can only be formed from an existing
+        // reference, and passing an existing reference from one thread to
+        // another must already provide any required synchronization.
+        //
+        // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+        let prev = self.val.fetch_add(REF_ONE, Relaxed);
+
+        // If the reference count overflowed, abort.
+        if prev > isize::max_value() as usize {
+            process::abort();
+        }
+    }
+
+    /// Returns `true` if the task should be released.
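+    /// (That is, this call dropped the final reference: the previous count
+    /// was exactly one.)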
+ pub(super) fn ref_dec(&self) -> bool { + let prev = Snapshot(self.val.fetch_sub(REF_ONE, AcqRel)); + prev.ref_count() == 1 + } + + fn fetch_update<F>(&self, mut f: F) -> Result<Snapshot, Snapshot> + where + F: FnMut(Snapshot) -> Option<Snapshot>, + { + let mut curr = self.load(); + + loop { + let next = match f(curr) { + Some(next) => next, + None => return Err(curr), + }; + + let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire); + + match res { + Ok(_) => return Ok(next), + Err(actual) => curr = Snapshot(actual), + } + } + } +} + +// ===== impl Snapshot ===== + +impl Snapshot { + /// Returns `true` if the task is in an idle state. + pub(super) fn is_idle(self) -> bool { + self.0 & (RUNNING | COMPLETE) == 0 + } + + /// Returns `true` if the task has been flagged as notified. + pub(super) fn is_notified(self) -> bool { + self.0 & NOTIFIED == NOTIFIED + } + + fn unset_notified(&mut self) { + self.0 &= !NOTIFIED + } + + pub(super) fn is_running(self) -> bool { + self.0 & RUNNING == RUNNING + } + + fn set_running(&mut self) { + self.0 |= RUNNING; + } + + fn unset_running(&mut self) { + self.0 &= !RUNNING; + } + + pub(super) fn is_cancelled(self) -> bool { + self.0 & CANCELLED == CANCELLED + } + + fn set_cancelled(&mut self) { + self.0 |= CANCELLED; + } + + fn set_complete(&mut self) { + self.0 |= COMPLETE; + } + + /// Returns `true` if the task's future has completed execution. + pub(super) fn is_complete(self) -> bool { + self.0 & COMPLETE == COMPLETE + } + + pub(super) fn is_join_interested(self) -> bool { + self.0 & JOIN_INTEREST == JOIN_INTEREST + } + + fn unset_join_interested(&mut self) { + self.0 &= !JOIN_INTEREST + } + + pub(super) fn has_join_waker(self) -> bool { + self.0 & JOIN_WAKER == JOIN_WAKER + } + + fn set_join_waker(&mut self) { + self.0 |= JOIN_WAKER; + } + + fn unset_join_waker(&mut self) { + self.0 &= !JOIN_WAKER + } + + pub(super) fn ref_count(self) -> usize { + (self.0 & REF_COUNT_MASK) >> REF_COUNT_SHIFT + } + + fn ref_inc(&mut self) { + assert!(self.0 <= isize::max_value() as usize); + self.0 += REF_ONE; + } + + pub(super) fn ref_dec(&mut self) { + assert!(self.ref_count() > 0); + self.0 -= REF_ONE + } + + fn will_need_queueing(self) -> bool { + !self.is_notified() && self.is_idle() + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let snapshot = self.load(); + snapshot.fmt(fmt) + } +} + +impl fmt::Debug for Snapshot { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Snapshot") + .field("is_running", &self.is_running()) + .field("is_complete", &self.is_complete()) + .field("is_notified", &self.is_notified()) + .field("is_cancelled", &self.is_cancelled()) + .field("is_join_interested", &self.is_join_interested()) + .field("has_join_waker", &self.has_join_waker()) + .field("ref_count", &self.ref_count()) + .finish() + } +} diff --git a/third_party/rust/tokio/src/runtime/task/waker.rs b/third_party/rust/tokio/src/runtime/task/waker.rs new file mode 100644 index 0000000000..5c2d478fbb --- /dev/null +++ b/third_party/rust/tokio/src/runtime/task/waker.rs @@ -0,0 +1,101 @@ +use crate::runtime::task::harness::Harness; +use crate::runtime::task::{Header, Schedule}; + +use std::future::Future; +use std::marker::PhantomData; +use std::mem::ManuallyDrop; +use std::ops; +use std::ptr::NonNull; +use std::task::{RawWaker, RawWakerVTable, Waker}; + +pub(super) struct WakerRef<'a, S: 'static> { + waker: ManuallyDrop<Waker>, + _p: PhantomData<(&'a Header, S)>, +} + +/// Returns a 
`WakerRef` which avoids having to pre-emptively increase the +/// refcount if there is no need to do so. +pub(super) fn waker_ref<T, S>(header: &Header) -> WakerRef<'_, S> +where + T: Future, + S: Schedule, +{ + // `Waker::will_wake` uses the VTABLE pointer as part of the check. This + // means that `will_wake` will always return false when using the current + // task's waker. (discussion at rust-lang/rust#66281). + // + // To fix this, we use a single vtable. Since we pass in a reference at this + // point and not an *owned* waker, we must ensure that `drop` is never + // called on this waker instance. This is done by wrapping it with + // `ManuallyDrop` and then never calling drop. + let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker::<T, S>(header))) }; + + WakerRef { + waker, + _p: PhantomData, + } +} + +impl<S> ops::Deref for WakerRef<'_, S> { + type Target = Waker; + + fn deref(&self) -> &Waker { + &self.waker + } +} + +unsafe fn clone_waker<T, S>(ptr: *const ()) -> RawWaker +where + T: Future, + S: Schedule, +{ + let header = ptr as *const Header; + (*header).state.ref_inc(); + raw_waker::<T, S>(header) +} + +unsafe fn drop_waker<T, S>(ptr: *const ()) +where + T: Future, + S: Schedule, +{ + let ptr = NonNull::new_unchecked(ptr as *mut Header); + let harness = Harness::<T, S>::from_raw(ptr); + harness.drop_reference(); +} + +unsafe fn wake_by_val<T, S>(ptr: *const ()) +where + T: Future, + S: Schedule, +{ + let ptr = NonNull::new_unchecked(ptr as *mut Header); + let harness = Harness::<T, S>::from_raw(ptr); + harness.wake_by_val(); +} + +// Wake without consuming the waker +unsafe fn wake_by_ref<T, S>(ptr: *const ()) +where + T: Future, + S: Schedule, +{ + let ptr = NonNull::new_unchecked(ptr as *mut Header); + let harness = Harness::<T, S>::from_raw(ptr); + harness.wake_by_ref(); +} + +fn raw_waker<T, S>(header: *const Header) -> RawWaker +where + T: Future, + S: Schedule, +{ + let ptr = header as *const (); + let vtable = &RawWakerVTable::new( + clone_waker::<T, S>, + wake_by_val::<T, S>, + wake_by_ref::<T, S>, + drop_waker::<T, S>, + ); + RawWaker::new(ptr, vtable) +} diff --git a/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs b/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs new file mode 100644 index 0000000000..db7048e3f9 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs @@ -0,0 +1,31 @@ +use crate::runtime::{self, Runtime}; + +use std::sync::Arc; + +#[test] +fn blocking_shutdown() { + loom::model(|| { + let v = Arc::new(()); + + let rt = mk_runtime(1); + rt.enter(|| { + for _ in 0..2 { + let v = v.clone(); + crate::task::spawn_blocking(move || { + assert!(1 < Arc::strong_count(&v)); + }); + } + }); + + drop(rt); + assert_eq!(1, Arc::strong_count(&v)); + }); +} + +fn mk_runtime(num_threads: usize) -> Runtime { + runtime::Builder::new() + .threaded_scheduler() + .core_threads(num_threads) + .build() + .unwrap() +} diff --git a/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs b/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs new file mode 100644 index 0000000000..c126fe479a --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs @@ -0,0 +1,49 @@ +use loom::sync::Notify; + +use std::sync::{Arc, Mutex}; + +pub(crate) fn channel<T>() -> (Sender<T>, Receiver<T>) { + let inner = Arc::new(Inner { + notify: Notify::new(), + value: Mutex::new(None), + }); + + let tx = Sender { + inner: inner.clone(), + }; + let rx = Receiver { inner }; + + (tx, rx) +} + +pub(crate) struct Sender<T> { + 
inner: Arc<Inner<T>>,
+}
+
+pub(crate) struct Receiver<T> {
+    inner: Arc<Inner<T>>,
+}
+
+struct Inner<T> {
+    notify: Notify,
+    value: Mutex<Option<T>>,
+}
+
+impl<T> Sender<T> {
+    pub(crate) fn send(self, value: T) {
+        *self.inner.value.lock().unwrap() = Some(value);
+        self.inner.notify.notify();
+    }
+}
+
+impl<T> Receiver<T> {
+    pub(crate) fn recv(self) -> T {
+        loop {
+            if let Some(v) = self.inner.value.lock().unwrap().take() {
+                return v;
+            }
+
+            self.inner.notify.wait();
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/loom_pool.rs b/third_party/rust/tokio/src/runtime/tests/loom_pool.rs
new file mode 100644
index 0000000000..c08658cde8
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/loom_pool.rs
@@ -0,0 +1,380 @@
+/// Full runtime loom tests. These are heavy tests and take significant time to
+/// run on CI.
+///
+/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test.
+///
+/// In order to speed up the CI, the tests are split into groups.
+use crate::future::poll_fn;
+use crate::runtime::tests::loom_oneshot as oneshot;
+use crate::runtime::{self, Runtime};
+use crate::{spawn, task};
+use tokio_test::assert_ok;
+
+use loom::sync::atomic::{AtomicBool, AtomicUsize};
+use loom::sync::{Arc, Mutex};
+
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::task::{Context, Poll};
+
+/// Tests are divided into groups to make the runs faster on CI.
+mod group_a {
+    use super::*;
+
+    #[test]
+    fn racy_shutdown() {
+        loom::model(|| {
+            let pool = mk_pool(1);
+
+            // here's the case we want to exercise:
+            //
+            // a worker that still has tasks in its local queue gets sent to the blocking pool (due to
+            // block_in_place). the blocking pool is shut down, so drops the worker. the worker's
+            // shutdown method never gets run.
+            //
+            // we do this by spawning two tasks on one worker, the first of which does block_in_place,
+            // and then immediately drop the pool.
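+            // (`track` wraps each future in a loom `Arc` so that leaked tasks
+            // are reported at the end of the model run.)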
+ + pool.spawn(track(async { + crate::task::block_in_place(|| {}); + })); + pool.spawn(track(async {})); + drop(pool); + }); + } + + #[test] + fn pool_multi_spawn() { + loom::model(|| { + let pool = mk_pool(2); + let c1 = Arc::new(AtomicUsize::new(0)); + + let (tx, rx) = oneshot::channel(); + let tx1 = Arc::new(Mutex::new(Some(tx))); + + // Spawn a task + let c2 = c1.clone(); + let tx2 = tx1.clone(); + pool.spawn(track(async move { + spawn(track(async move { + if 1 == c1.fetch_add(1, Relaxed) { + tx1.lock().unwrap().take().unwrap().send(()); + } + })); + })); + + // Spawn a second task + pool.spawn(track(async move { + spawn(track(async move { + if 1 == c2.fetch_add(1, Relaxed) { + tx2.lock().unwrap().take().unwrap().send(()); + } + })); + })); + + rx.recv(); + }); + } + + fn only_blocking_inner(first_pending: bool) { + loom::model(move || { + let pool = mk_pool(1); + let (block_tx, block_rx) = oneshot::channel(); + + pool.spawn(track(async move { + crate::task::block_in_place(move || { + block_tx.send(()); + }); + if first_pending { + task::yield_now().await + } + })); + + block_rx.recv(); + drop(pool); + }); + } + + #[test] + fn only_blocking_without_pending() { + only_blocking_inner(false) + } + + #[test] + fn only_blocking_with_pending() { + only_blocking_inner(true) + } +} + +mod group_b { + use super::*; + + fn blocking_and_regular_inner(first_pending: bool) { + const NUM: usize = 3; + loom::model(move || { + let pool = mk_pool(1); + let cnt = Arc::new(AtomicUsize::new(0)); + + let (block_tx, block_rx) = oneshot::channel(); + let (done_tx, done_rx) = oneshot::channel(); + let done_tx = Arc::new(Mutex::new(Some(done_tx))); + + pool.spawn(track(async move { + crate::task::block_in_place(move || { + block_tx.send(()); + }); + if first_pending { + task::yield_now().await + } + })); + + for _ in 0..NUM { + let cnt = cnt.clone(); + let done_tx = done_tx.clone(); + + pool.spawn(track(async move { + if NUM == cnt.fetch_add(1, Relaxed) + 1 { + done_tx.lock().unwrap().take().unwrap().send(()); + } + })); + } + + done_rx.recv(); + block_rx.recv(); + + drop(pool); + }); + } + + #[test] + fn blocking_and_regular() { + blocking_and_regular_inner(false); + } + + #[test] + fn blocking_and_regular_with_pending() { + blocking_and_regular_inner(true); + } + + #[test] + fn pool_shutdown() { + loom::model(|| { + let pool = mk_pool(2); + + pool.spawn(track(async move { + gated2(true).await; + })); + + pool.spawn(track(async move { + gated2(false).await; + })); + + drop(pool); + }); + } + + #[test] + fn join_output() { + loom::model(|| { + let mut rt = mk_pool(1); + + rt.block_on(async { + let t = crate::spawn(track(async { "hello" })); + + let out = assert_ok!(t.await); + assert_eq!("hello", out.into_inner()); + }); + }); + } + + #[test] + fn poll_drop_handle_then_drop() { + loom::model(|| { + let mut rt = mk_pool(1); + + rt.block_on(async move { + let mut t = crate::spawn(track(async { "hello" })); + + poll_fn(|cx| { + let _ = Pin::new(&mut t).poll(cx); + Poll::Ready(()) + }) + .await; + }); + }) + } + + #[test] + fn complete_block_on_under_load() { + loom::model(|| { + let mut pool = mk_pool(1); + + pool.block_on(async { + // Trigger a re-schedule + crate::spawn(track(async { + for _ in 0..2 { + task::yield_now().await; + } + })); + + gated2(true).await + }); + }); + } +} + +mod group_c { + use super::*; + + #[test] + fn shutdown_with_notification() { + use crate::sync::oneshot; + + loom::model(|| { + let rt = mk_pool(2); + let (done_tx, done_rx) = oneshot::channel::<()>(); + + rt.spawn(track(async move 
{ + let (tx, rx) = oneshot::channel::<()>(); + + crate::spawn(async move { + crate::task::spawn_blocking(move || { + let _ = tx.send(()); + }); + + let _ = done_rx.await; + }); + + let _ = rx.await; + + let _ = done_tx.send(()); + })); + }); + } +} + +mod group_d { + use super::*; + + #[test] + fn pool_multi_notify() { + loom::model(|| { + let pool = mk_pool(2); + + let c1 = Arc::new(AtomicUsize::new(0)); + + let (done_tx, done_rx) = oneshot::channel(); + let done_tx1 = Arc::new(Mutex::new(Some(done_tx))); + + // Spawn a task + let c2 = c1.clone(); + let done_tx2 = done_tx1.clone(); + pool.spawn(track(async move { + gated().await; + gated().await; + + if 1 == c1.fetch_add(1, Relaxed) { + done_tx1.lock().unwrap().take().unwrap().send(()); + } + })); + + // Spawn a second task + pool.spawn(track(async move { + gated().await; + gated().await; + + if 1 == c2.fetch_add(1, Relaxed) { + done_tx2.lock().unwrap().take().unwrap().send(()); + } + })); + + done_rx.recv(); + }); + } +} + +fn mk_pool(num_threads: usize) -> Runtime { + runtime::Builder::new() + .threaded_scheduler() + .core_threads(num_threads) + .build() + .unwrap() +} + +fn gated() -> impl Future<Output = &'static str> { + gated2(false) +} + +fn gated2(thread: bool) -> impl Future<Output = &'static str> { + use loom::thread; + use std::sync::Arc; + + let gate = Arc::new(AtomicBool::new(false)); + let mut fired = false; + + poll_fn(move |cx| { + if !fired { + let gate = gate.clone(); + let waker = cx.waker().clone(); + + if thread { + thread::spawn(move || { + gate.store(true, SeqCst); + waker.wake_by_ref(); + }); + } else { + spawn(track(async move { + gate.store(true, SeqCst); + waker.wake_by_ref(); + })); + } + + fired = true; + + return Poll::Pending; + } + + if gate.load(SeqCst) { + Poll::Ready("hello world") + } else { + Poll::Pending + } + }) +} + +fn track<T: Future>(f: T) -> Track<T> { + Track { + inner: f, + arc: Arc::new(()), + } +} + +pin_project! { + struct Track<T> { + #[pin] + inner: T, + // Arc is used to hook into loom's leak tracking. 
+ arc: Arc<()>, + } +} + +impl<T> Track<T> { + fn into_inner(self) -> T { + self.inner + } +} + +impl<T: Future> Future for Track<T> { + type Output = Track<T::Output>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let me = self.project(); + + Poll::Ready(Track { + inner: ready!(me.inner.poll(cx)), + arc: me.arc.clone(), + }) + } +} diff --git a/third_party/rust/tokio/src/runtime/tests/loom_queue.rs b/third_party/rust/tokio/src/runtime/tests/loom_queue.rs new file mode 100644 index 0000000000..de02610db0 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/loom_queue.rs @@ -0,0 +1,216 @@ +use crate::runtime::queue; +use crate::runtime::task::{self, Schedule, Task}; + +use loom::thread; + +#[test] +fn basic() { + loom::model(|| { + let (steal, mut local) = queue::local(); + let inject = queue::Inject::new(); + + let th = thread::spawn(move || { + let (_, mut local) = queue::local(); + let mut n = 0; + + for _ in 0..3 { + if steal.steal_into(&mut local).is_some() { + n += 1; + } + + while local.pop().is_some() { + n += 1; + } + } + + n + }); + + let mut n = 0; + + for _ in 0..2 { + for _ in 0..2 { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + if local.pop().is_some() { + n += 1; + } + + // Push another task + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + + while local.pop().is_some() { + n += 1; + } + } + + while inject.pop().is_some() { + n += 1; + } + + n += th.join().unwrap(); + + assert_eq!(6, n); + }); +} + +#[test] +fn steal_overflow() { + loom::model(|| { + let (steal, mut local) = queue::local(); + let inject = queue::Inject::new(); + + let th = thread::spawn(move || { + let (_, mut local) = queue::local(); + let mut n = 0; + + if steal.steal_into(&mut local).is_some() { + n += 1; + } + + while local.pop().is_some() { + n += 1; + } + + n + }); + + let mut n = 0; + + // push a task, pop a task + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + + if local.pop().is_some() { + n += 1; + } + + for _ in 0..6 { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + n += th.join().unwrap(); + + while local.pop().is_some() { + n += 1; + } + + while inject.pop().is_some() { + n += 1; + } + + assert_eq!(7, n); + }); +} + +#[test] +fn multi_stealer() { + const NUM_TASKS: usize = 5; + + fn steal_tasks(steal: queue::Steal<Runtime>) -> usize { + let (_, mut local) = queue::local(); + + if steal.steal_into(&mut local).is_none() { + return 0; + } + + let mut n = 1; + + while local.pop().is_some() { + n += 1; + } + + n + } + + loom::model(|| { + let (steal, mut local) = queue::local(); + let inject = queue::Inject::new(); + + // Push work + for _ in 0..NUM_TASKS { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + let th1 = { + let steal = steal.clone(); + thread::spawn(move || steal_tasks(steal)) + }; + + let th2 = thread::spawn(move || steal_tasks(steal)); + + let mut n = 0; + + while local.pop().is_some() { + n += 1; + } + + while inject.pop().is_some() { + n += 1; + } + + n += th1.join().unwrap(); + n += th2.join().unwrap(); + + assert_eq!(n, NUM_TASKS); + }); +} + +#[test] +fn chained_steal() { + loom::model(|| { + let (s1, mut l1) = queue::local(); + let (s2, mut l2) = queue::local(); + let inject = queue::Inject::new(); + + // Load up some tasks + for _ in 0..4 { + let (task, _) = task::joinable::<_, Runtime>(async 
{}); + l1.push_back(task, &inject); + + let (task, _) = task::joinable::<_, Runtime>(async {}); + l2.push_back(task, &inject); + } + + // Spawn a task to steal from **our** queue + let th = thread::spawn(move || { + let (_, mut local) = queue::local(); + s1.steal_into(&mut local); + + while local.pop().is_some() {} + }); + + // Drain our tasks, then attempt to steal + while l1.pop().is_some() {} + + s2.steal_into(&mut l1); + + th.join().unwrap(); + + while l1.pop().is_some() {} + while l2.pop().is_some() {} + while inject.pop().is_some() {} + }); +} + +struct Runtime; + +impl Schedule for Runtime { + fn bind(task: Task<Self>) -> Runtime { + std::mem::forget(task); + Runtime + } + + fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> { + None + } + + fn schedule(&self, _task: task::Notified<Self>) { + unreachable!(); + } +} diff --git a/third_party/rust/tokio/src/runtime/tests/mod.rs b/third_party/rust/tokio/src/runtime/tests/mod.rs new file mode 100644 index 0000000000..123a7e35a3 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/mod.rs @@ -0,0 +1,13 @@ +cfg_loom! { + mod loom_blocking; + mod loom_oneshot; + mod loom_pool; + mod loom_queue; +} + +cfg_not_loom! { + mod queue; + + #[cfg(miri)] + mod task; +} diff --git a/third_party/rust/tokio/src/runtime/tests/queue.rs b/third_party/rust/tokio/src/runtime/tests/queue.rs new file mode 100644 index 0000000000..d228d5dcc7 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/queue.rs @@ -0,0 +1,202 @@ +use crate::runtime::queue; +use crate::runtime::task::{self, Schedule, Task}; + +use std::thread; +use std::time::Duration; + +#[test] +fn fits_256() { + let (_, mut local) = queue::local(); + let inject = queue::Inject::new(); + + for _ in 0..256 { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + assert!(inject.pop().is_none()); + + while local.pop().is_some() {} +} + +#[test] +fn overflow() { + let (_, mut local) = queue::local(); + let inject = queue::Inject::new(); + + for _ in 0..257 { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + let mut n = 0; + + while inject.pop().is_some() { + n += 1; + } + + while local.pop().is_some() { + n += 1; + } + + assert_eq!(n, 257); +} + +#[test] +fn steal_batch() { + let (steal1, mut local1) = queue::local(); + let (_, mut local2) = queue::local(); + let inject = queue::Inject::new(); + + for _ in 0..4 { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local1.push_back(task, &inject); + } + + assert!(steal1.steal_into(&mut local2).is_some()); + + for _ in 0..1 { + assert!(local2.pop().is_some()); + } + + assert!(local2.pop().is_none()); + + for _ in 0..2 { + assert!(local1.pop().is_some()); + } + + assert!(local1.pop().is_none()); +} + +#[test] +fn stress1() { + const NUM_ITER: usize = 1; + const NUM_STEAL: usize = 1_000; + const NUM_LOCAL: usize = 1_000; + const NUM_PUSH: usize = 500; + const NUM_POP: usize = 250; + + for _ in 0..NUM_ITER { + let (steal, mut local) = queue::local(); + let inject = queue::Inject::new(); + + let th = thread::spawn(move || { + let (_, mut local) = queue::local(); + let mut n = 0; + + for _ in 0..NUM_STEAL { + if steal.steal_into(&mut local).is_some() { + n += 1; + } + + while local.pop().is_some() { + n += 1; + } + + thread::yield_now(); + } + + n + }); + + let mut n = 0; + + for _ in 0..NUM_LOCAL { + for _ in 0..NUM_PUSH { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + } + + for _ in 
0..NUM_POP { + if local.pop().is_some() { + n += 1; + } else { + break; + } + } + } + + while inject.pop().is_some() { + n += 1; + } + + n += th.join().unwrap(); + + assert_eq!(n, NUM_LOCAL * NUM_PUSH); + } +} + +#[test] +fn stress2() { + const NUM_ITER: usize = 1; + const NUM_TASKS: usize = 1_000_000; + const NUM_STEAL: usize = 1_000; + + for _ in 0..NUM_ITER { + let (steal, mut local) = queue::local(); + let inject = queue::Inject::new(); + + let th = thread::spawn(move || { + let (_, mut local) = queue::local(); + let mut n = 0; + + for _ in 0..NUM_STEAL { + if steal.steal_into(&mut local).is_some() { + n += 1; + } + + while local.pop().is_some() { + n += 1; + } + + thread::sleep(Duration::from_micros(10)); + } + + n + }); + + let mut num_pop = 0; + + for i in 0..NUM_TASKS { + let (task, _) = task::joinable::<_, Runtime>(async {}); + local.push_back(task, &inject); + + if i % 128 == 0 && local.pop().is_some() { + num_pop += 1; + } + + while inject.pop().is_some() { + num_pop += 1; + } + } + + num_pop += th.join().unwrap(); + + while local.pop().is_some() { + num_pop += 1; + } + + while inject.pop().is_some() { + num_pop += 1; + } + + assert_eq!(num_pop, NUM_TASKS); + } +} + +struct Runtime; + +impl Schedule for Runtime { + fn bind(task: Task<Self>) -> Runtime { + std::mem::forget(task); + Runtime + } + + fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> { + None + } + + fn schedule(&self, _task: task::Notified<Self>) { + unreachable!(); + } +} diff --git a/third_party/rust/tokio/src/runtime/tests/task.rs b/third_party/rust/tokio/src/runtime/tests/task.rs new file mode 100644 index 0000000000..82315a04ff --- /dev/null +++ b/third_party/rust/tokio/src/runtime/tests/task.rs @@ -0,0 +1,159 @@ +use crate::runtime::task::{self, Schedule, Task}; +use crate::util::linked_list::LinkedList; +use crate::util::TryLock; + +use std::collections::VecDeque; +use std::sync::Arc; + +#[test] +fn create_drop() { + let _ = task::joinable::<_, Runtime>(async { unreachable!() }); +} + +#[test] +fn schedule() { + with(|rt| { + let (task, _) = task::joinable(async { + crate::task::yield_now().await; + }); + + rt.schedule(task); + + assert_eq!(2, rt.tick()); + }) +} + +#[test] +fn shutdown() { + with(|rt| { + let (task, _) = task::joinable(async { + loop { + crate::task::yield_now().await; + } + }); + + rt.schedule(task); + rt.tick_max(1); + + rt.shutdown(); + }) +} + +fn with(f: impl FnOnce(Runtime)) { + struct Reset; + + impl Drop for Reset { + fn drop(&mut self) { + let _rt = CURRENT.try_lock().unwrap().take(); + } + } + + let _reset = Reset; + + let rt = Runtime(Arc::new(Inner { + released: task::TransferStack::new(), + core: TryLock::new(Core { + queue: VecDeque::new(), + tasks: LinkedList::new(), + }), + })); + + *CURRENT.try_lock().unwrap() = Some(rt.clone()); + f(rt) +} + +#[derive(Clone)] +struct Runtime(Arc<Inner>); + +struct Inner { + released: task::TransferStack<Runtime>, + core: TryLock<Core>, +} + +struct Core { + queue: VecDeque<task::Notified<Runtime>>, + tasks: LinkedList<Task<Runtime>>, +} + +static CURRENT: TryLock<Option<Runtime>> = TryLock::new(None); + +impl Runtime { + fn tick(&self) -> usize { + self.tick_max(usize::max_value()) + } + + fn tick_max(&self, max: usize) -> usize { + let mut n = 0; + + while !self.is_empty() && n < max { + let task = self.next_task(); + n += 1; + task.run(); + } + + self.0.maintenance(); + + n + } + + fn is_empty(&self) -> bool { + self.0.core.try_lock().unwrap().queue.is_empty() + } + + fn next_task(&self) -> task::Notified<Runtime> { + 
self.0.core.try_lock().unwrap().queue.pop_front().unwrap() + } + + fn shutdown(&self) { + let mut core = self.0.core.try_lock().unwrap(); + + for task in core.tasks.iter() { + task.shutdown(); + } + + while let Some(task) = core.queue.pop_back() { + task.shutdown(); + } + + drop(core); + + while !self.0.core.try_lock().unwrap().tasks.is_empty() { + self.0.maintenance(); + } + } +} + +impl Inner { + fn maintenance(&self) { + use std::mem::ManuallyDrop; + + for task in self.released.drain() { + let task = ManuallyDrop::new(task); + + // safety: see worker.rs + unsafe { + let ptr = task.header().into(); + self.core.try_lock().unwrap().tasks.remove(ptr); + } + } + } +} + +impl Schedule for Runtime { + fn bind(task: Task<Self>) -> Runtime { + let rt = CURRENT.try_lock().unwrap().as_ref().unwrap().clone(); + rt.0.core.try_lock().unwrap().tasks.push_front(task); + rt + } + + fn release(&self, task: &Task<Self>) -> Option<Task<Self>> { + // safety: copying worker.rs + let task = unsafe { Task::from_raw(task.header().into()) }; + self.0.released.push(task); + None + } + + fn schedule(&self, task: task::Notified<Self>) { + self.0.core.try_lock().unwrap().queue.push_back(task); + } +} diff --git a/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs b/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs new file mode 100644 index 0000000000..2bda0fc738 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs @@ -0,0 +1,52 @@ +use crate::loom::sync::atomic::AtomicPtr; + +use std::ptr; +use std::sync::atomic::Ordering::AcqRel; + +pub(super) struct AtomicCell<T> { + data: AtomicPtr<T>, +} + +unsafe impl<T: Send> Send for AtomicCell<T> {} +unsafe impl<T: Send> Sync for AtomicCell<T> {} + +impl<T> AtomicCell<T> { + pub(super) fn new(data: Option<Box<T>>) -> AtomicCell<T> { + AtomicCell { + data: AtomicPtr::new(to_raw(data)), + } + } + + pub(super) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> { + let old = self.data.swap(to_raw(val), AcqRel); + from_raw(old) + } + + #[cfg(feature = "blocking")] + pub(super) fn set(&self, val: Box<T>) { + let _ = self.swap(Some(val)); + } + + pub(super) fn take(&self) -> Option<Box<T>> { + self.swap(None) + } +} + +fn to_raw<T>(data: Option<Box<T>>) -> *mut T { + data.map(Box::into_raw).unwrap_or(ptr::null_mut()) +} + +fn from_raw<T>(val: *mut T) -> Option<Box<T>> { + if val.is_null() { + None + } else { + Some(unsafe { Box::from_raw(val) }) + } +} + +impl<T> Drop for AtomicCell<T> { + fn drop(&mut self) { + // Free any data still held by the cell + let _ = self.take(); + } +} diff --git a/third_party/rust/tokio/src/runtime/thread_pool/idle.rs b/third_party/rust/tokio/src/runtime/thread_pool/idle.rs new file mode 100644 index 0000000000..ae87ca4ba1 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/thread_pool/idle.rs @@ -0,0 +1,222 @@ +//! Coordinates idling workers + +use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::Mutex; + +use std::fmt; +use std::sync::atomic::Ordering::{self, SeqCst}; + +pub(super) struct Idle { + /// Tracks both the number of searching workers and the number of unparked + /// workers. + /// + /// Used as a fast-path to avoid acquiring the lock when needed. + state: AtomicUsize, + + /// Sleeping workers + sleepers: Mutex<Vec<usize>>, + + /// Total number of workers. 
+ num_workers: usize,
+}
+
+const UNPARK_SHIFT: usize = 16;
+const UNPARK_MASK: usize = !SEARCH_MASK;
+const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1;
+
+#[derive(Copy, Clone)]
+struct State(usize);
+
+impl Idle {
+ pub(super) fn new(num_workers: usize) -> Idle {
+ let init = State::new(num_workers);
+
+ Idle {
+ state: AtomicUsize::new(init.into()),
+ sleepers: Mutex::new(Vec::with_capacity(num_workers)),
+ num_workers,
+ }
+ }
+
+ /// If there are no workers actively searching, returns the index of a
+ /// worker currently sleeping.
+ pub(super) fn worker_to_notify(&self) -> Option<usize> {
+ // If at least one worker is spinning, work being notified will
+ // eventually be found. A searching thread will find **some** work and
+ // notify another worker, eventually leading to our work being found.
+ //
+ // For this to happen, this load must happen before the thread
+ // transitioning `num_searching` to zero. Acquire / Release does not
+ // provide sufficient guarantees, so this load is done with `SeqCst` and
+ // will pair with the `fetch_sub(1)` when transitioning out of
+ // searching.
+ if !self.notify_should_wakeup() {
+ return None;
+ }
+
+ // Acquire the lock
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ // Check again, now that the lock is acquired
+ if !self.notify_should_wakeup() {
+ return None;
+ }
+
+ // A worker should be woken up, atomically increment the number of
+ // searching workers as well as the number of unparked workers.
+ State::unpark_one(&self.state);
+
+ // Get the worker to unpark
+ let ret = sleepers.pop();
+ debug_assert!(ret.is_some());
+
+ ret
+ }
+
+ /// Returns `true` if the worker needs to do a final check for submitted
+ /// work.
+ pub(super) fn transition_worker_to_parked(&self, worker: usize, is_searching: bool) -> bool {
+ // Acquire the lock
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ // Decrement the number of unparked threads
+ let ret = State::dec_num_unparked(&self.state, is_searching);
+
+ // Track the sleeping worker
+ sleepers.push(worker);
+
+ ret
+ }
+
+ pub(super) fn transition_worker_to_searching(&self) -> bool {
+ let state = State::load(&self.state, SeqCst);
+ if 2 * state.num_searching() >= self.num_workers {
+ return false;
+ }
+
+ // It is possible for this routine to allow more than 50% of the workers
+ // to search. That is OK. Limiting searchers is only an optimization to
+ // prevent too much contention.
+ State::inc_num_searching(&self.state, SeqCst);
+ true
+ }
+
+ /// A lightweight transition from searching -> running.
+ ///
+ /// Returns `true` if this is the final searching worker. The caller
+ /// **must** notify a new worker.
+ pub(super) fn transition_worker_from_searching(&self) -> bool {
+ State::dec_num_searching(&self.state)
+ }
+
+ /// Unpark a specific worker. This happens if tasks are submitted from
+ /// within the worker's park routine.
+ pub(super) fn unpark_worker_by_id(&self, worker_id: usize) {
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ for index in 0..sleepers.len() {
+ if sleepers[index] == worker_id {
+ sleepers.swap_remove(index);
+
+ // Update the state accordingly while the lock is held.
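+ // (`State::unpark_one` bumps both halves of the packed state word
+ // in a single `fetch_add`: `1 | (1 << UNPARK_SHIFT)` increments the
+ // searching count in the low 16 bits and the unparked count in the
+ // high bits at the same time.)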
+ State::unpark_one(&self.state); + + return; + } + } + } + + /// Returns `true` if `worker_id` is contained in the sleep set + pub(super) fn is_parked(&self, worker_id: usize) -> bool { + let sleepers = self.sleepers.lock().unwrap(); + sleepers.contains(&worker_id) + } + + fn notify_should_wakeup(&self) -> bool { + let state = State(self.state.fetch_add(0, SeqCst)); + state.num_searching() == 0 && state.num_unparked() < self.num_workers + } +} + +impl State { + fn new(num_workers: usize) -> State { + // All workers start in the unparked state + let ret = State(num_workers << UNPARK_SHIFT); + debug_assert_eq!(num_workers, ret.num_unparked()); + debug_assert_eq!(0, ret.num_searching()); + ret + } + + fn load(cell: &AtomicUsize, ordering: Ordering) -> State { + State(cell.load(ordering)) + } + + fn unpark_one(cell: &AtomicUsize) { + cell.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst); + } + + fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) { + cell.fetch_add(1, ordering); + } + + /// Returns `true` if this is the final searching worker + fn dec_num_searching(cell: &AtomicUsize) -> bool { + let state = State(cell.fetch_sub(1, SeqCst)); + state.num_searching() == 1 + } + + /// Track a sleeping worker + /// + /// Returns `true` if this is the final searching worker. + fn dec_num_unparked(cell: &AtomicUsize, is_searching: bool) -> bool { + let mut dec = 1 << UNPARK_SHIFT; + + if is_searching { + dec += 1; + } + + let prev = State(cell.fetch_sub(dec, SeqCst)); + is_searching && prev.num_searching() == 1 + } + + /// Number of workers currently searching + fn num_searching(self) -> usize { + self.0 & SEARCH_MASK + } + + /// Number of workers currently unparked + fn num_unparked(self) -> usize { + (self.0 & UNPARK_MASK) >> UNPARK_SHIFT + } +} + +impl From<usize> for State { + fn from(src: usize) -> State { + State(src) + } +} + +impl From<State> for usize { + fn from(src: State) -> usize { + src.0 + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("worker::State") + .field("num_unparked", &self.num_unparked()) + .field("num_searching", &self.num_searching()) + .finish() + } +} + +#[test] +fn test_state() { + assert_eq!(0, UNPARK_MASK & SEARCH_MASK); + assert_eq!(0, !(UNPARK_MASK | SEARCH_MASK)); + + let state = State::new(10); + assert_eq!(10, state.num_unparked()); + assert_eq!(0, state.num_searching()); +} diff --git a/third_party/rust/tokio/src/runtime/thread_pool/mod.rs b/third_party/rust/tokio/src/runtime/thread_pool/mod.rs new file mode 100644 index 0000000000..82e82d5b30 --- /dev/null +++ b/third_party/rust/tokio/src/runtime/thread_pool/mod.rs @@ -0,0 +1,117 @@ +//! Threadpool + +mod atomic_cell; +use atomic_cell::AtomicCell; + +mod idle; +use self::idle::Idle; + +mod worker; +pub(crate) use worker::Launch; + +cfg_blocking! { + pub(crate) use worker::block_in_place; +} + +use crate::loom::sync::Arc; +use crate::runtime::task::{self, JoinHandle}; +use crate::runtime::Parker; + +use std::fmt; +use std::future::Future; + +/// Work-stealing based thread pool for executing futures. +pub(crate) struct ThreadPool { + spawner: Spawner, +} + +/// Submit futures to the associated thread pool for execution. +/// +/// A `Spawner` instance is a handle to a single thread pool that allows the owner +/// of the handle to spawn futures onto the thread pool. +/// +/// The `Spawner` handle is *only* used for spawning new futures. It does not +/// impact the lifecycle of the thread pool in any way. 
The thread pool may
+/// shut down while there are outstanding `Spawner` instances.
+///
+/// `Spawner` instances are obtained by calling [`ThreadPool::spawner`].
+///
+/// [`ThreadPool::spawner`]: method@ThreadPool::spawner
+#[derive(Clone)]
+pub(crate) struct Spawner {
+ shared: Arc<worker::Shared>,
+}
+
+// ===== impl ThreadPool =====
+
+impl ThreadPool {
+ pub(crate) fn new(size: usize, parker: Parker) -> (ThreadPool, Launch) {
+ let (shared, launch) = worker::create(size, parker);
+ let spawner = Spawner { shared };
+ let thread_pool = ThreadPool { spawner };
+
+ (thread_pool, launch)
+ }
+
+ /// Returns a reference to the `Spawner`.
+ ///
+ /// The `Spawner` handle can be cloned and enables spawning tasks from other
+ /// threads.
+ pub(crate) fn spawner(&self) -> &Spawner {
+ &self.spawner
+ }
+
+ /// Spawns a task
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ self.spawner.spawn(future)
+ }
+
+ /// Blocks the current thread waiting for the future to complete.
+ ///
+ /// The future will execute on the current thread, but all spawned tasks
+ /// will be executed on the thread pool.
+ pub(crate) fn block_on<F>(&self, future: F) -> F::Output
+ where
+ F: Future,
+ {
+ let mut enter = crate::runtime::enter();
+ enter.block_on(future).expect("failed to park thread")
+ }
+}
+
+impl fmt::Debug for ThreadPool {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("ThreadPool").finish()
+ }
+}
+
+impl Drop for ThreadPool {
+ fn drop(&mut self) {
+ self.spawner.shared.close();
+ }
+}
+
+// ===== impl Spawner =====
+
+impl Spawner {
+ /// Spawns a future onto the thread pool
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ let (task, handle) = task::joinable(future);
+ self.shared.schedule(task, false);
+ handle
+ }
+}
+
+impl fmt::Debug for Spawner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Spawner").finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/thread_pool/worker.rs b/third_party/rust/tokio/src/runtime/thread_pool/worker.rs
new file mode 100644
index 0000000000..400e2a938c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/thread_pool/worker.rs
@@ -0,0 +1,761 @@
+//! A scheduler is initialized with a fixed number of workers. Each worker is
+//! driven by a thread. Each worker has a "core" which contains data such as the
+//! run queue and other state. When `block_in_place` is called, the worker's
+//! "core" is handed off to a new thread allowing the scheduler to continue to
+//! make progress while the originating thread blocks.
+
+use crate::loom::rand::seed;
+use crate::loom::sync::{Arc, Mutex};
+use crate::park::{Park, Unpark};
+use crate::runtime;
+use crate::runtime::park::{Parker, Unparker};
+use crate::runtime::thread_pool::{AtomicCell, Idle};
+use crate::runtime::{queue, task};
+use crate::util::linked_list::LinkedList;
+use crate::util::FastRand;
+
+use std::cell::RefCell;
+use std::time::Duration;
+
+/// A scheduler worker
+pub(super) struct Worker {
+ /// Reference to shared state
+ shared: Arc<Shared>,
+
+ /// Index holding this worker's remote state
+ index: usize,
+
+ /// Used to hand off a worker's core to another thread.
+ core: AtomicCell<Core>,
+}
+
+/// Core data
+struct Core {
+ /// Used to schedule bookkeeping tasks every so often.
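+ ///
+ /// Wraps on overflow. Every `GLOBAL_POLL_INTERVAL` ticks, the worker
+ /// polls the injection queue first and runs its regular maintenance.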
+ tick: u8,
+
+ /// When a task is scheduled from a worker, it is stored in this slot. The
+ /// worker will check this slot for a task **before** checking the run
+ /// queue. This effectively results in the **last** scheduled task being run
+ /// next (LIFO). This is an optimization for message passing patterns and
+ /// helps to reduce latency.
+ lifo_slot: Option<Notified>,
+
+ /// The worker-local run queue.
+ run_queue: queue::Local<Arc<Worker>>,
+
+ /// True if the worker is currently searching for more work. Searching
+ /// involves attempting to steal from other workers.
+ is_searching: bool,
+
+ /// True if the scheduler is being shut down
+ is_shutdown: bool,
+
+ /// Tasks owned by the core
+ tasks: LinkedList<Task>,
+
+ /// Parker
+ ///
+ /// Stored in an `Option` as the parker is added / removed to make the
+ /// borrow checker happy.
+ park: Option<Parker>,
+
+ /// Fast random number generator.
+ rand: FastRand,
+}
+
+/// State shared across all workers
+pub(super) struct Shared {
+ /// Per-worker remote state. All other workers have access to this, and it
+ /// is how they communicate with each other.
+ remotes: Box<[Remote]>,
+
+ /// Submit work to the scheduler while **not** currently on a worker thread.
+ inject: queue::Inject<Arc<Worker>>,
+
+ /// Coordinates idle workers
+ idle: Idle,
+
+ /// Workers that have observed the shutdown signal
+ ///
+ /// The core is **not** placed back in the worker to avoid it being
+ /// stolen by a thread that was spawned as part of `block_in_place`.
+ shutdown_workers: Mutex<Vec<(Box<Core>, Arc<Worker>)>>,
+}
+
+/// Used to communicate with a worker from other threads.
+struct Remote {
+ /// Steal tasks from this worker.
+ steal: queue::Steal<Arc<Worker>>,
+
+ /// Transfers tasks to be released. Any worker pushes tasks, only the owning
+ /// worker pops.
+ pending_drop: task::TransferStack<Arc<Worker>>,
+
+ /// Unparks the associated worker thread
+ unpark: Unparker,
+}
+
+/// Thread-local context
+struct Context {
+ /// Worker
+ worker: Arc<Worker>,
+
+ /// Core data
+ core: RefCell<Option<Box<Core>>>,
+}
+
+/// Starts the workers
+pub(crate) struct Launch(Vec<Arc<Worker>>);
+
+/// Running a task may consume the core. If the core is still available when
+/// running the task completes, it is returned. Otherwise, the worker will need
+/// to stop processing.
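+///
+/// The `Err(())` case is not an error in the usual sense: it only exists so
+/// that `Context::run` and `run_task` can bail out of the scheduling loop
+/// with `?` once the core has been handed off or shutdown has begun.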
+type RunResult = Result<Box<Core>, ()>; + +/// A task handle +type Task = task::Task<Arc<Worker>>; + +/// A notified task handle +type Notified = task::Notified<Arc<Worker>>; + +// Tracks thread-local state +scoped_thread_local!(static CURRENT: Context); + +pub(super) fn create(size: usize, park: Parker) -> (Arc<Shared>, Launch) { + let mut cores = vec![]; + let mut remotes = vec![]; + + // Create the local queues + for _ in 0..size { + let (steal, run_queue) = queue::local(); + + let park = park.clone(); + let unpark = park.unpark(); + + cores.push(Box::new(Core { + tick: 0, + lifo_slot: None, + run_queue, + is_searching: false, + is_shutdown: false, + tasks: LinkedList::new(), + park: Some(park), + rand: FastRand::new(seed()), + })); + + remotes.push(Remote { + steal, + pending_drop: task::TransferStack::new(), + unpark, + }); + } + + let shared = Arc::new(Shared { + remotes: remotes.into_boxed_slice(), + inject: queue::Inject::new(), + idle: Idle::new(size), + shutdown_workers: Mutex::new(vec![]), + }); + + let mut launch = Launch(vec![]); + + for (index, core) in cores.drain(..).enumerate() { + launch.0.push(Arc::new(Worker { + shared: shared.clone(), + index, + core: AtomicCell::new(Some(core)), + })); + } + + (shared, launch) +} + +cfg_blocking! { + pub(crate) fn block_in_place<F, R>(f: F) -> R + where + F: FnOnce() -> R, + { + // Try to steal the worker core back + struct Reset; + + impl Drop for Reset { + fn drop(&mut self) { + CURRENT.with(|maybe_cx| { + if let Some(cx) = maybe_cx { + let core = cx.worker.core.take(); + *cx.core.borrow_mut() = core; + } + }); + } + } + + CURRENT.with(|maybe_cx| { + let cx = maybe_cx.expect("can call blocking only when running in a spawned task"); + + // Get the worker core. If none is set, then blocking is fine! + let core = match cx.core.borrow_mut().take() { + Some(core) => { + // We are effectively leaving the executor, so we need to + // forcibly end budgeting. + crate::coop::stop(); + core + }, + None => return, + }; + + // The parker should be set here + assert!(core.park.is_some()); + + // In order to block, the core must be sent to another thread for + // execution. + // + // First, move the core back into the worker's shared core slot. + cx.worker.core.set(core); + + // Next, clone the worker handle and send it to a new thread for + // processing. + // + // Once the blocking task is done executing, we will attempt to + // steal the core back. + let worker = cx.worker.clone(); + runtime::spawn_blocking(move || run(worker)); + }); + + let _reset = Reset; + + f() + } +} + +/// After how many ticks is the global queue polled. This helps to ensure +/// fairness. +/// +/// The number is fairly arbitrary. I believe this value was copied from golang. +const GLOBAL_POLL_INTERVAL: u8 = 61; + +impl Launch { + pub(crate) fn launch(mut self) { + for worker in self.0.drain(..) { + runtime::spawn_blocking(move || run(worker)); + } + } +} + +fn run(worker: Arc<Worker>) { + // Acquire a core. If this fails, then another thread is running this + // worker and there is nothing further to do. + let core = match worker.core.take() { + Some(core) => core, + None => return, + }; + + // Set the worker context. + let cx = Context { + worker, + core: RefCell::new(None), + }; + + let _enter = crate::runtime::enter(); + + CURRENT.set(&cx, || { + // This should always be an error. It only returns a `Result` to support + // using `?` to short circuit. 
+ assert!(cx.run(core).is_err());
+ });
+}
+
+impl Context {
+ fn run(&self, mut core: Box<Core>) -> RunResult {
+ while !core.is_shutdown {
+ // Increment the tick
+ core.tick();
+
+ // Run maintenance, if needed
+ core = self.maintenance(core);
+
+ // First, check work available to the current worker.
+ if let Some(task) = core.next_task(&self.worker) {
+ core = self.run_task(task, core)?;
+ continue;
+ }
+
+ // There is no more **local** work to process, so try to steal work
+ // from other workers.
+ if let Some(task) = core.steal_work(&self.worker) {
+ core = self.run_task(task, core)?;
+ } else {
+ // Wait for work
+ core = self.park(core);
+ }
+ }
+
+ // Signal shutdown
+ self.worker.shared.shutdown(core, self.worker.clone());
+ Err(())
+ }
+
+ fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult {
+ // Make sure the worker is not in the **searching** state. This enables
+ // another idle worker to try to steal work.
+ core.transition_from_searching(&self.worker);
+
+ // Make the core available to the runtime context
+ *self.core.borrow_mut() = Some(core);
+
+ // Run the task
+ crate::coop::budget(|| {
+ task.run();
+
+ // As long as there is budget remaining and a task exists in the
+ // `lifo_slot`, then keep running.
+ loop {
+ // Check if we still have the core. If not, the core was stolen
+ // by another worker.
+ let mut core = match self.core.borrow_mut().take() {
+ Some(core) => core,
+ None => return Err(()),
+ };
+
+ // Check for a task in the LIFO slot
+ let task = match core.lifo_slot.take() {
+ Some(task) => task,
+ None => return Ok(core),
+ };
+
+ if crate::coop::has_budget_remaining() {
+ // Run the LIFO task, then loop
+ *self.core.borrow_mut() = Some(core);
+ task.run();
+ } else {
+ // Not enough budget left to run the LIFO task, push it to
+ // the back of the queue and return.
+ core.run_queue.push_back(task, self.worker.inject());
+ return Ok(core);
+ }
+ }
+ })
+ }
+
+ fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
+ if core.tick % GLOBAL_POLL_INTERVAL == 0 {
+ // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
+ // to run without actually putting the thread to sleep.
+ core = self.park_timeout(core, Some(Duration::from_millis(0))); + + // Run regularly scheduled maintenance + core.maintenance(&self.worker); + } + + core + } + + fn park(&self, mut core: Box<Core>) -> Box<Core> { + core.transition_to_parked(&self.worker); + + while !core.is_shutdown { + core = self.park_timeout(core, None); + + // Run regularly scheduled maintenance + core.maintenance(&self.worker); + + if core.transition_from_parked(&self.worker) { + return core; + } + } + + core + } + + fn park_timeout(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> { + // Take the parker out of core + let mut park = core.park.take().expect("park missing"); + + // Store `core` in context + *self.core.borrow_mut() = Some(core); + + // Park thread + if let Some(timeout) = duration { + park.park_timeout(timeout).expect("park failed"); + } else { + park.park().expect("park failed"); + } + + // Remove `core` from context + core = self.core.borrow_mut().take().expect("core missing"); + + // Place `park` back in `core` + core.park = Some(park); + + // If there are tasks available to steal, notify a worker + if core.run_queue.is_stealable() { + self.worker.shared.notify_parked(); + } + + core + } +} + +impl Core { + /// Increment the tick + fn tick(&mut self) { + self.tick = self.tick.wrapping_add(1); + } + + /// Return the next notified task available to this worker. + fn next_task(&mut self, worker: &Worker) -> Option<Notified> { + if self.tick % GLOBAL_POLL_INTERVAL == 0 { + worker.inject().pop().or_else(|| self.next_local_task()) + } else { + self.next_local_task().or_else(|| worker.inject().pop()) + } + } + + fn next_local_task(&mut self) -> Option<Notified> { + self.lifo_slot.take().or_else(|| self.run_queue.pop()) + } + + fn steal_work(&mut self, worker: &Worker) -> Option<Notified> { + if !self.transition_to_searching(worker) { + return None; + } + + let num = worker.shared.remotes.len(); + // Start from a random worker + let start = self.rand.fastrand_n(num as u32) as usize; + + for i in 0..num { + let i = (start + i) % num; + + // Don't steal from ourself! We know we don't have work. + if i == worker.index { + continue; + } + + let target = &worker.shared.remotes[i]; + if let Some(task) = target.steal.steal_into(&mut self.run_queue) { + return Some(task); + } + } + + // Fallback on checking the global queue + worker.shared.inject.pop() + } + + fn transition_to_searching(&mut self, worker: &Worker) -> bool { + if !self.is_searching { + self.is_searching = worker.shared.idle.transition_worker_to_searching(); + } + + self.is_searching + } + + fn transition_from_searching(&mut self, worker: &Worker) { + if !self.is_searching { + return; + } + + self.is_searching = false; + worker.shared.transition_worker_from_searching(); + } + + /// Prepare the worker state for parking + fn transition_to_parked(&mut self, worker: &Worker) { + // When the final worker transitions **out** of searching to parked, it + // must check all the queues one last time in case work materialized + // between the last work scan and transitioning out of searching. + let is_last_searcher = worker + .shared + .idle + .transition_worker_to_parked(worker.index, self.is_searching); + + // The worker is no longer searching. Setting this is the local cache + // only. + self.is_searching = false; + + if is_last_searcher { + worker.shared.notify_if_work_pending(); + } + } + + /// Returns `true` if the transition happened. 
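+ ///
+ /// A worker holding a task in its LIFO slot unparks itself even if it was
+ /// never notified, so a locally scheduled task is not stranded.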
+ fn transition_from_parked(&mut self, worker: &Worker) -> bool {
+ // If a task is in the lifo slot, then we must unpark regardless of
+ // being notified
+ if self.lifo_slot.is_some() {
+ worker.shared.idle.unpark_worker_by_id(worker.index);
+ self.is_searching = true;
+ return true;
+ }
+
+ if worker.shared.idle.is_parked(worker.index) {
+ return false;
+ }
+
+ // When unparked, the worker is in the searching state.
+ self.is_searching = true;
+ true
+ }
+
+ /// Runs maintenance work such as freeing pending tasks and checking the
+ /// pool's state.
+ fn maintenance(&mut self, worker: &Worker) {
+ self.drain_pending_drop(worker);
+
+ if !self.is_shutdown {
+ // Check if the scheduler has been shut down
+ self.is_shutdown = worker.inject().is_closed();
+ }
+ }
+
+ // Shut down the core
+ fn shutdown(&mut self, worker: &Worker) {
+ // Take the core
+ let mut park = self.park.take().expect("park missing");
+
+ // Signal to all tasks to shut down.
+ for header in self.tasks.iter() {
+ header.shutdown();
+ }
+
+ loop {
+ self.drain_pending_drop(worker);
+
+ if self.tasks.is_empty() {
+ break;
+ }
+
+ // Wait until signalled
+ park.park().expect("park failed");
+ }
+
+ // Drain the queue
+ while let Some(_) = self.next_local_task() {}
+ }
+
+ fn drain_pending_drop(&mut self, worker: &Worker) {
+ use std::mem::ManuallyDrop;
+
+ for task in worker.remote().pending_drop.drain() {
+ let task = ManuallyDrop::new(task);
+
+ // safety: tasks are only pushed into the `pending_drop` stacks that
+ // are associated with the list they are inserted into. When a task
+ // is pushed into `pending_drop`, the ref-inc is skipped, so we must
+ // not ref-dec here.
+ //
+ // See `bind` and `release` implementations.
+ unsafe {
+ self.tasks.remove(task.header().into());
+ }
+ }
+ }
+}
+
+impl Worker {
+ /// Returns a reference to the scheduler's injection queue
+ fn inject(&self) -> &queue::Inject<Arc<Worker>> {
+ &self.shared.inject
+ }
+
+ /// Return a reference to this worker's remote data
+ fn remote(&self) -> &Remote {
+ &self.shared.remotes[self.index]
+ }
+
+ fn eq(&self, other: &Worker) -> bool {
+ self.shared.ptr_eq(&other.shared) && self.index == other.index
+ }
+}
+
+impl task::Schedule for Arc<Worker> {
+ fn bind(task: Task) -> Arc<Worker> {
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ // Track the task
+ cx.core
+ .borrow_mut()
+ .as_mut()
+ .expect("scheduler core missing")
+ .tasks
+ .push_front(task);
+
+ // Return a clone of the worker
+ cx.worker.clone()
+ })
+ }
+
+ fn release(&self, task: &Task) -> Option<Task> {
+ use std::ptr::NonNull;
+
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ if self.eq(&cx.worker) {
+ let mut maybe_core = cx.core.borrow_mut();
+
+ if let Some(core) = &mut *maybe_core {
+ // Directly remove the task
+ //
+ // safety: the task is inserted in the list in `bind`.
+ unsafe {
+ let ptr = NonNull::from(task.header());
+ return core.tasks.remove(ptr);
+ }
+ }
+ }
+
+ // Track the task to be released by the worker that owns it
+ //
+ // Safety: We get a new handle without incrementing the ref-count.
+ // A ref-count is held by the "owned" linked list and it is only
+ // ever removed from that list as part of the release process: this
+ // method or popping the task from `pending_drop`. Thus, we can rely
+ // on the ref-count held by the linked-list to keep the memory
+ // alive.
+ //
+ // When the task is removed from the stack, it is forgotten instead
+ // of dropped.
+ let task = unsafe { Task::from_raw(task.header().into()) }; + + self.remote().pending_drop.push(task); + + if cx.core.borrow().is_some() { + return None; + } + + // The worker core has been handed off to another thread. In the + // event that the scheduler is currently shutting down, the thread + // that owns the task may be waiting on the release to complete + // shutdown. + if self.inject().is_closed() { + self.remote().unpark.unpark(); + } + + None + }) + } + + fn schedule(&self, task: Notified) { + self.shared.schedule(task, false); + } + + fn yield_now(&self, task: Notified) { + self.shared.schedule(task, true); + } +} + +impl Shared { + pub(super) fn schedule(&self, task: Notified, is_yield: bool) { + CURRENT.with(|maybe_cx| { + if let Some(cx) = maybe_cx { + // Make sure the task is part of the **current** scheduler. + if self.ptr_eq(&cx.worker.shared) { + // And the current thread still holds a core + if let Some(core) = cx.core.borrow_mut().as_mut() { + self.schedule_local(core, task, is_yield); + return; + } + } + } + + // Otherwise, use the inject queue + self.inject.push(task); + self.notify_parked(); + }); + } + + fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) { + // Spawning from the worker thread. If scheduling a "yield" then the + // task must always be pushed to the back of the queue, enabling other + // tasks to be executed. If **not** a yield, then there is more + // flexibility and the task may go to the front of the queue. + let should_notify = if is_yield { + core.run_queue.push_back(task, &self.inject); + true + } else { + // Push to the LIFO slot + let prev = core.lifo_slot.take(); + let ret = prev.is_some(); + + if let Some(prev) = prev { + core.run_queue.push_back(prev, &self.inject); + } + + core.lifo_slot = Some(task); + + ret + }; + + // Only notify if not currently parked. If `park` is `None`, then the + // scheduling is from a resource driver. As notifications often come in + // batches, the notification is delayed until the park is complete. + if should_notify && core.park.is_some() { + self.notify_parked(); + } + } + + pub(super) fn close(&self) { + if self.inject.close() { + self.notify_all(); + } + } + + fn notify_parked(&self) { + if let Some(index) = self.idle.worker_to_notify() { + self.remotes[index].unpark.unpark(); + } + } + + fn notify_all(&self) { + for remote in &self.remotes[..] { + remote.unpark.unpark(); + } + } + + fn notify_if_work_pending(&self) { + for remote in &self.remotes[..] { + if !remote.steal.is_empty() { + self.notify_parked(); + return; + } + } + + if !self.inject.is_empty() { + self.notify_parked(); + } + } + + fn transition_worker_from_searching(&self) { + if self.idle.transition_worker_from_searching() { + // We are the final searching worker. Because work was found, we + // need to notify another worker. + self.notify_parked(); + } + } + + /// Signals that a worker has observed the shutdown signal and has replaced + /// its core back into its handle. + /// + /// If all workers have reached this point, the final cleanup is performed. + fn shutdown(&self, core: Box<Core>, worker: Arc<Worker>) { + let mut workers = self.shutdown_workers.lock().unwrap(); + workers.push((core, worker)); + + if workers.len() != self.remotes.len() { + return; + } + + for (mut core, worker) in workers.drain(..) 
{
+ core.shutdown(&worker);
+ }
+
+ // Drain the injection queue
+ while let Some(_) = self.inject.pop() {}
+ }
+
+ fn ptr_eq(&self, other: &Shared) -> bool {
+ self as *const _ == other as *const _
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/time.rs b/third_party/rust/tokio/src/runtime/time.rs
new file mode 100644
index 0000000000..c623d9641a
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/time.rs
@@ -0,0 +1,59 @@
+//! Abstracts out the APIs necessary to `Runtime` for integrating the time
+//! driver. When the `time` feature flag is **not** enabled, these APIs are
+//! shells. This isolates the complexity of dealing with conditional
+//! compilation.
+
+pub(crate) use variant::*;
+
+#[cfg(feature = "time")]
+mod variant {
+ use crate::park::Either;
+ use crate::runtime::io;
+ use crate::time::{self, driver};
+
+ pub(crate) type Clock = time::Clock;
+ pub(crate) type Driver = Either<driver::Driver<io::Driver>, io::Driver>;
+ pub(crate) type Handle = Option<driver::Handle>;
+
+ pub(crate) fn create_clock() -> Clock {
+ Clock::new()
+ }
+
+ /// Create a new timer driver / handle pair
+ pub(crate) fn create_driver(
+ enable: bool,
+ io_driver: io::Driver,
+ clock: Clock,
+ ) -> (Driver, Handle) {
+ if enable {
+ let driver = driver::Driver::new(io_driver, clock);
+ let handle = driver.handle();
+
+ (Either::A(driver), Some(handle))
+ } else {
+ (Either::B(io_driver), None)
+ }
+ }
+}
+
+#[cfg(not(feature = "time"))]
+mod variant {
+ use crate::runtime::io;
+
+ pub(crate) type Clock = ();
+ pub(crate) type Driver = io::Driver;
+ pub(crate) type Handle = ();
+
+ pub(crate) fn create_clock() -> Clock {
+ ()
+ }
+
+ /// Create a new timer driver / handle pair
+ pub(crate) fn create_driver(
+ _enable: bool,
+ io_driver: io::Driver,
+ _clock: Clock,
+ ) -> (Driver, Handle) {
+ (io_driver, ())
+ }
+}
diff --git a/third_party/rust/tokio/src/signal/ctrl_c.rs b/third_party/rust/tokio/src/signal/ctrl_c.rs
new file mode 100644
index 0000000000..1eeeb85aa1
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/ctrl_c.rs
@@ -0,0 +1,53 @@
+#[cfg(unix)]
+use super::unix::{self as os_impl};
+#[cfg(windows)]
+use super::windows::{self as os_impl};
+
+use std::io;
+
+/// Completes when a "ctrl-c" notification is sent to the process.
+///
+/// While signals are handled very differently between Unix and Windows, both
+/// platforms support receiving a signal on "ctrl-c". This function provides a
+/// portable API for receiving this notification.
+///
+/// Once the returned future is polled, a listener is registered. The future
+/// will complete on the first received `ctrl-c` **after** the initial call to
+/// either `Future::poll` or `.await`.
+///
+/// # Caveats
+///
+/// On Unix platforms, the first time that a `Signal` instance is registered for a
+/// particular signal kind, an OS signal-handler is installed which replaces the
+/// default platform behavior when that signal is received, **for the duration of
+/// the entire process**.
+///
+/// For example, Unix systems will terminate a process by default when it
+/// receives a signal generated by "CTRL+C" on the terminal. But, when a
+/// `ctrl_c` stream is created to listen for this signal, when it arrives it
+/// will be translated to a stream event, and the process will continue to
+/// execute. **Even if this `Signal` instance is dropped, subsequent SIGINT
+/// deliveries will end up captured by Tokio, and the default platform behavior
+/// will NOT be reset**.
+///
+/// Thus, applications should take care to ensure the expected signal behavior
+/// occurs as expected after listening for specific signals.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use tokio::signal;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// println!("waiting for ctrl-c");
+///
+/// signal::ctrl_c().await.expect("failed to listen for event");
+///
+/// println!("received ctrl-c event");
+/// }
+/// ```
+pub async fn ctrl_c() -> io::Result<()> {
+ os_impl::ctrl_c()?.recv().await;
+ Ok(())
+}
diff --git a/third_party/rust/tokio/src/signal/mod.rs b/third_party/rust/tokio/src/signal/mod.rs
new file mode 100644
index 0000000000..6e5e350df5
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/mod.rs
@@ -0,0 +1,60 @@
+//! Asynchronous signal handling for Tokio
+//!
+//! Note that signal handling is in general a very tricky topic and should be
+//! used with great care. This crate attempts to implement 'best practice' for
+//! signal handling, but it should be evaluated for your own applications' needs
+//! to see if it's suitable.
+//!
+//! There are some fundamental limitations of this crate documented on the OS
+//! specific structures, as well.
+//!
+//! # Examples
+//!
+//! Print on "ctrl-c" notification.
+//!
+//! ```rust,no_run
+//! use tokio::signal;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! signal::ctrl_c().await?;
+//! println!("ctrl-c received!");
+//! Ok(())
+//! }
+//! ```
+//!
+//! Wait for SIGHUP on Unix
+//!
+//! ```rust,no_run
+//! # #[cfg(unix)] {
+//! use tokio::signal::unix::{signal, SignalKind};
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // An infinite stream of hangup signals.
+//! let mut stream = signal(SignalKind::hangup())?;
+//!
+//! // Print whenever a HUP signal is received
+//! loop {
+//! stream.recv().await;
+//! println!("got signal HUP");
+//! }
+//! }
+//! # }
+//! ```

+mod ctrl_c;
+pub use ctrl_c::ctrl_c;
+
+mod registry;
+
+mod os {
+ #[cfg(unix)]
+ pub(crate) use super::unix::{OsExtraData, OsStorage};
+
+ #[cfg(windows)]
+ pub(crate) use super::windows::{OsExtraData, OsStorage};
+}
+
+pub mod unix;
+pub mod windows;
diff --git a/third_party/rust/tokio/src/signal/registry.rs b/third_party/rust/tokio/src/signal/registry.rs
new file mode 100644
index 0000000000..50edd2b6c4
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/registry.rs
@@ -0,0 +1,321 @@
+#![allow(clippy::unit_arg)]
+
+use crate::signal::os::{OsExtraData, OsStorage};
+
+use crate::sync::mpsc::Sender;
+
+use lazy_static::lazy_static;
+use std::ops;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Mutex;
+
+pub(crate) type EventId = usize;
+
+/// State for a specific event, whether a notification is pending delivery,
+/// and what listeners are registered.
+#[derive(Default, Debug)]
+pub(crate) struct EventInfo {
+ pending: AtomicBool,
+ recipients: Mutex<Vec<Sender<()>>>,
+}
+
+/// An interface for retrieving the `EventInfo` for a particular `EventId`.
+pub(crate) trait Storage {
+ /// Gets the `EventInfo` for `id` if it exists.
+ fn event_info(&self, id: EventId) -> Option<&EventInfo>;
+
+ /// Invokes `f` once for each defined `EventInfo` in this storage.
+ fn for_each<'a, F>(&'a self, f: F) + where + F: FnMut(&'a EventInfo); +} + +impl Storage for Vec<EventInfo> { + fn event_info(&self, id: EventId) -> Option<&EventInfo> { + self.get(id) + } + + fn for_each<'a, F>(&'a self, f: F) + where + F: FnMut(&'a EventInfo), + { + self.iter().for_each(f) + } +} + +/// An interface for initializing a type. Useful for situations where we cannot +/// inject a configured instance in the constructor of another type. +pub(crate) trait Init { + fn init() -> Self; +} + +/// Manages and distributes event notifications to any registered listeners. +/// +/// Generic over the underlying storage to allow for domain specific +/// optimizations (e.g. eventIds may or may not be contiguous). +#[derive(Debug)] +pub(crate) struct Registry<S> { + storage: S, +} + +impl<S> Registry<S> { + fn new(storage: S) -> Self { + Self { storage } + } +} + +impl<S: Storage> Registry<S> { + /// Registers a new listener for `event_id`. + fn register_listener(&self, event_id: EventId, listener: Sender<()>) { + self.storage + .event_info(event_id) + .unwrap_or_else(|| panic!("invalid event_id: {}", event_id)) + .recipients + .lock() + .unwrap() + .push(listener); + } + + /// Marks `event_id` as having been delivered, without broadcasting it to + /// any listeners. + fn record_event(&self, event_id: EventId) { + if let Some(event_info) = self.storage.event_info(event_id) { + event_info.pending.store(true, Ordering::SeqCst) + } + } + + /// Broadcasts all previously recorded events to their respective listeners. + /// + /// Returns `true` if an event was delivered to at least one listener. + fn broadcast(&self) -> bool { + use crate::sync::mpsc::error::TrySendError; + + let mut did_notify = false; + self.storage.for_each(|event_info| { + // Any signal of this kind arrived since we checked last? + if !event_info.pending.swap(false, Ordering::SeqCst) { + return; + } + + let mut recipients = event_info.recipients.lock().unwrap(); + + // Notify all waiters on this signal that the signal has been + // received. If we can't push a message into the queue then we don't + // worry about it as everything is coalesced anyway. If the channel + // has gone away then we can remove that slot. + for i in (0..recipients.len()).rev() { + match recipients[i].try_send(()) { + Ok(()) => did_notify = true, + Err(TrySendError::Closed(..)) => { + recipients.swap_remove(i); + } + + // Channel is full, ignore the error since the + // receiver has already been woken up + Err(_) => {} + } + } + }); + + did_notify + } +} + +pub(crate) struct Globals { + extra: OsExtraData, + registry: Registry<OsStorage>, +} + +impl ops::Deref for Globals { + type Target = OsExtraData; + + fn deref(&self) -> &Self::Target { + &self.extra + } +} + +impl Globals { + /// Registers a new listener for `event_id`. + pub(crate) fn register_listener(&self, event_id: EventId, listener: Sender<()>) { + self.registry.register_listener(event_id, listener); + } + + /// Marks `event_id` as having been delivered, without broadcasting it to + /// any listeners. + pub(crate) fn record_event(&self, event_id: EventId) { + self.registry.record_event(event_id); + } + + /// Broadcasts all previously recorded events to their respective listeners. + /// + /// Returns `true` if an event was delivered to at least one listener. 
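+ ///
+ /// As a side effect, listeners whose channels have been closed are pruned
+ /// from the recipient list (see `Registry::broadcast`).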
+ pub(crate) fn broadcast(&self) -> bool { + self.registry.broadcast() + } + + #[cfg(unix)] + pub(crate) fn storage(&self) -> &OsStorage { + &self.registry.storage + } +} + +pub(crate) fn globals() -> Pin<&'static Globals> +where + OsExtraData: 'static + Send + Sync + Init, + OsStorage: 'static + Send + Sync + Init, +{ + lazy_static! { + static ref GLOBALS: Pin<Box<Globals>> = Box::pin(Globals { + extra: OsExtraData::init(), + registry: Registry::new(OsStorage::init()), + }); + } + + GLOBALS.as_ref() +} + +#[cfg(all(test, not(loom)))] +mod tests { + use super::*; + use crate::runtime::{self, Runtime}; + use crate::sync::{mpsc, oneshot}; + + use futures::future; + + #[test] + fn smoke() { + let mut rt = rt(); + rt.block_on(async move { + let registry = Registry::new(vec![ + EventInfo::default(), + EventInfo::default(), + EventInfo::default(), + ]); + + let (first_tx, first_rx) = mpsc::channel(3); + let (second_tx, second_rx) = mpsc::channel(3); + let (third_tx, third_rx) = mpsc::channel(3); + + registry.register_listener(0, first_tx); + registry.register_listener(1, second_tx); + registry.register_listener(2, third_tx); + + let (fire, wait) = oneshot::channel(); + + crate::spawn(async { + wait.await.expect("wait failed"); + + // Record some events which should get coalesced + registry.record_event(0); + registry.record_event(0); + registry.record_event(1); + registry.record_event(1); + registry.broadcast(); + + // Send subsequent signal + registry.record_event(0); + registry.broadcast(); + + drop(registry); + }); + + let _ = fire.send(()); + let all = future::join3(collect(first_rx), collect(second_rx), collect(third_rx)); + + let (first_results, second_results, third_results) = all.await; + assert_eq!(2, first_results.len()); + assert_eq!(1, second_results.len()); + assert_eq!(0, third_results.len()); + }); + } + + #[test] + #[should_panic = "invalid event_id: 1"] + fn register_panics_on_invalid_input() { + let registry = Registry::new(vec![EventInfo::default()]); + + let (tx, _) = mpsc::channel(1); + registry.register_listener(1, tx); + } + + #[test] + fn record_invalid_event_does_nothing() { + let registry = Registry::new(vec![EventInfo::default()]); + registry.record_event(42); + } + + #[test] + fn broadcast_cleans_up_disconnected_listeners() { + let mut rt = Runtime::new().unwrap(); + + rt.block_on(async { + let registry = Registry::new(vec![EventInfo::default()]); + + let (first_tx, first_rx) = mpsc::channel(1); + let (second_tx, second_rx) = mpsc::channel(1); + let (third_tx, third_rx) = mpsc::channel(1); + + registry.register_listener(0, first_tx); + registry.register_listener(0, second_tx); + registry.register_listener(0, third_tx); + + drop(first_rx); + drop(second_rx); + + let (fire, wait) = oneshot::channel(); + + crate::spawn(async { + wait.await.expect("wait failed"); + + registry.record_event(0); + registry.broadcast(); + + assert_eq!(1, registry.storage[0].recipients.lock().unwrap().len()); + drop(registry); + }); + + let _ = fire.send(()); + let results = collect(third_rx).await; + + assert_eq!(1, results.len()); + }); + } + + #[test] + fn broadcast_returns_if_at_least_one_event_fired() { + let registry = Registry::new(vec![EventInfo::default()]); + + registry.record_event(0); + assert_eq!(false, registry.broadcast()); + + let (first_tx, first_rx) = mpsc::channel(1); + let (second_tx, second_rx) = mpsc::channel(1); + + registry.register_listener(0, first_tx); + registry.register_listener(0, second_tx); + + registry.record_event(0); + assert_eq!(true, registry.broadcast()); 
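+ // Below, no delivery can succeed: dropping `first_rx` closes its
+ // channel (closed channels are pruned, not notified), and `second_rx`
+ // still holds an undrained message, so `try_send` fails with `Full`.
+ // `broadcast` therefore returns `false`.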
+ + drop(first_rx); + registry.record_event(0); + assert_eq!(false, registry.broadcast()); + + drop(second_rx); + } + + fn rt() -> Runtime { + runtime::Builder::new().basic_scheduler().build().unwrap() + } + + async fn collect(mut rx: crate::sync::mpsc::Receiver<()>) -> Vec<()> { + let mut ret = vec![]; + + while let Some(v) = rx.recv().await { + ret.push(v); + } + + ret + } +} diff --git a/third_party/rust/tokio/src/signal/unix.rs b/third_party/rust/tokio/src/signal/unix.rs new file mode 100644 index 0000000000..06f5cf4eba --- /dev/null +++ b/third_party/rust/tokio/src/signal/unix.rs @@ -0,0 +1,513 @@ +//! Unix-specific types for signal handling. +//! +//! This module is only defined on Unix platforms and contains the primary +//! `Signal` type for receiving notifications of signals. + +#![cfg(unix)] + +use crate::io::{AsyncRead, PollEvented}; +use crate::signal::registry::{globals, EventId, EventInfo, Globals, Init, Storage}; +use crate::sync::mpsc::{channel, Receiver}; + +use libc::c_int; +use mio_uds::UnixStream; +use std::io::{self, Error, ErrorKind, Write}; +use std::pin::Pin; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Once; +use std::task::{Context, Poll}; + +pub(crate) type OsStorage = Vec<SignalInfo>; + +// Number of different unix signals +// (FreeBSD has 33) +const SIGNUM: usize = 33; + +impl Init for OsStorage { + fn init() -> Self { + (0..SIGNUM).map(|_| SignalInfo::default()).collect() + } +} + +impl Storage for OsStorage { + fn event_info(&self, id: EventId) -> Option<&EventInfo> { + self.get(id).map(|si| &si.event_info) + } + + fn for_each<'a, F>(&'a self, f: F) + where + F: FnMut(&'a EventInfo), + { + self.iter().map(|si| &si.event_info).for_each(f) + } +} + +#[derive(Debug)] +pub(crate) struct OsExtraData { + sender: UnixStream, + receiver: UnixStream, +} + +impl Init for OsExtraData { + fn init() -> Self { + let (receiver, sender) = UnixStream::pair().expect("failed to create UnixStream"); + + Self { sender, receiver } + } +} + +/// Represents the specific kind of signal to listen for. +#[derive(Debug, Clone, Copy)] +pub struct SignalKind(c_int); + +impl SignalKind { + /// Allows for listening to any valid OS signal. + /// + /// For example, this can be used for listening for platform-specific + /// signals. + /// ```rust,no_run + /// # use tokio::signal::unix::SignalKind; + /// # let signum = -1; + /// // let signum = libc::OS_SPECIFIC_SIGNAL; + /// let kind = SignalKind::from_raw(signum); + /// ``` + pub fn from_raw(signum: c_int) -> Self { + Self(signum) + } + + /// Represents the SIGALRM signal. + /// + /// On Unix systems this signal is sent when a real-time timer has expired. + /// By default, the process is terminated by this signal. + pub fn alarm() -> Self { + Self(libc::SIGALRM) + } + + /// Represents the SIGCHLD signal. + /// + /// On Unix systems this signal is sent when the status of a child process + /// has changed. By default, this signal is ignored. + pub fn child() -> Self { + Self(libc::SIGCHLD) + } + + /// Represents the SIGHUP signal. + /// + /// On Unix systems this signal is sent when the terminal is disconnected. + /// By default, the process is terminated by this signal. + pub fn hangup() -> Self { + Self(libc::SIGHUP) + } + + /// Represents the SIGINFO signal. + /// + /// On Unix systems this signal is sent to request a status update from the + /// process. By default, this signal is ignored. 
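+ ///
+ /// SIGINFO does not exist on Linux, so this constructor is only available
+ /// on the BSD-derived targets listed in the `cfg` attribute below.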
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ pub fn info() -> Self {
+ Self(libc::SIGINFO)
+ }
+
+ /// Represents the SIGINT signal.
+ ///
+ /// On Unix systems this signal is sent to interrupt a program.
+ /// By default, the process is terminated by this signal.
+ pub fn interrupt() -> Self {
+ Self(libc::SIGINT)
+ }
+
+ /// Represents the SIGIO signal.
+ ///
+ /// On Unix systems this signal is sent when I/O operations are possible
+ /// on some file descriptor. By default, this signal is ignored.
+ pub fn io() -> Self {
+ Self(libc::SIGIO)
+ }
+
+ /// Represents the SIGPIPE signal.
+ ///
+ /// On Unix systems this signal is sent when the process attempts to write
+ /// to a pipe which has no reader. By default, the process is terminated by
+ /// this signal.
+ pub fn pipe() -> Self {
+ Self(libc::SIGPIPE)
+ }
+
+ /// Represents the SIGQUIT signal.
+ ///
+ /// On Unix systems this signal is sent to issue a shutdown of the
+ /// process, after which the OS will dump the process core.
+ /// By default, the process is terminated by this signal.
+ pub fn quit() -> Self {
+ Self(libc::SIGQUIT)
+ }
+
+ /// Represents the SIGTERM signal.
+ ///
+ /// On Unix systems this signal is sent to issue a shutdown of the
+ /// process. By default, the process is terminated by this signal.
+ pub fn terminate() -> Self {
+ Self(libc::SIGTERM)
+ }
+
+ /// Represents the SIGUSR1 signal.
+ ///
+ /// On Unix systems this is a user defined signal.
+ /// By default, the process is terminated by this signal.
+ pub fn user_defined1() -> Self {
+ Self(libc::SIGUSR1)
+ }
+
+ /// Represents the SIGUSR2 signal.
+ ///
+ /// On Unix systems this is a user defined signal.
+ /// By default, the process is terminated by this signal.
+ pub fn user_defined2() -> Self {
+ Self(libc::SIGUSR2)
+ }
+
+ /// Represents the SIGWINCH signal.
+ ///
+ /// On Unix systems this signal is sent when the terminal window is resized.
+ /// By default, this signal is ignored.
+ pub fn window_change() -> Self {
+ Self(libc::SIGWINCH)
+ }
+}
+
+pub(crate) struct SignalInfo {
+ event_info: EventInfo,
+ init: Once,
+ initialized: AtomicBool,
+}
+
+impl Default for SignalInfo {
+ fn default() -> SignalInfo {
+ SignalInfo {
+ event_info: Default::default(),
+ init: Once::new(),
+ initialized: AtomicBool::new(false),
+ }
+ }
+}
+
+/// Our global signal handler for all signals registered by this module.
+///
+/// The purpose of this signal handler is primarily to:
+///
+/// 1. Flag that our specific signal was received (e.g. store an atomic flag)
+/// 2. Wake up driver tasks by writing a byte to a pipe
+///
+/// Those two operations should both be async-signal safe.
+fn action(globals: Pin<&'static Globals>, signal: c_int) {
+ globals.record_event(signal as EventId);
+
+ // Send a wakeup, ignore any errors (anything reasonably possible is
+ // full pipe and then it will wake up anyway).
+ let mut sender = &globals.sender;
+ drop(sender.write(&[1]));
+}
+
+/// Enables this module to receive signal notifications for the `signal`
+/// provided.
+///
+/// This will register the signal handler if it hasn't already been registered,
+/// returning any error along the way if that fails.
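+///
+/// Registration happens at most once per signal for the lifetime of the
+/// process: a `Once` guards the call into `signal_hook_registry`, and an
+/// `AtomicBool` records whether that single attempt succeeded.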
+fn signal_enable(signal: c_int) -> io::Result<()> {
+ if signal < 0 || signal_hook_registry::FORBIDDEN.contains(&signal) {
+ return Err(Error::new(
+ ErrorKind::Other,
+ format!("Refusing to register signal {}", signal),
+ ));
+ }
+
+ let globals = globals();
+ let siginfo = match globals.storage().get(signal as EventId) {
+ Some(slot) => slot,
+ None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")),
+ };
+ let mut registered = Ok(());
+ siginfo.init.call_once(|| {
+ registered = unsafe {
+ signal_hook_registry::register(signal, move || action(globals, signal)).map(|_| ())
+ };
+ if registered.is_ok() {
+ siginfo.initialized.store(true, Ordering::Relaxed);
+ }
+ });
+ registered?;
+ // If the call_once failed, it won't be retried on the next attempt to register the signal.
+ // In that case the closure is not run, `registered` is still `Ok(())`, and `initialized`
+ // is still `false`.
+ if siginfo.initialized.load(Ordering::Relaxed) {
+ Ok(())
+ } else {
+ Err(Error::new(
+ ErrorKind::Other,
+ "Failed to register signal handler",
+ ))
+ }
+}
+
+#[derive(Debug)]
+struct Driver {
+ wakeup: PollEvented<UnixStream>,
+}
+
+impl Driver {
+ fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ // Drain the data from the pipe and maintain interest in getting more
+ self.drain(cx);
+ // Broadcast any signals which were received
+ globals().broadcast();
+
+ Poll::Pending
+ }
+}
+
+impl Driver {
+ fn new() -> io::Result<Driver> {
+ // NB: We give each driver a "fresh" receiver file descriptor to avoid
+ // the issues described in alexcrichton/tokio-process#42.
+ //
+ // In the past we would reuse the actual receiver file descriptor and
+ // swallow any errors around double registration of the same descriptor.
+ // I'm not sure if the second (failed) registration simply doesn't end up
+ // receiving wake up notifications, or if there is some race condition
+ // when consuming readiness events, but having distinct descriptors for
+ // distinct PollEvented instances appears to mitigate this.
+ //
+ // Unfortunately we cannot just use a single global PollEvented instance
+ // either, since we can't compare Handles or assume they will always
+ // point to the exact same reactor.
+ let stream = globals().receiver.try_clone()?;
+ let wakeup = PollEvented::new(stream)?;
+
+ Ok(Driver { wakeup })
+ }
+
+ /// Drain all data in the global receiver, ensuring we'll get woken up when
+ /// there is a write on the other end.
+ ///
+ /// We do *NOT* use the existence of any read bytes as evidence a signal was
+ /// received since the `pending` flags would have already been set if that
+ /// was the case. See
+ /// [#38](https://github.com/alexcrichton/tokio-signal/issues/38) for more
+ /// info.
+ fn drain(&mut self, cx: &mut Context<'_>) {
+ loop {
+ match Pin::new(&mut self.wakeup).poll_read(cx, &mut [0; 128]) {
+ Poll::Ready(Ok(0)) => panic!("EOF on self-pipe"),
+ Poll::Ready(Ok(_)) => {}
+ Poll::Ready(Err(e)) => panic!("Bad read on self-pipe: {}", e),
+ Poll::Pending => break,
+ }
+ }
+ }
+}
+
+/// A stream of events for receiving a particular type of OS signal.
+///
+/// In general signal handling on Unix is a pretty tricky topic, and this
+/// structure is no exception! There are some important limitations to keep in
+/// mind when using `Signal` streams:
+///
+/// * Signal handling on Unix already necessitates coalescing signals
+/// together sometimes. This `Signal` stream is no exception: it will
+/// also coalesce signals.
That is, even if the signal handler
+/// for this process runs multiple times, the `Signal` stream may only return
+/// one signal notification. Specifically, before `poll` is called, all
+/// signal notifications are coalesced into one item returned from `poll`.
+/// Once `poll` has been called, however, a further signal is guaranteed to
+/// be yielded as an item.
+///
+/// Put another way, any element pulled off the returned stream corresponds to
+/// *at least one* signal, but possibly more.
+///
+/// * Signal handling in general is relatively inefficient. Although some
+/// improvements are possible in this crate, it's recommended to not plan on
+/// having millions of signal channels open.
+///
+/// If you've got any questions about this feel free to open an issue on the
+/// repo! New approaches to alleviate some of these limitations are always
+/// appreciated!
+///
+/// # Caveats
+///
+/// The first time that a `Signal` instance is registered for a particular
+/// signal kind, an OS signal-handler is installed which replaces the default
+/// platform behavior when that signal is received, **for the duration of the
+/// entire process**.
+///
+/// For example, Unix systems will terminate a process by default when it
+/// receives SIGINT. But, when a `Signal` instance is created to listen for
+/// this signal, the next SIGINT that arrives will be translated to a stream
+/// event, and the process will continue to execute. **Even if this `Signal`
+/// instance is dropped, subsequent SIGINT deliveries will end up captured by
+/// Tokio, and the default platform behavior will NOT be reset**.
+///
+/// Thus, applications should take care to ensure that the signal behavior
+/// they rely on still occurs after listening for specific signals.
+///
+/// # Examples
+///
+/// Wait for SIGHUP
+///
+/// ```rust,no_run
+/// use tokio::signal::unix::{signal, SignalKind};
+///
+/// #[tokio::main]
+/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// // An infinite stream of hangup signals.
+/// let mut stream = signal(SignalKind::hangup())?;
+///
+/// // Print whenever a HUP signal is received
+/// loop {
+/// stream.recv().await;
+/// println!("got signal HUP");
+/// }
+/// }
+/// ```
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Signal {
+ driver: Driver,
+ rx: Receiver<()>,
+}
+
+/// Creates a new stream which will receive notifications when the current
+/// process receives the specified signal `kind`.
+///
+/// This function will create a new stream which binds to the default reactor.
+/// The `Signal` stream is an infinite stream which will receive
+/// notifications whenever a signal is received. More documentation can be
+/// found on `Signal` itself, but to reiterate:
+///
+/// * Signals may be coalesced beyond what the kernel already does.
+/// * Once a signal handler is registered with the process, the underlying
+/// libc signal handler is never unregistered.
+///
+/// A `Signal` stream can be created for a particular signal number
+/// multiple times. When a signal is received then all the associated
+/// channels will receive the signal notification.
+///
+/// # Errors
+///
+/// * If the lower-level C functions fail for some reason.
+/// * If the previous initialization of this specific signal failed.
+/// * If the signal is one of +/// [`signal_hook::FORBIDDEN`](https://docs.rs/signal-hook/*/signal_hook/fn.register.html#panics) +pub fn signal(kind: SignalKind) -> io::Result<Signal> { + let signal = kind.0; + + // Turn the signal delivery on once we are ready for it + signal_enable(signal)?; + + // Ensure there's a driver for our associated event loop processing + // signals. + let driver = Driver::new()?; + + // One wakeup in a queue is enough, no need for us to buffer up any + // more. + let (tx, rx) = channel(1); + globals().register_listener(signal as EventId, tx); + + Ok(Signal { driver, rx }) +} + +impl Signal { + /// Receives the next signal notification event. + /// + /// `None` is returned if no more events can be received by this stream. + /// + /// # Examples + /// + /// Wait for SIGHUP + /// + /// ```rust,no_run + /// use tokio::signal::unix::{signal, SignalKind}; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box<dyn std::error::Error>> { + /// // An infinite stream of hangup signals. + /// let mut stream = signal(SignalKind::hangup())?; + /// + /// // Print whenever a HUP signal is received + /// loop { + /// stream.recv().await; + /// println!("got signal HUP"); + /// } + /// } + /// ``` + pub async fn recv(&mut self) -> Option<()> { + use crate::future::poll_fn; + poll_fn(|cx| self.poll_recv(cx)).await + } + + /// Polls to receive the next signal notification event, outside of an + /// `async` context. + /// + /// `None` is returned if no more events can be received by this stream. + /// + /// # Examples + /// + /// Polling from a manually implemented future + /// + /// ```rust,no_run + /// use std::pin::Pin; + /// use std::future::Future; + /// use std::task::{Context, Poll}; + /// use tokio::signal::unix::Signal; + /// + /// struct MyFuture { + /// signal: Signal, + /// } + /// + /// impl Future for MyFuture { + /// type Output = Option<()>; + /// + /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + /// println!("polling MyFuture"); + /// self.signal.poll_recv(cx) + /// } + /// } + /// ``` + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { + let _ = self.driver.poll(cx); + self.rx.poll_recv(cx) + } +} + +cfg_stream! { + impl crate::stream::Stream for Signal { + type Item = (); + + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> { + self.poll_recv(cx) + } + } +} + +pub(crate) fn ctrl_c() -> io::Result<Signal> { + signal(SignalKind::interrupt()) +} + +#[cfg(all(test, not(loom)))] +mod tests { + use super::*; + + #[test] + fn signal_enable_error_on_invalid_input() { + signal_enable(-1).unwrap_err(); + } + + #[test] + fn signal_enable_error_on_forbidden_input() { + signal_enable(signal_hook_registry::FORBIDDEN[0]).unwrap_err(); + } +} diff --git a/third_party/rust/tokio/src/signal/windows.rs b/third_party/rust/tokio/src/signal/windows.rs new file mode 100644 index 0000000000..f55e504b00 --- /dev/null +++ b/third_party/rust/tokio/src/signal/windows.rs @@ -0,0 +1,297 @@ +//! Windows-specific types for signal handling. +//! +//! This module is only defined on Windows and contains the primary `Event` type +//! for receiving notifications of events. These events are listened for via the +//! `SetConsoleCtrlHandler` function which receives events of the type +//! 
`CTRL_C_EVENT` and `CTRL_BREAK_EVENT`.
+
+#![cfg(windows)]
+
+use crate::signal::registry::{globals, EventId, EventInfo, Init, Storage};
+use crate::sync::mpsc::{channel, Receiver};
+
+use std::convert::TryFrom;
+use std::io;
+use std::sync::Once;
+use std::task::{Context, Poll};
+use winapi::shared::minwindef::*;
+use winapi::um::consoleapi::SetConsoleCtrlHandler;
+use winapi::um::wincon::*;
+
+#[derive(Debug)]
+pub(crate) struct OsStorage {
+ ctrl_c: EventInfo,
+ ctrl_break: EventInfo,
+}
+
+impl Init for OsStorage {
+ fn init() -> Self {
+ Self {
+ ctrl_c: EventInfo::default(),
+ ctrl_break: EventInfo::default(),
+ }
+ }
+}
+
+impl Storage for OsStorage {
+ fn event_info(&self, id: EventId) -> Option<&EventInfo> {
+ match DWORD::try_from(id) {
+ Ok(CTRL_C_EVENT) => Some(&self.ctrl_c),
+ Ok(CTRL_BREAK_EVENT) => Some(&self.ctrl_break),
+ _ => None,
+ }
+ }
+
+ fn for_each<'a, F>(&'a self, mut f: F)
+ where
+ F: FnMut(&'a EventInfo),
+ {
+ f(&self.ctrl_c);
+ f(&self.ctrl_break);
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct OsExtraData {}
+
+impl Init for OsExtraData {
+ fn init() -> Self {
+ Self {}
+ }
+}
+
+/// Stream of events discovered via `SetConsoleCtrlHandler`.
+///
+/// This structure can be used to listen for events of the type `CTRL_C_EVENT`
+/// and `CTRL_BREAK_EVENT`. The `Stream` trait is implemented for this struct
+/// and will resolve for each notification received by the process. Note that
+/// there are a few limitations with this as well:
+///
+/// * A notification to this process notifies *all* `Event` streams for that
+/// event type.
+/// * Notifications to an `Event` stream **are coalesced** if they aren't
+/// processed quickly enough. This means that if two notifications are
+/// received back-to-back, then the stream may only receive one item about the
+/// two notifications.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub(crate) struct Event {
+ rx: Receiver<()>,
+}
+
+pub(crate) fn ctrl_c() -> io::Result<Event> {
+ Event::new(CTRL_C_EVENT)
+}
+
+impl Event {
+ fn new(signum: DWORD) -> io::Result<Self> {
+ global_init()?;
+
+ let (tx, rx) = channel(1);
+ globals().register_listener(signum as EventId, tx);
+
+ Ok(Event { rx })
+ }
+
+ pub(crate) async fn recv(&mut self) -> Option<()> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.rx.poll_recv(cx)).await
+ }
+}
+
+fn global_init() -> io::Result<()> {
+ static INIT: Once = Once::new();
+
+ let mut init = None;
+
+ INIT.call_once(|| unsafe {
+ let rc = SetConsoleCtrlHandler(Some(handler), TRUE);
+ let ret = if rc == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ };
+
+ init = Some(ret);
+ });
+
+ init.unwrap_or_else(|| Ok(()))
+}
+
+unsafe extern "system" fn handler(ty: DWORD) -> BOOL {
+ let globals = globals();
+ globals.record_event(ty as EventId);
+
+ // According to https://docs.microsoft.com/en-us/windows/console/handlerroutine
+ // the handler routine is always invoked in a new thread, thus we don't
+ // have the same restrictions as in Unix signal handlers, meaning we can
+ // go ahead and perform the broadcast here.
+ if globals.broadcast() {
+ TRUE
+ } else {
+ // No one is listening for this notification anymore;
+ // let the OS fire the next (possibly the default) handler.
+ FALSE
+ }
+}
+
+/// Represents a stream which receives "ctrl-break" notifications sent to the process
+/// via `SetConsoleCtrlHandler`.
+///
+/// A notification to this process notifies *all* streams listening for
+/// this event.
Moreover, the notifications **are coalesced** if they aren't processed +/// quickly enough. This means that if two notifications are received back-to-back, +/// then the stream may only receive one item about the two notifications. +#[must_use = "streams do nothing unless polled"] +#[derive(Debug)] +pub struct CtrlBreak { + inner: Event, +} + +impl CtrlBreak { + /// Receives the next signal notification event. + /// + /// `None` is returned if no more events can be received by this stream. + /// + /// # Examples + /// + /// ```rust,no_run + /// use tokio::signal::windows::ctrl_break; + /// + /// #[tokio::main] + /// async fn main() -> Result<(), Box<dyn std::error::Error>> { + /// // An infinite stream of CTRL-BREAK events. + /// let mut stream = ctrl_break()?; + /// + /// // Print whenever a CTRL-BREAK event is received + /// loop { + /// stream.recv().await; + /// println!("got signal CTRL-BREAK"); + /// } + /// } + /// ``` + pub async fn recv(&mut self) -> Option<()> { + use crate::future::poll_fn; + poll_fn(|cx| self.poll_recv(cx)).await + } + + /// Polls to receive the next signal notification event, outside of an + /// `async` context. + /// + /// `None` is returned if no more events can be received by this stream. + /// + /// # Examples + /// + /// Polling from a manually implemented future + /// + /// ```rust,no_run + /// use std::pin::Pin; + /// use std::future::Future; + /// use std::task::{Context, Poll}; + /// use tokio::signal::windows::CtrlBreak; + /// + /// struct MyFuture { + /// ctrl_break: CtrlBreak, + /// } + /// + /// impl Future for MyFuture { + /// type Output = Option<()>; + /// + /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + /// println!("polling MyFuture"); + /// self.ctrl_break.poll_recv(cx) + /// } + /// } + /// ``` + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> { + self.inner.rx.poll_recv(cx) + } +} + +cfg_stream! { + impl crate::stream::Stream for CtrlBreak { + type Item = (); + + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> { + self.poll_recv(cx) + } + } +} + +/// Creates a new stream which receives "ctrl-break" notifications sent to the +/// process. +/// +/// # Examples +/// +/// ```rust,no_run +/// use tokio::signal::windows::ctrl_break; +/// +/// #[tokio::main] +/// async fn main() -> Result<(), Box<dyn std::error::Error>> { +/// // An infinite stream of CTRL-BREAK events. +/// let mut stream = ctrl_break()?; +/// +/// // Print whenever a CTRL-BREAK event is received +/// loop { +/// stream.recv().await; +/// println!("got signal CTRL-BREAK"); +/// } +/// } +/// ``` +pub fn ctrl_break() -> io::Result<CtrlBreak> { + Event::new(CTRL_BREAK_EVENT).map(|inner| CtrlBreak { inner }) +} + +#[cfg(all(test, not(loom)))] +mod tests { + use super::*; + use crate::runtime::Runtime; + use crate::stream::StreamExt; + + use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task}; + + #[test] + fn ctrl_c() { + let rt = rt(); + + rt.enter(|| { + let mut ctrl_c = task::spawn(crate::signal::ctrl_c()); + + assert_pending!(ctrl_c.poll()); + + // Windows doesn't have a good programmatic way of sending events + // like sending signals on Unix, so we'll stub out the actual OS + // integration and test that our handling works. 
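+ // Calling `handler` directly with `CTRL_C_EVENT` mimics what the
+ // console subsystem does once `SetConsoleCtrlHandler` registration
+ // is in place.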
+ unsafe { + super::handler(CTRL_C_EVENT); + } + + assert_ready_ok!(ctrl_c.poll()); + }); + } + + #[test] + fn ctrl_break() { + let mut rt = rt(); + + rt.block_on(async { + let mut ctrl_break = assert_ok!(super::ctrl_break()); + + // Windows doesn't have a good programmatic way of sending events + // like sending signals on Unix, so we'll stub out the actual OS + // integration and test that our handling works. + unsafe { + super::handler(CTRL_BREAK_EVENT); + } + + ctrl_break.next().await.unwrap(); + }); + } + + fn rt() -> Runtime { + crate::runtime::Builder::new() + .basic_scheduler() + .build() + .unwrap() + } +} diff --git a/third_party/rust/tokio/src/stream/all.rs b/third_party/rust/tokio/src/stream/all.rs new file mode 100644 index 0000000000..615665d270 --- /dev/null +++ b/third_party/rust/tokio/src/stream/all.rs @@ -0,0 +1,45 @@ +use crate::stream::Stream; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Future for the [`all`](super::StreamExt::all) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct AllFuture<'a, St: ?Sized, F> { + stream: &'a mut St, + f: F, +} + +impl<'a, St: ?Sized, F> AllFuture<'a, St, F> { + pub(super) fn new(stream: &'a mut St, f: F) -> Self { + Self { stream, f } + } +} + +impl<St: ?Sized + Unpin, F> Unpin for AllFuture<'_, St, F> {} + +impl<St, F> Future for AllFuture<'_, St, F> +where + St: ?Sized + Stream + Unpin, + F: FnMut(St::Item) -> bool, +{ + type Output = bool; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx)); + + match next { + Some(v) => { + if !(&mut self.f)(v) { + Poll::Ready(false) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + } + } + None => Poll::Ready(true), + } + } +} diff --git a/third_party/rust/tokio/src/stream/any.rs b/third_party/rust/tokio/src/stream/any.rs new file mode 100644 index 0000000000..f2ecad5edb --- /dev/null +++ b/third_party/rust/tokio/src/stream/any.rs @@ -0,0 +1,45 @@ +use crate::stream::Stream; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Future for the [`any`](super::StreamExt::any) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct AnyFuture<'a, St: ?Sized, F> { + stream: &'a mut St, + f: F, +} + +impl<'a, St: ?Sized, F> AnyFuture<'a, St, F> { + pub(super) fn new(stream: &'a mut St, f: F) -> Self { + Self { stream, f } + } +} + +impl<St: ?Sized + Unpin, F> Unpin for AnyFuture<'_, St, F> {} + +impl<St, F> Future for AnyFuture<'_, St, F> +where + St: ?Sized + Stream + Unpin, + F: FnMut(St::Item) -> bool, +{ + type Output = bool; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx)); + + match next { + Some(v) => { + if (&mut self.f)(v) { + Poll::Ready(true) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + } + } + None => Poll::Ready(false), + } + } +} diff --git a/third_party/rust/tokio/src/stream/chain.rs b/third_party/rust/tokio/src/stream/chain.rs new file mode 100644 index 0000000000..5f0324a4b5 --- /dev/null +++ b/third_party/rust/tokio/src/stream/chain.rs @@ -0,0 +1,57 @@ +use crate::stream::{Fuse, Stream}; + +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! 
{
+ /// Stream returned by the [`chain`](super::StreamExt::chain) method.
+ pub struct Chain<T, U> {
+ #[pin]
+ a: Fuse<T>,
+ #[pin]
+ b: U,
+ }
+}
+
+impl<T, U> Chain<T, U> {
+ pub(super) fn new(a: T, b: U) -> Chain<T, U>
+ where
+ T: Stream,
+ U: Stream,
+ {
+ Chain { a: Fuse::new(a), b }
+ }
+}
+
+impl<T, U> Stream for Chain<T, U>
+where
+ T: Stream,
+ U: Stream<Item = T::Item>,
+{
+ type Item = T::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
+ use Poll::Ready;
+
+ let me = self.project();
+
+ if let Some(v) = ready!(me.a.poll_next(cx)) {
+ return Ready(Some(v));
+ }
+
+ me.b.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_lower, a_upper) = self.a.size_hint();
+ let (b_lower, b_upper) = self.b.size_hint();
+
+ let upper = match (a_upper, b_upper) {
+ (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper),
+ _ => None,
+ };
+
+ (a_lower + b_lower, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/collect.rs b/third_party/rust/tokio/src/stream/collect.rs
new file mode 100644
index 0000000000..f44c72b7b3
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/collect.rs
@@ -0,0 +1,246 @@
+use crate::stream::Stream;
+
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+use core::future::Future;
+use core::mem;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+// Do not export this struct until `FromStream` can be unsealed.
+pin_project! {
+ /// Future returned by the [`collect`](super::StreamExt::collect) method.
+ #[must_use = "streams do nothing unless polled"]
+ #[derive(Debug)]
+ pub struct Collect<T, U>
+ where
+ T: Stream,
+ U: FromStream<T::Item>,
+ {
+ #[pin]
+ stream: T,
+ collection: U::Collection,
+ }
+}
+
+/// Convert from a [`Stream`](crate::stream::Stream).
+///
+/// This trait is not intended to be used directly. Instead, call
+/// [`StreamExt::collect()`](super::StreamExt::collect).
+///
+/// # Implementing
+///
+/// Currently, this trait may not be implemented by third parties. The trait is
+/// sealed in order to make changes in the future. Stabilization is pending
+/// enhancements to the Rust language.
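+///
+/// As a quick illustration (a minimal sketch using the `Vec` implementation
+/// provided below):
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     // `Vec<i32>` implements `FromStream<i32>`, so `collect()` can
+///     // gather the stream's items into a vector.
+///     let doubled: Vec<i32> = stream::iter(vec![1, 2, 3])
+///         .map(|x| x * 2)
+///         .collect()
+///         .await;
+///
+///     assert_eq!(vec![2, 4, 6], doubled);
+/// }
+/// ```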
+pub trait FromStream<T>: sealed::FromStreamPriv<T> {} + +impl<T, U> Collect<T, U> +where + T: Stream, + U: FromStream<T::Item>, +{ + pub(super) fn new(stream: T) -> Collect<T, U> { + let (lower, upper) = stream.size_hint(); + let collection = U::initialize(lower, upper); + + Collect { stream, collection } + } +} + +impl<T, U> Future for Collect<T, U> +where + T: Stream, + U: FromStream<T::Item>, +{ + type Output = U; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<U> { + use Poll::Ready; + + loop { + let mut me = self.as_mut().project(); + + let item = match ready!(me.stream.poll_next(cx)) { + Some(item) => item, + None => { + return Ready(U::finalize(&mut me.collection)); + } + }; + + if !U::extend(&mut me.collection, item) { + return Ready(U::finalize(&mut me.collection)); + } + } + } +} + +// ===== FromStream implementations + +impl FromStream<()> for () {} + +impl sealed::FromStreamPriv<()> for () { + type Collection = (); + + fn initialize(_lower: usize, _upper: Option<usize>) {} + + fn extend(_collection: &mut (), _item: ()) -> bool { + true + } + + fn finalize(_collection: &mut ()) {} +} + +impl<T: AsRef<str>> FromStream<T> for String {} + +impl<T: AsRef<str>> sealed::FromStreamPriv<T> for String { + type Collection = String; + + fn initialize(_lower: usize, _upper: Option<usize>) -> String { + String::new() + } + + fn extend(collection: &mut String, item: T) -> bool { + collection.push_str(item.as_ref()); + true + } + + fn finalize(collection: &mut String) -> String { + mem::replace(collection, String::new()) + } +} + +impl<T> FromStream<T> for Vec<T> {} + +impl<T> sealed::FromStreamPriv<T> for Vec<T> { + type Collection = Vec<T>; + + fn initialize(lower: usize, _upper: Option<usize>) -> Vec<T> { + Vec::with_capacity(lower) + } + + fn extend(collection: &mut Vec<T>, item: T) -> bool { + collection.push(item); + true + } + + fn finalize(collection: &mut Vec<T>) -> Vec<T> { + mem::replace(collection, vec![]) + } +} + +impl<T> FromStream<T> for Box<[T]> {} + +impl<T> sealed::FromStreamPriv<T> for Box<[T]> { + type Collection = Vec<T>; + + fn initialize(lower: usize, upper: Option<usize>) -> Vec<T> { + <Vec<T> as sealed::FromStreamPriv<T>>::initialize(lower, upper) + } + + fn extend(collection: &mut Vec<T>, item: T) -> bool { + <Vec<T> as sealed::FromStreamPriv<T>>::extend(collection, item) + } + + fn finalize(collection: &mut Vec<T>) -> Box<[T]> { + <Vec<T> as sealed::FromStreamPriv<T>>::finalize(collection).into_boxed_slice() + } +} + +impl<T, U, E> FromStream<Result<T, E>> for Result<U, E> where U: FromStream<T> {} + +impl<T, U, E> sealed::FromStreamPriv<Result<T, E>> for Result<U, E> +where + U: FromStream<T>, +{ + type Collection = Result<U::Collection, E>; + + fn initialize(lower: usize, upper: Option<usize>) -> Result<U::Collection, E> { + Ok(U::initialize(lower, upper)) + } + + fn extend(collection: &mut Self::Collection, item: Result<T, E>) -> bool { + assert!(collection.is_ok()); + match item { + Ok(item) => { + let collection = collection.as_mut().ok().expect("invalid state"); + U::extend(collection, item) + } + Err(err) => { + *collection = Err(err); + false + } + } + } + + fn finalize(collection: &mut Self::Collection) -> Result<U, E> { + if let Ok(collection) = collection.as_mut() { + Ok(U::finalize(collection)) + } else { + let res = mem::replace(collection, Ok(U::initialize(0, Some(0)))); + + if let Err(err) = res { + Err(err) + } else { + unreachable!(); + } + } + } +} + +impl<T: Buf> FromStream<T> for Bytes {} + +impl<T: Buf> 
sealed::FromStreamPriv<T> for Bytes {
+ type Collection = BytesMut;
+
+ fn initialize(_lower: usize, _upper: Option<usize>) -> BytesMut {
+ BytesMut::new()
+ }
+
+ fn extend(collection: &mut BytesMut, item: T) -> bool {
+ collection.put(item);
+ true
+ }
+
+ fn finalize(collection: &mut BytesMut) -> Bytes {
+ mem::replace(collection, BytesMut::new()).freeze()
+ }
+}
+
+impl<T: Buf> FromStream<T> for BytesMut {}
+
+impl<T: Buf> sealed::FromStreamPriv<T> for BytesMut {
+ type Collection = BytesMut;
+
+ fn initialize(_lower: usize, _upper: Option<usize>) -> BytesMut {
+ BytesMut::new()
+ }
+
+ fn extend(collection: &mut BytesMut, item: T) -> bool {
+ collection.put(item);
+ true
+ }
+
+ fn finalize(collection: &mut BytesMut) -> BytesMut {
+ mem::replace(collection, BytesMut::new())
+ }
+}
+
+pub(crate) mod sealed {
+ #[doc(hidden)]
+ pub trait FromStreamPriv<T> {
+ /// Intermediate type used during the collection process
+ type Collection;
+
+ /// Initialize the collection
+ fn initialize(lower: usize, upper: Option<usize>) -> Self::Collection;
+
+ /// Extend the collection with the received item
+ ///
+ /// Return `true` to continue streaming, `false` to complete the collection.
+ fn extend(collection: &mut Self::Collection, item: T) -> bool;
+
+ /// Finalize the collection into the target type.
+ fn finalize(collection: &mut Self::Collection) -> Self;
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/empty.rs b/third_party/rust/tokio/src/stream/empty.rs
new file mode 100644
index 0000000000..6118673e50
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/empty.rs
@@ -0,0 +1,50 @@
+use crate::stream::Stream;
+
+use core::marker::PhantomData;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`empty`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Empty<T>(PhantomData<T>);
+
+impl<T> Unpin for Empty<T> {}
+unsafe impl<T> Send for Empty<T> {}
+unsafe impl<T> Sync for Empty<T> {}
+
+/// Creates a stream that yields nothing.
+///
+/// The returned stream is immediately ready and returns `None`. Use
+/// [`stream::pending()`](super::pending()) to obtain a stream that is never
+/// ready.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut none = stream::empty::<i32>();
+///
+/// assert_eq!(None, none.next().await);
+/// }
+/// ```
+pub const fn empty<T>() -> Empty<T> {
+ Empty(PhantomData)
+}
+
+impl<T> Stream for Empty<T> {
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
+ Poll::Ready(None)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/filter.rs b/third_party/rust/tokio/src/stream/filter.rs
new file mode 100644
index 0000000000..799630b234
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/filter.rs
@@ -0,0 +1,58 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`filter`](super::StreamExt::filter) method.
+ #[must_use = "streams do nothing unless polled"] + pub struct Filter<St, F> { + #[pin] + stream: St, + f: F, + } +} + +impl<St, F> fmt::Debug for Filter<St, F> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Filter") + .field("stream", &self.stream) + .finish() + } +} + +impl<St, F> Filter<St, F> { + pub(super) fn new(stream: St, f: F) -> Self { + Self { stream, f } + } +} + +impl<St, F> Stream for Filter<St, F> +where + St: Stream, + F: FnMut(&St::Item) -> bool, +{ + type Item = St::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> { + loop { + match ready!(self.as_mut().project().stream.poll_next(cx)) { + Some(e) => { + if (self.as_mut().project().f)(&e) { + return Poll::Ready(Some(e)); + } + } + None => return Poll::Ready(None), + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (0, self.stream.size_hint().1) // can't know a lower bound, due to the predicate + } +} diff --git a/third_party/rust/tokio/src/stream/filter_map.rs b/third_party/rust/tokio/src/stream/filter_map.rs new file mode 100644 index 0000000000..8dc05a5460 --- /dev/null +++ b/third_party/rust/tokio/src/stream/filter_map.rs @@ -0,0 +1,58 @@ +use crate::stream::Stream; + +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream returned by the [`filter_map`](super::StreamExt::filter_map) method. + #[must_use = "streams do nothing unless polled"] + pub struct FilterMap<St, F> { + #[pin] + stream: St, + f: F, + } +} + +impl<St, F> fmt::Debug for FilterMap<St, F> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FilterMap") + .field("stream", &self.stream) + .finish() + } +} + +impl<St, F> FilterMap<St, F> { + pub(super) fn new(stream: St, f: F) -> Self { + Self { stream, f } + } +} + +impl<St, F, T> Stream for FilterMap<St, F> +where + St: Stream, + F: FnMut(St::Item) -> Option<T>, +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> { + loop { + match ready!(self.as_mut().project().stream.poll_next(cx)) { + Some(e) => { + if let Some(e) = (self.as_mut().project().f)(e) { + return Poll::Ready(Some(e)); + } + } + None => return Poll::Ready(None), + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + (0, self.stream.size_hint().1) // can't know a lower bound, due to the predicate + } +} diff --git a/third_party/rust/tokio/src/stream/fold.rs b/third_party/rust/tokio/src/stream/fold.rs new file mode 100644 index 0000000000..7b9fead3db --- /dev/null +++ b/third_party/rust/tokio/src/stream/fold.rs @@ -0,0 +1,51 @@ +use crate::stream::Stream; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Future returned by the [`fold`](super::StreamExt::fold) method. 
+ #[derive(Debug)] + pub struct FoldFuture<St, B, F> { + #[pin] + stream: St, + acc: Option<B>, + f: F, + } +} + +impl<St, B, F> FoldFuture<St, B, F> { + pub(super) fn new(stream: St, init: B, f: F) -> Self { + Self { + stream, + acc: Some(init), + f, + } + } +} + +impl<St, B, F> Future for FoldFuture<St, B, F> +where + St: Stream, + F: FnMut(B, St::Item) -> B, +{ + type Output = B; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let mut me = self.project(); + loop { + let next = ready!(me.stream.as_mut().poll_next(cx)); + + match next { + Some(v) => { + let old = me.acc.take().unwrap(); + let new = (me.f)(old, v); + *me.acc = Some(new); + } + None => return Poll::Ready(me.acc.take().unwrap()), + } + } + } +} diff --git a/third_party/rust/tokio/src/stream/fuse.rs b/third_party/rust/tokio/src/stream/fuse.rs new file mode 100644 index 0000000000..6c9e02d664 --- /dev/null +++ b/third_party/rust/tokio/src/stream/fuse.rs @@ -0,0 +1,53 @@ +use crate::stream::Stream; + +use pin_project_lite::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Stream returned by [`fuse()`][super::StreamExt::fuse]. + #[derive(Debug)] + pub struct Fuse<T> { + #[pin] + stream: Option<T>, + } +} + +impl<T> Fuse<T> +where + T: Stream, +{ + pub(crate) fn new(stream: T) -> Fuse<T> { + Fuse { + stream: Some(stream), + } + } +} + +impl<T> Stream for Fuse<T> +where + T: Stream, +{ + type Item = T::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> { + let res = match Option::as_pin_mut(self.as_mut().project().stream) { + Some(stream) => ready!(stream.poll_next(cx)), + None => return Poll::Ready(None), + }; + + if res.is_none() { + // Do not poll the stream anymore + self.as_mut().project().stream.set(None); + } + + Poll::Ready(res) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + match self.stream { + Some(ref stream) => stream.size_hint(), + None => (0, Some(0)), + } + } +} diff --git a/third_party/rust/tokio/src/stream/iter.rs b/third_party/rust/tokio/src/stream/iter.rs new file mode 100644 index 0000000000..36eeb5612f --- /dev/null +++ b/third_party/rust/tokio/src/stream/iter.rs @@ -0,0 +1,55 @@ +use crate::stream::Stream; + +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Stream for the [`iter`] function. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Iter<I> { + iter: I, +} + +impl<I> Unpin for Iter<I> {} + +/// Converts an `Iterator` into a `Stream` which is always ready +/// to yield the next value. +/// +/// Iterators in Rust don't express the ability to block, so this adapter +/// simply always calls `iter.next()` and returns that. 
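+///
+/// One caveat: to avoid letting an always-ready iterator starve other tasks,
+/// `poll_next` first consults the task's cooperative budget via
+/// `crate::coop::poll_proceed` (see the implementation below), so the
+/// resulting stream still yields back to the scheduler periodically.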
+///
+/// ```
+/// # async fn dox() {
+/// use tokio::stream::{self, StreamExt};
+///
+/// let mut stream = stream::iter(vec![17, 19]);
+///
+/// assert_eq!(stream.next().await, Some(17));
+/// assert_eq!(stream.next().await, Some(19));
+/// assert_eq!(stream.next().await, None);
+/// # }
+/// ```
+pub fn iter<I>(i: I) -> Iter<I::IntoIter>
+where
+ I: IntoIterator,
+{
+ Iter {
+ iter: i.into_iter(),
+ }
+}
+
+impl<I> Stream for Iter<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<I::Item>> {
+ ready!(crate::coop::poll_proceed(cx));
+ Poll::Ready(self.iter.next())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/map.rs b/third_party/rust/tokio/src/stream/map.rs
new file mode 100644
index 0000000000..dfac5a2c94
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/map.rs
@@ -0,0 +1,51 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`map`](super::StreamExt::map) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Map<St, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ }
+}
+
+impl<St, F> fmt::Debug for Map<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Map").field("stream", &self.stream).finish()
+ }
+}
+
+impl<St, F> Map<St, F> {
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Map { stream, f }
+ }
+}
+
+impl<St, F, T> Stream for Map<St, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> T,
+{
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ self.as_mut()
+ .project()
+ .stream
+ .poll_next(cx)
+ .map(|opt| opt.map(|x| (self.as_mut().project().f)(x)))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/merge.rs b/third_party/rust/tokio/src/stream/merge.rs
new file mode 100644
index 0000000000..4850cd40c7
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/merge.rs
@@ -0,0 +1,97 @@
+use crate::stream::{Fuse, Stream};
+
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`merge`](super::StreamExt::merge) method.
+ pub struct Merge<T, U> {
+ #[pin]
+ a: Fuse<T>,
+ #[pin]
+ b: Fuse<U>,
+ // When `true`, poll `a` first; otherwise, poll `b`.
+ a_first: bool, + } +} + +impl<T, U> Merge<T, U> { + pub(super) fn new(a: T, b: U) -> Merge<T, U> + where + T: Stream, + U: Stream, + { + Merge { + a: Fuse::new(a), + b: Fuse::new(b), + a_first: true, + } + } +} + +impl<T, U> Stream for Merge<T, U> +where + T: Stream, + U: Stream<Item = T::Item>, +{ + type Item = T::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> { + let me = self.project(); + let a_first = *me.a_first; + + // Toggle the flag + *me.a_first = !a_first; + + if a_first { + poll_next(me.a, me.b, cx) + } else { + poll_next(me.b, me.a, cx) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let upper = match (a_upper, b_upper) { + (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper), + _ => None, + }; + + (a_lower + b_lower, upper) + } +} + +fn poll_next<T, U>( + first: Pin<&mut T>, + second: Pin<&mut U>, + cx: &mut Context<'_>, +) -> Poll<Option<T::Item>> +where + T: Stream, + U: Stream<Item = T::Item>, +{ + use Poll::*; + + let mut done = true; + + match first.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(val)), + Ready(None) => {} + Pending => done = false, + } + + match second.poll_next(cx) { + Ready(Some(val)) => return Ready(Some(val)), + Ready(None) => {} + Pending => done = false, + } + + if done { + Ready(None) + } else { + Pending + } +} diff --git a/third_party/rust/tokio/src/stream/mod.rs b/third_party/rust/tokio/src/stream/mod.rs new file mode 100644 index 0000000000..307ead5fba --- /dev/null +++ b/third_party/rust/tokio/src/stream/mod.rs @@ -0,0 +1,819 @@ +//! Stream utilities for Tokio. +//! +//! A `Stream` is an asynchronous sequence of values. It can be thought of as an asynchronous version of the standard library's `Iterator` trait. +//! +//! This module provides helpers to work with them. + +mod all; +use all::AllFuture; + +mod any; +use any::AnyFuture; + +mod chain; +use chain::Chain; + +mod collect; +use collect::Collect; +pub use collect::FromStream; + +mod empty; +pub use empty::{empty, Empty}; + +mod filter; +use filter::Filter; + +mod filter_map; +use filter_map::FilterMap; + +mod fold; +use fold::FoldFuture; + +mod fuse; +use fuse::Fuse; + +mod iter; +pub use iter::{iter, Iter}; + +mod map; +use map::Map; + +mod merge; +use merge::Merge; + +mod next; +use next::Next; + +mod once; +pub use once::{once, Once}; + +mod pending; +pub use pending::{pending, Pending}; + +mod stream_map; +pub use stream_map::StreamMap; + +mod skip; +use skip::Skip; + +mod skip_while; +use skip_while::SkipWhile; + +mod try_next; +use try_next::TryNext; + +mod take; +use take::Take; + +mod take_while; +use take_while::TakeWhile; + +cfg_time! { + mod timeout; + use timeout::Timeout; + use std::time::Duration; +} + +pub use futures_core::Stream; + +/// An extension trait for `Stream`s that provides a variety of convenient +/// combinator functions. +pub trait StreamExt: Stream { + /// Consumes and returns the next value in the stream or `None` if the + /// stream is finished. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn next(&mut self) -> Option<Self::Item>; + /// ``` + /// + /// Note that because `next` doesn't take ownership over the stream, + /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a + /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. 
This can + /// be done by boxing the stream using [`Box::pin`] or + /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils` + /// crate. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// + /// let mut stream = stream::iter(1..=3); + /// + /// assert_eq!(stream.next().await, Some(1)); + /// assert_eq!(stream.next().await, Some(2)); + /// assert_eq!(stream.next().await, Some(3)); + /// assert_eq!(stream.next().await, None); + /// # } + /// ``` + fn next(&mut self) -> Next<'_, Self> + where + Self: Unpin, + { + Next::new(self) + } + + /// Consumes and returns the next item in the stream. If an error is + /// encountered before the next item, the error is returned instead. + /// + /// Equivalent to: + /// + /// ```ignore + /// async fn try_next(&mut self) -> Result<Option<T>, E>; + /// ``` + /// + /// This is similar to the [`next`](StreamExt::next) combinator, + /// but returns a [`Result<Option<T>, E>`](Result) rather than + /// an [`Option<Result<T, E>>`](Option), making for easy use + /// with the [`?`](std::ops::Try) operator. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// + /// let mut stream = stream::iter(vec![Ok(1), Ok(2), Err("nope")]); + /// + /// assert_eq!(stream.try_next().await, Ok(Some(1))); + /// assert_eq!(stream.try_next().await, Ok(Some(2))); + /// assert_eq!(stream.try_next().await, Err("nope")); + /// # } + /// ``` + fn try_next<T, E>(&mut self) -> TryNext<'_, Self> + where + Self: Stream<Item = Result<T, E>> + Unpin, + { + TryNext::new(self) + } + + /// Maps this stream's items to a different type, returning a new stream of + /// the resulting type. + /// + /// The provided closure is executed over all elements of this stream as + /// they are made available. It is executed inline with calls to + /// [`poll_next`](Stream::poll_next). + /// + /// Note that this function consumes the stream passed into it and returns a + /// wrapped version of it, similar to the existing `map` methods in the + /// standard library. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// + /// let stream = stream::iter(1..=3); + /// let mut stream = stream.map(|x| x + 3); + /// + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, Some(5)); + /// assert_eq!(stream.next().await, Some(6)); + /// # } + /// ``` + fn map<T, F>(self, f: F) -> Map<Self, F> + where + F: FnMut(Self::Item) -> T, + Self: Sized, + { + Map::new(self, f) + } + + /// Combine two streams into one by interleaving the output of both as it + /// is produced. + /// + /// Values are produced from the merged stream in the order they arrive from + /// the two source streams. If both source streams provide values + /// simultaneously, the merge stream alternates between them. This provides + /// some level of fairness. + /// + /// The merged stream completes once **both** source streams complete. When + /// one source stream completes before the other, the merge stream + /// exclusively polls the remaining stream. 
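+ ///
+ /// For instance, merging two always-ready `stream::iter` streams simply
+ /// alternates between them (a minimal sketch assuming both streams are
+ /// immediately ready):
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = stream::iter(vec![1, 3]);
+ /// let b = stream::iter(vec![2, 4]);
+ ///
+ /// let mut merged = a.merge(b);
+ ///
+ /// assert_eq!(merged.next().await, Some(1));
+ /// assert_eq!(merged.next().await, Some(2));
+ /// assert_eq!(merged.next().await, Some(3));
+ /// assert_eq!(merged.next().await, Some(4));
+ /// # }
+ /// ```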
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::StreamExt;
+ /// use tokio::sync::mpsc;
+ /// use tokio::time;
+ ///
+ /// use std::time::Duration;
+ ///
+ /// # /*
+ /// #[tokio::main]
+ /// # */
+ /// # #[tokio::main(basic_scheduler)]
+ /// async fn main() {
+ /// # time::pause();
+ /// let (mut tx1, rx1) = mpsc::channel(10);
+ /// let (mut tx2, rx2) = mpsc::channel(10);
+ ///
+ /// let mut rx = rx1.merge(rx2);
+ ///
+ /// tokio::spawn(async move {
+ /// // Send some values immediately
+ /// tx1.send(1).await.unwrap();
+ /// tx1.send(2).await.unwrap();
+ ///
+ /// // Let the other task send values
+ /// time::delay_for(Duration::from_millis(20)).await;
+ ///
+ /// tx1.send(4).await.unwrap();
+ /// });
+ ///
+ /// tokio::spawn(async move {
+ /// // Wait for the first task to send values
+ /// time::delay_for(Duration::from_millis(5)).await;
+ ///
+ /// tx2.send(3).await.unwrap();
+ ///
+ /// time::delay_for(Duration::from_millis(25)).await;
+ ///
+ /// // Send the final value
+ /// tx2.send(5).await.unwrap();
+ /// });
+ ///
+ /// assert_eq!(1, rx.next().await.unwrap());
+ /// assert_eq!(2, rx.next().await.unwrap());
+ /// assert_eq!(3, rx.next().await.unwrap());
+ /// assert_eq!(4, rx.next().await.unwrap());
+ /// assert_eq!(5, rx.next().await.unwrap());
+ ///
+ /// // The merged stream is consumed
+ /// assert!(rx.next().await.is_none());
+ /// }
+ /// ```
+ fn merge<U>(self, other: U) -> Merge<Self, U>
+ where
+ U: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ Merge::new(self, other)
+ }
+
+ /// Filters the values produced by this stream according to the provided
+ /// predicate.
+ ///
+ /// As values of this stream are made available, the provided predicate `f`
+ /// will be run against them. If the predicate resolves to `true`, then the
+ /// stream will yield the value, but if the predicate resolves to `false`,
+ /// then the value will be discarded and the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the [`Iterator::filter`] method in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=8);
+ /// let mut evens = stream.filter(|x| x % 2 == 0);
+ ///
+ /// assert_eq!(Some(2), evens.next().await);
+ /// assert_eq!(Some(4), evens.next().await);
+ /// assert_eq!(Some(6), evens.next().await);
+ /// assert_eq!(Some(8), evens.next().await);
+ /// assert_eq!(None, evens.next().await);
+ /// # }
+ /// ```
+ fn filter<F>(self, f: F) -> Filter<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ Filter::new(self, f)
+ }
+
+ /// Filters the values produced by this stream while simultaneously mapping
+ /// them to a different type according to the provided closure.
+ ///
+ /// As values of this stream are made available, the provided function will
+ /// be run on them. If the function `f` resolves to
+ /// [`Some(item)`](Some) then the stream will yield the value `item`, but if
+ /// it resolves to [`None`] then the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the [`Iterator::filter_map`] method in
+ /// the standard library.
+ /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// + /// let stream = stream::iter(1..=8); + /// let mut evens = stream.filter_map(|x| { + /// if x % 2 == 0 { Some(x + 1) } else { None } + /// }); + /// + /// assert_eq!(Some(3), evens.next().await); + /// assert_eq!(Some(5), evens.next().await); + /// assert_eq!(Some(7), evens.next().await); + /// assert_eq!(Some(9), evens.next().await); + /// assert_eq!(None, evens.next().await); + /// # } + /// ``` + fn filter_map<T, F>(self, f: F) -> FilterMap<Self, F> + where + F: FnMut(Self::Item) -> Option<T>, + Self: Sized, + { + FilterMap::new(self, f) + } + + /// Creates a stream which ends after the first `None`. + /// + /// After a stream returns `None`, behavior is undefined. Future calls to + /// `poll_next` may or may not return `Some(T)` again or they may panic. + /// `fuse()` adapts a stream, ensuring that after `None` is given, it will + /// return `None` forever. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{Stream, StreamExt}; + /// + /// use std::pin::Pin; + /// use std::task::{Context, Poll}; + /// + /// // a stream which alternates between Some and None + /// struct Alternate { + /// state: i32, + /// } + /// + /// impl Stream for Alternate { + /// type Item = i32; + /// + /// fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<i32>> { + /// let val = self.state; + /// self.state = self.state + 1; + /// + /// // if it's even, Some(i32), else None + /// if val % 2 == 0 { + /// Poll::Ready(Some(val)) + /// } else { + /// Poll::Ready(None) + /// } + /// } + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// let mut stream = Alternate { state: 0 }; + /// + /// // the stream goes back and forth + /// assert_eq!(stream.next().await, Some(0)); + /// assert_eq!(stream.next().await, None); + /// assert_eq!(stream.next().await, Some(2)); + /// assert_eq!(stream.next().await, None); + /// + /// // however, once it is fused + /// let mut stream = stream.fuse(); + /// + /// assert_eq!(stream.next().await, Some(4)); + /// assert_eq!(stream.next().await, None); + /// + /// // it will always return `None` after the first time. + /// assert_eq!(stream.next().await, None); + /// assert_eq!(stream.next().await, None); + /// assert_eq!(stream.next().await, None); + /// } + /// ``` + fn fuse(self) -> Fuse<Self> + where + Self: Sized, + { + Fuse::new(self) + } + + /// Creates a new stream of at most `n` items of the underlying stream. + /// + /// Once `n` items have been yielded from this stream then it will always + /// return that the stream is done. + /// + /// # Examples + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// + /// let mut stream = stream::iter(1..=10).take(3); + /// + /// assert_eq!(Some(1), stream.next().await); + /// assert_eq!(Some(2), stream.next().await); + /// assert_eq!(Some(3), stream.next().await); + /// assert_eq!(None, stream.next().await); + /// # } + /// ``` + fn take(self, n: usize) -> Take<Self> + where + Self: Sized, + { + Take::new(self, n) + } + + /// Take elements from this stream while the provided predicate + /// resolves to `true`. + /// + /// This function, like `Iterator::take_while`, will take elements from the + /// stream until the predicate `f` resolves to `false`. Once one element + /// returns false it will always return that the stream is done. 
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).take_while(|x| *x <= 3);
+ ///
+ /// assert_eq!(Some(1), stream.next().await);
+ /// assert_eq!(Some(2), stream.next().await);
+ /// assert_eq!(Some(3), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn take_while<F>(self, f: F) -> TakeWhile<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ TakeWhile::new(self, f)
+ }
+
+ /// Creates a new stream that will skip the `n` first items of the
+ /// underlying stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).skip(7);
+ ///
+ /// assert_eq!(Some(8), stream.next().await);
+ /// assert_eq!(Some(9), stream.next().await);
+ /// assert_eq!(Some(10), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn skip(self, n: usize) -> Skip<Self>
+ where
+ Self: Sized,
+ {
+ Skip::new(self, n)
+ }
+
+ /// Skip elements from the underlying stream while the provided predicate
+ /// resolves to `true`.
+ ///
+ /// This function, like [`Iterator::skip_while`], will ignore elements from the
+ /// stream until the predicate `f` resolves to `false`. Once one element
+ /// returns `false`, the rest of the elements will be yielded.
+ ///
+ /// [`Iterator::skip_while`]: std::iter::Iterator::skip_while()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ /// let mut stream = stream::iter(vec![1,2,3,4,1]).skip_while(|x| *x < 3);
+ ///
+ /// assert_eq!(Some(3), stream.next().await);
+ /// assert_eq!(Some(4), stream.next().await);
+ /// assert_eq!(Some(1), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn skip_while<F>(self, f: F) -> SkipWhile<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ SkipWhile::new(self, f)
+ }
+
+ /// Tests if every element of the stream matches a predicate.
+ ///
+ /// `all()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the stream, and if they all return
+ /// `true`, then so does `all`. If any of them return `false`, it
+ /// returns `false`. An empty stream returns `true`.
+ ///
+ /// `all()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `false`, given that no matter what else happens,
+ /// the result will also be `false`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(stream::iter(&a).all(|&x| x > 0).await);
+ ///
+ /// assert!(!stream::iter(&a).all(|&x| x > 2).await);
+ /// # }
+ /// ```
+ ///
+ /// Stopping at the first `false`:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = stream::iter(&a);
+ ///
+ /// assert!(!iter.all(|&x| x != 2).await);
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next().await, Some(&3));
+ /// # }
+ /// ```
+ fn all<F>(&mut self, f: F) -> AllFuture<'_, Self, F>
+ where
+ Self: Unpin,
+ F: FnMut(Self::Item) -> bool,
+ {
+ AllFuture::new(self, f)
+ }
+
+ /// Tests if any element of the stream matches a predicate.
+ ///
+ /// `any()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the stream, and if any of them return
+ /// `true`, then so does `any()`. If they all return `false`, it
+ /// returns `false`.
+ ///
+ /// `any()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `true`, given that no matter what else happens,
+ /// the result will also be `true`.
+ ///
+ /// An empty stream returns `false`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(stream::iter(&a).any(|&x| x > 0).await);
+ ///
+ /// assert!(!stream::iter(&a).any(|&x| x > 5).await);
+ /// # }
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = stream::iter(&a);
+ ///
+ /// assert!(iter.any(|&x| x != 2).await);
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next().await, Some(&2));
+ /// # }
+ /// ```
+ fn any<F>(&mut self, f: F) -> AnyFuture<'_, Self, F>
+ where
+ Self: Unpin,
+ F: FnMut(Self::Item) -> bool,
+ {
+ AnyFuture::new(self, f)
+ }
+
+ /// Combine two streams into one by first returning all values from the
+ /// first stream, then all values from the second stream.
+ ///
+ /// As long as `self` still has values to emit, no values from `other` are
+ /// emitted, even if some are ready.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let one = stream::iter(vec![1, 2, 3]);
+ /// let two = stream::iter(vec![4, 5, 6]);
+ ///
+ /// let mut stream = one.chain(two);
+ ///
+ /// assert_eq!(stream.next().await, Some(1));
+ /// assert_eq!(stream.next().await, Some(2));
+ /// assert_eq!(stream.next().await, Some(3));
+ /// assert_eq!(stream.next().await, Some(4));
+ /// assert_eq!(stream.next().await, Some(5));
+ /// assert_eq!(stream.next().await, Some(6));
+ /// assert_eq!(stream.next().await, None);
+ /// }
+ /// ```
+ fn chain<U>(self, other: U) -> Chain<Self, U>
+ where
+ U: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ Chain::new(self, other)
+ }
+
+ /// A combinator that applies a function to every element in a stream,
+ /// producing a single, final value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, *};
+ ///
+ /// let s = stream::iter(vec![1u8, 2, 3]);
+ /// let sum = s.fold(0, |acc, x| acc + x).await;
+ ///
+ /// assert_eq!(sum, 6);
+ /// # }
+ /// ```
+ fn fold<B, F>(self, init: B, f: F) -> FoldFuture<Self, B, F>
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ FoldFuture::new(self, init, f)
+ }
+
+ /// Drain the stream, pushing all emitted values into a collection.
+ ///
+ /// `collect` streams all values, awaiting as needed. Values are pushed into
+ /// a collection. A number of different target collection types are
+ /// supported, including [`Vec`](std::vec::Vec),
+ /// [`String`](std::string::String), and [`Bytes`](bytes::Bytes).
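+ /// The provided implementations in this module also cover `()`,
+ /// `Box<[T]>`, `BytesMut`, and `Result<U, E>` (see the `FromStream`
+ /// impls in `collect.rs`).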
+    ///
+    /// # `Result`
+    ///
+    /// `collect()` can also be used with streams of type `Result<T, E>` where
+    /// `T: FromStream<_>`. In this case, `collect()` will stream as long as
+    /// values yielded from the stream are `Ok(_)`. If `Err(_)` is encountered,
+    /// streaming is terminated and `collect()` returns the `Err`.
+    ///
+    /// # Notes
+    ///
+    /// `FromStream` is currently a sealed trait. Stabilization is pending
+    /// enhancements to the Rust language.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use tokio::stream::{self, StreamExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let doubled: Vec<i32> =
+    ///         stream::iter(vec![1, 2, 3])
+    ///             .map(|x| x * 2)
+    ///             .collect()
+    ///             .await;
+    ///
+    ///     assert_eq!(vec![2, 4, 6], doubled);
+    /// }
+    /// ```
+    ///
+    /// Collecting a stream of `Result` values:
+    ///
+    /// ```
+    /// use tokio::stream::{self, StreamExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     // A stream containing only `Ok` values will be collected
+    ///     let values: Result<Vec<i32>, &str> =
+    ///         stream::iter(vec![Ok(1), Ok(2), Ok(3)])
+    ///             .collect()
+    ///             .await;
+    ///
+    ///     assert_eq!(Ok(vec![1, 2, 3]), values);
+    ///
+    ///     // A stream containing `Err` values will return the first error.
+    ///     let results = vec![Ok(1), Err("no"), Ok(2), Ok(3), Err("nein")];
+    ///
+    ///     let values: Result<Vec<i32>, &str> =
+    ///         stream::iter(results)
+    ///             .collect()
+    ///             .await;
+    ///
+    ///     assert_eq!(Err("no"), values);
+    /// }
+    /// ```
+    fn collect<T>(self) -> Collect<Self, T>
+    where
+        T: FromStream<Self::Item>,
+        Self: Sized,
+    {
+        Collect::new(self)
+    }
+
+    /// Applies a per-item timeout to the passed stream.
+    ///
+    /// `timeout()` takes a `Duration` that represents the maximum amount of
+    /// time each element of the stream has to complete before timing out.
+    ///
+    /// If the wrapped stream yields a value before the deadline is reached, the
+    /// value is returned. Otherwise, an error is returned. The caller may decide
+    /// to continue consuming the stream and will eventually get the next source
+    /// stream value once it becomes available.
+    ///
+    /// # Notes
+    ///
+    /// This function consumes the stream passed into it and returns a
+    /// wrapped version of it.
+    ///
+    /// Polling the returned stream will continue to poll the inner stream even
+    /// if one or more items time out.
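Because the budget is per item, bounding the time for draining the whole stream requires wrapping the draining future instead. A small sketch contrasting the two, assuming the `time` feature is enabled and using the `tokio::time::timeout` free function:

```rust
use std::time::Duration;
use tokio::stream::{self, StreamExt};
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    // Per-item budget: each element must be produced within the window.
    let mut fast = stream::iter(vec![1, 2, 3]).timeout(Duration::from_millis(50));
    assert_eq!(fast.try_next().await.unwrap(), Some(1));

    // Whole-stream budget: wrap the future that drains the stream.
    let drain = stream::iter(vec![4, 5, 6]).collect::<Vec<_>>();
    let collected = timeout(Duration::from_millis(50), drain).await.unwrap();
    assert_eq!(collected, vec![4, 5, 6]);
}
```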
+ /// + /// # Examples + /// + /// Suppose we have a stream `int_stream` that yields 3 numbers (1, 2, 3): + /// + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use tokio::stream::{self, StreamExt}; + /// use std::time::Duration; + /// # let int_stream = stream::iter(1..=3); + /// + /// let mut int_stream = int_stream.timeout(Duration::from_secs(1)); + /// + /// // When no items time out, we get the 3 elements in succession: + /// assert_eq!(int_stream.try_next().await, Ok(Some(1))); + /// assert_eq!(int_stream.try_next().await, Ok(Some(2))); + /// assert_eq!(int_stream.try_next().await, Ok(Some(3))); + /// assert_eq!(int_stream.try_next().await, Ok(None)); + /// + /// // If the second item times out, we get an error and continue polling the stream: + /// # let mut int_stream = stream::iter(vec![Ok(1), Err(()), Ok(2), Ok(3)]); + /// assert_eq!(int_stream.try_next().await, Ok(Some(1))); + /// assert!(int_stream.try_next().await.is_err()); + /// assert_eq!(int_stream.try_next().await, Ok(Some(2))); + /// assert_eq!(int_stream.try_next().await, Ok(Some(3))); + /// assert_eq!(int_stream.try_next().await, Ok(None)); + /// + /// // If we want to stop consuming the source stream the first time an + /// // element times out, we can use the `take_while` operator: + /// # let int_stream = stream::iter(vec![Ok(1), Err(()), Ok(2), Ok(3)]); + /// let mut int_stream = int_stream.take_while(Result::is_ok); + /// + /// assert_eq!(int_stream.try_next().await, Ok(Some(1))); + /// assert_eq!(int_stream.try_next().await, Ok(None)); + /// # } + /// ``` + #[cfg(all(feature = "time"))] + #[cfg_attr(docsrs, doc(cfg(feature = "time")))] + fn timeout(self, duration: Duration) -> Timeout<Self> + where + Self: Sized, + { + Timeout::new(self, duration) + } +} + +impl<St: ?Sized> StreamExt for St where St: Stream {} diff --git a/third_party/rust/tokio/src/stream/next.rs b/third_party/rust/tokio/src/stream/next.rs new file mode 100644 index 0000000000..3909c0c233 --- /dev/null +++ b/third_party/rust/tokio/src/stream/next.rs @@ -0,0 +1,28 @@ +use crate::stream::Stream; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Future for the [`next`](super::StreamExt::next) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Next<'a, St: ?Sized> { + stream: &'a mut St, +} + +impl<St: ?Sized + Unpin> Unpin for Next<'_, St> {} + +impl<'a, St: ?Sized> Next<'a, St> { + pub(super) fn new(stream: &'a mut St) -> Self { + Next { stream } + } +} + +impl<St: ?Sized + Stream + Unpin> Future for Next<'_, St> { + type Output = Option<St::Item>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + Pin::new(&mut self.stream).poll_next(cx) + } +} diff --git a/third_party/rust/tokio/src/stream/once.rs b/third_party/rust/tokio/src/stream/once.rs new file mode 100644 index 0000000000..04a642f309 --- /dev/null +++ b/third_party/rust/tokio/src/stream/once.rs @@ -0,0 +1,52 @@ +use crate::stream::{self, Iter, Stream}; + +use core::option; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Stream for the [`once`] function. +#[derive(Debug)] +#[must_use = "streams do nothing unless polled"] +pub struct Once<T> { + iter: Iter<option::IntoIter<T>>, +} + +impl<I> Unpin for Once<I> {} + +/// Creates a stream that emits an element exactly once. +/// +/// The returned stream is immediately ready and emits the provided value once. 
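Given that the `Once` definition above wraps `Iter<option::IntoIter<T>>`, a one-element `iter` stream should behave identically; a quick sketch of that equivalence:

```rust
use tokio::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    // `once(v)` is a thin wrapper over `iter` on a one-element `Option`.
    let mut a = stream::once(42);
    let mut b = stream::iter(Some(42).into_iter());

    assert_eq!(a.next().await, b.next().await); // both Some(42)
    assert_eq!(a.next().await, b.next().await); // both None
}
```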
+///
+/// # Examples
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     // one is the loneliest number
+///     let mut one = stream::once(1);
+///
+///     assert_eq!(Some(1), one.next().await);
+///
+///     // just one, that's all we get
+///     assert_eq!(None, one.next().await);
+/// }
+/// ```
+pub fn once<T>(value: T) -> Once<T> {
+    Once {
+        iter: stream::iter(Some(value).into_iter()),
+    }
+}
+
+impl<T> Stream for Once<T> {
+    type Item = T;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+        Pin::new(&mut self.iter).poll_next(cx)
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
diff --git a/third_party/rust/tokio/src/stream/pending.rs b/third_party/rust/tokio/src/stream/pending.rs
new file mode 100644
index 0000000000..2e06a1c261
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/pending.rs
@@ -0,0 +1,54 @@
+use crate::stream::Stream;
+
+use core::marker::PhantomData;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`pending`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Pending<T>(PhantomData<T>);
+
+impl<T> Unpin for Pending<T> {}
+unsafe impl<T> Send for Pending<T> {}
+unsafe impl<T> Sync for Pending<T> {}
+
+/// Creates a stream that is never ready.
+///
+/// The returned stream is never ready. Attempting to call
+/// [`next()`](crate::stream::StreamExt::next) will never complete. Use
+/// [`stream::empty()`](super::empty()) to obtain a stream that instead
+/// completes immediately without yielding any values.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```no_run
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let mut never = stream::pending::<i32>();
+///
+///     // This will never complete
+///     never.next().await;
+///
+///     unreachable!();
+/// }
+/// ```
+pub const fn pending<T>() -> Pending<T> {
+    Pending(PhantomData)
+}
+
+impl<T> Stream for Pending<T> {
+    type Item = T;
+
+    fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
+        Poll::Pending
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, None)
+    }
+}
diff --git a/third_party/rust/tokio/src/stream/skip.rs b/third_party/rust/tokio/src/stream/skip.rs
new file mode 100644
index 0000000000..39540cc984
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/skip.rs
@@ -0,0 +1,63 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+    /// Stream for the [`skip`](super::StreamExt::skip) method.
+ #[must_use = "streams do nothing unless polled"] + pub struct Skip<St> { + #[pin] + stream: St, + remaining: usize, + } +} + +impl<St> fmt::Debug for Skip<St> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Skip") + .field("stream", &self.stream) + .finish() + } +} + +impl<St> Skip<St> { + pub(super) fn new(stream: St, remaining: usize) -> Self { + Self { stream, remaining } + } +} + +impl<St> Stream for Skip<St> +where + St: Stream, +{ + type Item = St::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + loop { + match ready!(self.as_mut().project().stream.poll_next(cx)) { + Some(e) => { + if self.remaining == 0 { + return Poll::Ready(Some(e)); + } + *self.as_mut().project().remaining -= 1; + } + None => return Poll::Ready(None), + } + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let (lower, upper) = self.stream.size_hint(); + + let lower = lower.saturating_sub(self.remaining); + let upper = upper.map(|x| x.saturating_sub(self.remaining)); + + (lower, upper) + } +} diff --git a/third_party/rust/tokio/src/stream/skip_while.rs b/third_party/rust/tokio/src/stream/skip_while.rs new file mode 100644 index 0000000000..4e0500701a --- /dev/null +++ b/third_party/rust/tokio/src/stream/skip_while.rs @@ -0,0 +1,73 @@ +use crate::stream::Stream; + +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`skip_while`](super::StreamExt::skip_while) method. + #[must_use = "streams do nothing unless polled"] + pub struct SkipWhile<St, F> { + #[pin] + stream: St, + predicate: Option<F>, + } +} + +impl<St, F> fmt::Debug for SkipWhile<St, F> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SkipWhile") + .field("stream", &self.stream) + .finish() + } +} + +impl<St, F> SkipWhile<St, F> { + pub(super) fn new(stream: St, predicate: F) -> Self { + Self { + stream, + predicate: Some(predicate), + } + } +} + +impl<St, F> Stream for SkipWhile<St, F> +where + St: Stream, + F: FnMut(&St::Item) -> bool, +{ + type Item = St::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + let mut this = self.project(); + if let Some(predicate) = this.predicate { + loop { + match ready!(this.stream.as_mut().poll_next(cx)) { + Some(item) => { + if !(predicate)(&item) { + *this.predicate = None; + return Poll::Ready(Some(item)); + } + } + None => return Poll::Ready(None), + } + } + } else { + this.stream.poll_next(cx) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let (lower, upper) = self.stream.size_hint(); + + if self.predicate.is_some() { + return (0, upper); + } + + (lower, upper) + } +} diff --git a/third_party/rust/tokio/src/stream/stream_map.rs b/third_party/rust/tokio/src/stream/stream_map.rs new file mode 100644 index 0000000000..2f60ea4dda --- /dev/null +++ b/third_party/rust/tokio/src/stream/stream_map.rs @@ -0,0 +1,503 @@ +use crate::stream::Stream; + +use std::borrow::Borrow; +use std::hash::Hash; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Combine many streams into one, indexing each source stream with a unique +/// key. +/// +/// `StreamMap` is similar to [`StreamExt::merge`] in that it combines source +/// streams into a single merged stream that yields values in the order that +/// they arrive from the source streams. 
However, `StreamMap` has a lot more
+/// flexibility in usage patterns.
+///
+/// `StreamMap` can:
+///
+/// * Merge an arbitrary number of streams.
+/// * Track which source stream the value was received from.
+/// * Handle inserting and removing streams from the set of managed streams at
+///   any point during iteration.
+///
+/// All source streams held by `StreamMap` are indexed using a key. This key is
+/// included with the value when a source stream yields a value. The key is also
+/// used to remove the stream from the `StreamMap` before the stream has
+/// completed streaming.
+///
+/// # `Unpin`
+///
+/// Because the `StreamMap` API moves streams at runtime, both streams and
+/// keys must be `Unpin`. In order to insert a `!Unpin` stream into a
+/// `StreamMap`, use [`pin!`] to pin the stream to the stack or [`Box::pin`] to
+/// pin the stream on the heap.
+///
+/// # Implementation
+///
+/// `StreamMap` is backed by a `Vec<(K, V)>`. There is no guarantee that this
+/// internal implementation detail will persist in future versions, but it is
+/// important to know the runtime implications. In general, `StreamMap` works
+/// best with a "smallish" number of streams as all entries are scanned on
+/// insert, remove, and polling. In cases where a large number of streams need
+/// to be merged, it may be advisable to use tasks sending values on a shared
+/// [`mpsc`] channel.
+///
+/// [`StreamExt::merge`]: crate::stream::StreamExt::merge
+/// [`mpsc`]: crate::sync::mpsc
+/// [`pin!`]: macro@pin
+/// [`Box::pin`]: std::boxed::Box::pin
+///
+/// # Examples
+///
+/// Merging two streams, then removing them after receiving the first value
+///
+/// ```
+/// use tokio::stream::{StreamExt, StreamMap};
+/// use tokio::sync::mpsc;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (mut tx1, rx1) = mpsc::channel(10);
+///     let (mut tx2, rx2) = mpsc::channel(10);
+///
+///     tokio::spawn(async move {
+///         tx1.send(1).await.unwrap();
+///
+///         // This value will never be received. The send may or may not return
+///         // `Err` depending on if the remote end closed first or not.
+///         let _ = tx1.send(2).await;
+///     });
+///
+///     tokio::spawn(async move {
+///         tx2.send(3).await.unwrap();
+///         let _ = tx2.send(4).await;
+///     });
+///
+///     let mut map = StreamMap::new();
+///
+///     // Insert both streams
+///     map.insert("one", rx1);
+///     map.insert("two", rx2);
+///
+///     // Read twice
+///     for _ in 0..2 {
+///         let (key, val) = map.next().await.unwrap();
+///
+///         if key == "one" {
+///             assert_eq!(val, 1);
+///         } else {
+///             assert_eq!(val, 3);
+///         }
+///
+///         // Remove the stream to prevent reading the next value
+///         map.remove(key);
+///     }
+/// }
+/// ```
+///
+/// This example models a read-only client to a chat system with channels. The
+/// client sends commands to join and leave channels. `StreamMap` is used to
+/// manage active channel subscriptions.
+///
+/// For simplicity, messages are displayed with `println!`, but they could be
+/// sent to the client over a socket.
+///
+/// ```no_run
+/// use tokio::stream::{Stream, StreamExt, StreamMap};
+///
+/// enum Command {
+///     Join(String),
+///     Leave(String),
+/// }
+///
+/// fn commands() -> impl Stream<Item = Command> {
+///     // Streams in user commands by parsing `stdin`.
+///     # tokio::stream::pending()
+/// }
+///
+/// // Join a channel, returning a stream of messages received on the channel.
+/// fn join(channel: &str) -> impl Stream<Item = String> + Unpin { +/// // left as an exercise to the reader +/// # tokio::stream::pending() +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let mut channels = StreamMap::new(); +/// +/// // Input commands (join / leave channels). +/// let cmds = commands(); +/// tokio::pin!(cmds); +/// +/// loop { +/// tokio::select! { +/// Some(cmd) = cmds.next() => { +/// match cmd { +/// Command::Join(chan) => { +/// // Join the channel and add it to the `channels` +/// // stream map +/// let msgs = join(&chan); +/// channels.insert(chan, msgs); +/// } +/// Command::Leave(chan) => { +/// channels.remove(&chan); +/// } +/// } +/// } +/// Some((chan, msg)) = channels.next() => { +/// // Received a message, display it on stdout with the channel +/// // it originated from. +/// println!("{}: {}", chan, msg); +/// } +/// // Both the `commands` stream and the `channels` stream are +/// // complete. There is no more work to do, so leave the loop. +/// else => break, +/// } +/// } +/// } +/// ``` +#[derive(Debug, Default)] +pub struct StreamMap<K, V> { + /// Streams stored in the map + entries: Vec<(K, V)>, +} + +impl<K, V> StreamMap<K, V> { + /// Creates an empty `StreamMap`. + /// + /// The stream map is initially created with a capacity of `0`, so it will + /// not allocate until it is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, Pending}; + /// + /// let map: StreamMap<&str, Pending<()>> = StreamMap::new(); + /// ``` + pub fn new() -> StreamMap<K, V> { + StreamMap { entries: vec![] } + } + + /// Creates an empty `StreamMap` with the specified capacity. + /// + /// The stream map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the stream map will not allocate. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, Pending}; + /// + /// let map: StreamMap<&str, Pending<()>> = StreamMap::with_capacity(10); + /// ``` + pub fn with_capacity(capacity: usize) -> StreamMap<K, V> { + StreamMap { + entries: Vec::with_capacity(capacity), + } + } + + /// Returns an iterator visiting all keys in arbitrary order. + /// + /// The iterator element type is &'a K. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// + /// map.insert("a", pending::<i32>()); + /// map.insert("b", pending()); + /// map.insert("c", pending()); + /// + /// for key in map.keys() { + /// println!("{}", key); + /// } + /// ``` + pub fn keys(&self) -> impl Iterator<Item = &K> { + self.entries.iter().map(|(k, _)| k) + } + + /// An iterator visiting all values in arbitrary order. + /// + /// The iterator element type is &'a V. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// + /// map.insert("a", pending::<i32>()); + /// map.insert("b", pending()); + /// map.insert("c", pending()); + /// + /// for stream in map.values() { + /// println!("{:?}", stream); + /// } + /// ``` + pub fn values(&self) -> impl Iterator<Item = &V> { + self.entries.iter().map(|(_, v)| v) + } + + /// An iterator visiting all values mutably in arbitrary order. + /// + /// The iterator element type is &'a mut V. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut map = StreamMap::new();
+    ///
+    /// map.insert("a", pending::<i32>());
+    /// map.insert("b", pending());
+    /// map.insert("c", pending());
+    ///
+    /// for stream in map.values_mut() {
+    ///     println!("{:?}", stream);
+    /// }
+    /// ```
+    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
+        self.entries.iter_mut().map(|(_, v)| v)
+    }
+
+    /// Returns the number of streams the map can hold without reallocating.
+    ///
+    /// This number is a lower bound; the `StreamMap` might be able to hold
+    /// more, but is guaranteed to be able to hold at least this many.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, Pending};
+    ///
+    /// let map: StreamMap<i32, Pending<()>> = StreamMap::with_capacity(100);
+    /// assert!(map.capacity() >= 100);
+    /// ```
+    pub fn capacity(&self) -> usize {
+        self.entries.capacity()
+    }
+
+    /// Returns the number of streams in the map.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut a = StreamMap::new();
+    /// assert_eq!(a.len(), 0);
+    /// a.insert(1, pending::<i32>());
+    /// assert_eq!(a.len(), 1);
+    /// ```
+    pub fn len(&self) -> usize {
+        self.entries.len()
+    }
+
+    /// Returns `true` if the map contains no elements.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut a = StreamMap::new();
+    /// assert!(a.is_empty());
+    /// a.insert(1, pending::<i32>());
+    /// assert!(!a.is_empty());
+    /// ```
+    pub fn is_empty(&self) -> bool {
+        self.entries.is_empty()
+    }
+
+    /// Clears the map, removing all key-stream pairs. Keeps the allocated
+    /// memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut a = StreamMap::new();
+    /// a.insert(1, pending::<i32>());
+    /// a.clear();
+    /// assert!(a.is_empty());
+    /// ```
+    pub fn clear(&mut self) {
+        self.entries.clear();
+    }
+
+    /// Inserts a key-stream pair into the map.
+    ///
+    /// If the map did not have this key present, `None` is returned.
+    ///
+    /// If the map did have this key present, the new `stream` replaces the old
+    /// one and the old stream is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut map = StreamMap::new();
+    ///
+    /// assert!(map.insert(37, pending::<i32>()).is_none());
+    /// assert!(!map.is_empty());
+    ///
+    /// map.insert(37, pending());
+    /// assert!(map.insert(37, pending()).is_some());
+    /// ```
+    pub fn insert(&mut self, k: K, stream: V) -> Option<V>
+    where
+        K: Hash + Eq,
+    {
+        let ret = self.remove(&k);
+        self.entries.push((k, stream));
+
+        ret
+    }
+
+    /// Removes a key from the map, returning the stream at the key if the key was previously in the map.
+    ///
+    /// The key may be any borrowed form of the map's key type, but `Hash` and
+    /// `Eq` on the borrowed form must match those for the key type.
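The `Borrow`-based bound described above means an owned key type can be looked up by a cheaper borrowed form. A minimal sketch, assuming `String` keys addressed by plain `&str`:

```rust
use tokio::stream::{pending, Pending, StreamMap};

fn main() {
    let mut map: StreamMap<String, Pending<i32>> = StreamMap::new();
    map.insert("logs".to_string(), pending());

    // `String: Borrow<str>`, so a `&str` can address the entry.
    assert!(map.contains_key("logs"));
    assert!(map.remove("logs").is_some());
    assert!(map.remove("logs").is_none());
}
```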
+ /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// map.insert(1, pending::<i32>()); + /// assert!(map.remove(&1).is_some()); + /// assert!(map.remove(&1).is_none()); + /// ``` + pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V> + where + K: Borrow<Q>, + Q: Hash + Eq, + { + for i in 0..self.entries.len() { + if self.entries[i].0.borrow() == k { + return Some(self.entries.swap_remove(i).1); + } + } + + None + } + + /// Returns `true` if the map contains a stream for the specified key. + /// + /// The key may be any borrowed form of the map's key type, but `Hash` and + /// `Eq` on the borrowed form must match those for the key type. + /// + /// # Examples + /// + /// ``` + /// use tokio::stream::{StreamMap, pending}; + /// + /// let mut map = StreamMap::new(); + /// map.insert(1, pending::<i32>()); + /// assert_eq!(map.contains_key(&1), true); + /// assert_eq!(map.contains_key(&2), false); + /// ``` + pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool + where + K: Borrow<Q>, + Q: Hash + Eq, + { + for i in 0..self.entries.len() { + if self.entries[i].0.borrow() == k { + return true; + } + } + + false + } +} + +impl<K, V> StreamMap<K, V> +where + K: Unpin, + V: Stream + Unpin, +{ + /// Polls the next value, includes the vec entry index + fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<Option<(usize, V::Item)>> { + use Poll::*; + + let start = crate::util::thread_rng_n(self.entries.len() as u32) as usize; + let mut idx = start; + + for _ in 0..self.entries.len() { + let (_, stream) = &mut self.entries[idx]; + + match Pin::new(stream).poll_next(cx) { + Ready(Some(val)) => return Ready(Some((idx, val))), + Ready(None) => { + // Remove the entry + self.entries.swap_remove(idx); + + // Check if this was the last entry, if so the cursor needs + // to wrap + if idx == self.entries.len() { + idx = 0; + } else if idx < start && start <= self.entries.len() { + // The stream being swapped into the current index has + // already been polled, so skip it. + idx = idx.wrapping_add(1) % self.entries.len(); + } + } + Pending => { + idx = idx.wrapping_add(1) % self.entries.len(); + } + } + } + + // If the map is empty, then the stream is complete. + if self.entries.is_empty() { + Ready(None) + } else { + Pending + } + } +} + +impl<K, V> Stream for StreamMap<K, V> +where + K: Clone + Unpin, + V: Stream + Unpin, +{ + type Item = (K, V::Item); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + if let Some((idx, val)) = ready!(self.poll_next_entry(cx)) { + let key = self.entries[idx].0.clone(); + Poll::Ready(Some((key, val))) + } else { + Poll::Ready(None) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let mut ret = (0, Some(0)); + + for (_, stream) in &self.entries { + let hint = stream.size_hint(); + + ret.0 += hint.0; + + match (ret.1, hint.1) { + (Some(a), Some(b)) => ret.1 = Some(a + b), + (Some(_), None) => ret.1 = None, + _ => {} + } + } + + ret + } +} diff --git a/third_party/rust/tokio/src/stream/take.rs b/third_party/rust/tokio/src/stream/take.rs new file mode 100644 index 0000000000..a92430b77c --- /dev/null +++ b/third_party/rust/tokio/src/stream/take.rs @@ -0,0 +1,76 @@ +use crate::stream::Stream; + +use core::cmp; +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`take`](super::StreamExt::take) method. 
+ #[must_use = "streams do nothing unless polled"] + pub struct Take<St> { + #[pin] + stream: St, + remaining: usize, + } +} + +impl<St> fmt::Debug for Take<St> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Take") + .field("stream", &self.stream) + .finish() + } +} + +impl<St> Take<St> { + pub(super) fn new(stream: St, remaining: usize) -> Self { + Self { stream, remaining } + } +} + +impl<St> Stream for Take<St> +where + St: Stream, +{ + type Item = St::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + if *self.as_mut().project().remaining > 0 { + self.as_mut().project().stream.poll_next(cx).map(|ready| { + match &ready { + Some(_) => { + *self.as_mut().project().remaining -= 1; + } + None => { + *self.as_mut().project().remaining = 0; + } + } + ready + }) + } else { + Poll::Ready(None) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + if self.remaining == 0 { + return (0, Some(0)); + } + + let (lower, upper) = self.stream.size_hint(); + + let lower = cmp::min(lower, self.remaining as usize); + + let upper = match upper { + Some(x) if x < self.remaining as usize => Some(x), + _ => Some(self.remaining as usize), + }; + + (lower, upper) + } +} diff --git a/third_party/rust/tokio/src/stream/take_while.rs b/third_party/rust/tokio/src/stream/take_while.rs new file mode 100644 index 0000000000..cf1e160613 --- /dev/null +++ b/third_party/rust/tokio/src/stream/take_while.rs @@ -0,0 +1,79 @@ +use crate::stream::Stream; + +use core::fmt; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; + +pin_project! { + /// Stream for the [`take_while`](super::StreamExt::take_while) method. + #[must_use = "streams do nothing unless polled"] + pub struct TakeWhile<St, F> { + #[pin] + stream: St, + predicate: F, + done: bool, + } +} + +impl<St, F> fmt::Debug for TakeWhile<St, F> +where + St: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TakeWhile") + .field("stream", &self.stream) + .field("done", &self.done) + .finish() + } +} + +impl<St, F> TakeWhile<St, F> { + pub(super) fn new(stream: St, predicate: F) -> Self { + Self { + stream, + predicate, + done: false, + } + } +} + +impl<St, F> Stream for TakeWhile<St, F> +where + St: Stream, + F: FnMut(&St::Item) -> bool, +{ + type Item = St::Item; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + if !*self.as_mut().project().done { + self.as_mut().project().stream.poll_next(cx).map(|ready| { + let ready = ready.and_then(|item| { + if !(self.as_mut().project().predicate)(&item) { + None + } else { + Some(item) + } + }); + + if ready.is_none() { + *self.as_mut().project().done = true; + } + + ready + }) + } else { + Poll::Ready(None) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + if self.done { + return (0, Some(0)); + } + + let (_, upper) = self.stream.size_hint(); + + (0, upper) + } +} diff --git a/third_party/rust/tokio/src/stream/timeout.rs b/third_party/rust/tokio/src/stream/timeout.rs new file mode 100644 index 0000000000..b8a2024f6a --- /dev/null +++ b/third_party/rust/tokio/src/stream/timeout.rs @@ -0,0 +1,65 @@ +use crate::stream::{Fuse, Stream}; +use crate::time::{Delay, Elapsed, Instant}; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use pin_project_lite::pin_project; +use std::time::Duration; + +pin_project! 
{ + /// Stream returned by the [`timeout`](super::StreamExt::timeout) method. + #[must_use = "streams do nothing unless polled"] + #[derive(Debug)] + pub struct Timeout<S> { + #[pin] + stream: Fuse<S>, + deadline: Delay, + duration: Duration, + poll_deadline: bool, + } +} + +impl<S: Stream> Timeout<S> { + pub(super) fn new(stream: S, duration: Duration) -> Self { + let next = Instant::now() + duration; + let deadline = Delay::new_timeout(next, duration); + + Timeout { + stream: Fuse::new(stream), + deadline, + duration, + poll_deadline: true, + } + } +} + +impl<S: Stream> Stream for Timeout<S> { + type Item = Result<S::Item, Elapsed>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + match self.as_mut().project().stream.poll_next(cx) { + Poll::Ready(v) => { + if v.is_some() { + let next = Instant::now() + self.duration; + self.as_mut().project().deadline.reset(next); + *self.as_mut().project().poll_deadline = true; + } + return Poll::Ready(v.map(Ok)); + } + Poll::Pending => {} + }; + + if self.poll_deadline { + ready!(Pin::new(self.as_mut().project().deadline).poll(cx)); + *self.as_mut().project().poll_deadline = false; + return Poll::Ready(Some(Err(Elapsed::new()))); + } + + Poll::Pending + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.stream.size_hint() + } +} diff --git a/third_party/rust/tokio/src/stream/try_next.rs b/third_party/rust/tokio/src/stream/try_next.rs new file mode 100644 index 0000000000..59e0eb1a41 --- /dev/null +++ b/third_party/rust/tokio/src/stream/try_next.rs @@ -0,0 +1,30 @@ +use crate::stream::{Next, Stream}; + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// Future for the [`try_next`](super::StreamExt::try_next) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct TryNext<'a, St: ?Sized> { + inner: Next<'a, St>, +} + +impl<St: ?Sized + Unpin> Unpin for TryNext<'_, St> {} + +impl<'a, St: ?Sized> TryNext<'a, St> { + pub(super) fn new(stream: &'a mut St) -> Self { + Self { + inner: Next::new(stream), + } + } +} + +impl<T, E, St: ?Sized + Stream<Item = Result<T, E>> + Unpin> Future for TryNext<'_, St> { + type Output = Result<Option<T>, E>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + Pin::new(&mut self.inner).poll(cx).map(Option::transpose) + } +} diff --git a/third_party/rust/tokio/src/sync/barrier.rs b/third_party/rust/tokio/src/sync/barrier.rs new file mode 100644 index 0000000000..628633493a --- /dev/null +++ b/third_party/rust/tokio/src/sync/barrier.rs @@ -0,0 +1,136 @@ +use crate::sync::watch; + +use std::sync::Mutex; + +/// A barrier enables multiple threads to synchronize the beginning of some computation. +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use tokio::sync::Barrier; +/// +/// use futures::future::join_all; +/// use std::sync::Arc; +/// +/// let mut handles = Vec::with_capacity(10); +/// let barrier = Arc::new(Barrier::new(10)); +/// for _ in 0..10 { +/// let c = barrier.clone(); +/// // The same messages will be printed together. +/// // You will NOT see any interleaving. 
+///     handles.push(async move {
+///         println!("before wait");
+///         let wr = c.wait().await;
+///         println!("after wait");
+///         wr
+///     });
+/// }
+/// // Will not resolve until all "before wait" messages have been printed
+/// let wrs = join_all(handles).await;
+/// // Exactly one barrier will resolve as the "leader"
+/// assert_eq!(wrs.into_iter().filter(|wr| wr.is_leader()).count(), 1);
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct Barrier {
+    state: Mutex<BarrierState>,
+    wait: watch::Receiver<usize>,
+    n: usize,
+}
+
+#[derive(Debug)]
+struct BarrierState {
+    waker: watch::Sender<usize>,
+    arrived: usize,
+    generation: usize,
+}
+
+impl Barrier {
+    /// Creates a new barrier that can block a given number of threads.
+    ///
+    /// A barrier will block `n - 1` threads which call [`Barrier::wait`] and then wake up all
+    /// threads at once when the `n`th thread calls `wait`.
+    pub fn new(mut n: usize) -> Barrier {
+        let (waker, wait) = crate::sync::watch::channel(0);
+
+        if n == 0 {
+            // if n is 0, it's not clear what behavior the user wants.
+            // in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every
+            // .wait() immediately unblocks, so we adopt that here as well.
+            n = 1;
+        }
+
+        Barrier {
+            state: Mutex::new(BarrierState {
+                waker,
+                arrived: 0,
+                generation: 1,
+            }),
+            n,
+            wait,
+        }
+    }
+
+    /// Does not resolve until all tasks have rendezvoused here.
+    ///
+    /// Barriers are reusable after all threads have rendezvoused once, and can
+    /// be used continuously.
+    ///
+    /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from
+    /// [`BarrierWaitResult::is_leader`] when returning from this function; all other futures
+    /// will receive a result that returns `false` from `is_leader`.
+    pub async fn wait(&self) -> BarrierWaitResult {
+        // NOTE: we are taking a _synchronous_ lock here.
+        // It is okay to do so because the critical section is fast and never yields, so it cannot
+        // deadlock even if another future is concurrently holding the lock.
+        // It is _desirable_ to do so as synchronous Mutexes are, at least in theory, faster than
+        // the asynchronous counterparts, so we should use them where possible [citation needed].
+        // NOTE: the extra scope here is so that the compiler doesn't think `state` is held across
+        // a yield point, and thus marks the returned future as !Send.
+        let generation = {
+            let mut state = self.state.lock().unwrap();
+            let generation = state.generation;
+            state.arrived += 1;
+            if state.arrived == self.n {
+                // we are the leader for this generation
+                // wake everyone, increment the generation, and return
+                state
+                    .waker
+                    .broadcast(state.generation)
+                    .expect("there is at least one receiver");
+                state.arrived = 0;
+                state.generation += 1;
+                return BarrierWaitResult(true);
+            }
+
+            generation
+        };
+
+        // we're going to have to wait for the last of the generation to arrive
+        let mut wait = self.wait.clone();
+
+        loop {
+            // note that the first time through the loop, this _will_ yield a generation
+            // immediately, since we cloned a receiver that has never seen any values.
+            if wait.recv().await.expect("sender hasn't been closed") >= generation {
+                break;
+            }
+        }
+
+        BarrierWaitResult(false)
+    }
+}
+
+/// A `BarrierWaitResult` is returned by `wait` when all threads in the `Barrier` have rendezvoused.
+#[derive(Debug, Clone)]
+pub struct BarrierWaitResult(bool);
+
+impl BarrierWaitResult {
+    /// Returns `true` if this task's call to [`Barrier::wait`] was the one
+    /// chosen as the "leader thread".
+ /// + /// Only one thread will have `true` returned from their result, all other threads will have + /// `false` returned. + pub fn is_leader(&self) -> bool { + self.0 + } +} diff --git a/third_party/rust/tokio/src/sync/batch_semaphore.rs b/third_party/rust/tokio/src/sync/batch_semaphore.rs new file mode 100644 index 0000000000..436737a670 --- /dev/null +++ b/third_party/rust/tokio/src/sync/batch_semaphore.rs @@ -0,0 +1,547 @@ +//! # Implementation Details +//! +//! The semaphore is implemented using an intrusive linked list of waiters. An +//! atomic counter tracks the number of available permits. If the semaphore does +//! not contain the required number of permits, the task attempting to acquire +//! permits places its waker at the end of a queue. When new permits are made +//! available (such as by releasing an initial acquisition), they are assigned +//! to the task at the front of the queue, waking that task if its requested +//! number of permits is met. +//! +//! Because waiters are enqueued at the back of the linked list and dequeued +//! from the front, the semaphore is fair. Tasks trying to acquire large numbers +//! of permits at a time will always be woken eventually, even if many other +//! tasks are acquiring smaller numbers of permits. This means that in a +//! use-case like tokio's read-write lock, writers will not be starved by +//! readers. +use crate::loom::cell::UnsafeCell; +use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::{Mutex, MutexGuard}; +use crate::util::linked_list::{self, LinkedList}; + +use std::future::Future; +use std::marker::PhantomPinned; +use std::pin::Pin; +use std::ptr::NonNull; +use std::sync::atomic::Ordering::*; +use std::task::Poll::*; +use std::task::{Context, Poll, Waker}; +use std::{cmp, fmt}; + +/// An asynchronous counting semaphore which permits waiting on multiple permits at once. +pub(crate) struct Semaphore { + waiters: Mutex<Waitlist>, + /// The current number of available permits in the semaphore. + permits: AtomicUsize, +} + +struct Waitlist { + queue: LinkedList<Waiter>, + closed: bool, +} + +/// Error returned by `Semaphore::try_acquire`. +#[derive(Debug)] +pub(crate) enum TryAcquireError { + Closed, + NoPermits, +} +/// Error returned by `Semaphore::acquire`. +#[derive(Debug)] +pub(crate) struct AcquireError(()); + +pub(crate) struct Acquire<'a> { + node: Waiter, + semaphore: &'a Semaphore, + num_permits: u16, + queued: bool, +} + +/// An entry in the wait queue. +struct Waiter { + /// The current state of the waiter. + /// + /// This is either the number of remaining permits required by + /// the waiter, or a flag indicating that the waiter is not yet queued. + state: AtomicUsize, + + /// The waker to notify the task awaiting permits. + /// + /// # Safety + /// + /// This may only be accessed while the wait queue is locked. + waker: UnsafeCell<Option<Waker>>, + + /// Intrusive linked-list pointers. + /// + /// # Safety + /// + /// This may only be accessed while the wait queue is locked. + /// + /// TODO: Ideally, we would be able to use loom to enforce that + /// this isn't accessed concurrently. However, it is difficult to + /// use a `UnsafeCell` here, since the `Link` trait requires _returning_ + /// references to `Pointers`, and `UnsafeCell` requires that checked access + /// take place inside a closure. We should consider changing `Pointers` to + /// use `UnsafeCell` internally. + pointers: linked_list::Pointers<Waiter>, + + /// Should not be `Unpin`. 
+    _p: PhantomPinned,
+}
+
+impl Semaphore {
+    /// The maximum number of permits which a semaphore can hold.
+    ///
+    /// Note that this reserves three bits of flags in the permit counter, but
+    /// we only actually use one of them. However, the previous semaphore
+    /// implementation used three bits, so we will continue to reserve them to
+    /// avoid a breaking change if additional flags need to be added in the
+    /// future.
+    pub(crate) const MAX_PERMITS: usize = std::usize::MAX >> 3;
+    const CLOSED: usize = 1;
+    const PERMIT_SHIFT: usize = 1;
+
+    /// Creates a new semaphore with the initial number of permits.
+    pub(crate) fn new(permits: usize) -> Self {
+        assert!(
+            permits <= Self::MAX_PERMITS,
+            "a semaphore may not have more than MAX_PERMITS permits ({})",
+            Self::MAX_PERMITS
+        );
+        Self {
+            permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT),
+            waiters: Mutex::new(Waitlist {
+                queue: LinkedList::new(),
+                closed: false,
+            }),
+        }
+    }
+
+    /// Returns the current number of available permits.
+    pub(crate) fn available_permits(&self) -> usize {
+        self.permits.load(Acquire) >> Self::PERMIT_SHIFT
+    }
+
+    /// Adds `added` new permits to the semaphore.
+    pub(crate) fn release(&self, added: usize) {
+        if added == 0 {
+            return;
+        }
+
+        // Assign permits to the wait queue
+        self.add_permits_locked(added, self.waiters.lock().unwrap());
+    }
+
+    /// Closes the semaphore. This prevents the semaphore from issuing new
+    /// permits and notifies all pending waiters.
+    // This will be used once the bounded MPSC is updated to use the new
+    // semaphore implementation.
+    #[allow(dead_code)]
+    pub(crate) fn close(&self) {
+        let mut waiters = self.waiters.lock().unwrap();
+        // If the semaphore's permits counter has enough permits for an
+        // unqueued waiter to acquire all the permits it needs immediately,
+        // it won't touch the wait list. Therefore, we have to set a bit on
+        // the permit counter as well. However, we must do this while
+        // holding the lock --- otherwise, if we set the bit and then wait
+        // to acquire the lock we'll enter an inconsistent state where the
+        // permit counter is closed, but the wait list is not.
+        self.permits.fetch_or(Self::CLOSED, Release);
+        waiters.closed = true;
+        while let Some(mut waiter) = waiters.queue.pop_back() {
+            let waker = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) };
+            if let Some(waker) = waker {
+                waker.wake();
+            }
+        }
+    }
+
+    pub(crate) fn try_acquire(&self, num_permits: u16) -> Result<(), TryAcquireError> {
+        let mut curr = self.permits.load(Acquire);
+        let num_permits = (num_permits as usize) << Self::PERMIT_SHIFT;
+        loop {
+            // Has the semaphore closed?
+            if curr & Self::CLOSED > 0 {
+                return Err(TryAcquireError::Closed);
+            }
+
+            // Are there enough permits remaining?
+            if curr < num_permits {
+                return Err(TryAcquireError::NoPermits);
+            }
+
+            let next = curr - num_permits;
+
+            match self.permits.compare_exchange(curr, next, AcqRel, Acquire) {
+                Ok(_) => return Ok(()),
+                Err(actual) => curr = actual,
+            }
+        }
+    }
+
+    pub(crate) fn acquire(&self, num_permits: u16) -> Acquire<'_> {
+        Acquire::new(self, num_permits)
+    }
+
+    /// Releases `rem` permits to the semaphore's wait list, starting from the
+    /// end of the queue.
+    ///
+    /// If `rem` exceeds the number of permits needed by the wait list, the
+    /// remainder is assigned back to the semaphore.
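The shifted representation used by `MAX_PERMITS`, `CLOSED`, and `PERMIT_SHIFT` above packs a closed flag into the low bit of the permit counter. A standalone sketch of just that arithmetic (the `encode`/`available` helpers are illustrative, not part of this module):

```rust
const PERMIT_SHIFT: usize = 1;
const CLOSED: usize = 1;

// Encode a permit count into the packed counter representation.
fn encode(permits: usize) -> usize {
    permits << PERMIT_SHIFT
}

// Decode the available permits, ignoring the flag bit.
fn available(state: usize) -> usize {
    state >> PERMIT_SHIFT
}

fn main() {
    let mut state = encode(3);
    assert_eq!(available(state), 3);

    // Closing sets the low bit without disturbing the permit count.
    state |= CLOSED;
    assert_eq!(available(state), 3);
    assert!(state & CLOSED > 0);
}
```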
+ fn add_permits_locked(&self, mut rem: usize, waiters: MutexGuard<'_, Waitlist>) { + let mut wakers: [Option<Waker>; 8] = Default::default(); + let mut lock = Some(waiters); + let mut is_empty = false; + while rem > 0 { + let mut waiters = lock.take().unwrap_or_else(|| self.waiters.lock().unwrap()); + 'inner: for slot in &mut wakers[..] { + // Was the waiter assigned enough permits to wake it? + match waiters.queue.last() { + Some(waiter) => { + if !waiter.assign_permits(&mut rem) { + break 'inner; + } + } + None => { + is_empty = true; + // If we assigned permits to all the waiters in the queue, and there are + // still permits left over, assign them back to the semaphore. + break 'inner; + } + }; + let mut waiter = waiters.queue.pop_back().unwrap(); + *slot = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) }; + } + + if rem > 0 && is_empty { + let permits = rem << Self::PERMIT_SHIFT; + assert!( + permits < Self::MAX_PERMITS, + "cannot add more than MAX_PERMITS permits ({})", + Self::MAX_PERMITS + ); + let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release); + assert!( + prev + permits <= Self::MAX_PERMITS, + "number of added permits ({}) would overflow MAX_PERMITS ({})", + rem, + Self::MAX_PERMITS + ); + rem = 0; + } + + drop(waiters); // release the lock + + wakers + .iter_mut() + .filter_map(Option::take) + .for_each(Waker::wake); + } + + assert_eq!(rem, 0); + } + + fn poll_acquire( + &self, + cx: &mut Context<'_>, + num_permits: u16, + node: Pin<&mut Waiter>, + queued: bool, + ) -> Poll<Result<(), AcquireError>> { + let mut acquired = 0; + + let needed = if queued { + node.state.load(Acquire) << Self::PERMIT_SHIFT + } else { + (num_permits as usize) << Self::PERMIT_SHIFT + }; + + let mut lock = None; + // First, try to take the requested number of permits from the + // semaphore. + let mut curr = self.permits.load(Acquire); + let mut waiters = loop { + // Has the semaphore closed? + if curr & Self::CLOSED > 0 { + return Ready(Err(AcquireError::closed())); + } + + let mut remaining = 0; + let total = curr + .checked_add(acquired) + .expect("number of permits must not overflow"); + let (next, acq) = if total >= needed { + let next = curr - (needed - acquired); + (next, needed >> Self::PERMIT_SHIFT) + } else { + remaining = (needed - acquired) - curr; + (0, curr >> Self::PERMIT_SHIFT) + }; + + if remaining > 0 && lock.is_none() { + // No permits were immediately available, so this permit will + // (probably) need to wait. We'll need to acquire a lock on the + // wait queue before continuing. We need to do this _before_ the + // CAS that sets the new value of the semaphore's `permits` + // counter. Otherwise, if we subtract the permits and then + // acquire the lock, we might miss additional permits being + // added while waiting for the lock. + lock = Some(self.waiters.lock().unwrap()); + } + + match self.permits.compare_exchange(curr, next, AcqRel, Acquire) { + Ok(_) => { + acquired += acq; + if remaining == 0 { + if !queued { + return Ready(Ok(())); + } else if lock.is_none() { + break self.waiters.lock().unwrap(); + } + } + break lock.expect("lock must be acquired before waiting"); + } + Err(actual) => curr = actual, + } + }; + + if waiters.closed { + return Ready(Err(AcquireError::closed())); + } + + if node.assign_permits(&mut acquired) { + self.add_permits_locked(acquired, waiters); + return Ready(Ok(())); + } + + assert_eq!(acquired, 0); + + // Otherwise, register the waker & enqueue the node. 
+ node.waker.with_mut(|waker| { + // Safety: the wait list is locked, so we may modify the waker. + let waker = unsafe { &mut *waker }; + // Do we need to register the new waker? + if waker + .as_ref() + .map(|waker| !waker.will_wake(cx.waker())) + .unwrap_or(true) + { + *waker = Some(cx.waker().clone()); + } + }); + + // If the waiter is not already in the wait queue, enqueue it. + if !queued { + let node = unsafe { + let node = Pin::into_inner_unchecked(node) as *mut _; + NonNull::new_unchecked(node) + }; + + waiters.queue.push_front(node); + } + + Pending + } +} + +impl fmt::Debug for Semaphore { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Semaphore") + .field("permits", &self.permits.load(Relaxed)) + .finish() + } +} + +impl Waiter { + fn new(num_permits: u16) -> Self { + Waiter { + waker: UnsafeCell::new(None), + state: AtomicUsize::new(num_permits as usize), + pointers: linked_list::Pointers::new(), + _p: PhantomPinned, + } + } + + /// Assign permits to the waiter. + /// + /// Returns `true` if the waiter should be removed from the queue + fn assign_permits(&self, n: &mut usize) -> bool { + let mut curr = self.state.load(Acquire); + loop { + let assign = cmp::min(curr, *n); + let next = curr - assign; + match self.state.compare_exchange(curr, next, AcqRel, Acquire) { + Ok(_) => { + *n -= assign; + return next == 0; + } + Err(actual) => curr = actual, + } + } + } +} + +impl Future for Acquire<'_> { + type Output = Result<(), AcquireError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let (node, semaphore, needed, queued) = self.project(); + match semaphore.poll_acquire(cx, needed, node, *queued) { + Pending => { + *queued = true; + Pending + } + Ready(r) => { + r?; + *queued = false; + Ready(Ok(())) + } + } + } +} + +impl<'a> Acquire<'a> { + fn new(semaphore: &'a Semaphore, num_permits: u16) -> Self { + Self { + node: Waiter::new(num_permits), + semaphore, + num_permits, + queued: false, + } + } + + fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u16, &mut bool) { + fn is_unpin<T: Unpin>() {} + unsafe { + // Safety: all fields other than `node` are `Unpin` + + is_unpin::<&Semaphore>(); + is_unpin::<&mut bool>(); + is_unpin::<u16>(); + + let this = self.get_unchecked_mut(); + ( + Pin::new_unchecked(&mut this.node), + &this.semaphore, + this.num_permits, + &mut this.queued, + ) + } + } +} + +impl Drop for Acquire<'_> { + fn drop(&mut self) { + // If the future is completed, there is no node in the wait list, so we + // can skip acquiring the lock. + if !self.queued { + return; + } + + // This is where we ensure safety. The future is being dropped, + // which means we must ensure that the waiter entry is no longer stored + // in the linked list. + let mut waiters = match self.semaphore.waiters.lock() { + Ok(lock) => lock, + // Removing the node from the linked list is necessary to ensure + // safety. Even if the lock was poisoned, we need to make sure it is + // removed from the linked list before dropping it --- otherwise, + // the list will contain a dangling pointer to this node. + Err(e) => e.into_inner(), + }; + + // remove the entry from the list + let node = NonNull::from(&mut self.node); + // Safety: we have locked the wait list. 
+ unsafe { waiters.queue.remove(node) }; + + let acquired_permits = self.num_permits as usize - self.node.state.load(Acquire); + if acquired_permits > 0 { + self.semaphore.add_permits_locked(acquired_permits, waiters); + } + } +} + +// Safety: the `Acquire` future is not `Sync` automatically because it contains +// a `Waiter`, which, in turn, contains an `UnsafeCell`. However, the +// `UnsafeCell` is only accessed when the future is borrowed mutably (either in +// `poll` or in `drop`). Therefore, it is safe (although not particularly +// _useful_) for the future to be borrowed immutably across threads. +unsafe impl Sync for Acquire<'_> {} + +// ===== impl AcquireError ==== + +impl AcquireError { + fn closed() -> AcquireError { + AcquireError(()) + } +} + +impl fmt::Display for AcquireError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "semaphore closed") + } +} + +impl std::error::Error for AcquireError {} + +// ===== impl TryAcquireError ===== + +impl TryAcquireError { + /// Returns `true` if the error was caused by a closed semaphore. + #[allow(dead_code)] // may be used later! + pub(crate) fn is_closed(&self) -> bool { + match self { + TryAcquireError::Closed => true, + _ => false, + } + } + + /// Returns `true` if the error was caused by calling `try_acquire` on a + /// semaphore with no available permits. + #[allow(dead_code)] // may be used later! + pub(crate) fn is_no_permits(&self) -> bool { + match self { + TryAcquireError::NoPermits => true, + _ => false, + } + } +} + +impl fmt::Display for TryAcquireError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TryAcquireError::Closed => write!(fmt, "{}", "semaphore closed"), + TryAcquireError::NoPermits => write!(fmt, "{}", "no permits available"), + } + } +} + +impl std::error::Error for TryAcquireError {} + +/// # Safety +/// +/// `Waiter` is forced to be !Unpin. +unsafe impl linked_list::Link for Waiter { + // XXX: ideally, we would be able to use `Pin` here, to enforce the + // invariant that list entries may not move while in the list. However, we + // can't do this currently, as using `Pin<&'a mut Waiter>` as the `Handle` + // type would require `Semaphore` to be generic over a lifetime. We can't + // use `Pin<*mut Waiter>`, as raw pointers are `Unpin` regardless of whether + // or not they dereference to an `!Unpin` target. + type Handle = NonNull<Waiter>; + type Target = Waiter; + + fn as_raw(handle: &Self::Handle) -> NonNull<Waiter> { + *handle + } + + unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> { + ptr + } + + unsafe fn pointers(mut target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> { + NonNull::from(&mut target.as_mut().pointers) + } +} diff --git a/third_party/rust/tokio/src/sync/broadcast.rs b/third_party/rust/tokio/src/sync/broadcast.rs new file mode 100644 index 0000000000..05a58070ee --- /dev/null +++ b/third_party/rust/tokio/src/sync/broadcast.rs @@ -0,0 +1,1046 @@ +//! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by +//! all consumers. +//! +//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`] +//! values. [`Sender`] handles are clone-able, allowing concurrent send and +//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as +//! long as `T` is also `Send` or `Sync` respectively. +//! +//! When a value is sent, **all** [`Receiver`] handles are notified and will +//! receive the value. 
The value is stored once inside the channel and cloned on
+//! demand for each receiver. Once all receivers have received a clone of the
+//! value, the value is released from the channel.
+//!
+//! A channel is created by calling [`channel`], specifying the maximum number
+//! of messages the channel can retain at any given time.
+//!
+//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The
+//! returned [`Receiver`] will receive values sent **after** the call to
+//! `subscribe`.
+//!
+//! ## Lagging
+//!
+//! As sent messages must be retained until **all** [`Receiver`] handles receive
+//! a clone, broadcast channels are susceptible to the "slow receiver" problem.
+//! In this case, all but one receiver are able to receive values at the rate
+//! they are sent. Because one receiver is stalled, the channel starts to fill
+//! up.
+//!
+//! This broadcast channel implementation handles this case by setting a hard
+//! upper bound on the number of values the channel may retain at any given
+//! time. This upper bound is passed to the [`channel`] function as an argument.
+//!
+//! If a value is sent when the channel is at capacity, the oldest value
+//! currently held by the channel is released. This frees up space for the new
+//! value. Any receiver that has not yet seen the released value will return
+//! [`RecvError::Lagged`] the next time [`recv`] is called.
+//!
+//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is
+//! updated to the oldest value contained by the channel. The next call to
+//! [`recv`] will return this value.
+//!
+//! This behavior enables a receiver to detect when it has lagged so far behind
+//! that data has been dropped. The caller may decide how to respond to this:
+//! either by aborting its task or by tolerating lost messages and resuming
+//! consumption of the channel.
+//!
+//! ## Closing
+//!
+//! When **all** [`Sender`] handles have been dropped, no new values may be
+//! sent. At this point, the channel is "closed". Once a receiver has received
+//! all values retained by the channel, the next call to [`recv`] will return
+//! with [`RecvError::Closed`].
+//!
+//! [`Sender`]: crate::sync::broadcast::Sender
+//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
+//! [`Receiver`]: crate::sync::broadcast::Receiver
+//! [`channel`]: crate::sync::broadcast::channel
+//! [`RecvError::Lagged`]: crate::sync::broadcast::RecvError::Lagged
+//! [`RecvError::Closed`]: crate::sync::broadcast::RecvError::Closed
+//! [`recv`]: crate::sync::broadcast::Receiver::recv
+//!
+//! # Examples
+//!
+//! Basic usage
+//!
+//! ```
+//! use tokio::sync::broadcast;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let (tx, mut rx1) = broadcast::channel(16);
+//!     let mut rx2 = tx.subscribe();
+//!
+//!     tokio::spawn(async move {
+//!         assert_eq!(rx1.recv().await.unwrap(), 10);
+//!         assert_eq!(rx1.recv().await.unwrap(), 20);
+//!     });
+//!
+//!     tokio::spawn(async move {
+//!         assert_eq!(rx2.recv().await.unwrap(), 10);
+//!         assert_eq!(rx2.recv().await.unwrap(), 20);
+//!     });
+//!
+//!     tx.send(10).unwrap();
+//!     tx.send(20).unwrap();
+//! }
+//! ```
+//!
+//! Handling lag
+//!
+//! ```
+//! use tokio::sync::broadcast;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let (tx, mut rx) = broadcast::channel(2);
+//!
+//!     tx.send(10).unwrap();
+//!     tx.send(20).unwrap();
+//!     tx.send(30).unwrap();
+//!
+//!     // The receiver lagged behind
+//!     assert!(rx.recv().await.is_err());
+//!
+//!     // At this point, we can abort or continue with lost messages
+//!
+//!     assert_eq!(20, rx.recv().await.unwrap());
+//!     assert_eq!(30, rx.recv().await.unwrap());
+//! }
+//! ```
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::{spin_loop_hint, AtomicBool, AtomicPtr, AtomicUsize};
+use crate::loom::sync::{Arc, Condvar, Mutex};
+
+use std::fmt;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::Ordering::SeqCst;
+use std::task::{Context, Poll, Waker};
+use std::usize;
+
+/// Sending-half of the [`broadcast`] channel.
+///
+/// May be used from many threads. Messages can be sent with
+/// [`send`][Sender::send].
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (tx, mut rx1) = broadcast::channel(16);
+///     let mut rx2 = tx.subscribe();
+///
+///     tokio::spawn(async move {
+///         assert_eq!(rx1.recv().await.unwrap(), 10);
+///         assert_eq!(rx1.recv().await.unwrap(), 20);
+///     });
+///
+///     tokio::spawn(async move {
+///         assert_eq!(rx2.recv().await.unwrap(), 10);
+///         assert_eq!(rx2.recv().await.unwrap(), 20);
+///     });
+///
+///     tx.send(10).unwrap();
+///     tx.send(20).unwrap();
+/// }
+/// ```
+///
+/// [`broadcast`]: crate::sync::broadcast
+pub struct Sender<T> {
+    shared: Arc<Shared<T>>,
+}
+
+/// Receiving-half of the [`broadcast`] channel.
+///
+/// Must not be used concurrently. Messages may be retrieved using
+/// [`recv`][Receiver::recv].
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (tx, mut rx1) = broadcast::channel(16);
+///     let mut rx2 = tx.subscribe();
+///
+///     tokio::spawn(async move {
+///         assert_eq!(rx1.recv().await.unwrap(), 10);
+///         assert_eq!(rx1.recv().await.unwrap(), 20);
+///     });
+///
+///     tokio::spawn(async move {
+///         assert_eq!(rx2.recv().await.unwrap(), 10);
+///         assert_eq!(rx2.recv().await.unwrap(), 20);
+///     });
+///
+///     tx.send(10).unwrap();
+///     tx.send(20).unwrap();
+/// }
+/// ```
+///
+/// [`broadcast`]: crate::sync::broadcast
+pub struct Receiver<T> {
+    /// State shared with all receivers and senders.
+    shared: Arc<Shared<T>>,
+
+    /// Next position to read from
+    next: u64,
+
+    /// Waiter state
+    wait: Arc<WaitNode>,
+}
+
+/// Error returned by [`Sender::send`][Sender::send].
+///
+/// A **send** operation can only fail if there are no active receivers,
+/// implying that the message could never be received. The error contains the
+/// message being sent as a payload so it can be recovered.
+#[derive(Debug)]
+pub struct SendError<T>(pub T);
+
+/// An error returned from the [`recv`] function on a [`Receiver`].
+///
+/// [`recv`]: crate::sync::broadcast::Receiver::recv
+/// [`Receiver`]: crate::sync::broadcast::Receiver
+#[derive(Debug, PartialEq)]
+pub enum RecvError {
+    /// There are no more active senders implying no further messages will ever
+    /// be sent.
+    Closed,
+
+    /// The receiver lagged too far behind. Attempting to receive again will
+    /// return the oldest message still retained by the channel.
+    ///
+    /// Includes the number of skipped messages.
+    Lagged(u64),
+}
+
+/// An error returned from the [`try_recv`] function on a [`Receiver`].
+///
+/// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
+/// [`Receiver`]: crate::sync::broadcast::Receiver
+#[derive(Debug, PartialEq)]
+pub enum TryRecvError {
+    /// The channel is currently empty. There are still active
+    /// [`Sender`][Sender] handles, so data may yet become available.
+ Empty, + + /// There are no more active senders implying no further messages will ever + /// be sent. + Closed, + + /// The receiver lagged too far behind and has been forcibly disconnected. + /// Attempting to receive again will return the oldest message still + /// retained by the channel. + /// + /// Includes the number of skipped messages. + Lagged(u64), +} + +/// Data shared between senders and receivers +struct Shared<T> { + /// slots in the channel + buffer: Box<[Slot<T>]>, + + /// Mask a position -> index + mask: usize, + + /// Tail of the queue + tail: Mutex<Tail>, + + /// Notifies a sender that the slot is unlocked + condvar: Condvar, + + /// Stack of pending waiters + wait_stack: AtomicPtr<WaitNode>, + + /// Number of outstanding Sender handles + num_tx: AtomicUsize, +} + +/// Next position to write a value +struct Tail { + /// Next position to write to + pos: u64, + + /// Number of active receivers + rx_cnt: usize, +} + +/// Slot in the buffer +struct Slot<T> { + /// Remaining number of receivers that are expected to see this value. + /// + /// When this goes to zero, the value is released. + rem: AtomicUsize, + + /// Used to lock the `write` field. + lock: AtomicUsize, + + /// The value being broadcast + /// + /// Synchronized by `state` + write: Write<T>, +} + +/// A write in the buffer +struct Write<T> { + /// Uniquely identifies this write + pos: UnsafeCell<u64>, + + /// The written value + val: UnsafeCell<Option<T>>, +} + +/// Tracks a waiting receiver +#[derive(Debug)] +struct WaitNode { + /// `true` if queued + queued: AtomicBool, + + /// Task to wake when a permit is made available. + waker: AtomicWaker, + + /// Next pointer in the stack of waiting senders. + next: UnsafeCell<*const WaitNode>, +} + +struct RecvGuard<'a, T> { + slot: &'a Slot<T>, + tail: &'a Mutex<Tail>, + condvar: &'a Condvar, +} + +/// Max number of receivers. Reserve space to lock. +const MAX_RECEIVERS: usize = usize::MAX >> 1; + +/// Create a bounded, multi-producer, multi-consumer channel where each sent +/// value is broadcasted to all active receivers. +/// +/// All data sent on [`Sender`] will become available on every active +/// [`Receiver`] in the same order as it was sent. +/// +/// The `Sender` can be cloned to `send` to the same channel from multiple +/// points in the process or it can be used concurrently from an `Arc`. New +/// `Receiver` handles are created by calling [`Sender::subscribe`]. +/// +/// If all [`Receiver`] handles are dropped, the `send` method will return a +/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`] +/// method will return a [`RecvError`]. 
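+///
+/// A small sketch of the closed-channel behavior described above (the
+/// assertions here are illustrative additions, not part of the original
+/// docs):
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (tx, mut rx) = broadcast::channel(16);
+///
+///     tx.send(10).unwrap();
+///     drop(tx);
+///
+///     // Values retained by the channel are still delivered...
+///     assert_eq!(10, rx.recv().await.unwrap());
+///
+///     // ...and only then does the channel report it is closed.
+///     assert!(rx.recv().await.is_err());
+/// }
+/// ```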
+/// +/// [`Sender`]: crate::sync::broadcast::Sender +/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe +/// [`Receiver`]: crate::sync::broadcast::Receiver +/// [`recv`]: crate::sync::broadcast::Receiver::recv +/// [`SendError`]: crate::sync::broadcast::SendError +/// [`RecvError`]: crate::sync::broadcast::RecvError +/// +/// # Examples +/// +/// ``` +/// use tokio::sync::broadcast; +/// +/// #[tokio::main] +/// async fn main() { +/// let (tx, mut rx1) = broadcast::channel(16); +/// let mut rx2 = tx.subscribe(); +/// +/// tokio::spawn(async move { +/// assert_eq!(rx1.recv().await.unwrap(), 10); +/// assert_eq!(rx1.recv().await.unwrap(), 20); +/// }); +/// +/// tokio::spawn(async move { +/// assert_eq!(rx2.recv().await.unwrap(), 10); +/// assert_eq!(rx2.recv().await.unwrap(), 20); +/// }); +/// +/// tx.send(10).unwrap(); +/// tx.send(20).unwrap(); +/// } +/// ``` +pub fn channel<T>(mut capacity: usize) -> (Sender<T>, Receiver<T>) { + assert!(capacity > 0, "capacity is empty"); + assert!(capacity <= usize::MAX >> 1, "requested capacity too large"); + + // Round to a power of two + capacity = capacity.next_power_of_two(); + + let mut buffer = Vec::with_capacity(capacity); + + for i in 0..capacity { + buffer.push(Slot { + rem: AtomicUsize::new(0), + lock: AtomicUsize::new(0), + write: Write { + pos: UnsafeCell::new((i as u64).wrapping_sub(capacity as u64)), + val: UnsafeCell::new(None), + }, + }); + } + + let shared = Arc::new(Shared { + buffer: buffer.into_boxed_slice(), + mask: capacity - 1, + tail: Mutex::new(Tail { pos: 0, rx_cnt: 1 }), + condvar: Condvar::new(), + wait_stack: AtomicPtr::new(ptr::null_mut()), + num_tx: AtomicUsize::new(1), + }); + + let rx = Receiver { + shared: shared.clone(), + next: 0, + wait: Arc::new(WaitNode { + queued: AtomicBool::new(false), + waker: AtomicWaker::new(), + next: UnsafeCell::new(ptr::null()), + }), + }; + + let tx = Sender { shared }; + + (tx, rx) +} + +unsafe impl<T: Send> Send for Sender<T> {} +unsafe impl<T: Send> Sync for Sender<T> {} + +unsafe impl<T: Send> Send for Receiver<T> {} +unsafe impl<T: Send> Sync for Receiver<T> {} + +impl<T> Sender<T> { + /// Attempts to send a value to all active [`Receiver`] handles, returning + /// it back if it could not be sent. + /// + /// A successful send occurs when there is at least one active [`Receiver`] + /// handle. An unsuccessful send would be one where all associated + /// [`Receiver`] handles have already been dropped. + /// + /// # Return + /// + /// On success, the number of subscribed [`Receiver`] handles is returned. + /// This does not mean that this number of receivers will see the message as + /// a receiver may drop before receiving the message. + /// + /// # Note + /// + /// A return value of `Ok` **does not** mean that the sent value will be + /// observed by all or any of the active [`Receiver`] handles. [`Receiver`] + /// handles may be dropped before receiving the sent message. + /// + /// A return value of `Err` **does not** mean that future calls to `send` + /// will fail. New [`Receiver`] handles may be created by calling + /// [`subscribe`]. 
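+    ///
+    /// A minimal sketch of that recovery path (the assertions are
+    /// illustrative additions, assuming the behavior described above):
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, rx) = broadcast::channel(16);
+    ///     drop(rx);
+    ///
+    ///     // No active receivers: the value comes back in the error.
+    ///     assert!(tx.send(1).is_err());
+    ///
+    ///     // A new subscriber makes subsequent sends succeed again.
+    ///     let mut rx = tx.subscribe();
+    ///     assert_eq!(1, tx.send(2).unwrap());
+    ///     assert_eq!(2, rx.recv().await.unwrap());
+    /// }
+    /// ```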
+ /// + /// [`Receiver`]: crate::sync::broadcast::Receiver + /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx1) = broadcast::channel(16); + /// let mut rx2 = tx.subscribe(); + /// + /// tokio::spawn(async move { + /// assert_eq!(rx1.recv().await.unwrap(), 10); + /// assert_eq!(rx1.recv().await.unwrap(), 20); + /// }); + /// + /// tokio::spawn(async move { + /// assert_eq!(rx2.recv().await.unwrap(), 10); + /// assert_eq!(rx2.recv().await.unwrap(), 20); + /// }); + /// + /// tx.send(10).unwrap(); + /// tx.send(20).unwrap(); + /// } + /// ``` + pub fn send(&self, value: T) -> Result<usize, SendError<T>> { + self.send2(Some(value)) + .map_err(|SendError(maybe_v)| SendError(maybe_v.unwrap())) + } + + /// Creates a new [`Receiver`] handle that will receive values sent **after** + /// this call to `subscribe`. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, _rx) = broadcast::channel(16); + /// + /// // Will not be seen + /// tx.send(10).unwrap(); + /// + /// let mut rx = tx.subscribe(); + /// + /// tx.send(20).unwrap(); + /// + /// let value = rx.recv().await.unwrap(); + /// assert_eq!(20, value); + /// } + /// ``` + pub fn subscribe(&self) -> Receiver<T> { + let shared = self.shared.clone(); + + let mut tail = shared.tail.lock().unwrap(); + + if tail.rx_cnt == MAX_RECEIVERS { + panic!("max receivers"); + } + + tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow"); + let next = tail.pos; + + drop(tail); + + Receiver { + shared, + next, + wait: Arc::new(WaitNode { + queued: AtomicBool::new(false), + waker: AtomicWaker::new(), + next: UnsafeCell::new(ptr::null()), + }), + } + } + + /// Returns the number of active receivers + /// + /// An active receiver is a [`Receiver`] handle returned from [`channel`] or + /// [`subscribe`]. These are the handles that will receive values sent on + /// this [`Sender`]. + /// + /// # Note + /// + /// It is not guaranteed that a sent message will reach this number of + /// receivers. Active receivers may never call [`recv`] again before + /// dropping. 
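+    ///
+    /// Dropping a handle lowers the count again; a quick sketch (the
+    /// assertions are illustrative additions):
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, rx) = broadcast::channel::<i32>(16);
+    ///     assert_eq!(1, tx.receiver_count());
+    ///
+    ///     drop(rx);
+    ///     assert_eq!(0, tx.receiver_count());
+    /// }
+    /// ```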
+ /// + /// [`recv`]: crate::sync::broadcast::Receiver::recv + /// [`Receiver`]: crate::sync::broadcast::Receiver + /// [`Sender`]: crate::sync::broadcast::Sender + /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe + /// [`channel`]: crate::sync::broadcast::channel + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, _rx1) = broadcast::channel(16); + /// + /// assert_eq!(1, tx.receiver_count()); + /// + /// let mut _rx2 = tx.subscribe(); + /// + /// assert_eq!(2, tx.receiver_count()); + /// + /// tx.send(10).unwrap(); + /// } + /// ``` + pub fn receiver_count(&self) -> usize { + let tail = self.shared.tail.lock().unwrap(); + tail.rx_cnt + } + + fn send2(&self, value: Option<T>) -> Result<usize, SendError<Option<T>>> { + let mut tail = self.shared.tail.lock().unwrap(); + + if tail.rx_cnt == 0 { + return Err(SendError(value)); + } + + // Position to write into + let pos = tail.pos; + let rem = tail.rx_cnt; + let idx = (pos & self.shared.mask as u64) as usize; + + // Update the tail position + tail.pos = tail.pos.wrapping_add(1); + + // Get the slot + let slot = &self.shared.buffer[idx]; + + // Acquire the write lock + let mut prev = slot.lock.fetch_or(1, SeqCst); + + while prev & !1 != 0 { + // Concurrent readers, we must go to sleep + tail = self.shared.condvar.wait(tail).unwrap(); + + prev = slot.lock.load(SeqCst); + + if prev & 1 == 0 { + // The writer lock bit was cleared while this thread was + // sleeping. This can only happen if a newer write happened on + // this slot by another thread. Bail early as an optimization, + // there is nothing left to do. + return Ok(rem); + } + } + + if tail.pos.wrapping_sub(pos) > self.shared.buffer.len() as u64 { + // There is a newer pending write to the same slot. + return Ok(rem); + } + + // Slot lock acquired + slot.write.pos.with_mut(|ptr| unsafe { *ptr = pos }); + slot.write.val.with_mut(|ptr| unsafe { *ptr = value }); + + // Set remaining receivers + slot.rem.store(rem, SeqCst); + + // Release the slot lock + slot.lock.store(0, SeqCst); + + // Release the mutex. This must happen after the slot lock is released, + // otherwise the writer lock bit could be cleared while another thread + // is in the critical section. + drop(tail); + + // Notify waiting receivers + self.notify_rx(); + + Ok(rem) + } + + fn notify_rx(&self) { + let mut curr = self.shared.wait_stack.swap(ptr::null_mut(), SeqCst) as *const WaitNode; + + while !curr.is_null() { + let waiter = unsafe { Arc::from_raw(curr) }; + + // Update `curr` before toggling `queued` and waking + curr = waiter.next.with(|ptr| unsafe { *ptr }); + + // Unset queued + waiter.queued.store(false, SeqCst); + + // Wake + waiter.waker.wake(); + } + } +} + +impl<T> Clone for Sender<T> { + fn clone(&self) -> Sender<T> { + let shared = self.shared.clone(); + shared.num_tx.fetch_add(1, SeqCst); + + Sender { shared } + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + if 1 == self.shared.num_tx.fetch_sub(1, SeqCst) { + let _ = self.send2(None); + } + } +} + +impl<T> Receiver<T> { + /// Locks the next value if there is one. 
+ /// + /// The caller is responsible for unlocking + fn recv_ref(&mut self, spin: bool) -> Result<RecvGuard<'_, T>, TryRecvError> { + let idx = (self.next & self.shared.mask as u64) as usize; + + // The slot holding the next value to read + let slot = &self.shared.buffer[idx]; + + // Lock the slot + if !slot.try_rx_lock() { + if spin { + while !slot.try_rx_lock() { + spin_loop_hint(); + } + } else { + return Err(TryRecvError::Empty); + } + } + + let guard = RecvGuard { + slot, + tail: &self.shared.tail, + condvar: &self.shared.condvar, + }; + + if guard.pos() != self.next { + let pos = guard.pos(); + + guard.drop_no_rem_dec(); + + if pos.wrapping_add(self.shared.buffer.len() as u64) == self.next { + return Err(TryRecvError::Empty); + } else { + let tail = self.shared.tail.lock().unwrap(); + + // `tail.pos` points to the slot the **next** send writes to. + // Because a receiver is lagging, this slot also holds the + // oldest value. To make the positions match, we subtract the + // capacity. + let next = tail.pos.wrapping_sub(self.shared.buffer.len() as u64); + let missed = next.wrapping_sub(self.next); + + self.next = next; + + return Err(TryRecvError::Lagged(missed)); + } + } + + self.next = self.next.wrapping_add(1); + + Ok(guard) + } +} + +impl<T> Receiver<T> +where + T: Clone, +{ + /// Attempts to return a pending value on this receiver without awaiting. + /// + /// This is useful for a flavor of "optimistic check" before deciding to + /// await on a receiver. + /// + /// Compared with [`recv`], this function has three failure cases instead of one + /// (one for closed, one for an empty buffer, one for a lagging receiver). + /// + /// `Err(TryRecvError::Closed)` is returned when all `Sender` halves have + /// dropped, indicating that no further values can be sent on the channel. + /// + /// If the [`Receiver`] handle falls behind, once the channel is full, newly + /// sent values will overwrite old values. At this point, a call to [`recv`] + /// will return with `Err(TryRecvError::Lagged)` and the [`Receiver`]'s + /// internal cursor is updated to point to the oldest value still held by + /// the channel. A subsequent call to [`try_recv`] will return this value + /// **unless** it has been since overwritten. If there are no values to + /// receive, `Err(TryRecvError::Empty)` is returned. + /// + /// [`recv`]: crate::sync::broadcast::Receiver::recv + /// [`Receiver`]: crate::sync::broadcast::Receiver + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::broadcast; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = broadcast::channel(16); + /// + /// assert!(rx.try_recv().is_err()); + /// + /// tx.send(10).unwrap(); + /// + /// let value = rx.try_recv().unwrap(); + /// assert_eq!(10, value); + /// } + /// ``` + pub fn try_recv(&mut self) -> Result<T, TryRecvError> { + let guard = self.recv_ref(false)?; + guard.clone_value().ok_or(TryRecvError::Closed) + } + + #[doc(hidden)] // TODO: document + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> { + if let Some(value) = ok_empty(self.try_recv())? { + return Poll::Ready(Ok(value)); + } + + self.register_waker(cx.waker()); + + if let Some(value) = ok_empty(self.try_recv())? { + Poll::Ready(Ok(value)) + } else { + Poll::Pending + } + } + + /// Receives the next value for this receiver. + /// + /// Each [`Receiver`] handle will receive a clone of all values sent + /// **after** it has subscribed. 
+    ///
+    /// `Err(RecvError::Closed)` is returned when all `Sender` halves have
+    /// dropped, indicating that no further values can be sent on the channel.
+    ///
+    /// If the [`Receiver`] handle falls behind, once the channel is full, newly
+    /// sent values will overwrite old values. At this point, a call to [`recv`]
+    /// will return with `Err(RecvError::Lagged)` and the [`Receiver`]'s
+    /// internal cursor is updated to point to the oldest value still held by
+    /// the channel. A subsequent call to [`recv`] will return this value
+    /// **unless** it has since been overwritten.
+    ///
+    /// [`Receiver`]: crate::sync::broadcast::Receiver
+    /// [`recv`]: crate::sync::broadcast::Receiver::recv
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, mut rx1) = broadcast::channel(16);
+    ///     let mut rx2 = tx.subscribe();
+    ///
+    ///     tokio::spawn(async move {
+    ///         assert_eq!(rx1.recv().await.unwrap(), 10);
+    ///         assert_eq!(rx1.recv().await.unwrap(), 20);
+    ///     });
+    ///
+    ///     tokio::spawn(async move {
+    ///         assert_eq!(rx2.recv().await.unwrap(), 10);
+    ///         assert_eq!(rx2.recv().await.unwrap(), 20);
+    ///     });
+    ///
+    ///     tx.send(10).unwrap();
+    ///     tx.send(20).unwrap();
+    /// }
+    /// ```
+    ///
+    /// Handling lag
+    ///
+    /// ```
+    /// use tokio::sync::broadcast;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, mut rx) = broadcast::channel(2);
+    ///
+    ///     tx.send(10).unwrap();
+    ///     tx.send(20).unwrap();
+    ///     tx.send(30).unwrap();
+    ///
+    ///     // The receiver lagged behind
+    ///     assert!(rx.recv().await.is_err());
+    ///
+    ///     // At this point, we can abort or continue with lost messages
+    ///
+    ///     assert_eq!(20, rx.recv().await.unwrap());
+    ///     assert_eq!(30, rx.recv().await.unwrap());
+    /// }
+    /// ```
+    pub async fn recv(&mut self) -> Result<T, RecvError> {
+        use crate::future::poll_fn;
+
+        poll_fn(|cx| self.poll_recv(cx)).await
+    }
+
+    fn register_waker(&self, cx: &Waker) {
+        self.wait.waker.register_by_ref(cx);
+
+        if !self.wait.queued.load(SeqCst) {
+            // Set `queued` before queuing.
+            self.wait.queued.store(true, SeqCst);
+
+            let mut curr = self.shared.wait_stack.load(SeqCst);
+
+            // The ref count is decremented in `notify_rx` when all nodes are
+            // removed from the waiter stack.
+            let node = Arc::into_raw(self.wait.clone()) as *mut _;
+
+            loop {
+                // Safety: `queued == false` means the caller has exclusive
+                // access to `self.wait.next`.
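+                //
+                // This is a textbook lock-free stack push: point the new
+                // node at the observed head, then try to swing `wait_stack`
+                // from that head to the new node; on a lost race, retry with
+                // the head installed by the winning thread.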
+ self.wait.next.with_mut(|ptr| unsafe { *ptr = curr }); + + let res = self + .shared + .wait_stack + .compare_exchange(curr, node, SeqCst, SeqCst); + + match res { + Ok(_) => return, + Err(actual) => curr = actual, + } + } + } + } +} + +#[cfg(feature = "stream")] +impl<T> crate::stream::Stream for Receiver<T> +where + T: Clone, +{ + type Item = Result<T, RecvError>; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<Option<Result<T, RecvError>>> { + self.poll_recv(cx).map(|v| match v { + Ok(v) => Some(Ok(v)), + lag @ Err(RecvError::Lagged(_)) => Some(lag), + Err(RecvError::Closed) => None, + }) + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + let mut tail = self.shared.tail.lock().unwrap(); + + tail.rx_cnt -= 1; + let until = tail.pos; + + drop(tail); + + while self.next != until { + match self.recv_ref(true) { + // Ignore the value + Ok(_) => {} + // The channel is closed + Err(TryRecvError::Closed) => break, + // Ignore lagging, we will catch up + Err(TryRecvError::Lagged(..)) => {} + // Can't be empty + Err(TryRecvError::Empty) => panic!("unexpected empty broadcast channel"), + } + } + } +} + +impl<T> Drop for Shared<T> { + fn drop(&mut self) { + // Clear the wait stack + let mut curr = self.wait_stack.with_mut(|ptr| *ptr as *const WaitNode); + + while !curr.is_null() { + let waiter = unsafe { Arc::from_raw(curr) }; + curr = waiter.next.with(|ptr| unsafe { *ptr }); + } + } +} + +impl<T> fmt::Debug for Sender<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "broadcast::Sender") + } +} + +impl<T> fmt::Debug for Receiver<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "broadcast::Receiver") + } +} + +impl<T> Slot<T> { + /// Tries to lock the slot for a receiver. If `false`, then a sender holds the + /// lock and the calling task will be notified once the sender has released + /// the lock. + fn try_rx_lock(&self) -> bool { + let mut curr = self.lock.load(SeqCst); + + loop { + if curr & 1 == 1 { + // Locked by sender + return false; + } + + // Only increment (by 2) if the LSB "lock" bit is not set. + let res = self.lock.compare_exchange(curr, curr + 2, SeqCst, SeqCst); + + match res { + Ok(_) => return true, + Err(actual) => curr = actual, + } + } + } + + fn rx_unlock(&self, tail: &Mutex<Tail>, condvar: &Condvar, rem_dec: bool) { + if rem_dec { + // Decrement the remaining counter + if 1 == self.rem.fetch_sub(1, SeqCst) { + // Last receiver, drop the value + self.write.val.with_mut(|ptr| unsafe { *ptr = None }); + } + } + + if 1 == self.lock.fetch_sub(2, SeqCst) - 2 { + // First acquire the lock to make sure our sender is waiting on the + // condition variable, otherwise the notification could be lost. 
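+            // Acquiring and then immediately dropping the tail mutex below
+            // acts as a barrier: a sender has either not yet entered
+            // `Condvar::wait` (so it still observes state under the mutex)
+            // or is already parked and will be woken by `notify_all`.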
+            mem::drop(tail.lock().unwrap());
+            // Wake up senders
+            condvar.notify_all();
+        }
+    }
+}
+
+impl<'a, T> RecvGuard<'a, T> {
+    fn pos(&self) -> u64 {
+        self.slot.write.pos.with(|ptr| unsafe { *ptr })
+    }
+
+    fn clone_value(&self) -> Option<T>
+    where
+        T: Clone,
+    {
+        self.slot.write.val.with(|ptr| unsafe { (*ptr).clone() })
+    }
+
+    fn drop_no_rem_dec(self) {
+        self.slot.rx_unlock(self.tail, self.condvar, false);
+
+        mem::forget(self);
+    }
+}
+
+impl<'a, T> Drop for RecvGuard<'a, T> {
+    fn drop(&mut self) {
+        self.slot.rx_unlock(self.tail, self.condvar, true)
+    }
+}
+
+fn ok_empty<T>(res: Result<T, TryRecvError>) -> Result<Option<T>, RecvError> {
+    match res {
+        Ok(value) => Ok(Some(value)),
+        Err(TryRecvError::Empty) => Ok(None),
+        Err(TryRecvError::Lagged(n)) => Err(RecvError::Lagged(n)),
+        Err(TryRecvError::Closed) => Err(RecvError::Closed),
+    }
+}
+
+impl fmt::Display for RecvError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            RecvError::Closed => write!(f, "channel closed"),
+            RecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
+        }
+    }
+}
+
+impl std::error::Error for RecvError {}
+
+impl fmt::Display for TryRecvError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            TryRecvError::Empty => write!(f, "channel empty"),
+            TryRecvError::Closed => write!(f, "channel closed"),
+            TryRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
+        }
+    }
+}
+
+impl std::error::Error for TryRecvError {}
diff --git a/third_party/rust/tokio/src/sync/mod.rs b/third_party/rust/tokio/src/sync/mod.rs
new file mode 100644
index 0000000000..0607f78ad4
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mod.rs
@@ -0,0 +1,472 @@
+#![cfg_attr(loom, allow(dead_code, unreachable_pub, unused_imports))]
+
+//! Synchronization primitives for use in asynchronous contexts.
+//!
+//! Tokio programs tend to be organized as a set of [tasks] where each task
+//! operates independently and may be executed on separate physical threads. The
+//! synchronization primitives provided in this module permit these independent
+//! tasks to communicate with each other.
+//!
+//! [tasks]: crate::task
+//!
+//! # Message passing
+//!
+//! The most common form of synchronization in a Tokio program is message
+//! passing. Two tasks operate independently and send messages to each other to
+//! synchronize. Doing so has the advantage of avoiding shared state.
+//!
+//! Message passing is implemented using channels. A channel supports sending a
+//! message from one producer task to one or more consumer tasks. There are a
+//! few flavors of channels provided by Tokio. Each channel flavor supports
+//! different message passing patterns. When a channel supports multiple
+//! producers, many separate tasks may **send** messages. When a channel
+//! supports multiple consumers, many different separate tasks may **receive**
+//! messages.
+//!
+//! Tokio provides many different channel flavors as different message passing
+//! patterns are best handled with different implementations.
+//!
+//! ## `oneshot` channel
+//!
+//! The [`oneshot` channel][oneshot] supports sending a **single** value from a
+//! single producer to a single consumer. This channel is usually used to send
+//! the result of a computation to a waiter.
+//!
+//! **Example:** using a `oneshot` channel to receive the result of a
+//! computation.
+//!
+//! ```
+//! use tokio::sync::oneshot;
+//!
+//! async fn some_computation() -> String {
"represents the result of the computation".to_string() +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! let (tx, rx) = oneshot::channel(); +//! +//! tokio::spawn(async move { +//! let res = some_computation().await; +//! tx.send(res).unwrap(); +//! }); +//! +//! // Do other work while the computation is happening in the background +//! +//! // Wait for the computation result +//! let res = rx.await.unwrap(); +//! } +//! ``` +//! +//! Note, if the task produces the the computation result as its final action +//! before terminating, the [`JoinHandle`] can be used to receive the +//! computation result instead of allocating resources for the `oneshot` +//! channel. Awaiting on [`JoinHandle`] returns `Result`. If the task panics, +//! the `Joinhandle` yields `Err` with the panic cause. +//! +//! **Example:** +//! +//! ``` +//! async fn some_computation() -> String { +//! "the result of the computation".to_string() +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! let join_handle = tokio::spawn(async move { +//! some_computation().await +//! }); +//! +//! // Do other work while the computation is happening in the background +//! +//! // Wait for the computation result +//! let res = join_handle.await.unwrap(); +//! } +//! ``` +//! +//! [`JoinHandle`]: crate::task::JoinHandle +//! +//! ## `mpsc` channel +//! +//! The [`mpsc` channel][mpsc] supports sending **many** values from **many** +//! producers to a single consumer. This channel is often used to send work to a +//! task or to receive the result of many computations. +//! +//! **Example:** using an mpsc to incrementally stream the results of a series +//! of computations. +//! +//! ``` +//! use tokio::sync::mpsc; +//! +//! async fn some_computation(input: u32) -> String { +//! format!("the result of computation {}", input) +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! let (mut tx, mut rx) = mpsc::channel(100); +//! +//! tokio::spawn(async move { +//! for i in 0..10 { +//! let res = some_computation(i).await; +//! tx.send(res).await.unwrap(); +//! } +//! }); +//! +//! while let Some(res) = rx.recv().await { +//! println!("got = {}", res); +//! } +//! } +//! ``` +//! +//! The argument to `mpsc::channel` is the channel capacity. This is the maximum +//! number of values that can be stored in the channel pending receipt at any +//! given time. Properly setting this value is key in implementing robust +//! programs as the channel capacity plays a critical part in handling back +//! pressure. +//! +//! A common concurrency pattern for resource management is to spawn a task +//! dedicated to managing that resource and using message passing betwen other +//! tasks to interact with the resource. The resource may be anything that may +//! not be concurrently used. Some examples include a socket and program state. +//! For example, if multiple tasks need to send data over a single socket, spawn +//! a task to manage the socket and use a channel to synchronize. +//! +//! **Example:** sending data from many tasks over a single socket using message +//! passing. +//! +//! ```no_run +//! use tokio::io::{self, AsyncWriteExt}; +//! use tokio::net::TcpStream; +//! use tokio::sync::mpsc; +//! +//! #[tokio::main] +//! async fn main() -> io::Result<()> { +//! let mut socket = TcpStream::connect("www.example.com:1234").await?; +//! let (tx, mut rx) = mpsc::channel(100); +//! +//! for _ in 0..10 { +//! // Each task needs its own `tx` handle. This is done by cloning the +//! // original handle. +//! let mut tx = tx.clone(); +//! 
+//! tokio::spawn(async move { +//! tx.send(&b"data to write"[..]).await.unwrap(); +//! }); +//! } +//! +//! // The `rx` half of the channel returns `None` once **all** `tx` clones +//! // drop. To ensure `None` is returned, drop the handle owned by the +//! // current task. If this `tx` handle is not dropped, there will always +//! // be a single outstanding `tx` handle. +//! drop(tx); +//! +//! while let Some(res) = rx.recv().await { +//! socket.write_all(res).await?; +//! } +//! +//! Ok(()) +//! } +//! ``` +//! +//! The [`mpsc`][mpsc] and [`oneshot`][oneshot] channels can be combined to +//! provide a request / response type synchronization pattern with a shared +//! resource. A task is spawned to synchronize a resource and waits on commands +//! received on a [`mpsc`][mpsc] channel. Each command includes a +//! [`oneshot`][oneshot] `Sender` on which the result of the command is sent. +//! +//! **Example:** use a task to synchronize a `u64` counter. Each task sends an +//! "fetch and increment" command. The counter value **before** the increment is +//! sent over the provided `oneshot` channel. +//! +//! ``` +//! use tokio::sync::{oneshot, mpsc}; +//! use Command::Increment; +//! +//! enum Command { +//! Increment, +//! // Other commands can be added here +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! let (cmd_tx, mut cmd_rx) = mpsc::channel::<(Command, oneshot::Sender<u64>)>(100); +//! +//! // Spawn a task to manage the counter +//! tokio::spawn(async move { +//! let mut counter: u64 = 0; +//! +//! while let Some((cmd, response)) = cmd_rx.recv().await { +//! match cmd { +//! Increment => { +//! let prev = counter; +//! counter += 1; +//! response.send(prev).unwrap(); +//! } +//! } +//! } +//! }); +//! +//! let mut join_handles = vec![]; +//! +//! // Spawn tasks that will send the increment command. +//! for _ in 0..10 { +//! let mut cmd_tx = cmd_tx.clone(); +//! +//! join_handles.push(tokio::spawn(async move { +//! let (resp_tx, resp_rx) = oneshot::channel(); +//! +//! cmd_tx.send((Increment, resp_tx)).await.ok().unwrap(); +//! let res = resp_rx.await.unwrap(); +//! +//! println!("previous value = {}", res); +//! })); +//! } +//! +//! // Wait for all tasks to complete +//! for join_handle in join_handles.drain(..) { +//! join_handle.await.unwrap(); +//! } +//! } +//! ``` +//! +//! ## `broadcast` channel +//! +//! The [`broadcast` channel][broadcast] supports sending **many** values from +//! **many** producers to **many** consumers. Each consumer will receive +//! **each** value. This channel can be used to implement "fan out" style +//! patterns common with pub / sub or "chat" systems. +//! +//! This channel tends to be used less often than `oneshot` and `mpsc` but still +//! has its use cases. +//! +//! Basic usage +//! +//! ``` +//! use tokio::sync::broadcast; +//! +//! #[tokio::main] +//! async fn main() { +//! let (tx, mut rx1) = broadcast::channel(16); +//! let mut rx2 = tx.subscribe(); +//! +//! tokio::spawn(async move { +//! assert_eq!(rx1.recv().await.unwrap(), 10); +//! assert_eq!(rx1.recv().await.unwrap(), 20); +//! }); +//! +//! tokio::spawn(async move { +//! assert_eq!(rx2.recv().await.unwrap(), 10); +//! assert_eq!(rx2.recv().await.unwrap(), 20); +//! }); +//! +//! tx.send(10).unwrap(); +//! tx.send(20).unwrap(); +//! } +//! ``` +//! +//! ## `watch` channel +//! +//! The [`watch` channel][watch] supports sending **many** values from a +//! **single** producer to **many** consumers. However, only the **most recent** +//! value is stored in the channel. 
Consumers are notified when a new value is
+//! sent, but there is no guarantee that consumers will see **all** values.
+//!
+//! The [`watch` channel] is similar to a [`broadcast` channel] with capacity 1.
+//!
+//! Use cases for the [`watch` channel] include broadcasting configuration
+//! changes or signalling program state changes, such as transitioning to
+//! shutdown.
+//!
+//! **Example:** use a `watch` channel to notify tasks of configuration changes.
+//! In this example, a configuration file is checked periodically. When the file
+//! changes, the configuration changes are signalled to consumers.
+//!
+//! ```
+//! use tokio::sync::watch;
+//! use tokio::time::{self, Duration, Instant};
+//!
+//! use std::io;
+//!
+//! #[derive(Debug, Clone, Eq, PartialEq)]
+//! struct Config {
+//!     timeout: Duration,
+//! }
+//!
+//! impl Config {
+//!     async fn load_from_file() -> io::Result<Config> {
+//!         // file loading and deserialization logic here
+//! # Ok(Config { timeout: Duration::from_secs(1) })
+//!     }
+//! }
+//!
+//! async fn my_async_operation() {
+//!     // Do something here
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     // Load initial configuration value
+//!     let mut config = Config::load_from_file().await.unwrap();
+//!
+//!     // Create the watch channel, initialized with the loaded configuration
+//!     let (tx, rx) = watch::channel(config.clone());
+//!
+//!     // Spawn a task to monitor the file.
+//!     tokio::spawn(async move {
+//!         loop {
+//!             // Wait 10 seconds between checks
+//!             time::delay_for(Duration::from_secs(10)).await;
+//!
+//!             // Load the configuration file
+//!             let new_config = Config::load_from_file().await.unwrap();
+//!
+//!             // If the configuration changed, send the new config value
+//!             // on the watch channel.
+//!             if new_config != config {
+//!                 tx.broadcast(new_config.clone()).unwrap();
+//!                 config = new_config;
+//!             }
+//!         }
+//!     });
+//!
+//!     let mut handles = vec![];
+//!
+//!     // Spawn tasks that run the async operation for at most `timeout`. If
+//!     // the timeout elapses, restart the operation.
+//!     //
+//!     // The task simultaneously watches the `Config` for changes. When the
+//!     // timeout duration changes, the timeout is updated without restarting
+//!     // the in-flight operation.
+//!     for _ in 0..5 {
+//!         // Clone a config watch handle for use in this task
+//!         let mut rx = rx.clone();
+//!
+//!         let handle = tokio::spawn(async move {
+//!             // Start the initial operation and pin the future to the stack.
+//!             // Pinning to the stack is required to resume the operation
+//!             // across multiple calls to `select!`
+//!             let op = my_async_operation();
+//!             tokio::pin!(op);
+//!
+//!             // Receive the **initial** configuration value. As this is the
+//!             // first time the config is received from the watch, it will
+//!             // always complete immediately.
+//!             let mut conf = rx.recv().await.unwrap();
+//!
+//!             let mut op_start = Instant::now();
+//!             let mut delay = time::delay_until(op_start + conf.timeout);
+//!
+//!             loop {
+//!                 tokio::select! {
+//!                     _ = &mut delay => {
+//!                         // The operation elapsed. Restart it
+//!                         op.set(my_async_operation());
+//!
+//!                         // Track the new start time
+//!                         op_start = Instant::now();
+//!
+//!                         // Restart the timeout
+//!                         delay = time::delay_until(op_start + conf.timeout);
+//!                     }
+//!                     new_conf = rx.recv() => {
+//!                         conf = new_conf.unwrap();
+//!
+//!                         // The configuration has been updated. Update the
+//!                         // `delay` using the new `timeout` value.
+//!                         delay.reset(op_start + conf.timeout);
+//!                     }
+//!                     _ = &mut op => {
+//!                         // The operation completed!
+//!                         return
+//!                     }
+//!                 }
+//!             }
+//!         });
+//!
+//!         handles.push(handle);
+//!     }
+//!
+//!     for handle in handles.drain(..) {
+//!         handle.await.unwrap();
+//!     }
+//! }
+//! ```
+//!
+//! # State synchronization
+//!
+//! The remaining synchronization primitives focus on synchronizing state.
+//! These are asynchronous equivalents to versions provided by `std`. They
+//! operate in a similar way to their `std` counterparts but will wait
+//! asynchronously instead of blocking the thread.
+//!
+//! * [`Barrier`][Barrier] Ensures multiple tasks will wait for each other to
+//!   reach a point in the program, before continuing execution all together.
+//!
+//! * [`Mutex`][Mutex] Mutual Exclusion mechanism, which ensures that at most
+//!   one thread at a time is able to access some data.
+//!
+//! * [`Notify`][Notify] Basic task notification. `Notify` supports notifying a
+//!   receiving task without sending data. In this case, the task wakes up and
+//!   resumes processing.
+//!
+//! * [`RwLock`][RwLock] Provides a mutual exclusion mechanism which allows
+//!   multiple readers at the same time, while allowing only one writer at a
+//!   time. In some cases, this can be more efficient than a mutex.
+//!
+//! * [`Semaphore`][Semaphore] Limits the amount of concurrency. A semaphore
+//!   holds a number of permits, which tasks may request in order to enter a
+//!   critical section. Semaphores are useful for implementing limiting or
+//!   bounding of any kind.
+
+cfg_sync! {
+    mod barrier;
+    pub use barrier::{Barrier, BarrierWaitResult};
+
+    pub mod broadcast;
+
+    pub mod mpsc;
+
+    mod mutex;
+    pub use mutex::{Mutex, MutexGuard};
+
+    mod notify;
+    pub use notify::Notify;
+
+    pub mod oneshot;
+
+    pub(crate) mod batch_semaphore;
+    pub(crate) mod semaphore_ll;
+    mod semaphore;
+    pub use semaphore::{Semaphore, SemaphorePermit};
+
+    mod rwlock;
+    pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+    mod task;
+    pub(crate) use task::AtomicWaker;
+
+    pub mod watch;
+}
+
+cfg_not_sync! {
+    cfg_atomic_waker_impl! {
+        mod task;
+        pub(crate) use task::AtomicWaker;
+    }
+
+    #[cfg(any(
+        feature = "rt-core",
+        feature = "process",
+        feature = "signal"))]
+    pub(crate) mod oneshot;
+
+    cfg_signal! {
+        pub(crate) mod mpsc;
+        pub(crate) mod semaphore_ll;
+    }
+}
+
+/// Unit tests
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/tokio/src/sync/mpsc/block.rs b/third_party/rust/tokio/src/sync/mpsc/block.rs
new file mode 100644
index 0000000000..7bf161967b
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/block.rs
@@ -0,0 +1,387 @@
+use crate::loom::{
+    cell::UnsafeCell,
+    sync::atomic::{AtomicPtr, AtomicUsize},
+    thread,
+};
+
+use std::mem::MaybeUninit;
+use std::ops;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release};
+
+/// A block in a linked list.
+///
+/// Each block in the list can hold up to `BLOCK_CAP` messages.
+pub(crate) struct Block<T> {
+    /// The start index of this block.
+    ///
+    /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`.
+    start_index: usize,
+
+    /// The next block in the linked list.
+    next: AtomicPtr<Block<T>>,
+
+    /// Bitfield tracking slots that are ready to have their values consumed.
+    ready_slots: AtomicUsize,
+
+    /// The observed `tail_position` value *after* the block has been passed by
+    /// `block_tail`.
+    observed_tail_position: UnsafeCell<usize>,
+
+    /// Array containing values pushed into the block.
Values are stored in a + /// continuous array in order to improve cache line behavior when reading. + /// The values must be manually dropped. + values: Values<T>, +} + +pub(crate) enum Read<T> { + Value(T), + Closed, +} + +struct Values<T>([UnsafeCell<MaybeUninit<T>>; BLOCK_CAP]); + +use super::BLOCK_CAP; + +/// Masks an index to get the block identifier +const BLOCK_MASK: usize = !(BLOCK_CAP - 1); + +/// Masks an index to get the value offset in a block. +const SLOT_MASK: usize = BLOCK_CAP - 1; + +/// Flag tracking that a block has gone through the sender's release routine. +/// +/// When this is set, the receiver may consider freeing the block. +const RELEASED: usize = 1 << BLOCK_CAP; + +/// Flag tracking all senders dropped. +/// +/// When this flag is set, the send half of the channel has closed. +const TX_CLOSED: usize = RELEASED << 1; + +/// Mask covering all bits used to track slot readiness. +const READY_MASK: usize = RELEASED - 1; + +/// Returns the index of the first slot in the block referenced by `slot_index`. +#[inline(always)] +pub(crate) fn start_index(slot_index: usize) -> usize { + BLOCK_MASK & slot_index +} + +/// Returns the offset into the block referenced by `slot_index`. +#[inline(always)] +pub(crate) fn offset(slot_index: usize) -> usize { + SLOT_MASK & slot_index +} + +impl<T> Block<T> { + pub(crate) fn new(start_index: usize) -> Block<T> { + Block { + // The absolute index in the channel of the first slot in the block. + start_index, + + // Pointer to the next block in the linked list. + next: AtomicPtr::new(ptr::null_mut()), + + ready_slots: AtomicUsize::new(0), + + observed_tail_position: UnsafeCell::new(0), + + // Value storage + values: unsafe { Values::uninitialized() }, + } + } + + /// Returns `true` if the block matches the given index + pub(crate) fn is_at_index(&self, index: usize) -> bool { + debug_assert!(offset(index) == 0); + self.start_index == index + } + + /// Returns the number of blocks between `self` and the block at the + /// specified index. + /// + /// `start_index` must represent a block *after* `self`. + pub(crate) fn distance(&self, other_index: usize) -> usize { + debug_assert!(offset(other_index) == 0); + other_index.wrapping_sub(self.start_index) / BLOCK_CAP + } + + /// Reads the value at the given offset. + /// + /// Returns `None` if the slot is empty. + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * No concurrent access to the slot. + pub(crate) unsafe fn read(&self, slot_index: usize) -> Option<Read<T>> { + let offset = offset(slot_index); + + let ready_bits = self.ready_slots.load(Acquire); + + if !is_ready(ready_bits, offset) { + if is_tx_closed(ready_bits) { + return Some(Read::Closed); + } + + return None; + } + + // Get the value + let value = self.values[offset].with(|ptr| ptr::read(ptr)); + + Some(Read::Value(value.assume_init())) + } + + /// Writes a value to the block at the given offset. + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * The slot is empty. + /// * No concurrent access to the slot. + pub(crate) unsafe fn write(&self, slot_index: usize, value: T) { + // Get the offset into the block + let slot_offset = offset(slot_index); + + self.values[slot_offset].with_mut(|ptr| { + ptr::write(ptr, MaybeUninit::new(value)); + }); + + // Release the value. After this point, the slot ref may no longer + // be used. It is possible for the receiver to free the memory at + // any point. 
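+        //
+        // `set_ready` publishes the write: it sets this slot's bit in
+        // `ready_slots` with `Release` ordering, which pairs with the
+        // `Acquire` load in `read` above, so a receiver that observes the
+        // bit also observes the value written here.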
+ self.set_ready(slot_offset); + } + + /// Signal to the receiver that the sender half of the list is closed. + pub(crate) unsafe fn tx_close(&self) { + self.ready_slots.fetch_or(TX_CLOSED, Release); + } + + /// Resets the block to a blank state. This enables reusing blocks in the + /// channel. + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * All slots are empty. + /// * The caller holds a unique pointer to the block. + pub(crate) unsafe fn reclaim(&mut self) { + self.start_index = 0; + self.next = AtomicPtr::new(ptr::null_mut()); + self.ready_slots = AtomicUsize::new(0); + } + + /// Releases the block to the rx half for freeing. + /// + /// This function is called by the tx half once it can be guaranteed that no + /// more senders will attempt to access the block. + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * The block will no longer be accessed by any sender. + pub(crate) unsafe fn tx_release(&self, tail_position: usize) { + // Track the observed tail_position. Any sender targetting a greater + // tail_position is guaranteed to not access this block. + self.observed_tail_position + .with_mut(|ptr| *ptr = tail_position); + + // Set the released bit, signalling to the receiver that it is safe to + // free the block's memory as soon as all slots **prior** to + // `observed_tail_position` have been filled. + self.ready_slots.fetch_or(RELEASED, Release); + } + + /// Mark a slot as ready + fn set_ready(&self, slot: usize) { + let mask = 1 << slot; + self.ready_slots.fetch_or(mask, Release); + } + + /// Returns `true` when all slots have their `ready` bits set. + /// + /// This indicates that the block is in its final state and will no longer + /// be mutated. + /// + /// # Implementation + /// + /// The implementation walks each slot checking the `ready` flag. It might + /// be that it would make more sense to coalesce ready flags as bits in a + /// single atomic cell. However, this could have negative impact on cache + /// behavior as there would be many more mutations to a single slot. + pub(crate) fn is_final(&self) -> bool { + self.ready_slots.load(Acquire) & READY_MASK == READY_MASK + } + + /// Returns the `observed_tail_position` value, if set + pub(crate) fn observed_tail_position(&self) -> Option<usize> { + if 0 == RELEASED & self.ready_slots.load(Acquire) { + None + } else { + Some(self.observed_tail_position.with(|ptr| unsafe { *ptr })) + } + } + + /// Loads the next block + pub(crate) fn load_next(&self, ordering: Ordering) -> Option<NonNull<Block<T>>> { + let ret = NonNull::new(self.next.load(ordering)); + + debug_assert!(unsafe { + ret.map(|block| block.as_ref().start_index == self.start_index.wrapping_add(BLOCK_CAP)) + .unwrap_or(true) + }); + + ret + } + + /// Pushes `block` as the next block in the link. + /// + /// Returns Ok if successful, otherwise, a pointer to the next block in + /// the list is returned. + /// + /// This requires that the next pointer is null. + /// + /// # Ordering + /// + /// This performs a compare-and-swap on `next` using AcqRel ordering. + /// + /// # Safety + /// + /// To maintain safety, the caller must ensure: + /// + /// * `block` is not freed until it has been removed from the list. 
+ pub(crate) unsafe fn try_push( + &self, + block: &mut NonNull<Block<T>>, + ordering: Ordering, + ) -> Result<(), NonNull<Block<T>>> { + block.as_mut().start_index = self.start_index.wrapping_add(BLOCK_CAP); + + let next_ptr = self + .next + .compare_and_swap(ptr::null_mut(), block.as_ptr(), ordering); + + match NonNull::new(next_ptr) { + Some(next_ptr) => Err(next_ptr), + None => Ok(()), + } + } + + /// Grows the `Block` linked list by allocating and appending a new block. + /// + /// The next block in the linked list is returned. This may or may not be + /// the one allocated by the function call. + /// + /// # Implementation + /// + /// It is assumed that `self.next` is null. A new block is allocated with + /// `start_index` set to be the next block. A compare-and-swap is performed + /// with AcqRel memory ordering. If the compare-and-swap is successful, the + /// newly allocated block is released to other threads walking the block + /// linked list. If the compare-and-swap fails, the current thread acquires + /// the next block in the linked list, allowing the current thread to access + /// the slots. + pub(crate) fn grow(&self) -> NonNull<Block<T>> { + // Create the new block. It is assumed that the block will become the + // next one after `&self`. If this turns out to not be the case, + // `start_index` is updated accordingly. + let new_block = Box::new(Block::new(self.start_index + BLOCK_CAP)); + + let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) }; + + // Attempt to store the block. The first compare-and-swap attempt is + // "unrolled" due to minor differences in logic + // + // `AcqRel` is used as the ordering **only** when attempting the + // compare-and-swap on self.next. + // + // If the compare-and-swap fails, then the actual value of the cell is + // returned from this function and accessed by the caller. Given this, + // the memory must be acquired. + // + // `Release` ensures that the newly allocated block is available to + // other threads acquiring the next pointer. + let next = NonNull::new(self.next.compare_and_swap( + ptr::null_mut(), + new_block.as_ptr(), + AcqRel, + )); + + let next = match next { + Some(next) => next, + None => { + // The compare-and-swap succeeded and the newly allocated block + // is successfully pushed. + return new_block; + } + }; + + // There already is a next block in the linked list. The newly allocated + // block could be dropped and the discovered next block returned; + // however, that would be wasteful. Instead, the linked list is walked + // by repeatedly attempting to compare-and-swap the pointer into the + // `next` register until the compare-and-swap succeed. + // + // Care is taken to update new_block's start_index field as appropriate. + + let mut curr = next; + + // TODO: Should this iteration be capped? + loop { + let actual = unsafe { curr.as_ref().try_push(&mut new_block, AcqRel) }; + + curr = match actual { + Ok(_) => { + return next; + } + Err(curr) => curr, + }; + + // When running outside of loom, this calls `spin_loop_hint`. + thread::yield_now(); + } + } +} + +/// Returns `true` if the specificed slot has a value ready to be consumed. +fn is_ready(bits: usize, slot: usize) -> bool { + let mask = 1 << slot; + mask == mask & bits +} + +/// Returns `true` if the closed flag has been set. 
+fn is_tx_closed(bits: usize) -> bool { + TX_CLOSED == bits & TX_CLOSED +} + +impl<T> Values<T> { + unsafe fn uninitialized() -> Values<T> { + let mut vals = MaybeUninit::uninit(); + + // When fuzzing, `UnsafeCell` needs to be initialized. + if_loom! { + let p = vals.as_mut_ptr() as *mut UnsafeCell<MaybeUninit<T>>; + for i in 0..BLOCK_CAP { + p.add(i) + .write(UnsafeCell::new(MaybeUninit::uninit())); + } + } + + Values(vals.assume_init()) + } +} + +impl<T> ops::Index<usize> for Values<T> { + type Output = UnsafeCell<MaybeUninit<T>>; + + fn index(&self, index: usize) -> &Self::Output { + self.0.index(index) + } +} diff --git a/third_party/rust/tokio/src/sync/mpsc/bounded.rs b/third_party/rust/tokio/src/sync/mpsc/bounded.rs new file mode 100644 index 0000000000..afca8c524d --- /dev/null +++ b/third_party/rust/tokio/src/sync/mpsc/bounded.rs @@ -0,0 +1,479 @@ +use crate::sync::mpsc::chan; +use crate::sync::mpsc::error::{ClosedError, SendError, TryRecvError, TrySendError}; +use crate::sync::semaphore_ll as semaphore; + +cfg_time! { + use crate::sync::mpsc::error::SendTimeoutError; + use crate::time::Duration; +} + +use std::fmt; +use std::task::{Context, Poll}; + +/// Send values to the associated `Receiver`. +/// +/// Instances are created by the [`channel`](channel) function. +pub struct Sender<T> { + chan: chan::Tx<T, Semaphore>, +} + +impl<T> Clone for Sender<T> { + fn clone(&self) -> Self { + Sender { + chan: self.chan.clone(), + } + } +} + +impl<T> fmt::Debug for Sender<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Sender") + .field("chan", &self.chan) + .finish() + } +} + +/// Receive values from the associated `Sender`. +/// +/// Instances are created by the [`channel`](channel) function. +pub struct Receiver<T> { + /// The channel receiver + chan: chan::Rx<T, Semaphore>, +} + +impl<T> fmt::Debug for Receiver<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Receiver") + .field("chan", &self.chan) + .finish() + } +} + +/// Creates a bounded mpsc channel for communicating between asynchronous tasks, +/// returning the sender/receiver halves. +/// +/// All data sent on `Sender` will become available on `Receiver` in the same +/// order as it was sent. +/// +/// The `Sender` can be cloned to `send` to the same channel from multiple code +/// locations. Only one `Receiver` is supported. +/// +/// If the `Receiver` is disconnected while trying to `send`, the `send` method +/// will return a `SendError`. Similarly, if `Sender` is disconnected while +/// trying to `recv`, the `recv` method will return a `RecvError`. +/// +/// # Examples +/// +/// ```rust +/// use tokio::sync::mpsc; +/// +/// #[tokio::main] +/// async fn main() { +/// let (mut tx, mut rx) = mpsc::channel(100); +/// +/// tokio::spawn(async move { +/// for i in 0..10 { +/// if let Err(_) = tx.send(i).await { +/// println!("receiver dropped"); +/// return; +/// } +/// } +/// }); +/// +/// while let Some(i) = rx.recv().await { +/// println!("got = {}", i); +/// } +/// } +/// ``` +pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { + assert!(buffer > 0, "mpsc bounded channel requires buffer > 0"); + let semaphore = (semaphore::Semaphore::new(buffer), buffer); + let (tx, rx) = chan::channel(semaphore); + + let tx = Sender::new(tx); + let rx = Receiver::new(rx); + + (tx, rx) +} + +/// Channel semaphore is a tuple of the semaphore implementation and a `usize` +/// representing the channel bound. 
+type Semaphore = (semaphore::Semaphore, usize); + +impl<T> Receiver<T> { + pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> Receiver<T> { + Receiver { chan } + } + + /// Receives the next value for this receiver. + /// + /// `None` is returned when all `Sender` halves have dropped, indicating + /// that no further values can be sent on the channel. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (mut tx, mut rx) = mpsc::channel(100); + /// + /// tokio::spawn(async move { + /// tx.send("hello").await.unwrap(); + /// }); + /// + /// assert_eq!(Some("hello"), rx.recv().await); + /// assert_eq!(None, rx.recv().await); + /// } + /// ``` + /// + /// Values are buffered: + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (mut tx, mut rx) = mpsc::channel(100); + /// + /// tx.send("hello").await.unwrap(); + /// tx.send("world").await.unwrap(); + /// + /// assert_eq!(Some("hello"), rx.recv().await); + /// assert_eq!(Some("world"), rx.recv().await); + /// } + /// ``` + pub async fn recv(&mut self) -> Option<T> { + use crate::future::poll_fn; + + poll_fn(|cx| self.poll_recv(cx)).await + } + + #[doc(hidden)] // TODO: document + pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> { + self.chan.recv(cx) + } + + /// Attempts to return a pending value on this receiver without blocking. + /// + /// This method will never block the caller in order to wait for data to + /// become available. Instead, this will always return immediately with + /// a possible option of pending data on the channel. + /// + /// This is useful for a flavor of "optimistic check" before deciding to + /// block on a receiver. + /// + /// Compared with recv, this function has two failure cases instead of + /// one (one for disconnection, one for an empty buffer). + pub fn try_recv(&mut self) -> Result<T, TryRecvError> { + self.chan.try_recv() + } + + /// Closes the receiving half of a channel, without dropping it. + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + self.chan.close(); + } +} + +impl<T> Unpin for Receiver<T> {} + +cfg_stream! { + impl<T> crate::stream::Stream for Receiver<T> { + type Item = T; + + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> { + self.poll_recv(cx) + } + } +} + +impl<T> Sender<T> { + pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> Sender<T> { + Sender { chan } + } + + /// Sends a value, waiting until there is capacity. + /// + /// A successful send occurs when it is determined that the other end of the + /// channel has not hung up already. An unsuccessful send would be one where + /// the corresponding receiver has already been closed. Note that a return + /// value of `Err` means that the data will never be received, but a return + /// value of `Ok` does not mean that the data will be received. It is + /// possible for the corresponding receiver to hang up immediately after + /// this function returns `Ok`. + /// + /// # Errors + /// + /// If the receive half of the channel is closed, either due to [`close`] + /// being called or the [`Receiver`] handle dropping, the function returns + /// an error. The error includes the value passed to `send`. 
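+    ///
+    /// A minimal sketch of that error case (the assertion is an illustrative
+    /// addition, assuming the behavior described above):
+    ///
+    /// ```
+    /// use tokio::sync::mpsc;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (mut tx, rx) = mpsc::channel::<i32>(1);
+    ///     drop(rx);
+    ///
+    ///     // The receiver is gone, so the value is handed back in the error.
+    ///     assert!(tx.send(7).await.is_err());
+    /// }
+    /// ```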
+ /// + /// [`close`]: Receiver::close + /// [`Receiver`]: Receiver + /// + /// # Examples + /// + /// In the following example, each call to `send` will block until the + /// previously sent value was received. + /// + /// ```rust + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (mut tx, mut rx) = mpsc::channel(1); + /// + /// tokio::spawn(async move { + /// for i in 0..10 { + /// if let Err(_) = tx.send(i).await { + /// println!("receiver dropped"); + /// return; + /// } + /// } + /// }); + /// + /// while let Some(i) = rx.recv().await { + /// println!("got = {}", i); + /// } + /// } + /// ``` + pub async fn send(&mut self, value: T) -> Result<(), SendError<T>> { + use crate::future::poll_fn; + + if poll_fn(|cx| self.poll_ready(cx)).await.is_err() { + return Err(SendError(value)); + } + + match self.try_send(value) { + Ok(()) => Ok(()), + Err(TrySendError::Full(_)) => unreachable!(), + Err(TrySendError::Closed(value)) => Err(SendError(value)), + } + } + + /// Attempts to immediately send a message on this `Sender` + /// + /// This method differs from [`send`] by returning immediately if the channel's + /// buffer is full or no receiver is waiting to acquire some data. Compared + /// with [`send`], this function has two failure cases instead of one (one for + /// disconnection, one for a full buffer). + /// + /// This function may be paired with [`poll_ready`] in order to wait for + /// channel capacity before trying to send a value. + /// + /// # Errors + /// + /// If the channel capacity has been reached, i.e., the channel has `n` + /// buffered values where `n` is the argument passed to [`channel`], then an + /// error is returned. + /// + /// If the receive half of the channel is closed, either due to [`close`] + /// being called or the [`Receiver`] handle dropping, the function returns + /// an error. The error includes the value passed to `send`. + /// + /// [`send`]: Sender::send + /// [`poll_ready`]: Sender::poll_ready + /// [`channel`]: channel + /// [`close`]: Receiver::close + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// // Create a channel with buffer size 1 + /// let (mut tx1, mut rx) = mpsc::channel(1); + /// let mut tx2 = tx1.clone(); + /// + /// tokio::spawn(async move { + /// tx1.send(1).await.unwrap(); + /// tx1.send(2).await.unwrap(); + /// // task waits until the receiver receives a value. + /// }); + /// + /// tokio::spawn(async move { + /// // This will return an error and send + /// // no message if the buffer is full + /// let _ = tx2.try_send(3); + /// }); + /// + /// let mut msg; + /// msg = rx.recv().await.unwrap(); + /// println!("message {} received", msg); + /// + /// msg = rx.recv().await.unwrap(); + /// println!("message {} received", msg); + /// + /// // Third message may have never been sent + /// match rx.recv().await { + /// Some(msg) => println!("message {} received", msg), + /// None => println!("the third message was never sent"), + /// } + /// } + /// ``` + pub fn try_send(&mut self, message: T) -> Result<(), TrySendError<T>> { + self.chan.try_send(message)?; + Ok(()) + } + + /// Sends a value, waiting until there is capacity, but only for a limited time. + /// + /// Shares the same success and error conditions as [`send`], adding one more + /// condition for an unsuccessful send, which is when the provided timeout has + /// elapsed, and there is no capacity available. 
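+    ///
+    /// A sketch of the timeout case under those conditions (illustrative,
+    /// not part of the original docs):
+    ///
+    /// ```
+    /// use tokio::sync::mpsc;
+    /// use tokio::time::Duration;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (mut tx, _rx) = mpsc::channel(1);
+    ///
+    ///     // Fill the only slot; with no receiver consuming, the next send
+    ///     // cannot acquire capacity and times out.
+    ///     tx.send(1).await.unwrap();
+    ///     assert!(tx
+    ///         .send_timeout(2, Duration::from_millis(10))
+    ///         .await
+    ///         .is_err());
+    /// }
+    /// ```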
+ /// + /// [`send`]: Sender::send + /// + /// # Errors + /// + /// If the receive half of the channel is closed, either due to [`close`] + /// being called or the [`Receiver`] having been dropped, + /// the function returns an error. The error includes the value passed to `send`. + /// + /// [`close`]: Receiver::close + /// [`Receiver`]: Receiver + /// + /// # Examples + /// + /// In the following example, each call to `send_timeout` will block until the + /// previously sent value was received, unless the timeout has elapsed. + /// + /// ```rust + /// use tokio::sync::mpsc; + /// use tokio::time::{delay_for, Duration}; + /// + /// #[tokio::main] + /// async fn main() { + /// let (mut tx, mut rx) = mpsc::channel(1); + /// + /// tokio::spawn(async move { + /// for i in 0..10 { + /// if let Err(e) = tx.send_timeout(i, Duration::from_millis(100)).await { + /// println!("send error: #{:?}", e); + /// return; + /// } + /// } + /// }); + /// + /// while let Some(i) = rx.recv().await { + /// println!("got = {}", i); + /// delay_for(Duration::from_millis(200)).await; + /// } + /// } + /// ``` + #[cfg(feature = "time")] + #[cfg_attr(docsrs, doc(cfg(feature = "time")))] + pub async fn send_timeout( + &mut self, + value: T, + timeout: Duration, + ) -> Result<(), SendTimeoutError<T>> { + use crate::future::poll_fn; + + match crate::time::timeout(timeout, poll_fn(|cx| self.poll_ready(cx))).await { + Err(_) => { + return Err(SendTimeoutError::Timeout(value)); + } + Ok(Err(_)) => { + return Err(SendTimeoutError::Closed(value)); + } + Ok(_) => {} + } + + match self.try_send(value) { + Ok(()) => Ok(()), + Err(TrySendError::Full(_)) => unreachable!(), + Err(TrySendError::Closed(value)) => Err(SendTimeoutError::Closed(value)), + } + } + + /// Returns `Poll::Ready(Ok(()))` when the channel is able to accept another item. + /// + /// If the channel is full, then `Poll::Pending` is returned and the task is notified when a + /// slot becomes available. + /// + /// Once `poll_ready` returns `Poll::Ready(Ok(()))`, a call to `try_send` will succeed unless + /// the channel has since been closed. To provide this guarantee, the channel reserves one slot + /// in the channel for the coming send. This reserved slot is not available to other `Sender` + /// instances, so you need to be careful to not end up with deadlocks by blocking after calling + /// `poll_ready` but before sending an element. + /// + /// If, after `poll_ready` succeeds, you decide you do not wish to send an item after all, you + /// can use [`disarm`](Sender::disarm) to release the reserved slot. + /// + /// Until an item is sent or [`disarm`](Sender::disarm) is called, repeated calls to + /// `poll_ready` will return either `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))` if channel + /// is closed. + pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ClosedError>> { + self.chan.poll_ready(cx).map_err(|_| ClosedError::new()) + } + + /// Undo a successful call to `poll_ready`. + /// + /// Once a call to `poll_ready` returns `Poll::Ready(Ok(()))`, it holds up one slot in the + /// channel to make room for the coming send. `disarm` allows you to give up that slot if you + /// decide you do not wish to send an item after all. After calling `disarm`, you must call + /// `poll_ready` until it returns `Poll::Ready(Ok(()))` before attempting to send again. + /// + /// Returns `false` if no slot is reserved for this sender (usually because `poll_ready` was + /// not previously called, or did not succeed). 
+ /// + /// # Motivation + /// + /// Since `poll_ready` takes up one of the finite number of slots in a bounded channel, callers + /// need to send an item shortly after `poll_ready` succeeds. If they do not, idle senders may + /// take up all the slots of the channel, and prevent active senders from getting any requests + /// through. Consider this code that forwards from one channel to another: + /// + /// ```rust,ignore + /// loop { + /// ready!(tx.poll_ready(cx))?; + /// if let Some(item) = ready!(rx.poll_recv(cx)) { + /// tx.try_send(item)?; + /// } else { + /// break; + /// } + /// } + /// ``` + /// + /// If many such forwarders exist, and they all forward into a single (cloned) `Sender`, then + /// any number of forwarders may be waiting for `rx.poll_recv` at the same time. While they do, + /// they are effectively each reducing the channel's capacity by 1. If enough of these + /// forwarders are idle, forwarders whose `rx` _do_ have elements will be unable to find a spot + /// for them through `poll_ready`, and the system will deadlock. + /// + /// `disarm` solves this problem by allowing you to give up the reserved slot if you find that + /// you have to block. We can then fix the code above by writing: + /// + /// ```rust,ignore + /// loop { + /// ready!(tx.poll_ready(cx))?; + /// let item = rx.poll_recv(cx); + /// if let Poll::Ready(Ok(_)) = item { + /// // we're going to send the item below, so don't disarm + /// } else { + /// // give up our send slot, we won't need it for a while + /// tx.disarm(); + /// } + /// if let Some(item) = ready!(item) { + /// tx.try_send(item)?; + /// } else { + /// break; + /// } + /// } + /// ``` + pub fn disarm(&mut self) -> bool { + if self.chan.is_ready() { + self.chan.disarm(); + true + } else { + false + } + } +} diff --git a/third_party/rust/tokio/src/sync/mpsc/chan.rs b/third_party/rust/tokio/src/sync/mpsc/chan.rs new file mode 100644 index 0000000000..3466395788 --- /dev/null +++ b/third_party/rust/tokio/src/sync/mpsc/chan.rs @@ -0,0 +1,524 @@ +use crate::loom::cell::UnsafeCell; +use crate::loom::future::AtomicWaker; +use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::Arc; +use crate::sync::mpsc::error::{ClosedError, TryRecvError}; +use crate::sync::mpsc::{error, list}; + +use std::fmt; +use std::process; +use std::sync::atomic::Ordering::{AcqRel, Relaxed}; +use std::task::Poll::{Pending, Ready}; +use std::task::{Context, Poll}; + +/// Channel sender +pub(crate) struct Tx<T, S: Semaphore> { + inner: Arc<Chan<T, S>>, + permit: S::Permit, +} + +impl<T, S: Semaphore> fmt::Debug for Tx<T, S> +where + S::Permit: fmt::Debug, + S: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Tx") + .field("inner", &self.inner) + .field("permit", &self.permit) + .finish() + } +} + +/// Channel receiver +pub(crate) struct Rx<T, S: Semaphore> { + inner: Arc<Chan<T, S>>, +} + +impl<T, S: Semaphore> fmt::Debug for Rx<T, S> +where + S: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Rx").field("inner", &self.inner).finish() + } +} + +#[derive(Debug, Eq, PartialEq)] +pub(crate) enum TrySendError { + Closed, + Full, +} + +impl<T> From<(T, TrySendError)> for error::SendError<T> { + fn from(src: (T, TrySendError)) -> error::SendError<T> { + match src.1 { + TrySendError::Closed => error::SendError(src.0), + TrySendError::Full => unreachable!(), + } + } +} + +impl<T> From<(T, TrySendError)> for error::TrySendError<T> { + fn from(src: (T, TrySendError)) -> 
error::TrySendError<T> {
+        match src.1 {
+            TrySendError::Closed => error::TrySendError::Closed(src.0),
+            TrySendError::Full => error::TrySendError::Full(src.0),
+        }
+    }
+}
+
+pub(crate) trait Semaphore {
+    type Permit;
+
+    fn new_permit() -> Self::Permit;
+
+    /// The permit is dropped without a value being sent. In this case, the
+    /// permit must be returned to the semaphore.
+    fn drop_permit(&self, permit: &mut Self::Permit);
+
+    fn is_idle(&self) -> bool;
+
+    fn add_permit(&self);
+
+    fn poll_acquire(
+        &self,
+        cx: &mut Context<'_>,
+        permit: &mut Self::Permit,
+    ) -> Poll<Result<(), ClosedError>>;
+
+    fn try_acquire(&self, permit: &mut Self::Permit) -> Result<(), TrySendError>;
+
+    /// A value was sent into the channel and the permit held by `tx` is
+    /// dropped. In this case, the permit should not immediately be returned to
+    /// the semaphore. Instead, the permit is returned to the semaphore once
+    /// the sent value is read by the rx handle.
+    fn forget(&self, permit: &mut Self::Permit);
+
+    fn close(&self);
+}
+
+struct Chan<T, S> {
+    /// Handle to the push half of the lock-free list.
+    tx: list::Tx<T>,
+
+    /// Coordinates access to the channel's capacity.
+    semaphore: S,
+
+    /// Receiver waker. Notified when a value is pushed into the channel.
+    rx_waker: AtomicWaker,
+
+    /// Tracks the number of outstanding sender handles.
+    ///
+    /// When this drops to zero, the send half of the channel is closed.
+    tx_count: AtomicUsize,
+
+    /// Only accessed by `Rx` handle.
+    rx_fields: UnsafeCell<RxFields<T>>,
+}
+
+impl<T, S> fmt::Debug for Chan<T, S>
+where
+    S: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Chan")
+            .field("tx", &self.tx)
+            .field("semaphore", &self.semaphore)
+            .field("rx_waker", &self.rx_waker)
+            .field("tx_count", &self.tx_count)
+            .field("rx_fields", &"...")
+            .finish()
+    }
+}
+
+/// Fields only accessed by `Rx` handle.
+struct RxFields<T> {
+    /// Channel receiver. This field is only accessed by the `Receiver` type.
+    list: list::Rx<T>,
+
+    /// `true` if `Rx::close` is called.
+    rx_closed: bool,
+}
+
+impl<T> fmt::Debug for RxFields<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("RxFields")
+            .field("list", &self.list)
+            .field("rx_closed", &self.rx_closed)
+            .finish()
+    }
+}
+
+unsafe impl<T: Send, S: Send> Send for Chan<T, S> {}
+unsafe impl<T: Send, S: Sync> Sync for Chan<T, S> {}
+
+pub(crate) fn channel<T, S>(semaphore: S) -> (Tx<T, S>, Rx<T, S>)
+where
+    S: Semaphore,
+{
+    let (tx, rx) = list::channel();
+
+    let chan = Arc::new(Chan {
+        tx,
+        semaphore,
+        rx_waker: AtomicWaker::new(),
+        tx_count: AtomicUsize::new(1),
+        rx_fields: UnsafeCell::new(RxFields {
+            list: rx,
+            rx_closed: false,
+        }),
+    });
+
+    (Tx::new(chan.clone()), Rx::new(chan))
+}
+
+// ===== impl Tx =====
+
+impl<T, S> Tx<T, S>
+where
+    S: Semaphore,
+{
+    fn new(chan: Arc<Chan<T, S>>) -> Tx<T, S> {
+        Tx {
+            inner: chan,
+            permit: S::new_permit(),
+        }
+    }
+
+    pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ClosedError>> {
+        self.inner.semaphore.poll_acquire(cx, &mut self.permit)
+    }
+
+    pub(crate) fn disarm(&mut self) {
+        // TODO: should this error if not acquired?
+        self.inner.semaphore.drop_permit(&mut self.permit)
+    }
+
+    /// Send a message and notify the receiver.
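Since the `try_send` implementation follows just below, a brief aside on the `Semaphore` abstraction above: the same `Chan` core serves both channel flavors because the bounded channel instantiates `S` with `(semaphore_ll::Semaphore, usize)` while the unbounded one uses a bare `AtomicUsize`. A simplified, hypothetical sketch of that pattern (these names do not exist in tokio; they only mirror the shape of the trait):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// One channel core, two capacity policies selected at the type level.
trait CapacityPolicy {
    fn try_reserve(&self) -> bool;
    fn release(&self);
}

struct Bounded(AtomicUsize); // remaining permits
struct Unbounded;            // never refuses

impl CapacityPolicy for Bounded {
    fn try_reserve(&self) -> bool {
        let mut cur = self.0.load(Ordering::Acquire);
        loop {
            if cur == 0 {
                return false; // no permits left: the channel is full
            }
            match self
                .0
                .compare_exchange(cur, cur - 1, Ordering::AcqRel, Ordering::Acquire)
            {
                Ok(_) => return true,
                Err(actual) => cur = actual,
            }
        }
    }
    fn release(&self) {
        self.0.fetch_add(1, Ordering::Release);
    }
}

impl CapacityPolicy for Unbounded {
    fn try_reserve(&self) -> bool {
        true // an unbounded channel always has room
    }
    fn release(&self) {}
}

fn main() {
    let b = Bounded(AtomicUsize::new(1));
    assert!(b.try_reserve());
    assert!(!b.try_reserve()); // full
    b.release();
    assert!(b.try_reserve());
    assert!(Unbounded.try_reserve()); // always succeeds
}
```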
+ pub(crate) fn try_send(&mut self, value: T) -> Result<(), (T, TrySendError)> { + self.inner.try_send(value, &mut self.permit) + } +} + +impl<T> Tx<T, (crate::sync::semaphore_ll::Semaphore, usize)> { + pub(crate) fn is_ready(&self) -> bool { + self.permit.is_acquired() + } +} + +impl<T> Tx<T, AtomicUsize> { + pub(crate) fn send_unbounded(&self, value: T) -> Result<(), (T, TrySendError)> { + self.inner.try_send(value, &mut ()) + } +} + +impl<T, S> Clone for Tx<T, S> +where + S: Semaphore, +{ + fn clone(&self) -> Tx<T, S> { + // Using a Relaxed ordering here is sufficient as the caller holds a + // strong ref to `self`, preventing a concurrent decrement to zero. + self.inner.tx_count.fetch_add(1, Relaxed); + + Tx { + inner: self.inner.clone(), + permit: S::new_permit(), + } + } +} + +impl<T, S> Drop for Tx<T, S> +where + S: Semaphore, +{ + fn drop(&mut self) { + self.inner.semaphore.drop_permit(&mut self.permit); + + if self.inner.tx_count.fetch_sub(1, AcqRel) != 1 { + return; + } + + // Close the list, which sends a `Close` message + self.inner.tx.close(); + + // Notify the receiver + self.inner.rx_waker.wake(); + } +} + +// ===== impl Rx ===== + +impl<T, S> Rx<T, S> +where + S: Semaphore, +{ + fn new(chan: Arc<Chan<T, S>>) -> Rx<T, S> { + Rx { inner: chan } + } + + pub(crate) fn close(&mut self) { + self.inner.rx_fields.with_mut(|rx_fields_ptr| { + let rx_fields = unsafe { &mut *rx_fields_ptr }; + + if rx_fields.rx_closed { + return; + } + + rx_fields.rx_closed = true; + }); + + self.inner.semaphore.close(); + } + + /// Receive the next value + pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> { + use super::block::Read::*; + + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + self.inner.rx_fields.with_mut(|rx_fields_ptr| { + let rx_fields = unsafe { &mut *rx_fields_ptr }; + + macro_rules! try_recv { + () => { + match rx_fields.list.pop(&self.inner.tx) { + Some(Value(value)) => { + self.inner.semaphore.add_permit(); + return Ready(Some(value)); + } + Some(Closed) => { + // TODO: This check may not be required as it most + // likely can only return `true` at this point. A + // channel is closed when all tx handles are + // dropped. Dropping a tx handle releases memory, + // which ensures that if dropping the tx handle is + // visible, then all messages sent are also visible. + assert!(self.inner.semaphore.is_idle()); + return Ready(None); + } + None => {} // fall through + } + }; + } + + try_recv!(); + + self.inner.rx_waker.register_by_ref(cx.waker()); + + // It is possible that a value was pushed between attempting to read + // and registering the task, so we have to check the channel a + // second time here. 
try_recv!();

+            if rx_fields.rx_closed && self.inner.semaphore.is_idle() {
+                Ready(None)
+            } else {
+                Pending
+            }
+        })
+    }
+
+    /// Receives the next value without blocking
+    pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
+        use super::block::Read::*;
+        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+            let rx_fields = unsafe { &mut *rx_fields_ptr };
+            match rx_fields.list.pop(&self.inner.tx) {
+                Some(Value(value)) => {
+                    self.inner.semaphore.add_permit();
+                    Ok(value)
+                }
+                Some(Closed) => Err(TryRecvError::Closed),
+                None => Err(TryRecvError::Empty),
+            }
+        })
+    }
+}
+
+impl<T, S> Drop for Rx<T, S>
+where
+    S: Semaphore,
+{
+    fn drop(&mut self) {
+        use super::block::Read::Value;
+
+        self.close();
+
+        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+            let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+            while let Some(Value(_)) = rx_fields.list.pop(&self.inner.tx) {
+                self.inner.semaphore.add_permit();
+            }
+        })
+    }
+}
+
+// ===== impl Chan =====
+
+impl<T, S> Chan<T, S>
+where
+    S: Semaphore,
+{
+    fn try_send(&self, value: T, permit: &mut S::Permit) -> Result<(), (T, TrySendError)> {
+        if let Err(e) = self.semaphore.try_acquire(permit) {
+            return Err((value, e));
+        }
+
+        // Push the value
+        self.tx.push(value);
+
+        // Notify the rx task
+        self.rx_waker.wake();
+
+        // Release the permit
+        self.semaphore.forget(permit);
+
+        Ok(())
+    }
+}
+
+impl<T, S> Drop for Chan<T, S> {
+    fn drop(&mut self) {
+        use super::block::Read::Value;
+
+        // Safety: the only owner of the rx fields is Chan, and being
+        // inside its own Drop means we're the last ones to touch it.
+        self.rx_fields.with_mut(|rx_fields_ptr| {
+            let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+            while let Some(Value(_)) = rx_fields.list.pop(&self.tx) {}
+            unsafe { rx_fields.list.free_blocks() };
+        });
+    }
+}
+
+use crate::sync::semaphore_ll::TryAcquireError;
+
+impl From<TryAcquireError> for TrySendError {
+    fn from(src: TryAcquireError) -> TrySendError {
+        if src.is_closed() {
+            TrySendError::Closed
+        } else if src.is_no_permits() {
+            TrySendError::Full
+        } else {
+            unreachable!();
+        }
+    }
+}
+
+// ===== impl Semaphore for (::Semaphore, capacity) =====
+
+use crate::sync::semaphore_ll::Permit;
+
+impl Semaphore for (crate::sync::semaphore_ll::Semaphore, usize) {
+    type Permit = Permit;
+
+    fn new_permit() -> Permit {
+        Permit::new()
+    }
+
+    fn drop_permit(&self, permit: &mut Permit) {
+        permit.release(1, &self.0);
+    }
+
+    fn add_permit(&self) {
+        self.0.add_permits(1)
+    }
+
+    fn is_idle(&self) -> bool {
+        self.0.available_permits() == self.1
+    }
+
+    fn poll_acquire(
+        &self,
+        cx: &mut Context<'_>,
+        permit: &mut Permit,
+    ) -> Poll<Result<(), ClosedError>> {
+        // Keep track of task budget
+        ready!(crate::coop::poll_proceed(cx));
+
+        permit
+            .poll_acquire(cx, 1, &self.0)
+            .map_err(|_| ClosedError::new())
+    }
+
+    fn try_acquire(&self, permit: &mut Permit) -> Result<(), TrySendError> {
+        permit.try_acquire(1, &self.0)?;
+        Ok(())
+    }
+
+    fn forget(&self, permit: &mut Self::Permit) {
+        permit.forget(1);
+    }
+
+    fn close(&self) {
+        self.0.close();
+    }
+}
+
+// ===== impl Semaphore for AtomicUsize =====
+
+use std::sync::atomic::Ordering::{Acquire, Release};
+use std::usize;
+
+impl Semaphore for AtomicUsize {
+    type Permit = ();
+
+    fn new_permit() {}
+
+    fn drop_permit(&self, _permit: &mut ()) {}
+
+    fn add_permit(&self) {
+        let prev = self.fetch_sub(2, Release);
+
+        if prev >> 1 == 0 {
+            // Something went wrong
+            process::abort();
+        }
+    }
+
+    fn is_idle(&self) -> bool {
+        self.load(Acquire) >> 1 == 0
+    }
+
+    fn
poll_acquire( + &self, + _cx: &mut Context<'_>, + permit: &mut (), + ) -> Poll<Result<(), ClosedError>> { + Ready(self.try_acquire(permit).map_err(|_| ClosedError::new())) + } + + fn try_acquire(&self, _permit: &mut ()) -> Result<(), TrySendError> { + let mut curr = self.load(Acquire); + + loop { + if curr & 1 == 1 { + return Err(TrySendError::Closed); + } + + if curr == usize::MAX ^ 1 { + // Overflowed the ref count. There is no safe way to recover, so + // abort the process. In practice, this should never happen. + process::abort() + } + + match self.compare_exchange(curr, curr + 2, AcqRel, Acquire) { + Ok(_) => return Ok(()), + Err(actual) => { + curr = actual; + } + } + } + } + + fn forget(&self, _permit: &mut ()) {} + + fn close(&self) { + self.fetch_or(1, Release); + } +} diff --git a/third_party/rust/tokio/src/sync/mpsc/error.rs b/third_party/rust/tokio/src/sync/mpsc/error.rs new file mode 100644 index 0000000000..72c42aa53e --- /dev/null +++ b/third_party/rust/tokio/src/sync/mpsc/error.rs @@ -0,0 +1,146 @@ +//! Channel error types + +use std::error::Error; +use std::fmt; + +/// Error returned by the `Sender`. +#[derive(Debug)] +pub struct SendError<T>(pub T); + +impl<T> fmt::Display for SendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "channel closed") + } +} + +impl<T: fmt::Debug> std::error::Error for SendError<T> {} + +// ===== TrySendError ===== + +/// This enumeration is the list of the possible error outcomes for the +/// [try_send](super::Sender::try_send) method. +#[derive(Debug)] +pub enum TrySendError<T> { + /// The data could not be sent on the channel because the channel is + /// currently full and sending would require blocking. + Full(T), + + /// The receive half of the channel was explicitly closed or has been + /// dropped. + Closed(T), +} + +impl<T: fmt::Debug> Error for TrySendError<T> {} + +impl<T> fmt::Display for TrySendError<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + fmt, + "{}", + match self { + TrySendError::Full(..) => "no available capacity", + TrySendError::Closed(..) => "channel closed", + } + ) + } +} + +impl<T> From<SendError<T>> for TrySendError<T> { + fn from(src: SendError<T>) -> TrySendError<T> { + TrySendError::Closed(src.0) + } +} + +// ===== RecvError ===== + +/// Error returned by `Receiver`. +#[derive(Debug)] +pub struct RecvError(()); + +impl fmt::Display for RecvError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "channel closed") + } +} + +impl Error for RecvError {} + +// ===== TryRecvError ===== + +/// This enumeration is the list of the possible reasons that try_recv +/// could not return data when called. +#[derive(Debug, PartialEq)] +pub enum TryRecvError { + /// This channel is currently empty, but the Sender(s) have not yet + /// disconnected, so data may yet become available. + Empty, + /// The channel's sending half has been closed, and there will + /// never be any more data received on it. + Closed, +} + +impl fmt::Display for TryRecvError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + fmt, + "{}", + match self { + TryRecvError::Empty => "channel empty", + TryRecvError::Closed => "channel closed", + } + ) + } +} + +impl Error for TryRecvError {} + +// ===== ClosedError ===== + +/// Error returned by [`Sender::poll_ready`](super::Sender::poll_ready). 
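Before the error types continue below, an aside on the `AtomicUsize` semaphore implementation just shown: it packs a "closed" flag into bit 0 and the buffered-message count into the remaining bits, which is why sends add 2 (`try_acquire`), receives subtract 2 (`add_permit`), and `close` ORs in 1. A standalone sketch (not tokio code) of that encoding:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let state = AtomicUsize::new(0);

    // Two sends: each adds 2, leaving bit 0 untouched.
    state.fetch_add(2, Ordering::AcqRel);
    state.fetch_add(2, Ordering::AcqRel);

    // Close: set the low bit without disturbing the count.
    state.fetch_or(1, Ordering::Release);

    let s = state.load(Ordering::Acquire);
    assert_eq!(s & 1, 1); // closed flag
    assert_eq!(s >> 1, 2); // two messages still buffered
}
```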
+#[derive(Debug)]
+pub struct ClosedError(());
+
+impl ClosedError {
+    pub(crate) fn new() -> ClosedError {
+        ClosedError(())
+    }
+}
+
+impl fmt::Display for ClosedError {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "channel closed")
+    }
+}
+
+impl Error for ClosedError {}
+
+cfg_time! {
+    // ===== SendTimeoutError =====
+
+    #[derive(Debug)]
+    /// Error returned by [`Sender::send_timeout`](super::Sender::send_timeout).
+    pub enum SendTimeoutError<T> {
+        /// The data could not be sent on the channel because the channel is
+        /// full, and the timeout to send has elapsed.
+        Timeout(T),
+
+        /// The receive half of the channel was explicitly closed or has been
+        /// dropped.
+        Closed(T),
+    }
+
+    impl<T: fmt::Debug> Error for SendTimeoutError<T> {}
+
+    impl<T> fmt::Display for SendTimeoutError<T> {
+        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+            write!(
+                fmt,
+                "{}",
+                match self {
+                    SendTimeoutError::Timeout(..) => "timed out waiting on send operation",
+                    SendTimeoutError::Closed(..) => "channel closed",
+                }
+            )
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/list.rs b/third_party/rust/tokio/src/sync/mpsc/list.rs
new file mode 100644
index 0000000000..53f82a25ef
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/list.rs
@@ -0,0 +1,341 @@
+//! A concurrent, lock-free, FIFO list.
+
+use crate::loom::{
+    sync::atomic::{AtomicPtr, AtomicUsize},
+    thread,
+};
+use crate::sync::mpsc::block::{self, Block};
+
+use std::fmt;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
+
+/// List queue transmit handle
+pub(crate) struct Tx<T> {
+    /// Tail in the `Block` mpmc list.
+    block_tail: AtomicPtr<Block<T>>,
+
+    /// Position to push the next message. This references a block and an
+    /// offset into the block.
+    tail_position: AtomicUsize,
+}
+
+/// List queue receive handle
+pub(crate) struct Rx<T> {
+    /// Pointer to the block being processed
+    head: NonNull<Block<T>>,
+
+    /// Next slot index to process
+    index: usize,
+
+    /// Pointer to the next block pending release
+    free_head: NonNull<Block<T>>,
+}
+
+pub(crate) fn channel<T>() -> (Tx<T>, Rx<T>) {
+    // Create the initial block shared between the tx and rx halves.
+    let initial_block = Box::new(Block::new(0));
+    let initial_block_ptr = Box::into_raw(initial_block);
+
+    let tx = Tx {
+        block_tail: AtomicPtr::new(initial_block_ptr),
+        tail_position: AtomicUsize::new(0),
+    };
+
+    let head = NonNull::new(initial_block_ptr).unwrap();
+
+    let rx = Rx {
+        head,
+        index: 0,
+        free_head: head,
+    };
+
+    (tx, rx)
+}
+
+impl<T> Tx<T> {
+    /// Pushes a value into the list.
+    pub(crate) fn push(&self, value: T) {
+        // First, claim a slot for the value. `Acquire` is used here to
+        // synchronize with the `fetch_add` in `reclaim_blocks`.
+        let slot_index = self.tail_position.fetch_add(1, Acquire);
+
+        // Load the current block and write the value
+        let block = self.find_block(slot_index);
+
+        unsafe {
+            // Write the value to the block
+            block.as_ref().write(slot_index, value);
+        }
+    }
+
+    /// Closes the send half of the list
+    ///
+    /// Similar process to pushing a value, but instead of writing the value &
+    /// setting the ready flag, the TX_CLOSED flag is set on the block.
+    pub(crate) fn close(&self) {
+        // First, claim a slot for the value. This is the last slot that will be
+        // claimed.
let slot_index = self.tail_position.fetch_add(1, Acquire);
+
+        let block = self.find_block(slot_index);
+
+        unsafe { block.as_ref().tx_close() }
+    }
+
+    fn find_block(&self, slot_index: usize) -> NonNull<Block<T>> {
+        // The start index of the block that contains `index`.
+        let start_index = block::start_index(slot_index);
+
+        // The index offset into the block
+        let offset = block::offset(slot_index);
+
+        // Load the current head of the block
+        let mut block_ptr = self.block_tail.load(Acquire);
+
+        let block = unsafe { &*block_ptr };
+
+        // Calculate the distance between the tail ptr and the target block
+        let distance = block.distance(start_index);
+
+        // Decide if this call to `find_block` should attempt to update the
+        // `block_tail` pointer.
+        //
+        // Updating `block_tail` is not always performed in order to reduce
+        // contention.
+        //
+        // When set, as the routine walks the linked list, it attempts to update
+        // `block_tail`. If the update cannot be performed, `try_updating_tail`
+        // is unset.
+        let mut try_updating_tail = distance > offset;
+
+        // Walk the linked list of blocks until the block with `start_index` is
+        // found.
+        loop {
+            let block = unsafe { &(*block_ptr) };
+
+            if block.is_at_index(start_index) {
+                return unsafe { NonNull::new_unchecked(block_ptr) };
+            }
+
+            let next_block = block
+                .load_next(Acquire)
+                // There is no allocated next block, grow the linked list.
+                .unwrap_or_else(|| block.grow());
+
+            // If the block is **not** final, then the tail pointer cannot be
+            // advanced any more.
+            try_updating_tail &= block.is_final();
+
+            if try_updating_tail {
+                // Advancing `block_tail` must happen when walking the linked
+                // list. `block_tail` may not advance past any blocks that are
+                // not "final". At the point a block is finalized, it is unknown
+                // if there are any prior blocks that are unfinalized, which
+                // makes it impossible to advance `block_tail`.
+                //
+                // While walking the linked list, `block_tail` can be advanced
+                // as long as finalized blocks are traversed.
+                //
+                // Release ordering is used to ensure that any subsequent reads
+                // are able to see the memory pointed to by `block_tail`.
+                //
+                // Acquire is not needed as any "actual" value is not accessed.
+                // At this point, the linked list is walked to acquire blocks.
+                let actual =
+                    self.block_tail
+                        .compare_and_swap(block_ptr, next_block.as_ptr(), Release);
+
+                if actual == block_ptr {
+                    // Synchronize with any senders
+                    let tail_position = self.tail_position.fetch_add(0, Release);
+
+                    unsafe {
+                        block.tx_release(tail_position);
+                    }
+                } else {
+                    // A concurrent sender is also working on advancing
+                    // `block_tail` and this thread is falling behind.
+                    //
+                    // Stop trying to advance the tail pointer
+                    try_updating_tail = false;
+                }
+            }
+
+            block_ptr = next_block.as_ptr();
+
+            thread::yield_now();
+        }
+    }
+
+    pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull<Block<T>>) {
+        // The block has been removed from the linked list and ownership
+        // is reclaimed.
+        //
+        // Before dropping the block, see if it can be reused by
+        // inserting it back at the end of the linked list.
+ // + // First, reset the data + block.as_mut().reclaim(); + + let mut reused = false; + + // Attempt to insert the block at the end + // + // Walk at most three times + // + let curr_ptr = self.block_tail.load(Acquire); + + // The pointer can never be null + debug_assert!(!curr_ptr.is_null()); + + let mut curr = NonNull::new_unchecked(curr_ptr); + + // TODO: Unify this logic with Block::grow + for _ in 0..3 { + match curr.as_ref().try_push(&mut block, AcqRel) { + Ok(_) => { + reused = true; + break; + } + Err(next) => { + curr = next; + } + } + } + + if !reused { + let _ = Box::from_raw(block.as_ptr()); + } + } +} + +impl<T> fmt::Debug for Tx<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Tx") + .field("block_tail", &self.block_tail.load(Relaxed)) + .field("tail_position", &self.tail_position.load(Relaxed)) + .finish() + } +} + +impl<T> Rx<T> { + /// Pops the next value off the queue + pub(crate) fn pop(&mut self, tx: &Tx<T>) -> Option<block::Read<T>> { + // Advance `head`, if needed + if !self.try_advancing_head() { + return None; + } + + self.reclaim_blocks(tx); + + unsafe { + let block = self.head.as_ref(); + + let ret = block.read(self.index); + + if let Some(block::Read::Value(..)) = ret { + self.index = self.index.wrapping_add(1); + } + + ret + } + } + + /// Tries advancing the block pointer to the block referenced by `self.index`. + /// + /// Returns `true` if successful, `false` if there is no next block to load. + fn try_advancing_head(&mut self) -> bool { + let block_index = block::start_index(self.index); + + loop { + let next_block = { + let block = unsafe { self.head.as_ref() }; + + if block.is_at_index(block_index) { + return true; + } + + block.load_next(Acquire) + }; + + let next_block = match next_block { + Some(next_block) => next_block, + None => { + return false; + } + }; + + self.head = next_block; + + thread::yield_now(); + } + } + + fn reclaim_blocks(&mut self, tx: &Tx<T>) { + while self.free_head != self.head { + unsafe { + // Get a handle to the block that will be freed and update + // `free_head` to point to the next block. + let block = self.free_head; + + let observed_tail_position = block.as_ref().observed_tail_position(); + + let required_index = match observed_tail_position { + Some(i) => i, + None => return, + }; + + if required_index > self.index { + return; + } + + // We may read the next pointer with `Relaxed` ordering as it is + // guaranteed that the `reclaim_blocks` routine trails the `recv` + // routine. Any memory accessed by `reclaim_blocks` has already + // been acquired by `recv`. + let next_block = block.as_ref().load_next(Relaxed); + + // Update the free list head + self.free_head = next_block.unwrap(); + + // Push the emptied block onto the back of the queue, making it + // available to senders. + tx.reclaim_block(block); + } + + thread::yield_now(); + } + } + + /// Effectively `Drop` all the blocks. Should only be called once, when + /// the list is dropping. + pub(super) unsafe fn free_blocks(&mut self) { + debug_assert_ne!(self.free_head, NonNull::dangling()); + + let mut cur = Some(self.free_head); + + #[cfg(debug_assertions)] + { + // to trigger the debug assert above so as to catch that we + // don't call `free_blocks` more than once. 
self.free_head = NonNull::dangling();
+            self.head = NonNull::dangling();
+        }
+
+        while let Some(block) = cur {
+            cur = block.as_ref().load_next(Relaxed);
+            drop(Box::from_raw(block.as_ptr()));
+        }
+    }
+}
+
+impl<T> fmt::Debug for Rx<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("Rx")
+            .field("head", &self.head)
+            .field("index", &self.index)
+            .field("free_head", &self.free_head)
+            .finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/mod.rs b/third_party/rust/tokio/src/sync/mpsc/mod.rs
new file mode 100644
index 0000000000..4cfd6150f3
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/mod.rs
@@ -0,0 +1,64 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
+//! A multi-producer, single-consumer queue for sending values across
+//! asynchronous tasks.
+//!
+//! Similar to `std`, channel creation provides [`Receiver`] and [`Sender`]
+//! handles. [`Receiver`] implements `Stream` and allows a task to read values
+//! out of the channel. If there is no message to read, the current task will be
+//! notified when a new value is sent. [`Sender`] implements the `Sink` trait
+//! and allows sending messages into the channel. If the channel is at capacity,
+//! the send is rejected and the task will be notified when additional capacity
+//! is available. In other words, the channel provides backpressure.
+//!
+//! Unbounded channels are also available using the `unbounded_channel`
+//! constructor.
+//!
+//! # Disconnection
+//!
+//! When all [`Sender`] handles have been dropped, it is no longer
+//! possible to send values into the channel. This is considered the termination
+//! event of the stream. As such, `Receiver::poll` returns `Ok(Ready(None))`.
+//!
+//! If the [`Receiver`] handle is dropped, then messages can no longer
+//! be read out of the channel. In this case, all further attempts to send will
+//! result in an error.
+//!
+//! # Clean Shutdown
+//!
+//! When the [`Receiver`] is dropped, it is possible for unprocessed messages to
+//! remain in the channel. Instead, it is usually desirable to perform a "clean"
+//! shutdown. To do this, the receiver first calls `close`, which will prevent
+//! any further messages from being sent into the channel. Then, the receiver
+//! consumes the channel to completion, at which point the receiver can be
+//! dropped.
+//!
+//! [`Sender`]: crate::sync::mpsc::Sender
+//! [`Receiver`]: crate::sync::mpsc::Receiver
+
+pub(super) mod block;
+
+mod bounded;
+pub use self::bounded::{channel, Receiver, Sender};
+
+mod chan;
+
+pub(super) mod list;
+
+mod unbounded;
+pub use self::unbounded::{unbounded_channel, UnboundedReceiver, UnboundedSender};
+
+pub mod error;
+
+/// The number of values a block can contain.
+///
+/// This value must be a power of 2. It also must be smaller than the number of
+/// bits in `usize`.
+#[cfg(all(target_pointer_width = "64", not(loom)))]
+const BLOCK_CAP: usize = 32;
+
+#[cfg(all(not(target_pointer_width = "64"), not(loom)))]
+const BLOCK_CAP: usize = 16;
+
+#[cfg(loom)]
+const BLOCK_CAP: usize = 2;
diff --git a/third_party/rust/tokio/src/sync/mpsc/unbounded.rs b/third_party/rust/tokio/src/sync/mpsc/unbounded.rs
new file mode 100644
index 0000000000..ba543fe4c8
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/unbounded.rs
@@ -0,0 +1,176 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::sync::mpsc::chan;
+use crate::sync::mpsc::error::{SendError, TryRecvError};
+
+use std::fmt;
+use std::task::{Context, Poll};
+
+/// Send values to the associated `UnboundedReceiver`.
+///
+/// Instances are created by the
+/// [`unbounded_channel`](unbounded_channel) function.
+pub struct UnboundedSender<T> {
+    chan: chan::Tx<T, Semaphore>,
+}
+
+impl<T> Clone for UnboundedSender<T> {
+    fn clone(&self) -> Self {
+        UnboundedSender {
+            chan: self.chan.clone(),
+        }
+    }
+}
+
+impl<T> fmt::Debug for UnboundedSender<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("UnboundedSender")
+            .field("chan", &self.chan)
+            .finish()
+    }
+}
+
+/// Receive values from the associated `UnboundedSender`.
+///
+/// Instances are created by the
+/// [`unbounded_channel`](unbounded_channel) function.
+pub struct UnboundedReceiver<T> {
+    /// The channel receiver
+    chan: chan::Rx<T, Semaphore>,
+}
+
+impl<T> fmt::Debug for UnboundedReceiver<T> {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("UnboundedReceiver")
+            .field("chan", &self.chan)
+            .finish()
+    }
+}
+
+/// Creates an unbounded mpsc channel for communicating between asynchronous
+/// tasks.
+///
+/// A `send` on this channel will always succeed as long as the receive half has
+/// not been closed. If the receiver falls behind, messages will be arbitrarily
+/// buffered.
+///
+/// **Note** that the amount of available system memory is an implicit bound on
+/// the channel. Using an `unbounded` channel can cause the process to run out of
+/// memory. In this case, the process will be aborted.
+pub fn unbounded_channel<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+    let (tx, rx) = chan::channel(AtomicUsize::new(0));
+
+    let tx = UnboundedSender::new(tx);
+    let rx = UnboundedReceiver::new(rx);
+
+    (tx, rx)
+}
+
+/// No capacity
+type Semaphore = AtomicUsize;
+
+impl<T> UnboundedReceiver<T> {
+    pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> UnboundedReceiver<T> {
+        UnboundedReceiver { chan }
+    }
+
+    #[doc(hidden)] // TODO: doc
+    pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
+        self.chan.recv(cx)
+    }
+
+    /// Receives the next value for this receiver.
+    ///
+    /// `None` is returned when all `Sender` halves have dropped, indicating
+    /// that no further values can be sent on the channel.
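An aside on the `BLOCK_CAP` constants defined above: because the capacity is a power of two, a global slot index splits into a block start index and an intra-block offset using single mask operations. The helpers below are hypothetical stand-ins for what `block::start_index` and `block::offset` (in `block.rs`, which is not part of this hunk) must compute:

```rust
const BLOCK_CAP: usize = 32; // the value used on 64-bit targets above

fn start_index(slot_index: usize) -> usize {
    // Round down to a multiple of BLOCK_CAP: the first slot of the block.
    slot_index & !(BLOCK_CAP - 1)
}

fn offset(slot_index: usize) -> usize {
    // Position within the block.
    slot_index & (BLOCK_CAP - 1)
}

fn main() {
    // Slot 70 lives in the block starting at 64, at offset 6.
    assert_eq!(start_index(70), 64);
    assert_eq!(offset(70), 6);
    // Requiring a power of two is what makes both of these single masks.
}
```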
+ /// + /// # Examples + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = mpsc::unbounded_channel(); + /// + /// tokio::spawn(async move { + /// tx.send("hello").unwrap(); + /// }); + /// + /// assert_eq!(Some("hello"), rx.recv().await); + /// assert_eq!(None, rx.recv().await); + /// } + /// ``` + /// + /// Values are buffered: + /// + /// ``` + /// use tokio::sync::mpsc; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = mpsc::unbounded_channel(); + /// + /// tx.send("hello").unwrap(); + /// tx.send("world").unwrap(); + /// + /// assert_eq!(Some("hello"), rx.recv().await); + /// assert_eq!(Some("world"), rx.recv().await); + /// } + /// ``` + pub async fn recv(&mut self) -> Option<T> { + use crate::future::poll_fn; + + poll_fn(|cx| self.poll_recv(cx)).await + } + + /// Attempts to return a pending value on this receiver without blocking. + /// + /// This method will never block the caller in order to wait for data to + /// become available. Instead, this will always return immediately with + /// a possible option of pending data on the channel. + /// + /// This is useful for a flavor of "optimistic check" before deciding to + /// block on a receiver. + /// + /// Compared with recv, this function has two failure cases instead of + /// one (one for disconnection, one for an empty buffer). + pub fn try_recv(&mut self) -> Result<T, TryRecvError> { + self.chan.try_recv() + } + + /// Closes the receiving half of a channel, without dropping it. + /// + /// This prevents any further messages from being sent on the channel while + /// still enabling the receiver to drain messages that are buffered. + pub fn close(&mut self) { + self.chan.close(); + } +} + +#[cfg(feature = "stream")] +impl<T> crate::stream::Stream for UnboundedReceiver<T> { + type Item = T; + + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> { + self.poll_recv(cx) + } +} + +impl<T> UnboundedSender<T> { + pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> UnboundedSender<T> { + UnboundedSender { chan } + } + + /// Attempts to send a message on this `UnboundedSender` without blocking. + /// + /// If the receive half of the channel is closed, either due to [`close`] + /// being called or the [`UnboundedReceiver`] having been dropped, + /// the function returns an error. The error includes the value passed to `send`. + /// + /// [`close`]: UnboundedReceiver::close + /// [`UnboundedReceiver`]: UnboundedReceiver + pub fn send(&self, message: T) -> Result<(), SendError<T>> { + self.chan.send_unbounded(message)?; + Ok(()) + } +} diff --git a/third_party/rust/tokio/src/sync/mutex.rs b/third_party/rust/tokio/src/sync/mutex.rs new file mode 100644 index 0000000000..7167906de1 --- /dev/null +++ b/third_party/rust/tokio/src/sync/mutex.rs @@ -0,0 +1,228 @@ +//! An asynchronous `Mutex`-like type. +//! +//! This module provides [`Mutex`], a type that acts similarly to an asynchronous `Mutex`, with one +//! major difference: the [`MutexGuard`] returned by `lock` is not tied to the lifetime of the +//! `Mutex`. This enables you to acquire a lock, and then pass that guard into a future, and then +//! release it at some later point in time. +//! +//! This allows you to do something along the lines of: +//! +//! ```rust,no_run +//! use tokio::sync::Mutex; +//! use std::sync::Arc; +//! +//! #[tokio::main] +//! async fn main() { +//! let data1 = Arc::new(Mutex::new(0)); +//! let data2 = Arc::clone(&data1); +//! +//! 
tokio::spawn(async move {
+//!         let mut lock = data2.lock().await;
+//!         *lock += 1;
+//!     });
+//!
+//!     let mut lock = data1.lock().await;
+//!     *lock += 1;
+//! }
+//! ```
+//!
+//! Another example:
+//! ```rust,no_run
+//! #![warn(rust_2018_idioms)]
+//!
+//! use tokio::sync::Mutex;
+//! use std::sync::Arc;
+//!
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let count = Arc::new(Mutex::new(0));
+//!
+//!     for _ in 0..5 {
+//!         let my_count = Arc::clone(&count);
+//!         tokio::spawn(async move {
+//!             for _ in 0..10 {
+//!                 let mut lock = my_count.lock().await;
+//!                 *lock += 1;
+//!                 println!("{}", lock);
+//!             }
+//!         });
+//!     }
+//!
+//!     loop {
+//!         if *count.lock().await >= 50 {
+//!             break;
+//!         }
+//!     }
+//!     println!("Count hit 50.");
+//! }
+//! ```
+//! There are a few things of note here to pay attention to in this example.
+//! 1. The mutex is wrapped in an [`std::sync::Arc`] to allow it to be shared across threads.
+//! 2. Each spawned task obtains a lock and releases it on every iteration.
+//! 3. Mutation of the data the Mutex is protecting is done by de-referencing the obtained lock
+//!    as seen on lines 23 and 30.
+//!
+//! Tokio's Mutex works in a simple FIFO (first in, first out) style where, as requests for a lock
+//! are made, Tokio will queue them up and provide a lock when it is that requester's turn. In that
+//! way the Mutex is "fair" and predictable in how it distributes the locks to inner data. This is
+//! why the output of this program is an in-order count to 50. Locks are released and reacquired
+//! after every iteration, so basically, each thread goes to the back of the line after it increments
+//! the value once. Also, since there is only a single valid lock at any given time there is no
+//! possibility of a race condition when mutating the inner value.
+//!
+//! Note that in contrast to `std::sync::Mutex`, this implementation does not
+//! poison the mutex when a thread holding the `MutexGuard` panics. In such a
+//! case, the mutex will be unlocked. If the panic is caught, this might leave
+//! the data protected by the mutex in an inconsistent state.
+//!
+//! [`Mutex`]: struct@Mutex
+//! [`MutexGuard`]: struct@MutexGuard
+use crate::coop::CoopFutureExt;
+use crate::sync::batch_semaphore as semaphore;
+
+use std::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+/// An asynchronous mutual exclusion primitive useful for protecting shared data
+///
+/// Each mutex has a type parameter (`T`) which represents the data that it is protecting. The data
+/// can only be accessed through the RAII guards returned from `lock`, which
+/// guarantees that the data is only ever accessed when the mutex is locked.
+#[derive(Debug)]
+pub struct Mutex<T> {
+    c: UnsafeCell<T>,
+    s: semaphore::Semaphore,
+}
+
+/// A handle to a held `Mutex`.
+///
+/// As long as you have this guard, you have exclusive access to the underlying `T`. The guard
+/// internally keeps a reference-counted pointer to the original `Mutex`, so even if the lock goes
+/// away, the guard remains valid.
+///
+/// The lock is automatically released whenever the guard is dropped, at which point `lock`
+/// will succeed yet again.
+pub struct MutexGuard<'a, T> {
+    lock: &'a Mutex<T>,
+}
+
+// As long as T: Send, it's fine to send and share Mutex<T> between threads.
+// If T was not Send, sending and sharing a Mutex<T> would be bad, since you can access T through
+// Mutex<T>.
+unsafe impl<T> Send for Mutex<T> where T: Send {}
+unsafe impl<T> Sync for Mutex<T> where T: Send {}
+unsafe impl<'a, T> Sync for MutexGuard<'a, T> where T: Send + Sync {}
+
+/// Error returned from the [`Mutex::try_lock`] function.
+///
+/// A `try_lock` operation can only fail if the mutex is already locked.
+///
+/// [`Mutex::try_lock`]: Mutex::try_lock
+#[derive(Debug)]
+pub struct TryLockError(());
+
+impl fmt::Display for TryLockError {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "operation would block")
+    }
+}
+
+impl Error for TryLockError {}
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+    fn check_send<T: Send>() {}
+    fn check_unpin<T: Unpin>() {}
+    // This has to take a value, since the async fn's return type is unnameable.
+    fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+    fn check_send_sync<T: Send + Sync>() {}
+    check_send::<MutexGuard<'_, u32>>();
+    check_unpin::<Mutex<u32>>();
+    check_send_sync::<Mutex<u32>>();
+
+    let mutex = Mutex::new(1);
+    check_send_sync_val(mutex.lock());
+}
+
+impl<T> Mutex<T> {
+    /// Creates a new lock in an unlocked state ready for use.
+    pub fn new(t: T) -> Self {
+        Self {
+            c: UnsafeCell::new(t),
+            s: semaphore::Semaphore::new(1),
+        }
+    }
+
+    /// A future that resolves on acquiring the lock and returns the `MutexGuard`.
+    pub async fn lock(&self) -> MutexGuard<'_, T> {
+        self.s.acquire(1).cooperate().await.unwrap_or_else(|_| {
+            // The semaphore was closed, but we never explicitly close it, and we have a
+            // handle to it through the Arc, which means that this can never happen.
+            unreachable!()
+        });
+        MutexGuard { lock: self }
+    }
+
+    /// Tries to acquire the lock
+    pub fn try_lock(&self) -> Result<MutexGuard<'_, T>, TryLockError> {
+        match self.s.try_acquire(1) {
+            Ok(_) => Ok(MutexGuard { lock: self }),
+            Err(_) => Err(TryLockError(())),
+        }
+    }
+
+    /// Consumes the mutex, returning the underlying data.
+    pub fn into_inner(self) -> T {
+        self.c.into_inner()
+    }
+}
+
+impl<'a, T> Drop for MutexGuard<'a, T> {
+    fn drop(&mut self) {
+        self.lock.s.release(1)
+    }
+}
+
+impl<T> From<T> for Mutex<T> {
+    fn from(s: T) -> Self {
+        Self::new(s)
+    }
+}
+
+impl<T> Default for Mutex<T>
+where
+    T: Default,
+{
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+impl<'a, T> Deref for MutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.lock.c.get() }
+    }
+}
+
+impl<'a, T> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        unsafe { &mut *self.lock.c.get() }
+    }
+}
+
+impl<'a, T: fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: fmt::Display> fmt::Display for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
diff --git a/third_party/rust/tokio/src/sync/notify.rs b/third_party/rust/tokio/src/sync/notify.rs
new file mode 100644
index 0000000000..5cb41e89ea
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/notify.rs
@@ -0,0 +1,556 @@
+use crate::loom::sync::atomic::AtomicU8;
+use crate::loom::sync::Mutex;
+use crate::util::linked_list::{self, LinkedList};
+
+use std::cell::UnsafeCell;
+use std::future::Future;
+use std::marker::PhantomPinned;
+use std::pin::Pin;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::SeqCst;
+use std::task::{Context, Poll, Waker};
+
+/// Notify a single task to wake up.
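Stepping back to `Mutex::try_lock` above, which ships without a doc example: a minimal sketch of its failure and success paths, assuming the 0.2-era API in this file:

```rust
use tokio::sync::Mutex;

#[tokio::main]
async fn main() {
    let m = Mutex::new(0);

    let guard = m.lock().await;
    // While the guard is held, try_lock fails instead of waiting.
    assert!(m.try_lock().is_err());
    drop(guard);

    // Once the lock is released, try_lock succeeds immediately.
    *m.try_lock().unwrap() += 1;
    assert_eq!(m.into_inner(), 1);
}
```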
+///
+/// `Notify` provides a basic mechanism to notify a single task of an event.
+/// `Notify` itself does not carry any data. Instead, it is to be used to signal
+/// another task to perform an operation.
+///
+/// `Notify` can be thought of as a [`Semaphore`] starting with 0 permits.
+/// [`notified().await`] waits for a permit to become available, and [`notify()`]
+/// sets a permit **if there currently are no available permits**.
+///
+/// The synchronization details of `Notify` are similar to
+/// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`]
+/// value contains a single permit. [`notified().await`] waits for the permit to
+/// be made available, consumes the permit, and resumes. [`notify()`] sets the
+/// permit, waking a pending task if there is one.
+///
+/// If `notify()` is called **before** `notified().await`, then the next call to
+/// `notified().await` will complete immediately, consuming the permit. Any
+/// subsequent calls to `notified().await` will wait for a new permit.
+///
+/// If `notify()` is called **multiple** times before `notified().await`, only a
+/// **single** permit is stored. The next call to `notified().await` will
+/// complete immediately, but the one after will wait for a new permit.
+///
+/// # Examples
+///
+/// Basic usage.
+///
+/// ```
+/// use tokio::sync::Notify;
+/// use std::sync::Arc;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let notify = Arc::new(Notify::new());
+///     let notify2 = notify.clone();
+///
+///     tokio::spawn(async move {
+///         notify2.notified().await;
+///         println!("received notification");
+///     });
+///
+///     println!("sending notification");
+///     notify.notify();
+/// }
+/// ```
+///
+/// Unbounded mpsc channel.
+///
+/// ```
+/// use tokio::sync::Notify;
+///
+/// use std::collections::VecDeque;
+/// use std::sync::Mutex;
+///
+/// struct Channel<T> {
+///     values: Mutex<VecDeque<T>>,
+///     notify: Notify,
+/// }
+///
+/// impl<T> Channel<T> {
+///     pub fn send(&self, value: T) {
+///         self.values.lock().unwrap()
+///             .push_back(value);
+///
+///         // Notify the consumer a value is available
+///         self.notify.notify();
+///     }
+///
+///     pub async fn recv(&self) -> T {
+///         loop {
+///             // Drain values
+///             if let Some(value) = self.values.lock().unwrap().pop_front() {
+///                 return value;
+///             }
+///
+///             // Wait for values to be available
+///             self.notify.notified().await;
+///         }
+///     }
+/// }
+/// ```
+///
+/// [park]: std::thread::park
+/// [unpark]: std::thread::Thread::unpark
+/// [`notified().await`]: Notify::notified()
+/// [`notify()`]: Notify::notify()
+/// [`Semaphore`]: crate::sync::Semaphore
+#[derive(Debug)]
+pub struct Notify {
+    state: AtomicU8,
+    waiters: Mutex<LinkedList<Waiter>>,
+}
+
+#[derive(Debug)]
+struct Waiter {
+    /// Intrusive linked-list pointers
+    pointers: linked_list::Pointers<Waiter>,
+
+    /// Waiting task's waker
+    waker: Option<Waker>,
+
+    /// `true` if the notification has been assigned to this waiter.
+    notified: bool,
+
+    /// Should not be `Unpin`.
+    _p: PhantomPinned,
+}
+
+/// Future returned from `notified()`
+#[derive(Debug)]
+struct Notified<'a> {
+    /// The `Notify` being received on.
+    notify: &'a Notify,
+
+    /// The current state of the receiving process.
+    state: State,
+
+    /// Entry in the waiter `LinkedList`.
+ waiter: UnsafeCell<Waiter>, +} + +unsafe impl<'a> Send for Notified<'a> {} +unsafe impl<'a> Sync for Notified<'a> {} + +#[derive(Debug)] +enum State { + Init, + Waiting, + Done, +} + +/// Initial "idle" state +const EMPTY: u8 = 0; + +/// One or more threads are currently waiting to be notified. +const WAITING: u8 = 1; + +/// Pending notification +const NOTIFIED: u8 = 2; + +impl Notify { + /// Create a new `Notify`, initialized without a permit. + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::Notify; + /// + /// let notify = Notify::new(); + /// ``` + pub fn new() -> Notify { + Notify { + state: AtomicU8::new(0), + waiters: Mutex::new(LinkedList::new()), + } + } + + /// Wait for a notification. + /// + /// Each `Notify` value holds a single permit. If a permit is available from + /// an earlier call to [`notify()`], then `notified().await` will complete + /// immediately, consuming that permit. Otherwise, `notified().await` waits + /// for a permit to be made available by the next call to `notify()`. + /// + /// [`notify()`]: Notify::notify + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::Notify; + /// use std::sync::Arc; + /// + /// #[tokio::main] + /// async fn main() { + /// let notify = Arc::new(Notify::new()); + /// let notify2 = notify.clone(); + /// + /// tokio::spawn(async move { + /// notify2.notified().await; + /// println!("received notification"); + /// }); + /// + /// println!("sending notification"); + /// notify.notify(); + /// } + /// ``` + pub async fn notified(&self) { + Notified { + notify: self, + state: State::Init, + waiter: UnsafeCell::new(Waiter { + pointers: linked_list::Pointers::new(), + waker: None, + notified: false, + _p: PhantomPinned, + }), + } + .await + } + + /// Notifies a waiting task + /// + /// If a task is currently waiting, that task is notified. Otherwise, a + /// permit is stored in this `Notify` value and the **next** call to + /// [`notified().await`] will complete immediately consuming the permit made + /// available by this call to `notify()`. + /// + /// At most one permit may be stored by `Notify`. Many sequential calls to + /// `notify` will result in a single permit being stored. The next call to + /// `notified().await` will complete immediately, but the one after that + /// will wait. + /// + /// [`notified().await`]: Notify::notified() + /// + /// # Examples + /// + /// ``` + /// use tokio::sync::Notify; + /// use std::sync::Arc; + /// + /// #[tokio::main] + /// async fn main() { + /// let notify = Arc::new(Notify::new()); + /// let notify2 = notify.clone(); + /// + /// tokio::spawn(async move { + /// notify2.notified().await; + /// println!("received notification"); + /// }); + /// + /// println!("sending notification"); + /// notify.notify(); + /// } + /// ``` + pub fn notify(&self) { + // Load the current state + let mut curr = self.state.load(SeqCst); + + // If the state is `EMPTY`, transition to `NOTIFIED` and return. + while let EMPTY | NOTIFIED = curr { + // The compare-exchange from `NOTIFIED` -> `NOTIFIED` is intended. A + // happens-before synchronization must happen between this atomic + // operation and a task calling `notified().await`. + let res = self.state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst); + + match res { + // No waiters, no further work to do + Ok(_) => return, + Err(actual) => { + curr = actual; + } + } + } + + // There are waiters, the lock must be acquired to notify. + let mut waiters = self.waiters.lock().unwrap(); + + // The state must be reloaded while the lock is held. 
The state may only
+        // transition out of WAITING while the lock is held.
+        curr = self.state.load(SeqCst);
+
+        if let Some(waker) = notify_locked(&mut waiters, &self.state, curr) {
+            drop(waiters);
+            waker.wake();
+        }
+    }
+}
+
+impl Default for Notify {
+    fn default() -> Notify {
+        Notify::new()
+    }
+}
+
+fn notify_locked(waiters: &mut LinkedList<Waiter>, state: &AtomicU8, curr: u8) -> Option<Waker> {
+    loop {
+        match curr {
+            EMPTY | NOTIFIED => {
+                let res = state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst);
+
+                match res {
+                    Ok(_) => return None,
+                    Err(actual) => {
+                        assert!(actual == EMPTY || actual == NOTIFIED);
+                        state.store(NOTIFIED, SeqCst);
+                        return None;
+                    }
+                }
+            }
+            WAITING => {
+                // At this point, it is guaranteed that the state will not
+                // concurrently change as holding the lock is required to
+                // transition **out** of `WAITING`.
+                //
+                // Get a pending waiter
+                let mut waiter = waiters.pop_back().unwrap();
+
+                // Safety: `waiters` lock is still held.
+                let waiter = unsafe { waiter.as_mut() };
+
+                assert!(!waiter.notified);
+
+                waiter.notified = true;
+                let waker = waiter.waker.take();
+
+                if waiters.is_empty() {
+                    // As this is the **final** waiter in the list, the state
+                    // must be transitioned to `EMPTY`. As transitioning
+                    // **from** `WAITING` requires the lock to be held, a
+                    // `store` is sufficient.
+                    state.store(EMPTY, SeqCst);
+                }
+
+                return waker;
+            }
+            _ => unreachable!(),
+        }
+    }
+}
+
+// ===== impl Notified =====
+
+impl Notified<'_> {
+    /// A custom `project` implementation is used in place of `pin-project-lite`
+    /// as a custom drop implementation is needed.
+    fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &UnsafeCell<Waiter>) {
+        unsafe {
+            // Safety: both `notify` and `state` are `Unpin`.
+
+            is_unpin::<&Notify>();
+            is_unpin::<AtomicU8>();
+
+            let me = self.get_unchecked_mut();
+            (&me.notify, &mut me.state, &me.waiter)
+        }
+    }
+}
+
+impl Future for Notified<'_> {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+        use State::*;
+
+        let (notify, state, waiter) = self.project();
+
+        loop {
+            match *state {
+                Init => {
+                    // Optimistically try acquiring a pending notification
+                    let res = notify
+                        .state
+                        .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst);
+
+                    if res.is_ok() {
+                        // Acquired the notification
+                        *state = Done;
+                        return Poll::Ready(());
+                    }
+
+                    // Acquire the lock and attempt to transition to the waiting
+                    // state.
+                    let mut waiters = notify.waiters.lock().unwrap();
+
+                    // Reload the state with the lock held
+                    let mut curr = notify.state.load(SeqCst);
+
+                    // Transition the state to WAITING.
+                    loop {
+                        match curr {
+                            EMPTY => {
+                                // Transition to WAITING
+                                let res = notify
+                                    .state
+                                    .compare_exchange(EMPTY, WAITING, SeqCst, SeqCst);
+
+                                if let Err(actual) = res {
+                                    assert_eq!(actual, NOTIFIED);
+                                    curr = actual;
+                                } else {
+                                    break;
+                                }
+                            }
+                            WAITING => break,
+                            NOTIFIED => {
+                                // Try consuming the notification
+                                let res = notify
+                                    .state
+                                    .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst);
+
+                                match res {
+                                    Ok(_) => {
+                                        // Acquired the notification
+                                        *state = Done;
+                                        return Poll::Ready(());
+                                    }
+                                    Err(actual) => {
+                                        assert_eq!(actual, EMPTY);
+                                        curr = actual;
+                                    }
+                                }
+                            }
+                            _ => unreachable!(),
+                        }
+                    }
+
+                    // Safety: called while locked.
+                    unsafe {
+                        (*waiter.get()).waker = Some(cx.waker().clone());
+                    }
+
+                    // Insert the waiter into the linked list
+                    //
+                    // safety: pointers from `UnsafeCell` are never null.
waiters.push_front(unsafe { NonNull::new_unchecked(waiter.get()) });
+
+                    *state = Waiting;
+                }
+                Waiting => {
+                    // Currently in the "Waiting" state, implying the caller has
+                    // a waiter stored in the waiter list (guarded by
+                    // `notify.waiters`). In order to access the waker fields,
+                    // we must hold the lock.
+
+                    let waiters = notify.waiters.lock().unwrap();
+
+                    // Safety: called while locked
+                    let w = unsafe { &mut *waiter.get() };
+
+                    if w.notified {
+                        // Our waker has been notified. Reset the fields and
+                        // remove it from the list.
+                        w.waker = None;
+                        w.notified = false;
+
+                        *state = Done;
+                    } else {
+                        // Update the waker, if necessary.
+                        if !w.waker.as_ref().unwrap().will_wake(cx.waker()) {
+                            w.waker = Some(cx.waker().clone());
+                        }
+
+                        return Poll::Pending;
+                    }
+
+                    // Explicit drop of the lock to indicate the scope that the
+                    // lock is held. Because holding the lock is required to
+                    // ensure safe access to fields not held within the lock, it
+                    // is helpful to visualize the scope of the critical
+                    // section.
+                    drop(waiters);
+                }
+                Done => {
+                    return Poll::Ready(());
+                }
+            }
+        }
+    }
+}
+
+impl Drop for Notified<'_> {
+    fn drop(&mut self) {
+        use State::*;
+
+        // Safety: The type only transitions to a "Waiting" state when pinned.
+        let (notify, state, waiter) = unsafe { Pin::new_unchecked(self).project() };
+
+        // This is where we ensure safety. The `Notified` value is being
+        // dropped, which means we must ensure that the waiter entry is no
+        // longer stored in the linked list.
+        if let Waiting = *state {
+            let mut notify_state = WAITING;
+            let mut waiters = notify.waiters.lock().unwrap();
+
+            // `Notify.state` may be in any of the three states (Empty, Waiting,
+            // Notified). It doesn't actually matter what the atomic is set to
+            // at this point. We hold the lock and will ensure the atomic is in
+            // the correct state once the lock is dropped.
+            //
+            // Because the atomic state is not checked, at first glance, it may
+            // seem like this routine does not handle the case where the
+            // receiver is notified but has not yet observed the notification.
+            // If this happens, no matter how many notifications happen between
+            // this receiver being notified and the receive future dropping, all
+            // we need to do is ensure that one notification is returned back to
+            // the `Notify`. This is done by calling `notify_locked` if `self`
+            // has the `notified` flag set.
+
+            // remove the entry from the list
+            //
+            // safety: the waiter is only added to `waiters` by virtue of it
+            // being the only `LinkedList` available to the type.
+            unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) };
+
+            if waiters.is_empty() {
+                notify_state = EMPTY;
+                // If the state *should* be `NOTIFIED`, the call to
+                // `notify_locked` below will end up doing the
+                // `store(NOTIFIED)`. If a concurrent receiver races and
+                // observes the incorrect `EMPTY` state, it will then obtain the
+                // lock and block until `notify.state` is in the correct final
+                // state.
+                notify.state.store(EMPTY, SeqCst);
+            }
+
+            // See if the node was notified but not received. In this case, the
+            // notification must be sent to another waiter.
+            //
+            // Safety: with the entry removed from the linked list, there can be
+            // no concurrent access to the entry
+            let notified = unsafe { (*waiter.get()).notified };
+
+            if notified {
+                if let Some(waker) = notify_locked(&mut waiters, &notify.state, notify_state) {
+                    drop(waiters);
+                    waker.wake();
+                }
+            }
+        }
+    }
+}
+
+/// # Safety
+///
+/// `Waiter` is forced to be !Unpin.
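Before the `linked_list::Link` impl below, an aside: the permit-coalescing rule documented on `Notify` earlier (multiple `notify` calls store only a single permit) can be exercised directly. A small sketch against the API in this file:

```rust
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let notify = Notify::new();

    // Two notifications arrive before anyone waits...
    notify.notify();
    notify.notify();

    // ...but they coalesce into one stored permit, so the first wait
    // returns immediately; a second wait here would block forever.
    notify.notified().await;
    println!("consumed the single stored permit");
}
```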
+unsafe impl linked_list::Link for Waiter { + type Handle = NonNull<Waiter>; + type Target = Waiter; + + fn as_raw(handle: &NonNull<Waiter>) -> NonNull<Waiter> { + *handle + } + + unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> { + ptr + } + + unsafe fn pointers(mut target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> { + NonNull::from(&mut target.as_mut().pointers) + } +} + +fn is_unpin<T: Unpin>() {} diff --git a/third_party/rust/tokio/src/sync/oneshot.rs b/third_party/rust/tokio/src/sync/oneshot.rs new file mode 100644 index 0000000000..62ad484eec --- /dev/null +++ b/third_party/rust/tokio/src/sync/oneshot.rs @@ -0,0 +1,784 @@ +#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))] + +//! A channel for sending a single message between asynchronous tasks. + +use crate::loom::cell::UnsafeCell; +use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::Arc; + +use std::fmt; +use std::future::Future; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::sync::atomic::Ordering::{self, AcqRel, Acquire}; +use std::task::Poll::{Pending, Ready}; +use std::task::{Context, Poll, Waker}; + +/// Sends a value to the associated `Receiver`. +/// +/// Instances are created by the [`channel`](fn@channel) function. +#[derive(Debug)] +pub struct Sender<T> { + inner: Option<Arc<Inner<T>>>, +} + +/// Receive a value from the associated `Sender`. +/// +/// Instances are created by the [`channel`](fn@channel) function. +#[derive(Debug)] +pub struct Receiver<T> { + inner: Option<Arc<Inner<T>>>, +} + +pub mod error { + //! Oneshot error types + + use std::fmt; + + /// Error returned by the `Future` implementation for `Receiver`. + #[derive(Debug, Eq, PartialEq)] + pub struct RecvError(pub(super) ()); + + /// Error returned by the `try_recv` function on `Receiver`. + #[derive(Debug, Eq, PartialEq)] + pub enum TryRecvError { + /// The send half of the channel has not yet sent a value. + Empty, + + /// The send half of the channel was dropped without sending a value. + Closed, + } + + // ===== impl RecvError ===== + + impl fmt::Display for RecvError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "channel closed") + } + } + + impl std::error::Error for RecvError {} + + // ===== impl TryRecvError ===== + + impl fmt::Display for TryRecvError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TryRecvError::Empty => write!(fmt, "channel empty"), + TryRecvError::Closed => write!(fmt, "channel closed"), + } + } + } + + impl std::error::Error for TryRecvError {} +} + +use self::error::*; + +struct Inner<T> { + /// Manages the state of the inner cell + state: AtomicUsize, + + /// The value. This is set by `Sender` and read by `Receiver`. The state of + /// the cell is tracked by `state`. + value: UnsafeCell<Option<T>>, + + /// The task to notify when the receiver drops without consuming the value. + tx_task: UnsafeCell<MaybeUninit<Waker>>, + + /// The task to notify when the value is sent. + rx_task: UnsafeCell<MaybeUninit<Waker>>, +} + +#[derive(Clone, Copy)] +struct State(usize); + +/// Create a new one-shot channel for sending single values across asynchronous +/// tasks. +/// +/// The function returns separate "send" and "receive" handles. The `Sender` +/// handle is used by the producer to send the value. The `Receiver` handle is +/// used by the consumer to receive the value. +/// +/// Each handle can be used on separate tasks. 
+/// +/// # Examples +/// +/// ``` +/// use tokio::sync::oneshot; +/// +/// #[tokio::main] +/// async fn main() { +/// let (tx, rx) = oneshot::channel(); +/// +/// tokio::spawn(async move { +/// if let Err(_) = tx.send(3) { +/// println!("the receiver dropped"); +/// } +/// }); +/// +/// match rx.await { +/// Ok(v) => println!("got = {:?}", v), +/// Err(_) => println!("the sender dropped"), +/// } +/// } +/// ``` +pub fn channel<T>() -> (Sender<T>, Receiver<T>) { + #[allow(deprecated)] + let inner = Arc::new(Inner { + state: AtomicUsize::new(State::new().as_usize()), + value: UnsafeCell::new(None), + tx_task: UnsafeCell::new(MaybeUninit::uninit()), + rx_task: UnsafeCell::new(MaybeUninit::uninit()), + }); + + let tx = Sender { + inner: Some(inner.clone()), + }; + let rx = Receiver { inner: Some(inner) }; + + (tx, rx) +} + +impl<T> Sender<T> { + /// Attempts to send a value on this channel, returning it back if it could + /// not be sent. + /// + /// The function consumes `self` as only one value may ever be sent on a + /// one-shot channel. + /// + /// A successful send occurs when it is determined that the other end of the + /// channel has not hung up already. An unsuccessful send would be one where + /// the corresponding receiver has already been deallocated. Note that a + /// return value of `Err` means that the data will never be received, but + /// a return value of `Ok` does *not* mean that the data will be received. + /// It is possible for the corresponding receiver to hang up immediately + /// after this function returns `Ok`. + /// + /// # Examples + /// + /// Send a value to another task + /// + /// ``` + /// use tokio::sync::oneshot; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, rx) = oneshot::channel(); + /// + /// tokio::spawn(async move { + /// if let Err(_) = tx.send(3) { + /// println!("the receiver dropped"); + /// } + /// }); + /// + /// match rx.await { + /// Ok(v) => println!("got = {:?}", v), + /// Err(_) => println!("the sender dropped"), + /// } + /// } + /// ``` + pub fn send(mut self, t: T) -> Result<(), T> { + let inner = self.inner.take().unwrap(); + + inner.value.with_mut(|ptr| unsafe { + *ptr = Some(t); + }); + + if !inner.complete() { + return Err(inner + .value + .with_mut(|ptr| unsafe { (*ptr).take() }.unwrap())); + } + + Ok(()) + } + + #[doc(hidden)] // TODO: remove + pub fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> { + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + let inner = self.inner.as_ref().unwrap(); + + let mut state = State::load(&inner.state, Acquire); + + if state.is_closed() { + return Poll::Ready(()); + } + + if state.is_tx_task_set() { + let will_notify = unsafe { inner.with_tx_task(|w| w.will_wake(cx.waker())) }; + + if !will_notify { + state = State::unset_tx_task(&inner.state); + + if state.is_closed() { + // Set the flag again so that the waker is released in drop + State::set_tx_task(&inner.state); + return Ready(()); + } else { + unsafe { inner.drop_tx_task() }; + } + } + } + + if !state.is_tx_task_set() { + // Attempt to set the task + unsafe { + inner.set_tx_task(cx); + } + + // Update the state + state = State::set_tx_task(&inner.state); + + if state.is_closed() { + return Ready(()); + } + } + + Pending + } + + /// Waits for the associated [`Receiver`] handle to close. + /// + /// A [`Receiver`] is closed by either calling [`close`] explicitly or the + /// [`Receiver`] value is dropped. 
+ ///
+ /// This function is useful when paired with `select!` to abort a
+ /// computation when the receiver is no longer interested in the result.
+ ///
+ /// # Return
+ ///
+ /// Returns a `Future` which must be awaited on.
+ ///
+ /// [`Receiver`]: Receiver
+ /// [`close`]: Receiver::close
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, rx) = oneshot::channel::<()>();
+ ///
+ /// tokio::spawn(async move {
+ /// drop(rx);
+ /// });
+ ///
+ /// tx.closed().await;
+ /// println!("the receiver dropped");
+ /// }
+ /// ```
+ ///
+ /// Paired with select
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ /// use tokio::time::{self, Duration};
+ ///
+ /// use futures::{select, FutureExt};
+ ///
+ /// async fn compute() -> String {
+ /// // Complex computation returning a `String`
+ /// # "hello".to_string()
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, rx) = oneshot::channel();
+ ///
+ /// tokio::spawn(async move {
+ /// select! {
+ /// _ = tx.closed().fuse() => {
+ /// // The receiver dropped, no need to do any further work
+ /// }
+ /// value = compute().fuse() => {
+ /// tx.send(value).unwrap()
+ /// }
+ /// }
+ /// });
+ ///
+ /// // Wait for up to 10 seconds
+ /// let _ = time::timeout(Duration::from_secs(10), rx).await;
+ /// }
+ /// ```
+ pub async fn closed(&mut self) {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| self.poll_closed(cx)).await
+ }
+
+ /// Returns `true` if the associated [`Receiver`] handle has been dropped.
+ ///
+ /// A [`Receiver`] is closed either by calling [`close`] explicitly or by
+ /// dropping the [`Receiver`] value.
+ ///
+ /// If `true` is returned, a call to `send` will always result in an error.
+ ///
+ /// [`Receiver`]: Receiver
+ /// [`close`]: Receiver::close
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, rx) = oneshot::channel();
+ ///
+ /// assert!(!tx.is_closed());
+ ///
+ /// drop(rx);
+ ///
+ /// assert!(tx.is_closed());
+ /// assert!(tx.send("never received").is_err());
+ /// }
+ /// ```
+ pub fn is_closed(&self) -> bool {
+ let inner = self.inner.as_ref().unwrap();
+
+ let state = State::load(&inner.state, Acquire);
+ state.is_closed()
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ if let Some(inner) = self.inner.as_ref() {
+ inner.complete();
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ /// Prevents the associated [`Sender`] handle from sending a value.
+ ///
+ /// Any `send` operation which happens after calling `close` is guaranteed
+ /// to fail. After calling `close`, `Receiver::poll` should be called to
+ /// receive a value if one was sent **before** the call to `close`
+ /// completed.
+ ///
+ /// This function is useful to perform a graceful shutdown and ensure that a
+ /// value will not be sent into the channel and never received.
+ /// + /// [`Sender`]: Sender + /// + /// # Examples + /// + /// Prevent a value from being sent + /// + /// ``` + /// use tokio::sync::oneshot; + /// use tokio::sync::oneshot::error::TryRecvError; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = oneshot::channel(); + /// + /// assert!(!tx.is_closed()); + /// + /// rx.close(); + /// + /// assert!(tx.is_closed()); + /// assert!(tx.send("never received").is_err()); + /// + /// match rx.try_recv() { + /// Err(TryRecvError::Closed) => {} + /// _ => unreachable!(), + /// } + /// } + /// ``` + /// + /// Receive a value sent **before** calling `close` + /// + /// ``` + /// use tokio::sync::oneshot; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = oneshot::channel(); + /// + /// assert!(tx.send("will receive").is_ok()); + /// + /// rx.close(); + /// + /// let msg = rx.try_recv().unwrap(); + /// assert_eq!(msg, "will receive"); + /// } + /// ``` + pub fn close(&mut self) { + let inner = self.inner.as_ref().unwrap(); + inner.close(); + } + + /// Attempts to receive a value. + /// + /// If a pending value exists in the channel, it is returned. If no value + /// has been sent, the current task **will not** be registered for + /// future notification. + /// + /// This function is useful to call from outside the context of an + /// asynchronous task. + /// + /// # Return + /// + /// - `Ok(T)` if a value is pending in the channel. + /// - `Err(TryRecvError::Empty)` if no value has been sent yet. + /// - `Err(TryRecvError::Closed)` if the sender has dropped without sending + /// a value. + /// + /// # Examples + /// + /// `try_recv` before a value is sent, then after. + /// + /// ``` + /// use tokio::sync::oneshot; + /// use tokio::sync::oneshot::error::TryRecvError; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = oneshot::channel(); + /// + /// match rx.try_recv() { + /// // The channel is currently empty + /// Err(TryRecvError::Empty) => {} + /// _ => unreachable!(), + /// } + /// + /// // Send a value + /// tx.send("hello").unwrap(); + /// + /// match rx.try_recv() { + /// Ok(value) => assert_eq!(value, "hello"), + /// _ => unreachable!(), + /// } + /// } + /// ``` + /// + /// `try_recv` when the sender dropped before sending a value + /// + /// ``` + /// use tokio::sync::oneshot; + /// use tokio::sync::oneshot::error::TryRecvError; + /// + /// #[tokio::main] + /// async fn main() { + /// let (tx, mut rx) = oneshot::channel::<()>(); + /// + /// drop(tx); + /// + /// match rx.try_recv() { + /// // The channel will never receive a value. 
+ /// Err(TryRecvError::Closed) => {} + /// _ => unreachable!(), + /// } + /// } + /// ``` + pub fn try_recv(&mut self) -> Result<T, TryRecvError> { + let result = if let Some(inner) = self.inner.as_ref() { + let state = State::load(&inner.state, Acquire); + + if state.is_complete() { + match unsafe { inner.consume_value() } { + Some(value) => Ok(value), + None => Err(TryRecvError::Closed), + } + } else if state.is_closed() { + Err(TryRecvError::Closed) + } else { + // Not ready, this does not clear `inner` + return Err(TryRecvError::Empty); + } + } else { + panic!("called after complete"); + }; + + self.inner = None; + result + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + if let Some(inner) = self.inner.as_ref() { + inner.close(); + } + } +} + +impl<T> Future for Receiver<T> { + type Output = Result<T, RecvError>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + // If `inner` is `None`, then `poll()` has already completed. + let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() { + ready!(inner.poll_recv(cx))? + } else { + panic!("called after complete"); + }; + + self.inner = None; + Ready(Ok(ret)) + } +} + +impl<T> Inner<T> { + fn complete(&self) -> bool { + let prev = State::set_complete(&self.state); + + if prev.is_closed() { + return false; + } + + if prev.is_rx_task_set() { + // TODO: Consume waker? + unsafe { + self.with_rx_task(Waker::wake_by_ref); + } + } + + true + } + + fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> { + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + // Load the state + let mut state = State::load(&self.state, Acquire); + + if state.is_complete() { + match unsafe { self.consume_value() } { + Some(value) => Ready(Ok(value)), + None => Ready(Err(RecvError(()))), + } + } else if state.is_closed() { + Ready(Err(RecvError(()))) + } else { + if state.is_rx_task_set() { + let will_notify = unsafe { self.with_rx_task(|w| w.will_wake(cx.waker())) }; + + // Check if the task is still the same + if !will_notify { + // Unset the task + state = State::unset_rx_task(&self.state); + if state.is_complete() { + // Set the flag again so that the waker is released in drop + State::set_rx_task(&self.state); + + return match unsafe { self.consume_value() } { + Some(value) => Ready(Ok(value)), + None => Ready(Err(RecvError(()))), + }; + } else { + unsafe { self.drop_rx_task() }; + } + } + } + + if !state.is_rx_task_set() { + // Attempt to set the task + unsafe { + self.set_rx_task(cx); + } + + // Update the state + state = State::set_rx_task(&self.state); + + if state.is_complete() { + match unsafe { self.consume_value() } { + Some(value) => Ready(Ok(value)), + None => Ready(Err(RecvError(()))), + } + } else { + Pending + } + } else { + Pending + } + } + } + + /// Called by `Receiver` to indicate that the value will never be received. + fn close(&self) { + let prev = State::set_closed(&self.state); + + if prev.is_tx_task_set() && !prev.is_complete() { + unsafe { + self.with_tx_task(Waker::wake_by_ref); + } + } + } + + /// Consumes the value. This function does not check `state`. 
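+ ///
+ /// # Safety
+ ///
+ /// The caller must have exclusive access to the value cell; in practice
+ /// this means having observed the `VALUE_SENT` bit via an `Acquire` load
+ /// of `state`, as the callers in `try_recv` and `poll_recv` do.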
+ unsafe fn consume_value(&self) -> Option<T> { + self.value.with_mut(|ptr| (*ptr).take()) + } + + unsafe fn with_rx_task<F, R>(&self, f: F) -> R + where + F: FnOnce(&Waker) -> R, + { + self.rx_task.with(|ptr| { + let waker: *const Waker = (&*ptr).as_ptr(); + f(&*waker) + }) + } + + unsafe fn with_tx_task<F, R>(&self, f: F) -> R + where + F: FnOnce(&Waker) -> R, + { + self.tx_task.with(|ptr| { + let waker: *const Waker = (&*ptr).as_ptr(); + f(&*waker) + }) + } + + unsafe fn drop_rx_task(&self) { + self.rx_task.with_mut(|ptr| { + let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); + ptr.drop_in_place(); + }); + } + + unsafe fn drop_tx_task(&self) { + self.tx_task.with_mut(|ptr| { + let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); + ptr.drop_in_place(); + }); + } + + unsafe fn set_rx_task(&self, cx: &mut Context<'_>) { + self.rx_task.with_mut(|ptr| { + let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); + ptr.write(cx.waker().clone()); + }); + } + + unsafe fn set_tx_task(&self, cx: &mut Context<'_>) { + self.tx_task.with_mut(|ptr| { + let ptr: *mut Waker = (&mut *ptr).as_mut_ptr(); + ptr.write(cx.waker().clone()); + }); + } +} + +unsafe impl<T: Send> Send for Inner<T> {} +unsafe impl<T: Send> Sync for Inner<T> {} + +impl<T> Drop for Inner<T> { + fn drop(&mut self) { + let state = State(self.state.with_mut(|v| *v)); + + if state.is_rx_task_set() { + unsafe { + self.drop_rx_task(); + } + } + + if state.is_tx_task_set() { + unsafe { + self.drop_tx_task(); + } + } + } +} + +impl<T: fmt::Debug> fmt::Debug for Inner<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use std::sync::atomic::Ordering::Relaxed; + + fmt.debug_struct("Inner") + .field("state", &State::load(&self.state, Relaxed)) + .finish() + } +} + +const RX_TASK_SET: usize = 0b00001; +const VALUE_SENT: usize = 0b00010; +const CLOSED: usize = 0b00100; +const TX_TASK_SET: usize = 0b01000; + +impl State { + fn new() -> State { + State(0) + } + + fn is_complete(self) -> bool { + self.0 & VALUE_SENT == VALUE_SENT + } + + fn set_complete(cell: &AtomicUsize) -> State { + // TODO: This could be `Release`, followed by an `Acquire` fence *if* + // the `RX_TASK_SET` flag is set. However, `loom` does not support + // fences yet. + let val = cell.fetch_or(VALUE_SENT, AcqRel); + State(val) + } + + fn is_rx_task_set(self) -> bool { + self.0 & RX_TASK_SET == RX_TASK_SET + } + + fn set_rx_task(cell: &AtomicUsize) -> State { + let val = cell.fetch_or(RX_TASK_SET, AcqRel); + State(val | RX_TASK_SET) + } + + fn unset_rx_task(cell: &AtomicUsize) -> State { + let val = cell.fetch_and(!RX_TASK_SET, AcqRel); + State(val & !RX_TASK_SET) + } + + fn is_closed(self) -> bool { + self.0 & CLOSED == CLOSED + } + + fn set_closed(cell: &AtomicUsize) -> State { + // Acquire because we want all later writes (attempting to poll) to be + // ordered after this. 
+ let val = cell.fetch_or(CLOSED, Acquire); + State(val) + } + + fn set_tx_task(cell: &AtomicUsize) -> State { + let val = cell.fetch_or(TX_TASK_SET, AcqRel); + State(val | TX_TASK_SET) + } + + fn unset_tx_task(cell: &AtomicUsize) -> State { + let val = cell.fetch_and(!TX_TASK_SET, AcqRel); + State(val & !TX_TASK_SET) + } + + fn is_tx_task_set(self) -> bool { + self.0 & TX_TASK_SET == TX_TASK_SET + } + + fn as_usize(self) -> usize { + self.0 + } + + fn load(cell: &AtomicUsize, order: Ordering) -> State { + let val = cell.load(order); + State(val) + } +} + +impl fmt::Debug for State { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("State") + .field("is_complete", &self.is_complete()) + .field("is_closed", &self.is_closed()) + .field("is_rx_task_set", &self.is_rx_task_set()) + .field("is_tx_task_set", &self.is_tx_task_set()) + .finish() + } +} diff --git a/third_party/rust/tokio/src/sync/rwlock.rs b/third_party/rust/tokio/src/sync/rwlock.rs new file mode 100644 index 0000000000..68cf710e84 --- /dev/null +++ b/third_party/rust/tokio/src/sync/rwlock.rs @@ -0,0 +1,294 @@ +use crate::coop::CoopFutureExt; +use crate::sync::batch_semaphore::{AcquireError, Semaphore}; +use std::cell::UnsafeCell; +use std::ops; + +#[cfg(not(loom))] +const MAX_READS: usize = 32; + +#[cfg(loom)] +const MAX_READS: usize = 10; + +/// An asynchronous reader-writer lock +/// +/// This type of lock allows a number of readers or at most one writer at any +/// point in time. The write portion of this lock typically allows modification +/// of the underlying data (exclusive access) and the read portion of this lock +/// typically allows for read-only access (shared access). +/// +/// In comparison, a [`Mutex`] does not distinguish between readers or writers +/// that acquire the lock, therefore causing any tasks waiting for the lock to +/// become available to yield. An `RwLock` will allow any number of readers to +/// acquire the lock as long as a writer is not holding the lock. +/// +/// The priority policy of Tokio's read-write lock is _fair_ (or +/// [_write-preferring_]), in order to ensure that readers cannot starve +/// writers. Fairness is ensured using a first-in, first-out queue for the tasks +/// awaiting the lock; if a task that wishes to acquire the write lock is at the +/// head of the queue, read locks will not be given out until the write lock has +/// been released. This is in contrast to the Rust standard library's +/// `std::sync::RwLock`, where the priority policy is dependent on the +/// operating system's implementation. +/// +/// The type parameter `T` represents the data that this lock protects. It is +/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards +/// returned from the locking methods implement [`Deref`](https://doc.rust-lang.org/std/ops/trait.Deref.html) +/// (and [`DerefMut`](https://doc.rust-lang.org/std/ops/trait.DerefMut.html) +/// for the `write` methods) to allow access to the content of the lock. 
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::RwLock;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let lock = RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+/// let r1 = lock.read().await;
+/// let r2 = lock.read().await;
+/// assert_eq!(*r1, 5);
+/// assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+/// let mut w = lock.write().await;
+/// *w += 1;
+/// assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// }
+/// ```
+///
+/// [`Mutex`]: struct@super::Mutex
+/// [`RwLock`]: struct@RwLock
+/// [`RwLockReadGuard`]: struct@RwLockReadGuard
+/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
+/// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html
+/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
+#[derive(Debug)]
+pub struct RwLock<T> {
+ // Semaphore to coordinate read and write access to T
+ s: Semaphore,
+
+ // Inner data T
+ c: UnsafeCell<T>,
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] method on
+/// [`RwLock`].
+///
+/// [`read`]: method@RwLock::read
+#[derive(Debug)]
+pub struct RwLockReadGuard<'a, T> {
+ permit: ReleasingPermit<'a, T>,
+ lock: &'a RwLock<T>,
+}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] method on [`RwLock`].
+///
+/// [`write`]: method@RwLock::write
+/// [`RwLock`]: struct@RwLock
+#[derive(Debug)]
+pub struct RwLockWriteGuard<'a, T> {
+ permit: ReleasingPermit<'a, T>,
+ lock: &'a RwLock<T>,
+}
+
+// Wrapper around Permit that releases on Drop
+#[derive(Debug)]
+struct ReleasingPermit<'a, T> {
+ num_permits: u16,
+ lock: &'a RwLock<T>,
+}
+
+impl<'a, T> ReleasingPermit<'a, T> {
+ async fn acquire(
+ lock: &'a RwLock<T>,
+ num_permits: u16,
+ ) -> Result<ReleasingPermit<'a, T>, AcquireError> {
+ lock.s.acquire(num_permits).cooperate().await?;
+ Ok(Self { num_permits, lock })
+ }
+}
+
+impl<'a, T> Drop for ReleasingPermit<'a, T> {
+ fn drop(&mut self) {
+ self.lock.s.release(self.num_permits as usize);
+ }
+}
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+ fn check_send<T: Send>() {}
+ fn check_sync<T: Sync>() {}
+ fn check_unpin<T: Unpin>() {}
+ // This has to take a value, since the async fn's return type is unnameable.
+ fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+
+ check_send::<RwLock<u32>>();
+ check_sync::<RwLock<u32>>();
+ check_unpin::<RwLock<u32>>();
+
+ check_sync::<RwLockReadGuard<'_, u32>>();
+ check_unpin::<RwLockReadGuard<'_, u32>>();
+
+ check_sync::<RwLockWriteGuard<'_, u32>>();
+ check_unpin::<RwLockWriteGuard<'_, u32>>();
+
+ let rwlock = RwLock::new(0);
+ check_send_sync_val(rwlock.read());
+ check_send_sync_val(rwlock.write());
+}
+
+// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
+// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
+// RwLock<T>.
+unsafe impl<T> Send for RwLock<T> where T: Send {}
+unsafe impl<T> Sync for RwLock<T> where T: Send + Sync {}
+unsafe impl<'a, T> Sync for RwLockReadGuard<'a, T> where T: Send + Sync {}
+unsafe impl<'a, T> Sync for RwLockWriteGuard<'a, T> where T: Send + Sync {}
+
+impl<T> RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// ```
+ pub fn new(value: T) -> RwLock<T> {
+ RwLock {
+ c: UnsafeCell::new(value),
+ s: Semaphore::new(MAX_READS),
+ }
+ }
+
+ /// Locks this rwlock with shared read access, causing the current task
+ /// to yield until the lock has been acquired.
+ ///
+ /// The calling task will yield until there are no more writers which
+ /// hold the lock. There may be other readers currently inside the lock when
+ /// this method returns.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = lock.clone();
+ ///
+ /// let n = lock.read().await;
+ /// assert_eq!(*n, 1);
+ ///
+ /// tokio::spawn(async move {
+ /// // While main has an active read lock, we acquire one too.
+ /// let r = c_lock.read().await;
+ /// assert_eq!(*r, 1);
+ /// }).await.expect("The spawned task has panicked");
+ ///
+ /// // Drop the guard after the spawned task finishes.
+ /// drop(n);
+ /// }
+ /// ```
+ pub async fn read(&self) -> RwLockReadGuard<'_, T> {
+ let permit = ReleasingPermit::acquire(self, 1).await.unwrap_or_else(|_| {
+ // The semaphore was closed, but we never explicitly close it, and we have
+ // a handle to it through the Arc, which means that this can never happen.
+ unreachable!()
+ });
+ RwLockReadGuard { lock: self, permit }
+ }
+
+ /// Locks this rwlock with exclusive write access, causing the current task
+ /// to yield until the lock has been acquired.
+ ///
+ /// This function will not return while other writers or other readers
+ /// currently have access to the lock.
+ ///
+ /// Returns an RAII guard which will drop the write access of this rwlock
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = RwLock::new(1);
+ ///
+ /// let mut n = lock.write().await;
+ /// *n = 2;
+ /// }
+ /// ```
+ pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
+ let permit = ReleasingPermit::acquire(self, MAX_READS as u16)
+ .await
+ .unwrap_or_else(|_| {
+ // The semaphore was closed, but we never explicitly close it, and we
+ // have a handle to it through the Arc, which means that this can
+ // never happen.
+ unreachable!()
+ });
+
+ RwLockWriteGuard { lock: self, permit }
+ }
+
+ /// Consumes the lock, returning the underlying data.
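+ ///
+ /// A minimal example (illustrative):
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// assert_eq!(lock.into_inner(), 5);
+ /// ```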
+ pub fn into_inner(self) -> T {
+ self.c.into_inner()
+ }
+}
+
+impl<T> ops::Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.c.get() }
+ }
+}
+
+impl<T> ops::Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.c.get() }
+ }
+}
+
+impl<T> ops::DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.lock.c.get() }
+ }
+}
+
+impl<T> From<T> for RwLock<T> {
+ fn from(s: T) -> Self {
+ Self::new(s)
+ }
+}
+
+impl<T> Default for RwLock<T>
+where
+ T: Default,
+{
+ fn default() -> Self {
+ Self::new(T::default())
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/semaphore.rs b/third_party/rust/tokio/src/sync/semaphore.rs
new file mode 100644
index 0000000000..4cce7e8f5b
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/semaphore.rs
@@ -0,0 +1,105 @@
+use super::batch_semaphore as ll; // low level implementation
+use crate::coop::CoopFutureExt;
+
+/// Counting semaphore performing asynchronous permit acquisition.
+///
+/// A semaphore maintains a set of permits. Permits are used to synchronize
+/// access to a shared resource. A semaphore differs from a mutex in that it
+/// can allow more than one concurrent caller to access the shared resource at a
+/// time.
+///
+/// When `acquire` is called and the semaphore has remaining permits, the
+/// function immediately returns a permit. However, if no remaining permits are
+/// available, `acquire` (asynchronously) waits until an outstanding permit is
+/// dropped. At this point, the freed permit is assigned to the caller.
+#[derive(Debug)]
+pub struct Semaphore {
+ /// The low level semaphore
+ ll_sem: ll::Semaphore,
+}
+
+/// A permit from the semaphore
+#[must_use]
+#[derive(Debug)]
+pub struct SemaphorePermit<'a> {
+ sem: &'a Semaphore,
+ permits: u16,
+}
+
+/// Error returned from the [`Semaphore::try_acquire`] function.
+///
+/// A `try_acquire` operation can only fail if the semaphore has no available
+/// permits.
+///
+/// [`Semaphore::try_acquire`]: Semaphore::try_acquire
+#[derive(Debug)]
+pub struct TryAcquireError(());
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+ fn check_unpin<T: Unpin>() {}
+ // This has to take a value, since the async fn's return type is unnameable.
+ fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+ fn check_send_sync<T: Send + Sync>() {}
+ check_unpin::<Semaphore>();
+ check_unpin::<SemaphorePermit<'_>>();
+ check_send_sync::<Semaphore>();
+
+ let semaphore = Semaphore::new(0);
+ check_send_sync_val(semaphore.acquire());
+}
+
+impl Semaphore {
+ /// Creates a new semaphore with the initial number of permits
+ pub fn new(permits: usize) -> Self {
+ Self {
+ ll_sem: ll::Semaphore::new(permits),
+ }
+ }
+
+ /// Returns the current number of available permits
+ pub fn available_permits(&self) -> usize {
+ self.ll_sem.available_permits()
+ }
+
+ /// Adds `n` new permits to the semaphore.
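+ ///
+ /// A minimal example (illustrative):
+ ///
+ /// ```
+ /// use tokio::sync::Semaphore;
+ ///
+ /// let sem = Semaphore::new(1);
+ /// sem.add_permits(2);
+ /// assert_eq!(sem.available_permits(), 3);
+ /// ```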
+ pub fn add_permits(&self, n: usize) {
+ self.ll_sem.release(n);
+ }
+
+ /// Acquires a permit from the semaphore
+ pub async fn acquire(&self) -> SemaphorePermit<'_> {
+ self.ll_sem.acquire(1).cooperate().await.unwrap();
+ SemaphorePermit {
+ sem: &self,
+ permits: 1,
+ }
+ }
+
+ /// Tries to acquire a permit from the semaphore
+ pub fn try_acquire(&self) -> Result<SemaphorePermit<'_>, TryAcquireError> {
+ match self.ll_sem.try_acquire(1) {
+ Ok(_) => Ok(SemaphorePermit {
+ sem: self,
+ permits: 1,
+ }),
+ Err(_) => Err(TryAcquireError(())),
+ }
+ }
+}
+
+impl<'a> SemaphorePermit<'a> {
+ /// Forgets the permit **without** releasing it back to the semaphore.
+ /// This can be used to reduce the number of permits available from a
+ /// semaphore.
+ pub fn forget(mut self) {
+ self.permits = 0;
+ }
+}
+
+impl<'a> Drop for SemaphorePermit<'_> {
+ fn drop(&mut self) {
+ self.sem.add_permits(self.permits as usize);
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/semaphore_ll.rs b/third_party/rust/tokio/src/sync/semaphore_ll.rs
new file mode 100644
index 0000000000..0bdc4e2761
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/semaphore_ll.rs
@@ -0,0 +1,1220 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
+//! Thread-safe, asynchronous counting semaphore.
+//!
+//! A `Semaphore` instance holds a set of permits. Permits are used to
+//! synchronize access to a shared resource.
+//!
+//! Before accessing the shared resource, callers acquire a permit from the
+//! semaphore. Once the permit is acquired, the caller then enters the critical
+//! section. If no permits are available, then acquiring the semaphore returns
+//! `Pending`. The task is woken once a permit becomes available.
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize};
+use crate::loom::thread;
+
+use std::cmp;
+use std::fmt;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed, Release};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll};
+use std::usize;
+
+/// Futures-aware semaphore.
+pub(crate) struct Semaphore {
+ /// Tracks both the waiter queue tail pointer and the number of remaining
+ /// permits.
+ state: AtomicUsize,
+
+ /// Waiter queue head pointer.
+ head: UnsafeCell<NonNull<Waiter>>,
+
+ /// Coordinates access to the queue head.
+ rx_lock: AtomicUsize,
+
+ /// Stub waiter node used as part of the MPSC channel algorithm.
+ stub: Box<Waiter>,
+}
+
+/// A semaphore permit
+///
+/// Tracks the lifecycle of a semaphore permit.
+///
+/// An instance of `Permit` is intended to be used with a **single** instance of
+/// `Semaphore`. Using a single instance of `Permit` with multiple semaphore
+/// instances will result in unexpected behavior.
+///
+/// `Permit` does **not** release the permit back to the semaphore on drop. It
+/// is the user's responsibility to ensure that `Permit::release` is called
+/// before dropping the permit.
+#[derive(Debug)]
+pub(crate) struct Permit {
+ waiter: Option<Box<Waiter>>,
+ state: PermitState,
+}
+
+/// Error returned by `Permit::poll_acquire`.
+#[derive(Debug)]
+pub(crate) struct AcquireError(());
+
+/// Error returned by `Permit::try_acquire`.
+#[derive(Debug)]
+pub(crate) enum TryAcquireError {
+ Closed,
+ NoPermits,
+}
+
+/// Node used to notify the semaphore waiter when a permit is available.
+#[derive(Debug)]
+struct Waiter {
+ /// Stores waiter state.
+ ///
+ /// See `WaiterState` for more details.
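+ ///
+ /// (The low bits hold the `QUEUED`, `CLOSED` and `DROPPED` flags; the
+ /// remaining bits, shifted by `PERMIT_SHIFT`, count the permits still to
+ /// be acquired.)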
+ state: AtomicUsize, + + /// Task to wake when a permit is made available. + waker: AtomicWaker, + + /// Next pointer in the queue of waiting senders. + next: AtomicPtr<Waiter>, +} + +/// Semaphore state +/// +/// The 2 low bits track the modes. +/// +/// - Closed +/// - Full +/// +/// When not full, the rest of the `usize` tracks the total number of messages +/// in the channel. When full, the rest of the `usize` is a pointer to the tail +/// of the "waiting senders" queue. +#[derive(Copy, Clone)] +struct SemState(usize); + +/// Permit state +#[derive(Debug, Copy, Clone)] +enum PermitState { + /// Currently waiting for permits to be made available and assigned to the + /// waiter. + Waiting(u16), + + /// The number of acquired permits + Acquired(u16), +} + +/// State for an individual waker node +#[derive(Debug, Copy, Clone)] +struct WaiterState(usize); + +/// Waiter node is in the semaphore queue +const QUEUED: usize = 0b001; + +/// Semaphore has been closed, no more permits will be issued. +const CLOSED: usize = 0b10; + +/// The permit that owns the `Waiter` dropped. +const DROPPED: usize = 0b100; + +/// Represents "one requested permit" in the waiter state +const PERMIT_ONE: usize = 0b1000; + +/// Masks the waiter state to only contain bits tracking number of requested +/// permits. +const PERMIT_MASK: usize = usize::MAX - (PERMIT_ONE - 1); + +/// How much to shift a permit count to pack it into the waker state +const PERMIT_SHIFT: u32 = PERMIT_ONE.trailing_zeros(); + +/// Flag differentiating between available permits and waiter pointers. +/// +/// If we assume pointers are properly aligned, then the least significant bit +/// will always be zero. So, we use that bit to track if the value represents a +/// number. +const NUM_FLAG: usize = 0b01; + +/// Signal the semaphore is closed +const CLOSED_FLAG: usize = 0b10; + +/// Maximum number of permits a semaphore can manage +const MAX_PERMITS: usize = usize::MAX >> NUM_SHIFT; + +/// When representing "numbers", the state has to be shifted this much (to get +/// rid of the flag bit). +const NUM_SHIFT: usize = 2; + +// ===== impl Semaphore ===== + +impl Semaphore { + /// Creates a new semaphore with the initial number of permits + /// + /// # Panics + /// + /// Panics if `permits` is zero. + pub(crate) fn new(permits: usize) -> Semaphore { + let stub = Box::new(Waiter::new()); + let ptr = NonNull::from(&*stub); + + // Allocations are aligned + debug_assert!(ptr.as_ptr() as usize & NUM_FLAG == 0); + + let state = SemState::new(permits, &stub); + + Semaphore { + state: AtomicUsize::new(state.to_usize()), + head: UnsafeCell::new(ptr), + rx_lock: AtomicUsize::new(0), + stub, + } + } + + /// Returns the current number of available permits + pub(crate) fn available_permits(&self) -> usize { + let curr = SemState(self.state.load(Acquire)); + curr.available_permits() + } + + /// Tries to acquire the requested number of permits, registering the waiter + /// if not enough permits are available. 
+ fn poll_acquire( + &self, + cx: &mut Context<'_>, + num_permits: u16, + permit: &mut Permit, + ) -> Poll<Result<(), AcquireError>> { + self.poll_acquire2(num_permits, || { + let waiter = permit.waiter.get_or_insert_with(|| Box::new(Waiter::new())); + + waiter.waker.register_by_ref(cx.waker()); + + Some(NonNull::from(&**waiter)) + }) + } + + fn try_acquire(&self, num_permits: u16) -> Result<(), TryAcquireError> { + match self.poll_acquire2(num_permits, || None) { + Poll::Ready(res) => res.map_err(to_try_acquire), + Poll::Pending => Err(TryAcquireError::NoPermits), + } + } + + /// Polls for a permit + /// + /// Tries to acquire available permits first. If unable to acquire a + /// sufficient number of permits, the caller's waiter is pushed onto the + /// semaphore's wait queue. + fn poll_acquire2<F>( + &self, + num_permits: u16, + mut get_waiter: F, + ) -> Poll<Result<(), AcquireError>> + where + F: FnMut() -> Option<NonNull<Waiter>>, + { + let num_permits = num_permits as usize; + + // Load the current state + let mut curr = SemState(self.state.load(Acquire)); + + // Saves a ref to the waiter node + let mut maybe_waiter: Option<NonNull<Waiter>> = None; + + /// Used in branches where we attempt to push the waiter into the wait + /// queue but fail due to permits becoming available or the wait queue + /// transitioning to "closed". In this case, the waiter must be + /// transitioned back to the "idle" state. + macro_rules! revert_to_idle { + () => { + if let Some(waiter) = maybe_waiter { + unsafe { waiter.as_ref() }.revert_to_idle(); + } + }; + } + + loop { + let mut next = curr; + + if curr.is_closed() { + revert_to_idle!(); + return Ready(Err(AcquireError::closed())); + } + + let acquired = next.acquire_permits(num_permits, &self.stub); + + if !acquired { + // There are not enough available permits to satisfy the + // request. The permit transitions to a waiting state. + debug_assert!(curr.waiter().is_some() || curr.available_permits() < num_permits); + + if let Some(waiter) = maybe_waiter.as_ref() { + // Safety: the caller owns the waiter. + let w = unsafe { waiter.as_ref() }; + w.set_permits_to_acquire(num_permits - curr.available_permits()); + } else { + // Get the waiter for the permit. + if let Some(waiter) = get_waiter() { + // Safety: the caller owns the waiter. + let w = unsafe { waiter.as_ref() }; + + // If there are any currently available permits, the + // waiter acquires those immediately and waits for the + // remaining permits to become available. + if !w.to_queued(num_permits - curr.available_permits()) { + // The node is alrady queued, there is no further work + // to do. + return Pending; + } + + maybe_waiter = Some(waiter); + } else { + // No waiter, this indicates the caller does not wish to + // "wait", so there is nothing left to do. + return Pending; + } + } + + next.set_waiter(maybe_waiter.unwrap()); + } + + debug_assert_ne!(curr.0, 0); + debug_assert_ne!(next.0, 0); + + match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => { + if acquired { + // Successfully acquire permits **without** queuing the + // waiter node. The waiter node is not currently in the + // queue. + revert_to_idle!(); + return Ready(Ok(())); + } else { + // The node is pushed into the queue, the final step is + // to set the node's "next" pointer to return the wait + // queue into a consistent state. + + let prev_waiter = + curr.waiter().unwrap_or_else(|| NonNull::from(&*self.stub)); + + let waiter = maybe_waiter.unwrap(); + + // Link the nodes. 
+ // + // Safety: the mpsc algorithm guarantees the old tail of + // the queue is not removed from the queue during the + // push process. + unsafe { + prev_waiter.as_ref().store_next(waiter); + } + + return Pending; + } + } + Err(actual) => { + curr = SemState(actual); + } + } + } + } + + /// Closes the semaphore. This prevents the semaphore from issuing new + /// permits and notifies all pending waiters. + pub(crate) fn close(&self) { + // Acquire the `rx_lock`, setting the "closed" flag on the lock. + let prev = self.rx_lock.fetch_or(1, AcqRel); + + if prev != 0 { + // Another thread has the lock and will be responsible for notifying + // pending waiters. + return; + } + + self.add_permits_locked(0, true); + } + + /// Adds `n` new permits to the semaphore. + pub(crate) fn add_permits(&self, n: usize) { + if n == 0 { + return; + } + + // TODO: Handle overflow. A panic is not sufficient, the process must + // abort. + let prev = self.rx_lock.fetch_add(n << 1, AcqRel); + + if prev != 0 { + // Another thread has the lock and will be responsible for notifying + // pending waiters. + return; + } + + self.add_permits_locked(n, false); + } + + fn add_permits_locked(&self, mut rem: usize, mut closed: bool) { + while rem > 0 || closed { + if closed { + SemState::fetch_set_closed(&self.state, AcqRel); + } + + // Release the permits and notify + self.add_permits_locked2(rem, closed); + + let n = rem << 1; + + let actual = if closed { + let actual = self.rx_lock.fetch_sub(n | 1, AcqRel); + closed = false; + actual + } else { + let actual = self.rx_lock.fetch_sub(n, AcqRel); + closed = actual & 1 == 1; + actual + }; + + rem = (actual >> 1) - rem; + } + } + + /// Releases a specific amount of permits to the semaphore + /// + /// This function is called by `add_permits` after the add lock has been + /// acquired. + fn add_permits_locked2(&self, mut n: usize, closed: bool) { + // If closing the semaphore, we want to drain the entire queue. The + // number of permits being assigned doesn't matter. + if closed { + n = usize::MAX; + } + + 'outer: while n > 0 { + unsafe { + let mut head = self.head.with(|head| *head); + let mut next_ptr = head.as_ref().next.load(Acquire); + + let stub = self.stub(); + + if head == stub { + // The stub node indicates an empty queue. Any remaining + // permits get assigned back to the semaphore. + let next = match NonNull::new(next_ptr) { + Some(next) => next, + None => { + // This loop is not part of the standard intrusive mpsc + // channel algorithm. This is where we atomically pop + // the last task and add `n` to the remaining capacity. + // + // This modification to the pop algorithm works because, + // at this point, we have not done any work (only done + // reading). We have a *pretty* good idea that there is + // no concurrent pusher. + // + // The capacity is then atomically added by doing an + // AcqRel CAS on `state`. The `state` cell is the + // linchpin of the algorithm. + // + // By successfully CASing `head` w/ AcqRel, we ensure + // that, if any thread was racing and entered a push, we + // see that and abort pop, retrying as it is + // "inconsistent". + let mut curr = SemState::load(&self.state, Acquire); + + loop { + if curr.has_waiter(&self.stub) { + // A waiter is being added concurrently. + // This is the MPSC queue's "inconsistent" + // state and we must loop and try again. + thread::yield_now(); + continue 'outer; + } + + // If closing, nothing more to do. 
+ if closed { + debug_assert!(curr.is_closed(), "state = {:?}", curr); + return; + } + + let mut next = curr; + next.release_permits(n, &self.stub); + + match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => return, + Err(actual) => { + curr = SemState(actual); + } + } + } + } + }; + + self.head.with_mut(|head| *head = next); + head = next; + next_ptr = next.as_ref().next.load(Acquire); + } + + // `head` points to a waiter assign permits to the waiter. If + // all requested permits are satisfied, then we can continue, + // otherwise the node stays in the wait queue. + if !head.as_ref().assign_permits(&mut n, closed) { + assert_eq!(n, 0); + return; + } + + if let Some(next) = NonNull::new(next_ptr) { + self.head.with_mut(|head| *head = next); + + self.remove_queued(head, closed); + continue 'outer; + } + + let state = SemState::load(&self.state, Acquire); + + // This must always be a pointer as the wait list is not empty. + let tail = state.waiter().unwrap(); + + if tail != head { + // Inconsistent + thread::yield_now(); + continue 'outer; + } + + self.push_stub(closed); + + next_ptr = head.as_ref().next.load(Acquire); + + if let Some(next) = NonNull::new(next_ptr) { + self.head.with_mut(|head| *head = next); + + self.remove_queued(head, closed); + continue 'outer; + } + + // Inconsistent state, loop + thread::yield_now(); + } + } + } + + /// The wait node has had all of its permits assigned and has been removed + /// from the wait queue. + /// + /// Attempt to remove the QUEUED bit from the node. If additional permits + /// are concurrently requested, the node must be pushed back into the wait + /// queued. + fn remove_queued(&self, waiter: NonNull<Waiter>, closed: bool) { + let mut curr = WaiterState(unsafe { waiter.as_ref() }.state.load(Acquire)); + + loop { + if curr.is_dropped() { + // The Permit dropped, it is on us to release the memory + let _ = unsafe { Box::from_raw(waiter.as_ptr()) }; + return; + } + + // The node is removed from the queue. We attempt to unset the + // queued bit, but concurrently the waiter has requested more + // permits. When the waiter requested more permits, it saw the + // queued bit set so took no further action. This requires us to + // push the node back into the queue. + if curr.permits_to_acquire() > 0 { + // More permits are requested. The waiter must be re-queued + unsafe { + self.push_waiter(waiter, closed); + } + return; + } + + let mut next = curr; + next.unset_queued(); + + let w = unsafe { waiter.as_ref() }; + + match w.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => return, + Err(actual) => { + curr = WaiterState(actual); + } + } + } + } + + unsafe fn push_stub(&self, closed: bool) { + self.push_waiter(self.stub(), closed); + } + + unsafe fn push_waiter(&self, waiter: NonNull<Waiter>, closed: bool) { + // Set the next pointer. This does not require an atomic operation as + // this node is not accessible. The write will be flushed with the next + // operation + waiter.as_ref().next.store(ptr::null_mut(), Relaxed); + + // Update the tail to point to the new node. We need to see the previous + // node in order to update the next pointer as well as release `task` + // to any other threads calling `push`. + let next = SemState::new_ptr(waiter, closed); + let prev = SemState(self.state.swap(next.0, AcqRel)); + + debug_assert_eq!(closed, prev.is_closed()); + + // This function is only called when there are pending tasks. Because of + // this, the state must *always* be in pointer mode. 
+ let prev = prev.waiter().unwrap(); + + // No cycles plz + debug_assert_ne!(prev, waiter); + + // Release `task` to the consume end. + prev.as_ref().next.store(waiter.as_ptr(), Release); + } + + fn stub(&self) -> NonNull<Waiter> { + unsafe { NonNull::new_unchecked(&*self.stub as *const _ as *mut _) } + } +} + +impl Drop for Semaphore { + fn drop(&mut self) { + self.close(); + } +} + +impl fmt::Debug for Semaphore { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Semaphore") + .field("state", &SemState::load(&self.state, Relaxed)) + .field("head", &self.head.with(|ptr| ptr)) + .field("rx_lock", &self.rx_lock.load(Relaxed)) + .field("stub", &self.stub) + .finish() + } +} + +unsafe impl Send for Semaphore {} +unsafe impl Sync for Semaphore {} + +// ===== impl Permit ===== + +impl Permit { + /// Creates a new `Permit`. + /// + /// The permit begins in the "unacquired" state. + pub(crate) fn new() -> Permit { + use PermitState::Acquired; + + Permit { + waiter: None, + state: Acquired(0), + } + } + + /// Returns `true` if the permit has been acquired + #[allow(dead_code)] // may be used later + pub(crate) fn is_acquired(&self) -> bool { + match self.state { + PermitState::Acquired(num) if num > 0 => true, + _ => false, + } + } + + /// Tries to acquire the permit. If no permits are available, the current task + /// is notified once a new permit becomes available. + pub(crate) fn poll_acquire( + &mut self, + cx: &mut Context<'_>, + num_permits: u16, + semaphore: &Semaphore, + ) -> Poll<Result<(), AcquireError>> { + use std::cmp::Ordering::*; + use PermitState::*; + + match self.state { + Waiting(requested) => { + // There must be a waiter + let waiter = self.waiter.as_ref().unwrap(); + + match requested.cmp(&num_permits) { + Less => { + let delta = num_permits - requested; + + // Request additional permits. If the waiter has been + // dequeued, it must be re-queued. + if !waiter.try_inc_permits_to_acquire(delta as usize) { + let waiter = NonNull::from(&**waiter); + + // Ignore the result. The check for + // `permits_to_acquire()` will converge the state as + // needed + let _ = semaphore.poll_acquire2(delta, || Some(waiter))?; + } + + self.state = Waiting(num_permits); + } + Greater => { + let delta = requested - num_permits; + let to_release = waiter.try_dec_permits_to_acquire(delta as usize); + + semaphore.add_permits(to_release); + self.state = Waiting(num_permits); + } + Equal => {} + } + + if waiter.permits_to_acquire()? == 0 { + self.state = Acquired(requested); + return Ready(Ok(())); + } + + waiter.waker.register_by_ref(cx.waker()); + + if waiter.permits_to_acquire()? == 0 { + self.state = Acquired(requested); + return Ready(Ok(())); + } + + Pending + } + Acquired(acquired) => { + if acquired >= num_permits { + Ready(Ok(())) + } else { + match semaphore.poll_acquire(cx, num_permits - acquired, self)? { + Ready(()) => { + self.state = Acquired(num_permits); + Ready(Ok(())) + } + Pending => { + self.state = Waiting(num_permits); + Pending + } + } + } + } + } + } + + /// Tries to acquire the permit. 
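+ ///
+ /// Returns `Err(TryAcquireError::NoPermits)` if the permits cannot be
+ /// acquired without waiting, and `Err(TryAcquireError::Closed)` if the
+ /// semaphore has been closed.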
+ pub(crate) fn try_acquire( + &mut self, + num_permits: u16, + semaphore: &Semaphore, + ) -> Result<(), TryAcquireError> { + use PermitState::*; + + match self.state { + Waiting(requested) => { + // There must be a waiter + let waiter = self.waiter.as_ref().unwrap(); + + if requested > num_permits { + let delta = requested - num_permits; + let to_release = waiter.try_dec_permits_to_acquire(delta as usize); + + semaphore.add_permits(to_release); + self.state = Waiting(num_permits); + } + + let res = waiter.permits_to_acquire().map_err(to_try_acquire)?; + + if res == 0 { + if requested < num_permits { + // Try to acquire the additional permits + semaphore.try_acquire(num_permits - requested)?; + } + + self.state = Acquired(num_permits); + Ok(()) + } else { + Err(TryAcquireError::NoPermits) + } + } + Acquired(acquired) => { + if acquired < num_permits { + semaphore.try_acquire(num_permits - acquired)?; + self.state = Acquired(num_permits); + } + + Ok(()) + } + } + } + + /// Releases a permit back to the semaphore + pub(crate) fn release(&mut self, n: u16, semaphore: &Semaphore) { + let n = self.forget(n); + semaphore.add_permits(n as usize); + } + + /// Forgets the permit **without** releasing it back to the semaphore. + /// + /// After calling `forget`, `poll_acquire` is able to acquire new permit + /// from the sempahore. + /// + /// Repeatedly calling `forget` without associated calls to `add_permit` + /// will result in the semaphore losing all permits. + /// + /// Will forget **at most** the number of acquired permits. This number is + /// returned. + pub(crate) fn forget(&mut self, n: u16) -> u16 { + use PermitState::*; + + match self.state { + Waiting(requested) => { + let n = cmp::min(n, requested); + + // Decrement + let acquired = self + .waiter + .as_ref() + .unwrap() + .try_dec_permits_to_acquire(n as usize) as u16; + + if n == requested { + self.state = Acquired(0); + } else if acquired == requested - n { + self.state = Waiting(acquired); + } else { + self.state = Waiting(requested - n); + } + + acquired + } + Acquired(acquired) => { + let n = cmp::min(n, acquired); + self.state = Acquired(acquired - n); + n + } + } + } +} + +impl Default for Permit { + fn default() -> Self { + Self::new() + } +} + +impl Drop for Permit { + fn drop(&mut self) { + if let Some(waiter) = self.waiter.take() { + // Set the dropped flag + let state = WaiterState(waiter.state.fetch_or(DROPPED, AcqRel)); + + if state.is_queued() { + // The waiter is stored in the queue. The semaphore will drop it + std::mem::forget(waiter); + } + } + } +} + +// ===== impl AcquireError ==== + +impl AcquireError { + fn closed() -> AcquireError { + AcquireError(()) + } +} + +fn to_try_acquire(_: AcquireError) -> TryAcquireError { + TryAcquireError::Closed +} + +impl fmt::Display for AcquireError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "semaphore closed") + } +} + +impl std::error::Error for AcquireError {} + +// ===== impl TryAcquireError ===== + +impl TryAcquireError { + /// Returns `true` if the error was caused by a closed semaphore. + pub(crate) fn is_closed(&self) -> bool { + match self { + TryAcquireError::Closed => true, + _ => false, + } + } + + /// Returns `true` if the error was caused by calling `try_acquire` on a + /// semaphore with no available permits. 
+ pub(crate) fn is_no_permits(&self) -> bool { + match self { + TryAcquireError::NoPermits => true, + _ => false, + } + } +} + +impl fmt::Display for TryAcquireError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TryAcquireError::Closed => write!(fmt, "{}", "semaphore closed"), + TryAcquireError::NoPermits => write!(fmt, "{}", "no permits available"), + } + } +} + +impl std::error::Error for TryAcquireError {} + +// ===== impl Waiter ===== + +impl Waiter { + fn new() -> Waiter { + Waiter { + state: AtomicUsize::new(0), + waker: AtomicWaker::new(), + next: AtomicPtr::new(ptr::null_mut()), + } + } + + fn permits_to_acquire(&self) -> Result<usize, AcquireError> { + let state = WaiterState(self.state.load(Acquire)); + + if state.is_closed() { + Err(AcquireError(())) + } else { + Ok(state.permits_to_acquire()) + } + } + + /// Only increments the number of permits *if* the waiter is currently + /// queued. + /// + /// # Returns + /// + /// `true` if the number of permits to acquire has been incremented. `false` + /// otherwise. On `false`, the caller should use `Semaphore::poll_acquire`. + fn try_inc_permits_to_acquire(&self, n: usize) -> bool { + let mut curr = WaiterState(self.state.load(Acquire)); + + loop { + if !curr.is_queued() { + assert_eq!(0, curr.permits_to_acquire()); + return false; + } + + let mut next = curr; + next.set_permits_to_acquire(n + curr.permits_to_acquire()); + + match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => return true, + Err(actual) => curr = WaiterState(actual), + } + } + } + + /// Try to decrement the number of permits to acquire. This returns the + /// actual number of permits that were decremented. The delta betweeen `n` + /// and the return has been assigned to the permit and the caller must + /// assign these back to the semaphore. + fn try_dec_permits_to_acquire(&self, n: usize) -> usize { + let mut curr = WaiterState(self.state.load(Acquire)); + + loop { + if !curr.is_queued() { + assert_eq!(0, curr.permits_to_acquire()); + } + + let delta = cmp::min(n, curr.permits_to_acquire()); + let rem = curr.permits_to_acquire() - delta; + + let mut next = curr; + next.set_permits_to_acquire(rem); + + match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => return n - delta, + Err(actual) => curr = WaiterState(actual), + } + } + } + + /// Store the number of remaining permits needed to satisfy the waiter and + /// transition to the "QUEUED" state. + /// + /// # Returns + /// + /// `true` if the `QUEUED` bit was set as part of the transition. + fn to_queued(&self, num_permits: usize) -> bool { + let mut curr = WaiterState(self.state.load(Acquire)); + + // The waiter should **not** be waiting for any permits. + debug_assert_eq!(curr.permits_to_acquire(), 0); + + loop { + let mut next = curr; + next.set_permits_to_acquire(num_permits); + next.set_queued(); + + match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) { + Ok(_) => { + if curr.is_queued() { + return false; + } else { + // Make sure the next pointer is null + self.next.store(ptr::null_mut(), Relaxed); + return true; + } + } + Err(actual) => curr = WaiterState(actual), + } + } + } + + /// Set the number of permits to acquire. + /// + /// This function is only called when the waiter is being inserted into the + /// wait queue. Because of this, there are no concurrent threads that can + /// modify the state and using `store` is safe. 
+
+    /// Set the number of permits to acquire.
+    ///
+    /// This function is only called when the waiter is being inserted into the
+    /// wait queue. Because of this, there are no concurrent threads that can
+    /// modify the state and using `store` is safe.
+    fn set_permits_to_acquire(&self, num_permits: usize) {
+        debug_assert!(WaiterState(self.state.load(Acquire)).is_queued());
+
+        let mut state = WaiterState(QUEUED);
+        state.set_permits_to_acquire(num_permits);
+
+        self.state.store(state.0, Release);
+    }
+
+    /// Assign permits to the waiter.
+    ///
+    /// Returns `true` if the waiter should be removed from the queue.
+    fn assign_permits(&self, n: &mut usize, closed: bool) -> bool {
+        let mut curr = WaiterState(self.state.load(Acquire));
+
+        loop {
+            let mut next = curr;
+
+            // Number of permits to assign to this waiter
+            let assign = cmp::min(curr.permits_to_acquire(), *n);
+
+            // Assign the permits
+            next.set_permits_to_acquire(curr.permits_to_acquire() - assign);
+
+            if closed {
+                next.set_closed();
+            }
+
+            match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+                Ok(_) => {
+                    // Update `n`
+                    *n -= assign;
+
+                    if next.permits_to_acquire() == 0 {
+                        if curr.permits_to_acquire() > 0 {
+                            self.waker.wake();
+                        }
+
+                        return true;
+                    } else {
+                        return false;
+                    }
+                }
+                Err(actual) => curr = WaiterState(actual),
+            }
+        }
+    }
+
+    fn revert_to_idle(&self) {
+        // An idle node is not waiting on any permits
+        self.state.store(0, Relaxed);
+    }
+
+    fn store_next(&self, next: NonNull<Waiter>) {
+        self.next.store(next.as_ptr(), Release);
+    }
+}
+
+// ===== impl SemState =====
+
+impl SemState {
+    /// Returns a new default `State` value.
+    fn new(permits: usize, stub: &Waiter) -> SemState {
+        assert!(permits <= MAX_PERMITS);
+
+        if permits > 0 {
+            SemState((permits << NUM_SHIFT) | NUM_FLAG)
+        } else {
+            SemState(stub as *const _ as usize)
+        }
+    }
+
+    /// Returns a `State` tracking `ptr` as the tail of the queue.
+    fn new_ptr(tail: NonNull<Waiter>, closed: bool) -> SemState {
+        let mut val = tail.as_ptr() as usize;
+
+        if closed {
+            val |= CLOSED_FLAG;
+        }
+
+        SemState(val)
+    }
+
+    /// Returns the amount of remaining capacity
+    fn available_permits(self) -> usize {
+        if !self.has_available_permits() {
+            return 0;
+        }
+
+        self.0 >> NUM_SHIFT
+    }
+
+    /// Returns `true` if the state has permits that can be claimed by a waiter.
+    fn has_available_permits(self) -> bool {
+        self.0 & NUM_FLAG == NUM_FLAG
+    }
+
+    fn has_waiter(self, stub: &Waiter) -> bool {
+        !self.has_available_permits() && !self.is_stub(stub)
+    }
+
+    /// Tries to atomically acquire the specified number of permits.
+    ///
+    /// # Return
+    ///
+    /// Returns `true` if the specified number of permits were acquired, `false`
+    /// otherwise. Returning `false` does not mean that there are no more
+    /// available permits.
+    fn acquire_permits(&mut self, num: usize, stub: &Waiter) -> bool {
+        debug_assert!(num > 0);
+
+        if self.available_permits() < num {
+            return false;
+        }
+
+        debug_assert!(self.waiter().is_none());
+
+        self.0 -= num << NUM_SHIFT;
+
+        if self.0 == NUM_FLAG {
+            // Set the state to the stub pointer.
+            self.0 = stub as *const _ as usize;
+        }
+
+        true
+    }
+
+    /// Releases permits, making them available to be claimed.
+    fn release_permits(&mut self, permits: usize, stub: &Waiter) {
+        debug_assert!(permits > 0);
+
+        if self.is_stub(stub) {
+            self.0 = (permits << NUM_SHIFT) | NUM_FLAG | (self.0 & CLOSED_FLAG);
+            return;
+        }
+
+        debug_assert!(self.has_available_permits());
+
+        self.0 += permits << NUM_SHIFT;
+    }
+
+    fn is_waiter(self) -> bool {
+        self.0 & NUM_FLAG == 0
+    }
+
+    /// Returns the waiter, if one is set.
+ fn waiter(self) -> Option<NonNull<Waiter>> { + if self.is_waiter() { + let waiter = NonNull::new(self.as_ptr()).expect("null pointer stored"); + + Some(waiter) + } else { + None + } + } + + /// Assumes `self` represents a pointer + fn as_ptr(self) -> *mut Waiter { + (self.0 & !CLOSED_FLAG) as *mut Waiter + } + + /// Sets to a pointer to a waiter. + /// + /// This can only be done from the full state. + fn set_waiter(&mut self, waiter: NonNull<Waiter>) { + let waiter = waiter.as_ptr() as usize; + debug_assert!(!self.is_closed()); + + self.0 = waiter; + } + + fn is_stub(self, stub: &Waiter) -> bool { + self.as_ptr() as usize == stub as *const _ as usize + } + + /// Loads the state from an AtomicUsize. + fn load(cell: &AtomicUsize, ordering: Ordering) -> SemState { + let value = cell.load(ordering); + SemState(value) + } + + fn fetch_set_closed(cell: &AtomicUsize, ordering: Ordering) -> SemState { + let value = cell.fetch_or(CLOSED_FLAG, ordering); + SemState(value) + } + + fn is_closed(self) -> bool { + self.0 & CLOSED_FLAG == CLOSED_FLAG + } + + /// Converts the state into a `usize` representation. + fn to_usize(self) -> usize { + self.0 + } +} + +impl fmt::Debug for SemState { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut fmt = fmt.debug_struct("SemState"); + + if self.is_waiter() { + fmt.field("state", &"<waiter>"); + } else { + fmt.field("permits", &self.available_permits()); + } + + fmt.finish() + } +} + +// ===== impl WaiterState ===== + +impl WaiterState { + fn permits_to_acquire(self) -> usize { + self.0 >> PERMIT_SHIFT + } + + fn set_permits_to_acquire(&mut self, val: usize) { + self.0 = (val << PERMIT_SHIFT) | (self.0 & !PERMIT_MASK) + } + + fn is_queued(self) -> bool { + self.0 & QUEUED == QUEUED + } + + fn set_queued(&mut self) { + self.0 |= QUEUED; + } + + fn is_closed(self) -> bool { + self.0 & CLOSED == CLOSED + } + + fn set_closed(&mut self) { + self.0 |= CLOSED; + } + + fn unset_queued(&mut self) { + assert!(self.is_queued()); + self.0 -= QUEUED; + } + + fn is_dropped(self) -> bool { + self.0 & DROPPED == DROPPED + } +} diff --git a/third_party/rust/tokio/src/sync/task/atomic_waker.rs b/third_party/rust/tokio/src/sync/task/atomic_waker.rs new file mode 100644 index 0000000000..73b1745f1a --- /dev/null +++ b/third_party/rust/tokio/src/sync/task/atomic_waker.rs @@ -0,0 +1,318 @@ +#![cfg_attr(any(loom, not(feature = "sync")), allow(dead_code, unreachable_pub))] + +use crate::loom::cell::UnsafeCell; +use crate::loom::sync::atomic::{self, AtomicUsize}; + +use std::fmt; +use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; +use std::task::Waker; + +/// A synchronization primitive for task waking. +/// +/// `AtomicWaker` will coordinate concurrent wakes with the consumer +/// potentially "waking" the underlying task. This is useful in scenarios +/// where a computation completes in another thread and wants to wake the +/// consumer, but the consumer is in the process of being migrated to a new +/// logical task. +/// +/// Consumers should call `register` before checking the result of a computation +/// and producers should call `wake` after producing the computation (this +/// differs from the usual `thread::park` pattern). It is also permitted for +/// `wake` to be called **before** `register`. This results in a no-op. +/// +/// A single `AtomicWaker` may be reused for any number of calls to `register` or +/// `wake`. 
+pub(crate) struct AtomicWaker {
+    state: AtomicUsize,
+    waker: UnsafeCell<Option<Waker>>,
+}
+
+// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell
+// stores a `Waker` value produced by calls to `register` and many threads can
+// race to take the waker by calling `wake`.
+//
+// If a new `Waker` instance is produced by calling `register` before an existing
+// one is consumed, then the existing one is overwritten.
+//
+// While `AtomicWaker` is single-producer, the implementation ensures memory
+// safety. In the event of concurrent calls to `register`, there will be a
+// single winner whose waker will get stored in the cell. The losers will not
+// have their tasks woken. As such, callers should add synchronization
+// to calls to `register`.
+//
+// The implementation uses a single `AtomicUsize` value to coordinate access to
+// the `Waker` cell. There are two bits that are operated on independently. These
+// are represented by `REGISTERING` and `WAKING`.
+//
+// The `REGISTERING` bit is set when a producer enters the critical section. The
+// `WAKING` bit is set when a consumer enters the critical section. Neither
+// bit being set is represented by `WAITING`.
+//
+// A thread obtains an exclusive lock on the waker cell by transitioning the
+// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the
+// operation the thread wishes to perform. When this transition is made, it is
+// guaranteed that no other thread will access the waker cell.
+//
+// # Registering
+//
+// On a call to `register`, an attempt to transition the state from WAITING to
+// REGISTERING is made. On success, the caller obtains a lock on the waker cell.
+//
+// If the lock is obtained, then the thread sets the waker cell to the waker
+// provided as an argument. Then it attempts to transition the state back from
+// `REGISTERING` -> `WAITING`.
+//
+// If this transition is successful, then the registering process is complete
+// and the next call to `wake` will observe the waker.
+//
+// If the transition fails, then there was a concurrent call to `wake` that
+// was unable to access the waker cell (due to the registering thread holding the
+// lock). To handle this, the registering thread removes the waker it just set
+// from the cell and calls `wake` on it. This call to wake represents the
+// attempt to wake by the other thread (that set the `WAKING` bit). The
+// state is then transitioned from `REGISTERING | WAKING` back to `WAITING`.
+// This transition must succeed because, at this point, the state cannot be
+// transitioned by another thread.
+//
+// # Waking
+//
+// On a call to `wake`, an attempt to transition the state from `WAITING` to
+// `WAKING` is made. On success, the caller obtains a lock on the waker cell.
+//
+// If the lock is obtained, then the thread takes ownership of the current value
+// in the waker cell, and calls `wake` on it. The state is then transitioned
+// back to `WAITING`. This transition must succeed as, at this point, the state
+// cannot be transitioned by another thread.
+//
+// If the thread is unable to obtain the lock, the `WAKING` bit is still set.
+// This is because it has either been set by the current thread but the previous
+// value included the `REGISTERING` bit **or** a concurrent thread is in the
+// `WAKING` critical section. Either way, no action must be taken.
+// +// If the current thread is the only concurrent call to `wake` and another +// thread is in the `register` critical section, when the other thread **exits** +// the `register` critical section, it will observe the `WAKING` bit and +// handle the waker itself. +// +// If another thread is in the `waker` critical section, then it will handle +// waking the caller task. +// +// # A potential race (is safely handled). +// +// Imagine the following situation: +// +// * Thread A obtains the `wake` lock and wakes a task. +// +// * Before thread A releases the `wake` lock, the woken task is scheduled. +// +// * Thread B attempts to wake the task. In theory this should result in the +// task being woken, but it cannot because thread A still holds the wake +// lock. +// +// This case is handled by requiring users of `AtomicWaker` to call `register` +// **before** attempting to observe the application state change that resulted +// in the task being woken. The wakers also change the application state +// before calling wake. +// +// Because of this, the task will do one of two things. +// +// 1) Observe the application state change that Thread B is waking on. In +// this case, it is OK for Thread B's wake to be lost. +// +// 2) Call register before attempting to observe the application state. Since +// Thread A still holds the `wake` lock, the call to `register` will result +// in the task waking itself and get scheduled again. + +/// Idle state +const WAITING: usize = 0; + +/// A new waker value is being registered with the `AtomicWaker` cell. +const REGISTERING: usize = 0b01; + +/// The task currently registered with the `AtomicWaker` cell is being woken. +const WAKING: usize = 0b10; + +impl AtomicWaker { + /// Create an `AtomicWaker` + pub(crate) fn new() -> AtomicWaker { + AtomicWaker { + state: AtomicUsize::new(WAITING), + waker: UnsafeCell::new(None), + } + } + + /// Registers the current waker to be notified on calls to `wake`. + /// + /// This is the same as calling `register_task` with `task::current()`. + #[cfg(feature = "io-driver")] + pub(crate) fn register(&self, waker: Waker) { + self.do_register(waker); + } + + /// Registers the provided waker to be notified on calls to `wake`. + /// + /// The new waker will take place of any previous wakers that were registered + /// by previous calls to `register`. Any calls to `wake` that happen after + /// a call to `register` (as defined by the memory ordering rules), will + /// wake the `register` caller's task. + /// + /// It is safe to call `register` with multiple other threads concurrently + /// calling `wake`. This will result in the `register` caller's current + /// task being woken once. + /// + /// This function is safe to call concurrently, but this is generally a bad + /// idea. Concurrent calls to `register` will attempt to register different + /// tasks to be woken. One of the callers will win and have its task set, + /// but there is no guarantee as to which caller will succeed. + pub(crate) fn register_by_ref(&self, waker: &Waker) { + self.do_register(waker); + } + + fn do_register<W>(&self, waker: W) + where + W: WakerRef, + { + match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) { + WAITING => { + unsafe { + // Locked acquired, update the waker cell + self.waker.with_mut(|t| *t = Some(waker.into_waker())); + + // Release the lock. 
If the state transitioned to include
+                    // the `WAKING` bit, this means that a wake has been
+                    // called concurrently, so we have to remove the waker and
+                    // wake it.
+                    //
+                    // Start by assuming that the state is `REGISTERING` as this
+                    // is what we just set it to.
+                    let res = self
+                        .state
+                        .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire);
+
+                    match res {
+                        Ok(_) => {}
+                        Err(actual) => {
+                            // This branch can only be reached if a
+                            // concurrent thread called `wake`. In this
+                            // case, `actual` **must** be
+                            // `REGISTERING | WAKING`.
+                            debug_assert_eq!(actual, REGISTERING | WAKING);
+
+                            // Take the waker to wake once the atomic operation has
+                            // completed.
+                            let waker = self.waker.with_mut(|t| (*t).take()).unwrap();
+
+                            // Just swap, because no one could change state
+                            // while state == `REGISTERING | WAKING`
+                            self.state.swap(WAITING, AcqRel);
+
+                            // The atomic swap was complete, now
+                            // wake the waker and return.
+                            waker.wake();
+                        }
+                    }
+                }
+            }
+            WAKING => {
+                // Currently in the process of waking the task, i.e.,
+                // `wake` is currently being called on the old waker.
+                // So, we call wake on the new waker.
+                waker.wake();
+
+                // This is equivalent to a spin lock, so use a spin hint.
+                atomic::spin_loop_hint();
+            }
+            state => {
+                // In this case, a concurrent thread is holding the
+                // "registering" lock. This probably indicates a bug in the
+                // caller's code as racing to call `register` doesn't make much
+                // sense.
+                //
+                // We just want to maintain memory safety. It is ok to drop the
+                // call to `register`.
+                debug_assert!(state == REGISTERING || state == REGISTERING | WAKING);
+            }
+        }
+    }
+
+    /// Wakes the task that last called `register`.
+    ///
+    /// If `register` has not been called yet, then this does nothing.
+    pub(crate) fn wake(&self) {
+        if let Some(waker) = self.take_waker() {
+            waker.wake();
+        }
+    }
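+
+    // Editor's sketch (illustrative, not upstream code): the intended call
+    // pattern, mirroring the `basic_notification` loom test later in this
+    // patch. `condition_is_met` / `set_condition` stand in for whatever
+    // shared state the waker guards. The consumer registers *before*
+    // re-checking the condition, and the producer updates the condition
+    // *before* calling `wake`; this ordering is what rules out lost wakeups.
+    //
+    // ```
+    // // Consumer side:
+    // poll_fn(|cx| {
+    //     waker.register_by_ref(cx.waker()); // 1) register first
+    //     if condition_is_met() {            // 2) then check
+    //         Poll::Ready(())
+    //     } else {
+    //         Poll::Pending
+    //     }
+    // })
+    // .await;
+    //
+    // // Producer side (another thread):
+    // set_condition(); // 1) publish the state change
+    // waker.wake();    // 2) then wake the registered task
+    // ```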
+
+    /// Attempts to take the `Waker` value out of the `AtomicWaker` with the
+    /// intention that the caller will wake the task later.
+    pub(crate) fn take_waker(&self) -> Option<Waker> {
+        // AcqRel ordering is used in order to acquire the value of the `waker`
+        // cell as well as to establish a `release` ordering with whatever
+        // memory the `AtomicWaker` is associated with.
+        match self.state.fetch_or(WAKING, AcqRel) {
+            WAITING => {
+                // The waking lock has been acquired.
+                let waker = unsafe { self.waker.with_mut(|t| (*t).take()) };
+
+                // Release the lock
+                self.state.fetch_and(!WAKING, Release);
+
+                waker
+            }
+            state => {
+                // There is a concurrent thread currently updating the
+                // associated waker.
+                //
+                // Nothing more to do as the `WAKING` bit has been set. It
+                // doesn't matter if there are concurrent registering threads or
+                // not.
+                debug_assert!(
+                    state == REGISTERING || state == REGISTERING | WAKING || state == WAKING
+                );
+                None
+            }
+        }
+    }
+}
+
+impl Default for AtomicWaker {
+    fn default() -> Self {
+        AtomicWaker::new()
+    }
+}
+
+impl fmt::Debug for AtomicWaker {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(fmt, "AtomicWaker")
+    }
+}
+
+unsafe impl Send for AtomicWaker {}
+unsafe impl Sync for AtomicWaker {}
+
+trait WakerRef {
+    fn wake(self);
+    fn into_waker(self) -> Waker;
+}
+
+impl WakerRef for Waker {
+    fn wake(self) {
+        self.wake()
+    }
+
+    fn into_waker(self) -> Waker {
+        self
+    }
+}
+
+impl WakerRef for &Waker {
+    fn wake(self) {
+        self.wake_by_ref()
+    }
+
+    fn into_waker(self) -> Waker {
+        self.clone()
+    }
+}
diff --git a/third_party/rust/tokio/src/sync/task/mod.rs b/third_party/rust/tokio/src/sync/task/mod.rs
new file mode 100644
index 0000000000..a6bc6ed06e
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/task/mod.rs
@@ -0,0 +1,4 @@
+//! Thread-safe task notification primitives.
+
+mod atomic_waker;
+pub(crate) use self::atomic_waker::AtomicWaker;
diff --git a/third_party/rust/tokio/src/sync/tests/atomic_waker.rs b/third_party/rust/tokio/src/sync/tests/atomic_waker.rs
new file mode 100644
index 0000000000..c832d62e9a
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/atomic_waker.rs
@@ -0,0 +1,34 @@
+use crate::sync::AtomicWaker;
+use tokio_test::task;
+
+use std::task::Waker;
+
+trait AssertSend: Send {}
+trait AssertSync: Sync {}
+
+impl AssertSend for AtomicWaker {}
+impl AssertSync for AtomicWaker {}
+
+impl AssertSend for Waker {}
+impl AssertSync for Waker {}
+
+#[test]
+fn basic_usage() {
+    let mut waker = task::spawn(AtomicWaker::new());
+
+    waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
+    waker.wake();
+
+    assert!(waker.is_woken());
+}
+
+#[test]
+fn wake_without_register() {
+    let mut waker = task::spawn(AtomicWaker::new());
+    waker.wake();
+
+    // Registering should not result in a notification
+    waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
+
+    assert!(!waker.is_woken());
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs b/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs
new file mode 100644
index 0000000000..c148bcbe11
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs
@@ -0,0 +1,45 @@
+use crate::sync::task::AtomicWaker;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::sync::atomic::AtomicUsize;
+use loom::thread;
+use std::sync::atomic::Ordering::Relaxed;
+use std::sync::Arc;
+use std::task::Poll::{Pending, Ready};
+
+struct Chan {
+    num: AtomicUsize,
+    task: AtomicWaker,
+}
+
+#[test]
+fn basic_notification() {
+    const NUM_NOTIFY: usize = 2;
+
+    loom::model(|| {
+        let chan = Arc::new(Chan {
+            num: AtomicUsize::new(0),
+            task: AtomicWaker::new(),
+        });
+
+        for _ in 0..NUM_NOTIFY {
+            let chan = chan.clone();
+
+            thread::spawn(move || {
+                chan.num.fetch_add(1, Relaxed);
+                chan.task.wake();
+            });
+        }
+
+        block_on(poll_fn(move |cx| {
+            chan.task.register_by_ref(cx.waker());
+
+            if NUM_NOTIFY == chan.num.load(Relaxed) {
+                return Ready(());
+            }
+
+            Pending
+        }));
+    });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs b/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs
new file mode 100644
index 0000000000..da12fb9ff0
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs
@@ -0,0 +1,180 @@
+use crate::sync::broadcast;
+use crate::sync::broadcast::RecvError::{Closed, Lagged};
+
+use
loom::future::block_on; +use loom::sync::Arc; +use loom::thread; +use tokio_test::{assert_err, assert_ok}; + +#[test] +fn broadcast_send() { + loom::model(|| { + let (tx1, mut rx) = broadcast::channel(2); + let tx1 = Arc::new(tx1); + let tx2 = tx1.clone(); + + let th1 = thread::spawn(move || { + block_on(async { + assert_ok!(tx1.send("one")); + assert_ok!(tx1.send("two")); + assert_ok!(tx1.send("three")); + }); + }); + + let th2 = thread::spawn(move || { + block_on(async { + assert_ok!(tx2.send("eins")); + assert_ok!(tx2.send("zwei")); + assert_ok!(tx2.send("drei")); + }); + }); + + block_on(async { + let mut num = 0; + loop { + match rx.recv().await { + Ok(_) => num += 1, + Err(Closed) => break, + Err(Lagged(n)) => num += n as usize, + } + } + assert_eq!(num, 6); + }); + + assert_ok!(th1.join()); + assert_ok!(th2.join()); + }); +} + +// An `Arc` is used as the value in order to detect memory leaks. +#[test] +fn broadcast_two() { + loom::model(|| { + let (tx, mut rx1) = broadcast::channel::<Arc<&'static str>>(16); + let mut rx2 = tx.subscribe(); + + let th1 = thread::spawn(move || { + block_on(async { + let v = assert_ok!(rx1.recv().await); + assert_eq!(*v, "hello"); + + let v = assert_ok!(rx1.recv().await); + assert_eq!(*v, "world"); + + match assert_err!(rx1.recv().await) { + Closed => {} + _ => panic!(), + } + }); + }); + + let th2 = thread::spawn(move || { + block_on(async { + let v = assert_ok!(rx2.recv().await); + assert_eq!(*v, "hello"); + + let v = assert_ok!(rx2.recv().await); + assert_eq!(*v, "world"); + + match assert_err!(rx2.recv().await) { + Closed => {} + _ => panic!(), + } + }); + }); + + assert_ok!(tx.send(Arc::new("hello"))); + assert_ok!(tx.send(Arc::new("world"))); + drop(tx); + + assert_ok!(th1.join()); + assert_ok!(th2.join()); + }); +} + +#[test] +fn broadcast_wrap() { + loom::model(|| { + let (tx, mut rx1) = broadcast::channel(2); + let mut rx2 = tx.subscribe(); + + let th1 = thread::spawn(move || { + block_on(async { + let mut num = 0; + + loop { + match rx1.recv().await { + Ok(_) => num += 1, + Err(Closed) => break, + Err(Lagged(n)) => num += n as usize, + } + } + + assert_eq!(num, 3); + }); + }); + + let th2 = thread::spawn(move || { + block_on(async { + let mut num = 0; + + loop { + match rx2.recv().await { + Ok(_) => num += 1, + Err(Closed) => break, + Err(Lagged(n)) => num += n as usize, + } + } + + assert_eq!(num, 3); + }); + }); + + assert_ok!(tx.send("one")); + assert_ok!(tx.send("two")); + assert_ok!(tx.send("three")); + + drop(tx); + + assert_ok!(th1.join()); + assert_ok!(th2.join()); + }); +} + +#[test] +fn drop_rx() { + loom::model(|| { + let (tx, mut rx1) = broadcast::channel(16); + let rx2 = tx.subscribe(); + + let th1 = thread::spawn(move || { + block_on(async { + let v = assert_ok!(rx1.recv().await); + assert_eq!(v, "one"); + + let v = assert_ok!(rx1.recv().await); + assert_eq!(v, "two"); + + let v = assert_ok!(rx1.recv().await); + assert_eq!(v, "three"); + + match assert_err!(rx1.recv().await) { + Closed => {} + _ => panic!(), + } + }); + }); + + let th2 = thread::spawn(move || { + drop(rx2); + }); + + assert_ok!(tx.send("one")); + assert_ok!(tx.send("two")); + assert_ok!(tx.send("three")); + drop(tx); + + assert_ok!(th1.join()); + assert_ok!(th2.join()); + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_list.rs b/third_party/rust/tokio/src/sync/tests/loom_list.rs new file mode 100644 index 0000000000..4067f865ce --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_list.rs @@ -0,0 +1,48 @@ +use crate::sync::mpsc::list; + 
+use loom::thread; +use std::sync::Arc; + +#[test] +fn smoke() { + use crate::sync::mpsc::block::Read::*; + + const NUM_TX: usize = 2; + const NUM_MSG: usize = 2; + + loom::model(|| { + let (tx, mut rx) = list::channel(); + let tx = Arc::new(tx); + + for th in 0..NUM_TX { + let tx = tx.clone(); + + thread::spawn(move || { + for i in 0..NUM_MSG { + tx.push((th, i)); + } + }); + } + + let mut next = vec![0; NUM_TX]; + + loop { + match rx.pop(&tx) { + Some(Value((th, v))) => { + assert_eq!(v, next[th]); + next[th] += 1; + + if next.iter().all(|&i| i == NUM_MSG) { + break; + } + } + Some(Closed) => { + panic!(); + } + None => { + thread::yield_now(); + } + } + } + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs b/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs new file mode 100644 index 0000000000..6a1a6abedd --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs @@ -0,0 +1,77 @@ +use crate::sync::mpsc; + +use futures::future::poll_fn; +use loom::future::block_on; +use loom::thread; + +#[test] +fn closing_tx() { + loom::model(|| { + let (mut tx, mut rx) = mpsc::channel(16); + + thread::spawn(move || { + tx.try_send(()).unwrap(); + drop(tx); + }); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_some()); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_none()); + }); +} + +#[test] +fn closing_unbounded_tx() { + loom::model(|| { + let (tx, mut rx) = mpsc::unbounded_channel(); + + thread::spawn(move || { + tx.send(()).unwrap(); + drop(tx); + }); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_some()); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_none()); + }); +} + +#[test] +fn dropping_tx() { + loom::model(|| { + let (tx, mut rx) = mpsc::channel::<()>(16); + + for _ in 0..2 { + let tx = tx.clone(); + thread::spawn(move || { + drop(tx); + }); + } + drop(tx); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_none()); + }); +} + +#[test] +fn dropping_unbounded_tx() { + loom::model(|| { + let (tx, mut rx) = mpsc::unbounded_channel::<()>(); + + for _ in 0..2 { + let tx = tx.clone(); + thread::spawn(move || { + drop(tx); + }); + } + drop(tx); + + let v = block_on(poll_fn(|cx| rx.poll_recv(cx))); + assert!(v.is_none()); + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_notify.rs b/third_party/rust/tokio/src/sync/tests/loom_notify.rs new file mode 100644 index 0000000000..60981d4669 --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_notify.rs @@ -0,0 +1,90 @@ +use crate::sync::Notify; + +use loom::future::block_on; +use loom::sync::Arc; +use loom::thread; + +#[test] +fn notify_one() { + loom::model(|| { + let tx = Arc::new(Notify::new()); + let rx = tx.clone(); + + let th = thread::spawn(move || { + block_on(async { + rx.notified().await; + }); + }); + + tx.notify(); + th.join().unwrap(); + }); +} + +#[test] +fn notify_multi() { + loom::model(|| { + let notify = Arc::new(Notify::new()); + + let mut ths = vec![]; + + for _ in 0..2 { + let notify = notify.clone(); + + ths.push(thread::spawn(move || { + block_on(async { + notify.notified().await; + notify.notify(); + }) + })); + } + + notify.notify(); + + for th in ths.drain(..) 
{ + th.join().unwrap(); + } + + block_on(async { + notify.notified().await; + }); + }); +} + +#[test] +fn notify_drop() { + use crate::future::poll_fn; + use std::future::Future; + use std::task::Poll; + + loom::model(|| { + let notify = Arc::new(Notify::new()); + let rx1 = notify.clone(); + let rx2 = notify.clone(); + + let th1 = thread::spawn(move || { + let mut recv = Box::pin(rx1.notified()); + + block_on(poll_fn(|cx| { + if recv.as_mut().poll(cx).is_ready() { + rx1.notify(); + } + Poll::Ready(()) + })); + }); + + let th2 = thread::spawn(move || { + block_on(async { + rx2.notified().await; + // Trigger second notification + rx2.notify(); + rx2.notified().await; + }); + }); + + notify.notify(); + + th1.join().unwrap(); + th2.join().unwrap(); + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs b/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs new file mode 100644 index 0000000000..dfa7459da7 --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs @@ -0,0 +1,109 @@ +use crate::sync::oneshot; + +use futures::future::poll_fn; +use loom::future::block_on; +use loom::thread; +use std::task::Poll::{Pending, Ready}; + +#[test] +fn smoke() { + loom::model(|| { + let (tx, rx) = oneshot::channel(); + + thread::spawn(move || { + tx.send(1).unwrap(); + }); + + let value = block_on(rx).unwrap(); + assert_eq!(1, value); + }); +} + +#[test] +fn changing_rx_task() { + loom::model(|| { + let (tx, mut rx) = oneshot::channel(); + + thread::spawn(move || { + tx.send(1).unwrap(); + }); + + let rx = thread::spawn(move || { + let ready = block_on(poll_fn(|cx| match Pin::new(&mut rx).poll(cx) { + Ready(Ok(value)) => { + assert_eq!(1, value); + Ready(true) + } + Ready(Err(_)) => unimplemented!(), + Pending => Ready(false), + })); + + if ready { + None + } else { + Some(rx) + } + }) + .join() + .unwrap(); + + if let Some(rx) = rx { + // Previous task parked, use a new task... + let value = block_on(rx).unwrap(); + assert_eq!(1, value); + } + }); +} + +// TODO: Move this into `oneshot` proper. + +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +struct OnClose<'a> { + tx: &'a mut oneshot::Sender<i32>, +} + +impl<'a> OnClose<'a> { + fn new(tx: &'a mut oneshot::Sender<i32>) -> Self { + OnClose { tx } + } +} + +impl Future for OnClose<'_> { + type Output = bool; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> { + let res = self.get_mut().tx.poll_closed(cx); + Ready(res.is_ready()) + } +} + +#[test] +fn changing_tx_task() { + loom::model(|| { + let (mut tx, rx) = oneshot::channel::<i32>(); + + thread::spawn(move || { + drop(rx); + }); + + let tx = thread::spawn(move || { + let t1 = block_on(OnClose::new(&mut tx)); + + if t1 { + None + } else { + Some(tx) + } + }) + .join() + .unwrap(); + + if let Some(mut tx) = tx { + // Previous task parked, use a new task... 
+ block_on(OnClose::new(&mut tx)); + } + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs b/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs new file mode 100644 index 0000000000..48d06e1d5f --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs @@ -0,0 +1,78 @@ +use crate::sync::rwlock::*; + +use loom::future::block_on; +use loom::thread; +use std::sync::Arc; + +#[test] +fn concurrent_write() { + let mut b = loom::model::Builder::new(); + + b.check(|| { + let rwlock = Arc::new(RwLock::<u32>::new(0)); + + let rwclone = rwlock.clone(); + let t1 = thread::spawn(move || { + block_on(async { + let mut guard = rwclone.write().await; + *guard += 5; + }); + }); + + let rwclone = rwlock.clone(); + let t2 = thread::spawn(move || { + block_on(async { + let mut guard = rwclone.write().await; + *guard += 5; + }); + }); + + t1.join().expect("thread 1 write should not panic"); + t2.join().expect("thread 2 write should not panic"); + //when all threads have finished the value on the lock should be 10 + let guard = block_on(rwlock.read()); + assert_eq!(10, *guard); + }); +} + +#[test] +fn concurrent_read_write() { + let mut b = loom::model::Builder::new(); + + b.check(|| { + let rwlock = Arc::new(RwLock::<u32>::new(0)); + + let rwclone = rwlock.clone(); + let t1 = thread::spawn(move || { + block_on(async { + let mut guard = rwclone.write().await; + *guard += 5; + }); + }); + + let rwclone = rwlock.clone(); + let t2 = thread::spawn(move || { + block_on(async { + let mut guard = rwclone.write().await; + *guard += 5; + }); + }); + + let rwclone = rwlock.clone(); + let t3 = thread::spawn(move || { + block_on(async { + let guard = rwclone.read().await; + //at this state the value on the lock may either be 0, 5, or 10 + assert!(*guard == 0 || *guard == 5 || *guard == 10); + }); + }); + + t1.join().expect("thread 1 write should not panic"); + t2.join().expect("thread 2 write should not panic"); + t3.join().expect("thread 3 read should not panic"); + + let guard = block_on(rwlock.read()); + //when all threads have finished the value on the lock should be 10 + assert_eq!(10, *guard); + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs b/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs new file mode 100644 index 0000000000..76a1bc0062 --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs @@ -0,0 +1,215 @@ +use crate::sync::batch_semaphore::*; + +use futures::future::poll_fn; +use loom::future::block_on; +use loom::sync::atomic::AtomicUsize; +use loom::thread; +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; +use std::task::Poll::Ready; +use std::task::{Context, Poll}; + +#[test] +fn basic_usage() { + const NUM: usize = 2; + + struct Shared { + semaphore: Semaphore, + active: AtomicUsize, + } + + async fn actor(shared: Arc<Shared>) { + shared.semaphore.acquire(1).await.unwrap(); + let actual = shared.active.fetch_add(1, SeqCst); + assert!(actual <= NUM - 1); + + let actual = shared.active.fetch_sub(1, SeqCst); + assert!(actual <= NUM); + shared.semaphore.release(1); + } + + loom::model(|| { + let shared = Arc::new(Shared { + semaphore: Semaphore::new(NUM), + active: AtomicUsize::new(0), + }); + + for _ in 0..NUM { + let shared = shared.clone(); + + thread::spawn(move || { + block_on(actor(shared)); + }); + } + + block_on(actor(shared)); + }); +} + +#[test] +fn release() { + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + 
{ + let semaphore = semaphore.clone(); + thread::spawn(move || { + block_on(semaphore.acquire(1)).unwrap(); + semaphore.release(1); + }); + } + + block_on(semaphore.acquire(1)).unwrap(); + + semaphore.release(1); + }); +} + +#[test] +fn basic_closing() { + const NUM: usize = 2; + + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + for _ in 0..NUM { + let semaphore = semaphore.clone(); + + thread::spawn(move || { + for _ in 0..2 { + block_on(semaphore.acquire(1)).map_err(|_| ())?; + + semaphore.release(1); + } + + Ok::<(), ()>(()) + }); + } + + semaphore.close(); + }); +} + +#[test] +fn concurrent_close() { + const NUM: usize = 3; + + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + for _ in 0..NUM { + let semaphore = semaphore.clone(); + + thread::spawn(move || { + block_on(semaphore.acquire(1)).map_err(|_| ())?; + semaphore.release(1); + semaphore.close(); + + Ok::<(), ()>(()) + }); + } + }); +} + +#[test] +fn concurrent_cancel() { + async fn poll_and_cancel(semaphore: Arc<Semaphore>) { + let mut acquire1 = Some(semaphore.acquire(1)); + let mut acquire2 = Some(semaphore.acquire(1)); + poll_fn(|cx| { + // poll the acquire future once, and then immediately throw + // it away. this simulates a situation where a future is + // polled and then cancelled, such as by a timeout. + if let Some(acquire) = acquire1.take() { + pin!(acquire); + let _ = acquire.poll(cx); + } + if let Some(acquire) = acquire2.take() { + pin!(acquire); + let _ = acquire.poll(cx); + } + Poll::Ready(()) + }) + .await + } + + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(0)); + let t1 = { + let semaphore = semaphore.clone(); + thread::spawn(move || block_on(poll_and_cancel(semaphore))) + }; + let t2 = { + let semaphore = semaphore.clone(); + thread::spawn(move || block_on(poll_and_cancel(semaphore))) + }; + let t3 = { + let semaphore = semaphore.clone(); + thread::spawn(move || block_on(poll_and_cancel(semaphore))) + }; + + t1.join().unwrap(); + semaphore.release(10); + t2.join().unwrap(); + t3.join().unwrap(); + }); +} + +#[test] +fn batch() { + let mut b = loom::model::Builder::new(); + b.preemption_bound = Some(1); + + b.check(|| { + let semaphore = Arc::new(Semaphore::new(10)); + let active = Arc::new(AtomicUsize::new(0)); + let mut ths = vec![]; + + for _ in 0..2 { + let semaphore = semaphore.clone(); + let active = active.clone(); + + ths.push(thread::spawn(move || { + for n in &[4, 10, 8] { + block_on(semaphore.acquire(*n)).unwrap(); + + active.fetch_add(*n as usize, SeqCst); + + let num_active = active.load(SeqCst); + assert!(num_active <= 10); + + thread::yield_now(); + + active.fetch_sub(*n as usize, SeqCst); + + semaphore.release(*n as usize); + } + })); + } + + for th in ths.into_iter() { + th.join().unwrap(); + } + + assert_eq!(10, semaphore.available_permits()); + }); +} + +#[test] +fn release_during_acquire() { + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(10)); + semaphore + .try_acquire(8) + .expect("try_acquire should succeed; semaphore uncontended"); + let semaphore2 = semaphore.clone(); + let thread = thread::spawn(move || block_on(semaphore2.acquire(4)).unwrap()); + + semaphore.release(8); + thread.join().unwrap(); + semaphore.release(4); + assert_eq!(10, semaphore.available_permits()); + }) +} diff --git a/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs b/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs new file mode 100644 index 0000000000..b5e5efba82 --- /dev/null +++ 
b/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs @@ -0,0 +1,192 @@ +use crate::sync::semaphore_ll::*; + +use futures::future::poll_fn; +use loom::future::block_on; +use loom::thread; +use std::future::Future; +use std::pin::Pin; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; +use std::task::Poll::Ready; +use std::task::{Context, Poll}; + +#[test] +fn basic_usage() { + const NUM: usize = 2; + + struct Actor { + waiter: Permit, + shared: Arc<Shared>, + } + + struct Shared { + semaphore: Semaphore, + active: AtomicUsize, + } + + impl Future for Actor { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + let me = &mut *self; + + ready!(me.waiter.poll_acquire(cx, 1, &me.shared.semaphore)).unwrap(); + + let actual = me.shared.active.fetch_add(1, SeqCst); + assert!(actual <= NUM - 1); + + let actual = me.shared.active.fetch_sub(1, SeqCst); + assert!(actual <= NUM); + + me.waiter.release(1, &me.shared.semaphore); + + Ready(()) + } + } + + loom::model(|| { + let shared = Arc::new(Shared { + semaphore: Semaphore::new(NUM), + active: AtomicUsize::new(0), + }); + + for _ in 0..NUM { + let shared = shared.clone(); + + thread::spawn(move || { + block_on(Actor { + waiter: Permit::new(), + shared, + }); + }); + } + + block_on(Actor { + waiter: Permit::new(), + shared, + }); + }); +} + +#[test] +fn release() { + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + { + let semaphore = semaphore.clone(); + thread::spawn(move || { + let mut permit = Permit::new(); + + block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap(); + + permit.release(1, &semaphore); + }); + } + + let mut permit = Permit::new(); + + block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap(); + + permit.release(1, &semaphore); + }); +} + +#[test] +fn basic_closing() { + const NUM: usize = 2; + + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + for _ in 0..NUM { + let semaphore = semaphore.clone(); + + thread::spawn(move || { + let mut permit = Permit::new(); + + for _ in 0..2 { + block_on(poll_fn(|cx| { + permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ()) + }))?; + + permit.release(1, &semaphore); + } + + Ok::<(), ()>(()) + }); + } + + semaphore.close(); + }); +} + +#[test] +fn concurrent_close() { + const NUM: usize = 3; + + loom::model(|| { + let semaphore = Arc::new(Semaphore::new(1)); + + for _ in 0..NUM { + let semaphore = semaphore.clone(); + + thread::spawn(move || { + let mut permit = Permit::new(); + + block_on(poll_fn(|cx| { + permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ()) + }))?; + + permit.release(1, &semaphore); + + semaphore.close(); + + Ok::<(), ()>(()) + }); + } + }); +} + +#[test] +fn batch() { + let mut b = loom::model::Builder::new(); + b.preemption_bound = Some(1); + + b.check(|| { + let semaphore = Arc::new(Semaphore::new(10)); + let active = Arc::new(AtomicUsize::new(0)); + let mut ths = vec![]; + + for _ in 0..2 { + let semaphore = semaphore.clone(); + let active = active.clone(); + + ths.push(thread::spawn(move || { + let mut permit = Permit::new(); + + for n in &[4, 10, 8] { + block_on(poll_fn(|cx| permit.poll_acquire(cx, *n, &semaphore))).unwrap(); + + active.fetch_add(*n as usize, SeqCst); + + let num_active = active.load(SeqCst); + assert!(num_active <= 10); + + thread::yield_now(); + + active.fetch_sub(*n as usize, SeqCst); + + permit.release(*n, &semaphore); + } + })); + } + + for th in ths.into_iter() { + th.join().unwrap(); 
+ } + + assert_eq!(10, semaphore.available_permits()); + }); +} diff --git a/third_party/rust/tokio/src/sync/tests/mod.rs b/third_party/rust/tokio/src/sync/tests/mod.rs new file mode 100644 index 0000000000..d571754c01 --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/mod.rs @@ -0,0 +1,16 @@ +cfg_not_loom! { + mod atomic_waker; + mod semaphore_ll; + mod semaphore_batch; +} + +cfg_loom! { + mod loom_atomic_waker; + mod loom_broadcast; + mod loom_list; + mod loom_mpsc; + mod loom_notify; + mod loom_oneshot; + mod loom_semaphore_batch; + mod loom_semaphore_ll; +} diff --git a/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs b/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs new file mode 100644 index 0000000000..60f3f231e7 --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs @@ -0,0 +1,250 @@ +use crate::sync::batch_semaphore::Semaphore; +use tokio_test::*; + +#[test] +fn poll_acquire_one_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + // Polling for a permit succeeds immediately + assert_ready_ok!(task::spawn(s.acquire(1)).poll()); + assert_eq!(s.available_permits(), 99); +} + +#[test] +fn poll_acquire_many_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + // Polling for a permit succeeds immediately + assert_ready_ok!(task::spawn(s.acquire(5)).poll()); + assert_eq!(s.available_permits(), 95); + + assert_ready_ok!(task::spawn(s.acquire(5)).poll()); + assert_eq!(s.available_permits(), 90); +} + +#[test] +fn try_acquire_one_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + assert_ok!(s.try_acquire(1)); + assert_eq!(s.available_permits(), 99); + + assert_ok!(s.try_acquire(1)); + assert_eq!(s.available_permits(), 98); +} + +#[test] +fn try_acquire_many_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + assert_ok!(s.try_acquire(5)); + assert_eq!(s.available_permits(), 95); + + assert_ok!(s.try_acquire(5)); + assert_eq!(s.available_permits(), 90); +} + +#[test] +fn poll_acquire_one_unavailable() { + let s = Semaphore::new(1); + + // Acquire the first permit + assert_ready_ok!(task::spawn(s.acquire(1)).poll()); + assert_eq!(s.available_permits(), 0); + + let mut acquire_2 = task::spawn(s.acquire(1)); + // Try to acquire the second permit + assert_pending!(acquire_2.poll()); + assert_eq!(s.available_permits(), 0); + + s.release(1); + + assert_eq!(s.available_permits(), 0); + assert!(acquire_2.is_woken()); + assert_ready_ok!(acquire_2.poll()); + assert_eq!(s.available_permits(), 0); + + s.release(1); + assert_eq!(s.available_permits(), 1); +} + +#[test] +fn poll_acquire_many_unavailable() { + let s = Semaphore::new(5); + + // Acquire the first permit + assert_ready_ok!(task::spawn(s.acquire(1)).poll()); + assert_eq!(s.available_permits(), 4); + + // Try to acquire the second permit + let mut acquire_2 = task::spawn(s.acquire(5)); + assert_pending!(acquire_2.poll()); + assert_eq!(s.available_permits(), 0); + + // Try to acquire the third permit + let mut acquire_3 = task::spawn(s.acquire(3)); + assert_pending!(acquire_3.poll()); + assert_eq!(s.available_permits(), 0); + + s.release(1); + + assert_eq!(s.available_permits(), 0); + assert!(acquire_2.is_woken()); + assert_ready_ok!(acquire_2.poll()); + + assert!(!acquire_3.is_woken()); + assert_eq!(s.available_permits(), 0); + + s.release(1); + assert!(!acquire_3.is_woken()); + assert_eq!(s.available_permits(), 0); + + s.release(2); + 
assert!(acquire_3.is_woken()); + + assert_ready_ok!(acquire_3.poll()); +} + +#[test] +fn try_acquire_one_unavailable() { + let s = Semaphore::new(1); + + // Acquire the first permit + assert_ok!(s.try_acquire(1)); + assert_eq!(s.available_permits(), 0); + + assert_err!(s.try_acquire(1)); + + s.release(1); + + assert_eq!(s.available_permits(), 1); + assert_ok!(s.try_acquire(1)); + + s.release(1); + assert_eq!(s.available_permits(), 1); +} + +#[test] +fn try_acquire_many_unavailable() { + let s = Semaphore::new(5); + + // Acquire the first permit + assert_ok!(s.try_acquire(1)); + assert_eq!(s.available_permits(), 4); + + assert_err!(s.try_acquire(5)); + + s.release(1); + assert_eq!(s.available_permits(), 5); + + assert_ok!(s.try_acquire(5)); + + s.release(1); + assert_eq!(s.available_permits(), 1); + + s.release(1); + assert_eq!(s.available_permits(), 2); +} + +#[test] +fn poll_acquire_one_zero_permits() { + let s = Semaphore::new(0); + assert_eq!(s.available_permits(), 0); + + // Try to acquire the permit + let mut acquire = task::spawn(s.acquire(1)); + assert_pending!(acquire.poll()); + + s.release(1); + + assert!(acquire.is_woken()); + assert_ready_ok!(acquire.poll()); +} + +#[test] +#[should_panic] +fn validates_max_permits() { + use std::usize; + Semaphore::new((usize::MAX >> 2) + 1); +} + +#[test] +fn close_semaphore_prevents_acquire() { + let s = Semaphore::new(5); + s.close(); + + assert_eq!(5, s.available_permits()); + + assert_ready_err!(task::spawn(s.acquire(1)).poll()); + assert_eq!(5, s.available_permits()); + + assert_ready_err!(task::spawn(s.acquire(1)).poll()); + assert_eq!(5, s.available_permits()); +} + +#[test] +fn close_semaphore_notifies_permit1() { + let s = Semaphore::new(0); + let mut acquire = task::spawn(s.acquire(1)); + + assert_pending!(acquire.poll()); + + s.close(); + + assert!(acquire.is_woken()); + assert_ready_err!(acquire.poll()); +} + +#[test] +fn close_semaphore_notifies_permit2() { + let s = Semaphore::new(2); + + // Acquire a couple of permits + assert_ready_ok!(task::spawn(s.acquire(1)).poll()); + assert_ready_ok!(task::spawn(s.acquire(1)).poll()); + + let mut acquire3 = task::spawn(s.acquire(1)); + let mut acquire4 = task::spawn(s.acquire(1)); + assert_pending!(acquire3.poll()); + assert_pending!(acquire4.poll()); + + s.close(); + + assert!(acquire3.is_woken()); + assert!(acquire4.is_woken()); + + assert_ready_err!(acquire3.poll()); + assert_ready_err!(acquire4.poll()); + + assert_eq!(0, s.available_permits()); + + s.release(1); + + assert_eq!(1, s.available_permits()); + + assert_ready_err!(task::spawn(s.acquire(1)).poll()); + + s.release(1); + + assert_eq!(2, s.available_permits()); +} + +#[test] +fn cancel_acquire_releases_permits() { + let s = Semaphore::new(10); + let _permit1 = s.try_acquire(4).expect("uncontended try_acquire succeeds"); + assert_eq!(6, s.available_permits()); + + let mut acquire = task::spawn(s.acquire(8)); + assert_pending!(acquire.poll()); + + assert_eq!(0, s.available_permits()); + drop(acquire); + + assert_eq!(6, s.available_permits()); + assert_ok!(s.try_acquire(6)); +} diff --git a/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs b/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs new file mode 100644 index 0000000000..bfb075780b --- /dev/null +++ b/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs @@ -0,0 +1,470 @@ +use crate::sync::semaphore_ll::{Permit, Semaphore}; +use tokio_test::*; + +#[test] +fn poll_acquire_one_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + 
// Polling for a permit succeeds immediately + let mut permit = task::spawn(Permit::new()); + assert!(!permit.is_acquired()); + + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(s.available_permits(), 99); + assert!(permit.is_acquired()); + + // Polling again on the same waiter does not claim a new permit + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(s.available_permits(), 99); + assert!(permit.is_acquired()); +} + +#[test] +fn poll_acquire_many_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + // Polling for a permit succeeds immediately + let mut permit = task::spawn(Permit::new()); + assert!(!permit.is_acquired()); + + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); + assert_eq!(s.available_permits(), 95); + assert!(permit.is_acquired()); + + // Polling again on the same waiter does not claim a new permit + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(s.available_permits(), 95); + assert!(permit.is_acquired()); + + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); + assert_eq!(s.available_permits(), 95); + assert!(permit.is_acquired()); + + // Polling for a larger number of permits acquires more + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 8, &s))); + assert_eq!(s.available_permits(), 92); + assert!(permit.is_acquired()); +} + +#[test] +fn try_acquire_one_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + // Polling for a permit succeeds immediately + let mut permit = Permit::new(); + assert!(!permit.is_acquired()); + + assert_ok!(permit.try_acquire(1, &s)); + assert_eq!(s.available_permits(), 99); + assert!(permit.is_acquired()); + + // Polling again on the same waiter does not claim a new permit + assert_ok!(permit.try_acquire(1, &s)); + assert_eq!(s.available_permits(), 99); + assert!(permit.is_acquired()); +} + +#[test] +fn try_acquire_many_available() { + let s = Semaphore::new(100); + assert_eq!(s.available_permits(), 100); + + // Polling for a permit succeeds immediately + let mut permit = Permit::new(); + assert!(!permit.is_acquired()); + + assert_ok!(permit.try_acquire(5, &s)); + assert_eq!(s.available_permits(), 95); + assert!(permit.is_acquired()); + + // Polling again on the same waiter does not claim a new permit + assert_ok!(permit.try_acquire(5, &s)); + assert_eq!(s.available_permits(), 95); + assert!(permit.is_acquired()); +} + +#[test] +fn poll_acquire_one_unavailable() { + let s = Semaphore::new(1); + + let mut permit_1 = task::spawn(Permit::new()); + let mut permit_2 = task::spawn(Permit::new()); + + // Acquire the first permit + assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(s.available_permits(), 0); + + permit_2.enter(|cx, mut p| { + // Try to acquire the second permit + assert_pending!(p.poll_acquire(cx, 1, &s)); + }); + + permit_1.release(1, &s); + + assert_eq!(s.available_permits(), 0); + assert!(permit_2.is_woken()); + assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + permit_2.release(1, &s); + assert_eq!(s.available_permits(), 1); +} + +#[test] +fn forget_acquired() { + let s = Semaphore::new(1); + + // Polling for a permit succeeds immediately + let mut permit = task::spawn(Permit::new()); + + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + assert_eq!(s.available_permits(), 0); + + permit.forget(1); + 
assert_eq!(s.available_permits(), 0); +} + +#[test] +fn forget_waiting() { + let s = Semaphore::new(0); + + // Polling for a permit succeeds immediately + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + assert_eq!(s.available_permits(), 0); + + permit.forget(1); + + s.add_permits(1); + + assert!(!permit.is_woken()); + assert_eq!(s.available_permits(), 1); +} + +#[test] +fn poll_acquire_many_unavailable() { + let s = Semaphore::new(5); + + let mut permit_1 = task::spawn(Permit::new()); + let mut permit_2 = task::spawn(Permit::new()); + let mut permit_3 = task::spawn(Permit::new()); + + // Acquire the first permit + assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(s.available_permits(), 4); + + permit_2.enter(|cx, mut p| { + // Try to acquire the second permit + assert_pending!(p.poll_acquire(cx, 5, &s)); + }); + + assert_eq!(s.available_permits(), 0); + + permit_3.enter(|cx, mut p| { + // Try to acquire the third permit + assert_pending!(p.poll_acquire(cx, 3, &s)); + }); + + permit_1.release(1, &s); + + assert_eq!(s.available_permits(), 0); + assert!(permit_2.is_woken()); + assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 5, &s))); + + assert!(!permit_3.is_woken()); + assert_eq!(s.available_permits(), 0); + + permit_2.release(1, &s); + assert!(!permit_3.is_woken()); + assert_eq!(s.available_permits(), 0); + + permit_2.release(2, &s); + assert!(permit_3.is_woken()); + + assert_ready_ok!(permit_3.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); +} + +#[test] +fn try_acquire_one_unavailable() { + let s = Semaphore::new(1); + + let mut permit_1 = Permit::new(); + let mut permit_2 = Permit::new(); + + // Acquire the first permit + assert_ok!(permit_1.try_acquire(1, &s)); + assert_eq!(s.available_permits(), 0); + + assert_err!(permit_2.try_acquire(1, &s)); + + permit_1.release(1, &s); + + assert_eq!(s.available_permits(), 1); + assert_ok!(permit_2.try_acquire(1, &s)); + + permit_2.release(1, &s); + assert_eq!(s.available_permits(), 1); +} + +#[test] +fn try_acquire_many_unavailable() { + let s = Semaphore::new(5); + + let mut permit_1 = Permit::new(); + let mut permit_2 = Permit::new(); + + // Acquire the first permit + assert_ok!(permit_1.try_acquire(1, &s)); + assert_eq!(s.available_permits(), 4); + + assert_err!(permit_2.try_acquire(5, &s)); + + permit_1.release(1, &s); + assert_eq!(s.available_permits(), 5); + + assert_ok!(permit_2.try_acquire(5, &s)); + + permit_2.release(1, &s); + assert_eq!(s.available_permits(), 1); + + permit_2.release(1, &s); + assert_eq!(s.available_permits(), 2); +} + +#[test] +fn poll_acquire_one_zero_permits() { + let s = Semaphore::new(0); + assert_eq!(s.available_permits(), 0); + + let mut permit = task::spawn(Permit::new()); + + // Try to acquire the permit + permit.enter(|cx, mut p| { + assert_pending!(p.poll_acquire(cx, 1, &s)); + }); + + s.add_permits(1); + + assert!(permit.is_woken()); + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); +} + +#[test] +#[should_panic] +fn validates_max_permits() { + use std::usize; + Semaphore::new((usize::MAX >> 2) + 1); +} + +#[test] +fn close_semaphore_prevents_acquire() { + let s = Semaphore::new(5); + s.close(); + + assert_eq!(5, s.available_permits()); + + let mut permit_1 = task::spawn(Permit::new()); + let mut permit_2 = task::spawn(Permit::new()); + + assert_ready_err!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_eq!(5, s.available_permits()); + + 
assert_ready_err!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + assert_eq!(5, s.available_permits()); +} + +#[test] +fn close_semaphore_notifies_permit1() { + let s = Semaphore::new(0); + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + s.close(); + + assert!(permit.is_woken()); + assert_ready_err!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); +} + +#[test] +fn close_semaphore_notifies_permit2() { + let s = Semaphore::new(2); + + let mut permit1 = task::spawn(Permit::new()); + let mut permit2 = task::spawn(Permit::new()); + let mut permit3 = task::spawn(Permit::new()); + let mut permit4 = task::spawn(Permit::new()); + + // Acquire a couple of permits + assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + assert_pending!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_pending!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + s.close(); + + assert!(permit3.is_woken()); + assert!(permit4.is_woken()); + + assert_ready_err!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + assert_ready_err!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + assert_eq!(0, s.available_permits()); + + permit1.release(1, &s); + + assert_eq!(1, s.available_permits()); + + assert_ready_err!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s))); + + permit2.release(1, &s); + + assert_eq!(2, s.available_permits()); +} + +#[test] +fn poll_acquire_additional_permits_while_waiting_before_assigned() { + let s = Semaphore::new(1); + + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); + + s.add_permits(1); + assert!(!permit.is_woken()); + + s.add_permits(1); + assert!(permit.is_woken()); + + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); +} + +#[test] +fn try_acquire_additional_permits_while_waiting_before_assigned() { + let s = Semaphore::new(1); + + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + + assert_err!(permit.enter(|_, mut p| p.try_acquire(3, &s))); + + s.add_permits(1); + assert!(permit.is_woken()); + + assert_ok!(permit.enter(|_, mut p| p.try_acquire(2, &s))); +} + +#[test] +fn poll_acquire_additional_permits_while_waiting_after_assigned_success() { + let s = Semaphore::new(1); + + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + + s.add_permits(2); + + assert!(permit.is_woken()); + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s))); +} + +#[test] +fn poll_acquire_additional_permits_while_waiting_after_assigned_requeue() { + let s = Semaphore::new(1); + + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + + s.add_permits(2); + + assert!(permit.is_woken()); + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s))); + + s.add_permits(1); + + assert!(permit.is_woken()); + assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s))); +} + +#[test] +fn poll_acquire_fewer_permits_while_waiting() { + let s = Semaphore::new(1); + + let mut permit = task::spawn(Permit::new()); + + assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s))); + 
+    assert_eq!(s.available_permits(), 0);
+
+    assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+    assert_eq!(s.available_permits(), 0);
+}
+
+#[test]
+fn poll_acquire_fewer_permits_after_assigned() {
+    let s = Semaphore::new(1);
+
+    let mut permit1 = task::spawn(Permit::new());
+    let mut permit2 = task::spawn(Permit::new());
+
+    assert_pending!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 5, &s)));
+    assert_eq!(s.available_permits(), 0);
+
+    assert_pending!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+    s.add_permits(4);
+    assert!(permit1.is_woken());
+    assert!(!permit2.is_woken());
+
+    assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+
+    assert!(permit2.is_woken());
+    assert_eq!(s.available_permits(), 1);
+
+    assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+}
+
+#[test]
+fn forget_partial_1() {
+    let s = Semaphore::new(0);
+
+    let mut permit = task::spawn(Permit::new());
+
+    assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+    s.add_permits(1);
+
+    assert_eq!(0, s.available_permits());
+
+    permit.release(1, &s);
+
+    assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+    assert_eq!(s.available_permits(), 0);
+}
+
+#[test]
+fn forget_partial_2() {
+    let s = Semaphore::new(0);
+
+    let mut permit = task::spawn(Permit::new());
+
+    assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+    s.add_permits(1);
+
+    assert_eq!(0, s.available_permits());
+
+    permit.release(1, &s);
+
+    s.add_permits(1);
+
+    assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+    assert_eq!(s.available_permits(), 0);
+}
diff --git a/third_party/rust/tokio/src/sync/watch.rs b/third_party/rust/tokio/src/sync/watch.rs
new file mode 100644
index 0000000000..ba609a8c6d
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/watch.rs
@@ -0,0 +1,432 @@
+//! A single-producer, multi-consumer channel that only retains the *last* sent
+//! value.
+//!
+//! This channel is useful for watching for changes to a value from multiple
+//! points in the code base, for example, changes to configuration values.
+//!
+//! # Usage
+//!
+//! [`channel`] returns a [`Sender`] / [`Receiver`] pair. These are
+//! the producer and consumer halves of the channel. The channel is
+//! created with an initial value. [`Receiver::recv`] will always
+//! be ready upon creation and will yield either this initial value or
+//! the latest value that has been sent by `Sender`.
+//!
+//! Calls to [`Receiver::recv`] will always yield the latest value.
+//!
+//! # Examples
+//!
+//! ```
+//! use tokio::sync::watch;
+//!
+//! # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+//! let (tx, mut rx) = watch::channel("hello");
+//!
+//! tokio::spawn(async move {
+//!     while let Some(value) = rx.recv().await {
+//!         println!("received = {:?}", value);
+//!     }
+//! });
+//!
+//! tx.broadcast("world")?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! # Closing
+//!
+//! [`Sender::closed`] allows the producer to detect when all [`Receiver`]
+//! handles have been dropped. This indicates that there is no further interest
+//! in the values being produced and work can be stopped.
+//!
+//! # Thread safety
+//!
+//! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other
+//! threads and can be used in a concurrent environment. Clones of [`Receiver`]
+//! handles may be moved to separate threads and also used concurrently.
+//!
+//! [`Sender`]: crate::sync::watch::Sender
+//! [`Receiver`]: crate::sync::watch::Receiver
+//! [`Receiver::recv`]: crate::sync::watch::Receiver::recv
+//! [`channel`]: crate::sync::watch::channel
+//! [`Sender::closed`]: crate::sync::watch::Sender::closed
+
+use crate::future::poll_fn;
+use crate::sync::task::AtomicWaker;
+
+use fnv::FnvHashSet;
+use std::ops;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, Weak};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll};
+
+/// Receives values from the associated [`Sender`](struct@Sender).
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Receiver<T> {
+    /// Pointer to the shared state
+    shared: Arc<Shared<T>>,
+
+    /// Pointer to the watcher's internal state
+    inner: Watcher,
+}
+
+/// Sends values to the associated [`Receiver`](struct@Receiver).
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Sender<T> {
+    shared: Weak<Shared<T>>,
+}
+
+/// Returns a reference to the inner value
+///
+/// Outstanding borrows hold a read lock on the inner value. This means that
+/// long lived borrows could cause the producer half to block. It is recommended
+/// to keep the borrow as short lived as possible.
+#[derive(Debug)]
+pub struct Ref<'a, T> {
+    inner: RwLockReadGuard<'a, T>,
+}
+
+pub mod error {
+    //! Watch error types
+
+    use std::fmt;
+
+    /// Error produced when sending a value fails.
+    #[derive(Debug)]
+    pub struct SendError<T> {
+        pub(crate) inner: T,
+    }
+
+    // ===== impl SendError =====
+
+    impl<T: fmt::Debug> fmt::Display for SendError<T> {
+        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+            write!(fmt, "channel closed")
+        }
+    }
+
+    impl<T: fmt::Debug> std::error::Error for SendError<T> {}
+}
+
+#[derive(Debug)]
+struct Shared<T> {
+    /// The most recent value
+    value: RwLock<T>,
+
+    /// The current version
+    ///
+    /// The lowest bit represents a "closed" state. The rest of the bits
+    /// represent the current version.
+    version: AtomicUsize,
+
+    /// All watchers
+    watchers: Mutex<Watchers>,
+
+    /// Task to notify when all watchers drop
+    cancel: AtomicWaker,
+}
+
+type Watchers = FnvHashSet<Watcher>;
+
+/// The watcher's ID is based on the Arc's pointer.
+#[derive(Clone, Debug)]
+struct Watcher(Arc<WatchInner>);
+
+#[derive(Debug)]
+struct WatchInner {
+    /// Last observed version
+    version: AtomicUsize,
+    waker: AtomicWaker,
+}
+
+const CLOSED: usize = 1;
+
+/// Creates a new watch channel, returning the "send" and "receive" handles.
+///
+/// All values sent by [`Sender`] will become visible to the [`Receiver`] handles.
+/// Only the last value sent is made available to the [`Receiver`] half. All
+/// intermediate values are dropped.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::watch;
+///
+/// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+/// let (tx, mut rx) = watch::channel("hello");
+///
+/// tokio::spawn(async move {
+///     while let Some(value) = rx.recv().await {
+///         println!("received = {:?}", value);
+///     }
+/// });
+///
+/// tx.broadcast("world")?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`Sender`]: struct@Sender
+/// [`Receiver`]: struct@Receiver
+pub fn channel<T: Clone>(init: T) -> (Sender<T>, Receiver<T>) {
+    const VERSION_0: usize = 0;
+    const VERSION_1: usize = 2;
+
+    // We don't start knowing VERSION_1
+    let inner = Watcher::new_version(VERSION_0);
+
+    // Insert the watcher
+    let mut watchers = FnvHashSet::with_capacity_and_hasher(0, Default::default());
+    watchers.insert(inner.clone());
+
+    let shared = Arc::new(Shared {
+        value: RwLock::new(init),
+        version: AtomicUsize::new(VERSION_1),
+        watchers: Mutex::new(watchers),
+        cancel: AtomicWaker::new(),
+    });
+
+    let tx = Sender {
+        shared: Arc::downgrade(&shared),
+    };
+
+    let rx = Receiver { shared, inner };
+
+    (tx, rx)
+}
+
+impl<T> Receiver<T> {
+    /// Returns a reference to the most recently sent value
+    ///
+    /// Outstanding borrows hold a read lock. This means that long lived borrows
+    /// could cause the send half to block. It is recommended to keep the borrow
+    /// as short lived as possible.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::watch;
+    ///
+    /// let (_, rx) = watch::channel("hello");
+    /// assert_eq!(*rx.borrow(), "hello");
+    /// ```
+    pub fn borrow(&self) -> Ref<'_, T> {
+        let inner = self.shared.value.read().unwrap();
+        Ref { inner }
+    }
+
+    // TODO: document
+    #[doc(hidden)]
+    pub fn poll_recv_ref<'a>(&'a mut self, cx: &mut Context<'_>) -> Poll<Option<Ref<'a, T>>> {
+        // Make sure the task is up to date
+        self.inner.waker.register_by_ref(cx.waker());
+
+        let state = self.shared.version.load(SeqCst);
+        let version = state & !CLOSED;
+
+        if self.inner.version.swap(version, Relaxed) != version {
+            let inner = self.shared.value.read().unwrap();
+
+            return Ready(Some(Ref { inner }));
+        }
+
+        if CLOSED == state & CLOSED {
+            // The `Sender` handle has been dropped.
+            return Ready(None);
+        }
+
+        Pending
+    }
+}
+
+impl<T: Clone> Receiver<T> {
+    /// Attempts to clone the latest value sent via the channel.
+    ///
+    /// If this is the first time the function is called on a `Receiver`
+    /// instance, then the function completes immediately with the **current**
+    /// value held by the channel. On the next call, the function waits until
+    /// a new value is sent in the channel.
+    ///
+    /// `None` is returned if the `Sender` half is dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::sync::watch;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (tx, mut rx) = watch::channel("hello");
+    ///
+    ///     let v = rx.recv().await.unwrap();
+    ///     assert_eq!(v, "hello");
+    ///
+    ///     tokio::spawn(async move {
+    ///         tx.broadcast("goodbye").unwrap();
+    ///     });
+    ///
+    ///     // Waits for the new task to spawn and send the value.
+    ///     let v = rx.recv().await.unwrap();
+    ///     assert_eq!(v, "goodbye");
+    ///
+    ///     let v = rx.recv().await;
+    ///     assert!(v.is_none());
+    /// }
+    /// ```
+    pub async fn recv(&mut self) -> Option<T> {
+        poll_fn(|cx| {
+            let v_ref = ready!(self.poll_recv_ref(cx));
+            Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone()))
+        })
+        .await
+    }
+}
+
+#[cfg(feature = "stream")]
+impl<T: Clone> crate::stream::Stream for Receiver<T> {
+    type Item = T;
+
+    fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+        let v_ref = ready!(self.poll_recv_ref(cx));
+
+        Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone()))
+    }
+}
+
+impl<T> Clone for Receiver<T> {
+    fn clone(&self) -> Self {
+        let ver = self.inner.version.load(Relaxed);
+        let inner = Watcher::new_version(ver);
+        let shared = self.shared.clone();
+
+        shared.watchers.lock().unwrap().insert(inner.clone());
+
+        Receiver { shared, inner }
+    }
+}
+
+impl<T> Drop for Receiver<T> {
+    fn drop(&mut self) {
+        self.shared.watchers.lock().unwrap().remove(&self.inner);
+    }
+}
+
+impl<T> Sender<T> {
+    /// Broadcasts a new value via the channel, notifying all receivers.
+    pub fn broadcast(&self, value: T) -> Result<(), error::SendError<T>> {
+        let shared = match self.shared.upgrade() {
+            Some(shared) => shared,
+            // All `Receiver` handles have been dropped
+            None => return Err(error::SendError { inner: value }),
+        };
+
+        // Replace the value
+        {
+            let mut lock = shared.value.write().unwrap();
+            *lock = value;
+        }
+
+        // Update the version. 2 is used so that the CLOSED bit is not set.
+        shared.version.fetch_add(2, SeqCst);
+
+        // Notify all watchers
+        notify_all(&*shared);
+
+        Ok(())
+    }
+
+    /// Completes when all receivers have dropped.
+    ///
+    /// This allows the producer to get notified when interest in the produced
+    /// values is canceled and immediately stop doing work.
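+    ///
+    /// A minimal sketch of how this can be used (hypothetical usage assembled
+    /// from the API on this page, not taken from the upstream docs):
+    ///
+    /// ```
+    /// use tokio::sync::watch;
+    ///
+    /// # async fn dox() {
+    /// let (mut tx, rx) = watch::channel("hello");
+    ///
+    /// tokio::spawn(async move {
+    ///     // Use and then drop the only receiver.
+    ///     drop(rx);
+    /// });
+    ///
+    /// // Completes once every `Receiver`, including clones, has been dropped.
+    /// tx.closed().await;
+    /// # }
+    /// ```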
+ pub async fn closed(&mut self) { + poll_fn(|cx| self.poll_close(cx)).await + } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll<()> { + match self.shared.upgrade() { + Some(shared) => { + shared.cancel.register_by_ref(cx.waker()); + Pending + } + None => Ready(()), + } + } +} + +/// Notifies all watchers of a change +fn notify_all<T>(shared: &Shared<T>) { + let watchers = shared.watchers.lock().unwrap(); + + for watcher in watchers.iter() { + // Notify the task + watcher.waker.wake(); + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + if let Some(shared) = self.shared.upgrade() { + shared.version.fetch_or(CLOSED, SeqCst); + notify_all(&*shared); + } + } +} + +// ===== impl Ref ===== + +impl<T> ops::Deref for Ref<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + self.inner.deref() + } +} + +// ===== impl Shared ===== + +impl<T> Drop for Shared<T> { + fn drop(&mut self) { + self.cancel.wake(); + } +} + +// ===== impl Watcher ===== + +impl Watcher { + fn new_version(version: usize) -> Self { + Watcher(Arc::new(WatchInner { + version: AtomicUsize::new(version), + waker: AtomicWaker::new(), + })) + } +} + +impl std::cmp::PartialEq for Watcher { + fn eq(&self, other: &Watcher) -> bool { + Arc::ptr_eq(&self.0, &other.0) + } +} + +impl std::cmp::Eq for Watcher {} + +impl std::hash::Hash for Watcher { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + (&*self.0 as *const WatchInner).hash(state) + } +} + +impl std::ops::Deref for Watcher { + type Target = WatchInner; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/third_party/rust/tokio/src/task/blocking.rs b/third_party/rust/tokio/src/task/blocking.rs new file mode 100644 index 0000000000..0069b10ada --- /dev/null +++ b/third_party/rust/tokio/src/task/blocking.rs @@ -0,0 +1,71 @@ +use crate::task::JoinHandle; + +cfg_rt_threaded! { + /// Runs the provided blocking function without blocking the executor. + /// + /// In general, issuing a blocking call or performing a lot of compute in a + /// future without yielding is not okay, as it may prevent the executor from + /// driving other futures forward. If you run a closure through this method, + /// the current executor thread will relegate all its executor duties to another + /// (possibly new) thread, and only then poll the task. Note that this requires + /// additional synchronization. + /// + /// # Note + /// + /// This function can only be called from a spawned task when working with + /// the [threaded scheduler](https://docs.rs/tokio/0.2.10/tokio/runtime/index.html#threaded-scheduler). + /// Consider using [tokio::task::spawn_blocking](https://docs.rs/tokio/0.2.10/tokio/task/fn.spawn_blocking.html). + /// + /// # Examples + /// + /// ``` + /// use tokio::task; + /// + /// # async fn docs() { + /// task::block_in_place(move || { + /// // do some compute-heavy work or call synchronous code + /// }); + /// # } + /// ``` + #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))] + pub fn block_in_place<F, R>(f: F) -> R + where + F: FnOnce() -> R, + { + use crate::runtime::{enter, thread_pool}; + + enter::exit(|| thread_pool::block_in_place(f)) + } +} + +cfg_blocking! { + /// Runs the provided closure on a thread where blocking is acceptable. + /// + /// In general, issuing a blocking call or performing a lot of compute in a future without + /// yielding is not okay, as it may prevent the executor from driving other futures forward. 
+ /// A closure that is run through this method will instead be run on a dedicated thread pool for + /// such blocking tasks without holding up the main futures executor. + /// + /// # Examples + /// + /// ``` + /// use tokio::task; + /// + /// # async fn docs() -> Result<(), Box<dyn std::error::Error>>{ + /// let res = task::spawn_blocking(move || { + /// // do some compute-heavy work or call synchronous code + /// "done computing" + /// }).await?; + /// + /// assert_eq!(res, "done computing"); + /// # Ok(()) + /// # } + /// ``` + pub fn spawn_blocking<F, R>(f: F) -> JoinHandle<R> + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + crate::runtime::spawn_blocking(f) + } +} diff --git a/third_party/rust/tokio/src/task/local.rs b/third_party/rust/tokio/src/task/local.rs new file mode 100644 index 0000000000..fce467079f --- /dev/null +++ b/third_party/rust/tokio/src/task/local.rs @@ -0,0 +1,584 @@ +//! Runs `!Send` futures on the current thread. +use crate::runtime::task::{self, JoinHandle, Task}; +use crate::sync::AtomicWaker; +use crate::util::linked_list::LinkedList; + +use std::cell::{Cell, RefCell}; +use std::collections::VecDeque; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Poll; + +use pin_project_lite::pin_project; + +cfg_rt_util! { + /// A set of tasks which are executed on the same thread. + /// + /// In some cases, it is necessary to run one or more futures that do not + /// implement [`Send`] and thus are unsafe to send between threads. In these + /// cases, a [local task set] may be used to schedule one or more `!Send` + /// futures to run together on the same thread. + /// + /// For example, the following code will not compile: + /// + /// ```rust,compile_fail + /// use std::rc::Rc; + /// + /// #[tokio::main] + /// async fn main() { + /// // `Rc` does not implement `Send`, and thus may not be sent between + /// // threads safely. + /// let unsend_data = Rc::new("my unsend data..."); + /// + /// let unsend_data = unsend_data.clone(); + /// // Because the `async` block here moves `unsend_data`, the future is `!Send`. + /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this + /// // will not compile. + /// tokio::spawn(async move { + /// println!("{}", unsend_data); + /// // ... + /// }).await.unwrap(); + /// } + /// ``` + /// In order to spawn `!Send` futures, we can use a local task set to + /// schedule them on the thread calling [`Runtime::block_on`]. When running + /// inside of the local task set, we can use [`task::spawn_local`], which can + /// spawn `!Send` futures. For example: + /// + /// ```rust + /// use std::rc::Rc; + /// use tokio::task; + /// + /// #[tokio::main] + /// async fn main() { + /// let unsend_data = Rc::new("my unsend data..."); + /// + /// // Construct a local task set that can run `!Send` futures. + /// let local = task::LocalSet::new(); + /// + /// // Run the local task set. + /// local.run_until(async move { + /// let unsend_data = unsend_data.clone(); + /// // `spawn_local` ensures that the future is spawned on the local + /// // task set. + /// task::spawn_local(async move { + /// println!("{}", unsend_data); + /// // ... + /// }).await.unwrap(); + /// }).await; + /// } + /// ``` + /// + /// ## Awaiting a `LocalSet` + /// + /// Additionally, a `LocalSet` itself implements `Future`, completing when + /// *all* tasks spawned on the `LocalSet` complete. 
This can be used to run + /// several futures on a `LocalSet` and drive the whole set until they + /// complete. For example, + /// + /// ```rust + /// use tokio::{task, time}; + /// use std::rc::Rc; + /// + /// #[tokio::main] + /// async fn main() { + /// let unsend_data = Rc::new("world"); + /// let local = task::LocalSet::new(); + /// + /// let unsend_data2 = unsend_data.clone(); + /// local.spawn_local(async move { + /// // ... + /// println!("hello {}", unsend_data2) + /// }); + /// + /// local.spawn_local(async move { + /// time::delay_for(time::Duration::from_millis(100)).await; + /// println!("goodbye {}", unsend_data) + /// }); + /// + /// // ... + /// + /// local.await; + /// } + /// ``` + /// + /// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html + /// [local task set]: struct@LocalSet + /// [`Runtime::block_on`]: ../struct.Runtime.html#method.block_on + /// [`task::spawn_local`]: fn@spawn_local + pub struct LocalSet { + /// Current scheduler tick + tick: Cell<u8>, + + /// State available from thread-local + context: Context, + + /// This type should not be Send. + _not_send: PhantomData<*const ()>, + } +} + +/// State available from the thread-local +struct Context { + /// Owned task set and local run queue + tasks: RefCell<Tasks>, + + /// State shared between threads. + shared: Arc<Shared>, +} + +struct Tasks { + /// Collection of all active tasks spawned onto this executor. + owned: LinkedList<Task<Arc<Shared>>>, + + /// Local run queue sender and receiver. + queue: VecDeque<task::Notified<Arc<Shared>>>, +} + +/// LocalSet state shared between threads. +struct Shared { + /// Remote run queue sender + queue: Mutex<VecDeque<task::Notified<Arc<Shared>>>>, + + /// Wake the `LocalSet` task + waker: AtomicWaker, +} + +pin_project! { + #[derive(Debug)] + struct RunUntil<'a, F> { + local_set: &'a LocalSet, + #[pin] + future: F, + } +} + +scoped_thread_local!(static CURRENT: Context); + +cfg_rt_util! { + /// Spawns a `!Send` future on the local task set. + /// + /// The spawned future will be run on the same thread that called `spawn_local.` + /// This may only be called from the context of a local task set. + /// + /// # Panics + /// + /// - This function panics if called outside of a local task set. + /// + /// # Examples + /// + /// ```rust + /// use std::rc::Rc; + /// use tokio::task; + /// + /// #[tokio::main] + /// async fn main() { + /// let unsend_data = Rc::new("my unsend data..."); + /// + /// let local = task::LocalSet::new(); + /// + /// // Run the local task set. + /// local.run_until(async move { + /// let unsend_data = unsend_data.clone(); + /// task::spawn_local(async move { + /// println!("{}", unsend_data); + /// // ... + /// }).await.unwrap(); + /// }).await; + /// } + /// ``` + pub fn spawn_local<F>(future: F) -> JoinHandle<F::Output> + where + F: Future + 'static, + F::Output: 'static, + { + CURRENT.with(|maybe_cx| { + let cx = maybe_cx + .expect("`spawn_local` called from outside of a `task::LocalSet`"); + + // Safety: Tasks are only polled and dropped from the thread that + // spawns them. + let (task, handle) = unsafe { task::joinable_local(future) }; + cx.tasks.borrow_mut().queue.push_back(task); + handle + }) + } +} + +/// Initial queue capacity +const INITIAL_CAPACITY: usize = 64; + +/// Max number of tasks to poll per tick. +const MAX_TASKS_PER_TICK: usize = 61; + +/// How often it check the remote queue first +const REMOTE_FIRST_INTERVAL: u8 = 31; + +impl LocalSet { + /// Returns a new local task set. 
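+    ///
+    /// A minimal construction sketch (usage is hypothetical here; the methods
+    /// below show fuller examples):
+    ///
+    /// ```rust
+    /// use tokio::task::LocalSet;
+    ///
+    /// let local = LocalSet::new();
+    /// ```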
+    pub fn new() -> LocalSet {
+        LocalSet {
+            tick: Cell::new(0),
+            context: Context {
+                tasks: RefCell::new(Tasks {
+                    owned: LinkedList::new(),
+                    queue: VecDeque::with_capacity(INITIAL_CAPACITY),
+                }),
+                shared: Arc::new(Shared {
+                    queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)),
+                    waker: AtomicWaker::new(),
+                }),
+            },
+            _not_send: PhantomData,
+        }
+    }
+
+    /// Spawns a `!Send` task onto the local task set.
+    ///
+    /// This task is guaranteed to be run on the current thread.
+    ///
+    /// Unlike the free function [`spawn_local`], this method may be used to
+    /// spawn local tasks when the task set is _not_ running. For example:
+    /// ```rust
+    /// use tokio::task;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let local = task::LocalSet::new();
+    ///
+    ///     // Spawn a future on the local set. This future will be run when
+    ///     // we call `run_until` to drive the task set.
+    ///     local.spawn_local(async {
+    ///         // ...
+    ///     });
+    ///
+    ///     // Run the local task set.
+    ///     local.run_until(async move {
+    ///         // ...
+    ///     }).await;
+    ///
+    ///     // When `run_until` finishes, we can spawn _more_ futures, which will
+    ///     // run in subsequent calls to `run_until`.
+    ///     local.spawn_local(async {
+    ///         // ...
+    ///     });
+    ///
+    ///     local.run_until(async move {
+    ///         // ...
+    ///     }).await;
+    /// }
+    /// ```
+    /// [`spawn_local`]: fn@spawn_local
+    pub fn spawn_local<F>(&self, future: F) -> JoinHandle<F::Output>
+    where
+        F: Future + 'static,
+        F::Output: 'static,
+    {
+        let (task, handle) = unsafe { task::joinable_local(future) };
+        self.context.tasks.borrow_mut().queue.push_back(task);
+        handle
+    }
+
+    /// Runs a future to completion on the provided runtime, driving any local
+    /// futures spawned on this task set on the current thread.
+    ///
+    /// This runs the given future on the runtime, blocking until it is
+    /// complete, and yielding its resolved result. Any tasks or timers which
+    /// the future spawns internally will be executed on the runtime. The future
+    /// may also call [`spawn_local`] to spawn additional local futures on the
+    /// current thread.
+    ///
+    /// This method should not be called from an asynchronous context.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the executor is at capacity, if the provided
+    /// future panics, or if called within an asynchronous execution context.
+    ///
+    /// # Notes
+    ///
+    /// Since this function internally calls [`Runtime::block_on`], and drives
+    /// futures in the local task set inside that call to `block_on`, the local
+    /// futures may not use [in-place blocking]. If a blocking call needs to be
+    /// issued from a local task, the [`spawn_blocking`] API may be used instead.
+    ///
+    /// For example, this will panic:
+    /// ```should_panic
+    /// use tokio::runtime::Runtime;
+    /// use tokio::task;
+    ///
+    /// let mut rt = Runtime::new().unwrap();
+    /// let local = task::LocalSet::new();
+    /// local.block_on(&mut rt, async {
+    ///     let join = task::spawn_local(async {
+    ///         let blocking_result = task::block_in_place(|| {
+    ///             // ...
+    ///         });
+    ///         // ...
+    ///     });
+    ///     join.await.unwrap();
+    /// })
+    /// ```
+    /// This, however, will not panic:
+    /// ```
+    /// use tokio::runtime::Runtime;
+    /// use tokio::task;
+    ///
+    /// let mut rt = Runtime::new().unwrap();
+    /// let local = task::LocalSet::new();
+    /// local.block_on(&mut rt, async {
+    ///     let join = task::spawn_local(async {
+    ///         let blocking_result = task::spawn_blocking(|| {
+    ///             // ...
+    ///         }).await;
+    ///         // ...
+    ///     });
+    ///     join.await.unwrap();
+    /// })
+    /// ```
+    ///
+    /// [`spawn_local`]: fn@spawn_local
+    /// [`Runtime::block_on`]: ../struct.Runtime.html#method.block_on
+    /// [in-place blocking]: ../blocking/fn.in_place.html
+    /// [`spawn_blocking`]: ../blocking/fn.spawn_blocking.html
+    pub fn block_on<F>(&self, rt: &mut crate::runtime::Runtime, future: F) -> F::Output
+    where
+        F: Future,
+    {
+        rt.block_on(self.run_until(future))
+    }
+
+    /// Run a future to completion on the local set, returning its output.
+    ///
+    /// This returns a future that runs the given future with a local set,
+    /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures.
+    /// Any local futures spawned on the local set will be driven in the
+    /// background until the future passed to `run_until` completes. When the future
+    /// passed to `run_until` finishes, any local futures which have not completed
+    /// will remain on the local set, and will be driven on subsequent calls to
+    /// `run_until` or when [awaiting the local set] itself.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use tokio::task;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     task::LocalSet::new().run_until(async {
+    ///         task::spawn_local(async move {
+    ///             // ...
+    ///         }).await.unwrap();
+    ///         // ...
+    ///     }).await;
+    /// }
+    /// ```
+    ///
+    /// [`spawn_local`]: fn@spawn_local
+    /// [awaiting the local set]: #awaiting-a-localset
+    pub async fn run_until<F>(&self, future: F) -> F::Output
+    where
+        F: Future,
+    {
+        let run_until = RunUntil {
+            future,
+            local_set: self,
+        };
+        run_until.await
+    }
+
+    /// Tick the scheduler, returning whether the local future needs to be
+    /// notified again.
+    fn tick(&self) -> bool {
+        for _ in 0..MAX_TASKS_PER_TICK {
+            match self.next_task() {
+                // Run the task
+                //
+                // Safety: As spawned tasks are `!Send`, `run_unchecked` must be
+                // used. We are responsible for maintaining the invariant that
+                // `run_unchecked` is only called on threads that spawned the
+                // task initially. Because `LocalSet` itself is `!Send`, and
+                // `spawn_local` spawns into the `LocalSet` on the current
+                // thread, the invariant is maintained.
+                Some(task) => crate::coop::budget(|| task.run()),
+                // We have fully drained the queue of notified tasks, so the
+                // local future doesn't need to be notified again; it can wait
+                // until something else wakes a task in the local set.
+                None => return false,
+            }
+        }
+
+        true
+    }
+
+    fn next_task(&self) -> Option<task::Notified<Arc<Shared>>> {
+        let tick = self.tick.get();
+        self.tick.set(tick.wrapping_add(1));
+
+        if tick % REMOTE_FIRST_INTERVAL == 0 {
+            self.context
+                .shared
+                .queue
+                .lock()
+                .unwrap()
+                .pop_front()
+                .or_else(|| self.context.tasks.borrow_mut().queue.pop_front())
+        } else {
+            self.context
+                .tasks
+                .borrow_mut()
+                .queue
+                .pop_front()
+                .or_else(|| self.context.shared.queue.lock().unwrap().pop_front())
+        }
+    }
+
+    fn with<T>(&self, f: impl FnOnce() -> T) -> T {
+        CURRENT.set(&self.context, f)
+    }
+}
+
+impl fmt::Debug for LocalSet {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.debug_struct("LocalSet").finish()
+    }
+}
+
+impl Future for LocalSet {
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+        // Register the waker before starting to work
+        self.context.shared.waker.register_by_ref(cx.waker());
+
+        if self.with(|| self.tick()) {
+            // If `tick` returns true, we need to notify the local future again:
+            // there are still tasks remaining in the run queue.
+ cx.waker().wake_by_ref(); + Poll::Pending + } else if self.context.tasks.borrow().owned.is_empty() { + // If the scheduler has no remaining futures, we're done! + Poll::Ready(()) + } else { + // There are still futures in the local set, but we've polled all the + // futures in the run queue. Therefore, we can just return Pending + // since the remaining futures will be woken from somewhere else. + Poll::Pending + } + } +} + +impl Default for LocalSet { + fn default() -> LocalSet { + LocalSet::new() + } +} + +impl Drop for LocalSet { + fn drop(&mut self) { + self.with(|| { + // Loop required here to ensure borrow is dropped between iterations + #[allow(clippy::while_let_loop)] + loop { + let task = match self.context.tasks.borrow_mut().owned.pop_back() { + Some(task) => task, + None => break, + }; + + // Safety: same as `run_unchecked`. + task.shutdown(); + } + + for task in self.context.tasks.borrow_mut().queue.drain(..) { + task.shutdown(); + } + + for task in self.context.shared.queue.lock().unwrap().drain(..) { + task.shutdown(); + } + + assert!(self.context.tasks.borrow().owned.is_empty()); + }); + } +} + +// === impl LocalFuture === + +impl<T: Future> Future for RunUntil<'_, T> { + type Output = T::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> { + let me = self.project(); + + me.local_set.with(|| { + me.local_set + .context + .shared + .waker + .register_by_ref(cx.waker()); + + if let Poll::Ready(output) = me.future.poll(cx) { + return Poll::Ready(output); + } + + if me.local_set.tick() { + // If `tick` returns `true`, we need to notify the local future again: + // there are still tasks remaining in the run queue. + cx.waker().wake_by_ref(); + } + + Poll::Pending + }) + } +} + +impl Shared { + /// Schedule the provided task on the scheduler. + fn schedule(&self, task: task::Notified<Arc<Self>>) { + CURRENT.with(|maybe_cx| match maybe_cx { + Some(cx) if cx.shared.ptr_eq(self) => { + cx.tasks.borrow_mut().queue.push_back(task); + } + _ => { + self.queue.lock().unwrap().push_back(task); + self.waker.wake(); + } + }); + } + + fn ptr_eq(&self, other: &Shared) -> bool { + self as *const _ == other as *const _ + } +} + +impl task::Schedule for Arc<Shared> { + fn bind(task: Task<Self>) -> Arc<Shared> { + CURRENT.with(|maybe_cx| { + let cx = maybe_cx.expect("scheduler context missing"); + cx.tasks.borrow_mut().owned.push_front(task); + cx.shared.clone() + }) + } + + fn release(&self, task: &Task<Self>) -> Option<Task<Self>> { + use std::ptr::NonNull; + + CURRENT.with(|maybe_cx| { + let cx = maybe_cx.expect("scheduler context missing"); + + assert!(cx.shared.ptr_eq(self)); + + let ptr = NonNull::from(task.header()); + // safety: task must be contained by list. It is inserted into the + // list in `bind`. + unsafe { cx.tasks.borrow_mut().owned.remove(ptr) } + }) + } + + fn schedule(&self, task: task::Notified<Self>) { + Shared::schedule(self, task); + } +} diff --git a/third_party/rust/tokio/src/task/mod.rs b/third_party/rust/tokio/src/task/mod.rs new file mode 100644 index 0000000000..5c89393a5e --- /dev/null +++ b/third_party/rust/tokio/src/task/mod.rs @@ -0,0 +1,242 @@ +//! Asynchronous green-threads. +//! +//! ## What are Tasks? +//! +//! A _task_ is a light weight, non-blocking unit of execution. A task is similar +//! to an OS thread, but rather than being managed by the OS scheduler, they are +//! managed by the [Tokio runtime][rt]. Another name for this general pattern is +//! [green threads]. 
If you are familiar with [Go's goroutines], [Kotlin's +//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as +//! something similar. +//! +//! Key points about tasks include: +//! +//! * Tasks are **light weight**. Because tasks are scheduled by the Tokio +//! runtime rather than the operating system, creating new tasks or switching +//! between tasks does not require a context switch and has fairly low +//! overhead. Creating, running, and destroying large numbers of tasks is +//! quite cheap, especially compared to OS threads. +//! +//! * Tasks are scheduled **cooperatively**. Most operating systems implement +//! _preemptive multitasking_. This is a scheduling technique where the +//! operating system allows each thread to run for a period of time, and then +//! _preempts_ it, temporarily pausing that thread and switching to another. +//! Tasks, on the other hand, implement _cooperative multitasking_. In +//! cooperative multitasking, a task is allowed to run until it _yields_, +//! indicating to the Tokio runtime's scheduler that it cannot currently +//! continue executing. When a task yields, the Tokio runtime switches to +//! executing the next task. +//! +//! * Tasks are **non-blocking**. Typically, when an OS thread performs I/O or +//! must synchronize with another thread, it _blocks_, allowing the OS to +//! schedule another thread. When a task cannot continue executing, it must +//! yield instead, allowing the Tokio runtime to schedule another task. Tasks +//! should generally not perform system calls or other operations that could +//! block a thread, as this would prevent other tasks running on the same +//! thread from executing as well. Instead, this module provides APIs for +//! running blocking operations in an asynchronous context. +//! +//! [rt]: crate::runtime +//! [green threads]: https://en.wikipedia.org/wiki/Green_threads +//! [Go's goroutines]: https://tour.golang.org/concurrency/1 +//! [Kotlin's coroutines]: https://kotlinlang.org/docs/reference/coroutines-overview.html +//! [Erlang's processes]: http://erlang.org/doc/getting_started/conc_prog.html#processes +//! +//! ## Working with Tasks +//! +//! This module provides the following APIs for working with tasks: +//! +//! ### Spawning +//! +//! Perhaps the most important function in this module is [`task::spawn`]. This +//! function can be thought of as an async equivalent to the standard library's +//! [`thread::spawn`][`std::thread::spawn`]. It takes an `async` block or other +//! [future], and creates a new task to run that work concurrently: +//! +//! ``` +//! use tokio::task; +//! +//! # async fn doc() { +//! task::spawn(async { +//! // perform some work here... +//! }); +//! # } +//! ``` +//! +//! Like [`std::thread::spawn`], `task::spawn` returns a [`JoinHandle`] struct. +//! A `JoinHandle` is itself a future which may be used to await the output of +//! the spawned task. For example: +//! +//! ``` +//! use tokio::task; +//! +//! # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { +//! let join = task::spawn(async { +//! // ... +//! "hello world!" +//! }); +//! +//! // ... +//! +//! // Await the result of the spawned task. +//! let result = join.await?; +//! assert_eq!(result, "hello world!"); +//! # Ok(()) +//! # } +//! ``` +//! +//! Again, like `std::thread`'s [`JoinHandle` type][thread_join], if the spawned +//! task panics, awaiting its `JoinHandle` will return a [`JoinError`]`. For +//! example: +//! +//! ``` +//! use tokio::task; +//! +//! 
# #[tokio::main] async fn main() { +//! let join = task::spawn(async { +//! panic!("something bad happened!") +//! }); +//! +//! // The returned result indicates that the task failed. +//! assert!(join.await.is_err()); +//! # } +//! ``` +//! +//! `spawn`, `JoinHandle`, and `JoinError` are present when the "rt-core" +//! feature flag is enabled. +//! +//! [`task::spawn`]: crate::task::spawn() +//! [future]: std::future::Future +//! [`std::thread::spawn`]: std::thread::spawn +//! [`JoinHandle`]: crate::task::JoinHandle +//! [thread_join]: std::thread::JoinHandle +//! [`JoinError`]: crate::task::JoinError +//! +//! ### Blocking and Yielding +//! +//! As we discussed above, code running in asynchronous tasks should not perform +//! operations that can block. A blocking operation performed in a task running +//! on a thread that is also running other tasks would block the entire thread, +//! preventing other tasks from running. +//! +//! Instead, Tokio provides two APIs for running blocking operations in an +//! asynchronous context: [`task::spawn_blocking`] and [`task::block_in_place`]. +//! +//! #### spawn_blocking +//! +//! The `task::spawn_blocking` function is similar to the `task::spawn` function +//! discussed in the previous section, but rather than spawning an +//! _non-blocking_ future on the Tokio runtime, it instead spawns a +//! _blocking_ function on a dedicated thread pool for blocking tasks. For +//! example: +//! +//! ``` +//! use tokio::task; +//! +//! # async fn docs() { +//! task::spawn_blocking(|| { +//! // do some compute-heavy work or call synchronous code +//! }); +//! # } +//! ``` +//! +//! Just like `task::spawn`, `task::spawn_blocking` returns a `JoinHandle` +//! which we can use to await the result of the blocking operation: +//! +//! ```rust +//! # use tokio::task; +//! # async fn docs() -> Result<(), Box<dyn std::error::Error>>{ +//! let join = task::spawn_blocking(|| { +//! // do some compute-heavy work or call synchronous code +//! "blocking completed" +//! }); +//! +//! let result = join.await?; +//! assert_eq!(result, "blocking completed"); +//! # Ok(()) +//! # } +//! ``` +//! +//! #### block_in_place +//! +//! When using the [threaded runtime][rt-threaded], the [`task::block_in_place`] +//! function is also available. Like `task::spawn_blocking`, this function +//! allows running a blocking operation from an asynchronous context. Unlike +//! `spawn_blocking`, however, `block_in_place` works by transitioning the +//! _current_ worker thread to a blocking thread, moving other tasks running on +//! that thread to another worker thread. This can improve performance by avoiding +//! context switches. +//! +//! For example: +//! +//! ``` +//! use tokio::task; +//! +//! # async fn docs() { +//! let result = task::block_in_place(|| { +//! // do some compute-heavy work or call synchronous code +//! "blocking completed" +//! }); +//! +//! assert_eq!(result, "blocking completed"); +//! # } +//! ``` +//! +//! #### yield_now +//! +//! In addition, this module provides a [`task::yield_now`] async function +//! that is analogous to the standard library's [`thread::yield_now`]. Calling +//! and `await`ing this function will cause the current task to yield to the +//! Tokio runtime's scheduler, allowing other tasks to be +//! scheduled. Eventually, the yielding task will be polled again, allowing it +//! to execute. For example: +//! +//! ```rust +//! use tokio::task; +//! +//! # #[tokio::main] async fn main() { +//! async { +//! task::spawn(async { +//! // ... +//! 
println!("spawned task done!") +//! }); +//! +//! // Yield, allowing the newly-spawned task to execute first. +//! task::yield_now().await; +//! println!("main task done!"); +//! } +//! # .await; +//! # } +//! ``` +//! +//! [`task::spawn_blocking`]: crate::task::spawn_blocking +//! [`task::block_in_place`]: crate::task::block_in_place +//! [rt-threaded]: ../runtime/index.html#threaded-scheduler +//! [`task::yield_now`]: crate::task::yield_now() +//! [`thread::yield_now`]: std::thread::yield_now +cfg_blocking! { + mod blocking; + pub use blocking::spawn_blocking; + + cfg_rt_threaded! { + pub use blocking::block_in_place; + } +} + +cfg_rt_core! { + pub use crate::runtime::task::{JoinError, JoinHandle}; + + mod spawn; + pub use spawn::spawn; + + mod yield_now; + pub use yield_now::yield_now; +} + +cfg_rt_util! { + mod local; + pub use local::{spawn_local, LocalSet}; + + mod task_local; + pub use task_local::LocalKey; +} diff --git a/third_party/rust/tokio/src/task/spawn.rs b/third_party/rust/tokio/src/task/spawn.rs new file mode 100644 index 0000000000..fa5ff13b01 --- /dev/null +++ b/third_party/rust/tokio/src/task/spawn.rs @@ -0,0 +1,134 @@ +use crate::runtime; +use crate::task::JoinHandle; + +use std::future::Future; + +doc_rt_core! { + /// Spawns a new asynchronous task, returning a + /// [`JoinHandle`](super::JoinHandle) for it. + /// + /// Spawning a task enables the task to execute concurrently to other tasks. The + /// spawned task may execute on the current thread, or it may be sent to a + /// different thread to be executed. The specifics depend on the current + /// [`Runtime`](crate::runtime::Runtime) configuration. + /// + /// There is no guarantee that a spawned task will execute to completion. + /// When a runtime is shutdown, all outstanding tasks are dropped, + /// regardless of the lifecycle of that task. + /// + /// This function must be called from the context of a Tokio runtime. Tasks running on + /// the Tokio runtime are always inside its context, but you can also enter the context + /// using the [`Handle::enter`](crate::runtime::Handle::enter()) method. + /// + /// # Examples + /// + /// In this example, a server is started and `spawn` is used to start a new task + /// that processes each received connection. + /// + /// ```no_run + /// use tokio::net::{TcpListener, TcpStream}; + /// + /// use std::io; + /// + /// async fn process(socket: TcpStream) { + /// // ... + /// # drop(socket); + /// } + /// + /// #[tokio::main] + /// async fn main() -> io::Result<()> { + /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?; + /// + /// loop { + /// let (socket, _) = listener.accept().await?; + /// + /// tokio::spawn(async move { + /// // Process each socket concurrently. + /// process(socket).await + /// }); + /// } + /// } + /// ``` + /// + /// # Panics + /// + /// Panics if called from **outside** of the Tokio runtime. + /// + /// # Using `!Send` values from a task + /// + /// The task supplied to `spawn` must implement `Send`. However, it is + /// possible to **use** `!Send` values from the task as long as they only + /// exist between calls to `.await`. 
+ /// + /// For example, this will work: + /// + /// ``` + /// use tokio::task; + /// + /// use std::rc::Rc; + /// + /// fn use_rc(rc: Rc<()>) { + /// // Do stuff w/ rc + /// # drop(rc); + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// tokio::spawn(async { + /// // Force the `Rc` to stay in a scope with no `.await` + /// { + /// let rc = Rc::new(()); + /// use_rc(rc.clone()); + /// } + /// + /// task::yield_now().await; + /// }).await.unwrap(); + /// } + /// ``` + /// + /// This will **not** work: + /// + /// ```compile_fail + /// use tokio::task; + /// + /// use std::rc::Rc; + /// + /// fn use_rc(rc: Rc<()>) { + /// // Do stuff w/ rc + /// # drop(rc); + /// } + /// + /// #[tokio::main] + /// async fn main() { + /// tokio::spawn(async { + /// let rc = Rc::new(()); + /// + /// task::yield_now().await; + /// + /// use_rc(rc.clone()); + /// }).await.unwrap(); + /// } + /// ``` + /// + /// Holding on to a `!Send` value across calls to `.await` will result in + /// an unfriendly compile error message similar to: + /// + /// ```text + /// `[... some type ...]` cannot be sent between threads safely + /// ``` + /// + /// or: + /// + /// ```text + /// error[E0391]: cycle detected when processing `main` + /// ``` + pub fn spawn<T>(task: T) -> JoinHandle<T::Output> + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + let spawn_handle = runtime::context::spawn_handle() + .expect("must be called from the context of Tokio runtime configured with either `basic_scheduler` or `threaded_scheduler`"); + spawn_handle.spawn(task) + } +} diff --git a/third_party/rust/tokio/src/task/task_local.rs b/third_party/rust/tokio/src/task/task_local.rs new file mode 100644 index 0000000000..f3341b6a7e --- /dev/null +++ b/third_party/rust/tokio/src/task/task_local.rs @@ -0,0 +1,240 @@ +use pin_project_lite::pin_project; +use std::cell::RefCell; +use std::error::Error; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::{fmt, thread}; + +/// Declares a new task-local key of type [`tokio::task::LocalKey`]. +/// +/// # Syntax +/// +/// The macro wraps any number of static declarations and makes them local to the current task. +/// Publicity and attributes for each static is preserved. For example: +/// +/// # Examples +/// +/// ``` +/// # use tokio::task_local; +/// task_local! { +/// pub static ONE: u32; +/// +/// #[allow(unused)] +/// static TWO: f32; +/// } +/// # fn main() {} +/// ``` +/// +/// See [LocalKey documentation][`tokio::task::LocalKey`] for more +/// information. +/// +/// [`tokio::task::LocalKey`]: ../tokio/task/struct.LocalKey.html +#[macro_export] +macro_rules! task_local { + // empty (base case for the recursion) + () => {}; + + ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => { + $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); + $crate::task_local!($($rest)*); + }; + + ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => { + $crate::__task_local_inner!($(#[$attr])* $vis $name, $t); + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __task_local_inner { + ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => { + static $name: $crate::task::LocalKey<$t> = { + std::thread_local! { + static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None); + } + + $crate::task::LocalKey { inner: __KEY } + }; + }; +} + +/// A key for task-local data. +/// +/// This type is generated by the `task_local!` macro. 
+///
+/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
+/// _not_ lazily initialize the value on first access. Instead, the
+/// value is first initialized when the future containing
+/// the task-local is first polled by a futures executor, like Tokio.
+///
+/// # Examples
+///
+/// ```
+/// # async fn dox() {
+/// tokio::task_local! {
+///     static NUMBER: u32;
+/// }
+///
+/// NUMBER.scope(1, async move {
+///     assert_eq!(NUMBER.get(), 1);
+/// }).await;
+///
+/// NUMBER.scope(2, async move {
+///     assert_eq!(NUMBER.get(), 2);
+///
+///     NUMBER.scope(3, async move {
+///         assert_eq!(NUMBER.get(), 3);
+///     }).await;
+/// }).await;
+/// # }
+/// ```
+/// [`std::thread::LocalKey`]: https://doc.rust-lang.org/std/thread/struct.LocalKey.html
+pub struct LocalKey<T: 'static> {
+    #[doc(hidden)]
+    pub inner: thread::LocalKey<RefCell<Option<T>>>,
+}
+
+impl<T: 'static> LocalKey<T> {
+    /// Sets a value `T` as the task-local value for the future `F`.
+    ///
+    /// On completion of `scope`, the task-local will be dropped.
+    ///
+    /// ### Examples
+    ///
+    /// ```
+    /// # async fn dox() {
+    /// tokio::task_local! {
+    ///     static NUMBER: u32;
+    /// }
+    ///
+    /// NUMBER.scope(1, async move {
+    ///     println!("task local value: {}", NUMBER.get());
+    /// }).await;
+    /// # }
+    /// ```
+    pub async fn scope<F>(&'static self, value: T, f: F) -> F::Output
+    where
+        F: Future,
+    {
+        TaskLocalFuture {
+            local: &self,
+            slot: Some(value),
+            future: f,
+        }
+        .await
+    }
+
+    /// Accesses the current task-local and runs the provided closure.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if not called within the context
+    /// of a future containing a task-local with the corresponding key.
+    pub fn with<F, R>(&'static self, f: F) -> R
+    where
+        F: FnOnce(&T) -> R,
+    {
+        self.try_with(f).expect(
+            "cannot access a Task Local Storage value \
+             without setting it via `LocalKey::scope`",
+        )
+    }
+
+    /// Accesses the current task-local and runs the provided closure.
+    ///
+    /// If the task-local with the associated key is not present, this
+    /// method will return an `AccessError`. For a panicking variant,
+    /// see `with`.
+    pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
+    where
+        F: FnOnce(&T) -> R,
+    {
+        self.inner.with(|v| {
+            if let Some(val) = v.borrow().as_ref() {
+                Ok(f(val))
+            } else {
+                Err(AccessError { _private: () })
+            }
+        })
+    }
+}
+
+impl<T: Copy + 'static> LocalKey<T> {
+    /// Returns a copy of the task-local value
+    /// if the task-local value implements `Copy`.
+    pub fn get(&'static self) -> T {
+        self.with(|v| *v)
+    }
+}
+
+impl<T: 'static> fmt::Debug for LocalKey<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad("LocalKey { .. }")
+    }
+}
+
+pin_project!
{ + struct TaskLocalFuture<T: StaticLifetime, F> { + local: &'static LocalKey<T>, + slot: Option<T>, + #[pin] + future: F, + } +} + +impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + struct Guard<'a, T: 'static> { + local: &'static LocalKey<T>, + slot: &'a mut Option<T>, + prev: Option<T>, + } + + impl<T> Drop for Guard<'_, T> { + fn drop(&mut self) { + let value = self.local.inner.with(|c| c.replace(self.prev.take())); + *self.slot = value; + } + } + + let mut project = self.project(); + let val = project.slot.take(); + + let prev = project.local.inner.with(|c| c.replace(val)); + + let _guard = Guard { + prev, + slot: &mut project.slot, + local: *project.local, + }; + + project.future.poll(cx) + } +} + +// Required to make `pin_project` happy. +trait StaticLifetime: 'static {} +impl<T: 'static> StaticLifetime for T {} + +/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with). +#[derive(Clone, Copy, Eq, PartialEq)] +pub struct AccessError { + _private: (), +} + +impl fmt::Debug for AccessError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AccessError").finish() + } +} + +impl fmt::Display for AccessError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt("task-local value not set", f) + } +} + +impl Error for AccessError {} diff --git a/third_party/rust/tokio/src/task/yield_now.rs b/third_party/rust/tokio/src/task/yield_now.rs new file mode 100644 index 0000000000..e0e20841c9 --- /dev/null +++ b/third_party/rust/tokio/src/task/yield_now.rs @@ -0,0 +1,38 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +doc_rt_core! { + /// Yields execution back to the Tokio runtime. + /// + /// A task yields by awaiting on `yield_now()`, and may resume when that + /// future completes (with no output.) The current task will be re-added as + /// a pending task at the _back_ of the pending queue. Any other pending + /// tasks will be scheduled. No other waking is required for the task to + /// continue. + /// + /// See also the usage example in the [task module](index.html#yield_now). + #[must_use = "yield_now does nothing unless polled/`await`-ed"] + pub async fn yield_now() { + /// Yield implementation + struct YieldNow { + yielded: bool, + } + + impl Future for YieldNow { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + if self.yielded { + return Poll::Ready(()); + } + + self.yielded = true; + cx.waker().wake_by_ref(); + Poll::Pending + } + } + + YieldNow { yielded: false }.await + } +} diff --git a/third_party/rust/tokio/src/time/clock.rs b/third_party/rust/tokio/src/time/clock.rs new file mode 100644 index 0000000000..4ac24af3d0 --- /dev/null +++ b/third_party/rust/tokio/src/time/clock.rs @@ -0,0 +1,164 @@ +//! Source of time abstraction. +//! +//! By default, `std::time::Instant::now()` is used. However, when the +//! `test-util` feature flag is enabled, the values returned for `now()` are +//! configurable. + +cfg_not_test_util! 
{ + use crate::time::{Duration, Instant}; + + #[derive(Debug, Clone)] + pub(crate) struct Clock {} + + pub(crate) fn now() -> Instant { + Instant::from_std(std::time::Instant::now()) + } + + impl Clock { + pub(crate) fn new() -> Clock { + Clock {} + } + + pub(crate) fn now(&self) -> Instant { + now() + } + + pub(crate) fn is_paused(&self) -> bool { + false + } + + pub(crate) fn advance(&self, _dur: Duration) { + unreachable!(); + } + } +} + +cfg_test_util! { + use crate::time::{Duration, Instant}; + use std::sync::{Arc, Mutex}; + use crate::runtime::context; + + /// A handle to a source of time. + #[derive(Debug, Clone)] + pub(crate) struct Clock { + inner: Arc<Mutex<Inner>>, + } + + #[derive(Debug)] + struct Inner { + /// Instant to use as the clock's base instant. + base: std::time::Instant, + + /// Instant at which the clock was last unfrozen + unfrozen: Option<std::time::Instant>, + } + + /// Pause time + /// + /// The current value of `Instant::now()` is saved and all subsequent calls + /// to `Instant::now()` will return the saved value. This is useful for + /// running tests that are dependent on time. + /// + /// # Panics + /// + /// Panics if time is already frozen or if called from outside of the Tokio + /// runtime. + pub fn pause() { + let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); + clock.pause(); + } + + /// Resume time + /// + /// Clears the saved `Instant::now()` value. Subsequent calls to + /// `Instant::now()` will return the value returned by the system call. + /// + /// # Panics + /// + /// Panics if time is not frozen or if called from outside of the Tokio + /// runtime. + pub fn resume() { + let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); + let mut inner = clock.inner.lock().unwrap(); + + if inner.unfrozen.is_some() { + panic!("time is not frozen"); + } + + inner.unfrozen = Some(std::time::Instant::now()); + } + + /// Advance time + /// + /// Increments the saved `Instant::now()` value by `duration`. Subsequent + /// calls to `Instant::now()` will return the result of the increment. + /// + /// # Panics + /// + /// Panics if time is not frozen or if called from outside of the Tokio + /// runtime. + pub async fn advance(duration: Duration) { + let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime"); + clock.advance(duration); + crate::task::yield_now().await; + } + + /// Return the current instant, factoring in frozen time. + pub(crate) fn now() -> Instant { + if let Some(clock) = context::clock() { + clock.now() + } else { + Instant::from_std(std::time::Instant::now()) + } + } + + impl Clock { + /// Return a new `Clock` instance that uses the current execution context's + /// source of time. 
+ pub(crate) fn new() -> Clock { + let now = std::time::Instant::now(); + + Clock { + inner: Arc::new(Mutex::new(Inner { + base: now, + unfrozen: Some(now), + })), + } + } + + pub(crate) fn pause(&self) { + let mut inner = self.inner.lock().unwrap(); + + let elapsed = inner.unfrozen.as_ref().expect("time is already frozen").elapsed(); + inner.base += elapsed; + inner.unfrozen = None; + } + + pub(crate) fn is_paused(&self) -> bool { + let inner = self.inner.lock().unwrap(); + inner.unfrozen.is_none() + } + + pub(crate) fn advance(&self, duration: Duration) { + let mut inner = self.inner.lock().unwrap(); + + if inner.unfrozen.is_some() { + panic!("time is not frozen"); + } + + inner.base += duration; + } + + pub(crate) fn now(&self) -> Instant { + let inner = self.inner.lock().unwrap(); + + let mut ret = inner.base; + + if let Some(unfrozen) = inner.unfrozen { + ret += unfrozen.elapsed(); + } + + Instant::from_std(ret) + } + } +} diff --git a/third_party/rust/tokio/src/time/delay.rs b/third_party/rust/tokio/src/time/delay.rs new file mode 100644 index 0000000000..8088c9955c --- /dev/null +++ b/third_party/rust/tokio/src/time/delay.rs @@ -0,0 +1,99 @@ +use crate::time::driver::Registration; +use crate::time::{Duration, Instant}; + +use std::future::Future; +use std::pin::Pin; +use std::task::{self, Poll}; + +/// Waits until `deadline` is reached. +/// +/// No work is performed while awaiting on the delay to complete. The delay +/// operates at millisecond granularity and should not be used for tasks that +/// require high-resolution timers. +/// +/// # Cancellation +/// +/// Canceling a delay is done by dropping the returned future. No additional +/// cleanup work is required. +pub fn delay_until(deadline: Instant) -> Delay { + let registration = Registration::new(deadline, Duration::from_millis(0)); + Delay { registration } +} + +/// Waits until `duration` has elapsed. +/// +/// Equivalent to `delay_until(Instant::now() + duration)`. An asynchronous +/// analog to `std::thread::sleep`. +/// +/// No work is performed while awaiting on the delay to complete. The delay +/// operates at millisecond granularity and should not be used for tasks that +/// require high-resolution timers. +/// +/// # Cancellation +/// +/// Canceling a delay is done by dropping the returned future. No additional +/// cleanup work is required. +pub fn delay_for(duration: Duration) -> Delay { + delay_until(Instant::now() + duration) +} + +/// Future returned by [`delay_until`](delay_until) and +/// [`delay_for`](delay_for). +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Delay { + /// The link between the `Delay` instance and the timer that drives it. + /// + /// This also stores the `deadline` value. + registration: Registration, +} + +impl Delay { + pub(crate) fn new_timeout(deadline: Instant, duration: Duration) -> Delay { + let registration = Registration::new(deadline, duration); + Delay { registration } + } + + /// Returns the instant at which the future will complete. + pub fn deadline(&self) -> Instant { + self.registration.deadline() + } + + /// Returns `true` if the `Delay` has elapsed + /// + /// A `Delay` is elapsed when the requested duration has elapsed. + pub fn is_elapsed(&self) -> bool { + self.registration.is_elapsed() + } + + /// Resets the `Delay` instance to a new deadline. + /// + /// Calling this function allows changing the instant at which the `Delay` + /// future completes without having to create new associated state. 
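+    ///
+    /// A minimal usage sketch (hypothetical, not taken from the upstream
+    /// docs; `delay_for` is the constructor defined above):
+    ///
+    /// ```
+    /// use tokio::time::{delay_for, Duration, Instant};
+    ///
+    /// # async fn dox() {
+    /// let mut delay = delay_for(Duration::from_millis(10));
+    ///
+    /// // Push the deadline further out before awaiting.
+    /// delay.reset(Instant::now() + Duration::from_millis(20));
+    /// delay.await;
+    /// # }
+    /// ```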
+ ///
+ /// This function can be called both before and after the future has
+ /// completed.
+ pub fn reset(&mut self, deadline: Instant) {
+ self.registration.reset(deadline);
+ }
+}
+
+impl Future for Delay {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ // `poll_elapsed` can return an error in two cases:
+ //
+ // - AtCapacity: this is a pathological case where far too many
+ // delays have been scheduled.
+ // - Shutdown: No timer has been set up, which is a misuse error.
+ //
+ // Both cases are extremely rare, and pretty accurately fit into
+ // "logic errors", so we just panic in this case. A user couldn't
+ // really do much better if we passed the error onwards.
+ match ready!(self.registration.poll_elapsed(cx)) {
+ Ok(()) => Poll::Ready(()),
+ Err(e) => panic!("timer error: {}", e),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/time/delay_queue.rs b/third_party/rust/tokio/src/time/delay_queue.rs
new file mode 100644
index 0000000000..59f901a95d
--- /dev/null
+++ b/third_party/rust/tokio/src/time/delay_queue.rs
@@ -0,0 +1,887 @@
+//! A queue of delayed elements.
+//!
+//! See [`DelayQueue`] for more details.
+//!
+//! [`DelayQueue`]: struct@DelayQueue
+
+use crate::time::wheel::{self, Wheel};
+use crate::time::{delay_until, Delay, Duration, Error, Instant};
+
+use slab::Slab;
+use std::cmp;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+/// A queue of delayed elements.
+///
+/// Once an element is inserted into the `DelayQueue`, it is yielded once the
+/// specified deadline has been reached.
+///
+/// # Usage
+///
+/// Elements are inserted into `DelayQueue` using the [`insert`] or
+/// [`insert_at`] methods. A deadline is provided with the item and a [`Key`] is
+/// returned. The key is used to remove the entry or to change the deadline at
+/// which it should be yielded back.
+///
+/// Once delays have been configured, the `DelayQueue` is used via its
+/// [`Stream`] implementation. [`poll`] is called. If an entry has reached its
+/// deadline, it is returned. If not, `Poll::Pending` is returned and the
+/// current task is notified once the deadline has been reached.
+///
+/// # `Stream` implementation
+///
+/// Items are retrieved from the queue via [`Stream::poll`]. If no delays have
+/// expired, no items are returned. In this case, `NotReady` is returned and the
+/// current task is registered to be notified once the next item's delay has
+/// expired.
+///
+/// If no items are in the queue, i.e. `is_empty()` returns `true`, then `poll`
+/// returns `Ready(None)`. This indicates that the stream has reached an end.
+/// However, if a new item is inserted *after* that, `poll` will once again
+/// start returning items or `NotReady`.
+///
+/// Items are returned ordered by their expirations. Items that are configured
+/// to expire first will be returned first. There are no ordering guarantees
+/// for items configured to expire at the same instant. Also note that delays
+/// are rounded to the closest millisecond.
+///
+/// # Implementation
+///
+/// The `DelayQueue` is backed by the same hashed timing wheel implementation
+/// as [`Timer`]; as such, it offers the same performance benefits. See
+/// [`Timer`] for further implementation notes.
+///
+/// State associated with each entry is stored in a [`slab`]. This allows
+/// amortizing the cost of allocation. Space created for expired entries is
+/// reused when inserting new entries.
+///
+/// Capacity can be checked using [`capacity`] and allocated preemptively by using
+/// the [`reserve`] method.
+///
+/// # Example
+///
+/// Using `DelayQueue` to manage cache entries.
+///
+/// ```rust,no_run
+/// use tokio::time::{delay_queue, DelayQueue, Error};
+///
+/// use futures::ready;
+/// use std::collections::HashMap;
+/// use std::task::{Context, Poll};
+/// use std::time::Duration;
+/// # type CacheKey = String;
+/// # type Value = String;
+///
+/// struct Cache {
+/// entries: HashMap<CacheKey, (Value, delay_queue::Key)>,
+/// expirations: DelayQueue<CacheKey>,
+/// }
+///
+/// const TTL_SECS: u64 = 30;
+///
+/// impl Cache {
+/// fn insert(&mut self, key: CacheKey, value: Value) {
+/// let delay = self.expirations
+/// .insert(key.clone(), Duration::from_secs(TTL_SECS));
+///
+/// self.entries.insert(key, (value, delay));
+/// }
+///
+/// fn get(&self, key: &CacheKey) -> Option<&Value> {
+/// self.entries.get(key)
+/// .map(|&(ref v, _)| v)
+/// }
+///
+/// fn remove(&mut self, key: &CacheKey) {
+/// if let Some((_, cache_key)) = self.entries.remove(key) {
+/// self.expirations.remove(&cache_key);
+/// }
+/// }
+///
+/// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
+/// while let Some(res) = ready!(self.expirations.poll_expired(cx)) {
+/// let entry = res?;
+/// self.entries.remove(entry.get_ref());
+/// }
+///
+/// Poll::Ready(Ok(()))
+/// }
+/// }
+/// ```
+///
+/// [`insert`]: #method.insert
+/// [`insert_at`]: #method.insert_at
+/// [`Key`]: struct@Key
+/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html
+/// [`poll`]: #method.poll
+/// [`Stream::poll`]: #method.poll
+/// [`Timer`]: ../struct.Timer.html
+/// [`slab`]: https://docs.rs/slab
+/// [`capacity`]: #method.capacity
+/// [`reserve`]: #method.reserve
+#[derive(Debug)]
+pub struct DelayQueue<T> {
+ /// Stores data associated with entries
+ slab: Slab<Data<T>>,
+
+ /// Lookup structure tracking all delays in the queue
+ wheel: Wheel<Stack<T>>,
+
+ /// Delays that were inserted when already expired. These cannot be stored
+ /// in the wheel.
+ expired: Stack<T>,
+
+ /// Delay expiring when the *first* item in the queue expires
+ delay: Option<Delay>,
+
+ /// Wheel polling state
+ poll: wheel::Poll,
+
+ /// Instant at which the timer starts
+ start: Instant,
+}
+
+/// An entry in `DelayQueue` that has expired and been removed.
+///
+/// Values are returned by [`DelayQueue::poll`].
+///
+/// [`DelayQueue::poll`]: method@DelayQueue::poll
+#[derive(Debug)]
+pub struct Expired<T> {
+ /// The data stored in the queue
+ data: T,
+
+ /// The expiration time
+ deadline: Instant,
+
+ /// The key associated with the entry
+ key: Key,
+}
+
+/// Token to a value stored in a `DelayQueue`.
+///
+/// Instances of `Key` are returned by [`DelayQueue::insert`]. See [`DelayQueue`]
+/// documentation for more details.
+///
+/// [`DelayQueue`]: struct@DelayQueue
+/// [`DelayQueue::insert`]: method@DelayQueue::insert
+#[derive(Debug, Clone)]
+pub struct Key {
+ index: usize,
+}
+
+#[derive(Debug)]
+struct Stack<T> {
+ /// Head of the stack
+ head: Option<usize>,
+ _p: PhantomData<fn() -> T>,
+}
+
+#[derive(Debug)]
+struct Data<T> {
+ /// The data being stored in the queue. It will be returned at the
+ /// requested instant.
+ inner: T,
+
+ /// The instant at which the item is returned.
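+ /// (Stored as whole milliseconds measured from the queue's `start`
+ /// instant; see `normalize_deadline`.)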
+ when: u64,
+
+ /// Set to true when stored in the `expired` queue
+ expired: bool,
+
+ /// Next entry in the stack
+ next: Option<usize>,
+
+ /// Previous entry in the stack
+ prev: Option<usize>,
+}
+
+/// Maximum number of entries the queue can handle
+const MAX_ENTRIES: usize = (1 << 30) - 1;
+
+impl<T> DelayQueue<T> {
+ /// Creates a new, empty `DelayQueue`.
+ ///
+ /// The queue will not allocate storage until items are inserted into it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use tokio::time::DelayQueue;
+ /// let delay_queue: DelayQueue<u32> = DelayQueue::new();
+ /// ```
+ pub fn new() -> DelayQueue<T> {
+ DelayQueue::with_capacity(0)
+ }
+
+ /// Creates a new, empty `DelayQueue` with the specified capacity.
+ ///
+ /// The queue will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the queue will not allocate for
+ /// storage.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use tokio::time::DelayQueue;
+ /// # use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::with_capacity(10);
+ ///
+ /// // These insertions are done without further allocation
+ /// for i in 0..10 {
+ /// delay_queue.insert(i, Duration::from_secs(i));
+ /// }
+ ///
+ /// // This will make the queue allocate additional storage
+ /// delay_queue.insert(11, Duration::from_secs(11));
+ /// # }
+ /// ```
+ pub fn with_capacity(capacity: usize) -> DelayQueue<T> {
+ DelayQueue {
+ wheel: Wheel::new(),
+ slab: Slab::with_capacity(capacity),
+ expired: Stack::default(),
+ delay: None,
+ poll: wheel::Poll::new(0),
+ start: Instant::now(),
+ }
+ }
+
+ /// Inserts `value` into the queue set to expire at a specific instant in
+ /// time.
+ ///
+ /// This function is identical to `insert`, but takes an `Instant` instead
+ /// of a `Duration`.
+ ///
+ /// `value` is stored in the queue until `when` is reached, at which point
+ /// `value` will be returned from [`poll`]. If `when` has already been
+ /// reached, then `value` is immediately made available to poll.
+ ///
+ /// The return value represents the insertion and is used as an argument to
+ /// [`remove`] and [`reset`]. Note that [`Key`] is a token and may be reused
+ /// once `value` is removed from the queue, either by calling [`poll`] after
+ /// `when` is reached or by calling [`remove`]. At this point, the caller
+ /// must take care not to use the returned [`Key`] again, as it may
+ /// reference a different item in the queue.
+ ///
+ /// See the [type]-level documentation for more details.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `when` is too far in the future.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::{DelayQueue, Duration, Instant};
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert_at(
+ /// "foo", Instant::now() + Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ ///
+ /// [`poll`]: #method.poll
+ /// [`remove`]: #method.remove
+ /// [`reset`]: #method.reset
+ /// [`Key`]: struct@Key
+ /// [type]: #
+ pub fn insert_at(&mut self, value: T, when: Instant) -> Key {
+ assert!(self.slab.len() < MAX_ENTRIES, "max entries exceeded");
+
+ // Normalize the deadline. Values cannot be set to expire in the past.
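+ // (`normalize_deadline` converts the `Instant` to whole milliseconds
+ // past `self.start`, rounding up, and clamps it to the wheel's already
+ // elapsed time -- see its definition below.)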
+ let when = self.normalize_deadline(when);
+
+ // Insert the value in the store
+ let key = self.slab.insert(Data {
+ inner: value,
+ when,
+ expired: false,
+ next: None,
+ prev: None,
+ });
+
+ self.insert_idx(when, key);
+
+ // Set a new delay if the current delay's deadline is later than that
+ // of the new item
+ let should_set_delay = if let Some(ref delay) = self.delay {
+ let current_exp = self.normalize_deadline(delay.deadline());
+ current_exp > when
+ } else {
+ true
+ };
+
+ if should_set_delay {
+ let delay_time = self.start + Duration::from_millis(when);
+ if let Some(ref mut delay) = &mut self.delay {
+ delay.reset(delay_time);
+ } else {
+ self.delay = Some(delay_until(delay_time));
+ }
+ }
+
+ Key::new(key)
+ }
+
+ /// Attempts to pull out the next value of the delay queue, registering the
+ /// current task for wakeup if the value is not yet available, and returning
+ /// `None` if the queue is exhausted.
+ pub fn poll_expired(
+ &mut self,
+ cx: &mut task::Context<'_>,
+ ) -> Poll<Option<Result<Expired<T>, Error>>> {
+ let item = ready!(self.poll_idx(cx));
+ Poll::Ready(item.map(|result| {
+ result.map(|idx| {
+ let data = self.slab.remove(idx);
+ debug_assert!(data.next.is_none());
+ debug_assert!(data.prev.is_none());
+
+ Expired {
+ key: Key::new(idx),
+ data: data.inner,
+ deadline: self.start + Duration::from_millis(data.when),
+ }
+ })
+ }))
+ }
+
+ /// Inserts `value` into the queue set to expire after the requested duration
+ /// elapses.
+ ///
+ /// This function is identical to `insert_at`, but takes a `Duration`
+ /// instead of an `Instant`.
+ ///
+ /// `value` is stored in the queue until `timeout` has elapsed, at which
+ /// point `value` will be returned from [`poll`]. If `timeout` is zero, then
+ /// `value` is immediately made available to poll.
+ ///
+ /// The return value represents the insertion and is used as an argument to
+ /// [`remove`] and [`reset`]. Note that [`Key`] is a token and may be reused
+ /// once `value` is removed from the queue, either by calling [`poll`] after
+ /// the deadline is reached or by calling [`remove`]. At this point, the
+ /// caller must take care not to use the returned [`Key`] again, as it may
+ /// reference a different item in the queue.
+ ///
+ /// See the [type]-level documentation for more details.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `timeout` is greater than the maximum supported
+ /// duration.
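+ ///
+ /// (With the six wheel levels described in the driver documentation, the
+ /// maximum supported duration is on the order of `64^6` milliseconds,
+ /// i.e. roughly two years. This bound is inferred from those docs, not a
+ /// guaranteed constant.)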
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ ///
+ /// [`poll`]: #method.poll
+ /// [`remove`]: #method.remove
+ /// [`reset`]: #method.reset
+ /// [`Key`]: struct@Key
+ /// [type]: #
+ pub fn insert(&mut self, value: T, timeout: Duration) -> Key {
+ self.insert_at(value, Instant::now() + timeout)
+ }
+
+ fn insert_idx(&mut self, when: u64, key: usize) {
+ use self::wheel::{InsertError, Stack};
+
+ // Register the deadline with the timer wheel
+ match self.wheel.insert(when, key, &mut self.slab) {
+ Ok(_) => {}
+ Err((_, InsertError::Elapsed)) => {
+ self.slab[key].expired = true;
+ // The delay is already expired, store it in the expired queue
+ self.expired.push(key, &mut self.slab);
+ }
+ Err((_, err)) => panic!("invalid deadline; err={:?}", err),
+ }
+ }
+
+ /// Removes the item associated with `key` from the queue.
+ ///
+ /// There must be an item associated with `key`. The function returns the
+ /// removed item as well as the `Instant` at which the delay will have
+ /// expired.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `key` is not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ pub fn remove(&mut self, key: &Key) -> Expired<T> {
+ use crate::time::wheel::Stack;
+
+ // Special case the `expired` queue
+ if self.slab[key.index].expired {
+ self.expired.remove(&key.index, &mut self.slab);
+ } else {
+ self.wheel.remove(&key.index, &mut self.slab);
+ }
+
+ let data = self.slab.remove(key.index);
+
+ Expired {
+ key: Key::new(key.index),
+ data: data.inner,
+ deadline: self.start + Duration::from_millis(data.when),
+ }
+ }
+
+ /// Sets the delay of the item associated with `key` to expire at `when`.
+ ///
+ /// This function is identical to `reset` but takes an `Instant` instead of
+ /// a `Duration`.
+ ///
+ /// The item remains in the queue but the delay is set to expire at `when`.
+ /// If `when` is in the past, then the item is immediately made available to
+ /// the caller.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `when` is too far in the future or if `key` is
+ /// not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::{DelayQueue, Duration, Instant};
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // "foo" is scheduled to be returned in 5 seconds
+ ///
+ /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10));
+ ///
+ /// // "foo" is now scheduled to be returned in 10 seconds
+ /// # }
+ /// ```
+ pub fn reset_at(&mut self, key: &Key, when: Instant) {
+ self.wheel.remove(&key.index, &mut self.slab);
+
+ // Normalize the deadline. Values cannot be set to expire in the past.
+ let when = self.normalize_deadline(when);
+
+ self.slab[key.index].when = when;
+ self.insert_idx(when, key.index);
+
+ let next_deadline = self.next_deadline();
+ if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) {
+ delay.reset(deadline);
+ }
+ }
+
+ /// Returns the instant at which the wheel should next be polled, if any.
+ fn next_deadline(&mut self) -> Option<Instant> {
+ self.wheel
+ .poll_at()
+ .map(|poll_at| self.start + Duration::from_millis(poll_at))
+ }
+
+ /// Sets the delay of the item associated with `key` to expire after
+ /// `timeout`.
+ ///
+ /// This function is identical to `reset_at` but takes a `Duration` instead
+ /// of an `Instant`.
+ ///
+ /// The item remains in the queue but the delay is set to expire after
+ /// `timeout`. If `timeout` is zero, then the item is immediately made
+ /// available to the caller.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `timeout` is greater than the maximum supported
+ /// duration or if `key` is not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // "foo" is scheduled to be returned in 5 seconds
+ ///
+ /// delay_queue.reset(&key, Duration::from_secs(10));
+ ///
+ /// // "foo" is now scheduled to be returned in 10 seconds
+ /// # }
+ /// ```
+ pub fn reset(&mut self, key: &Key, timeout: Duration) {
+ self.reset_at(key, Instant::now() + timeout);
+ }
+
+ /// Clears the queue, removing all items.
+ ///
+ /// After calling `clear`, [`poll`] will return `Ready(None)`.
+ ///
+ /// Note that this method has no effect on the allocated capacity.
+ ///
+ /// [`poll`]: #method.poll
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ ///
+ /// delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// assert!(!delay_queue.is_empty());
+ ///
+ /// delay_queue.clear();
+ ///
+ /// assert!(delay_queue.is_empty());
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.slab.clear();
+ self.expired = Stack::default();
+ self.wheel = Wheel::new();
+ self.delay = None;
+ }
+
+ /// Returns the number of elements the queue can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ ///
+ /// let delay_queue: DelayQueue<i32> = DelayQueue::with_capacity(10);
+ /// assert_eq!(delay_queue.capacity(), 10);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.slab.capacity()
+ }
+
+ /// Returns the number of elements currently in the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue: DelayQueue<i32> = DelayQueue::with_capacity(10);
+ /// assert_eq!(delay_queue.len(), 0);
+ /// delay_queue.insert(3, Duration::from_secs(5));
+ /// assert_eq!(delay_queue.len(), 1);
+ /// # }
+ /// ```
+ pub fn len(&self) -> usize {
+ self.slab.len()
+ }
+
+ /// Reserves capacity for at least `additional` more items to be queued
+ /// without allocating.
+ /// + /// `reserve` does nothing if the queue already has sufficient capacity for + /// `additional` more values. If more capacity is required, a new segment of + /// memory will be allocated and all existing values will be copied into it. + /// As such, if the queue is already very large, a call to `reserve` can end + /// up being expensive. + /// + /// The queue may reserve more than `additional` extra space in order to + /// avoid frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds the maximum number of entries the + /// queue can contain. + /// + /// # Examples + /// + /// ``` + /// use tokio::time::DelayQueue; + /// use std::time::Duration; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let mut delay_queue = DelayQueue::new(); + /// + /// delay_queue.insert("hello", Duration::from_secs(10)); + /// delay_queue.reserve(10); + /// + /// assert!(delay_queue.capacity() >= 11); + /// # } + /// ``` + pub fn reserve(&mut self, additional: usize) { + self.slab.reserve(additional); + } + + /// Returns `true` if there are no items in the queue. + /// + /// Note that this function returns `false` even if all items have not yet + /// expired and a call to `poll` will return `NotReady`. + /// + /// # Examples + /// + /// ``` + /// use tokio::time::DelayQueue; + /// use std::time::Duration; + /// + /// # #[tokio::main] + /// # async fn main() { + /// let mut delay_queue = DelayQueue::new(); + /// assert!(delay_queue.is_empty()); + /// + /// delay_queue.insert("hello", Duration::from_secs(5)); + /// assert!(!delay_queue.is_empty()); + /// # } + /// ``` + pub fn is_empty(&self) -> bool { + self.slab.is_empty() + } + + /// Polls the queue, returning the index of the next slot in the slab that + /// should be returned. + /// + /// A slot should be returned when the associated deadline has been reached. + fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<usize, Error>>> { + use self::wheel::Stack; + + let expired = self.expired.pop(&mut self.slab); + + if expired.is_some() { + return Poll::Ready(expired.map(Ok)); + } + + loop { + if let Some(ref mut delay) = self.delay { + if !delay.is_elapsed() { + ready!(Pin::new(&mut *delay).poll(cx)); + } + + let now = crate::time::ms(delay.deadline() - self.start, crate::time::Round::Down); + + self.poll = wheel::Poll::new(now); + } + + // We poll the wheel to get the next value out before finding the next deadline. + let wheel_idx = self.wheel.poll(&mut self.poll, &mut self.slab); + + self.delay = self.next_deadline().map(delay_until); + + if let Some(idx) = wheel_idx { + return Poll::Ready(Some(Ok(idx))); + } + + if self.delay.is_none() { + return Poll::Ready(None); + } + } + } + + fn normalize_deadline(&self, when: Instant) -> u64 { + let when = if when < self.start { + 0 + } else { + crate::time::ms(when - self.start, crate::time::Round::Up) + }; + + cmp::max(when, self.wheel.elapsed()) + } +} + +// We never put `T` in a `Pin`... +impl<T> Unpin for DelayQueue<T> {} + +impl<T> Default for DelayQueue<T> { + fn default() -> DelayQueue<T> { + DelayQueue::new() + } +} + +#[cfg(feature = "stream")] +impl<T> futures_core::Stream for DelayQueue<T> { + // DelayQueue seems much more specific, where a user may care that it + // has reached capacity, so return those errors instead of panicking. 
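+ // A caller can treat `AtCapacity` as transient back-pressure (shed load
+ // and poll again later), while `Shutdown` is permanent; see
+ // `crate::time::Error` for the distinction.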
+ type Item = Result<Expired<T>, Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> { + DelayQueue::poll_expired(self.get_mut(), cx) + } +} + +impl<T> wheel::Stack for Stack<T> { + type Owned = usize; + type Borrowed = usize; + type Store = Slab<Data<T>>; + + fn is_empty(&self) -> bool { + self.head.is_none() + } + + fn push(&mut self, item: Self::Owned, store: &mut Self::Store) { + // Ensure the entry is not already in a stack. + debug_assert!(store[item].next.is_none()); + debug_assert!(store[item].prev.is_none()); + + // Remove the old head entry + let old = self.head.take(); + + if let Some(idx) = old { + store[idx].prev = Some(item); + } + + store[item].next = old; + self.head = Some(item) + } + + fn pop(&mut self, store: &mut Self::Store) -> Option<Self::Owned> { + if let Some(idx) = self.head { + self.head = store[idx].next; + + if let Some(idx) = self.head { + store[idx].prev = None; + } + + store[idx].next = None; + debug_assert!(store[idx].prev.is_none()); + + Some(idx) + } else { + None + } + } + + fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) { + assert!(store.contains(*item)); + + // Ensure that the entry is in fact contained by the stack + debug_assert!({ + // This walks the full linked list even if an entry is found. + let mut next = self.head; + let mut contains = false; + + while let Some(idx) = next { + if idx == *item { + debug_assert!(!contains); + contains = true; + } + + next = store[idx].next; + } + + contains + }); + + if let Some(next) = store[*item].next { + store[next].prev = store[*item].prev; + } + + if let Some(prev) = store[*item].prev { + store[prev].next = store[*item].next; + } else { + self.head = store[*item].next; + } + + store[*item].next = None; + store[*item].prev = None; + } + + fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 { + store[*item].when + } +} + +impl<T> Default for Stack<T> { + fn default() -> Stack<T> { + Stack { + head: None, + _p: PhantomData, + } + } +} + +impl Key { + pub(crate) fn new(index: usize) -> Key { + Key { index } + } +} + +impl<T> Expired<T> { + /// Returns a reference to the inner value. + pub fn get_ref(&self) -> &T { + &self.data + } + + /// Returns a mutable reference to the inner value. + pub fn get_mut(&mut self) -> &mut T { + &mut self.data + } + + /// Consumes `self` and returns the inner value. + pub fn into_inner(self) -> T { + self.data + } + + /// Returns the deadline that the expiration was set to. + pub fn deadline(&self) -> Instant { + self.deadline + } +} diff --git a/third_party/rust/tokio/src/time/driver/atomic_stack.rs b/third_party/rust/tokio/src/time/driver/atomic_stack.rs new file mode 100644 index 0000000000..7e5a83fa52 --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/atomic_stack.rs @@ -0,0 +1,124 @@ +use crate::time::driver::Entry; +use crate::time::Error; + +use std::ptr; +use std::sync::atomic::AtomicPtr; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; + +/// A stack of `Entry` nodes +#[derive(Debug)] +pub(crate) struct AtomicStack { + /// Stack head + head: AtomicPtr<Entry>, +} + +/// Entries that were removed from the stack +#[derive(Debug)] +pub(crate) struct AtomicStackEntries { + ptr: *mut Entry, +} + +/// Used to indicate that the timer has shutdown. +const SHUTDOWN: *mut Entry = 1 as *mut _; + +impl AtomicStack { + pub(crate) fn new() -> AtomicStack { + AtomicStack { + head: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Pushes an entry onto the stack. 
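+ ///
+ /// This is a lock-free (Treiber-style) push: the `queued` flag guards
+ /// against double-insertion and the head pointer is replaced with a
+ /// compare-and-swap loop.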
+ /// + /// Returns `true` if the entry was pushed, `false` if the entry is already + /// on the stack, `Err` if the timer is shutdown. + pub(crate) fn push(&self, entry: &Arc<Entry>) -> Result<bool, Error> { + // First, set the queued bit on the entry + let queued = entry.queued.fetch_or(true, SeqCst); + + if queued { + // Already queued, nothing more to do + return Ok(false); + } + + let ptr = Arc::into_raw(entry.clone()) as *mut _; + + let mut curr = self.head.load(SeqCst); + + loop { + if curr == SHUTDOWN { + // Don't leak the entry node + let _ = unsafe { Arc::from_raw(ptr) }; + + return Err(Error::shutdown()); + } + + // Update the `next` pointer. This is safe because setting the queued + // bit is a "lock" on this field. + unsafe { + *(entry.next_atomic.get()) = curr; + } + + let actual = self.head.compare_and_swap(curr, ptr, SeqCst); + + if actual == curr { + break; + } + + curr = actual; + } + + Ok(true) + } + + /// Takes all entries from the stack + pub(crate) fn take(&self) -> AtomicStackEntries { + let ptr = self.head.swap(ptr::null_mut(), SeqCst); + AtomicStackEntries { ptr } + } + + /// Drains all remaining nodes in the stack and prevent any new nodes from + /// being pushed onto the stack. + pub(crate) fn shutdown(&self) { + // Shutdown the processing queue + let ptr = self.head.swap(SHUTDOWN, SeqCst); + + // Let the drop fn of `AtomicStackEntries` handle draining the stack + drop(AtomicStackEntries { ptr }); + } +} + +// ===== impl AtomicStackEntries ===== + +impl Iterator for AtomicStackEntries { + type Item = Arc<Entry>; + + fn next(&mut self) -> Option<Self::Item> { + if self.ptr.is_null() { + return None; + } + + // Convert the pointer to an `Arc<Entry>` + let entry = unsafe { Arc::from_raw(self.ptr) }; + + // Update `self.ptr` to point to the next element of the stack + self.ptr = unsafe { *entry.next_atomic.get() }; + + // Unset the queued flag + let res = entry.queued.fetch_and(false, SeqCst); + debug_assert!(res); + + // Return the entry + Some(entry) + } +} + +impl Drop for AtomicStackEntries { + fn drop(&mut self) { + for entry in self { + // Flag the entry as errored + entry.error(); + } + } +} diff --git a/third_party/rust/tokio/src/time/driver/entry.rs b/third_party/rust/tokio/src/time/driver/entry.rs new file mode 100644 index 0000000000..20cc824019 --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/entry.rs @@ -0,0 +1,345 @@ +use crate::loom::sync::atomic::AtomicU64; +use crate::sync::AtomicWaker; +use crate::time::driver::{Handle, Inner}; +use crate::time::{Duration, Error, Instant}; + +use std::cell::UnsafeCell; +use std::ptr; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::{Arc, Weak}; +use std::task::{self, Poll}; +use std::u64; + +/// Internal state shared between a `Delay` instance and the timer. +/// +/// This struct is used as a node in two intrusive data structures: +/// +/// * An atomic stack used to signal to the timer thread that the entry state +/// has changed. The timer thread will observe the entry on this stack and +/// perform any actions as necessary. +/// +/// * A doubly linked list used **only** by the timer thread. Each slot in the +/// timer wheel is a head pointer to the list of entries that must be +/// processed during that timer tick. +#[derive(Debug)] +pub(crate) struct Entry { + /// Only accessed from `Registration`. + time: CachePadded<UnsafeCell<Time>>, + + /// Timer internals. 
Using a weak pointer allows the timer to shutdown + /// without all `Delay` instances having completed. + /// + /// When `None`, the entry has not yet been linked with a timer instance. + inner: Weak<Inner>, + + /// Tracks the entry state. This value contains the following information: + /// + /// * The deadline at which the entry must be "fired". + /// * A flag indicating if the entry has already been fired. + /// * Whether or not the entry transitioned to the error state. + /// + /// When an `Entry` is created, `state` is initialized to the instant at + /// which the entry must be fired. When a timer is reset to a different + /// instant, this value is changed. + state: AtomicU64, + + /// Task to notify once the deadline is reached. + waker: AtomicWaker, + + /// True when the entry is queued in the "process" stack. This value + /// is set before pushing the value and unset after popping the value. + /// + /// TODO: This could possibly be rolled up into `state`. + pub(super) queued: AtomicBool, + + /// Next entry in the "process" linked list. + /// + /// Access to this field is coordinated by the `queued` flag. + /// + /// Represents a strong Arc ref. + pub(super) next_atomic: UnsafeCell<*mut Entry>, + + /// When the entry expires, relative to the `start` of the timer + /// (Inner::start). This is only used by the timer. + /// + /// A `Delay` instance can be reset to a different deadline by the thread + /// that owns the `Delay` instance. In this case, the timer thread will not + /// immediately know that this has happened. The timer thread must know the + /// last deadline that it saw as it uses this value to locate the entry in + /// its wheel. + /// + /// Once the timer thread observes that the instant has changed, it updates + /// the wheel and sets this value. The idea is that this value eventually + /// converges to the value of `state` as the timer thread makes updates. + when: UnsafeCell<Option<u64>>, + + /// Next entry in the State's linked list. + /// + /// This is only accessed by the timer + pub(super) next_stack: UnsafeCell<Option<Arc<Entry>>>, + + /// Previous entry in the State's linked list. + /// + /// This is only accessed by the timer and is used to unlink a canceled + /// entry. + /// + /// This is a weak reference. + pub(super) prev_stack: UnsafeCell<*const Entry>, +} + +/// Stores the info for `Delay`. 
+#[derive(Debug)] +pub(crate) struct Time { + pub(crate) deadline: Instant, + pub(crate) duration: Duration, +} + +/// Flag indicating a timer entry has elapsed +const ELAPSED: u64 = 1 << 63; + +/// Flag indicating a timer entry has reached an error state +const ERROR: u64 = u64::MAX; + +// ===== impl Entry ===== + +impl Entry { + pub(crate) fn new(handle: &Handle, deadline: Instant, duration: Duration) -> Arc<Entry> { + let inner = handle.inner().unwrap(); + let entry: Entry; + + // Increment the number of active timeouts + if inner.increment().is_err() { + entry = Entry::new2(deadline, duration, Weak::new(), ERROR) + } else { + let when = inner.normalize_deadline(deadline); + let state = if when <= inner.elapsed() { + ELAPSED + } else { + when + }; + entry = Entry::new2(deadline, duration, Arc::downgrade(&inner), state); + } + + let entry = Arc::new(entry); + if inner.queue(&entry).is_err() { + entry.error(); + } + + entry + } + + /// Only called by `Registration` + pub(crate) fn time_ref(&self) -> &Time { + unsafe { &*self.time.0.get() } + } + + /// Only called by `Registration` + #[allow(clippy::mut_from_ref)] // https://github.com/rust-lang/rust-clippy/issues/4281 + pub(crate) unsafe fn time_mut(&self) -> &mut Time { + &mut *self.time.0.get() + } + + /// The current entry state as known by the timer. This is not the value of + /// `state`, but lets the timer know how to converge its state to `state`. + pub(crate) fn when_internal(&self) -> Option<u64> { + unsafe { *self.when.get() } + } + + pub(crate) fn set_when_internal(&self, when: Option<u64>) { + unsafe { + *self.when.get() = when; + } + } + + /// Called by `Timer` to load the current value of `state` for processing + pub(crate) fn load_state(&self) -> Option<u64> { + let state = self.state.load(SeqCst); + + if is_elapsed(state) { + None + } else { + Some(state) + } + } + + pub(crate) fn is_elapsed(&self) -> bool { + let state = self.state.load(SeqCst); + is_elapsed(state) + } + + pub(crate) fn fire(&self, when: u64) { + let mut curr = self.state.load(SeqCst); + + loop { + if is_elapsed(curr) || curr > when { + return; + } + + let next = ELAPSED | curr; + let actual = self.state.compare_and_swap(curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + self.waker.wake(); + } + + pub(crate) fn error(&self) { + // Only transition to the error state if not currently elapsed + let mut curr = self.state.load(SeqCst); + + loop { + if is_elapsed(curr) { + return; + } + + let next = ERROR; + + let actual = self.state.compare_and_swap(curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + self.waker.wake(); + } + + pub(crate) fn cancel(entry: &Arc<Entry>) { + let state = entry.state.fetch_or(ELAPSED, SeqCst); + + if is_elapsed(state) { + // Nothing more to do + return; + } + + // If registered with a timer instance, try to upgrade the Arc. 
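+ // If the timer has already been dropped there is no wheel to unlink
+ // the entry from, so it can simply be forgotten here.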
+ let inner = match entry.upgrade_inner() { + Some(inner) => inner, + None => return, + }; + + let _ = inner.queue(entry); + } + + pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> { + let mut curr = self.state.load(SeqCst); + + if is_elapsed(curr) { + return Poll::Ready(if curr == ERROR { + Err(Error::shutdown()) + } else { + Ok(()) + }); + } + + self.waker.register_by_ref(cx.waker()); + + curr = self.state.load(SeqCst); + + if is_elapsed(curr) { + return Poll::Ready(if curr == ERROR { + Err(Error::shutdown()) + } else { + Ok(()) + }); + } + + Poll::Pending + } + + /// Only called by `Registration` + pub(crate) fn reset(entry: &mut Arc<Entry>) { + let inner = match entry.upgrade_inner() { + Some(inner) => inner, + None => return, + }; + + let deadline = entry.time_ref().deadline; + let when = inner.normalize_deadline(deadline); + let elapsed = inner.elapsed(); + + let mut curr = entry.state.load(SeqCst); + let mut notify; + + loop { + // In these two cases, there is no work to do when resetting the + // timer. If the `Entry` is in an error state, then it cannot be + // used anymore. If resetting the entry to the current value, then + // the reset is a noop. + if curr == ERROR || curr == when { + return; + } + + let next; + + if when <= elapsed { + next = ELAPSED; + notify = !is_elapsed(curr); + } else { + next = when; + notify = true; + } + + let actual = entry.state.compare_and_swap(curr, next, SeqCst); + + if curr == actual { + break; + } + + curr = actual; + } + + if notify { + let _ = inner.queue(entry); + } + } + + fn new2(deadline: Instant, duration: Duration, inner: Weak<Inner>, state: u64) -> Self { + Self { + time: CachePadded(UnsafeCell::new(Time { deadline, duration })), + inner, + waker: AtomicWaker::new(), + state: AtomicU64::new(state), + queued: AtomicBool::new(false), + next_atomic: UnsafeCell::new(ptr::null_mut()), + when: UnsafeCell::new(None), + next_stack: UnsafeCell::new(None), + prev_stack: UnsafeCell::new(ptr::null_mut()), + } + } + + fn upgrade_inner(&self) -> Option<Arc<Inner>> { + self.inner.upgrade() + } +} + +fn is_elapsed(state: u64) -> bool { + state & ELAPSED == ELAPSED +} + +impl Drop for Entry { + fn drop(&mut self) { + let inner = match self.upgrade_inner() { + Some(inner) => inner, + None => return, + }; + + inner.decrement(); + } +} + +unsafe impl Send for Entry {} +unsafe impl Sync for Entry {} + +#[cfg_attr(target_arch = "x86_64", repr(align(128)))] +#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))] +#[derive(Debug)] +struct CachePadded<T>(T); diff --git a/third_party/rust/tokio/src/time/driver/handle.rs b/third_party/rust/tokio/src/time/driver/handle.rs new file mode 100644 index 0000000000..38b1761c8e --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/handle.rs @@ -0,0 +1,38 @@ +use crate::runtime::context; +use crate::time::driver::Inner; +use std::fmt; +use std::sync::{Arc, Weak}; + +/// Handle to time driver instance. +#[derive(Clone)] +pub(crate) struct Handle { + inner: Weak<Inner>, +} + +impl Handle { + /// Creates a new timer `Handle` from a shared `Inner` timer state. + pub(crate) fn new(inner: Weak<Inner>) -> Self { + Handle { inner } + } + + /// Tries to get a handle to the current timer. + /// + /// # Panics + /// + /// This function panics if there is no current timer set. 
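+ ///
+ /// A sketch of the expected call pattern (internal API, so illustrative
+ /// only):
+ ///
+ /// ```ignore
+ /// // From within a runtime context, e.g. `Registration::new`:
+ /// let handle = Handle::current();
+ /// let entry = Entry::new(&handle, deadline, duration);
+ /// ```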
+ pub(crate) fn current() -> Self {
+ context::time_handle()
+ .expect("there is no timer running, must be called from the context of Tokio runtime")
+ }
+
+ /// Tries to return a strong ref to the inner
+ pub(crate) fn inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+impl fmt::Debug for Handle {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Handle")
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/mod.rs b/third_party/rust/tokio/src/time/driver/mod.rs
new file mode 100644
index 0000000000..4616816f3f
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/mod.rs
@@ -0,0 +1,391 @@
+//! Time driver
+
+mod atomic_stack;
+use self::atomic_stack::AtomicStack;
+
+mod entry;
+pub(super) use self::entry::Entry;
+
+mod handle;
+pub(crate) use self::handle::Handle;
+
+mod registration;
+pub(crate) use self::registration::Registration;
+
+mod stack;
+use self::stack::Stack;
+
+use crate::loom::sync::atomic::{AtomicU64, AtomicUsize};
+use crate::park::{Park, Unpark};
+use crate::time::{wheel, Error};
+use crate::time::{Clock, Duration, Instant};
+
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
+
+use std::sync::Arc;
+use std::usize;
+use std::{cmp, fmt};
+
+/// Time implementation that drives [`Delay`], [`Interval`], and [`Timeout`].
+///
+/// A `Driver` instance tracks the state necessary for managing time and
+/// notifying the [`Delay`] instances once their deadlines are reached.
+///
+/// It is expected that a single instance manages many individual [`Delay`]
+/// instances. The `Driver` implementation is thread-safe and, as such, is able
+/// to handle callers from across threads.
+///
+/// After creating the `Driver` instance, the caller must repeatedly call
+/// [`turn`]; the driver performs no work otherwise.
+///
+/// The driver has a resolution of one millisecond. Any unit of time that falls
+/// between milliseconds is rounded up to the next millisecond.
+///
+/// When an instance is dropped, any outstanding [`Delay`] instance that has not
+/// elapsed will be notified with an error. At this point, calling `poll` on the
+/// [`Delay`] instance will result in `Err` being returned.
+///
+/// # Implementation
+///
+/// The time driver is based on the [paper by Varghese and Lauck][paper].
+///
+/// A hashed timing wheel is a vector of slots, where each slot handles a time
+/// slice. As time progresses, the timer walks over the slot for the current
+/// instant, and processes each entry for that slot. When the timer reaches the
+/// end of the wheel, it starts again at the beginning.
+///
+/// The implementation maintains six wheels arranged in a set of levels. As the
+/// levels go up, the slots of the associated wheel represent larger intervals
+/// of time. At each level, the wheel has 64 slots. Each slot covers a range of
+/// time equal to the entire wheel at the lower level. At level zero, each slot
+/// represents one millisecond of time.
+///
+/// The wheels are:
+///
+/// * Level 0: 64 x 1 millisecond slots.
+/// * Level 1: 64 x 64 millisecond slots.
+/// * Level 2: 64 x ~4 second slots.
+/// * Level 3: 64 x ~4 minute slots.
+/// * Level 4: 64 x ~4 hour slots.
+/// * Level 5: 64 x ~12 day slots.
+///
+/// When the timer processes entries at level zero, it will notify all the
+/// `Delay` instances as their deadlines have been reached. For all higher
+/// levels, all entries will be redistributed across the wheel at the next level
+/// down.
+/// Eventually, as time progresses, [`Delay`] instances will either be
+/// canceled (dropped) or their associated entries will reach level zero and
+/// be notified.
+#[derive(Debug)]
+pub(crate) struct Driver<T> {
+ /// Shared state
+ inner: Arc<Inner>,
+
+ /// Timer wheel
+ wheel: wheel::Wheel<Stack>,
+
+ /// Thread parker. The `Driver` park implementation delegates to this.
+ park: T,
+
+ /// Source of "now" instances
+ clock: Clock,
+}
+
+/// Timer state shared between `Driver`, `Handle`, and `Registration`.
+pub(crate) struct Inner {
+ /// The instant at which the timer started running.
+ start: Instant,
+
+ /// The last published timer `elapsed` value.
+ elapsed: AtomicU64,
+
+ /// Number of active timeouts
+ num: AtomicUsize,
+
+ /// Head of the "process" linked list.
+ process: AtomicStack,
+
+ /// Unparks the timer thread.
+ unpark: Box<dyn Unpark>,
+}
+
+/// Maximum number of timeouts the system can handle concurrently.
+const MAX_TIMEOUTS: usize = usize::MAX >> 1;
+
+// ===== impl Driver =====
+
+impl<T> Driver<T>
+where
+ T: Park,
+{
+ /// Creates a new `Driver` instance that uses `park` to block the current
+ /// thread and `clock` to get the current `Instant`.
+ ///
+ /// Specifying the source of time is useful when testing.
+ pub(crate) fn new(park: T, clock: Clock) -> Driver<T> {
+ let unpark = Box::new(park.unpark());
+
+ Driver {
+ inner: Arc::new(Inner::new(clock.now(), unpark)),
+ wheel: wheel::Wheel::new(),
+ park,
+ clock,
+ }
+ }
+
+ /// Returns a handle to the timer.
+ ///
+ /// The `Handle` is how `Delay` instances are created. The `Delay` instances
+ /// can either be created directly or the `Handle` instance can be passed to
+ /// `with_default`, setting the timer as the default timer for the execution
+ /// context.
+ pub(crate) fn handle(&self) -> Handle {
+ Handle::new(Arc::downgrade(&self.inner))
+ }
+
+ /// Converts an `Expiration` to an `Instant`.
+ fn expiration_instant(&self, when: u64) -> Instant {
+ self.inner.start + Duration::from_millis(when)
+ }
+
+ /// Runs timer-related logic
+ fn process(&mut self) {
+ let now = crate::time::ms(
+ self.clock.now() - self.inner.start,
+ crate::time::Round::Down,
+ );
+ let mut poll = wheel::Poll::new(now);
+
+ while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) {
+ let when = entry.when_internal().expect("invalid internal entry state");
+
+ // Fire the entry
+ entry.fire(when);
+
+ // Track that the entry has been fired
+ entry.set_when_internal(None);
+ }
+
+ // Update the elapsed cache
+ self.inner.elapsed.store(self.wheel.elapsed(), SeqCst);
+ }
+
+ /// Processes the entry queue
+ ///
+ /// This handles adding and canceling timeouts.
+ fn process_queue(&mut self) {
+ for entry in self.inner.process.take() {
+ match (entry.when_internal(), entry.load_state()) {
+ (None, None) => {
+ // Nothing to do
+ }
+ (Some(_), None) => {
+ // Remove the entry
+ self.clear_entry(&entry);
+ }
+ (None, Some(when)) => {
+ // Queue the entry
+ self.add_entry(entry, when);
+ }
+ (Some(_), Some(next)) => {
+ self.clear_entry(&entry);
+ self.add_entry(entry, next);
+ }
+ }
+ }
+ }
+
+ fn clear_entry(&mut self, entry: &Arc<Entry>) {
+ self.wheel.remove(entry, &mut ());
+ entry.set_when_internal(None);
+ }
+
+ /// Fires the entry if its deadline has already elapsed; otherwise queues
+ /// it to be processed later.
+ fn add_entry(&mut self, entry: Arc<Entry>, when: u64) { + use crate::time::wheel::InsertError; + + entry.set_when_internal(Some(when)); + + match self.wheel.insert(when, entry, &mut ()) { + Ok(_) => {} + Err((entry, InsertError::Elapsed)) => { + // The entry's deadline has elapsed, so fire it and update the + // internal state accordingly. + entry.set_when_internal(None); + entry.fire(when); + } + Err((entry, InsertError::Invalid)) => { + // The entry's deadline is invalid, so error it and update the + // internal state accordingly. + entry.set_when_internal(None); + entry.error(); + } + } + } +} + +impl<T> Park for Driver<T> +where + T: Park, +{ + type Unpark = T::Unpark; + type Error = T::Error; + + fn unpark(&self) -> Self::Unpark { + self.park.unpark() + } + + fn park(&mut self) -> Result<(), Self::Error> { + self.process_queue(); + + match self.wheel.poll_at() { + Some(when) => { + let now = self.clock.now(); + let deadline = self.expiration_instant(when); + + if deadline > now { + let dur = deadline - now; + + if self.clock.is_paused() { + self.park.park_timeout(Duration::from_secs(0))?; + self.clock.advance(dur); + } else { + self.park.park_timeout(dur)?; + } + } else { + self.park.park_timeout(Duration::from_secs(0))?; + } + } + None => { + self.park.park()?; + } + } + + self.process(); + + Ok(()) + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.process_queue(); + + match self.wheel.poll_at() { + Some(when) => { + let now = self.clock.now(); + let deadline = self.expiration_instant(when); + + if deadline > now { + let duration = cmp::min(deadline - now, duration); + + if self.clock.is_paused() { + self.park.park_timeout(Duration::from_secs(0))?; + self.clock.advance(duration); + } else { + self.park.park_timeout(duration)?; + } + } else { + self.park.park_timeout(Duration::from_secs(0))?; + } + } + None => { + self.park.park_timeout(duration)?; + } + } + + self.process(); + + Ok(()) + } +} + +impl<T> Drop for Driver<T> { + fn drop(&mut self) { + use std::u64; + + // Shutdown the stack of entries to process, preventing any new entries + // from being pushed. + self.inner.process.shutdown(); + + // Clear the wheel, using u64::MAX allows us to drain everything + let mut poll = wheel::Poll::new(u64::MAX); + + while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) { + entry.error(); + } + } +} + +// ===== impl Inner ===== + +impl Inner { + fn new(start: Instant, unpark: Box<dyn Unpark>) -> Inner { + Inner { + num: AtomicUsize::new(0), + elapsed: AtomicU64::new(0), + process: AtomicStack::new(), + start, + unpark, + } + } + + fn elapsed(&self) -> u64 { + self.elapsed.load(SeqCst) + } + + #[cfg(all(test, loom))] + fn num(&self, ordering: std::sync::atomic::Ordering) -> usize { + self.num.load(ordering) + } + + /// Increments the number of active timeouts + fn increment(&self) -> Result<(), Error> { + let mut curr = self.num.load(Relaxed); + loop { + if curr == MAX_TIMEOUTS { + return Err(Error::at_capacity()); + } + + match self + .num + .compare_exchange_weak(curr, curr + 1, Release, Relaxed) + { + Ok(_) => return Ok(()), + Err(next) => curr = next, + } + } + } + + /// Decrements the number of active timeouts + fn decrement(&self) { + let prev = self.num.fetch_sub(1, Acquire); + debug_assert!(prev <= MAX_TIMEOUTS); + } + + fn queue(&self, entry: &Arc<Entry>) -> Result<(), Error> { + if self.process.push(entry)? 
{ + // The timer is notified so that it can process the timeout + self.unpark.unpark(); + } + + Ok(()) + } + + fn normalize_deadline(&self, deadline: Instant) -> u64 { + if deadline < self.start { + return 0; + } + + crate::time::ms(deadline - self.start, crate::time::Round::Up) + } +} + +impl fmt::Debug for Inner { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Inner").finish() + } +} + +#[cfg(all(test, loom))] +mod tests; diff --git a/third_party/rust/tokio/src/time/driver/registration.rs b/third_party/rust/tokio/src/time/driver/registration.rs new file mode 100644 index 0000000000..b77357e735 --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/registration.rs @@ -0,0 +1,53 @@ +use crate::time::driver::{Entry, Handle}; +use crate::time::{Duration, Error, Instant}; + +use std::sync::Arc; +use std::task::{self, Poll}; + +/// Registration with a timer. +/// +/// The association between a `Delay` instance and a timer is done lazily in +/// `poll` +#[derive(Debug)] +pub(crate) struct Registration { + entry: Arc<Entry>, +} + +impl Registration { + pub(crate) fn new(deadline: Instant, duration: Duration) -> Registration { + let handle = Handle::current(); + + Registration { + entry: Entry::new(&handle, deadline, duration), + } + } + + pub(crate) fn deadline(&self) -> Instant { + self.entry.time_ref().deadline + } + + pub(crate) fn reset(&mut self, deadline: Instant) { + unsafe { + self.entry.time_mut().deadline = deadline; + } + + Entry::reset(&mut self.entry); + } + + pub(crate) fn is_elapsed(&self) -> bool { + self.entry.is_elapsed() + } + + pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> { + // Keep track of task budget + ready!(crate::coop::poll_proceed(cx)); + + self.entry.poll_elapsed(cx) + } +} + +impl Drop for Registration { + fn drop(&mut self) { + Entry::cancel(&self.entry); + } +} diff --git a/third_party/rust/tokio/src/time/driver/stack.rs b/third_party/rust/tokio/src/time/driver/stack.rs new file mode 100644 index 0000000000..3e2924f265 --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/stack.rs @@ -0,0 +1,121 @@ +use crate::time::driver::Entry; +use crate::time::wheel; + +use std::ptr; +use std::sync::Arc; + +/// A doubly linked stack +#[derive(Debug)] +pub(crate) struct Stack { + head: Option<Arc<Entry>>, +} + +impl Default for Stack { + fn default() -> Stack { + Stack { head: None } + } +} + +impl wheel::Stack for Stack { + type Owned = Arc<Entry>; + type Borrowed = Entry; + type Store = (); + + fn is_empty(&self) -> bool { + self.head.is_none() + } + + fn push(&mut self, entry: Self::Owned, _: &mut Self::Store) { + // Get a pointer to the entry to for the prev link + let ptr: *const Entry = &*entry as *const _; + + // Remove the old head entry + let old = self.head.take(); + + unsafe { + // Ensure the entry is not already in a stack. 
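+ // (The push itself is O(1): the new entry becomes the head and the
+ // old head's `prev_stack` pointer is fixed up below.)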
+ debug_assert!((*entry.next_stack.get()).is_none()); + debug_assert!((*entry.prev_stack.get()).is_null()); + + if let Some(ref entry) = old.as_ref() { + debug_assert!({ + // The head is not already set to the entry + ptr != &***entry as *const _ + }); + + // Set the previous link on the old head + *entry.prev_stack.get() = ptr; + } + + // Set this entry's next pointer + *entry.next_stack.get() = old; + } + + // Update the head pointer + self.head = Some(entry); + } + + /// Pops an item from the stack + fn pop(&mut self, _: &mut ()) -> Option<Arc<Entry>> { + let entry = self.head.take(); + + unsafe { + if let Some(entry) = entry.as_ref() { + self.head = (*entry.next_stack.get()).take(); + + if let Some(entry) = self.head.as_ref() { + *entry.prev_stack.get() = ptr::null(); + } + + *entry.prev_stack.get() = ptr::null(); + } + } + + entry + } + + fn remove(&mut self, entry: &Entry, _: &mut ()) { + unsafe { + // Ensure that the entry is in fact contained by the stack + debug_assert!({ + // This walks the full linked list even if an entry is found. + let mut next = self.head.as_ref(); + let mut contains = false; + + while let Some(n) = next { + if entry as *const _ == &**n as *const _ { + debug_assert!(!contains); + contains = true; + } + + next = (*n.next_stack.get()).as_ref(); + } + + contains + }); + + // Unlink `entry` from the next node + let next = (*entry.next_stack.get()).take(); + + if let Some(next) = next.as_ref() { + (*next.prev_stack.get()) = *entry.prev_stack.get(); + } + + // Unlink `entry` from the prev node + + if let Some(prev) = (*entry.prev_stack.get()).as_ref() { + *prev.next_stack.get() = next; + } else { + // It is the head + self.head = next; + } + + // Unset the prev pointer + *entry.prev_stack.get() = ptr::null(); + } + } + + fn when(item: &Entry, _: &()) -> u64 { + item.when_internal().expect("invalid internal state") + } +} diff --git a/third_party/rust/tokio/src/time/driver/tests/mod.rs b/third_party/rust/tokio/src/time/driver/tests/mod.rs new file mode 100644 index 0000000000..88ff5525da --- /dev/null +++ b/third_party/rust/tokio/src/time/driver/tests/mod.rs @@ -0,0 +1,55 @@ +use crate::park::Unpark; +use crate::time::driver::Inner; +use crate::time::Instant; + +use loom::thread; + +use std::sync::atomic::Ordering; +use std::sync::Arc; + +struct MockUnpark; + +impl Unpark for MockUnpark { + fn unpark(&self) {} +} + +#[test] +fn balanced_incr_and_decr() { + const OPS: usize = 5; + + fn incr(inner: Arc<Inner>) { + for _ in 0..OPS { + inner.increment().expect("increment should not have failed"); + thread::yield_now(); + } + } + + fn decr(inner: Arc<Inner>) { + let mut ops_performed = 0; + while ops_performed < OPS { + if inner.num(Ordering::Relaxed) > 0 { + ops_performed += 1; + inner.decrement(); + } + thread::yield_now(); + } + } + + loom::model(|| { + let unpark = Box::new(MockUnpark); + let instant = Instant::now(); + + let inner = Arc::new(Inner::new(instant, unpark)); + + let incr_inner = inner.clone(); + let decr_inner = inner.clone(); + + let incr_hndle = thread::spawn(move || incr(incr_inner)); + let decr_hndle = thread::spawn(move || decr(decr_inner)); + + incr_hndle.join().expect("should never fail"); + decr_hndle.join().expect("should never fail"); + + assert_eq!(inner.num(Ordering::SeqCst), 0); + }) +} diff --git a/third_party/rust/tokio/src/time/error.rs b/third_party/rust/tokio/src/time/error.rs new file mode 100644 index 0000000000..0667b97ac1 --- /dev/null +++ b/third_party/rust/tokio/src/time/error.rs @@ -0,0 +1,72 @@ +use self::Kind::*; +use 
std::error; +use std::fmt; + +/// Errors encountered by the timer implementation. +/// +/// Currently, there are two different errors that can occur: +/// +/// * `shutdown` occurs when a timer operation is attempted, but the timer +/// instance has been dropped. In this case, the operation will never be able +/// to complete and the `shutdown` error is returned. This is a permanent +/// error, i.e., once this error is observed, timer operations will never +/// succeed in the future. +/// +/// * `at_capacity` occurs when a timer operation is attempted, but the timer +/// instance is currently handling its maximum number of outstanding delays. +/// In this case, the operation is not able to be performed at the current +/// moment, and `at_capacity` is returned. This is a transient error, i.e., at +/// some point in the future, if the operation is attempted again, it might +/// succeed. Callers that observe this error should attempt to [shed load]. One +/// way to do this would be dropping the future that issued the timer operation. +/// +/// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding +#[derive(Debug)] +pub struct Error(Kind); + +#[derive(Debug)] +enum Kind { + Shutdown, + AtCapacity, +} + +impl Error { + /// Creates an error representing a shutdown timer. + pub fn shutdown() -> Error { + Error(Shutdown) + } + + /// Returns `true` if the error was caused by the timer being shutdown. + pub fn is_shutdown(&self) -> bool { + match self.0 { + Kind::Shutdown => true, + _ => false, + } + } + + /// Creates an error representing a timer at capacity. + pub fn at_capacity() -> Error { + Error(AtCapacity) + } + + /// Returns `true` if the error was caused by the timer being at capacity. + pub fn is_at_capacity(&self) -> bool { + match self.0 { + Kind::AtCapacity => true, + _ => false, + } + } +} + +impl error::Error for Error {} + +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use self::Kind::*; + let descr = match self.0 { + Shutdown => "the timer is shutdown, must be called from the context of Tokio runtime", + AtCapacity => "timer is at capacity and cannot create a new entry", + }; + write!(fmt, "{}", descr) + } +} diff --git a/third_party/rust/tokio/src/time/instant.rs b/third_party/rust/tokio/src/time/instant.rs new file mode 100644 index 0000000000..f2cb4bc97d --- /dev/null +++ b/third_party/rust/tokio/src/time/instant.rs @@ -0,0 +1,199 @@ +#![allow(clippy::trivially_copy_pass_by_ref)] + +use std::fmt; +use std::ops; +use std::time::Duration; + +/// A measurement of the system clock, useful for talking to +/// external entities like the file system or other processes. +#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)] +pub struct Instant { + std: std::time::Instant, +} + +impl Instant { + /// Returns an instant corresponding to "now". + /// + /// # Examples + /// + /// ``` + /// use tokio::time::Instant; + /// + /// let now = Instant::now(); + /// ``` + pub fn now() -> Instant { + variant::now() + } + + /// Create a `tokio::time::Instant` from a `std::time::Instant`. + pub fn from_std(std: std::time::Instant) -> Instant { + Instant { std } + } + + /// Convert the value into a `std::time::Instant`. + pub fn into_std(self) -> std::time::Instant { + self.std + } + + /// Returns the amount of time elapsed from another instant to this one. + /// + /// # Panics + /// + /// This function will panic if `earlier` is later than `self`. 
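+ ///
+ /// # Examples
+ ///
+ /// A minimal illustration using pure `Instant` arithmetic (no clock
+ /// reads, so the equality is exact):
+ ///
+ /// ```
+ /// use tokio::time::{Duration, Instant};
+ ///
+ /// let earlier = Instant::now();
+ /// let later = earlier + Duration::from_millis(5);
+ /// assert_eq!(later.duration_since(earlier), Duration::from_millis(5));
+ /// ```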
+ pub fn duration_since(&self, earlier: Instant) -> Duration { + self.std.duration_since(earlier.std) + } + + /// Returns the amount of time elapsed from another instant to this one, or + /// None if that instant is later than this one. + /// + /// # Examples + /// + /// ``` + /// use tokio::time::{Duration, Instant, delay_for}; + /// + /// #[tokio::main] + /// async fn main() { + /// let now = Instant::now(); + /// delay_for(Duration::new(1, 0)).await; + /// let new_now = Instant::now(); + /// println!("{:?}", new_now.checked_duration_since(now)); + /// println!("{:?}", now.checked_duration_since(new_now)); // None + /// } + /// ``` + pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> { + self.std.checked_duration_since(earlier.std) + } + + /// Returns the amount of time elapsed from another instant to this one, or + /// zero duration if that instant is earlier than this one. + /// + /// # Examples + /// + /// ``` + /// use tokio::time::{Duration, Instant, delay_for}; + /// + /// #[tokio::main] + /// async fn main() { + /// let now = Instant::now(); + /// delay_for(Duration::new(1, 0)).await; + /// let new_now = Instant::now(); + /// println!("{:?}", new_now.saturating_duration_since(now)); + /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns + /// } + /// ``` + pub fn saturating_duration_since(&self, earlier: Instant) -> Duration { + self.std.saturating_duration_since(earlier.std) + } + + /// Returns the amount of time elapsed since this instant was created. + /// + /// # Panics + /// + /// This function may panic if the current time is earlier than this + /// instant, which is something that can happen if an `Instant` is + /// produced synthetically. + /// + /// # Examples + /// + /// ``` + /// use tokio::time::{Duration, Instant, delay_for}; + /// + /// #[tokio::main] + /// async fn main() { + /// let instant = Instant::now(); + /// let three_secs = Duration::from_secs(3); + /// delay_for(three_secs).await; + /// assert!(instant.elapsed() >= three_secs); + /// } + /// ``` + pub fn elapsed(&self) -> Duration { + Instant::now() - *self + } + + /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be + /// represented as `Instant` (which means it's inside the bounds of the + /// underlying data structure), `None` otherwise. + pub fn checked_add(&self, duration: Duration) -> Option<Instant> { + self.std.checked_add(duration).map(Instant::from_std) + } + + /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be + /// represented as `Instant` (which means it's inside the bounds of the + /// underlying data structure), `None` otherwise. 
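+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::time::{Duration, Instant};
+    ///
+    /// let now = Instant::now();
+    /// // Whether an instant one second before `now` is representable is
+    /// // platform-dependent, hence the `Option` return value.
+    /// println!("{:?}", now.checked_sub(Duration::from_secs(1)));
+    /// ```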
+    pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
+        self.std.checked_sub(duration).map(Instant::from_std)
+    }
+}
+
+impl From<std::time::Instant> for Instant {
+    fn from(time: std::time::Instant) -> Instant {
+        Instant::from_std(time)
+    }
+}
+
+impl From<Instant> for std::time::Instant {
+    fn from(time: Instant) -> std::time::Instant {
+        time.into_std()
+    }
+}
+
+impl ops::Add<Duration> for Instant {
+    type Output = Instant;
+
+    fn add(self, other: Duration) -> Instant {
+        Instant::from_std(self.std + other)
+    }
+}
+
+impl ops::AddAssign<Duration> for Instant {
+    fn add_assign(&mut self, rhs: Duration) {
+        *self = *self + rhs;
+    }
+}
+
+impl ops::Sub for Instant {
+    type Output = Duration;
+
+    fn sub(self, rhs: Instant) -> Duration {
+        self.std - rhs.std
+    }
+}
+
+impl ops::Sub<Duration> for Instant {
+    type Output = Instant;
+
+    fn sub(self, rhs: Duration) -> Instant {
+        Instant::from_std(self.std - rhs)
+    }
+}
+
+impl ops::SubAssign<Duration> for Instant {
+    fn sub_assign(&mut self, rhs: Duration) {
+        *self = *self - rhs;
+    }
+}
+
+impl fmt::Debug for Instant {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.std.fmt(fmt)
+    }
+}
+
+#[cfg(not(feature = "test-util"))]
+mod variant {
+    use super::Instant;
+
+    pub(super) fn now() -> Instant {
+        Instant::from_std(std::time::Instant::now())
+    }
+}
+
+#[cfg(feature = "test-util")]
+mod variant {
+    use super::Instant;
+
+    pub(super) fn now() -> Instant {
+        crate::time::clock::now()
+    }
+}
diff --git a/third_party/rust/tokio/src/time/interval.rs b/third_party/rust/tokio/src/time/interval.rs
new file mode 100644
index 0000000000..090e2d1f05
--- /dev/null
+++ b/third_party/rust/tokio/src/time/interval.rs
@@ -0,0 +1,139 @@
+use crate::future::poll_fn;
+use crate::time::{delay_until, Delay, Duration, Instant};
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Creates a new `Interval` that yields with an interval of `period`. The first
+/// tick completes immediately.
+///
+/// An interval will tick indefinitely. At any time, the `Interval` value can be
+/// dropped. This cancels the interval.
+///
+/// This function is equivalent to `interval_at(Instant::now(), period)`.
+///
+/// # Panics
+///
+/// This function panics if `period` is zero.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::time::{self, Duration};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let mut interval = time::interval(Duration::from_millis(10));
+///
+///     interval.tick().await;
+///     interval.tick().await;
+///     interval.tick().await;
+///
+///     // approximately 20ms have elapsed.
+/// }
+/// ```
pub fn interval(period: Duration) -> Interval {
+    assert!(period > Duration::new(0, 0), "`period` must be non-zero.");
+
+    interval_at(Instant::now(), period)
+}
+
+/// Creates a new `Interval` that yields with an interval of `period`, with the
+/// first tick completing at `start`.
+///
+/// An interval will tick indefinitely. At any time, the `Interval` value can be
+/// dropped. This cancels the interval.
+///
+/// # Panics
+///
+/// This function panics if `period` is zero.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::time::{interval_at, Duration, Instant};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let start = Instant::now() + Duration::from_millis(50);
+///     let mut interval = interval_at(start, Duration::from_millis(10));
+///
+///     interval.tick().await;
+///     interval.tick().await;
+///     interval.tick().await;
+///
+///     // approximately 70ms have elapsed.
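+///     // (the first tick completed at `start`, 50ms in; the following two
+///     // ticks arrived 10ms apart, at 60ms and 70ms)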
+/// }
+/// ```
+pub fn interval_at(start: Instant, period: Duration) -> Interval {
+    assert!(period > Duration::new(0, 0), "`period` must be non-zero.");
+
+    Interval {
+        delay: delay_until(start),
+        period,
+    }
+}
+
+/// Stream returned by [`interval`](interval) and [`interval_at`](interval_at).
+#[derive(Debug)]
+pub struct Interval {
+    /// Future that completes the next time the `Interval` yields a value.
+    delay: Delay,
+
+    /// The duration between values yielded by `Interval`.
+    period: Duration,
+}
+
+impl Interval {
+    #[doc(hidden)] // TODO: document
+    pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<Instant> {
+        // Wait for the delay to be done
+        ready!(Pin::new(&mut self.delay).poll(cx));
+
+        // Get the `now` by looking at the `delay` deadline
+        let now = self.delay.deadline();
+
+        // The next interval value is `period` after the one that just
+        // yielded.
+        let next = now + self.period;
+        self.delay.reset(next);
+
+        // Return the current instant
+        Poll::Ready(now)
+    }
+
+    /// Completes when the next instant in the interval has been reached.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::time;
+    ///
+    /// use std::time::Duration;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut interval = time::interval(Duration::from_millis(10));
+    ///
+    ///     interval.tick().await;
+    ///     interval.tick().await;
+    ///     interval.tick().await;
+    ///
+    ///     // approximately 20ms have elapsed.
+    /// }
+    /// ```
+    #[allow(clippy::should_implement_trait)] // TODO: rename (tokio-rs/tokio#1261)
+    pub async fn tick(&mut self) -> Instant {
+        poll_fn(|cx| self.poll_tick(cx)).await
+    }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for Interval {
+    type Item = Instant;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Instant>> {
+        Poll::Ready(Some(ready!(self.poll_tick(cx))))
+    }
+}
diff --git a/third_party/rust/tokio/src/time/mod.rs b/third_party/rust/tokio/src/time/mod.rs
new file mode 100644
index 0000000000..7070d6b257
--- /dev/null
+++ b/third_party/rust/tokio/src/time/mod.rs
@@ -0,0 +1,130 @@
+//! Utilities for tracking time.
+//!
+//! This module provides a number of types for executing code after a set period
+//! of time.
+//!
+//! * `Delay` is a future that does no work and completes at a specific `Instant`
+//! in time.
+//!
+//! * `Interval` is a stream yielding a value at a fixed period. It is
+//! initialized with a `Duration` and repeatedly yields each time the duration
+//! elapses.
+//!
+//! * `Timeout`: Wraps a future or stream, setting an upper bound to the amount
+//! of time it is allowed to execute. If the future or stream does not
+//! complete in time, then it is canceled and an error is returned.
+//!
+//! * `DelayQueue`: A queue where items are returned once the requested delay
+//! has expired.
+//!
+//! These types are sufficient for handling a large number of scenarios
+//! involving time.
+//!
+//! These types must be used from within the context of the `Runtime`.
+//!
+//! # Examples
+//!
+//! Wait 100ms and then print "100 ms have elapsed":
+//!
+//! ```
+//! use tokio::time::delay_for;
+//!
+//! use std::time::Duration;
+//!
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     delay_for(Duration::from_millis(100)).await;
+//!     println!("100 ms have elapsed");
+//! }
+//! ```
+//!
+//! Require that an operation completes within one second, using the `timeout`
+//! function:
+//!
+//! ```
+//! use tokio::time::{timeout, Duration};
+//!
+//! async fn long_future() {
+//!     // do work here
+//! }
+//!
+//! # async fn dox() {
+//! let res = timeout(Duration::from_secs(1), long_future()).await;
+//!
+//! if res.is_err() {
+//!     println!("operation timed out");
+//! }
+//! # }
+//! ```

+mod clock;
+pub(crate) use self::clock::Clock;
+#[cfg(feature = "test-util")]
+pub use clock::{advance, pause, resume};
+
+pub mod delay_queue;
+#[doc(inline)]
+pub use delay_queue::DelayQueue;
+
+mod delay;
+pub use delay::{delay_for, delay_until, Delay};
+
+pub(crate) mod driver;
+
+mod error;
+pub use error::Error;
+
+mod instant;
+pub use self::instant::Instant;
+
+mod interval;
+pub use interval::{interval, interval_at, Interval};
+
+mod timeout;
+#[doc(inline)]
+pub use timeout::{timeout, timeout_at, Elapsed, Timeout};
+
+cfg_stream! {
+    mod throttle;
+    pub use throttle::{throttle, Throttle};
+}
+
+mod wheel;
+
+#[cfg(test)]
+#[cfg(not(loom))]
+mod tests;
+
+// Re-export for convenience
+pub use std::time::Duration;
+
+// ===== Internal utils =====
+
+enum Round {
+    Up,
+    Down,
+}
+
+/// Converts a `Duration` to milliseconds, rounding as specified by `round` and
+/// saturating at `u64::MAX`.
+///
+/// The saturating is fine because `u64::MAX` milliseconds are still many
+/// million years.
+#[inline]
+fn ms(duration: Duration, round: Round) -> u64 {
+    const NANOS_PER_MILLI: u32 = 1_000_000;
+    const MILLIS_PER_SEC: u64 = 1_000;
+
+    // Convert the sub-second nanoseconds, rounding up or down as requested.
+    let millis = match round {
+        Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI,
+        Round::Down => duration.subsec_millis(),
+    };
+
+    duration
+        .as_secs()
+        .saturating_mul(MILLIS_PER_SEC)
+        .saturating_add(u64::from(millis))
+}
diff --git a/third_party/rust/tokio/src/time/tests/mod.rs b/third_party/rust/tokio/src/time/tests/mod.rs
new file mode 100644
index 0000000000..4710d470f7
--- /dev/null
+++ b/third_party/rust/tokio/src/time/tests/mod.rs
@@ -0,0 +1,22 @@
+mod test_delay;
+
+use crate::time::{self, Instant};
+use std::time::Duration;
+
+fn assert_send<T: Send>() {}
+fn assert_sync<T: Sync>() {}
+
+#[test]
+fn registration_is_send_and_sync() {
+    use crate::time::driver::Registration;
+
+    assert_send::<Registration>();
+    assert_sync::<Registration>();
+}
+
+#[test]
+#[should_panic]
+fn delay_is_eager() {
+    let when = Instant::now() + Duration::from_millis(100);
+    let _ = time::delay_until(when);
+}
diff --git a/third_party/rust/tokio/src/time/tests/test_delay.rs b/third_party/rust/tokio/src/time/tests/test_delay.rs
new file mode 100644
index 0000000000..f843434be4
--- /dev/null
+++ b/third_party/rust/tokio/src/time/tests/test_delay.rs
@@ -0,0 +1,447 @@
+#![warn(rust_2018_idioms)]
+
+use crate::park::{Park, Unpark};
+use crate::time::driver::{Driver, Entry, Handle};
+use crate::time::Clock;
+use crate::time::{Duration, Instant};
+
+use tokio_test::task;
+use tokio_test::{assert_ok, assert_pending, assert_ready_ok};
+
+use std::sync::Arc;
+
+macro_rules! poll {
+    ($e:expr) => {
+        $e.enter(|cx, e| e.poll_elapsed(cx))
+    };
+}
+
+#[test]
+fn frozen_utility_returns_correct_advanced_duration() {
+    let clock = Clock::new();
+    clock.pause();
+    let start = clock.now();
+
+    clock.advance(ms(10));
+    assert_eq!(clock.now() - start, ms(10));
+}
+
+#[test]
+fn immediate_delay() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    let when = clock.now();
+    let mut e = task::spawn(delay_until(&handle, when));
+
+    assert_ready_ok!(poll!(e));
+
+    assert_ok!(driver.park_timeout(Duration::from_millis(1000)));
+
+    // The mock park advances the clock by the full requested timeout.
+    assert_eq!(clock.now() - start, ms(1000));
+}
+
+#[test]
+fn delayed_delay_level_0() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    for &i in &[1, 10, 60] {
+        // Create a `Delay` that elapses in the future
+        let mut e = task::spawn(delay_until(&handle, start + ms(i)));
+
+        // The delay has not elapsed.
+        assert_pending!(poll!(e));
+
+        assert_ok!(driver.park());
+        assert_eq!(clock.now() - start, ms(i));
+
+        assert_ready_ok!(poll!(e));
+    }
+}
+
+#[test]
+fn sub_ms_delayed_delay() {
+    let (mut driver, clock, handle) = setup();
+
+    for _ in 0..5 {
+        let deadline = clock.now() + ms(1) + Duration::new(0, 1);
+
+        let mut e = task::spawn(delay_until(&handle, deadline));
+
+        assert_pending!(poll!(e));
+
+        assert_ok!(driver.park());
+        assert_ready_ok!(poll!(e));
+
+        assert!(clock.now() >= deadline);
+
+        clock.advance(Duration::new(0, 1));
+    }
+}
+
+#[test]
+fn delayed_delay_wrapping_level_0() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    assert_ok!(driver.park_timeout(ms(5)));
+    assert_eq!(clock.now() - start, ms(5));
+
+    let mut e = task::spawn(delay_until(&handle, clock.now() + ms(60)));
+
+    assert_pending!(poll!(e));
+
+    assert_ok!(driver.park());
+    assert_eq!(clock.now() - start, ms(64));
+    assert_pending!(poll!(e));
+
+    assert_ok!(driver.park());
+    assert_eq!(clock.now() - start, ms(65));
+
+    assert_ready_ok!(poll!(e));
+}
+
+#[test]
+fn timer_wrapping_with_higher_levels() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    // Set delay to hit level 1
+    let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(64)));
+    assert_pending!(poll!(e1));
+
+    // Turn a bit
+    assert_ok!(driver.park_timeout(ms(5)));
+
+    // Set timeout such that it will hit level 0, but wrap
+    let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(60)));
+    assert_pending!(poll!(e2));
+
+    // This should result in e1 firing
+    assert_ok!(driver.park());
+    assert_eq!(clock.now() - start, ms(64));
+
+    assert_ready_ok!(poll!(e1));
+    assert_pending!(poll!(e2));
+
+    assert_ok!(driver.park());
+    assert_eq!(clock.now() - start, ms(65));
+
+    assert_ready_ok!(poll!(e2));
+}
+
+#[test]
+fn delay_with_deadline_in_past() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    // Create `Delay` that elapsed immediately.
+    let mut e = task::spawn(delay_until(&handle, clock.now() - ms(100)));
+
+    // The deadline is already in the past, so the delay is ready as soon as
+    // it is polled.
+    assert_ready_ok!(poll!(e));
+
+    // Turn the timer, it runs for the elapsed time
+    assert_ok!(driver.park_timeout(ms(1000)));
+
+    // The mock park advances the clock by the full requested timeout.
+    assert_eq!(clock.now() - start, ms(1000));
+}
+
+#[test]
+fn delayed_delay_level_1() {
+    let (mut driver, clock, handle) = setup();
+    let start = clock.now();
+
+    // Create a `Delay` that elapses in the future
+    let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234)));
+
+    // The delay has not elapsed.
+    assert_pending!(poll!(e));
+
+    // Turn the timer, this will wake up to cascade the timer down.
+    assert_ok!(driver.park_timeout(ms(1000)));
+    assert_eq!(clock.now() - start, ms(192));
+
+    // The delay has not elapsed.
+    assert_pending!(poll!(e));
+
+    // Turn the timer again
+    assert_ok!(driver.park_timeout(ms(1000)));
+    assert_eq!(clock.now() - start, ms(234));
+
+    // The delay has elapsed.
+ assert_ready_ok!(poll!(e)); + + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + // Create a `Delay` that elapses in the future + let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234))); + + // The delay has not elapsed. + assert_pending!(poll!(e)); + + // Turn the timer with a smaller timeout than the cascade. + assert_ok!(driver.park_timeout(ms(100))); + assert_eq!(clock.now() - start, ms(100)); + + assert_pending!(poll!(e)); + + // Turn the timer, this will wake up to cascade the timer down. + assert_ok!(driver.park_timeout(ms(1000))); + assert_eq!(clock.now() - start, ms(192)); + + // The delay has not elapsed. + assert_pending!(poll!(e)); + + // Turn the timer again + assert_ok!(driver.park_timeout(ms(1000))); + assert_eq!(clock.now() - start, ms(234)); + + // The delay has elapsed. + assert_ready_ok!(poll!(e)); +} + +#[test] +fn concurrently_set_two_timers_second_one_shorter() { + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(500))); + let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(200))); + + // The delay has not elapsed + assert_pending!(poll!(e1)); + assert_pending!(poll!(e2)); + + // Delay until a cascade + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(192)); + + // Delay until the second timer. + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(200)); + + // The shorter delay fires + assert_ready_ok!(poll!(e2)); + assert_pending!(poll!(e1)); + + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(448)); + + assert_pending!(poll!(e1)); + + // Turn again, this time the time will advance to the second delay + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(500)); + + assert_ready_ok!(poll!(e1)); +} + +#[test] +fn short_delay() { + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + // Create a `Delay` that elapses in the future + let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1))); + + // The delay has not elapsed. + assert_pending!(poll!(e)); + + // Turn the timer, but not enough time will go by. + assert_ok!(driver.park()); + + // The delay has elapsed. + assert_ready_ok!(poll!(e)); + + // The time has advanced to the point of the delay elapsing. + assert_eq!(clock.now() - start, ms(1)); +} + +#[test] +fn sorta_long_delay_until() { + const MIN_5: u64 = 5 * 60 * 1000; + + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + // Create a `Delay` that elapses in the future + let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MIN_5))); + + // The delay has not elapsed. + assert_pending!(poll!(e)); + + let cascades = &[262_144, 262_144 + 9 * 4096, 262_144 + 9 * 4096 + 15 * 64]; + + for &elapsed in cascades { + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(elapsed)); + + assert_pending!(poll!(e)); + } + + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(MIN_5)); + + // The delay has elapsed. + assert_ready_ok!(poll!(e)); +} + +#[test] +fn very_long_delay() { + const MO_5: u64 = 5 * 30 * 24 * 60 * 60 * 1000; + + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + // Create a `Delay` that elapses in the future + let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MO_5))); + + // The delay has not elapsed. 
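+    // (The `cascades` values below are the intermediate points at which the
+    // driver wakes to move the entry down one wheel level at a time, rather
+    // than advancing straight to the deadline.)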
+ assert_pending!(poll!(e)); + + let cascades = &[ + 12_884_901_888, + 12_952_010_752, + 12_959_875_072, + 12_959_997_952, + ]; + + for &elapsed in cascades { + assert_ok!(driver.park()); + assert_eq!(clock.now() - start, ms(elapsed)); + + assert_pending!(poll!(e)); + } + + // Turn the timer, but not enough time will go by. + assert_ok!(driver.park()); + + // The time has advanced to the point of the delay elapsing. + assert_eq!(clock.now() - start, ms(MO_5)); + + // The delay has elapsed. + assert_ready_ok!(poll!(e)); +} + +#[test] +fn unpark_is_delayed() { + // A special park that will take much longer than the requested duration + struct MockPark(Clock); + + struct MockUnpark; + + impl Park for MockPark { + type Unpark = MockUnpark; + type Error = (); + + fn unpark(&self) -> Self::Unpark { + MockUnpark + } + + fn park(&mut self) -> Result<(), Self::Error> { + panic!("parking forever"); + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + assert_eq!(duration, ms(0)); + self.0.advance(ms(436)); + Ok(()) + } + } + + impl Unpark for MockUnpark { + fn unpark(&self) {} + } + + let clock = Clock::new(); + clock.pause(); + let start = clock.now(); + let mut driver = Driver::new(MockPark(clock.clone()), clock.clone()); + let handle = driver.handle(); + + let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(100))); + let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(101))); + let mut e3 = task::spawn(delay_until(&handle, clock.now() + ms(200))); + + assert_pending!(poll!(e1)); + assert_pending!(poll!(e2)); + assert_pending!(poll!(e3)); + + assert_ok!(driver.park()); + + assert_eq!(clock.now() - start, ms(500)); + + assert_ready_ok!(poll!(e1)); + assert_ready_ok!(poll!(e2)); + assert_ready_ok!(poll!(e3)); +} + +#[test] +fn set_timeout_at_deadline_greater_than_max_timer() { + const YR_1: u64 = 365 * 24 * 60 * 60 * 1000; + const YR_5: u64 = 5 * YR_1; + + let (mut driver, clock, handle) = setup(); + let start = clock.now(); + + for _ in 0..5 { + assert_ok!(driver.park_timeout(ms(YR_1))); + } + + let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1))); + assert_pending!(poll!(e)); + + assert_ok!(driver.park_timeout(ms(1000))); + assert_eq!(clock.now() - start, ms(YR_5) + ms(1)); + + assert_ready_ok!(poll!(e)); +} + +fn setup() -> (Driver<MockPark>, Clock, Handle) { + let clock = Clock::new(); + clock.pause(); + let driver = Driver::new(MockPark(clock.clone()), clock.clone()); + let handle = driver.handle(); + + (driver, clock, handle) +} + +fn delay_until(handle: &Handle, when: Instant) -> Arc<Entry> { + Entry::new(&handle, when, ms(0)) +} + +struct MockPark(Clock); + +struct MockUnpark; + +impl Park for MockPark { + type Unpark = MockUnpark; + type Error = (); + + fn unpark(&self) -> Self::Unpark { + MockUnpark + } + + fn park(&mut self) -> Result<(), Self::Error> { + panic!("parking forever"); + } + + fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { + self.0.advance(duration); + Ok(()) + } +} + +impl Unpark for MockUnpark { + fn unpark(&self) {} +} + +fn ms(n: u64) -> Duration { + Duration::from_millis(n) +} diff --git a/third_party/rust/tokio/src/time/throttle.rs b/third_party/rust/tokio/src/time/throttle.rs new file mode 100644 index 0000000000..435bef6381 --- /dev/null +++ b/third_party/rust/tokio/src/time/throttle.rs @@ -0,0 +1,117 @@ +//! Slow down a stream by enforcing a delay between items. 
+
+use crate::stream::Stream;
+use crate::time::{Delay, Duration, Instant};
+
+use std::future::Future;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+use pin_project_lite::pin_project;
+
+/// Slows down a stream by enforcing a delay between items.
+/// Items will be produced no more often than the specified interval.
+///
+/// # Example
+///
+/// Create a throttled stream.
+/// ```rust,no_run
+/// use std::time::Duration;
+/// use tokio::stream::StreamExt;
+/// use tokio::time::throttle;
+///
+/// # async fn dox() {
+/// let mut item_stream = throttle(Duration::from_secs(2), futures::stream::repeat("one"));
+///
+/// loop {
+///     // The string will be produced at most every 2 seconds
+///     println!("{:?}", item_stream.next().await);
+/// }
+/// # }
+/// ```
+pub fn throttle<T>(duration: Duration, stream: T) -> Throttle<T>
+where
+    T: Stream,
+{
+    let delay = if duration == Duration::from_millis(0) {
+        None
+    } else {
+        Some(Delay::new_timeout(Instant::now() + duration, duration))
+    };
+
+    Throttle {
+        delay,
+        duration,
+        has_delayed: true,
+        stream,
+    }
+}
+
+pin_project! {
+    /// Stream for the [`throttle`](throttle) function.
+    #[derive(Debug)]
+    #[must_use = "streams do nothing unless polled"]
+    pub struct Throttle<T> {
+        // `None` when duration is zero.
+        delay: Option<Delay>,
+        duration: Duration,
+
+        // Set to true when `delay` has returned ready, but `stream` hasn't.
+        has_delayed: bool,
+
+        // The stream to throttle
+        #[pin]
+        stream: T,
+    }
+}
+
+// XXX: are these safe if `T: !Unpin`?
+impl<T: Unpin> Throttle<T> {
+    /// Acquires a reference to the underlying stream that this combinator is
+    /// pulling from.
+    pub fn get_ref(&self) -> &T {
+        &self.stream
+    }
+
+    /// Acquires a mutable reference to the underlying stream that this combinator
+    /// is pulling from.
+    ///
+    /// Note that care must be taken to avoid tampering with the state of the stream
+    /// which may otherwise confuse this combinator.
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.stream
+    }
+
+    /// Consumes this combinator, returning the underlying stream.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so care
+    /// should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> T {
+        self.stream
+    }
+}
+
+impl<T: Stream> Stream for Throttle<T> {
+    type Item = T::Item;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
+        if !self.has_delayed && self.delay.is_some() {
+            ready!(Pin::new(self.as_mut().project().delay.as_mut().unwrap()).poll(cx));
+            *self.as_mut().project().has_delayed = true;
+        }
+
+        let value = ready!(self.as_mut().project().stream.poll_next(cx));
+
+        if value.is_some() {
+            let dur = self.duration;
+            if let Some(ref mut delay) = self.as_mut().project().delay {
+                delay.reset(Instant::now() + dur);
+            }
+
+            *self.as_mut().project().has_delayed = false;
+        }
+
+        Poll::Ready(value)
+    }
+}
diff --git a/third_party/rust/tokio/src/time/timeout.rs b/third_party/rust/tokio/src/time/timeout.rs
new file mode 100644
index 0000000000..401856a881
--- /dev/null
+++ b/third_party/rust/tokio/src/time/timeout.rs
@@ -0,0 +1,185 @@
+//! Allows a future to execute for a maximum amount of time.
+//!
+//! See [`Timeout`] documentation for more details.
+//!
+//! 
[`Timeout`]: struct@Timeout + +use crate::time::{delay_until, Delay, Duration, Instant}; + +use std::fmt; +use std::future::Future; +use std::pin::Pin; +use std::task::{self, Poll}; + +/// Require a `Future` to complete before the specified duration has elapsed. +/// +/// If the future completes before the duration has elapsed, then the completed +/// value is returned. Otherwise, an error is returned and the future is +/// canceled. +/// +/// # Cancelation +/// +/// Cancelling a timeout is done by dropping the future. No additional cleanup +/// or other work is required. +/// +/// The original future may be obtained by calling [`Timeout::into_inner`]. This +/// consumes the `Timeout`. +/// +/// # Examples +/// +/// Create a new `Timeout` set to expire in 10 milliseconds. +/// +/// ```rust +/// use tokio::time::timeout; +/// use tokio::sync::oneshot; +/// +/// use std::time::Duration; +/// +/// # async fn dox() { +/// let (tx, rx) = oneshot::channel(); +/// # tx.send(()).unwrap(); +/// +/// // Wrap the future with a `Timeout` set to expire in 10 milliseconds. +/// if let Err(_) = timeout(Duration::from_millis(10), rx).await { +/// println!("did not receive value within 10 ms"); +/// } +/// # } +/// ``` +pub fn timeout<T>(duration: Duration, future: T) -> Timeout<T> +where + T: Future, +{ + let delay = Delay::new_timeout(Instant::now() + duration, duration); + Timeout::new_with_delay(future, delay) +} + +/// Require a `Future` to complete before the specified instant in time. +/// +/// If the future completes before the instant is reached, then the completed +/// value is returned. Otherwise, an error is returned. +/// +/// # Cancelation +/// +/// Cancelling a timeout is done by dropping the future. No additional cleanup +/// or other work is required. +/// +/// The original future may be obtained by calling [`Timeout::into_inner`]. This +/// consumes the `Timeout`. +/// +/// # Examples +/// +/// Create a new `Timeout` set to expire in 10 milliseconds. +/// +/// ```rust +/// use tokio::time::{Instant, timeout_at}; +/// use tokio::sync::oneshot; +/// +/// use std::time::Duration; +/// +/// # async fn dox() { +/// let (tx, rx) = oneshot::channel(); +/// # tx.send(()).unwrap(); +/// +/// // Wrap the future with a `Timeout` set to expire 10 milliseconds into the +/// // future. +/// if let Err(_) = timeout_at(Instant::now() + Duration::from_millis(10), rx).await { +/// println!("did not receive value within 10 ms"); +/// } +/// # } +/// ``` +pub fn timeout_at<T>(deadline: Instant, future: T) -> Timeout<T> +where + T: Future, +{ + let delay = delay_until(deadline); + + Timeout { + value: future, + delay, + } +} + +/// Future returned by [`timeout`](timeout) and [`timeout_at`](timeout_at). +#[must_use = "futures do nothing unless you `.await` or poll them"] +#[derive(Debug)] +pub struct Timeout<T> { + value: T, + delay: Delay, +} + +/// Error returned by `Timeout`. +#[derive(Debug, PartialEq)] +pub struct Elapsed(()); + +impl Elapsed { + // Used on StreamExt::timeout + #[allow(unused)] + pub(crate) fn new() -> Self { + Elapsed(()) + } +} + +impl<T> Timeout<T> { + pub(crate) fn new_with_delay(value: T, delay: Delay) -> Timeout<T> { + Timeout { value, delay } + } + + /// Gets a reference to the underlying value in this timeout. + pub fn get_ref(&self) -> &T { + &self.value + } + + /// Gets a mutable reference to the underlying value in this timeout. + pub fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + /// Consumes this timeout, returning the underlying value. 
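+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tokio::time::{timeout, Duration};
+    ///
+    /// # async fn dox() {
+    /// let t = timeout(Duration::from_millis(10), async { 1 });
+    ///
+    /// // Discard the deadline and poll the wrapped future directly.
+    /// assert_eq!(t.into_inner().await, 1);
+    /// # }
+    /// ```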
+ pub fn into_inner(self) -> T { + self.value + } +} + +impl<T> Future for Timeout<T> +where + T: Future, +{ + type Output = Result<T::Output, Elapsed>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { + // First, try polling the future + + // Safety: we never move `self.value` + unsafe { + let p = self.as_mut().map_unchecked_mut(|me| &mut me.value); + if let Poll::Ready(v) = p.poll(cx) { + return Poll::Ready(Ok(v)); + } + } + + // Now check the timer + // Safety: X_X! + unsafe { + match self.map_unchecked_mut(|me| &mut me.delay).poll(cx) { + Poll::Ready(()) => Poll::Ready(Err(Elapsed(()))), + Poll::Pending => Poll::Pending, + } + } + } +} + +// ===== impl Elapsed ===== + +impl fmt::Display for Elapsed { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + "deadline has elapsed".fmt(fmt) + } +} + +impl std::error::Error for Elapsed {} + +impl From<Elapsed> for std::io::Error { + fn from(_err: Elapsed) -> std::io::Error { + std::io::ErrorKind::TimedOut.into() + } +} diff --git a/third_party/rust/tokio/src/time/wheel/level.rs b/third_party/rust/tokio/src/time/wheel/level.rs new file mode 100644 index 0000000000..49f9bfb9cf --- /dev/null +++ b/third_party/rust/tokio/src/time/wheel/level.rs @@ -0,0 +1,255 @@ +use crate::time::wheel::Stack; + +use std::fmt; + +/// Wheel for a single level in the timer. This wheel contains 64 slots. +pub(crate) struct Level<T> { + level: usize, + + /// Bit field tracking which slots currently contain entries. + /// + /// Using a bit field to track slots that contain entries allows avoiding a + /// scan to find entries. This field is updated when entries are added or + /// removed from a slot. + /// + /// The least-significant bit represents slot zero. + occupied: u64, + + /// Slots + slot: [T; LEVEL_MULT], +} + +/// Indicates when a slot must be processed next. +#[derive(Debug)] +pub(crate) struct Expiration { + /// The level containing the slot. + pub(crate) level: usize, + + /// The slot index. + pub(crate) slot: usize, + + /// The instant at which the slot needs to be processed. + pub(crate) deadline: u64, +} + +/// Level multiplier. +/// +/// Being a power of 2 is very important. +const LEVEL_MULT: usize = 64; + +impl<T: Stack> Level<T> { + pub(crate) fn new(level: usize) -> Level<T> { + // Rust's derived implementations for arrays require that the value + // contained by the array be `Copy`. So, here we have to manually + // initialize every single slot. + macro_rules! s { + () => { + T::default() + }; + }; + + Level { + level, + occupied: 0, + slot: [ + // It does not look like the necessary traits are + // derived for [T; 64]. + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + s!(), + ], + } + } + + /// Finds the slot that needs to be processed next and returns the slot and + /// `Instant` at which this slot must be processed. + pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> { + // Use the `occupied` bit field to get the index of the next slot that + // needs to be processed. 
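+        //
+        // For example, with slots 3 and 9 occupied, `occupied` is
+        // `0b10_0000_1000`. `next_occupied_slot` rotates the bits right by
+        // the current slot index and counts trailing zeros, yielding the
+        // first occupied slot at or after `now`.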
+ let slot = match self.next_occupied_slot(now) { + Some(slot) => slot, + None => return None, + }; + + // From the slot index, calculate the `Instant` at which it needs to be + // processed. This value *must* be in the future with respect to `now`. + + let level_range = level_range(self.level); + let slot_range = slot_range(self.level); + + // TODO: This can probably be simplified w/ power of 2 math + let level_start = now - (now % level_range); + let deadline = level_start + slot as u64 * slot_range; + + debug_assert!( + deadline >= now, + "deadline={}; now={}; level={}; slot={}; occupied={:b}", + deadline, + now, + self.level, + slot, + self.occupied + ); + + Some(Expiration { + level: self.level, + slot, + deadline, + }) + } + + fn next_occupied_slot(&self, now: u64) -> Option<usize> { + if self.occupied == 0 { + return None; + } + + // Get the slot for now using Maths + let now_slot = (now / slot_range(self.level)) as usize; + let occupied = self.occupied.rotate_right(now_slot as u32); + let zeros = occupied.trailing_zeros() as usize; + let slot = (zeros + now_slot) % 64; + + Some(slot) + } + + pub(crate) fn add_entry(&mut self, when: u64, item: T::Owned, store: &mut T::Store) { + let slot = slot_for(when, self.level); + + self.slot[slot].push(item, store); + self.occupied |= occupied_bit(slot); + } + + pub(crate) fn remove_entry(&mut self, when: u64, item: &T::Borrowed, store: &mut T::Store) { + let slot = slot_for(when, self.level); + + self.slot[slot].remove(item, store); + + if self.slot[slot].is_empty() { + // The bit is currently set + debug_assert!(self.occupied & occupied_bit(slot) != 0); + + // Unset the bit + self.occupied ^= occupied_bit(slot); + } + } + + pub(crate) fn pop_entry_slot(&mut self, slot: usize, store: &mut T::Store) -> Option<T::Owned> { + let ret = self.slot[slot].pop(store); + + if ret.is_some() && self.slot[slot].is_empty() { + // The bit is currently set + debug_assert!(self.occupied & occupied_bit(slot) != 0); + + self.occupied ^= occupied_bit(slot); + } + + ret + } +} + +impl<T> fmt::Debug for Level<T> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Level") + .field("occupied", &self.occupied) + .finish() + } +} + +fn occupied_bit(slot: usize) -> u64 { + 1 << slot +} + +fn slot_range(level: usize) -> u64 { + LEVEL_MULT.pow(level as u32) as u64 +} + +fn level_range(level: usize) -> u64 { + LEVEL_MULT as u64 * slot_range(level) +} + +/// Convert a duration (milliseconds) and a level to a slot position +fn slot_for(duration: u64, level: usize) -> usize { + ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize +} + +/* +#[cfg(all(test, not(loom)))] +mod test { + use super::*; + + #[test] + fn test_slot_for() { + for pos in 1..64 { + assert_eq!(pos as usize, slot_for(pos, 0)); + } + + for level in 1..5 { + for pos in level..64 { + let a = pos * 64_usize.pow(level as u32); + assert_eq!(pos as usize, slot_for(a as u64, level)); + } + } + } +} +*/ diff --git a/third_party/rust/tokio/src/time/wheel/mod.rs b/third_party/rust/tokio/src/time/wheel/mod.rs new file mode 100644 index 0000000000..a2ef27fc6c --- /dev/null +++ b/third_party/rust/tokio/src/time/wheel/mod.rs @@ -0,0 +1,314 @@ +mod level; +pub(crate) use self::level::Expiration; +use self::level::Level; + +mod stack; +pub(crate) use self::stack::Stack; + +use std::borrow::Borrow; +use std::usize; + +/// Timing wheel implementation. +/// +/// This type provides the hashed timing wheel implementation that backs `Timer` +/// and `DelayQueue`. 
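+///
+/// As a rough sketch of the slot math (illustrative only; see `level_for`
+/// below and `slot_for` in `level.rs` for the actual implementation):
+///
+/// ```ignore
+/// // A deadline 200 ms out on a fresh wheel lands in level 1, since
+/// // 64 <= 200 < 64^2, and in slot 200 / 64 = 3.
+/// assert_eq!(level_for(0, 200), 1);
+/// assert_eq!(slot_for(200, 1), 3);
+/// ```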
+///
+/// The structure is generic over `T: Stack`. This allows handling timeout data
+/// being stored on the heap or in a slab. In order to support the latter case,
+/// the slab must be passed into each function allowing the implementation to
+/// lookup timer entries.
+///
+/// See `Timer` documentation for some implementation notes.
+#[derive(Debug)]
+pub(crate) struct Wheel<T> {
+    /// The number of milliseconds elapsed since the wheel started.
+    elapsed: u64,
+
+    /// Timer wheel.
+    ///
+    /// Levels:
+    ///
+    /// * 1 ms slots / 64 ms range
+    /// * 64 ms slots / ~ 4 sec range
+    /// * ~ 4 sec slots / ~ 4 min range
+    /// * ~ 4 min slots / ~ 4 hr range
+    /// * ~ 4 hr slots / ~ 12 day range
+    /// * ~ 12 day slots / ~ 2 yr range
+    levels: Vec<Level<T>>,
+}
+
+/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots
+/// each, the timer is able to track time up to 2 years into the future with a
+/// precision of 1 millisecond.
+const NUM_LEVELS: usize = 6;
+
+/// The maximum duration of a delay
+const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;
+
+#[derive(Debug)]
+pub(crate) enum InsertError {
+    Elapsed,
+    Invalid,
+}
+
+/// Poll expirations from the wheel
+#[derive(Debug, Default)]
+pub(crate) struct Poll {
+    now: u64,
+    expiration: Option<Expiration>,
+}
+
+impl<T> Wheel<T>
+where
+    T: Stack,
+{
+    /// Create a new timing wheel
+    pub(crate) fn new() -> Wheel<T> {
+        let levels = (0..NUM_LEVELS).map(Level::new).collect();
+
+        Wheel { elapsed: 0, levels }
+    }
+
+    /// Return the number of milliseconds that have elapsed since the timing
+    /// wheel's creation.
+    pub(crate) fn elapsed(&self) -> u64 {
+        self.elapsed
+    }
+
+    /// Insert an entry into the timing wheel.
+    ///
+    /// # Arguments
+    ///
+    /// * `when`: the instant at which the entry should be fired. It is
+    ///   represented as the number of milliseconds since the creation
+    ///   of the timing wheel.
+    ///
+    /// * `item`: The item to insert into the wheel.
+    ///
+    /// * `store`: The slab or `()` when using heap storage.
+    ///
+    /// # Return
+    ///
+    /// Returns `Ok` when the item is successfully inserted, `Err` otherwise.
+    ///
+    /// `Err(Elapsed)` indicates that `when` represents an instant that has
+    /// already passed. In this case, the caller should fire the timeout
+    /// immediately.
+    ///
+    /// `Err(Invalid)` indicates an invalid `when` argument has been supplied.
+    pub(crate) fn insert(
+        &mut self,
+        when: u64,
+        item: T::Owned,
+        store: &mut T::Store,
+    ) -> Result<(), (T::Owned, InsertError)> {
+        if when <= self.elapsed {
+            return Err((item, InsertError::Elapsed));
+        } else if when - self.elapsed > MAX_DURATION {
+            return Err((item, InsertError::Invalid));
+        }
+
+        // Get the level at which the entry should be stored
+        let level = self.level_for(when);
+
+        self.levels[level].add_entry(when, item, store);
+
+        debug_assert!({
+            self.levels[level]
+                .next_expiration(self.elapsed)
+                .map(|e| e.deadline >= self.elapsed)
+                .unwrap_or(true)
+        });
+
+        Ok(())
+    }
+
+    /// Remove `item` from the timing wheel.
+ pub(crate) fn remove(&mut self, item: &T::Borrowed, store: &mut T::Store) { + let when = T::when(item, store); + let level = self.level_for(when); + + self.levels[level].remove_entry(when, item, store); + } + + /// Instant at which to poll + pub(crate) fn poll_at(&self) -> Option<u64> { + self.next_expiration().map(|expiration| expiration.deadline) + } + + pub(crate) fn poll(&mut self, poll: &mut Poll, store: &mut T::Store) -> Option<T::Owned> { + loop { + if poll.expiration.is_none() { + poll.expiration = self.next_expiration().and_then(|expiration| { + if expiration.deadline > poll.now { + None + } else { + Some(expiration) + } + }); + } + + match poll.expiration { + Some(ref expiration) => { + if let Some(item) = self.poll_expiration(expiration, store) { + return Some(item); + } + + self.set_elapsed(expiration.deadline); + } + None => { + self.set_elapsed(poll.now); + return None; + } + } + + poll.expiration = None; + } + } + + /// Returns the instant at which the next timeout expires. + fn next_expiration(&self) -> Option<Expiration> { + // Check all levels + for level in 0..NUM_LEVELS { + if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) { + // There cannot be any expirations at a higher level that happen + // before this one. + debug_assert!(self.no_expirations_before(level + 1, expiration.deadline)); + + return Some(expiration); + } + } + + None + } + + /// Used for debug assertions + fn no_expirations_before(&self, start_level: usize, before: u64) -> bool { + let mut res = true; + + for l2 in start_level..NUM_LEVELS { + if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) { + if e2.deadline < before { + res = false; + } + } + } + + res + } + + pub(crate) fn poll_expiration( + &mut self, + expiration: &Expiration, + store: &mut T::Store, + ) -> Option<T::Owned> { + while let Some(item) = self.pop_entry(expiration, store) { + if expiration.level == 0 { + debug_assert_eq!(T::when(item.borrow(), store), expiration.deadline); + + return Some(item); + } else { + let when = T::when(item.borrow(), store); + + let next_level = expiration.level - 1; + + self.levels[next_level].add_entry(when, item, store); + } + } + + None + } + + fn set_elapsed(&mut self, when: u64) { + assert!( + self.elapsed <= when, + "elapsed={:?}; when={:?}", + self.elapsed, + when + ); + + if when > self.elapsed { + self.elapsed = when; + } + } + + fn pop_entry(&mut self, expiration: &Expiration, store: &mut T::Store) -> Option<T::Owned> { + self.levels[expiration.level].pop_entry_slot(expiration.slot, store) + } + + fn level_for(&self, when: u64) -> usize { + level_for(self.elapsed, when) + } +} + +fn level_for(elapsed: u64, when: u64) -> usize { + let masked = elapsed ^ when; + + assert!(masked != 0, "elapsed={}; when={}", elapsed, when); + + let leading_zeros = masked.leading_zeros() as usize; + let significant = 63 - leading_zeros; + significant / 6 +} + +impl Poll { + pub(crate) fn new(now: u64) -> Poll { + Poll { + now, + expiration: None, + } + } +} + +#[cfg(all(test, not(loom)))] +mod test { + use super::*; + + #[test] + fn test_level_for() { + for pos in 1..64 { + assert_eq!( + 0, + level_for(0, pos), + "level_for({}) -- binary = {:b}", + pos, + pos + ); + } + + for level in 1..5 { + for pos in level..64 { + let a = pos * 64_usize.pow(level as u32); + assert_eq!( + level, + level_for(0, a as u64), + "level_for({}) -- binary = {:b}", + a, + a + ); + + if pos > level { + let a = a - 1; + assert_eq!( + level, + level_for(0, a as u64), + "level_for({}) -- binary = {:b}", + a, 
+ a + ); + } + + if pos < 64 { + let a = a + 1; + assert_eq!( + level, + level_for(0, a as u64), + "level_for({}) -- binary = {:b}", + a, + a + ); + } + } + } + } +} diff --git a/third_party/rust/tokio/src/time/wheel/stack.rs b/third_party/rust/tokio/src/time/wheel/stack.rs new file mode 100644 index 0000000000..6e55c38ccd --- /dev/null +++ b/third_party/rust/tokio/src/time/wheel/stack.rs @@ -0,0 +1,26 @@ +use std::borrow::Borrow; + +/// Abstracts the stack operations needed to track timeouts. +pub(crate) trait Stack: Default { + /// Type of the item stored in the stack + type Owned: Borrow<Self::Borrowed>; + + /// Borrowed item + type Borrowed; + + /// Item storage, this allows a slab to be used instead of just the heap + type Store; + + /// Returns `true` if the stack is empty + fn is_empty(&self) -> bool; + + /// Push an item onto the stack + fn push(&mut self, item: Self::Owned, store: &mut Self::Store); + + /// Pop an item from the stack + fn pop(&mut self, store: &mut Self::Store) -> Option<Self::Owned>; + + fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store); + + fn when(item: &Self::Borrowed, store: &Self::Store) -> u64; +} diff --git a/third_party/rust/tokio/src/util/bit.rs b/third_party/rust/tokio/src/util/bit.rs new file mode 100644 index 0000000000..e61ac2165a --- /dev/null +++ b/third_party/rust/tokio/src/util/bit.rs @@ -0,0 +1,85 @@ +use std::fmt; + +#[derive(Clone, Copy)] +pub(crate) struct Pack { + mask: usize, + shift: u32, +} + +impl Pack { + /// Value is packed in the `width` most-significant bits. + pub(crate) const fn most_significant(width: u32) -> Pack { + let mask = mask_for(width).reverse_bits(); + + Pack { + mask, + shift: mask.trailing_zeros(), + } + } + + /// Value is packed in the `width` least-significant bits. + pub(crate) const fn least_significant(width: u32) -> Pack { + let mask = mask_for(width); + + Pack { mask, shift: 0 } + } + + /// Value is packed in the `width` more-significant bits. + pub(crate) const fn then(&self, width: u32) -> Pack { + let shift = pointer_width() - self.mask.leading_zeros(); + let mask = mask_for(width) << shift; + + Pack { mask, shift } + } + + /// Mask used to unpack value + #[cfg(all(test, loom))] + pub(crate) const fn mask(&self) -> usize { + self.mask + } + + /// Width, in bits, dedicated to storing the value. + pub(crate) const fn width(&self) -> u32 { + pointer_width() - (self.mask >> self.shift).leading_zeros() + } + + /// Max representable value + pub(crate) const fn max_value(&self) -> usize { + (1 << self.width()) - 1 + } + + pub(crate) fn pack(&self, value: usize, base: usize) -> usize { + assert!(value <= self.max_value()); + (base & !self.mask) | (value << self.shift) + } + + pub(crate) fn unpack(&self, src: usize) -> usize { + unpack(src, self.mask, self.shift) + } +} + +impl fmt::Debug for Pack { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + fmt, + "Pack {{ mask: {:b}, shift: {} }}", + self.mask, self.shift + ) + } +} + +/// Returns the width of a pointer in bits +pub(crate) const fn pointer_width() -> u32 { + std::mem::size_of::<usize>() as u32 * 8 +} + +/// Returns a `usize` with the right-most `n` bits set. 
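+///
+/// For example, `mask_for(3)` returns `0b111`.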
+pub(crate) const fn mask_for(n: u32) -> usize { + let shift = 1usize.wrapping_shl(n - 1); + shift | (shift - 1) +} + +/// Unpack a value using a mask & shift +pub(crate) const fn unpack(src: usize, mask: usize, shift: u32) -> usize { + (src & mask) >> shift +} diff --git a/third_party/rust/tokio/src/util/linked_list.rs b/third_party/rust/tokio/src/util/linked_list.rs new file mode 100644 index 0000000000..aa3ce77188 --- /dev/null +++ b/third_party/rust/tokio/src/util/linked_list.rs @@ -0,0 +1,585 @@ +//! An intrusive double linked list of data +//! +//! The data structure supports tracking pinned nodes. Most of the data +//! structure's APIs are `unsafe` as they require the caller to ensure the +//! specified node is actually contained by the list. + +use core::fmt; +use core::mem::ManuallyDrop; +use core::ptr::NonNull; + +/// An intrusive linked list. +/// +/// Currently, the list is not emptied on drop. It is the caller's +/// responsibility to ensure the list is empty before dropping it. +pub(crate) struct LinkedList<T: Link> { + /// Linked list head + head: Option<NonNull<T::Target>>, + + /// Linked list tail + tail: Option<NonNull<T::Target>>, +} + +unsafe impl<T: Link> Send for LinkedList<T> where T::Target: Send {} +unsafe impl<T: Link> Sync for LinkedList<T> where T::Target: Sync {} + +/// Defines how a type is tracked within a linked list. +/// +/// In order to support storing a single type within multiple lists, accessing +/// the list pointers is decoupled from the entry type. +/// +/// # Safety +/// +/// Implementations must guarantee that `Target` types are pinned in memory. In +/// other words, when a node is inserted, the value will not be moved as long as +/// it is stored in the list. +pub(crate) unsafe trait Link { + /// Handle to the list entry. + /// + /// This is usually a pointer-ish type. + type Handle; + + /// Node type + type Target; + + /// Convert the handle to a raw pointer without consuming the handle + fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target>; + + /// Convert the raw pointer to a handle + unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle; + + /// Return the pointers for a node + unsafe fn pointers(target: NonNull<Self::Target>) -> NonNull<Pointers<Self::Target>>; +} + +/// Previous / next pointers +pub(crate) struct Pointers<T> { + /// The previous node in the list. null if there is no previous node. + prev: Option<NonNull<T>>, + + /// The next node in the list. null if there is no previous node. + next: Option<NonNull<T>>, +} + +unsafe impl<T: Send> Send for Pointers<T> {} +unsafe impl<T: Sync> Sync for Pointers<T> {} + +// ===== impl LinkedList ===== + +impl<T: Link> LinkedList<T> { + /// Creates an empty linked list + pub(crate) fn new() -> LinkedList<T> { + LinkedList { + head: None, + tail: None, + } + } + + /// Adds an element first in the list. + pub(crate) fn push_front(&mut self, val: T::Handle) { + // The value should not be dropped, it is being inserted into the list + let val = ManuallyDrop::new(val); + let ptr = T::as_raw(&*val); + assert_ne!(self.head, Some(ptr)); + unsafe { + T::pointers(ptr).as_mut().next = self.head; + T::pointers(ptr).as_mut().prev = None; + + if let Some(head) = self.head { + T::pointers(head).as_mut().prev = Some(ptr); + } + + self.head = Some(ptr); + + if self.tail.is_none() { + self.tail = Some(ptr); + } + } + } + + /// Removes the last element from a list and returns it, or None if it is + /// empty. 
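+    ///
+    /// This is a constant-time operation; only the tail and its neighbor's
+    /// pointers are updated.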
+    pub(crate) fn pop_back(&mut self) -> Option<T::Handle> {
+        unsafe {
+            let last = self.tail?;
+            self.tail = T::pointers(last).as_ref().prev;
+
+            if let Some(prev) = T::pointers(last).as_ref().prev {
+                T::pointers(prev).as_mut().next = None;
+            } else {
+                self.head = None;
+            }
+
+            T::pointers(last).as_mut().prev = None;
+            T::pointers(last).as_mut().next = None;
+
+            Some(T::from_raw(last))
+        }
+    }
+
+    /// Returns whether the linked list does not contain any node
+    pub(crate) fn is_empty(&self) -> bool {
+        if self.head.is_some() {
+            return false;
+        }
+
+        assert!(self.tail.is_none());
+        true
+    }
+
+    /// Removes the specified node from the list
+    ///
+    /// # Safety
+    ///
+    /// The caller **must** ensure that `node` is currently contained by
+    /// `self` or not contained by any other list.
+    pub(crate) unsafe fn remove(&mut self, node: NonNull<T::Target>) -> Option<T::Handle> {
+        if let Some(prev) = T::pointers(node).as_ref().prev {
+            debug_assert_eq!(T::pointers(prev).as_ref().next, Some(node));
+            T::pointers(prev).as_mut().next = T::pointers(node).as_ref().next;
+        } else {
+            if self.head != Some(node) {
+                return None;
+            }
+
+            self.head = T::pointers(node).as_ref().next;
+        }
+
+        if let Some(next) = T::pointers(node).as_ref().next {
+            debug_assert_eq!(T::pointers(next).as_ref().prev, Some(node));
+            T::pointers(next).as_mut().prev = T::pointers(node).as_ref().prev;
+        } else {
+            // This might be the last item in the list
+            if self.tail != Some(node) {
+                return None;
+            }
+
+            self.tail = T::pointers(node).as_ref().prev;
+        }
+
+        T::pointers(node).as_mut().next = None;
+        T::pointers(node).as_mut().prev = None;
+
+        Some(T::from_raw(node))
+    }
+}
+
+impl<T: Link> fmt::Debug for LinkedList<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("LinkedList")
+            .field("head", &self.head)
+            .field("tail", &self.tail)
+            .finish()
+    }
+}
+
+cfg_sync! {
+    impl<T: Link> LinkedList<T> {
+        pub(crate) fn last(&self) -> Option<&T::Target> {
+            let tail = self.tail.as_ref()?;
+            unsafe {
+                Some(&*tail.as_ptr())
+            }
+        }
+    }
+}
+
+// ===== impl Iter =====
+
+cfg_rt_threaded! {
+    pub(crate) struct Iter<'a, T: Link> {
+        curr: Option<NonNull<T::Target>>,
+        _p: core::marker::PhantomData<&'a T>,
+    }
+
+    impl<T: Link> LinkedList<T> {
+        pub(crate) fn iter(&self) -> Iter<'_, T> {
+            Iter {
+                curr: self.head,
+                _p: core::marker::PhantomData,
+            }
+        }
+    }
+
+    impl<'a, T: Link> Iterator for Iter<'a, T> {
+        type Item = &'a T::Target;
+
+        fn next(&mut self) -> Option<&'a T::Target> {
+            let curr = self.curr?;
+            // safety: the pointer references data contained by the list
+            self.curr = unsafe { T::pointers(curr).as_ref() }.next;
+
+            // safety: the value is still owned by the linked list.
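+            // The shared borrow of the list held by `Iter` prevents removal
+            // or mutation of nodes for the lifetime `'a`.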
+ Some(unsafe { &*curr.as_ptr() }) + } + } +} + +// ===== impl Pointers ===== + +impl<T> Pointers<T> { + /// Create a new set of empty pointers + pub(crate) fn new() -> Pointers<T> { + Pointers { + prev: None, + next: None, + } + } +} + +impl<T> fmt::Debug for Pointers<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Pointers") + .field("prev", &self.prev) + .field("next", &self.next) + .finish() + } +} + +#[cfg(test)] +#[cfg(not(loom))] +mod tests { + use super::*; + + use std::pin::Pin; + + #[derive(Debug)] + struct Entry { + pointers: Pointers<Entry>, + val: i32, + } + + unsafe impl<'a> Link for &'a Entry { + type Handle = Pin<&'a Entry>; + type Target = Entry; + + fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull<Entry> { + NonNull::from(handle.get_ref()) + } + + unsafe fn from_raw(ptr: NonNull<Entry>) -> Pin<&'a Entry> { + Pin::new(&*ptr.as_ptr()) + } + + unsafe fn pointers(mut target: NonNull<Entry>) -> NonNull<Pointers<Entry>> { + NonNull::from(&mut target.as_mut().pointers) + } + } + + fn entry(val: i32) -> Pin<Box<Entry>> { + Box::pin(Entry { + pointers: Pointers::new(), + val, + }) + } + + fn ptr(r: &Pin<Box<Entry>>) -> NonNull<Entry> { + r.as_ref().get_ref().into() + } + + fn collect_list(list: &mut LinkedList<&'_ Entry>) -> Vec<i32> { + let mut ret = vec![]; + + while let Some(entry) = list.pop_back() { + ret.push(entry.val); + } + + ret + } + + fn push_all<'a>(list: &mut LinkedList<&'a Entry>, entries: &[Pin<&'a Entry>]) { + for entry in entries.iter() { + list.push_front(*entry); + } + } + + macro_rules! assert_clean { + ($e:ident) => {{ + assert!($e.pointers.next.is_none()); + assert!($e.pointers.prev.is_none()); + }}; + } + + macro_rules! assert_ptr_eq { + ($a:expr, $b:expr) => {{ + // Deal with mapping a Pin<&mut T> -> Option<NonNull<T>> + assert_eq!(Some($a.as_ref().get_ref().into()), $b) + }}; + } + + #[test] + fn push_and_drain() { + let a = entry(5); + let b = entry(7); + let c = entry(31); + + let mut list = LinkedList::new(); + assert!(list.is_empty()); + + list.push_front(a.as_ref()); + assert!(!list.is_empty()); + list.push_front(b.as_ref()); + list.push_front(c.as_ref()); + + let items: Vec<i32> = collect_list(&mut list); + assert_eq!([5, 7, 31].to_vec(), items); + + assert!(list.is_empty()); + } + + #[test] + fn push_pop_push_pop() { + let a = entry(5); + let b = entry(7); + + let mut list = LinkedList::<&Entry>::new(); + + list.push_front(a.as_ref()); + + let entry = list.pop_back().unwrap(); + assert_eq!(5, entry.val); + assert!(list.is_empty()); + + list.push_front(b.as_ref()); + + let entry = list.pop_back().unwrap(); + assert_eq!(7, entry.val); + + assert!(list.is_empty()); + assert!(list.pop_back().is_none()); + } + + #[test] + fn remove_by_address() { + let a = entry(5); + let b = entry(7); + let c = entry(31); + + unsafe { + // Remove first + let mut list = LinkedList::new(); + + push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]); + assert!(list.remove(ptr(&a)).is_some()); + assert_clean!(a); + // `a` should be no longer there and can't be removed twice + assert!(list.remove(ptr(&a)).is_none()); + assert!(!list.is_empty()); + + assert!(list.remove(ptr(&b)).is_some()); + assert_clean!(b); + // `b` should be no longer there and can't be removed twice + assert!(list.remove(ptr(&b)).is_none()); + assert!(!list.is_empty()); + + assert!(list.remove(ptr(&c)).is_some()); + assert_clean!(c); + // `b` should be no longer there and can't be removed twice + assert!(list.remove(ptr(&c)).is_none()); + assert!(list.is_empty()); + } 
+
+        unsafe {
+            // Remove first, checking the remaining links
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+            assert!(list.remove(ptr(&a)).is_some());
+            assert_clean!(a);
+
+            assert_ptr_eq!(b, list.head);
+            assert_ptr_eq!(c, b.pointers.next);
+            assert_ptr_eq!(b, c.pointers.prev);
+
+            let items = collect_list(&mut list);
+            assert_eq!([31, 7].to_vec(), items);
+        }
+
+        unsafe {
+            // Remove middle
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+            assert!(list.remove(ptr(&b)).is_some());
+            assert_clean!(b);
+
+            assert_ptr_eq!(c, a.pointers.next);
+            assert_ptr_eq!(a, c.pointers.prev);
+
+            let items = collect_list(&mut list);
+            assert_eq!([31, 5].to_vec(), items);
+        }
+
+        unsafe {
+            // Remove last
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+            assert!(list.remove(ptr(&c)).is_some());
+            assert_clean!(c);
+
+            assert!(b.pointers.next.is_none());
+            assert_ptr_eq!(b, list.tail);
+
+            let items = collect_list(&mut list);
+            assert_eq!([7, 5].to_vec(), items);
+        }
+
+        unsafe {
+            // Remove first of two
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[b.as_ref(), a.as_ref()]);
+
+            assert!(list.remove(ptr(&a)).is_some());
+
+            assert_clean!(a);
+
+            // `a` should no longer be there and can't be removed twice
+            assert!(list.remove(ptr(&a)).is_none());
+
+            assert_ptr_eq!(b, list.head);
+            assert_ptr_eq!(b, list.tail);
+
+            assert!(b.pointers.next.is_none());
+            assert!(b.pointers.prev.is_none());
+
+            let items = collect_list(&mut list);
+            assert_eq!([7].to_vec(), items);
+        }
+
+        unsafe {
+            // Remove last of two
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[b.as_ref(), a.as_ref()]);
+
+            assert!(list.remove(ptr(&b)).is_some());
+
+            assert_clean!(b);
+
+            assert_ptr_eq!(a, list.head);
+            assert_ptr_eq!(a, list.tail);
+
+            assert!(a.pointers.next.is_none());
+            assert!(a.pointers.prev.is_none());
+
+            let items = collect_list(&mut list);
+            assert_eq!([5].to_vec(), items);
+        }
+
+        unsafe {
+            // Remove last item
+            let mut list = LinkedList::new();
+
+            push_all(&mut list, &[a.as_ref()]);
+
+            assert!(list.remove(ptr(&a)).is_some());
+            assert_clean!(a);
+
+            assert!(list.head.is_none());
+            assert!(list.tail.is_none());
+            let items = collect_list(&mut list);
+            assert!(items.is_empty());
+        }
+
+        unsafe {
+            // Remove missing
+            let mut list = LinkedList::<&Entry>::new();
+
+            list.push_front(b.as_ref());
+            list.push_front(a.as_ref());
+
+            assert!(list.remove(ptr(&c)).is_none());
+        }
+    }
+
+    #[test]
+    fn iter() {
+        let a = entry(5);
+        let b = entry(7);
+
+        let mut list = LinkedList::<&Entry>::new();
+
+        assert_eq!(0, list.iter().count());
+
+        list.push_front(a.as_ref());
+        list.push_front(b.as_ref());
+
+        let mut i = list.iter();
+        assert_eq!(7, i.next().unwrap().val);
+        assert_eq!(5, i.next().unwrap().val);
+        assert!(i.next().is_none());
+    }
+
+    proptest::proptest! {
+        #[test]
+        fn fuzz_linked_list(ops: Vec<usize>) {
+            run_fuzz(ops);
+        }
+    }
+
+    fn run_fuzz(ops: Vec<usize>) {
+        use std::collections::VecDeque;
+
+        #[derive(Debug)]
+        enum Op {
+            Push,
+            Pop,
+            Remove(usize),
+        }
+
+        let ops = ops
+            .iter()
+            .map(|i| match i % 3 {
+                0 => Op::Push,
+                1 => Op::Pop,
+                2 => Op::Remove(i / 3),
+                _ => unreachable!(),
+            })
+            .collect::<Vec<_>>();
+
+        let mut ll = LinkedList::<&Entry>::new();
+        let mut reference = VecDeque::new();
+
+        let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect();
+
+        for (i, op) in ops.iter().enumerate() {
+            match op {
+                Op::Push => {
+                    reference.push_front(i as i32);
+                    assert_eq!(entries[i].val, i as i32);
+
+                    ll.push_front(entries[i].as_ref());
+                }
+                Op::Pop => {
+                    if reference.is_empty() {
+                        assert!(ll.is_empty());
+                        continue;
+                    }
+
+                    let v = reference.pop_back();
+                    assert_eq!(v, ll.pop_back().map(|v| v.val));
+                }
+                Op::Remove(n) => {
+                    if reference.is_empty() {
+                        assert!(ll.is_empty());
+                        continue;
+                    }
+
+                    let idx = n % reference.len();
+                    let expect = reference.remove(idx).unwrap();
+
+                    unsafe {
+                        let entry = ll.remove(ptr(&entries[expect as usize])).unwrap();
+                        assert_eq!(expect, entry.val);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/util/mod.rs b/third_party/rust/tokio/src/util/mod.rs
new file mode 100644
index 0000000000..a093395c02
--- /dev/null
+++ b/third_party/rust/tokio/src/util/mod.rs
@@ -0,0 +1,24 @@
+cfg_io_driver! {
+    pub(crate) mod bit;
+    pub(crate) mod slab;
+}
+
+#[cfg(any(feature = "sync", feature = "rt-core"))]
+pub(crate) mod linked_list;
+
+#[cfg(any(feature = "rt-threaded", feature = "macros", feature = "stream"))]
+mod rand;
+
+mod wake;
+pub(crate) use wake::{waker_ref, Wake};
+
+cfg_rt_threaded! {
+    pub(crate) use rand::FastRand;
+
+    mod try_lock;
+    pub(crate) use try_lock::TryLock;
+}
+
+#[cfg(any(feature = "macros", feature = "stream"))]
+#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
+pub use rand::thread_rng_n;
diff --git a/third_party/rust/tokio/src/util/pad.rs b/third_party/rust/tokio/src/util/pad.rs
new file mode 100644
index 0000000000..bf0913ca85
--- /dev/null
+++ b/third_party/rust/tokio/src/util/pad.rs
@@ -0,0 +1,52 @@
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+
+#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
+// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls in pairs of
+// 64-byte cache lines at a time, so we have to align to 128 bytes rather than
+// 64.
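+// (On targets other than x86_64, the fallback attribute below keeps the
+// conventional 64-byte alignment.)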
+//
+// Sources:
+// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+pub(crate) struct CachePadded<T> {
+    value: T,
+}
+
+unsafe impl<T: Send> Send for CachePadded<T> {}
+unsafe impl<T: Sync> Sync for CachePadded<T> {}
+
+impl<T> CachePadded<T> {
+    pub(crate) fn new(t: T) -> CachePadded<T> {
+        CachePadded::<T> { value: t }
+    }
+}
+
+impl<T> Deref for CachePadded<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.value
+    }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.value
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("CachePadded")
+            .field("value", &self.value)
+            .finish()
+    }
+}
+
+impl<T> From<T> for CachePadded<T> {
+    fn from(t: T) -> Self {
+        CachePadded::new(t)
+    }
+}
diff --git a/third_party/rust/tokio/src/util/rand.rs b/third_party/rust/tokio/src/util/rand.rs
new file mode 100644
index 0000000000..4b72b4b110
--- /dev/null
+++ b/third_party/rust/tokio/src/util/rand.rs
@@ -0,0 +1,64 @@
+use std::cell::Cell;
+
+/// Fast random number generator.
+///
+/// Implements xorshift64+: two 32-bit xorshift sequences added together.
+/// The shift triplet [17,7,16] was calculated as indicated in Marsaglia's
+/// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+/// This generator passes the SmallCrush suite, part of the TestU01 framework:
+/// http://simul.iro.umontreal.ca/testu01/tu01.html
+#[derive(Debug)]
+pub(crate) struct FastRand {
+    one: Cell<u32>,
+    two: Cell<u32>,
+}
+
+impl FastRand {
+    /// Initialize a new, thread-local, fast random number generator.
+    pub(crate) fn new(seed: u64) -> FastRand {
+        let one = (seed >> 32) as u32;
+        let mut two = seed as u32;
+
+        if two == 0 {
+            // This value cannot be zero
+            two = 1;
+        }
+
+        FastRand {
+            one: Cell::new(one),
+            two: Cell::new(two),
+        }
+    }
+
+    pub(crate) fn fastrand_n(&self, n: u32) -> u32 {
+        // This is similar to fastrand() % n, but faster.
+        // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+        let mul = (self.fastrand() as u64).wrapping_mul(n as u64);
+        (mul >> 32) as u32
+    }
+
+    fn fastrand(&self) -> u32 {
+        let mut s1 = self.one.get();
+        let s0 = self.two.get();
+
+        s1 ^= s1 << 17;
+        s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16;
+
+        self.one.set(s0);
+        self.two.set(s1);
+
+        s0.wrapping_add(s1)
+    }
+}
+
+// Used by the select macro and `StreamMap`
+#[cfg(any(feature = "macros", feature = "stream"))]
+#[doc(hidden)]
+#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
+pub fn thread_rng_n(n: u32) -> u32 {
+    thread_local! {
+        static THREAD_RNG: FastRand = FastRand::new(crate::loom::rand::seed());
+    }
+
+    THREAD_RNG.with(|rng| rng.fastrand_n(n))
+}
diff --git a/third_party/rust/tokio/src/util/slab/addr.rs b/third_party/rust/tokio/src/util/slab/addr.rs
new file mode 100644
index 0000000000..c14e32e909
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/addr.rs
@@ -0,0 +1,154 @@
+//! Tracks the location of an entry in a slab.
+//!
+//! # Index packing
+//!
+//! A slab index consists of multiple indices packed into a single `usize` value
+//! that correspond to different parts of the slab.
+//!
+//! The least significant `MAX_PAGES + INITIAL_PAGE_SIZE.trailing_zeros() + 1`
+//! bits store the address within a shard, starting at 0 for the first slot on
+//! the first page. To index a slot within a shard, we first find the index of
+//! the page that the address falls on, and then the offset of the slot within
+//! that page.
+//!
+//! Since every page is twice as large as the previous page, and all page sizes
+//! are powers of two, we can determine the page index that contains a given
+//! address by shifting the address down by the smallest page size and looking
+//! at how many twos places are necessary to represent that number, telling us
+//! what power of two page size it fits inside of. We can determine the number
+//! of twos places by counting the number of leading zeros (unused twos places)
+//! in the number's binary representation, and subtracting that count from the
+//! total number of bits in a word.
+//!
+//! Once we know what page contains an address, we can subtract the size of all
+//! previous pages from the address to determine the offset within the page.
+//!
+//! After the page address, the next `MAX_THREADS.trailing_zeros() + 1` least
+//! significant bits are the thread ID. These are used to index the array of
+//! shards to find which shard a slot belongs to. If an entry is being removed
+//! and the thread ID of its index matches that of the current thread, we can
+//! use the `remove_local` fast path; otherwise, we have to use the synchronized
+//! `remove_remote` path.
+//!
+//! Finally, a generation value is packed into the index. The `RESERVED_BITS`
+//! most significant bits are left unused, and the remaining bits between the
+//! last bit of the thread ID and the first reserved bit are used to store the
+//! generation. The generation is used as part of an atomic read-modify-write
+//! loop every time a `ScheduledIo`'s readiness is modified, or when the
+//! resource is removed, to guard against the ABA problem.
+//!
+//! Visualized:
+//!
+//! ```text
+//! ┌──────────┬───────────────┬──────────────────┬──────────────────────────┐
+//! │ reserved │  generation   │    thread ID     │         address          │
+//! └▲─────────┴▲──────────────┴▲─────────────────┴▲────────────────────────▲┘
+//!  │          │               │                  │                        │
+//!  bits(usize)│   bits(MAX_THREADS)              │                        0
+//!             │               │
+//!  bits(usize) - RESERVED     MAX_PAGES + bits(INITIAL_PAGE_SIZE)
+//! ```
+
+use crate::util::bit;
+use crate::util::slab::{Generation, INITIAL_PAGE_SIZE, MAX_PAGES, MAX_THREADS};
+
+use std::usize;
+
+/// References the location at which an entry is stored in a slab.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct Address(usize);
+
+const PAGE_INDEX_SHIFT: u32 = INITIAL_PAGE_SIZE.trailing_zeros() + 1;
+
+/// Address in the shard
+const SLOT: bit::Pack = bit::Pack::least_significant(MAX_PAGES as u32 + PAGE_INDEX_SHIFT);
+
+/// Masks the thread identifier
+const THREAD: bit::Pack = SLOT.then(MAX_THREADS.trailing_zeros() + 1);
+
+/// Masks the generation
+const GENERATION: bit::Pack = THREAD
+    .then(bit::pointer_width().wrapping_sub(RESERVED.width() + THREAD.width() + SLOT.width()));
+
+// Chosen arbitrarily
+const RESERVED: bit::Pack = bit::Pack::most_significant(5);
+
+impl Address {
+    /// Represents no entry, picked to avoid collision with Mio's internals.
+    /// This value should not be passed to mio.
+    pub(crate) const NULL: usize = usize::MAX >> 1;
+
+    /// Re-exported by `Generation`.
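+    /// (The generation gets however many bits remain once the slot address,
+    /// thread ID, and reserved bits have been packed in.)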
+    pub(super) const GENERATION_WIDTH: u32 = GENERATION.width();
+
+    pub(super) fn new(shard_index: usize, generation: Generation) -> Address {
+        let mut repr = 0;
+
+        repr = SLOT.pack(shard_index, repr);
+        repr = GENERATION.pack(generation.to_usize(), repr);
+
+        Address(repr)
+    }
+
+    /// Convert from a `usize` representation.
+    pub(crate) fn from_usize(src: usize) -> Address {
+        assert_ne!(src, Self::NULL);
+
+        Address(src)
+    }
+
+    /// Convert to a `usize` representation
+    pub(crate) fn to_usize(self) -> usize {
+        self.0
+    }
+
+    pub(crate) fn generation(self) -> Generation {
+        Generation::new(GENERATION.unpack(self.0))
+    }
+
+    /// Returns the page index
+    pub(super) fn page(self) -> usize {
+        // Since every page is twice as large as the previous page, and all page
+        // sizes are powers of two, we can determine the page index that
+        // contains a given address by shifting the address down by the smallest
+        // page size and looking at how many twos places are necessary to
+        // represent that number, telling us what power of two page size it fits
+        // inside of. We can determine the number of twos places by counting the
+        // number of leading zeros (unused twos places) in the number's binary
+        // representation, and subtracting that count from the total number of
+        // bits in a word.
+        let slot_shifted = (self.slot() + INITIAL_PAGE_SIZE) >> PAGE_INDEX_SHIFT;
+        (bit::pointer_width() - slot_shifted.leading_zeros()) as usize
+    }
+
+    /// Returns the slot index
+    pub(super) fn slot(self) -> usize {
+        SLOT.unpack(self.0)
+    }
+}
+
+#[cfg(test)]
+cfg_not_loom! {
+    use proptest::proptest;
+
+    #[test]
+    fn test_pack_format() {
+        assert_eq!(5, RESERVED.width());
+        assert_eq!(0b11111, RESERVED.max_value());
+    }
+
+    proptest! {
+        #[test]
+        fn address_roundtrips(
+            slot in 0usize..SLOT.max_value(),
+            generation in 0usize..Generation::MAX,
+        ) {
+            let address = Address::new(slot, Generation::new(generation));
+            // Round trip
+            let address = Address::from_usize(address.to_usize());
+
+            assert_eq!(address.slot(), slot);
+            assert_eq!(address.generation().to_usize(), generation);
+        }
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/entry.rs b/third_party/rust/tokio/src/util/slab/entry.rs
new file mode 100644
index 0000000000..2e0b10b0fd
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/entry.rs
@@ -0,0 +1,7 @@
+use crate::util::slab::Generation;
+
+pub(crate) trait Entry: Default {
+    fn generation(&self) -> Generation;
+
+    fn reset(&self, generation: Generation) -> bool;
+}
diff --git a/third_party/rust/tokio/src/util/slab/generation.rs b/third_party/rust/tokio/src/util/slab/generation.rs
new file mode 100644
index 0000000000..4b16b2caf6
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/generation.rs
@@ -0,0 +1,32 @@
+use crate::util::bit;
+use crate::util::slab::Address;
+
+/// A mutation identifier for a slot in the slab. The generation helps prevent
+/// accessing an entry with an outdated token.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
+pub(crate) struct Generation(usize);
+
+impl Generation {
+    pub(crate) const WIDTH: u32 = Address::GENERATION_WIDTH;
+
+    pub(super) const MAX: usize = bit::mask_for(Address::GENERATION_WIDTH);
+
+    /// Create a new generation
+    ///
+    /// # Panics
+    ///
+    /// Panics if `value` is greater than max generation.
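+    ///
+    /// A sketch of the wrapping behaviour (ignored, since this type is
+    /// crate-private):
+    ///
+    /// ```ignore
+    /// let gen = Generation::new(Generation::MAX);
+    /// // `next` masks with `MAX`, so the value wraps back around to zero.
+    /// assert_eq!(0, gen.next().to_usize());
+    /// ```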
+    pub(crate) fn new(value: usize) -> Generation {
+        assert!(value <= Self::MAX);
+        Generation(value)
+    }
+
+    /// Returns the next generation value
+    pub(crate) fn next(self) -> Generation {
+        Generation((self.0 + 1) & Self::MAX)
+    }
+
+    pub(crate) fn to_usize(self) -> usize {
+        self.0
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/mod.rs b/third_party/rust/tokio/src/util/slab/mod.rs
new file mode 100644
index 0000000000..5082970507
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/mod.rs
@@ -0,0 +1,107 @@
+//! A lock-free concurrent slab.
+
+mod addr;
+pub(crate) use addr::Address;
+
+mod entry;
+pub(crate) use entry::Entry;
+
+mod generation;
+pub(crate) use generation::Generation;
+
+mod page;
+
+mod shard;
+use shard::Shard;
+
+mod slot;
+use slot::Slot;
+
+mod stack;
+use stack::TransferStack;
+
+#[cfg(all(loom, test))]
+mod tests;
+
+use crate::loom::sync::Mutex;
+use crate::util::bit;
+
+use std::fmt;
+
+#[cfg(target_pointer_width = "64")]
+const MAX_THREADS: usize = 4096;
+
+#[cfg(target_pointer_width = "32")]
+const MAX_THREADS: usize = 2048;
+
+/// Max number of pages per slab
+const MAX_PAGES: usize = bit::pointer_width() as usize / 4;
+
+cfg_not_loom! {
+    /// Size of first page
+    const INITIAL_PAGE_SIZE: usize = 32;
+}
+
+cfg_loom! {
+    const INITIAL_PAGE_SIZE: usize = 2;
+}
+
+/// A sharded slab.
+pub(crate) struct Slab<T> {
+    // Single shard for now. Eventually there will be more.
+    shard: Shard<T>,
+    local: Mutex<()>,
+}
+
+unsafe impl<T: Send> Send for Slab<T> {}
+unsafe impl<T: Sync> Sync for Slab<T> {}
+
+impl<T: Entry> Slab<T> {
+    /// Returns a new slab with the default configuration parameters.
+    pub(crate) fn new() -> Slab<T> {
+        Slab {
+            shard: Shard::new(),
+            local: Mutex::new(()),
+        }
+    }
+
+    /// Allocates a value into the slab, returning a key that can be used to
+    /// access it.
+    ///
+    /// If this function returns `None`, then the shard for the current thread
+    /// is full and no items can be added until some are removed, or the maximum
+    /// number of shards has been reached.
+    pub(crate) fn alloc(&self) -> Option<Address> {
+        // we must lock the slab to alloc an item.
+        let _local = self.local.lock().unwrap();
+        self.shard.alloc()
+    }
+
+    /// Removes the value associated with the given key from the slab.
+    pub(crate) fn remove(&self, idx: Address) {
+        // try to lock the slab so that we can use `remove_local`.
+        let lock = self.local.try_lock();
+
+        // if we were able to lock the slab, we are "local" and can use the fast
+        // path; otherwise, we will use `remove_remote`.
+        if lock.is_ok() {
+            self.shard.remove_local(idx)
+        } else {
+            self.shard.remove_remote(idx)
+        }
+    }
+
+    /// Return a reference to the value associated with the given key.
+    ///
+    /// If the slab does not contain a value for the given key, `None` is
+    /// returned instead.
+    pub(crate) fn get(&self, token: Address) -> Option<&T> {
+        self.shard.get(token)
+    }
+}
+
+impl<T> fmt::Debug for Slab<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Slab").field("shard", &self.shard).finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/page.rs b/third_party/rust/tokio/src/util/slab/page.rs
new file mode 100644
index 0000000000..0000e934de
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/page.rs
@@ -0,0 +1,187 @@
+use crate::loom::cell::UnsafeCell;
+use crate::util::slab::{Address, Entry, Slot, TransferStack, INITIAL_PAGE_SIZE};
+
+use std::fmt;
+
+/// Data accessed only by the thread that owns the shard.
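+///
+/// `head` is the index of the first slot in this page's local free list.
+/// Free slots are chained through `Slot::next`; a head value that is not a
+/// valid index for this page (ultimately `Address::NULL`) means the local
+/// list is empty.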
+pub(crate) struct Local {
+    head: UnsafeCell<usize>,
+}
+
+/// Data accessed by any thread.
+pub(crate) struct Shared<T> {
+    remote: TransferStack,
+    size: usize,
+    prev_sz: usize,
+    slab: UnsafeCell<Option<Box<[Slot<T>]>>>,
+}
+
+/// Returns the size of the page at index `n`
+pub(super) fn size(n: usize) -> usize {
+    INITIAL_PAGE_SIZE << n
+}
+
+impl Local {
+    pub(crate) fn new() -> Self {
+        Self {
+            head: UnsafeCell::new(0),
+        }
+    }
+
+    fn head(&self) -> usize {
+        self.head.with(|head| unsafe { *head })
+    }
+
+    fn set_head(&self, new_head: usize) {
+        self.head.with_mut(|head| unsafe {
+            *head = new_head;
+        })
+    }
+}
+
+impl<T: Entry> Shared<T> {
+    pub(crate) fn new(size: usize, prev_sz: usize) -> Shared<T> {
+        Self {
+            prev_sz,
+            size,
+            remote: TransferStack::new(),
+            slab: UnsafeCell::new(None),
+        }
+    }
+
+    /// Allocates storage for this page if it does not already exist.
+    ///
+    /// This requires unique access to the page (e.g. it is called from the
+    /// thread that owns the page, or, in the case of `SingleShard`, while the
+    /// lock is held). In order to indicate this, a reference to the page's
+    /// `Local` data is taken by this function; the `Local` argument is not
+    /// actually used, but requiring it ensures that this is only called when
+    /// local access is held.
+    #[cold]
+    fn alloc_page(&self, _: &Local) {
+        debug_assert!(self.slab.with(|s| unsafe { (*s).is_none() }));
+
+        let mut slab = Vec::with_capacity(self.size);
+        slab.extend((1..self.size).map(Slot::new));
+        slab.push(Slot::new(Address::NULL));
+
+        self.slab.with_mut(|s| {
+            // this mut access is safe: it only occurs to initially
+            // allocate the page, which only happens on this thread; if the
+            // page has not yet been allocated, other threads will not try
+            // to access it yet.
+            unsafe {
+                *s = Some(slab.into_boxed_slice());
+            }
+        });
+    }
+
+    pub(crate) fn alloc(&self, local: &Local) -> Option<Address> {
+        let head = local.head();
+
+        // are there any items on the local free list? (fast path)
+        let head = if head < self.size {
+            head
+        } else {
+            // if the local free list is empty, pop all the items on the remote
+            // free list onto the local free list.
+            self.remote.pop_all()?
+        };
+
+        // if the head is still null, both the local and remote free lists are
+        // empty --- we can't fit any more items on this page.
+        if head == Address::NULL {
+            return None;
+        }
+
+        // do we need to allocate storage for this page?
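+        // Pages are allocated lazily: no backing storage is created until the
+        // first slot on the page is claimed, so unused pages only cost their
+        // metadata.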
+        let page_needs_alloc = self.slab.with(|s| unsafe { (*s).is_none() });
+        if page_needs_alloc {
+            self.alloc_page(local);
+        }
+
+        let gen = self.slab.with(|slab| {
+            let slab = unsafe { &*(slab) }
+                .as_ref()
+                .expect("page must have been allocated to alloc!");
+
+            let slot = &slab[head];
+
+            local.set_head(slot.next());
+            slot.generation()
+        });
+
+        let index = head + self.prev_sz;
+
+        Some(Address::new(index, gen))
+    }
+
+    pub(crate) fn get(&self, addr: Address) -> Option<&T> {
+        let page_offset = addr.slot() - self.prev_sz;
+
+        self.slab
+            .with(|slab| unsafe { &*slab }.as_ref()?.get(page_offset))
+            .map(|slot| slot.get())
+    }
+
+    pub(crate) fn remove_local(&self, local: &Local, addr: Address) {
+        let offset = addr.slot() - self.prev_sz;
+
+        self.slab.with(|slab| {
+            let slab = unsafe { &*slab }.as_ref();
+
+            let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
+                slot
+            } else {
+                return;
+            };
+
+            if slot.reset(addr.generation()) {
+                slot.set_next(local.head());
+                local.set_head(offset);
+            }
+        })
+    }
+
+    pub(crate) fn remove_remote(&self, addr: Address) {
+        let offset = addr.slot() - self.prev_sz;
+
+        self.slab.with(|slab| {
+            let slab = unsafe { &*slab }.as_ref();
+
+            let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
+                slot
+            } else {
+                return;
+            };
+
+            if !slot.reset(addr.generation()) {
+                return;
+            }
+
+            self.remote.push(offset, |next| slot.set_next(next));
+        })
+    }
+}
+
+impl fmt::Debug for Local {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.head.with(|head| {
+            let head = unsafe { *head };
+            f.debug_struct("Local")
+                .field("head", &format_args!("{:#0x}", head))
+                .finish()
+        })
+    }
+}
+
+impl<T> fmt::Debug for Shared<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Shared")
+            .field("remote", &self.remote)
+            .field("prev_sz", &self.prev_sz)
+            .field("size", &self.size)
+            // .field("slab", &self.slab)
+            .finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/shard.rs b/third_party/rust/tokio/src/util/slab/shard.rs
new file mode 100644
index 0000000000..eaca6f656a
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/shard.rs
@@ -0,0 +1,105 @@
+use crate::util::slab::{page, Address, Entry, MAX_PAGES};
+
+use std::fmt;
+
+// ┌─────────────┐      ┌────────┐
+// │ page 1      │      │        │
+// ├─────────────┤ ┌───▶│  next──┼─┐
+// │ page 2      │ │    ├────────┤ │
+// │             │ │    │XXXXXXXX│ │
+// │ local_free──┼─┘    ├────────┤ │
+// │ global_free─┼─┐    │        │◀┘
+// ├─────────────┤ └───▶│  next──┼─┐
+// │   page 3    │      ├────────┤ │
+// └─────────────┘      │XXXXXXXX│ │
+//       ...            ├────────┤ │
+// ┌─────────────┐      │XXXXXXXX│ │
+// │   page n    │      ├────────┤ │
+// └─────────────┘      │        │◀┘
+//                      │  next──┼───▶
+//                      ├────────┤
+//                      │XXXXXXXX│
+//                      └────────┘
+//                         ...
+pub(super) struct Shard<T> {
+    /// The local free list for each page.
+    ///
+    /// These are only ever accessed from this shard's thread, so they are
+    /// stored separately from the shared state for the page that can be
+    /// accessed concurrently, to minimize false sharing.
+    local: Box<[page::Local]>,
+    /// The shared state for each page in this shard.
+    ///
+    /// This consists of the page's metadata (size, previous size), remote free
+    /// list, and a pointer to the actual array backing that page.
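+    ///
+    /// Page sizes double: page `n` holds `INITIAL_PAGE_SIZE << n` slots, and
+    /// `prev_sz` records the combined size of all earlier pages so that a
+    /// shard-level address can be converted into a page-local offset.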
+    shared: Box<[page::Shared<T>]>,
+}
+
+impl<T: Entry> Shard<T> {
+    pub(super) fn new() -> Shard<T> {
+        let mut total_sz = 0;
+        let shared = (0..MAX_PAGES)
+            .map(|page_num| {
+                let sz = page::size(page_num);
+                let prev_sz = total_sz;
+                total_sz += sz;
+                page::Shared::new(sz, prev_sz)
+            })
+            .collect();
+
+        let local = (0..MAX_PAGES).map(|_| page::Local::new()).collect();
+
+        Shard { local, shared }
+    }
+
+    pub(super) fn alloc(&self) -> Option<Address> {
+        // Can we fit the value into an existing page?
+        for (page_idx, page) in self.shared.iter().enumerate() {
+            let local = self.local(page_idx);
+
+            if let Some(page_offset) = page.alloc(local) {
+                return Some(page_offset);
+            }
+        }
+
+        None
+    }
+
+    pub(super) fn get(&self, addr: Address) -> Option<&T> {
+        let page_idx = addr.page();
+
+        if page_idx >= self.shared.len() {
+            return None;
+        }
+
+        self.shared[page_idx].get(addr)
+    }
+
+    /// Remove an item on the shard's local thread.
+    pub(super) fn remove_local(&self, addr: Address) {
+        let page_idx = addr.page();
+
+        if let Some(page) = self.shared.get(page_idx) {
+            page.remove_local(self.local(page_idx), addr);
+        }
+    }
+
+    /// Remove an item, while on a different thread from the shard's local thread.
+    pub(super) fn remove_remote(&self, addr: Address) {
+        if let Some(page) = self.shared.get(addr.page()) {
+            page.remove_remote(addr);
+        }
+    }
+
+    fn local(&self, i: usize) -> &page::Local {
+        &self.local[i]
+    }
+}
+
+impl<T> fmt::Debug for Shard<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Shard")
+            .field("shared", &self.shared)
+            .finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/slot.rs b/third_party/rust/tokio/src/util/slab/slot.rs
new file mode 100644
index 0000000000..0608b26189
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/slot.rs
@@ -0,0 +1,42 @@
+use crate::loom::cell::UnsafeCell;
+use crate::util::slab::{Entry, Generation};
+
+/// Stores an entry in the slab.
+pub(super) struct Slot<T> {
+    next: UnsafeCell<usize>,
+    entry: T,
+}
+
+impl<T: Entry> Slot<T> {
+    /// Initialize a new `Slot` linked to `next`.
+    ///
+    /// The entry is initialized to a default value.
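+    ///
+    /// `next` is only meaningful while the slot sits on a free list; it goes
+    /// stale once the slot is allocated and is rewritten via `set_next` when
+    /// the slot is freed again.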
+    pub(super) fn new(next: usize) -> Slot<T> {
+        Slot {
+            next: UnsafeCell::new(next),
+            entry: T::default(),
+        }
+    }
+
+    pub(super) fn get(&self) -> &T {
+        &self.entry
+    }
+
+    pub(super) fn generation(&self) -> Generation {
+        self.entry.generation()
+    }
+
+    pub(super) fn reset(&self, generation: Generation) -> bool {
+        self.entry.reset(generation)
+    }
+
+    pub(super) fn next(&self) -> usize {
+        self.next.with(|next| unsafe { *next })
+    }
+
+    pub(super) fn set_next(&self, next: usize) {
+        self.next.with_mut(|n| unsafe {
+            (*n) = next;
+        })
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/stack.rs b/third_party/rust/tokio/src/util/slab/stack.rs
new file mode 100644
index 0000000000..0ae0d71006
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/stack.rs
@@ -0,0 +1,58 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::util::slab::Address;
+
+use std::fmt;
+use std::sync::atomic::Ordering;
+use std::usize;
+
+pub(super) struct TransferStack {
+    head: AtomicUsize,
+}
+
+impl TransferStack {
+    pub(super) fn new() -> Self {
+        Self {
+            head: AtomicUsize::new(Address::NULL),
+        }
+    }
+
+    pub(super) fn pop_all(&self) -> Option<usize> {
+        let val = self.head.swap(Address::NULL, Ordering::Acquire);
+
+        if val == Address::NULL {
+            None
+        } else {
+            Some(val)
+        }
+    }
+
+    pub(super) fn push(&self, value: usize, before: impl Fn(usize)) {
+        let mut next = self.head.load(Ordering::Relaxed);
+
+        loop {
+            before(next);
+
+            match self
+                .head
+                .compare_exchange(next, value, Ordering::AcqRel, Ordering::Acquire)
+            {
+                // lost the race!
+                Err(actual) => next = actual,
+                Ok(_) => return,
+            }
+        }
+    }
+}
+
+impl fmt::Debug for TransferStack {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Loom likes to dump all its internal state in `fmt::Debug` impls, so
+        // we override this to just print the current value in tests.
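+        // (A relaxed load is sufficient: this is a best-effort diagnostic
+        // snapshot, not a synchronizing read.)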
+        f.debug_struct("TransferStack")
+            .field(
+                "head",
+                &format_args!("{:#x}", self.head.load(Ordering::Relaxed)),
+            )
+            .finish()
+    }
+}
diff --git a/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs b/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs
new file mode 100644
index 0000000000..48e94f0034
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs
@@ -0,0 +1,327 @@
+use crate::io::driver::ScheduledIo;
+use crate::util::slab::{Address, Slab};
+
+use loom::sync::{Arc, Condvar, Mutex};
+use loom::thread;
+
+#[test]
+fn local_remove() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+
+        let s = slab.clone();
+        let t1 = thread::spawn(move || {
+            let idx = store_val(&s, 1);
+            assert_eq!(get_val(&s, idx), Some(1));
+            s.remove(idx);
+            assert_eq!(get_val(&s, idx), None);
+            let idx = store_val(&s, 2);
+            assert_eq!(get_val(&s, idx), Some(2));
+            s.remove(idx);
+            assert_eq!(get_val(&s, idx), None);
+        });
+
+        let s = slab.clone();
+        let t2 = thread::spawn(move || {
+            let idx = store_val(&s, 3);
+            assert_eq!(get_val(&s, idx), Some(3));
+            s.remove(idx);
+            assert_eq!(get_val(&s, idx), None);
+            let idx = store_val(&s, 4);
+            s.remove(idx);
+            assert_eq!(get_val(&s, idx), None);
+        });
+
+        let s = slab;
+        let idx1 = store_val(&s, 5);
+        assert_eq!(get_val(&s, idx1), Some(5));
+        let idx2 = store_val(&s, 6);
+        assert_eq!(get_val(&s, idx2), Some(6));
+        s.remove(idx1);
+        assert_eq!(get_val(&s, idx1), None);
+        assert_eq!(get_val(&s, idx2), Some(6));
+        s.remove(idx2);
+        assert_eq!(get_val(&s, idx2), None);
+
+        t1.join().expect("thread 1 should not panic");
+        t2.join().expect("thread 2 should not panic");
+    });
+}
+
+#[test]
+fn remove_remote() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+
+        let idx1 = store_val(&slab, 1);
+        assert_eq!(get_val(&slab, idx1), Some(1));
+
+        let idx2 = store_val(&slab, 2);
+        assert_eq!(get_val(&slab, idx2), Some(2));
+
+        let idx3 = store_val(&slab, 3);
+        assert_eq!(get_val(&slab, idx3), Some(3));
+
+        let s = slab.clone();
+        let t1 = thread::spawn(move || {
+            assert_eq!(get_val(&s, idx2), Some(2));
+            s.remove(idx2);
+            assert_eq!(get_val(&s, idx2), None);
+        });
+
+        let s = slab.clone();
+        let t2 = thread::spawn(move || {
+            assert_eq!(get_val(&s, idx3), Some(3));
+            s.remove(idx3);
+            assert_eq!(get_val(&s, idx3), None);
+        });
+
+        t1.join().expect("thread 1 should not panic");
+        t2.join().expect("thread 2 should not panic");
+
+        assert_eq!(get_val(&slab, idx1), Some(1));
+        assert_eq!(get_val(&slab, idx2), None);
+        assert_eq!(get_val(&slab, idx3), None);
+    });
+}
+
+#[test]
+fn remove_remote_and_reuse() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+
+        let idx1 = store_val(&slab, 1);
+        let idx2 = store_val(&slab, 2);
+
+        assert_eq!(get_val(&slab, idx1), Some(1));
+        assert_eq!(get_val(&slab, idx2), Some(2));
+
+        let s = slab.clone();
+        let t1 = thread::spawn(move || {
+            s.remove(idx1);
+            let value = get_val(&s, idx1);
+
+            // We may or may not see the new value yet, depending on when
+            // this occurs, but we must either see the new value or `None`;
+            // the old value has been removed!
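+            // (Both outcomes are legal because the concurrent
+            // `store_when_free` below may reuse the freed slot at any point.)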
+            assert!(value == None || value == Some(3));
+        });
+
+        let idx3 = store_when_free(&slab, 3);
+        t1.join().expect("thread 1 should not panic");
+
+        assert_eq!(get_val(&slab, idx3), Some(3));
+        assert_eq!(get_val(&slab, idx2), Some(2));
+    });
+}
+
+#[test]
+fn concurrent_alloc_remove() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+        let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+        let slab2 = slab.clone();
+        let pair2 = pair.clone();
+        let remover = thread::spawn(move || {
+            let (lock, cvar) = &*pair2;
+            for _ in 0..2 {
+                let mut next = lock.lock().unwrap();
+                while next.is_none() {
+                    next = cvar.wait(next).unwrap();
+                }
+                let key = next.take().unwrap();
+                slab2.remove(key);
+                assert_eq!(get_val(&slab2, key), None);
+                cvar.notify_one();
+            }
+        });
+
+        let (lock, cvar) = &*pair;
+        for i in 0..2 {
+            let key = store_val(&slab, i);
+
+            let mut next = lock.lock().unwrap();
+            *next = Some(key);
+            cvar.notify_one();
+
+            // Wait for the item to be removed.
+            while next.is_some() {
+                next = cvar.wait(next).unwrap();
+            }
+
+            assert_eq!(get_val(&slab, key), None);
+        }
+
+        remover.join().unwrap();
+    })
+}
+
+#[test]
+fn concurrent_remove_remote_and_reuse() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+
+        let idx1 = store_val(&slab, 1);
+        let idx2 = store_val(&slab, 2);
+
+        assert_eq!(get_val(&slab, idx1), Some(1));
+        assert_eq!(get_val(&slab, idx2), Some(2));
+
+        let s = slab.clone();
+        let s2 = slab.clone();
+        let t1 = thread::spawn(move || {
+            s.remove(idx1);
+        });
+
+        let t2 = thread::spawn(move || {
+            s2.remove(idx2);
+        });
+
+        let idx3 = store_when_free(&slab, 3);
+        t1.join().expect("thread 1 should not panic");
+        t2.join().expect("thread 2 should not panic");
+
+        assert!(get_val(&slab, idx1).is_none());
+        assert!(get_val(&slab, idx2).is_none());
+        assert_eq!(get_val(&slab, idx3), Some(3));
+    });
+}
+
+#[test]
+fn alloc_remove_get() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+        let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+        let slab2 = slab.clone();
+        let pair2 = pair.clone();
+        let t1 = thread::spawn(move || {
+            let slab = slab2;
+            let (lock, cvar) = &*pair2;
+            // allocate one entry just so that we have to use the final one for
+            // all future allocations.
+            let _key0 = store_val(&slab, 0);
+            let key = store_val(&slab, 1);
+
+            let mut next = lock.lock().unwrap();
+            *next = Some(key);
+            cvar.notify_one();
+            // remove the second entry
+            slab.remove(key);
+            // store a new readiness at the same location (since the slab
+            // already has an entry in slot 0)
+            store_val(&slab, 2);
+        });
+
+        let (lock, cvar) = &*pair;
+        // wait for the second entry to be stored...
+        let mut next = lock.lock().unwrap();
+        while next.is_none() {
+            next = cvar.wait(next).unwrap();
+        }
+        let key = next.unwrap();
+
+        // our generation will be stale when the second store occurs at that
+        // index, so we must not see the value of that store.
+        let val = get_val(&slab, key);
+        assert_ne!(val, Some(2), "generation must have advanced!");
+
+        t1.join().unwrap();
+    })
+}
+
+#[test]
+fn alloc_remove_set() {
+    loom::model(|| {
+        let slab = Arc::new(Slab::new());
+        let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+        let slab2 = slab.clone();
+        let pair2 = pair.clone();
+        let t1 = thread::spawn(move || {
+            let slab = slab2;
+            let (lock, cvar) = &*pair2;
+            // allocate one entry just so that we have to use the final one for
+            // all future allocations.
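+            // (With slot 0 pinned by `_key0`, `key` and the re-allocation
+            // after `remove` below must land on the same slot, which is what
+            // forces the generation check to matter.)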
+            let _key0 = store_val(&slab, 0);
+            let key = store_val(&slab, 1);
+
+            let mut next = lock.lock().unwrap();
+            *next = Some(key);
+            cvar.notify_one();
+
+            slab.remove(key);
+            // remove the old entry and insert a new one, with a new generation.
+            let key2 = slab.alloc().expect("store key 2");
+            // after the remove, we must not see the value written with the
+            // stale index.
+            assert_eq!(
+                get_val(&slab, key),
+                None,
+                "stale set must no longer be visible"
+            );
+            assert_eq!(get_val(&slab, key2), Some(0));
+            key2
+        });
+
+        let (lock, cvar) = &*pair;
+
+        // wait for the second entry to be stored. the index we get from the
+        // other thread may become stale after a write.
+        let mut next = lock.lock().unwrap();
+        while next.is_none() {
+            next = cvar.wait(next).unwrap();
+        }
+        let key = next.unwrap();
+
+        // try to write to the index with our generation
+        slab.get(key).map(|val| val.set_readiness(key, |_| 2));
+
+        let key2 = t1.join().unwrap();
+        // after the remove, we must not see the value written with the
+        // stale index either.
+        assert_eq!(
+            get_val(&slab, key),
+            None,
+            "stale set must no longer be visible"
+        );
+        assert_eq!(get_val(&slab, key2), Some(0));
+    });
+}
+
+fn get_val(slab: &Arc<Slab<ScheduledIo>>, address: Address) -> Option<usize> {
+    slab.get(address).and_then(|s| s.get_readiness(address))
+}
+
+fn store_val(slab: &Arc<Slab<ScheduledIo>>, readiness: usize) -> Address {
+    let key = slab.alloc().expect("allocate slot");
+
+    if let Some(slot) = slab.get(key) {
+        slot.set_readiness(key, |_| readiness)
+            .expect("generation should still be valid!");
+    } else {
+        panic!("slab did not contain a value for {:?}", key);
+    }
+
+    key
+}
+
+fn store_when_free(slab: &Arc<Slab<ScheduledIo>>, readiness: usize) -> Address {
+    let key = loop {
+        if let Some(key) = slab.alloc() {
+            break key;
+        }
+
+        thread::yield_now();
+    };
+
+    if let Some(slot) = slab.get(key) {
+        slot.set_readiness(key, |_| readiness)
+            .expect("generation should still be valid!");
+    } else {
+        panic!("slab did not contain a value for {:?}", key);
+    }
+
+    key
+}
diff --git a/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs b/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs
new file mode 100644
index 0000000000..47ad46d3a1
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs
@@ -0,0 +1,88 @@
+use crate::util::slab::TransferStack;
+
+use loom::cell::UnsafeCell;
+use loom::sync::Arc;
+use loom::thread;
+
+#[test]
+fn transfer_stack() {
+    loom::model(|| {
+        let causalities = [UnsafeCell::new(None), UnsafeCell::new(None)];
+        let shared = Arc::new((causalities, TransferStack::new()));
+        let shared1 = shared.clone();
+        let shared2 = shared.clone();
+
+        // Spawn two threads that both try to push to the stack.
+        let t1 = thread::spawn(move || {
+            let (causalities, stack) = &*shared1;
+            stack.push(0, |prev| {
+                causalities[0].with_mut(|c| unsafe {
+                    *c = Some(prev);
+                });
+            });
+        });
+
+        let t2 = thread::spawn(move || {
+            let (causalities, stack) = &*shared2;
+            stack.push(1, |prev| {
+                causalities[1].with_mut(|c| unsafe {
+                    *c = Some(prev);
+                });
+            });
+        });
+
+        let (causalities, stack) = &*shared;
+
+        // Try to pop from the stack...
+        let mut idx = stack.pop_all();
+        while idx == None {
+            idx = stack.pop_all();
+            thread::yield_now();
+        }
+        let idx = idx.unwrap();
+
+        let saw_both = causalities[idx].with(|val| {
+            let val = unsafe { *val };
+            assert!(
+                val.is_some(),
+                "UnsafeCell write must happen-before index is pushed to the stack!",
+            );
+            // were there two entries in the stack? if so, check that
+            // both saw a write.
+            if let Some(c) = causalities.get(val.unwrap()) {
+                c.with(|val| {
+                    let val = unsafe { *val };
+                    assert!(
+                        val.is_some(),
+                        "UnsafeCell write must happen-before index is pushed to the stack!",
+                    );
+                });
+                true
+            } else {
+                false
+            }
+        });
+
+        // We only saw one push. Ensure that the other push happens too.
+        if !saw_both {
+            // Try to pop from the stack...
+            let mut idx = stack.pop_all();
+            while idx == None {
+                idx = stack.pop_all();
+                thread::yield_now();
+            }
+            let idx = idx.unwrap();
+
+            causalities[idx].with(|val| {
+                let val = unsafe { *val };
+                assert!(
+                    val.is_some(),
+                    "UnsafeCell write must happen-before index is pushed to the stack!",
+                );
+            });
+        }
+
+        t1.join().unwrap();
+        t2.join().unwrap();
+    });
+}
diff --git a/third_party/rust/tokio/src/util/slab/tests/mod.rs b/third_party/rust/tokio/src/util/slab/tests/mod.rs
new file mode 100644
index 0000000000..7f79354466
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/mod.rs
@@ -0,0 +1,2 @@
+mod loom_slab;
+mod loom_stack;
diff --git a/third_party/rust/tokio/src/util/try_lock.rs b/third_party/rust/tokio/src/util/try_lock.rs
new file mode 100644
index 0000000000..8b0edb4a87
--- /dev/null
+++ b/third_party/rust/tokio/src/util/try_lock.rs
@@ -0,0 +1,80 @@
+use crate::loom::sync::atomic::AtomicBool;
+
+use std::cell::UnsafeCell;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use std::sync::atomic::Ordering::SeqCst;
+
+pub(crate) struct TryLock<T> {
+    locked: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+pub(crate) struct LockGuard<'a, T> {
+    lock: &'a TryLock<T>,
+    _p: PhantomData<std::rc::Rc<()>>,
+}
+
+unsafe impl<T: Send> Send for TryLock<T> {}
+unsafe impl<T: Send> Sync for TryLock<T> {}
+
+unsafe impl<T: Sync> Sync for LockGuard<'_, T> {}
+
+macro_rules! new {
+    ($data:ident) => {
+        TryLock {
+            locked: AtomicBool::new(false),
+            data: UnsafeCell::new($data),
+        }
+    };
+}
+
+impl<T> TryLock<T> {
+    #[cfg(not(loom))]
+    /// Create a new `TryLock`
+    pub(crate) const fn new(data: T) -> TryLock<T> {
+        new!(data)
+    }
+
+    #[cfg(loom)]
+    /// Create a new `TryLock`
+    pub(crate) fn new(data: T) -> TryLock<T> {
+        new!(data)
+    }
+
+    /// Attempt to acquire the lock
+    pub(crate) fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+        if self
+            .locked
+            .compare_exchange(false, true, SeqCst, SeqCst)
+            .is_err()
+        {
+            return None;
+        }
+
+        Some(LockGuard {
+            lock: self,
+            _p: PhantomData,
+        })
+    }
+}
+
+impl<T> Deref for LockGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.lock.data.get() }
+    }
+}
+
+impl<T> DerefMut for LockGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.lock.data.get() }
+    }
+}
+
+impl<T> Drop for LockGuard<'_, T> {
+    fn drop(&mut self) {
+        self.lock.locked.store(false, SeqCst);
+    }
+}
diff --git a/third_party/rust/tokio/src/util/wake.rs b/third_party/rust/tokio/src/util/wake.rs
new file mode 100644
index 0000000000..e49f1e895d
--- /dev/null
+++ b/third_party/rust/tokio/src/util/wake.rs
@@ -0,0 +1,83 @@
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ops::Deref;
+use std::sync::Arc;
+use std::task::{RawWaker, RawWakerVTable, Waker};
+
+/// Simplified waking interface based on Arcs
+pub(crate) trait Wake: Send + Sync {
+    /// Wake by value
+    fn wake(self: Arc<Self>);
+
+    /// Wake by reference
+    fn wake_by_ref(arc_self: &Arc<Self>);
+}
+
+/// A `Waker` that is only valid for a given lifetime.
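+///
+/// The wrapped `Waker` is held in `ManuallyDrop` so that dropping the
+/// `WakerRef` does not drop the waker itself, and thus does not decrement
+/// the reference count of the `Arc` it borrows from.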
+#[derive(Debug)]
+pub(crate) struct WakerRef<'a> {
+    waker: ManuallyDrop<Waker>,
+    _p: PhantomData<&'a ()>,
+}
+
+impl Deref for WakerRef<'_> {
+    type Target = Waker;
+
+    fn deref(&self) -> &Waker {
+        &self.waker
+    }
+}
+
+/// Creates a reference to a `Waker` from a reference to `Arc<impl Wake>`.
+pub(crate) fn waker_ref<W: Wake>(wake: &Arc<W>) -> WakerRef<'_> {
+    let ptr = &**wake as *const _ as *const ();
+
+    let waker = unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::<W>())) };
+
+    WakerRef {
+        waker: ManuallyDrop::new(waker),
+        _p: PhantomData,
+    }
+}
+
+fn waker_vtable<W: Wake>() -> &'static RawWakerVTable {
+    &RawWakerVTable::new(
+        clone_arc_raw::<W>,
+        wake_arc_raw::<W>,
+        wake_by_ref_arc_raw::<W>,
+        drop_arc_raw::<W>,
+    )
+}
+
+unsafe fn inc_ref_count<T: Wake>(data: *const ()) {
+    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+    let arc = ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+
+    // Now increase refcount, but don't drop new refcount either
+    let arc_clone: ManuallyDrop<_> = arc.clone();
+
+    // Drop explicitly to avoid clippy warnings
+    drop(arc);
+    drop(arc_clone);
+}
+
+unsafe fn clone_arc_raw<T: Wake>(data: *const ()) -> RawWaker {
+    inc_ref_count::<T>(data);
+    RawWaker::new(data, waker_vtable::<T>())
+}
+
+unsafe fn wake_arc_raw<T: Wake>(data: *const ()) {
+    let arc: Arc<T> = Arc::from_raw(data as *const T);
+    Wake::wake(arc);
+}
+
+// used by `waker_ref`
+unsafe fn wake_by_ref_arc_raw<T: Wake>(data: *const ()) {
+    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+    let arc = ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+    Wake::wake_by_ref(&arc);
+}
+
+unsafe fn drop_arc_raw<T: Wake>(data: *const ()) {
+    drop(Arc::<T>::from_raw(data as *const T))
+}
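Taken together, `Wake` and `waker_ref` let the runtime hand a task out as a standard `Waker` without cloning the `Arc`. The sketch below illustrates the intended calling pattern only; it cannot be compiled against tokio's public API, since these items are crate-private, and `MyTask` is a hypothetical task type:

```rust
use std::sync::Arc;

struct MyTask;

impl Wake for MyTask {
    fn wake(self: Arc<Self>) {
        // A by-value wake can simply defer to the by-reference path.
        Self::wake_by_ref(&self);
    }

    fn wake_by_ref(_arc_self: &Arc<Self>) {
        // A real runtime would reschedule the task here.
    }
}

fn notify(task: &Arc<MyTask>) {
    // `waker_ref` borrows the Arc: no reference count is touched, and the
    // returned `WakerRef` guard must not outlive `task`.
    let waker = waker_ref(task);
    waker.wake_by_ref();
}
```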