path: root/third_party/rust/tokio/tests
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/tokio/tests
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/tokio/tests')
-rw-r--r--  third_party/rust/tokio/tests/_require_full.rs  2
-rw-r--r--  third_party/rust/tokio/tests/async_send_sync.rs  681
-rw-r--r--  third_party/rust/tokio/tests/buffered.rs  50
-rw-r--r--  third_party/rust/tokio/tests/fs.rs  20
-rw-r--r--  third_party/rust/tokio/tests/fs_copy.rs  39
-rw-r--r--  third_party/rust/tokio/tests/fs_dir.rs  87
-rw-r--r--  third_party/rust/tokio/tests/fs_file.rs  112
-rw-r--r--  third_party/rust/tokio/tests/fs_link.rs  68
-rw-r--r--  third_party/rust/tokio/tests/io_async_fd.rs  601
-rw-r--r--  third_party/rust/tokio/tests/io_async_read.rs  10
-rw-r--r--  third_party/rust/tokio/tests/io_buf_reader.rs  379
-rw-r--r--  third_party/rust/tokio/tests/io_buf_writer.rs  537
-rw-r--r--  third_party/rust/tokio/tests/io_chain.rs  16
-rw-r--r--  third_party/rust/tokio/tests/io_copy.rs  87
-rw-r--r--  third_party/rust/tokio/tests/io_copy_bidirectional.rs  128
-rw-r--r--  third_party/rust/tokio/tests/io_driver.rs  99
-rw-r--r--  third_party/rust/tokio/tests/io_driver_drop.rs  54
-rw-r--r--  third_party/rust/tokio/tests/io_fill_buf.rs  34
-rw-r--r--  third_party/rust/tokio/tests/io_lines.rs  19
-rw-r--r--  third_party/rust/tokio/tests/io_mem_stream.rs  121
-rw-r--r--  third_party/rust/tokio/tests/io_poll_aio.rs  375
-rw-r--r--  third_party/rust/tokio/tests/io_read.rs  59
-rw-r--r--  third_party/rust/tokio/tests/io_read_buf.rs  36
-rw-r--r--  third_party/rust/tokio/tests/io_read_exact.rs  15
-rw-r--r--  third_party/rust/tokio/tests/io_read_line.rs  107
-rw-r--r--  third_party/rust/tokio/tests/io_read_to_end.rs  78
-rw-r--r--  third_party/rust/tokio/tests/io_read_to_string.rs  63
-rw-r--r--  third_party/rust/tokio/tests/io_read_until.rs  74
-rw-r--r--  third_party/rust/tokio/tests/io_split.rs  79
-rw-r--r--  third_party/rust/tokio/tests/io_take.rs  61
-rw-r--r--  third_party/rust/tokio/tests/io_util_empty.rs  32
-rw-r--r--  third_party/rust/tokio/tests/io_write.rs  58
-rw-r--r--  third_party/rust/tokio/tests/io_write_all.rs  51
-rw-r--r--  third_party/rust/tokio/tests/io_write_all_buf.rs  96
-rw-r--r--  third_party/rust/tokio/tests/io_write_buf.rs  56
-rw-r--r--  third_party/rust/tokio/tests/io_write_int.rs  37
-rw-r--r--  third_party/rust/tokio/tests/join_handle_panic.rs  20
-rw-r--r--  third_party/rust/tokio/tests/macros_join.rs  82
-rw-r--r--  third_party/rust/tokio/tests/macros_pin.rs  21
-rw-r--r--  third_party/rust/tokio/tests/macros_select.rs  600
-rw-r--r--  third_party/rust/tokio/tests/macros_test.rs  72
-rw-r--r--  third_party/rust/tokio/tests/macros_try_join.rs  109
-rw-r--r--  third_party/rust/tokio/tests/named_pipe.rs  393
-rw-r--r--  third_party/rust/tokio/tests/net_bind_resource.rs  14
-rw-r--r--  third_party/rust/tokio/tests/net_lookup_host.rs  38
-rw-r--r--  third_party/rust/tokio/tests/no_rt.rs  41
-rw-r--r--  third_party/rust/tokio/tests/process_arg0.rs  13
-rw-r--r--  third_party/rust/tokio/tests/process_issue_2174.rs  45
-rw-r--r--  third_party/rust/tokio/tests/process_issue_42.rs  38
-rw-r--r--  third_party/rust/tokio/tests/process_kill_on_drop.rs  44
-rw-r--r--  third_party/rust/tokio/tests/process_raw_handle.rs  23
-rw-r--r--  third_party/rust/tokio/tests/process_smoke.rs  34
-rw-r--r--  third_party/rust/tokio/tests/rt_basic.rs  296
-rw-r--r--  third_party/rust/tokio/tests/rt_common.rs  1109
-rw-r--r--  third_party/rust/tokio/tests/rt_handle_block_on.rs  533
-rw-r--r--  third_party/rust/tokio/tests/rt_metrics.rs  385
-rw-r--r--  third_party/rust/tokio/tests/rt_threaded.rs  544
-rw-r--r--  third_party/rust/tokio/tests/signal_ctrl_c.rs  30
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_recv.rs  22
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_rt.rs  44
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_signal.rs  26
-rw-r--r--  third_party/rust/tokio/tests/signal_multi_rt.rs  54
-rw-r--r--  third_party/rust/tokio/tests/signal_no_rt.rs  11
-rw-r--r--  third_party/rust/tokio/tests/signal_notify_both.rs  23
-rw-r--r--  third_party/rust/tokio/tests/signal_twice.rs  22
-rw-r--r--  third_party/rust/tokio/tests/signal_usr1.rs  23
-rw-r--r--  third_party/rust/tokio/tests/support/io_vec.rs  45
-rw-r--r--  third_party/rust/tokio/tests/support/mpsc_stream.rs  42
-rw-r--r--  third_party/rust/tokio/tests/support/signal.rs  7
-rw-r--r--  third_party/rust/tokio/tests/sync_barrier.rs  99
-rw-r--r--  third_party/rust/tokio/tests/sync_broadcast.rs  462
-rw-r--r--  third_party/rust/tokio/tests/sync_errors.rs  30
-rw-r--r--  third_party/rust/tokio/tests/sync_mpsc.rs  659
-rw-r--r--  third_party/rust/tokio/tests/sync_mutex.rs  178
-rw-r--r--  third_party/rust/tokio/tests/sync_mutex_owned.rs  136
-rw-r--r--  third_party/rust/tokio/tests/sync_notify.rs  156
-rw-r--r--  third_party/rust/tokio/tests/sync_once_cell.rs  274
-rw-r--r--  third_party/rust/tokio/tests/sync_oneshot.rs  279
-rw-r--r--  third_party/rust/tokio/tests/sync_rwlock.rs  281
-rw-r--r--  third_party/rust/tokio/tests/sync_semaphore.rs  102
-rw-r--r--  third_party/rust/tokio/tests/sync_semaphore_owned.rs  113
-rw-r--r--  third_party/rust/tokio/tests/sync_watch.rs  213
-rw-r--r--  third_party/rust/tokio/tests/task_abort.rs  224
-rw-r--r--  third_party/rust/tokio/tests/task_blocking.rs  228
-rw-r--r--  third_party/rust/tokio/tests/task_builder.rs  67
-rw-r--r--  third_party/rust/tokio/tests/task_join_set.rs  192
-rw-r--r--  third_party/rust/tokio/tests/task_local.rs  33
-rw-r--r--  third_party/rust/tokio/tests/task_local_set.rs  525
-rw-r--r--  third_party/rust/tokio/tests/tcp_accept.rs  157
-rw-r--r--  third_party/rust/tokio/tests/tcp_connect.rs  229
-rw-r--r--  third_party/rust/tokio/tests/tcp_echo.rs  42
-rw-r--r--  third_party/rust/tokio/tests/tcp_into_split.rs  131
-rw-r--r--  third_party/rust/tokio/tests/tcp_into_std.rs  45
-rw-r--r--  third_party/rust/tokio/tests/tcp_peek.rs  29
-rw-r--r--  third_party/rust/tokio/tests/tcp_shutdown.rs  28
-rw-r--r--  third_party/rust/tokio/tests/tcp_socket.rs  74
-rw-r--r--  third_party/rust/tokio/tests/tcp_split.rs  42
-rw-r--r--  third_party/rust/tokio/tests/tcp_stream.rs  359
-rw-r--r--  third_party/rust/tokio/tests/test_clock.rs  50
-rw-r--r--  third_party/rust/tokio/tests/time_interval.rs  211
-rw-r--r--  third_party/rust/tokio/tests/time_pause.rs  326
-rw-r--r--  third_party/rust/tokio/tests/time_rt.rs  89
-rw-r--r--  third_party/rust/tokio/tests/time_sleep.rs  356
-rw-r--r--  third_party/rust/tokio/tests/time_timeout.rs  150
-rw-r--r--  third_party/rust/tokio/tests/udp.rs  486
-rw-r--r--  third_party/rust/tokio/tests/uds_cred.rs  26
-rw-r--r--  third_party/rust/tokio/tests/uds_datagram.rs  377
-rw-r--r--  third_party/rust/tokio/tests/uds_split.rs  43
-rw-r--r--  third_party/rust/tokio/tests/uds_stream.rs  411
-rw-r--r--  third_party/rust/tokio/tests/unwindsafe.rs  37
110 files changed, 17173 insertions, 0 deletions
diff --git a/third_party/rust/tokio/tests/_require_full.rs b/third_party/rust/tokio/tests/_require_full.rs
new file mode 100644
index 0000000000..3abda2629c
--- /dev/null
+++ b/third_party/rust/tokio/tests/_require_full.rs
@@ -0,0 +1,2 @@
+#![cfg(not(any(feature = "full", target_arch = "wasm32")))]
+compile_error!("run main Tokio tests with `--features full`");
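
Note: the two lines above act as a build-time guard. The inner cfg keeps the file in the build only when the `full` feature is missing (and the target is not wasm32), and in that case `compile_error!` aborts compilation with a readable hint instead of letting dozens of test files fail with missing-API errors. A sketch of the same pattern for a hypothetical `extras` feature (illustrative names, not taken from the vendored code):

    // Guard file sketch for a hypothetical `extras` feature.
    // With `--features extras` the cfg below is false, the whole file is
    // compiled away, and no error is emitted; without the feature, the
    // compile_error! fires with a clear instruction.
    #![cfg(not(feature = "extras"))]

    compile_error!("run these tests with `--features extras`");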
diff --git a/third_party/rust/tokio/tests/async_send_sync.rs b/third_party/rust/tokio/tests/async_send_sync.rs
new file mode 100644
index 0000000000..ea4445a27f
--- /dev/null
+++ b/third_party/rust/tokio/tests/async_send_sync.rs
@@ -0,0 +1,681 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![allow(clippy::type_complexity, clippy::diverging_sub_expression)]
+
+use std::cell::Cell;
+use std::future::Future;
+use std::io::SeekFrom;
+use std::net::SocketAddr;
+use std::pin::Pin;
+use std::rc::Rc;
+use tokio::net::TcpStream;
+use tokio::time::{Duration, Instant};
+
+// The names of these structs behave better when sorted.
+// Send: Yes, Sync: Yes
+#[derive(Clone)]
+struct YY {}
+
+// Send: Yes, Sync: No
+#[derive(Clone)]
+struct YN {
+ _value: Cell<u8>,
+}
+
+// Send: No, Sync: No
+#[derive(Clone)]
+struct NN {
+ _value: Rc<u8>,
+}
+
+#[allow(dead_code)]
+type BoxFutureSync<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + Sync>>;
+#[allow(dead_code)]
+type BoxFutureSend<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send>>;
+#[allow(dead_code)]
+type BoxFuture<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T>>>;
+
+#[allow(dead_code)]
+type BoxAsyncRead = std::pin::Pin<Box<dyn tokio::io::AsyncBufRead + Send + Sync>>;
+#[allow(dead_code)]
+type BoxAsyncSeek = std::pin::Pin<Box<dyn tokio::io::AsyncSeek + Send + Sync>>;
+#[allow(dead_code)]
+type BoxAsyncWrite = std::pin::Pin<Box<dyn tokio::io::AsyncWrite + Send + Sync>>;
+
+#[allow(dead_code)]
+fn require_send<T: Send>(_t: &T) {}
+#[allow(dead_code)]
+fn require_sync<T: Sync>(_t: &T) {}
+#[allow(dead_code)]
+fn require_unpin<T: Unpin>(_t: &T) {}
+
+#[allow(dead_code)]
+struct Invalid;
+
+trait AmbiguousIfSend<A> {
+ fn some_item(&self) {}
+}
+impl<T: ?Sized> AmbiguousIfSend<()> for T {}
+impl<T: ?Sized + Send> AmbiguousIfSend<Invalid> for T {}
+
+trait AmbiguousIfSync<A> {
+ fn some_item(&self) {}
+}
+impl<T: ?Sized> AmbiguousIfSync<()> for T {}
+impl<T: ?Sized + Sync> AmbiguousIfSync<Invalid> for T {}
+
+trait AmbiguousIfUnpin<A> {
+ fn some_item(&self) {}
+}
+impl<T: ?Sized> AmbiguousIfUnpin<()> for T {}
+impl<T: ?Sized + Unpin> AmbiguousIfUnpin<Invalid> for T {}
+
+macro_rules! into_todo {
+ ($typ:ty) => {{
+ let x: $typ = todo!();
+ x
+ }};
+}
+
+macro_rules! async_assert_fn_send {
+ (Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
+ require_send(&$value);
+ };
+ (!Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
+ AmbiguousIfSend::some_item(&$value);
+ };
+}
+macro_rules! async_assert_fn_sync {
+ ($(!)?Send & Sync & $(!)?Unpin, $value:expr) => {
+ require_sync(&$value);
+ };
+ ($(!)?Send & !Sync & $(!)?Unpin, $value:expr) => {
+ AmbiguousIfSync::some_item(&$value);
+ };
+}
+macro_rules! async_assert_fn_unpin {
+ ($(!)?Send & $(!)?Sync & Unpin, $value:expr) => {
+ require_unpin(&$value);
+ };
+ ($(!)?Send & $(!)?Sync & !Unpin, $value:expr) => {
+ AmbiguousIfUnpin::some_item(&$value);
+ };
+}
+
+macro_rules! async_assert_fn {
+ ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): $($tok:tt)*) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
+ async_assert_fn_send!($($tok)*, f);
+ async_assert_fn_sync!($($tok)*, f);
+ async_assert_fn_unpin!($($tok)*, f);
+ };
+ };
+}
+macro_rules! assert_value {
+ ($type:ty: $($tok:tt)*) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f: $type = todo!();
+ async_assert_fn_send!($($tok)*, f);
+ async_assert_fn_sync!($($tok)*, f);
+ async_assert_fn_unpin!($($tok)*, f);
+ };
+ };
+}
+
+assert_value!(tokio::fs::DirBuilder: Send & Sync & Unpin);
+assert_value!(tokio::fs::DirEntry: Send & Sync & Unpin);
+assert_value!(tokio::fs::File: Send & Sync & Unpin);
+assert_value!(tokio::fs::OpenOptions: Send & Sync & Unpin);
+assert_value!(tokio::fs::ReadDir: Send & Sync & Unpin);
+
+async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::metadata(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::read(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::read_link(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::write(&str, Vec<u8>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::DirBuilder::create(_, &str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::open(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::create(&str): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync & !Unpin);
+
+assert_value!(tokio::net::TcpListener: Send & Sync & Unpin);
+assert_value!(tokio::net::TcpSocket: Send & Sync & Unpin);
+assert_value!(tokio::net::TcpStream: Send & Sync & Unpin);
+assert_value!(tokio::net::UdpSocket: Send & Sync & Unpin);
+assert_value!(tokio::net::tcp::OwnedReadHalf: Send & Sync & Unpin);
+assert_value!(tokio::net::tcp::OwnedWriteHalf: Send & Sync & Unpin);
+assert_value!(tokio::net::tcp::ReadHalf<'_>: Send & Sync & Unpin);
+assert_value!(tokio::net::tcp::ReuniteError: Send & Sync & Unpin);
+assert_value!(tokio::net::tcp::WriteHalf<'_>: Send & Sync & Unpin);
+async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpStream::readable(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::TcpStream::writable(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::peek_from(_, &mut [u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::readable(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::UdpSocket::writable(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync & !Unpin);
+async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync & !Unpin);
+
+#[cfg(unix)]
+mod unix_datagram {
+ use super::*;
+ use tokio::net::*;
+ assert_value!(UnixDatagram: Send & Sync & Unpin);
+ assert_value!(UnixListener: Send & Sync & Unpin);
+ assert_value!(UnixStream: Send & Sync & Unpin);
+ assert_value!(unix::OwnedReadHalf: Send & Sync & Unpin);
+ assert_value!(unix::OwnedWriteHalf: Send & Sync & Unpin);
+ assert_value!(unix::ReadHalf<'_>: Send & Sync & Unpin);
+ assert_value!(unix::ReuniteError: Send & Sync & Unpin);
+ assert_value!(unix::SocketAddr: Send & Sync & Unpin);
+ assert_value!(unix::UCred: Send & Sync & Unpin);
+ assert_value!(unix::WriteHalf<'_>: Send & Sync & Unpin);
+ async_assert_fn!(UnixDatagram::readable(_): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::recv(_, &mut [u8]): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::send(_, &[u8]): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::send_to(_, &[u8], &str): Send & Sync & !Unpin);
+ async_assert_fn!(UnixDatagram::writable(_): Send & Sync & !Unpin);
+ async_assert_fn!(UnixListener::accept(_): Send & Sync & !Unpin);
+ async_assert_fn!(UnixStream::connect(&str): Send & Sync & !Unpin);
+ async_assert_fn!(UnixStream::readable(_): Send & Sync & !Unpin);
+ async_assert_fn!(UnixStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+ async_assert_fn!(UnixStream::writable(_): Send & Sync & !Unpin);
+}
+
+#[cfg(windows)]
+mod windows_named_pipe {
+ use super::*;
+ use tokio::net::windows::named_pipe::*;
+ assert_value!(ClientOptions: Send & Sync & Unpin);
+ assert_value!(NamedPipeClient: Send & Sync & Unpin);
+ assert_value!(NamedPipeServer: Send & Sync & Unpin);
+ assert_value!(PipeEnd: Send & Sync & Unpin);
+ assert_value!(PipeInfo: Send & Sync & Unpin);
+ assert_value!(PipeMode: Send & Sync & Unpin);
+ assert_value!(ServerOptions: Send & Sync & Unpin);
+ async_assert_fn!(NamedPipeClient::readable(_): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeClient::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeClient::writable(_): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeServer::connect(_): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeServer::readable(_): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeServer::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
+ async_assert_fn!(NamedPipeServer::writable(_): Send & Sync & !Unpin);
+}
+
+assert_value!(tokio::process::Child: Send & Sync & Unpin);
+assert_value!(tokio::process::ChildStderr: Send & Sync & Unpin);
+assert_value!(tokio::process::ChildStdin: Send & Sync & Unpin);
+assert_value!(tokio::process::ChildStdout: Send & Sync & Unpin);
+assert_value!(tokio::process::Command: Send & Sync & Unpin);
+async_assert_fn!(tokio::process::Child::kill(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::process::Child::wait(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync & !Unpin);
+
+async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync & !Unpin);
+#[cfg(unix)]
+mod unix_signal {
+ use super::*;
+ assert_value!(tokio::signal::unix::Signal: Send & Sync & Unpin);
+ assert_value!(tokio::signal::unix::SignalKind: Send & Sync & Unpin);
+ async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync & !Unpin);
+}
+#[cfg(windows)]
+mod windows_signal {
+ use super::*;
+ assert_value!(tokio::signal::windows::CtrlC: Send & Sync & Unpin);
+ assert_value!(tokio::signal::windows::CtrlBreak: Send & Sync & Unpin);
+ async_assert_fn!(tokio::signal::windows::CtrlC::recv(_): Send & Sync & !Unpin);
+ async_assert_fn!(tokio::signal::windows::CtrlBreak::recv(_): Send & Sync & !Unpin);
+}
+
+assert_value!(tokio::sync::AcquireError: Send & Sync & Unpin);
+assert_value!(tokio::sync::Barrier: Send & Sync & Unpin);
+assert_value!(tokio::sync::BarrierWaitResult: Send & Sync & Unpin);
+assert_value!(tokio::sync::MappedMutexGuard<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::MappedMutexGuard<'_, YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::MappedMutexGuard<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::Mutex<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::Mutex<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::Mutex<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::MutexGuard<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::MutexGuard<'_, YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::MutexGuard<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::Notify: Send & Sync & Unpin);
+assert_value!(tokio::sync::OnceCell<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OnceCell<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::OnceCell<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::OwnedMutexGuard<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedMutexGuard<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedMutexGuard<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockReadGuard<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockReadGuard<YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockReadGuard<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockWriteGuard<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockWriteGuard<YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::OwnedRwLockWriteGuard<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::OwnedSemaphorePermit: Send & Sync & Unpin);
+assert_value!(tokio::sync::RwLock<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLock<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLock<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::RwLockReadGuard<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockReadGuard<'_, YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockReadGuard<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::RwLockWriteGuard<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockWriteGuard<'_, YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::RwLockWriteGuard<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::Semaphore: Send & Sync & Unpin);
+assert_value!(tokio::sync::SemaphorePermit<'_>: Send & Sync & Unpin);
+assert_value!(tokio::sync::TryAcquireError: Send & Sync & Unpin);
+assert_value!(tokio::sync::TryLockError: Send & Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Receiver<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Receiver<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Receiver<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Sender<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Sender<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::broadcast::Sender<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::futures::Notified<'_>: Send & Sync & !Unpin);
+assert_value!(tokio::sync::mpsc::OwnedPermit<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::OwnedPermit<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::OwnedPermit<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Permit<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Permit<'_, YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Permit<'_, YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Receiver<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Receiver<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Receiver<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Sender<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Sender<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::Sender<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedReceiver<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedReceiver<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedReceiver<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedSender<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedSender<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::UnboundedSender<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendError<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendError<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendError<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendTimeoutError<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::TrySendError<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::TrySendError<YN>: Send & !Sync & Unpin);
+assert_value!(tokio::sync::mpsc::error::TrySendError<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Receiver<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Receiver<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Receiver<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Sender<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Sender<YN>: Send & Sync & Unpin);
+assert_value!(tokio::sync::oneshot::Sender<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::watch::Receiver<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Receiver<YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Receiver<YY>: Send & Sync & Unpin);
+assert_value!(tokio::sync::watch::Ref<'_, NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Ref<'_, YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Ref<'_, YY>: !Send & Sync & Unpin);
+assert_value!(tokio::sync::watch::Sender<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Sender<YN>: !Send & !Sync & Unpin);
+assert_value!(tokio::sync::watch::Sender<YY>: Send & Sync & Unpin);
+async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<NN>::lock(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<NN>::lock_owned(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<YN>::lock(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<YN>::lock_owned(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<YY>::lock(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Mutex<YY>::lock_owned(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Notify::notified(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send + Sync>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send + Sync>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send + Sync>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send + Sync>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send + Sync>>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send>>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send + Sync>>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send>>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>>>>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<NN>::read(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<NN>::write(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<YN>::read(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<YN>::write(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<YY>::read(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::RwLock<YY>::write(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Semaphore::acquire_many(_, u32): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Semaphore::acquire_many_owned(_, u32): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::Semaphore::acquire_owned(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::broadcast::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::broadcast::Receiver<YN>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::broadcast::Receiver<YY>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Receiver<YN>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Receiver<YY>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve_owned(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send(_, NN): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send_timeout(_, NN, Duration): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YN>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve_owned(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send(_, YN): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send_timeout(_, YN, Duration): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YY>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve_owned(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send(_, YY): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send_timeout(_, YY, Duration): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<NN>::recv(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YN>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YY>::recv(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedSender<NN>::closed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YN>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YY>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::oneshot::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::oneshot::Sender<YN>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::oneshot::Sender<YY>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Receiver<NN>::changed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Receiver<YN>::changed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Receiver<YY>::changed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Sender<YN>::closed(_): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::sync::watch::Sender<YY>::closed(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::task::unconstrained(BoxFuture<()>): !Send & !Sync & Unpin);
+async_assert_fn!(tokio::task::unconstrained(BoxFutureSend<()>): Send & !Sync & Unpin);
+async_assert_fn!(tokio::task::unconstrained(BoxFutureSync<()>): Send & Sync & Unpin);
+assert_value!(tokio::task::JoinError: Send & Sync & Unpin);
+assert_value!(tokio::task::JoinHandle<NN>: !Send & !Sync & Unpin);
+assert_value!(tokio::task::JoinHandle<YN>: Send & Sync & Unpin);
+assert_value!(tokio::task::JoinHandle<YY>: Send & Sync & Unpin);
+#[cfg(tokio_unstable)]
+mod unstable {
+ use super::*;
+ async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::join_one(_): Send & Sync & !Unpin);
+ async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::shutdown(_): Send & Sync & !Unpin);
+ async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::join_one(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::shutdown(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(tokio::task::JoinSet<u32>::join_one(_): Send & Sync & !Unpin);
+ async_assert_fn!(tokio::task::JoinSet<u32>::shutdown(_): Send & Sync & !Unpin);
+ assert_value!(tokio::task::JoinSet<YY>: Send & Sync & Unpin);
+ assert_value!(tokio::task::JoinSet<YN>: Send & Sync & Unpin);
+ assert_value!(tokio::task::JoinSet<NN>: !Send & !Sync & Unpin);
+ assert_value!(tokio::task::LocalSet: !Send & !Sync & Unpin);
+}
+
+assert_value!(tokio::runtime::Builder: Send & Sync & Unpin);
+assert_value!(tokio::runtime::EnterGuard<'_>: Send & Sync & Unpin);
+assert_value!(tokio::runtime::Handle: Send & Sync & Unpin);
+assert_value!(tokio::runtime::Runtime: Send & Sync & Unpin);
+
+assert_value!(tokio::time::Interval: Send & Sync & Unpin);
+assert_value!(tokio::time::Instant: Send & Sync & Unpin);
+assert_value!(tokio::time::Sleep: Send & Sync & !Unpin);
+assert_value!(tokio::time::Timeout<BoxFutureSync<()>>: Send & Sync & !Unpin);
+assert_value!(tokio::time::Timeout<BoxFutureSend<()>>: Send & !Sync & !Unpin);
+assert_value!(tokio::time::Timeout<BoxFuture<()>>: !Send & !Sync & !Unpin);
+assert_value!(tokio::time::error::Elapsed: Send & Sync & Unpin);
+assert_value!(tokio::time::error::Error: Send & Sync & Unpin);
+async_assert_fn!(tokio::time::advance(Duration): Send & Sync & !Unpin);
+async_assert_fn!(tokio::time::sleep(Duration): Send & Sync & !Unpin);
+async_assert_fn!(tokio::time::sleep_until(Instant): Send & Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync & !Unpin);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync & !Unpin);
+async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync & !Unpin);
+
+assert_value!(tokio::io::BufReader<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::BufStream<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::BufWriter<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::DuplexStream: Send & Sync & Unpin);
+assert_value!(tokio::io::Empty: Send & Sync & Unpin);
+assert_value!(tokio::io::Interest: Send & Sync & Unpin);
+assert_value!(tokio::io::Lines<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::ReadBuf<'_>: Send & Sync & Unpin);
+assert_value!(tokio::io::ReadHalf<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::Ready: Send & Sync & Unpin);
+assert_value!(tokio::io::Repeat: Send & Sync & Unpin);
+assert_value!(tokio::io::Sink: Send & Sync & Unpin);
+assert_value!(tokio::io::Split<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::Stderr: Send & Sync & Unpin);
+assert_value!(tokio::io::Stdin: Send & Sync & Unpin);
+assert_value!(tokio::io::Stdout: Send & Sync & Unpin);
+assert_value!(tokio::io::Take<TcpStream>: Send & Sync & Unpin);
+assert_value!(tokio::io::WriteHalf<TcpStream>: Send & Sync & Unpin);
+async_assert_fn!(tokio::io::copy(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::copy_bidirectional(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::copy_buf(&mut tokio::io::BufReader<TcpStream>, &mut TcpStream): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::empty(): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::repeat(u8): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::sink(): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::split(TcpStream): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::stderr(): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::stdin(): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::stdout(): Send & Sync & Unpin);
+async_assert_fn!(tokio::io::Split<tokio::io::BufReader<TcpStream>>::next_segment(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::Lines<tokio::io::BufReader<TcpStream>>::next_line(_): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncBufReadExt::read_until(&mut BoxAsyncRead, u8, &mut Vec<u8>): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::AsyncBufReadExt::read_line(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::AsyncBufReadExt::fill_buf(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_buf(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::AsyncReadExt::read_exact(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u8(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i8(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u16(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i16(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u32(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i32(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u64(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i64(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u128(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i128(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_f32(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_f64(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_u128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_i128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_f32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_f64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncReadExt::read_to_end(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::AsyncReadExt::read_to_string(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::AsyncSeekExt::seek(&mut BoxAsyncSeek, SeekFrom): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncSeekExt::stream_position(&mut BoxAsyncSeek): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncWriteExt::write(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_vectored(&mut BoxAsyncWrite, _): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
+ & Sync
+ & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_all_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
+ & Sync
+ & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_all(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::AsyncWriteExt::write_u8(&mut BoxAsyncWrite, u8): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncWriteExt::write_i8(&mut BoxAsyncWrite, i8): Send & Sync & !Unpin);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u16(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i16(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u32(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i32(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u64(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i64(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u128(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i128(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_f32(&mut BoxAsyncWrite, f32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_f64(&mut BoxAsyncWrite, f64): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u16_le(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i16_le(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u32_le(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i32_le(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u64_le(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i64_le(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_u128_le(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_i128_le(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_f32_le(&mut BoxAsyncWrite, f32): Send & Sync & !Unpin
+);
+async_assert_fn!(
+ tokio::io::AsyncWriteExt::write_f64_le(&mut BoxAsyncWrite, f64): Send & Sync & !Unpin
+);
+async_assert_fn!(tokio::io::AsyncWriteExt::flush(&mut BoxAsyncWrite): Send & Sync & !Unpin);
+async_assert_fn!(tokio::io::AsyncWriteExt::shutdown(&mut BoxAsyncWrite): Send & Sync & !Unpin);
+
+#[cfg(unix)]
+mod unix_asyncfd {
+ use super::*;
+ use tokio::io::unix::*;
+
+ struct ImplsFd<T> {
+ _t: T,
+ }
+ impl<T> std::os::unix::io::AsRawFd for ImplsFd<T> {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ unreachable!()
+ }
+ }
+
+ assert_value!(AsyncFd<ImplsFd<YY>>: Send & Sync & Unpin);
+ assert_value!(AsyncFd<ImplsFd<YN>>: Send & !Sync & Unpin);
+ assert_value!(AsyncFd<ImplsFd<NN>>: !Send & !Sync & Unpin);
+ assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
+ assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YN>>: !Send & !Sync & Unpin);
+ assert_value!(AsyncFdReadyGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
+ assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
+ assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YN>>: Send & !Sync & Unpin);
+ assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
+ assert_value!(TryIoError: Send & Sync & Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable(_): Send & Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable_mut(_): Send & Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable(_): Send & Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable_mut(_): Send & Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable_mut(_): Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable_mut(_): Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable_mut(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable(_): !Send & !Sync & !Unpin);
+ async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable_mut(_): !Send & !Sync & !Unpin);
+}
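
Note: everything in async_send_sync.rs is checked at compile time. `assert_value!` and `async_assert_fn!` expand to const items that are never executed, and the negative cases (`!Send`, `!Sync`, `!Unpin`) lean on the trick that two blanket impls make a trait-method call ambiguous exactly when the extra bound holds. A self-contained sketch of that trick, mirroring the `AmbiguousIfSend` helper above (the `main` function and the `Rc` example are illustrative additions, not vendored code):

    // Sketch of the ambiguity trick used by async_send_sync.rs.
    // Every type gets `some_item` through the first blanket impl, but Send
    // types also get it through the second, so the call only resolves -- and
    // the file only compiles -- when the type is NOT Send.
    use std::rc::Rc;

    #[allow(dead_code)]
    struct Invalid;

    trait AmbiguousIfSend<A> {
        fn some_item(&self) {}
    }
    impl<T: ?Sized> AmbiguousIfSend<()> for T {}
    impl<T: ?Sized + Send> AmbiguousIfSend<Invalid> for T {}

    fn main() {
        // Rc<u8> is !Send, so only the first impl applies and this compiles.
        let not_send = Rc::new(0u8);
        AmbiguousIfSend::some_item(&not_send);

        // u8 is Send, so both impls apply; uncommenting the line below makes
        // the marker type parameter ambiguous and the sketch fails to build:
        // AmbiguousIfSend::some_item(&0u8);
    }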
diff --git a/third_party/rust/tokio/tests/buffered.rs b/third_party/rust/tokio/tests/buffered.rs
new file mode 100644
index 0000000000..98b6d5f312
--- /dev/null
+++ b/third_party/rust/tokio/tests/buffered.rs
@@ -0,0 +1,50 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio_test::assert_ok;
+
+use std::io::prelude::*;
+use std::net::TcpStream;
+use std::thread;
+
+#[tokio::test]
+async fn echo_server() {
+ const N: usize = 1024;
+
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ let msg = "foo bar baz";
+
+ let t = thread::spawn(move || {
+ let mut s = assert_ok!(TcpStream::connect(&addr));
+
+ let t2 = thread::spawn(move || {
+ let mut s = assert_ok!(TcpStream::connect(&addr));
+ let mut b = vec![0; msg.len() * N];
+ assert_ok!(s.read_exact(&mut b));
+ b
+ });
+
+ let mut expected = Vec::<u8>::new();
+ for _i in 0..N {
+ expected.extend(msg.as_bytes());
+ let res = assert_ok!(s.write(msg.as_bytes()));
+ assert_eq!(res, msg.len());
+ }
+
+ (expected, t2)
+ });
+
+ let (mut a, _) = assert_ok!(srv.accept().await);
+ let (mut b, _) = assert_ok!(srv.accept().await);
+
+ let n = assert_ok!(tokio::io::copy(&mut a, &mut b).await);
+
+ let (expected, t2) = t.join().unwrap();
+ let actual = t2.join().unwrap();
+
+ assert!(expected == actual);
+ assert_eq!(n, msg.len() as u64 * 1024);
+}
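
Note: the echo_server test above drives the echo through two separately accepted connections (copying from the first into the second) so that plain std TcpStream clients can run on threads. For reference, a minimal sketch of the more common single-connection echo shape built from `TcpStream::split` and `tokio::io::copy` (an assumed example, not one of the vendored tests):

    use tokio::io;
    use tokio::net::TcpListener;

    // Accept one connection and echo everything it sends back to it,
    // returning the number of bytes copied once the peer closes its write end.
    async fn echo_once(listener: &TcpListener) -> io::Result<u64> {
        let (mut socket, _peer) = listener.accept().await?;
        let (mut rd, mut wr) = socket.split();
        io::copy(&mut rd, &mut wr).await
    }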
diff --git a/third_party/rust/tokio/tests/fs.rs b/third_party/rust/tokio/tests/fs.rs
new file mode 100644
index 0000000000..13c44c08d6
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs.rs
@@ -0,0 +1,20 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn path_read_write() {
+ let temp = tempdir();
+ let dir = temp.path();
+
+ assert_ok!(fs::write(dir.join("bar"), b"bytes").await);
+ let out = assert_ok!(fs::read(dir.join("bar")).await);
+
+ assert_eq!(out, b"bytes");
+}
+
+fn tempdir() -> tempfile::TempDir {
+ tempfile::tempdir().unwrap()
+}
diff --git a/third_party/rust/tokio/tests/fs_copy.rs b/third_party/rust/tokio/tests/fs_copy.rs
new file mode 100644
index 0000000000..8d1632013e
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_copy.rs
@@ -0,0 +1,39 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tempfile::tempdir;
+use tokio::fs;
+
+#[tokio::test]
+async fn copy() {
+ let dir = tempdir().unwrap();
+
+ let source_path = dir.path().join("foo.txt");
+ let dest_path = dir.path().join("bar.txt");
+
+ fs::write(&source_path, b"Hello File!").await.unwrap();
+ fs::copy(&source_path, &dest_path).await.unwrap();
+
+ let from = fs::read(&source_path).await.unwrap();
+ let to = fs::read(&dest_path).await.unwrap();
+
+ assert_eq!(from, to);
+}
+
+#[tokio::test]
+async fn copy_permissions() {
+ let dir = tempdir().unwrap();
+ let from_path = dir.path().join("foo.txt");
+ let to_path = dir.path().join("bar.txt");
+
+ let from = tokio::fs::File::create(&from_path).await.unwrap();
+ let mut from_perms = from.metadata().await.unwrap().permissions();
+ from_perms.set_readonly(true);
+ from.set_permissions(from_perms.clone()).await.unwrap();
+
+ tokio::fs::copy(from_path, &to_path).await.unwrap();
+
+ let to_perms = tokio::fs::metadata(to_path).await.unwrap().permissions();
+
+ assert_eq!(from_perms, to_perms);
+}
diff --git a/third_party/rust/tokio/tests/fs_dir.rs b/third_party/rust/tokio/tests/fs_dir.rs
new file mode 100644
index 0000000000..21efe8c0ee
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_dir.rs
@@ -0,0 +1,87 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+use tokio_test::{assert_err, assert_ok};
+
+use std::sync::{Arc, Mutex};
+use tempfile::tempdir;
+
+#[tokio::test]
+async fn create_dir() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo");
+ let new_dir_2 = new_dir.clone();
+
+ assert_ok!(fs::create_dir(new_dir).await);
+
+ assert!(new_dir_2.is_dir());
+}
+
+#[tokio::test]
+async fn create_all() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo").join("bar");
+ let new_dir_2 = new_dir.clone();
+
+ assert_ok!(fs::create_dir_all(new_dir).await);
+ assert!(new_dir_2.is_dir());
+}
+
+#[tokio::test]
+async fn build_dir() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo").join("bar");
+ let new_dir_2 = new_dir.clone();
+
+ assert_ok!(fs::DirBuilder::new().recursive(true).create(new_dir).await);
+
+ assert!(new_dir_2.is_dir());
+ assert_err!(
+ fs::DirBuilder::new()
+ .recursive(false)
+ .create(new_dir_2)
+ .await
+ );
+}
+
+#[tokio::test]
+async fn remove() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo");
+ let new_dir_2 = new_dir.clone();
+
+ std::fs::create_dir(new_dir.clone()).unwrap();
+
+ assert_ok!(fs::remove_dir(new_dir).await);
+ assert!(!new_dir_2.exists());
+}
+
+#[tokio::test]
+async fn read_inherent() {
+ let base_dir = tempdir().unwrap();
+
+ let p = base_dir.path();
+ std::fs::create_dir(p.join("aa")).unwrap();
+ std::fs::create_dir(p.join("bb")).unwrap();
+ std::fs::create_dir(p.join("cc")).unwrap();
+
+ let files = Arc::new(Mutex::new(Vec::new()));
+
+ let f = files.clone();
+ let p = p.to_path_buf();
+
+ let mut entries = fs::read_dir(p).await.unwrap();
+
+ while let Some(e) = assert_ok!(entries.next_entry().await) {
+ let s = e.file_name().to_str().unwrap().to_string();
+ f.lock().unwrap().push(s);
+ }
+
+ let mut files = files.lock().unwrap();
+ files.sort(); // because the order is not guaranteed
+ assert_eq!(
+ *files,
+ vec!["aa".to_string(), "bb".to_string(), "cc".to_string()]
+ );
+}
diff --git a/third_party/rust/tokio/tests/fs_file.rs b/third_party/rust/tokio/tests/fs_file.rs
new file mode 100644
index 0000000000..f645e61aed
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_file.rs
@@ -0,0 +1,112 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::prelude::*;
+use tempfile::NamedTempFile;
+use tokio::fs::File;
+use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom};
+use tokio_test::task;
+
+const HELLO: &[u8] = b"hello world...";
+
+#[tokio::test]
+async fn basic_read() {
+ let mut tempfile = tempfile();
+ tempfile.write_all(HELLO).unwrap();
+
+ let mut file = File::open(tempfile.path()).await.unwrap();
+
+ let mut buf = [0; 1024];
+ let n = file.read(&mut buf).await.unwrap();
+
+ assert_eq!(n, HELLO.len());
+ assert_eq!(&buf[..n], HELLO);
+}
+
+#[tokio::test]
+async fn basic_write() {
+ let tempfile = tempfile();
+
+ let mut file = File::create(tempfile.path()).await.unwrap();
+
+ file.write_all(HELLO).await.unwrap();
+ file.flush().await.unwrap();
+
+ let file = std::fs::read(tempfile.path()).unwrap();
+ assert_eq!(file, HELLO);
+}
+
+#[tokio::test]
+async fn basic_write_and_shutdown() {
+ let tempfile = tempfile();
+
+ let mut file = File::create(tempfile.path()).await.unwrap();
+
+ file.write_all(HELLO).await.unwrap();
+ file.shutdown().await.unwrap();
+
+ let file = std::fs::read(tempfile.path()).unwrap();
+ assert_eq!(file, HELLO);
+}
+
+#[tokio::test]
+async fn rewind_seek_position() {
+ let tempfile = tempfile();
+
+ let mut file = File::create(tempfile.path()).await.unwrap();
+
+ file.seek(SeekFrom::Current(10)).await.unwrap();
+
+ file.rewind().await.unwrap();
+
+ assert_eq!(file.stream_position().await.unwrap(), 0);
+}
+
+#[tokio::test]
+async fn coop() {
+ let mut tempfile = tempfile();
+ tempfile.write_all(HELLO).unwrap();
+
+ let mut task = task::spawn(async {
+ let mut file = File::open(tempfile.path()).await.unwrap();
+
+ let mut buf = [0; 1024];
+
+ loop {
+ file.read(&mut buf).await.unwrap();
+ file.seek(std::io::SeekFrom::Start(0)).await.unwrap();
+ }
+ });
+
+ for _ in 0..1_000 {
+ if task.poll().is_pending() {
+ return;
+ }
+ }
+
+ panic!("did not yield");
+}
+
+fn tempfile() -> NamedTempFile {
+ NamedTempFile::new().unwrap()
+}
+
+#[tokio::test]
+#[cfg(unix)]
+async fn unix_fd() {
+ use std::os::unix::io::AsRawFd;
+ let tempfile = tempfile();
+
+ let file = File::create(tempfile.path()).await.unwrap();
+ assert!(file.as_raw_fd() as u64 > 0);
+}
+
+#[tokio::test]
+#[cfg(windows)]
+async fn windows_handle() {
+ use std::os::windows::io::AsRawHandle;
+ let tempfile = tempfile();
+
+ let file = File::create(tempfile.path()).await.unwrap();
+ assert!(file.as_raw_handle() as u64 > 0);
+}
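
Note: the coop test above depends on `tokio_test::task::spawn`, which wraps a future together with a mock waker so it can be polled by hand and asserted on. A small sketch of that helper in isolation (an assumed example, not part of the vendored tests):

    use tokio_test::task;

    fn main() {
        // Wrap a future so it can be polled manually, without a runtime.
        let mut fut = task::spawn(async { 21 * 2 });
        // A future with no pending I/O completes on its first poll.
        assert_eq!(fut.poll(), std::task::Poll::Ready(42));
    }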
diff --git a/third_party/rust/tokio/tests/fs_link.rs b/third_party/rust/tokio/tests/fs_link.rs
new file mode 100644
index 0000000000..2ef666fb2f
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_link.rs
@@ -0,0 +1,68 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+
+use std::io::prelude::*;
+use std::io::BufReader;
+use tempfile::tempdir;
+
+#[tokio::test]
+async fn test_hard_link() {
+ let dir = tempdir().unwrap();
+ let src = dir.path().join("src.txt");
+ let dst = dir.path().join("dst.txt");
+
+ {
+ let mut file = std::fs::File::create(&src).unwrap();
+ file.write_all(b"hello").unwrap();
+ }
+
+ let dst_2 = dst.clone();
+
+ assert!(fs::hard_link(src, dst_2.clone()).await.is_ok());
+
+ let mut content = String::new();
+
+ {
+ let file = std::fs::File::open(dst).unwrap();
+ let mut reader = BufReader::new(file);
+ reader.read_to_string(&mut content).unwrap();
+ }
+
+ assert!(content == "hello");
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn test_symlink() {
+ let dir = tempdir().unwrap();
+ let src = dir.path().join("src.txt");
+ let dst = dir.path().join("dst.txt");
+
+ {
+ let mut file = std::fs::File::create(&src).unwrap();
+ file.write_all(b"hello").unwrap();
+ }
+
+ let src_2 = src.clone();
+ let dst_2 = dst.clone();
+
+ assert!(fs::symlink(src_2.clone(), dst_2.clone()).await.is_ok());
+
+ let mut content = String::new();
+
+ {
+ let file = std::fs::File::open(dst.clone()).unwrap();
+ let mut reader = BufReader::new(file);
+ reader.read_to_string(&mut content).unwrap();
+ }
+
+ assert!(content == "hello");
+
+ let read = fs::read_link(dst.clone()).await.unwrap();
+ assert!(read == src);
+
+ let symlink_meta = fs::symlink_metadata(dst.clone()).await.unwrap();
+ assert!(symlink_meta.file_type().is_symlink());
+}
diff --git a/third_party/rust/tokio/tests/io_async_fd.rs b/third_party/rust/tokio/tests/io_async_fd.rs
new file mode 100644
index 0000000000..5a6875e3c2
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_async_fd.rs
@@ -0,0 +1,601 @@
+#![warn(rust_2018_idioms)]
+#![cfg(all(unix, feature = "full"))]
+
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+};
+use std::time::Duration;
+use std::{
+ future::Future,
+ io::{self, ErrorKind, Read, Write},
+ task::{Context, Waker},
+};
+
+use nix::unistd::{close, read, write};
+
+use futures::poll;
+
+use tokio::io::unix::{AsyncFd, AsyncFdReadyGuard};
+use tokio_test::{assert_err, assert_pending};
+
+struct TestWaker {
+ inner: Arc<TestWakerInner>,
+ waker: Waker,
+}
+
+#[derive(Default)]
+struct TestWakerInner {
+ awoken: AtomicBool,
+}
+
+impl futures::task::ArcWake for TestWakerInner {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.awoken.store(true, Ordering::SeqCst);
+ }
+}
+
+impl TestWaker {
+ fn new() -> Self {
+ let inner: Arc<TestWakerInner> = Default::default();
+
+ Self {
+ inner: inner.clone(),
+ waker: futures::task::waker(inner),
+ }
+ }
+
+ fn awoken(&self) -> bool {
+ self.inner.awoken.swap(false, Ordering::SeqCst)
+ }
+
+ fn context(&self) -> Context<'_> {
+ Context::from_waker(&self.waker)
+ }
+}
+
+#[derive(Debug)]
+struct FileDescriptor {
+ fd: RawFd,
+}
+
+impl AsRawFd for FileDescriptor {
+ fn as_raw_fd(&self) -> RawFd {
+ self.fd
+ }
+}
+
+impl Read for &FileDescriptor {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ read(self.fd, buf).map_err(io::Error::from)
+ }
+}
+
+impl Read for FileDescriptor {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (self as &Self).read(buf)
+ }
+}
+
+impl Write for &FileDescriptor {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ write(self.fd, buf).map_err(io::Error::from)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Write for FileDescriptor {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ (self as &Self).write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ (self as &Self).flush()
+ }
+}
+
+impl Drop for FileDescriptor {
+ fn drop(&mut self) {
+ let _ = close(self.fd);
+ }
+}
+
+fn set_nonblocking(fd: RawFd) {
+ use nix::fcntl::{OFlag, F_GETFL, F_SETFL};
+
+ let flags = nix::fcntl::fcntl(fd, F_GETFL).expect("fcntl(F_GETFL)");
+
+ if flags < 0 {
+ panic!(
+ "bad return value from fcntl(F_GETFL): {} ({:?})",
+ flags,
+ nix::Error::last()
+ );
+ }
+
+ let flags = OFlag::from_bits_truncate(flags) | OFlag::O_NONBLOCK;
+
+ nix::fcntl::fcntl(fd, F_SETFL(flags)).expect("fcntl(F_SETFL)");
+}
+
+fn socketpair() -> (FileDescriptor, FileDescriptor) {
+ use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
+
+ let (fd_a, fd_b) = socket::socketpair(
+ AddressFamily::Unix,
+ SockType::Stream,
+ None,
+ SockFlag::empty(),
+ )
+ .expect("socketpair");
+ let fds = (FileDescriptor { fd: fd_a }, FileDescriptor { fd: fd_b });
+
+ set_nonblocking(fds.0.fd);
+ set_nonblocking(fds.1.fd);
+
+ fds
+}
+
+fn drain(mut fd: &FileDescriptor) {
+ let mut buf = [0u8; 512];
+
+ loop {
+ match fd.read(&mut buf[..]) {
+ Err(e) if e.kind() == ErrorKind::WouldBlock => break,
+ Ok(0) => panic!("unexpected EOF"),
+ Err(e) => panic!("unexpected error: {:?}", e),
+ Ok(_) => continue,
+ }
+ }
+}
+
+#[tokio::test]
+async fn initially_writable() {
+ let (a, b) = socketpair();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+ let afd_b = AsyncFd::new(b).unwrap();
+
+ afd_a.writable().await.unwrap().clear_ready();
+ afd_b.writable().await.unwrap().clear_ready();
+
+ tokio::select! {
+ biased;
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {},
+ _ = afd_a.readable() => panic!("Unexpected readable state"),
+ _ = afd_b.readable() => panic!("Unexpected readable state"),
+ }
+}
+
+#[tokio::test]
+async fn reset_readable() {
+ let (a, mut b) = socketpair();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+
+ let readable = afd_a.readable();
+ tokio::pin!(readable);
+
+ tokio::select! {
+ _ = readable.as_mut() => panic!(),
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {}
+ }
+
+ b.write_all(b"0").unwrap();
+
+ let mut guard = readable.await.unwrap();
+
+ guard
+ .try_io(|_| afd_a.get_ref().read(&mut [0]))
+ .unwrap()
+ .unwrap();
+
+ // `a` is not readable, but the reactor still thinks it is
+ // (because we have not observed a not-ready error yet)
+ afd_a.readable().await.unwrap().retain_ready();
+
+ // Explicitly clear the ready state
+ guard.clear_ready();
+
+ let readable = afd_a.readable();
+ tokio::pin!(readable);
+
+ tokio::select! {
+ _ = readable.as_mut() => panic!(),
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {}
+ }
+
+ b.write_all(b"0").unwrap();
+
+ // We can observe the new readable event
+ afd_a.readable().await.unwrap().clear_ready();
+}
+
+#[tokio::test]
+async fn reset_writable() {
+ let (a, b) = socketpair();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+
+ let mut guard = afd_a.writable().await.unwrap();
+
+ // Write until we get a WouldBlock. This also clears the ready state.
+ while guard
+ .try_io(|_| afd_a.get_ref().write(&[0; 512][..]))
+ .is_ok()
+ {}
+
+ // Writable state should be cleared now.
+ let writable = afd_a.writable();
+ tokio::pin!(writable);
+
+ tokio::select! {
+ _ = writable.as_mut() => panic!(),
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {}
+ }
+
+ // Read from the other side; we should become writable now.
+ drain(&b);
+
+ let _ = writable.await.unwrap();
+}
+
+#[derive(Debug)]
+struct ArcFd<T>(Arc<T>);
+impl<T: AsRawFd> AsRawFd for ArcFd<T> {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+}
+
+#[tokio::test]
+async fn drop_closes() {
+ let (a, mut b) = socketpair();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+
+ assert_eq!(
+ ErrorKind::WouldBlock,
+ b.read(&mut [0]).err().unwrap().kind()
+ );
+
+ std::mem::drop(afd_a);
+
+ assert_eq!(0, b.read(&mut [0]).unwrap());
+
+ // into_inner does not close the fd
+
+ let (a, mut b) = socketpair();
+ let afd_a = AsyncFd::new(a).unwrap();
+ let _a: FileDescriptor = afd_a.into_inner();
+
+ assert_eq!(
+ ErrorKind::WouldBlock,
+ b.read(&mut [0]).err().unwrap().kind()
+ );
+
+ // Whether dropping closes the fd is delegated to the inner object
+ let (a, mut b) = socketpair();
+ let arc_fd = Arc::new(a);
+ let afd_a = AsyncFd::new(ArcFd(arc_fd.clone())).unwrap();
+ std::mem::drop(afd_a);
+
+ assert_eq!(
+ ErrorKind::WouldBlock,
+ b.read(&mut [0]).err().unwrap().kind()
+ );
+
+ std::mem::drop(arc_fd); // suppress unnecessary clone clippy warning
+}
+
+#[tokio::test]
+async fn reregister() {
+ let (a, _b) = socketpair();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+ let a = afd_a.into_inner();
+ AsyncFd::new(a).unwrap();
+}
+
+#[tokio::test]
+async fn try_io() {
+ let (a, mut b) = socketpair();
+
+ b.write_all(b"0").unwrap();
+
+ let afd_a = AsyncFd::new(a).unwrap();
+
+ let mut guard = afd_a.readable().await.unwrap();
+
+ afd_a.get_ref().read_exact(&mut [0]).unwrap();
+
+ // Should not clear the readable state
+ let _ = guard.try_io(|_| Ok(()));
+
+ // Still readable...
+ let _ = afd_a.readable().await.unwrap();
+
+ // Should clear the readable state
+ let _ = guard.try_io(|_| io::Result::<()>::Err(ErrorKind::WouldBlock.into()));
+
+ // Assert not readable
+ let readable = afd_a.readable();
+ tokio::pin!(readable);
+
+ tokio::select! {
+ _ = readable.as_mut() => panic!(),
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {}
+ }
+
+ // Write something to b again and make sure we're reawoken
+ b.write_all(b"0").unwrap();
+ let _ = readable.await.unwrap();
+}
+
+#[tokio::test]
+async fn multiple_waiters() {
+ let (a, mut b) = socketpair();
+ let afd_a = Arc::new(AsyncFd::new(a).unwrap());
+
+ let barrier = Arc::new(tokio::sync::Barrier::new(11));
+
+ let mut tasks = Vec::new();
+ for _ in 0..10 {
+ let afd_a = afd_a.clone();
+ let barrier = barrier.clone();
+
+ let f = async move {
+ let notify_barrier = async {
+ barrier.wait().await;
+ futures::future::pending::<()>().await;
+ };
+
+ tokio::select! {
+ biased;
+ guard = afd_a.readable() => {
+ tokio::task::yield_now().await;
+ guard.unwrap().clear_ready()
+ },
+ _ = notify_barrier => unreachable!(),
+ }
+
+ std::mem::drop(afd_a);
+ };
+
+ tasks.push(tokio::spawn(f));
+ }
+
+ let mut all_tasks = futures::future::try_join_all(tasks);
+
+ tokio::select! {
+ r = std::pin::Pin::new(&mut all_tasks) => {
+ r.unwrap(); // propagate panic
+ panic!("Tasks exited unexpectedly")
+ },
+ _ = barrier.wait() => {}
+ };
+
+ b.write_all(b"0").unwrap();
+
+ all_tasks.await.unwrap();
+}
+
+#[tokio::test]
+async fn poll_fns() {
+ let (a, b) = socketpair();
+ let afd_a = Arc::new(AsyncFd::new(a).unwrap());
+ let afd_b = Arc::new(AsyncFd::new(b).unwrap());
+
+ // Fill up the write side of A
+ while afd_a.get_ref().write(&[0; 512]).is_ok() {}
+
+ let waker = TestWaker::new();
+
+ assert_pending!(afd_a.as_ref().poll_read_ready(&mut waker.context()));
+
+ let afd_a_2 = afd_a.clone();
+ let r_barrier = Arc::new(tokio::sync::Barrier::new(2));
+ let barrier_clone = r_barrier.clone();
+
+ let read_fut = tokio::spawn(async move {
+ // Move waker onto this task first
+ assert_pending!(poll!(futures::future::poll_fn(|cx| afd_a_2
+ .as_ref()
+ .poll_read_ready(cx))));
+ barrier_clone.wait().await;
+
+ let _ = futures::future::poll_fn(|cx| afd_a_2.as_ref().poll_read_ready(cx)).await;
+ });
+
+ let afd_a_2 = afd_a.clone();
+ let w_barrier = Arc::new(tokio::sync::Barrier::new(2));
+ let barrier_clone = w_barrier.clone();
+
+ let mut write_fut = tokio::spawn(async move {
+ // Move waker onto this task first
+ assert_pending!(poll!(futures::future::poll_fn(|cx| afd_a_2
+ .as_ref()
+ .poll_write_ready(cx))));
+ barrier_clone.wait().await;
+
+ let _ = futures::future::poll_fn(|cx| afd_a_2.as_ref().poll_write_ready(cx)).await;
+ });
+
+ r_barrier.wait().await;
+ w_barrier.wait().await;
+
+ let readable = afd_a.readable();
+ tokio::pin!(readable);
+
+ tokio::select! {
+ _ = &mut readable => unreachable!(),
+ _ = tokio::task::yield_now() => {}
+ }
+
+ // Make A readable. We expect that 'readable' and 'read_fut' will both complete quickly
+ afd_b.get_ref().write_all(b"0").unwrap();
+
+ let _ = tokio::join!(readable, read_fut);
+
+ // Our original waker should _not_ be awoken (poll_read_ready retains only the last context)
+ assert!(!waker.awoken());
+
+ // The writable side should not be awoken
+ tokio::select! {
+ _ = &mut write_fut => unreachable!(),
+ _ = tokio::time::sleep(Duration::from_millis(5)) => {}
+ }
+
+ // Make it writable now
+ drain(afd_b.get_ref());
+
+ // Now we should be writable (i.e. the waker for poll_write should still be registered after we wake the read side)
+ let _ = write_fut.await;
+}
+
+fn assert_pending<T: std::fmt::Debug, F: Future<Output = T>>(f: F) -> std::pin::Pin<Box<F>> {
+ let mut pinned = Box::pin(f);
+
+ assert_pending!(pinned
+ .as_mut()
+ .poll(&mut Context::from_waker(futures::task::noop_waker_ref())));
+
+ pinned
+}
+
+fn rt() -> tokio::runtime::Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+#[test]
+fn driver_shutdown_wakes_currently_pending() {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ let readable = assert_pending(afd_a.readable());
+
+ std::mem::drop(rt);
+
+ // The future was initialized **before** dropping the rt
+ assert_err!(futures::executor::block_on(readable));
+
+ // The future is initialized **after** dropping the rt.
+ assert_err!(futures::executor::block_on(afd_a.readable()));
+}
+
+#[test]
+fn driver_shutdown_wakes_future_pending() {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ std::mem::drop(rt);
+
+ assert_err!(futures::executor::block_on(afd_a.readable()));
+}
+
+#[test]
+fn driver_shutdown_wakes_pending_race() {
+ // TODO: make this a loom test
+ for _ in 0..100 {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ let _ = std::thread::spawn(move || std::mem::drop(rt));
+
+ // This may or may not return an error (but will be awoken)
+ let _ = futures::executor::block_on(afd_a.readable());
+
+ // However retrying will always return an error
+ assert_err!(futures::executor::block_on(afd_a.readable()));
+ }
+}
+
+async fn poll_readable<T: AsRawFd>(fd: &AsyncFd<T>) -> std::io::Result<AsyncFdReadyGuard<'_, T>> {
+ futures::future::poll_fn(|cx| fd.poll_read_ready(cx)).await
+}
+
+async fn poll_writable<T: AsRawFd>(fd: &AsyncFd<T>) -> std::io::Result<AsyncFdReadyGuard<'_, T>> {
+ futures::future::poll_fn(|cx| fd.poll_write_ready(cx)).await
+}
+
+#[test]
+fn driver_shutdown_wakes_currently_pending_polls() {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ while afd_a.get_ref().write(&[0; 512]).is_ok() {} // make not writable
+
+ let readable = assert_pending(poll_readable(&afd_a));
+ let writable = assert_pending(poll_writable(&afd_a));
+
+ std::mem::drop(rt);
+
+ // Attempting to poll readiness when the rt is dropped is an error
+ assert_err!(futures::executor::block_on(readable));
+ assert_err!(futures::executor::block_on(writable));
+}
+
+#[test]
+fn driver_shutdown_wakes_poll() {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ std::mem::drop(rt);
+
+ assert_err!(futures::executor::block_on(poll_readable(&afd_a)));
+ assert_err!(futures::executor::block_on(poll_writable(&afd_a)));
+}
+
+#[test]
+fn driver_shutdown_wakes_poll_race() {
+ // TODO: make this a loom test
+ for _ in 0..100 {
+ let rt = rt();
+
+ let (a, _b) = socketpair();
+ let afd_a = {
+ let _enter = rt.enter();
+ AsyncFd::new(a).unwrap()
+ };
+
+ while afd_a.get_ref().write(&[0; 512]).is_ok() {} // make not writable
+
+ let _ = std::thread::spawn(move || std::mem::drop(rt));
+
+ // The poll variants will always return an error in this case
+ assert_err!(futures::executor::block_on(poll_readable(&afd_a)));
+ assert_err!(futures::executor::block_on(poll_writable(&afd_a)));
+ }
+}
diff --git a/third_party/rust/tokio/tests/io_async_read.rs b/third_party/rust/tokio/tests/io_async_read.rs
new file mode 100644
index 0000000000..aaeadfa4c1
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_async_read.rs
@@ -0,0 +1,10 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncRead;
+
+#[test]
+fn assert_obj_safe() {
+ fn _assert<T>() {}
+ _assert::<Box<dyn AsyncRead>>();
+}
diff --git a/third_party/rust/tokio/tests/io_buf_reader.rs b/third_party/rust/tokio/tests/io_buf_reader.rs
new file mode 100644
index 0000000000..0d3f6bafc2
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_buf_reader.rs
@@ -0,0 +1,379 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// https://github.com/rust-lang/futures-rs/blob/1803948ff091b4eabf7f3bf39e16bbbdefca5cc8/futures/tests/io_buf_reader.rs
+
+use futures::task::{noop_waker_ref, Context, Poll};
+use std::cmp;
+use std::io::{self, Cursor};
+use std::pin::Pin;
+use tokio::io::{
+ AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt,
+ BufReader, ReadBuf, SeekFrom,
+};
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+macro_rules! run_fill_buf {
+ ($reader:expr) => {{
+ let mut cx = Context::from_waker(noop_waker_ref());
+ loop {
+ if let Poll::Ready(x) = Pin::new(&mut $reader).poll_fill_buf(&mut cx) {
+ break x;
+ }
+ }
+ }};
+}
+
+struct MaybePending<'a> {
+ inner: &'a [u8],
+ ready_read: bool,
+ ready_fill_buf: bool,
+}
+
+impl<'a> MaybePending<'a> {
+ fn new(inner: &'a [u8]) -> Self {
+ Self {
+ inner,
+ ready_read: false,
+ ready_fill_buf: false,
+ }
+ }
+}
+
+impl AsyncRead for MaybePending<'_> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ if self.ready_read {
+ self.ready_read = false;
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ } else {
+ self.ready_read = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+}
+
+impl AsyncBufRead for MaybePending<'_> {
+ fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ if self.ready_fill_buf {
+ self.ready_fill_buf = false;
+ if self.inner.is_empty() {
+ return Poll::Ready(Ok(&[]));
+ }
+ let len = cmp::min(2, self.inner.len());
+ Poll::Ready(Ok(&self.inner[0..len]))
+ } else {
+ self.ready_fill_buf = true;
+ Poll::Pending
+ }
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ self.inner = &self.inner[amt..];
+ }
+}
+
+#[tokio::test]
+async fn test_buffered_reader() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, inner);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 3);
+ assert_eq!(buf, [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 2);
+ assert_eq!(buf, [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [3, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [4, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+}
+
+#[tokio::test]
+async fn test_buffered_reader_seek() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, Cursor::new(inner));
+
+ assert_eq!(reader.seek(SeekFrom::Start(3)).await.unwrap(), 3);
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
+ assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err());
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
+ assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4);
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[1, 2][..]);
+ Pin::new(&mut reader).consume(1);
+ assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3);
+}
+
+#[tokio::test]
+async fn test_buffered_reader_seek_underflow() {
+ // gimmick reader that yields its position modulo 256 for each byte
+ struct PositionReader {
+ pos: u64,
+ }
+ impl AsyncRead for PositionReader {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ let b = buf.initialize_unfilled();
+ let len = b.len();
+ for x in b {
+ *x = self.pos as u8;
+ self.pos = self.pos.wrapping_add(1);
+ }
+ buf.advance(len);
+ Poll::Ready(Ok(()))
+ }
+ }
+ impl AsyncSeek for PositionReader {
+ fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
+ match pos {
+ SeekFrom::Start(n) => {
+ self.pos = n;
+ }
+ SeekFrom::Current(n) => {
+ self.pos = self.pos.wrapping_add(n as u64);
+ }
+ SeekFrom::End(n) => {
+ self.pos = u64::MAX.wrapping_add(n as u64);
+ }
+ }
+ Ok(())
+ }
+ fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ Poll::Ready(Ok(self.pos))
+ }
+ }
+
+ let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1, 2, 3, 4][..]);
+ assert_eq!(reader.seek(SeekFrom::End(-5)).await.unwrap(), u64::MAX - 5);
+ assert_eq!(run_fill_buf!(reader).unwrap().len(), 5);
+ // the following seek will require two underlying seeks
+ let expected = 9_223_372_036_854_775_802;
+ assert_eq!(
+ reader.seek(SeekFrom::Current(i64::MIN)).await.unwrap(),
+ expected
+ );
+ assert_eq!(run_fill_buf!(reader).unwrap().len(), 5);
+ // seeking by 0 should empty the buffer.
+ assert_eq!(reader.seek(SeekFrom::Current(0)).await.unwrap(), expected);
+ assert_eq!(reader.get_ref().pos, expected);
+}
+
+#[tokio::test]
+async fn test_short_reads() {
+ /// A dummy reader intended for testing short-read propagation.
+ struct ShortReader {
+ lengths: Vec<usize>,
+ }
+
+ impl AsyncRead for ShortReader {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ if !self.lengths.is_empty() {
+ buf.advance(self.lengths.remove(0));
+ }
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ let inner = ShortReader {
+ lengths: vec![0, 1, 2, 0, 1, 0],
+ };
+ let mut reader = BufReader::new(inner);
+ let mut buf = [0, 0];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 2);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+}
+
+#[tokio::test]
+async fn maybe_pending() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, MaybePending::new(inner));
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 3);
+ assert_eq!(buf, [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 2);
+ assert_eq!(buf, [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [3, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [4, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+}
+
+#[tokio::test]
+async fn maybe_pending_buf_read() {
+ let inner = MaybePending::new(&[0, 1, 2, 3, 1, 0]);
+ let mut reader = BufReader::with_capacity(2, inner);
+ let mut v = Vec::new();
+ reader.read_until(3, &mut v).await.unwrap();
+ assert_eq!(v, [0, 1, 2, 3]);
+ v.clear();
+ reader.read_until(1, &mut v).await.unwrap();
+ assert_eq!(v, [1]);
+ v.clear();
+ reader.read_until(8, &mut v).await.unwrap();
+ assert_eq!(v, [0]);
+ v.clear();
+ reader.read_until(9, &mut v).await.unwrap();
+ assert_eq!(v, []);
+}
+
+// https://github.com/rust-lang/futures-rs/pull/1573#discussion_r281162309
+#[tokio::test]
+async fn maybe_pending_seek() {
+ struct MaybePendingSeek<'a> {
+ inner: Cursor<&'a [u8]>,
+ ready: bool,
+ seek_res: Option<io::Result<()>>,
+ }
+
+ impl<'a> MaybePendingSeek<'a> {
+ fn new(inner: &'a [u8]) -> Self {
+ Self {
+ inner: Cursor::new(inner),
+ ready: true,
+ seek_res: None,
+ }
+ }
+ }
+
+ impl AsyncRead for MaybePendingSeek<'_> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ }
+ }
+
+ impl AsyncBufRead for MaybePendingSeek<'_> {
+ fn poll_fill_buf(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<&[u8]>> {
+ let this: *mut Self = &mut *self as *mut _;
+ Pin::new(&mut unsafe { &mut *this }.inner).poll_fill_buf(cx)
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ Pin::new(&mut self.inner).consume(amt)
+ }
+ }
+
+ impl AsyncSeek for MaybePendingSeek<'_> {
+ fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
+ self.seek_res = Some(Pin::new(&mut self.inner).start_seek(pos));
+ Ok(())
+ }
+ fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ if self.ready {
+ self.ready = false;
+ self.seek_res.take().unwrap_or(Ok(()))?;
+ Pin::new(&mut self.inner).poll_complete(cx)
+ } else {
+ self.ready = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ }
+
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, MaybePendingSeek::new(inner));
+
+ assert_eq!(reader.seek(SeekFrom::Current(3)).await.unwrap(), 3);
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
+ assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err());
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
+ assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4);
+ assert_eq!(run_fill_buf!(reader).unwrap(), &[1, 2][..]);
+ Pin::new(&mut reader).consume(1);
+ assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3);
+}
+
+// This tests the AsyncBufReadExt::fill_buf wrapper.
+#[tokio::test]
+async fn test_fill_buf_wrapper() {
+ let (mut write, read) = tokio::io::duplex(16);
+
+ let mut read = BufReader::new(read);
+ write.write_all(b"hello world").await.unwrap();
+
+ assert_eq!(read.fill_buf().await.unwrap(), b"hello world");
+ read.consume(b"hello ".len());
+ assert_eq!(read.fill_buf().await.unwrap(), b"world");
+ assert_eq!(read.fill_buf().await.unwrap(), b"world");
+ read.consume(b"world".len());
+
+ let mut fill = spawn(read.fill_buf());
+ assert_pending!(fill.poll());
+
+ write.write_all(b"foo bar").await.unwrap();
+ assert_eq!(assert_ready!(fill.poll()).unwrap(), b"foo bar");
+ drop(fill);
+
+ drop(write);
+ assert_eq!(read.fill_buf().await.unwrap(), b"foo bar");
+ read.consume(b"foo bar".len());
+ assert_eq!(read.fill_buf().await.unwrap(), b"");
+}
diff --git a/third_party/rust/tokio/tests/io_buf_writer.rs b/third_party/rust/tokio/tests/io_buf_writer.rs
new file mode 100644
index 0000000000..47a0d466f4
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_buf_writer.rs
@@ -0,0 +1,537 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// https://github.com/rust-lang/futures-rs/blob/1803948ff091b4eabf7f3bf39e16bbbdefca5cc8/futures/tests/io_buf_writer.rs
+
+use futures::task::{Context, Poll};
+use std::io::{self, Cursor};
+use std::pin::Pin;
+use tokio::io::{AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufWriter, SeekFrom};
+
+use futures::future;
+use tokio_test::assert_ok;
+
+use std::cmp;
+use std::io::IoSlice;
+
+mod support {
+ pub(crate) mod io_vec;
+}
+use support::io_vec::IoBufs;
+
+struct MaybePending {
+ inner: Vec<u8>,
+ ready: bool,
+}
+
+impl MaybePending {
+ fn new(inner: Vec<u8>) -> Self {
+ Self {
+ inner,
+ ready: false,
+ }
+ }
+}
+
+impl AsyncWrite for MaybePending {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.ready {
+ self.ready = false;
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ } else {
+ self.ready = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_shutdown(cx)
+ }
+}
+
+async fn write_vectored<W>(writer: &mut W, bufs: &[IoSlice<'_>]) -> io::Result<usize>
+where
+ W: AsyncWrite + Unpin,
+{
+ let mut writer = Pin::new(writer);
+ future::poll_fn(|cx| writer.as_mut().poll_write_vectored(cx, bufs)).await
+}
+
+#[tokio::test]
+async fn buf_writer() {
+ let mut writer = BufWriter::with_capacity(2, Vec::new());
+
+ writer.write(&[0, 1]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.write(&[2]).await.unwrap();
+ assert_eq!(writer.buffer(), [2]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.write(&[3]).await.unwrap();
+ assert_eq!(writer.buffer(), [2, 3]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ writer.flush().await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ writer.write(&[4]).await.unwrap();
+ writer.write(&[5]).await.unwrap();
+ assert_eq!(writer.buffer(), [4, 5]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ writer.write(&[6]).await.unwrap();
+ assert_eq!(writer.buffer(), [6]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
+
+ writer.write(&[7, 8]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ writer.write(&[9, 10, 11]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+
+ writer.flush().await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+}
+
+#[tokio::test]
+async fn buf_writer_inner_flushes() {
+ let mut w = BufWriter::with_capacity(3, Vec::new());
+ w.write(&[0, 1]).await.unwrap();
+ assert_eq!(*w.get_ref(), []);
+ w.flush().await.unwrap();
+ let w = w.into_inner();
+ assert_eq!(w, [0, 1]);
+}
+
+#[tokio::test]
+async fn buf_writer_seek() {
+ let mut w = BufWriter::with_capacity(3, Cursor::new(Vec::new()));
+ w.write_all(&[0, 1, 2, 3, 4, 5]).await.unwrap();
+ w.write_all(&[6, 7]).await.unwrap();
+ assert_eq!(w.seek(SeekFrom::Current(0)).await.unwrap(), 8);
+ assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
+ assert_eq!(w.seek(SeekFrom::Start(2)).await.unwrap(), 2);
+ w.write_all(&[8, 9]).await.unwrap();
+ w.flush().await.unwrap();
+ assert_eq!(&w.into_inner().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
+}
+
+#[tokio::test]
+async fn maybe_pending_buf_writer() {
+ let mut writer = BufWriter::with_capacity(2, MaybePending::new(Vec::new()));
+
+ writer.write(&[0, 1]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ writer.write(&[2]).await.unwrap();
+ assert_eq!(writer.buffer(), [2]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ writer.write(&[3]).await.unwrap();
+ assert_eq!(writer.buffer(), [2, 3]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ writer.flush().await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
+
+ writer.write(&[4]).await.unwrap();
+ writer.write(&[5]).await.unwrap();
+ assert_eq!(writer.buffer(), [4, 5]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
+
+ writer.write(&[6]).await.unwrap();
+ assert_eq!(writer.buffer(), [6]);
+ assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5]);
+
+ writer.write(&[7, 8]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ writer.write(&[9, 10, 11]).await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(
+ writer.get_ref().inner,
+ &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+ );
+
+ writer.flush().await.unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(
+ &writer.get_ref().inner,
+ &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+ );
+}
+
+#[tokio::test]
+async fn maybe_pending_buf_writer_inner_flushes() {
+ let mut w = BufWriter::with_capacity(3, MaybePending::new(Vec::new()));
+ w.write(&[0, 1]).await.unwrap();
+ assert_eq!(&w.get_ref().inner, &[]);
+ w.flush().await.unwrap();
+ let w = w.into_inner().inner;
+ assert_eq!(w, [0, 1]);
+}
+
+#[tokio::test]
+async fn maybe_pending_buf_writer_seek() {
+ struct MaybePendingSeek {
+ inner: Cursor<Vec<u8>>,
+ ready_write: bool,
+ ready_seek: bool,
+ seek_res: Option<io::Result<()>>,
+ }
+
+ impl MaybePendingSeek {
+ fn new(inner: Vec<u8>) -> Self {
+ Self {
+ inner: Cursor::new(inner),
+ ready_write: false,
+ ready_seek: false,
+ seek_res: None,
+ }
+ }
+ }
+
+ impl AsyncWrite for MaybePendingSeek {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.ready_write {
+ self.ready_write = false;
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ } else {
+ self.ready_write = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_shutdown(cx)
+ }
+ }
+
+ impl AsyncSeek for MaybePendingSeek {
+ fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
+ self.seek_res = Some(Pin::new(&mut self.inner).start_seek(pos));
+ Ok(())
+ }
+ fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ if self.ready_seek {
+ self.ready_seek = false;
+ self.seek_res.take().unwrap_or(Ok(()))?;
+ Pin::new(&mut self.inner).poll_complete(cx)
+ } else {
+ self.ready_seek = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ }
+
+ let mut w = BufWriter::with_capacity(3, MaybePendingSeek::new(Vec::new()));
+ w.write_all(&[0, 1, 2, 3, 4, 5]).await.unwrap();
+ w.write_all(&[6, 7]).await.unwrap();
+ assert_eq!(w.seek(SeekFrom::Current(0)).await.unwrap(), 8);
+ assert_eq!(
+ &w.get_ref().inner.get_ref()[..],
+ &[0, 1, 2, 3, 4, 5, 6, 7][..]
+ );
+ assert_eq!(w.seek(SeekFrom::Start(2)).await.unwrap(), 2);
+ w.write_all(&[8, 9]).await.unwrap();
+ w.flush().await.unwrap();
+ assert_eq!(
+ &w.into_inner().inner.into_inner()[..],
+ &[0, 1, 8, 9, 4, 5, 6, 7]
+ );
+}
+
+struct MockWriter {
+ data: Vec<u8>,
+ write_len: usize,
+ vectored: bool,
+}
+
+impl MockWriter {
+ fn new(write_len: usize) -> Self {
+ MockWriter {
+ data: Vec::new(),
+ write_len,
+ vectored: false,
+ }
+ }
+
+ fn vectored(write_len: usize) -> Self {
+ MockWriter {
+ data: Vec::new(),
+ write_len,
+ vectored: true,
+ }
+ }
+
+ fn write_up_to(&mut self, buf: &[u8], limit: usize) -> usize {
+ let len = cmp::min(buf.len(), limit);
+ self.data.extend_from_slice(&buf[..len]);
+ len
+ }
+}
+
+impl AsyncWrite for MockWriter {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ let this = self.get_mut();
+ let n = this.write_up_to(buf, this.write_len);
+ Ok(n).into()
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize, io::Error>> {
+ let this = self.get_mut();
+ let mut total_written = 0;
+ for buf in bufs {
+ let n = this.write_up_to(buf, this.write_len - total_written);
+ total_written += n;
+ if total_written == this.write_len {
+ break;
+ }
+ }
+ Ok(total_written).into()
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ self.vectored
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Ok(()).into()
+ }
+}
+
+#[tokio::test]
+async fn write_vectored_empty_on_non_vectored() {
+ let mut w = BufWriter::new(MockWriter::new(4));
+ let n = assert_ok!(write_vectored(&mut w, &[]).await);
+ assert_eq!(n, 0);
+
+ let io_vec = [IoSlice::new(&[]); 3];
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 0);
+
+ assert_ok!(w.flush().await);
+ assert!(w.get_ref().data.is_empty());
+}
+
+#[tokio::test]
+async fn write_vectored_empty_on_vectored() {
+ let mut w = BufWriter::new(MockWriter::vectored(4));
+ let n = assert_ok!(write_vectored(&mut w, &[]).await);
+ assert_eq!(n, 0);
+
+ let io_vec = [IoSlice::new(&[]); 3];
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 0);
+
+ assert_ok!(w.flush().await);
+ assert!(w.get_ref().data.is_empty());
+}
+
+#[tokio::test]
+async fn write_vectored_basic_on_non_vectored() {
+ let msg = b"foo bar baz";
+ let bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&msg[4..8]),
+ IoSlice::new(&msg[8..]),
+ ];
+ let mut w = BufWriter::new(MockWriter::new(4));
+ let n = assert_ok!(write_vectored(&mut w, &bufs).await);
+ assert_eq!(n, msg.len());
+ assert!(w.buffer() == &msg[..]);
+ assert_ok!(w.flush().await);
+ assert_eq!(w.get_ref().data, msg);
+}
+
+#[tokio::test]
+async fn write_vectored_basic_on_vectored() {
+ let msg = b"foo bar baz";
+ let bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&msg[4..8]),
+ IoSlice::new(&msg[8..]),
+ ];
+ let mut w = BufWriter::new(MockWriter::vectored(4));
+ let n = assert_ok!(write_vectored(&mut w, &bufs).await);
+ assert_eq!(n, msg.len());
+ assert!(w.buffer() == &msg[..]);
+ assert_ok!(w.flush().await);
+ assert_eq!(w.get_ref().data, msg);
+}
+
+#[tokio::test]
+async fn write_vectored_large_total_on_non_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&msg[4..8]),
+ IoSlice::new(&msg[8..]),
+ ];
+ let io_vec = IoBufs::new(&mut bufs);
+ let mut w = BufWriter::with_capacity(8, MockWriter::new(4));
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 8);
+ assert!(w.buffer() == &msg[..8]);
+ let io_vec = io_vec.advance(n);
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 3);
+ assert!(w.get_ref().data.as_slice() == &msg[..8]);
+ assert!(w.buffer() == &msg[8..]);
+}
+
+#[tokio::test]
+async fn write_vectored_large_total_on_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&msg[4..8]),
+ IoSlice::new(&msg[8..]),
+ ];
+ let io_vec = IoBufs::new(&mut bufs);
+ let mut w = BufWriter::with_capacity(8, MockWriter::vectored(10));
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 10);
+ assert!(w.buffer().is_empty());
+ let io_vec = io_vec.advance(n);
+ let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
+ assert_eq!(n, 1);
+ assert!(w.get_ref().data.as_slice() == &msg[..10]);
+ assert!(w.buffer() == &msg[10..]);
+}
+
+struct VectoredWriteHarness {
+ writer: BufWriter<MockWriter>,
+ buf_capacity: usize,
+}
+
+impl VectoredWriteHarness {
+ fn new(buf_capacity: usize) -> Self {
+ VectoredWriteHarness {
+ writer: BufWriter::with_capacity(buf_capacity, MockWriter::new(4)),
+ buf_capacity,
+ }
+ }
+
+ fn with_vectored_backend(buf_capacity: usize) -> Self {
+ VectoredWriteHarness {
+ writer: BufWriter::with_capacity(buf_capacity, MockWriter::vectored(4)),
+ buf_capacity,
+ }
+ }
+
+ async fn write_all<'a, 'b>(&mut self, mut io_vec: IoBufs<'a, 'b>) -> usize {
+ let mut total_written = 0;
+ while !io_vec.is_empty() {
+ let n = assert_ok!(write_vectored(&mut self.writer, &io_vec).await);
+ assert!(n != 0);
+ assert!(self.writer.buffer().len() <= self.buf_capacity);
+ total_written += n;
+ io_vec = io_vec.advance(n);
+ }
+ total_written
+ }
+
+ async fn flush(&mut self) -> &[u8] {
+ assert_ok!(self.writer.flush().await);
+ &self.writer.get_ref().data
+ }
+}
+
+#[tokio::test]
+async fn write_vectored_odd_on_non_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&[]),
+ IoSlice::new(&msg[4..9]),
+ IoSlice::new(&msg[9..]),
+ ];
+ let mut h = VectoredWriteHarness::new(8);
+ let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
+ assert_eq!(bytes_written, msg.len());
+ assert_eq!(h.flush().await, msg);
+}
+
+#[tokio::test]
+async fn write_vectored_odd_on_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&msg[0..4]),
+ IoSlice::new(&[]),
+ IoSlice::new(&msg[4..9]),
+ IoSlice::new(&msg[9..]),
+ ];
+ let mut h = VectoredWriteHarness::with_vectored_backend(8);
+ let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
+ assert_eq!(bytes_written, msg.len());
+ assert_eq!(h.flush().await, msg);
+}
+
+#[tokio::test]
+async fn write_vectored_large_slice_on_non_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&[]),
+ IoSlice::new(&msg[..9]),
+ IoSlice::new(&msg[9..]),
+ ];
+ let mut h = VectoredWriteHarness::new(8);
+ let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
+ assert_eq!(bytes_written, msg.len());
+ assert_eq!(h.flush().await, msg);
+}
+
+#[tokio::test]
+async fn write_vectored_large_slice_on_vectored() {
+ let msg = b"foo bar baz";
+ let mut bufs = [
+ IoSlice::new(&[]),
+ IoSlice::new(&msg[..9]),
+ IoSlice::new(&msg[9..]),
+ ];
+ let mut h = VectoredWriteHarness::with_vectored_backend(8);
+ let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
+ assert_eq!(bytes_written, msg.len());
+ assert_eq!(h.flush().await, msg);
+}
diff --git a/third_party/rust/tokio/tests/io_chain.rs b/third_party/rust/tokio/tests/io_chain.rs
new file mode 100644
index 0000000000..e2d59411a1
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_chain.rs
@@ -0,0 +1,16 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn chain() {
+ let mut buf = Vec::new();
+ let rd1: &[u8] = b"hello ";
+ let rd2: &[u8] = b"world";
+
+ let mut rd = rd1.chain(rd2);
+ assert_ok!(rd.read_to_end(&mut buf).await);
+ assert_eq!(buf, b"hello world");
+}
diff --git a/third_party/rust/tokio/tests/io_copy.rs b/third_party/rust/tokio/tests/io_copy.rs
new file mode 100644
index 0000000000..005e170119
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_copy.rs
@@ -0,0 +1,87 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use bytes::BytesMut;
+use futures::ready;
+use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
+use tokio_test::assert_ok;
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn copy() {
+ struct Rd(bool);
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ if self.0 {
+ buf.put_slice(b"hello world");
+ self.0 = false;
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+ }
+
+ let mut rd = Rd(true);
+ let mut wr = Vec::new();
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, 11);
+ assert_eq!(wr, b"hello world");
+}
+
+#[tokio::test]
+async fn proxy() {
+ struct BufferedWd {
+ buf: BytesMut,
+ writer: io::DuplexStream,
+ }
+
+ impl AsyncWrite for BufferedWd {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_mut().buf.extend_from_slice(buf);
+ Poll::Ready(Ok(buf.len()))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let this = self.get_mut();
+
+ while !this.buf.is_empty() {
+ let n = ready!(Pin::new(&mut this.writer).poll_write(cx, &this.buf))?;
+ let _ = this.buf.split_to(n);
+ }
+
+ Pin::new(&mut this.writer).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.writer).poll_shutdown(cx)
+ }
+ }
+
+ let (rd, wd) = io::duplex(1024);
+ let mut rd = rd.take(1024);
+ let mut wd = BufferedWd {
+ buf: BytesMut::new(),
+ writer: wd,
+ };
+
+ // write start bytes
+ assert_ok!(wd.write_all(&[0x42; 512]).await);
+ assert_ok!(wd.flush().await);
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wd).await);
+
+ assert_eq!(n, 1024);
+}
diff --git a/third_party/rust/tokio/tests/io_copy_bidirectional.rs b/third_party/rust/tokio/tests/io_copy_bidirectional.rs
new file mode 100644
index 0000000000..0e82b29ea6
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_copy_bidirectional.rs
@@ -0,0 +1,128 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::time::Duration;
+use tokio::io::{self, copy_bidirectional, AsyncReadExt, AsyncWriteExt};
+use tokio::net::TcpStream;
+use tokio::task::JoinHandle;
+
+async fn make_socketpair() -> (TcpStream, TcpStream) {
+ let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+ let connector = TcpStream::connect(addr);
+ let acceptor = listener.accept();
+
+ let (c1, c2) = tokio::join!(connector, acceptor);
+
+ (c1.unwrap(), c2.unwrap().0)
+}
+
+async fn block_write(s: &mut TcpStream) -> usize {
+ static BUF: [u8; 2048] = [0; 2048];
+
+ let mut copied = 0;
+ loop {
+ tokio::select! {
+ result = s.write(&BUF) => {
+ copied += result.expect("write error")
+ },
+ _ = tokio::time::sleep(Duration::from_millis(10)) => {
+ break;
+ }
+ }
+ }
+
+ copied
+}
+
+async fn symmetric<F, Fut>(mut cb: F)
+where
+ F: FnMut(JoinHandle<io::Result<(u64, u64)>>, TcpStream, TcpStream) -> Fut,
+ Fut: std::future::Future<Output = ()>,
+{
+ // We run the test twice, with streams passed to copy_bidirectional in
+ // different orders, in order to ensure that the two arguments are
+ // interchangeable.
+
+ let (a, mut a1) = make_socketpair().await;
+ let (b, mut b1) = make_socketpair().await;
+
+ let handle = tokio::spawn(async move { copy_bidirectional(&mut a1, &mut b1).await });
+ cb(handle, a, b).await;
+
+ let (a, mut a1) = make_socketpair().await;
+ let (b, mut b1) = make_socketpair().await;
+
+ let handle = tokio::spawn(async move { copy_bidirectional(&mut b1, &mut a1).await });
+
+ cb(handle, b, a).await;
+}
+
+#[tokio::test]
+async fn test_basic_transfer() {
+ symmetric(|_handle, mut a, mut b| async move {
+ a.write_all(b"test").await.unwrap();
+ let mut tmp = [0; 4];
+ b.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"test");
+ })
+ .await
+}
+
+#[tokio::test]
+async fn test_transfer_after_close() {
+ symmetric(|handle, mut a, mut b| async move {
+ AsyncWriteExt::shutdown(&mut a).await.unwrap();
+ b.read_to_end(&mut Vec::new()).await.unwrap();
+
+ b.write_all(b"quux").await.unwrap();
+ let mut tmp = [0; 4];
+ a.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"quux");
+
+ // Once both are closed, we should have our handle back
+ drop(b);
+
+ assert_eq!(handle.await.unwrap().unwrap(), (0, 4));
+ })
+ .await
+}
+
+#[tokio::test]
+async fn blocking_one_side_does_not_block_other() {
+ symmetric(|handle, mut a, mut b| async move {
+ block_write(&mut a).await;
+
+ b.write_all(b"quux").await.unwrap();
+ let mut tmp = [0; 4];
+ a.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"quux");
+
+ AsyncWriteExt::shutdown(&mut a).await.unwrap();
+
+ let mut buf = Vec::new();
+ b.read_to_end(&mut buf).await.unwrap();
+
+ drop(b);
+
+ assert_eq!(handle.await.unwrap().unwrap(), (buf.len() as u64, 4));
+ })
+ .await
+}
+
+#[tokio::test]
+async fn immediate_exit_on_error() {
+ symmetric(|handle, mut a, mut b| async move {
+ block_write(&mut a).await;
+
+ // Fill up the b->copy->a path. We expect that this will _not_ drain
+ // before we exit the copy task.
+ let _bytes_written = block_write(&mut b).await;
+
+ // Drop b. We should not wait for a to consume the data buffered in the
+ // copy loop, since b will be failing writes.
+ drop(b);
+ assert!(handle.await.unwrap().is_err());
+ })
+ .await
+}
diff --git a/third_party/rust/tokio/tests/io_driver.rs b/third_party/rust/tokio/tests/io_driver.rs
new file mode 100644
index 0000000000..6fb566de58
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_driver.rs
@@ -0,0 +1,99 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio::runtime;
+use tokio_test::{assert_ok, assert_pending};
+
+use futures::task::{waker_ref, ArcWake};
+use std::future::Future;
+use std::net::TcpStream;
+use std::pin::Pin;
+use std::sync::{mpsc, Arc, Mutex};
+use std::task::Context;
+
+struct Task<T> {
+ future: Mutex<Pin<Box<T>>>,
+}
+
+impl<T: Send> ArcWake for Task<T> {
+ fn wake_by_ref(_: &Arc<Self>) {
+ // Do nothing...
+ }
+}
+
+impl<T> Task<T> {
+ fn new(future: T) -> Task<T> {
+ Task {
+ future: Mutex::new(Box::pin(future)),
+ }
+ }
+}
+
+#[test]
+fn test_drop_on_notify() {
+ // When the reactor receives a kernel notification, it notifies the
+ // task that holds the associated socket. If this notification results in
+ // the task being dropped, the socket will also be dropped.
+ //
+ // Previously, there was a deadlock scenario where the reactor, while
+ // notifying, held a lock and the task being dropped attempted to acquire
+ // that same lock in order to clean up state.
+ //
+ // To simulate this case, we create a fake executor that does nothing when
+ // the task is notified. This simulates an executor in the process of
+ // shutting down. Then, when the task handle is dropped, the task itself is
+ // dropped.
+
+ let rt = runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap();
+
+ let (addr_tx, addr_rx) = mpsc::channel();
+
+ // Define a task that just drains the listener
+ let task = Arc::new(Task::new(async move {
+ // Create a listener
+ let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Send the address
+ let addr = listener.local_addr().unwrap();
+ addr_tx.send(addr).unwrap();
+
+ loop {
+ let _ = listener.accept().await;
+ }
+ }));
+
+ {
+ let _enter = rt.enter();
+ let waker = waker_ref(&task);
+ let mut cx = Context::from_waker(&waker);
+ assert_pending!(task.future.lock().unwrap().as_mut().poll(&mut cx));
+ }
+
+ // Get the address
+ let addr = addr_rx.recv().unwrap();
+
+ drop(task);
+
+ // Establish a connection to the acceptor
+ let _s = TcpStream::connect(&addr).unwrap();
+
+ // Force the reactor to turn
+ rt.block_on(async {});
+}
+
+#[test]
+#[should_panic(
+ expected = "A Tokio 1.x context was found, but IO is disabled. Call `enable_io` on the runtime builder to enable IO."
+)]
+fn panics_when_io_disabled() {
+ let rt = runtime::Builder::new_current_thread().build().unwrap();
+
+ rt.block_on(async {
+ let _ =
+ tokio::net::TcpListener::from_std(std::net::TcpListener::bind("127.0.0.1:0").unwrap());
+ });
+}
diff --git a/third_party/rust/tokio/tests/io_driver_drop.rs b/third_party/rust/tokio/tests/io_driver_drop.rs
new file mode 100644
index 0000000000..631e66e9fb
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_driver_drop.rs
@@ -0,0 +1,54 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio::runtime;
+use tokio_test::{assert_err, assert_pending, assert_ready, task};
+
+#[test]
+fn tcp_doesnt_block() {
+ let rt = rt();
+
+ let listener = {
+ let _enter = rt.enter();
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ TcpListener::from_std(listener).unwrap()
+ };
+
+ drop(rt);
+
+ let mut task = task::spawn(async move {
+ assert_err!(listener.accept().await);
+ });
+
+ assert_ready!(task.poll());
+}
+
+#[test]
+fn drop_wakes() {
+ let rt = rt();
+
+ let listener = {
+ let _enter = rt.enter();
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ TcpListener::from_std(listener).unwrap()
+ };
+
+ let mut task = task::spawn(async move {
+ assert_err!(listener.accept().await);
+ });
+
+ assert_pending!(task.poll());
+
+ drop(rt);
+
+ assert!(task.is_woken());
+ assert_ready!(task.poll());
+}
+
+fn rt() -> runtime::Runtime {
+ runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/io_fill_buf.rs b/third_party/rust/tokio/tests/io_fill_buf.rs
new file mode 100644
index 0000000000..0b2ebd78fc
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_fill_buf.rs
@@ -0,0 +1,34 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tempfile::NamedTempFile;
+use tokio::fs::File;
+use tokio::io::{AsyncBufReadExt, BufReader};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn fill_buf_file() {
+ let file = NamedTempFile::new().unwrap();
+
+ assert_ok!(std::fs::write(file.path(), b"hello"));
+
+ let file = assert_ok!(File::open(file.path()).await);
+ let mut file = BufReader::new(file);
+
+ let mut contents = Vec::new();
+
+ loop {
+ let consumed = {
+ let buffer = assert_ok!(file.fill_buf().await);
+ if buffer.is_empty() {
+ break;
+ }
+ contents.extend_from_slice(buffer);
+ buffer.len()
+ };
+
+ file.consume(consumed);
+ }
+
+ assert_eq!(contents, b"hello");
+}
diff --git a/third_party/rust/tokio/tests/io_lines.rs b/third_party/rust/tokio/tests/io_lines.rs
new file mode 100644
index 0000000000..9996d81ca7
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_lines.rs
@@ -0,0 +1,19 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncBufReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn lines_inherent() {
+ let rd: &[u8] = b"hello\r\nworld\n\n";
+ let mut st = rd.lines();
+
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "hello");
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "world");
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "");
+ assert!(assert_ok!(st.next_line().await).is_none());
+}
diff --git a/third_party/rust/tokio/tests/io_mem_stream.rs b/third_party/rust/tokio/tests/io_mem_stream.rs
new file mode 100644
index 0000000000..a2c2dadfc9
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_mem_stream.rs
@@ -0,0 +1,121 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
+
+#[tokio::test]
+async fn ping_pong() {
+ let (mut a, mut b) = duplex(32);
+
+ let mut buf = [0u8; 4];
+
+ a.write_all(b"ping").await.unwrap();
+ b.read_exact(&mut buf).await.unwrap();
+ assert_eq!(&buf, b"ping");
+
+ b.write_all(b"pong").await.unwrap();
+ a.read_exact(&mut buf).await.unwrap();
+ assert_eq!(&buf, b"pong");
+}
+
+#[tokio::test]
+async fn across_tasks() {
+ let (mut a, mut b) = duplex(32);
+
+ let t1 = tokio::spawn(async move {
+ a.write_all(b"ping").await.unwrap();
+ let mut buf = [0u8; 4];
+ a.read_exact(&mut buf).await.unwrap();
+ assert_eq!(&buf, b"pong");
+ });
+
+ let t2 = tokio::spawn(async move {
+ let mut buf = [0u8; 4];
+ b.read_exact(&mut buf).await.unwrap();
+ assert_eq!(&buf, b"ping");
+ b.write_all(b"pong").await.unwrap();
+ });
+
+ t1.await.unwrap();
+ t2.await.unwrap();
+}
+
+#[tokio::test]
+async fn disconnect() {
+ let (mut a, mut b) = duplex(32);
+
+ let t1 = tokio::spawn(async move {
+ a.write_all(b"ping").await.unwrap();
+ // and dropped
+ });
+
+ let t2 = tokio::spawn(async move {
+ let mut buf = [0u8; 32];
+ let n = b.read(&mut buf).await.unwrap();
+ assert_eq!(&buf[..n], b"ping");
+
+ let n = b.read(&mut buf).await.unwrap();
+ assert_eq!(n, 0);
+ });
+
+ t1.await.unwrap();
+ t2.await.unwrap();
+}
+
+#[tokio::test]
+async fn disconnect_reader() {
+ let (a, mut b) = duplex(2);
+
+ let t1 = tokio::spawn(async move {
+ // this will block, as not all data fits into duplex
+ b.write_all(b"ping").await.unwrap_err();
+ });
+
+ let t2 = tokio::spawn(async move {
+ // here we drop the reader side, and we expect the writer in the other
+ // task to exit with an error
+ drop(a);
+ });
+
+ t2.await.unwrap();
+ t1.await.unwrap();
+}
+
+#[tokio::test]
+async fn max_write_size() {
+ let (mut a, mut b) = duplex(32);
+
+ let t1 = tokio::spawn(async move {
+ let n = a.write(&[0u8; 64]).await.unwrap();
+ assert_eq!(n, 32);
+ let n = a.write(&[0u8; 64]).await.unwrap();
+ assert_eq!(n, 4);
+ });
+
+ let mut buf = [0u8; 4];
+ b.read_exact(&mut buf).await.unwrap();
+
+ t1.await.unwrap();
+
+ // drop b only after task t1 finishes writing
+ drop(b);
+}
+
+#[tokio::test]
+async fn duplex_is_cooperative() {
+ let (mut tx, mut rx) = tokio::io::duplex(1024 * 8);
+
+ tokio::select! {
+ biased;
+
+ _ = async {
+ loop {
+ let buf = [3u8; 4096];
+ tx.write_all(&buf).await.unwrap();
+ let mut buf = [0u8; 4096];
+ rx.read(&mut buf).await.unwrap();
+ }
+ } => {},
+ _ = tokio::task::yield_now() => {}
+ }
+}
diff --git a/third_party/rust/tokio/tests/io_poll_aio.rs b/third_party/rust/tokio/tests/io_poll_aio.rs
new file mode 100644
index 0000000000..f044af5cc4
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_poll_aio.rs
@@ -0,0 +1,375 @@
+#![warn(rust_2018_idioms)]
+#![cfg(all(target_os = "freebsd", feature = "net"))]
+
+use mio_aio::{AioCb, AioFsyncMode, LioCb};
+use std::{
+ future::Future,
+ mem,
+ os::unix::io::{AsRawFd, RawFd},
+ pin::Pin,
+ task::{Context, Poll},
+};
+use tempfile::tempfile;
+use tokio::io::bsd::{Aio, AioSource};
+use tokio_test::assert_pending;
+
+mod aio {
+ use super::*;
+
+ /// Adapts mio_aio::AioCb (which implements mio::event::Source) to AioSource
+ struct WrappedAioCb<'a>(AioCb<'a>);
+ impl<'a> AioSource for WrappedAioCb<'a> {
+ fn register(&mut self, kq: RawFd, token: usize) {
+ self.0.register_raw(kq, token)
+ }
+ fn deregister(&mut self) {
+ self.0.deregister_raw()
+ }
+ }
+
+ /// A very crude implementation of an AIO-based future
+ struct FsyncFut(Aio<WrappedAioCb<'static>>);
+
+ impl Future for FsyncFut {
+ type Output = std::io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let poll_result = self.0.poll_ready(cx);
+ match poll_result {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Ready(Ok(_ev)) => {
+ // At this point, we could clear readiness. But there's no
+ // point, since we're about to drop the Aio.
+ let result = (*self.0).0.aio_return();
+ match result {
+ Ok(_) => Poll::Ready(Ok(())),
+ Err(e) => Poll::Ready(Err(e.into())),
+ }
+ }
+ }
+ }
+ }
+
+ /// Low-level AIO Source
+ ///
+    /// An example that bypasses mio_aio and Nix to demonstrate how the kevent
+    /// registration actually works under the hood.
+ struct LlSource(Pin<Box<libc::aiocb>>);
+
+ impl AioSource for LlSource {
+ fn register(&mut self, kq: RawFd, token: usize) {
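+            // With SIGEV_KEVENT notification, sigev_signo carries the kqueue
+            // descriptor and sigev_value is echoed back by the kernel as the
+            // kevent's udata, which is how the driver maps the completion back
+            // to this source's token.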
+ let mut sev: libc::sigevent = unsafe { mem::MaybeUninit::zeroed().assume_init() };
+ sev.sigev_notify = libc::SIGEV_KEVENT;
+ sev.sigev_signo = kq;
+ sev.sigev_value = libc::sigval {
+ sival_ptr: token as *mut libc::c_void,
+ };
+ self.0.aio_sigevent = sev;
+ }
+
+ fn deregister(&mut self) {
+ unsafe {
+ self.0.aio_sigevent = mem::zeroed();
+ }
+ }
+ }
+
+ struct LlFut(Aio<LlSource>);
+
+ impl Future for LlFut {
+ type Output = std::io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let poll_result = self.0.poll_ready(cx);
+ match poll_result {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Ready(Ok(_ev)) => {
+ let r = unsafe { libc::aio_return(self.0 .0.as_mut().get_unchecked_mut()) };
+ assert_eq!(0, r);
+ Poll::Ready(Ok(()))
+ }
+ }
+ }
+ }
+
+ /// A very simple object that can implement AioSource and can be reused.
+ ///
+ /// mio_aio normally assumes that each AioCb will be consumed on completion.
+ /// This somewhat contrived example shows how an Aio object can be reused
+ /// anyway.
+ struct ReusableFsyncSource {
+ aiocb: Pin<Box<AioCb<'static>>>,
+ fd: RawFd,
+ token: usize,
+ }
+ impl ReusableFsyncSource {
+ fn fsync(&mut self) {
+ self.aiocb.register_raw(self.fd, self.token);
+ self.aiocb.fsync(AioFsyncMode::O_SYNC).unwrap();
+ }
+ fn new(aiocb: AioCb<'static>) -> Self {
+ ReusableFsyncSource {
+ aiocb: Box::pin(aiocb),
+ fd: 0,
+ token: 0,
+ }
+ }
+ fn reset(&mut self, aiocb: AioCb<'static>) {
+ self.aiocb = Box::pin(aiocb);
+ }
+ }
+ impl AioSource for ReusableFsyncSource {
+ fn register(&mut self, kq: RawFd, token: usize) {
+ self.fd = kq;
+ self.token = token;
+ }
+ fn deregister(&mut self) {
+ self.fd = 0;
+ }
+ }
+
+ struct ReusableFsyncFut<'a>(&'a mut Aio<ReusableFsyncSource>);
+ impl<'a> Future for ReusableFsyncFut<'a> {
+ type Output = std::io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let poll_result = self.0.poll_ready(cx);
+ match poll_result {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Ready(Ok(ev)) => {
+ // Since this future uses a reusable Aio, we must clear
+ // its readiness here. That makes the future
+ // non-idempotent; the caller can't poll it repeatedly after
+ // it has already returned Ready. But that's ok; most
+ // futures behave this way.
+ self.0.clear_ready(ev);
+ let result = (*self.0).aiocb.aio_return();
+ match result {
+ Ok(_) => Poll::Ready(Ok(())),
+ Err(e) => Poll::Ready(Err(e.into())),
+ }
+ }
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn fsync() {
+ let f = tempfile().unwrap();
+ let fd = f.as_raw_fd();
+ let aiocb = AioCb::from_fd(fd, 0);
+ let source = WrappedAioCb(aiocb);
+ let mut poll_aio = Aio::new_for_aio(source).unwrap();
+ (*poll_aio).0.fsync(AioFsyncMode::O_SYNC).unwrap();
+ let fut = FsyncFut(poll_aio);
+ fut.await.unwrap();
+ }
+
+ #[tokio::test]
+ async fn ll_fsync() {
+ let f = tempfile().unwrap();
+ let fd = f.as_raw_fd();
+ let mut aiocb: libc::aiocb = unsafe { mem::MaybeUninit::zeroed().assume_init() };
+ aiocb.aio_fildes = fd;
+ let source = LlSource(Box::pin(aiocb));
+ let mut poll_aio = Aio::new_for_aio(source).unwrap();
+ let r = unsafe {
+ let p = (*poll_aio).0.as_mut().get_unchecked_mut();
+ libc::aio_fsync(libc::O_SYNC, p)
+ };
+ assert_eq!(0, r);
+ let fut = LlFut(poll_aio);
+ fut.await.unwrap();
+ }
+
+ /// A suitably crafted future type can reuse an Aio object
+ #[tokio::test]
+ async fn reuse() {
+ let f = tempfile().unwrap();
+ let fd = f.as_raw_fd();
+ let aiocb0 = AioCb::from_fd(fd, 0);
+ let source = ReusableFsyncSource::new(aiocb0);
+ let mut poll_aio = Aio::new_for_aio(source).unwrap();
+ poll_aio.fsync();
+ let fut0 = ReusableFsyncFut(&mut poll_aio);
+ fut0.await.unwrap();
+
+ let aiocb1 = AioCb::from_fd(fd, 0);
+ poll_aio.reset(aiocb1);
+ let mut ctx = Context::from_waker(futures::task::noop_waker_ref());
+ assert_pending!(poll_aio.poll_ready(&mut ctx));
+ poll_aio.fsync();
+ let fut1 = ReusableFsyncFut(&mut poll_aio);
+ fut1.await.unwrap();
+ }
+}
+
+mod lio {
+ use super::*;
+
+ struct WrappedLioCb<'a>(LioCb<'a>);
+ impl<'a> AioSource for WrappedLioCb<'a> {
+ fn register(&mut self, kq: RawFd, token: usize) {
+ self.0.register_raw(kq, token)
+ }
+ fn deregister(&mut self) {
+ self.0.deregister_raw()
+ }
+ }
+
+ /// A very crude lio_listio-based Future
+ struct LioFut(Option<Aio<WrappedLioCb<'static>>>);
+
+ impl Future for LioFut {
+ type Output = std::io::Result<Vec<isize>>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let poll_result = self.0.as_mut().unwrap().poll_ready(cx);
+ match poll_result {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Ready(Ok(_ev)) => {
+ // At this point, we could clear readiness. But there's no
+ // point, since we're about to drop the Aio.
+ let r = self.0.take().unwrap().into_inner().0.into_results(|iter| {
+ iter.map(|lr| lr.result.unwrap()).collect::<Vec<isize>>()
+ });
+ Poll::Ready(Ok(r))
+ }
+ }
+ }
+ }
+
+ /// Minimal example demonstrating reuse of an Aio object with lio
+ /// readiness. mio_aio::LioCb actually does something similar under the
+ /// hood.
+ struct ReusableLioSource {
+ liocb: Option<LioCb<'static>>,
+ fd: RawFd,
+ token: usize,
+ }
+ impl ReusableLioSource {
+ fn new(liocb: LioCb<'static>) -> Self {
+ ReusableLioSource {
+ liocb: Some(liocb),
+ fd: 0,
+ token: 0,
+ }
+ }
+ fn reset(&mut self, liocb: LioCb<'static>) {
+ self.liocb = Some(liocb);
+ }
+ fn submit(&mut self) {
+ self.liocb
+ .as_mut()
+ .unwrap()
+ .register_raw(self.fd, self.token);
+ self.liocb.as_mut().unwrap().submit().unwrap();
+ }
+ }
+ impl AioSource for ReusableLioSource {
+ fn register(&mut self, kq: RawFd, token: usize) {
+ self.fd = kq;
+ self.token = token;
+ }
+ fn deregister(&mut self) {
+ self.fd = 0;
+ }
+ }
+ struct ReusableLioFut<'a>(&'a mut Aio<ReusableLioSource>);
+ impl<'a> Future for ReusableLioFut<'a> {
+ type Output = std::io::Result<Vec<isize>>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let poll_result = self.0.poll_ready(cx);
+ match poll_result {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Ready(Ok(ev)) => {
+ // Since this future uses a reusable Aio, we must clear
+ // its readiness here. That makes the future
+ // non-idempotent; the caller can't poll it repeatedly after
+ // it has already returned Ready. But that's ok; most
+ // futures behave this way.
+ self.0.clear_ready(ev);
+ let r = (*self.0).liocb.take().unwrap().into_results(|iter| {
+ iter.map(|lr| lr.result.unwrap()).collect::<Vec<isize>>()
+ });
+ Poll::Ready(Ok(r))
+ }
+ }
+ }
+ }
+
+ /// An lio_listio operation with one write element
+ #[tokio::test]
+ async fn onewrite() {
+ const WBUF: &[u8] = b"abcdef";
+ let f = tempfile().unwrap();
+
+ let mut builder = mio_aio::LioCbBuilder::with_capacity(1);
+ builder = builder.emplace_slice(
+ f.as_raw_fd(),
+ 0,
+ &WBUF[..],
+ 0,
+ mio_aio::LioOpcode::LIO_WRITE,
+ );
+ let liocb = builder.finish();
+ let source = WrappedLioCb(liocb);
+ let mut poll_aio = Aio::new_for_lio(source).unwrap();
+
+ // Send the operation to the kernel
+ (*poll_aio).0.submit().unwrap();
+ let fut = LioFut(Some(poll_aio));
+ let v = fut.await.unwrap();
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0] as usize, WBUF.len());
+ }
+
+ /// A suitably crafted future type can reuse an Aio object
+ #[tokio::test]
+ async fn reuse() {
+ const WBUF: &[u8] = b"abcdef";
+ let f = tempfile().unwrap();
+
+ let mut builder0 = mio_aio::LioCbBuilder::with_capacity(1);
+ builder0 = builder0.emplace_slice(
+ f.as_raw_fd(),
+ 0,
+ &WBUF[..],
+ 0,
+ mio_aio::LioOpcode::LIO_WRITE,
+ );
+ let liocb0 = builder0.finish();
+ let source = ReusableLioSource::new(liocb0);
+ let mut poll_aio = Aio::new_for_aio(source).unwrap();
+ poll_aio.submit();
+ let fut0 = ReusableLioFut(&mut poll_aio);
+ let v = fut0.await.unwrap();
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0] as usize, WBUF.len());
+
+ // Now reuse the same Aio
+ let mut builder1 = mio_aio::LioCbBuilder::with_capacity(1);
+ builder1 = builder1.emplace_slice(
+ f.as_raw_fd(),
+ 0,
+ &WBUF[..],
+ 0,
+ mio_aio::LioOpcode::LIO_WRITE,
+ );
+ let liocb1 = builder1.finish();
+ poll_aio.reset(liocb1);
+ let mut ctx = Context::from_waker(futures::task::noop_waker_ref());
+ assert_pending!(poll_aio.poll_ready(&mut ctx));
+ poll_aio.submit();
+ let fut1 = ReusableLioFut(&mut poll_aio);
+ let v = fut1.await.unwrap();
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0] as usize, WBUF.len());
+ }
+}
diff --git a/third_party/rust/tokio/tests/io_read.rs b/third_party/rust/tokio/tests/io_read.rs
new file mode 100644
index 0000000000..cb1aa70523
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read.rs
@@ -0,0 +1,59 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
+use tokio_test::assert_ok;
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn read() {
+ #[derive(Default)]
+ struct Rd {
+ poll_cnt: usize,
+ }
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ assert_eq!(0, self.poll_cnt);
+ self.poll_cnt += 1;
+
+ buf.put_slice(b"hello world");
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ let mut buf = Box::new([0; 11]);
+ let mut rd = Rd::default();
+
+ let n = assert_ok!(rd.read(&mut buf[..]).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], b"hello world"[..]);
+}
+
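+/// An intentionally misbehaving reader: it swaps the caller's `ReadBuf` for a
+/// freshly leaked one and advances that instead, so the reported progress does
+/// not correspond to the caller's buffer. `read_buf` is expected to detect
+/// this and panic (see the `should_panic` test below).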
+struct BadAsyncRead;
+
+impl AsyncRead for BadAsyncRead {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ *buf = ReadBuf::new(Box::leak(vec![0; buf.capacity()].into_boxed_slice()));
+ buf.advance(buf.capacity());
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[tokio::test]
+#[should_panic]
+async fn read_buf_bad_async_read() {
+ let mut buf = Vec::with_capacity(10);
+ BadAsyncRead.read_buf(&mut buf).await.unwrap();
+}
diff --git a/third_party/rust/tokio/tests/io_read_buf.rs b/third_party/rust/tokio/tests/io_read_buf.rs
new file mode 100644
index 0000000000..0328168d7a
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_buf.rs
@@ -0,0 +1,36 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
+use tokio_test::assert_ok;
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn read_buf() {
+ struct Rd {
+ cnt: usize,
+ }
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ self.cnt += 1;
+ buf.put_slice(b"hello world");
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ let mut buf = vec![];
+ let mut rd = Rd { cnt: 0 };
+
+ let n = assert_ok!(rd.read_buf(&mut buf).await);
+ assert_eq!(1, rd.cnt);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], b"hello world"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_read_exact.rs b/third_party/rust/tokio/tests/io_read_exact.rs
new file mode 100644
index 0000000000..d0e659bd33
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_exact.rs
@@ -0,0 +1,15 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_exact() {
+ let mut buf = Box::new([0; 8]);
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_exact(&mut buf[..]).await);
+ assert_eq!(n, 8);
+ assert_eq!(buf[..], b"hello wo"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_read_line.rs b/third_party/rust/tokio/tests/io_read_line.rs
new file mode 100644
index 0000000000..15841c9b49
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_line.rs
@@ -0,0 +1,107 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::ErrorKind;
+use tokio::io::{AsyncBufReadExt, BufReader, Error};
+use tokio_test::{assert_ok, io::Builder};
+
+use std::io::Cursor;
+
+#[tokio::test]
+async fn read_line() {
+ let mut buf = String::new();
+ let mut rd = Cursor::new(b"hello\nworld\n\n");
+
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, "hello\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, "world\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 1);
+ assert_eq!(buf, "\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 0);
+ assert_eq!(buf, "");
+}
+
+#[tokio::test]
+async fn read_line_not_all_ready() {
+ let mock = Builder::new()
+ .read(b"Hello Wor")
+ .read(b"ld\nFizzBuz")
+ .read(b"z\n1\n2")
+ .build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut line = "We say ".to_string();
+ let bytes = read.read_line(&mut line).await.unwrap();
+ assert_eq!(bytes, "Hello World\n".len());
+ assert_eq!(line.as_str(), "We say Hello World\n");
+
+ line = "I solve ".to_string();
+ let bytes = read.read_line(&mut line).await.unwrap();
+ assert_eq!(bytes, "FizzBuzz\n".len());
+ assert_eq!(line.as_str(), "I solve FizzBuzz\n");
+
+ line.clear();
+ let bytes = read.read_line(&mut line).await.unwrap();
+ assert_eq!(bytes, 2);
+ assert_eq!(line.as_str(), "1\n");
+
+ line.clear();
+ let bytes = read.read_line(&mut line).await.unwrap();
+ assert_eq!(bytes, 1);
+ assert_eq!(line.as_str(), "2");
+}
+
+#[tokio::test]
+async fn read_line_invalid_utf8() {
+ let mock = Builder::new().read(b"Hello Wor\xffld.\n").build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut line = "Foo".to_string();
+ let err = read.read_line(&mut line).await.expect_err("Should fail");
+ assert_eq!(err.kind(), ErrorKind::InvalidData);
+ assert_eq!(err.to_string(), "stream did not contain valid UTF-8");
+ assert_eq!(line.as_str(), "Foo");
+}
+
+#[tokio::test]
+async fn read_line_fail() {
+ let mock = Builder::new()
+ .read(b"Hello Wor")
+ .read_error(Error::new(ErrorKind::Other, "The world has no end"))
+ .build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut line = "Foo".to_string();
+ let err = read.read_line(&mut line).await.expect_err("Should fail");
+ assert_eq!(err.kind(), ErrorKind::Other);
+ assert_eq!(err.to_string(), "The world has no end");
+ assert_eq!(line.as_str(), "FooHello Wor");
+}
+
+#[tokio::test]
+async fn read_line_fail_and_utf8_fail() {
+ let mock = Builder::new()
+ .read(b"Hello Wor")
+ .read(b"\xff\xff\xff")
+ .read_error(Error::new(ErrorKind::Other, "The world has no end"))
+ .build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut line = "Foo".to_string();
+ let err = read.read_line(&mut line).await.expect_err("Should fail");
+ assert_eq!(err.kind(), ErrorKind::Other);
+ assert_eq!(err.to_string(), "The world has no end");
+ assert_eq!(line.as_str(), "Foo");
+}
diff --git a/third_party/rust/tokio/tests/io_read_to_end.rs b/third_party/rust/tokio/tests/io_read_to_end.rs
new file mode 100644
index 0000000000..171e6d6480
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_to_end.rs
@@ -0,0 +1,78 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_to_end() {
+ let mut buf = vec![];
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_to_end(&mut buf).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], b"hello world"[..]);
+}
+
+#[derive(Copy, Clone, Debug)]
+enum State {
+ Initializing,
+ JustFilling,
+ Done,
+}
+
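+/// Tracks how many bytes beyond the filled region this reader has initialized,
+/// so each call to `poll_read` can assert that `read_to_end` reports back the
+/// same amount, i.e. that the initialized-but-unfilled region is neither lost
+/// nor overstated between calls.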
+struct UninitTest {
+ num_init: usize,
+ state: State,
+}
+
+impl AsyncRead for UninitTest {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<std::io::Result<()>> {
+ let me = Pin::into_inner(self);
+ let real_num_init = buf.initialized().len() - buf.filled().len();
+ assert_eq!(real_num_init, me.num_init, "{:?}", me.state);
+
+ match me.state {
+ State::Initializing => {
+ buf.initialize_unfilled_to(me.num_init + 2);
+ buf.advance(1);
+ me.num_init += 1;
+
+ if me.num_init == 24 {
+ me.state = State::JustFilling;
+ }
+ }
+ State::JustFilling => {
+ buf.advance(1);
+ me.num_init -= 1;
+
+ if me.num_init == 15 {
+ // The buffer is resized on next call.
+ me.num_init = 0;
+ me.state = State::Done;
+ }
+ }
+ State::Done => { /* .. do nothing .. */ }
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[tokio::test]
+async fn read_to_end_uninit() {
+ let mut buf = Vec::with_capacity(64);
+ let mut test = UninitTest {
+ num_init: 0,
+ state: State::Initializing,
+ };
+
+ test.read_to_end(&mut buf).await.unwrap();
+ assert_eq!(buf.len(), 33);
+}
diff --git a/third_party/rust/tokio/tests/io_read_to_string.rs b/third_party/rust/tokio/tests/io_read_to_string.rs
new file mode 100644
index 0000000000..f30c26caa8
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_to_string.rs
@@ -0,0 +1,63 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io;
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+use tokio_test::io::Builder;
+
+#[tokio::test]
+async fn read_to_string() {
+ let mut buf = String::new();
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_to_string(&mut buf).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], "hello world"[..]);
+}
+
+#[tokio::test]
+async fn to_string_does_not_truncate_on_utf8_error() {
+ let data = vec![0xff, 0xff, 0xff];
+
+ let mut s = "abc".to_string();
+
+ match AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s).await {
+ Ok(len) => panic!("Should fail: {} bytes.", len),
+ Err(err) if err.to_string() == "stream did not contain valid UTF-8" => {}
+ Err(err) => panic!("Fail: {}.", err),
+ }
+
+ assert_eq!(s, "abc");
+}
+
+#[tokio::test]
+async fn to_string_does_not_truncate_on_io_error() {
+ let mut mock = Builder::new()
+ .read(b"def")
+ .read_error(io::Error::new(io::ErrorKind::Other, "whoops"))
+ .build();
+ let mut s = "abc".to_string();
+
+ match AsyncReadExt::read_to_string(&mut mock, &mut s).await {
+ Ok(len) => panic!("Should fail: {} bytes.", len),
+ Err(err) if err.to_string() == "whoops" => {}
+ Err(err) => panic!("Fail: {}.", err),
+ }
+
+ assert_eq!(s, "abc");
+}
+
+#[tokio::test]
+async fn to_string_appends() {
+ let data = b"def".to_vec();
+
+ let mut s = "abc".to_string();
+
+ let len = AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s)
+ .await
+ .unwrap();
+
+ assert_eq!(len, 3);
+ assert_eq!(s, "abcdef");
+}
diff --git a/third_party/rust/tokio/tests/io_read_until.rs b/third_party/rust/tokio/tests/io_read_until.rs
new file mode 100644
index 0000000000..61800a0d9c
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_until.rs
@@ -0,0 +1,74 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::ErrorKind;
+use tokio::io::{AsyncBufReadExt, BufReader, Error};
+use tokio_test::{assert_ok, io::Builder};
+
+#[tokio::test]
+async fn read_until() {
+ let mut buf = vec![];
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, b"hello ");
+ buf.clear();
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 5);
+ assert_eq!(buf, b"world");
+ buf.clear();
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 0);
+ assert_eq!(buf, []);
+}
+
+#[tokio::test]
+async fn read_until_not_all_ready() {
+ let mock = Builder::new()
+ .read(b"Hello Wor")
+ .read(b"ld#Fizz\xffBuz")
+ .read(b"z#1#2")
+ .build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut chunk = b"We say ".to_vec();
+ let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
+ assert_eq!(bytes, b"Hello World#".len());
+ assert_eq!(chunk, b"We say Hello World#");
+
+ chunk = b"I solve ".to_vec();
+ let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
+    assert_eq!(bytes, b"Fizz\xffBuzz#".len());
+ assert_eq!(chunk, b"I solve Fizz\xffBuzz#");
+
+ chunk.clear();
+ let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
+ assert_eq!(bytes, 2);
+ assert_eq!(chunk, b"1#");
+
+ chunk.clear();
+ let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
+ assert_eq!(bytes, 1);
+ assert_eq!(chunk, b"2");
+}
+
+#[tokio::test]
+async fn read_until_fail() {
+ let mock = Builder::new()
+ .read(b"Hello \xffWor")
+ .read_error(Error::new(ErrorKind::Other, "The world has no end"))
+ .build();
+
+ let mut read = BufReader::new(mock);
+
+ let mut chunk = b"Foo".to_vec();
+ let err = read
+ .read_until(b'#', &mut chunk)
+ .await
+ .expect_err("Should fail");
+ assert_eq!(err.kind(), ErrorKind::Other);
+ assert_eq!(err.to_string(), "The world has no end");
+ assert_eq!(chunk, b"FooHello \xffWor");
+}
diff --git a/third_party/rust/tokio/tests/io_split.rs b/third_party/rust/tokio/tests/io_split.rs
new file mode 100644
index 0000000000..a0121667f7
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_split.rs
@@ -0,0 +1,79 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+struct RW;
+
+impl AsyncRead for RW {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ buf.put_slice(&[b'z']);
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl AsyncWrite for RW {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ Poll::Ready(Ok(1))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[test]
+fn is_send_and_sync() {
+ fn assert_bound<T: Send + Sync>() {}
+
+ assert_bound::<ReadHalf<RW>>();
+ assert_bound::<WriteHalf<RW>>();
+}
+
+#[test]
+fn split_stream_id() {
+ let (r1, w1) = split(RW);
+ let (r2, w2) = split(RW);
+ assert!(r1.is_pair_of(&w1));
+ assert!(!r1.is_pair_of(&w2));
+ assert!(r2.is_pair_of(&w2));
+ assert!(!r2.is_pair_of(&w1));
+}
+
+#[test]
+fn unsplit_ok() {
+ let (r, w) = split(RW);
+ r.unsplit(w);
+}
+
+#[test]
+#[should_panic]
+fn unsplit_err1() {
+ let (r, _) = split(RW);
+ let (_, w) = split(RW);
+ r.unsplit(w);
+}
+
+#[test]
+#[should_panic]
+fn unsplit_err2() {
+ let (_, w) = split(RW);
+ let (r, _) = split(RW);
+ r.unsplit(w);
+}
diff --git a/third_party/rust/tokio/tests/io_take.rs b/third_party/rust/tokio/tests/io_take.rs
new file mode 100644
index 0000000000..684e041a67
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_take.rs
@@ -0,0 +1,61 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::io::{self, AsyncRead, AsyncReadExt, ReadBuf};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn take() {
+ let mut buf = [0; 6];
+ let rd: &[u8] = b"hello world";
+
+ let mut rd = rd.take(4);
+ let n = assert_ok!(rd.read(&mut buf).await);
+ assert_eq!(n, 4);
+ assert_eq!(&buf, &b"hell\0\0"[..]);
+}
+
+#[tokio::test]
+async fn issue_4435() {
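+    // Regression test: polling a `Take` with a `ReadBuf` that already holds
+    // data (the "AB" prefix) must preserve that data and append after it
+    // rather than overwriting from the start of the buffer.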
+ let mut buf = [0; 8];
+ let rd: &[u8] = b"hello world";
+
+ let rd = rd.take(4);
+ tokio::pin!(rd);
+
+ let mut read_buf = ReadBuf::new(&mut buf);
+ read_buf.put_slice(b"AB");
+
+ futures::future::poll_fn(|cx| rd.as_mut().poll_read(cx, &mut read_buf))
+ .await
+ .unwrap();
+ assert_eq!(&buf, &b"ABhell\0\0"[..]);
+}
+
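+/// A reader that replaces the caller's `ReadBuf` with its own leaked buffer and
+/// claims ten bytes of progress; the `take` adaptor must not trust that
+/// accounting, so the test below expects a panic.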
+struct BadReader;
+
+impl AsyncRead for BadReader {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ read_buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ let vec = vec![0; 10];
+
+ let mut buf = ReadBuf::new(vec.leak());
+ buf.put_slice(&[123; 10]);
+ *read_buf = buf;
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[tokio::test]
+#[should_panic]
+async fn bad_reader_fails() {
+ let mut buf = Vec::with_capacity(10);
+
+ BadReader.take(10).read_buf(&mut buf).await.unwrap();
+}
diff --git a/third_party/rust/tokio/tests/io_util_empty.rs b/third_party/rust/tokio/tests/io_util_empty.rs
new file mode 100644
index 0000000000..e49cd17fcd
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_util_empty.rs
@@ -0,0 +1,32 @@
+#![cfg(feature = "full")]
+use tokio::io::{AsyncBufReadExt, AsyncReadExt};
+
+#[tokio::test]
+async fn empty_read_is_cooperative() {
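+    // `tokio::io::empty()` completes every read immediately, so the loop below
+    // would never yield on its own; the test checks that these reads consume
+    // cooperative budget, allowing the `yield_now` branch to win.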
+ tokio::select! {
+ biased;
+
+ _ = async {
+ loop {
+ let mut buf = [0u8; 4096];
+ let _ = tokio::io::empty().read(&mut buf).await;
+ }
+ } => {},
+ _ = tokio::task::yield_now() => {}
+ }
+}
+
+#[tokio::test]
+async fn empty_buf_reads_are_cooperative() {
+ tokio::select! {
+ biased;
+
+ _ = async {
+ loop {
+ let mut buf = String::new();
+ let _ = tokio::io::empty().read_line(&mut buf).await;
+ }
+ } => {},
+ _ = tokio::task::yield_now() => {}
+ }
+}
diff --git a/third_party/rust/tokio/tests/io_write.rs b/third_party/rust/tokio/tests/io_write.rs
new file mode 100644
index 0000000000..96cebc3313
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write.rs
@@ -0,0 +1,58 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::assert_ok;
+
+use bytes::BytesMut;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ assert_eq!(self.cnt, 0);
+ self.buf.extend(&buf[0..4]);
+ Ok(4).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ let n = assert_ok!(wr.write(b"hello world").await);
+ assert_eq!(n, 4);
+ assert_eq!(wr.buf, b"hell"[..]);
+}
+
+#[tokio::test]
+async fn write_cursor() {
+ use std::io::Cursor;
+
+ let mut wr = Cursor::new(Vec::new());
+
+ let n = assert_ok!(wr.write(b"hello world").await);
+ assert_eq!(n, 11);
+ assert_eq!(wr.get_ref().as_slice(), &b"hello world"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_write_all.rs b/third_party/rust/tokio/tests/io_write_all.rs
new file mode 100644
index 0000000000..7ca02228a3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_all.rs
@@ -0,0 +1,51 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::assert_ok;
+
+use bytes::BytesMut;
+use std::cmp;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_all() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ let n = cmp::min(4, buf.len());
+ let buf = &buf[0..n];
+
+ self.cnt += 1;
+ self.buf.extend(buf);
+ Ok(buf.len()).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ assert_ok!(wr.write_all(b"hello world").await);
+ assert_eq!(wr.buf, b"hello world"[..]);
+ assert_eq!(wr.cnt, 3);
+}
diff --git a/third_party/rust/tokio/tests/io_write_all_buf.rs b/third_party/rust/tokio/tests/io_write_all_buf.rs
new file mode 100644
index 0000000000..7c8b619358
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_all_buf.rs
@@ -0,0 +1,96 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::{assert_err, assert_ok};
+
+use bytes::{Buf, Bytes, BytesMut};
+use std::cmp;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_all_buf() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ let n = cmp::min(4, buf.len());
+ dbg!(buf);
+ let buf = &buf[0..n];
+
+ self.cnt += 1;
+ self.buf.extend(buf);
+ Ok(buf.len()).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ let mut buf = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world"));
+
+ assert_ok!(wr.write_all_buf(&mut buf).await);
+ assert_eq!(wr.buf, b"helloworld"[..]);
+ // expect 4 writes, [hell],[o],[worl],[d]
+ assert_eq!(wr.cnt, 4);
+ assert!(!buf.has_remaining());
+}
+
+#[tokio::test]
+async fn write_buf_err() {
+ /// Error out after writing the first 4 bytes
+ struct Wr {
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.cnt += 1;
+ if self.cnt == 2 {
+ return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, "whoops")));
+ }
+ Poll::Ready(Ok(4))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr { cnt: 0 };
+
+ let mut buf = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world"));
+
+ assert_err!(wr.write_all_buf(&mut buf).await);
+ assert_eq!(
+ buf.copy_to_bytes(buf.remaining()),
+ Bytes::from_static(b"oworld")
+ );
+}
diff --git a/third_party/rust/tokio/tests/io_write_buf.rs b/third_party/rust/tokio/tests/io_write_buf.rs
new file mode 100644
index 0000000000..9ae655b6cc
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_buf.rs
@@ -0,0 +1,56 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::assert_ok;
+
+use bytes::BytesMut;
+use std::cmp;
+use std::io::{self, Cursor};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_all() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ assert_eq!(self.cnt, 0);
+
+ let n = cmp::min(4, buf.len());
+ let buf = &buf[0..n];
+
+ self.cnt += 1;
+ self.buf.extend(buf);
+ Ok(buf.len()).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ let mut buf = Cursor::new(&b"hello world"[..]);
+
+ assert_ok!(wr.write_buf(&mut buf).await);
+ assert_eq!(wr.buf, b"hell"[..]);
+ assert_eq!(wr.cnt, 1);
+ assert_eq!(buf.position(), 4);
+}
diff --git a/third_party/rust/tokio/tests/io_write_int.rs b/third_party/rust/tokio/tests/io_write_int.rs
new file mode 100644
index 0000000000..48a583d8c3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_int.rs
@@ -0,0 +1,37 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_int_should_err_if_write_count_0() {
+ struct Wr {}
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Ok(0).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {};
+
+    // It should be enough to test just these two; the other cases are expanded by the same macro.
+ assert!(wr.write_i8(0).await.is_err());
+ assert!(wr.write_i32(12).await.is_err());
+}
diff --git a/third_party/rust/tokio/tests/join_handle_panic.rs b/third_party/rust/tokio/tests/join_handle_panic.rs
new file mode 100644
index 0000000000..f7de92d417
--- /dev/null
+++ b/third_party/rust/tokio/tests/join_handle_panic.rs
@@ -0,0 +1,20 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+struct PanicsOnDrop;
+
+impl Drop for PanicsOnDrop {
+ fn drop(&mut self) {
+ panic!("I told you so");
+ }
+}
+
+#[tokio::test]
+async fn test_panics_do_not_propagate_when_dropping_join_handle() {
+ let join_handle = tokio::spawn(async move { PanicsOnDrop });
+
+ // only drop the JoinHandle when the task has completed
+ // (which is difficult to synchronize precisely)
+ tokio::time::sleep(std::time::Duration::from_millis(3)).await;
+ drop(join_handle);
+}
diff --git a/third_party/rust/tokio/tests/macros_join.rs b/third_party/rust/tokio/tests/macros_join.rs
new file mode 100644
index 0000000000..d4f20b3862
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_join.rs
@@ -0,0 +1,82 @@
+#![cfg(feature = "macros")]
+#![allow(clippy::blacklisted_name)]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::oneshot;
+use tokio_test::{assert_pending, assert_ready, task};
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::join!(async { 1 },);
+
+ assert_eq!(foo, (1,));
+}
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::join!(async { 1 });
+
+ assert_eq!(foo, (1,));
+}
+
+#[maybe_tokio_test]
+async fn sync_two_lit_expr_comma() {
+ let foo = tokio::join!(async { 1 }, async { 2 },);
+
+ assert_eq!(foo, (1, 2));
+}
+
+#[maybe_tokio_test]
+async fn sync_two_lit_expr_no_comma() {
+ let foo = tokio::join!(async { 1 }, async { 2 });
+
+ assert_eq!(foo, (1, 2));
+}
+
+#[maybe_tokio_test]
+async fn two_await() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+
+ let mut join = task::spawn(async {
+ tokio::join!(async { rx1.await.unwrap() }, async { rx2.await.unwrap() })
+ });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ tx1.send("hello").unwrap();
+ assert!(join.is_woken());
+ let res = assert_ready!(join.poll());
+
+ assert_eq!(("hello", 123), res);
+}
+
+#[test]
+fn join_size() {
+ use futures::future;
+ use std::mem;
+
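+    // These exact sizes act as a regression check on the size of the future
+    // generated by `join!`; a change here usually means the macro expansion
+    // altered the generated state machine.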
+ let fut = async {
+ let ready = future::ready(0i32);
+ tokio::join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+ tokio::join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
diff --git a/third_party/rust/tokio/tests/macros_pin.rs b/third_party/rust/tokio/tests/macros_pin.rs
new file mode 100644
index 0000000000..70c95a1c82
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_pin.rs
@@ -0,0 +1,21 @@
+#![cfg(feature = "macros")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+async fn one() {}
+async fn two() {}
+
+#[maybe_tokio_test]
+async fn multi_pin() {
+ tokio::pin! {
+ let f1 = one();
+ let f2 = two();
+ }
+
+ (&mut f1).await;
+ (&mut f2).await;
+}
diff --git a/third_party/rust/tokio/tests/macros_select.rs b/third_party/rust/tokio/tests/macros_select.rs
new file mode 100644
index 0000000000..755365affb
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_select.rs
@@ -0,0 +1,600 @@
+#![cfg(feature = "macros")]
+#![allow(clippy::blacklisted_name)]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::oneshot;
+use tokio_test::{assert_ok, assert_pending, assert_ready};
+
+use futures::future::poll_fn;
+use std::task::Poll::Ready;
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::select! {
+ foo = async { 1 } => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn nested_one() {
+ let foo = tokio::select! {
+ foo = async { 1 } => tokio::select! {
+ bar = async { foo } => bar,
+ },
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::select! {
+ foo = async { 1 } => foo
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_block() {
+ let foo = tokio::select! {
+ foo = async { 1 } => { foo }
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn sync_one_await() {
+ let foo = tokio::select! {
+ foo = one() => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn sync_one_ident() {
+ let one = one();
+
+ let foo = tokio::select! {
+ foo = one => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[maybe_tokio_test]
+async fn sync_two() {
+ use std::cell::Cell;
+
+ let cnt = Cell::new(0);
+
+ let res = tokio::select! {
+ foo = async {
+ cnt.set(cnt.get() + 1);
+ 1
+ } => foo,
+ bar = async {
+ cnt.set(cnt.get() + 1);
+ 2
+ } => bar,
+ };
+
+ assert_eq!(1, cnt.get());
+ assert!(res == 1 || res == 2);
+}
+
+#[maybe_tokio_test]
+async fn drop_in_fut() {
+ let s = "hello".to_string();
+
+ let res = tokio::select! {
+ foo = async {
+ let v = one().await;
+ drop(s);
+ v
+ } => foo
+ };
+
+ assert_eq!(res, 1);
+}
+
+#[maybe_tokio_test]
+#[cfg(feature = "full")]
+async fn one_ready() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (_tx2, rx2) = oneshot::channel::<i32>();
+
+ tx1.send(1).unwrap();
+
+ let v = tokio::select! {
+ res = rx1 => {
+ assert_ok!(res)
+ },
+ _ = rx2 => unreachable!(),
+ };
+
+ assert_eq!(1, v);
+}
+
+#[maybe_tokio_test]
+#[cfg(feature = "full")]
+async fn select_streams() {
+ use tokio::sync::mpsc;
+
+ let (tx1, mut rx1) = mpsc::unbounded_channel::<i32>();
+ let (tx2, mut rx2) = mpsc::unbounded_channel::<i32>();
+
+ tokio::spawn(async move {
+ assert_ok!(tx2.send(1));
+ tokio::task::yield_now().await;
+
+ assert_ok!(tx1.send(2));
+ tokio::task::yield_now().await;
+
+ assert_ok!(tx2.send(3));
+ tokio::task::yield_now().await;
+
+ drop((tx1, tx2));
+ });
+
+ let mut rem = true;
+ let mut msgs = vec![];
+
+ while rem {
+ tokio::select! {
+ Some(x) = rx1.recv() => {
+ msgs.push(x);
+ }
+ Some(y) = rx2.recv() => {
+ msgs.push(y);
+ }
+ else => {
+ rem = false;
+ }
+ }
+ }
+
+ msgs.sort_unstable();
+ assert_eq!(&msgs[..], &[1, 2, 3]);
+}
+
+#[maybe_tokio_test]
+async fn move_uncompleted_futures() {
+ let (tx1, mut rx1) = oneshot::channel::<i32>();
+ let (tx2, mut rx2) = oneshot::channel::<i32>();
+
+ tx1.send(1).unwrap();
+ tx2.send(2).unwrap();
+
+ let ran;
+
+ tokio::select! {
+ res = &mut rx1 => {
+ assert_eq!(1, assert_ok!(res));
+ assert_eq!(2, assert_ok!(rx2.await));
+ ran = true;
+ },
+ res = &mut rx2 => {
+ assert_eq!(2, assert_ok!(res));
+ assert_eq!(1, assert_ok!(rx1.await));
+ ran = true;
+ },
+ }
+
+ assert!(ran);
+}
+
+#[maybe_tokio_test]
+async fn nested() {
+ let res = tokio::select! {
+ x = async { 1 } => {
+ tokio::select! {
+ y = async { 2 } => x + y,
+ }
+ }
+ };
+
+ assert_eq!(res, 3);
+}
+
+#[maybe_tokio_test]
+async fn struct_size() {
+ use futures::future;
+ use std::mem;
+
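+    // Upper bounds on the size of the futures generated by `select!`; they act
+    // as a regression check that adding branches does not bloat the generated
+    // state machine.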
+ let fut = async {
+ let ready = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 32);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready1 => {},
+ _ = ready2 => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 40);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+ let ready3 = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready1 => {},
+ _ = ready2 => {},
+ _ = ready3 => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 48);
+}
+
+#[maybe_tokio_test]
+async fn mutable_borrowing_future_with_same_borrow_in_block() {
+ let mut value = 234;
+
+ tokio::select! {
+ _ = require_mutable(&mut value) => { },
+ _ = async_noop() => {
+ value += 5;
+ },
+ }
+
+ assert!(value >= 234);
+}
+
+#[maybe_tokio_test]
+async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() {
+ let mut value = 234;
+
+ tokio::select! {
+ _ = require_mutable(&mut value) => { },
+ _ = async_noop() => {
+ value += 5;
+ },
+ else => {
+ value += 27;
+ },
+ }
+
+ assert!(value >= 234);
+}
+
+#[maybe_tokio_test]
+async fn future_panics_after_poll() {
+ use tokio_test::task;
+
+ let (tx, rx) = oneshot::channel();
+
+ let mut polled = false;
+
+ let f = poll_fn(|_| {
+ assert!(!polled);
+ polled = true;
+ Ready(None::<()>)
+ });
+
+ let mut f = task::spawn(async {
+ tokio::select! {
+ Some(_) = f => unreachable!(),
+ ret = rx => ret.unwrap(),
+ }
+ });
+
+ assert_pending!(f.poll());
+ assert_pending!(f.poll());
+
+ assert_ok!(tx.send(1));
+
+ let res = assert_ready!(f.poll());
+ assert_eq!(1, res);
+}
+
+#[maybe_tokio_test]
+async fn disable_with_if() {
+ use tokio_test::task;
+
+ let f = poll_fn(|_| panic!());
+ let (tx, rx) = oneshot::channel();
+
+ let mut f = task::spawn(async {
+ tokio::select! {
+ _ = f, if false => unreachable!(),
+ _ = rx => (),
+ }
+ });
+
+ assert_pending!(f.poll());
+
+ assert_ok!(tx.send(()));
+ assert!(f.is_woken());
+
+ assert_ready!(f.poll());
+}
+
+#[maybe_tokio_test]
+async fn join_with_select() {
+ use tokio_test::task;
+
+ let (tx1, mut rx1) = oneshot::channel();
+ let (tx2, mut rx2) = oneshot::channel();
+
+ let mut f = task::spawn(async {
+ let mut a = None;
+ let mut b = None;
+
+ while a.is_none() || b.is_none() {
+ tokio::select! {
+ v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)),
+ v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2))
+ }
+ }
+
+ (a.unwrap(), b.unwrap())
+ });
+
+ assert_pending!(f.poll());
+
+ assert_ok!(tx1.send(123));
+ assert!(f.is_woken());
+ assert_pending!(f.poll());
+
+ assert_ok!(tx2.send(456));
+ assert!(f.is_woken());
+ let (a, b) = assert_ready!(f.poll());
+
+ assert_eq!(a, 123);
+ assert_eq!(b, 456);
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn use_future_in_if_condition() {
+ use tokio::time::{self, Duration};
+
+ tokio::select! {
+ _ = time::sleep(Duration::from_millis(10)), if false => {
+ panic!("if condition ignored")
+ }
+ _ = async { 1u32 } => {
+ }
+ }
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn use_future_in_if_condition_biased() {
+ use tokio::time::{self, Duration};
+
+ tokio::select! {
+ biased;
+ _ = time::sleep(Duration::from_millis(10)), if false => {
+ panic!("if condition ignored")
+ }
+ _ = async { 1u32 } => {
+ }
+ }
+}
+
+#[maybe_tokio_test]
+async fn many_branches() {
+ let num = tokio::select! {
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ };
+
+ assert_eq!(1, num);
+}
+
+#[maybe_tokio_test]
+async fn never_branch_no_warnings() {
+ let t = tokio::select! {
+ _ = async_never() => 0,
+ one_async_ready = one() => one_async_ready,
+ };
+ assert_eq!(t, 1);
+}
+
+async fn one() -> usize {
+ 1
+}
+
+async fn require_mutable(_: &mut i32) {}
+async fn async_noop() {}
+
+async fn async_never() -> ! {
+ futures::future::pending().await
+}
+
+// From https://github.com/tokio-rs/tokio/issues/2857
+#[maybe_tokio_test]
+async fn mut_on_left_hand_side() {
+ let v = async move {
+ let ok = async { 1 };
+ tokio::pin!(ok);
+ tokio::select! {
+ mut a = &mut ok => {
+ a += 1;
+ a
+ }
+ }
+ }
+ .await;
+ assert_eq!(v, 2);
+}
+
+#[maybe_tokio_test]
+async fn biased_one_not_ready() {
+ let (_tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = oneshot::channel::<i32>();
+ let (tx3, rx3) = oneshot::channel::<i32>();
+
+ tx2.send(2).unwrap();
+ tx3.send(3).unwrap();
+
+ let v = tokio::select! {
+ biased;
+
+ _ = rx1 => unreachable!(),
+ res = rx2 => {
+ assert_ok!(res)
+ },
+ _ = rx3 => {
+ panic!("This branch should never be activated because `rx2` should be polled before `rx3` due to `biased;`.")
+ }
+ };
+
+ assert_eq!(2, v);
+}
+
+#[maybe_tokio_test]
+#[cfg(feature = "full")]
+async fn biased_eventually_ready() {
+ use tokio::task::yield_now;
+
+ let one = async {};
+ let two = async { yield_now().await };
+ let three = async { yield_now().await };
+
+ let mut count = 0u8;
+
+ tokio::pin!(one, two, three);
+
+ loop {
+ tokio::select! {
+ biased;
+
+ _ = &mut two, if count < 2 => {
+ count += 1;
+ assert_eq!(count, 2);
+ }
+ _ = &mut three, if count < 3 => {
+ count += 1;
+ assert_eq!(count, 3);
+ }
+ _ = &mut one, if count < 1 => {
+ count += 1;
+ assert_eq!(count, 1);
+ }
+ else => break,
+ }
+ }
+
+ assert_eq!(count, 3);
+}
+
+// https://github.com/tokio-rs/tokio/issues/3830
+// https://github.com/rust-lang/rust-clippy/issues/7304
+#[warn(clippy::default_numeric_fallback)]
+pub async fn default_numeric_fallback() {
+ tokio::select! {
+ _ = async {} => (),
+ else => (),
+ }
+}
+
+// https://github.com/tokio-rs/tokio/issues/4182
+#[maybe_tokio_test]
+async fn mut_ref_patterns() {
+ tokio::select! {
+ Some(mut foo) = async { Some("1".to_string()) } => {
+ assert_eq!(foo, "1");
+ foo = "2".to_string();
+ assert_eq!(foo, "2");
+ },
+ };
+
+ tokio::select! {
+ Some(ref foo) = async { Some("1".to_string()) } => {
+ assert_eq!(*foo, "1");
+ },
+ };
+
+ tokio::select! {
+ Some(ref mut foo) = async { Some("1".to_string()) } => {
+ assert_eq!(*foo, "1");
+ *foo = "2".to_string();
+ assert_eq!(*foo, "2");
+ },
+ };
+}
diff --git a/third_party/rust/tokio/tests/macros_test.rs b/third_party/rust/tokio/tests/macros_test.rs
new file mode 100644
index 0000000000..c5d9d9f9b0
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_test.rs
@@ -0,0 +1,72 @@
+#![cfg(feature = "full")]
+
+use tokio::test;
+
+#[test]
+async fn test_macro_can_be_used_via_use() {
+ tokio::spawn(async {}).await.unwrap();
+}
+
+#[tokio::test]
+async fn test_macro_is_resilient_to_shadowing() {
+ tokio::spawn(async {}).await.unwrap();
+}
+
+// https://github.com/tokio-rs/tokio/issues/3403
+#[rustfmt::skip] // this `rustfmt::skip` is necessary because unused_braces does not warn if the block contains a newline.
+#[tokio::main]
+pub async fn unused_braces_main() { println!("hello") }
+#[rustfmt::skip] // this `rustfmt::skip` is necessary because unused_braces does not warn if the block contains a newline.
+#[tokio::test]
+async fn unused_braces_test() { assert_eq!(1 + 1, 2) }
+
+// https://github.com/tokio-rs/tokio/pull/3766#issuecomment-835508651
+#[std::prelude::v1::test]
+fn trait_method() {
+ trait A {
+ fn f(self);
+ }
+ impl A for () {
+ #[tokio::main]
+ async fn f(self) {}
+ }
+ ().f()
+}
+
+// https://github.com/tokio-rs/tokio/issues/4175
+#[tokio::main]
+pub async fn issue_4175_main_1() -> ! {
+ panic!();
+}
+#[tokio::main]
+pub async fn issue_4175_main_2() -> std::io::Result<()> {
+ panic!();
+}
+#[allow(unreachable_code)]
+#[tokio::test]
+pub async fn issue_4175_test() -> std::io::Result<()> {
+ return Ok(());
+ panic!();
+}
+
+// https://github.com/tokio-rs/tokio/issues/4175
+pub mod clippy_semicolon_if_nothing_returned {
+ #![deny(clippy::semicolon_if_nothing_returned)]
+
+ #[tokio::main]
+ pub async fn local() {
+ let _x = ();
+ }
+ #[tokio::main]
+ pub async fn item() {
+ fn _f() {}
+ }
+ #[tokio::main]
+ pub async fn semi() {
+ panic!();
+ }
+ #[tokio::main]
+ pub async fn empty() {
+        // To trigger the clippy::semicolon_if_nothing_returned lint, the block needs to contain a newline.
+ }
+}
diff --git a/third_party/rust/tokio/tests/macros_try_join.rs b/third_party/rust/tokio/tests/macros_try_join.rs
new file mode 100644
index 0000000000..60a726b659
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_try_join.rs
@@ -0,0 +1,109 @@
+#![cfg(feature = "macros")]
+#![allow(clippy::blacklisted_name)]
+
+use tokio::sync::oneshot;
+use tokio_test::{assert_pending, assert_ready, task};
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::try_join!(async { ok(1) },);
+
+ assert_eq!(foo, Ok((1,)));
+}
+
+#[maybe_tokio_test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::try_join!(async { ok(1) });
+
+ assert_eq!(foo, Ok((1,)));
+}
+
+#[maybe_tokio_test]
+async fn sync_two_lit_expr_comma() {
+ let foo = tokio::try_join!(async { ok(1) }, async { ok(2) },);
+
+ assert_eq!(foo, Ok((1, 2)));
+}
+
+#[maybe_tokio_test]
+async fn sync_two_lit_expr_no_comma() {
+ let foo = tokio::try_join!(async { ok(1) }, async { ok(2) });
+
+ assert_eq!(foo, Ok((1, 2)));
+}
+
+#[maybe_tokio_test]
+async fn two_await() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+
+ let mut join =
+ task::spawn(async { tokio::try_join!(async { rx1.await }, async { rx2.await }) });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ tx1.send("hello").unwrap();
+ assert!(join.is_woken());
+ let res: Result<(&str, u32), _> = assert_ready!(join.poll());
+
+ assert_eq!(Ok(("hello", 123)), res);
+}
+
+#[maybe_tokio_test]
+async fn err_abort_early() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+ let (_tx3, rx3) = oneshot::channel::<u32>();
+
+ let mut join = task::spawn(async {
+ tokio::try_join!(async { rx1.await }, async { rx2.await }, async {
+ rx3.await
+ })
+ });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ drop(tx1);
+ assert!(join.is_woken());
+
+ let res = assert_ready!(join.poll());
+
+ assert!(res.is_err());
+}
+
+#[test]
+fn join_size() {
+ use futures::future;
+ use std::mem;
+
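+    // Like the `join!` size test, these exact sizes guard against regressions
+    // in the size of the future generated by `try_join!`.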
+ let fut = async {
+ let ready = future::ready(ok(0i32));
+ tokio::try_join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(ok(0i32));
+ let ready2 = future::ready(ok(0i32));
+ tokio::try_join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
+
+fn ok<T>(val: T) -> Result<T, ()> {
+ Ok(val)
+}
diff --git a/third_party/rust/tokio/tests/named_pipe.rs b/third_party/rust/tokio/tests/named_pipe.rs
new file mode 100644
index 0000000000..2055c3ce5b
--- /dev/null
+++ b/third_party/rust/tokio/tests/named_pipe.rs
@@ -0,0 +1,393 @@
+#![cfg(feature = "full")]
+#![cfg(all(windows))]
+
+use std::io;
+use std::mem;
+use std::os::windows::io::AsRawHandle;
+use std::time::Duration;
+use tokio::io::AsyncWriteExt;
+use tokio::net::windows::named_pipe::{ClientOptions, PipeMode, ServerOptions};
+use tokio::time;
+use winapi::shared::winerror;
+
+#[tokio::test]
+async fn test_named_pipe_client_drop() -> io::Result<()> {
+ const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-client-drop";
+
+ let mut server = ServerOptions::new().create(PIPE_NAME)?;
+
+ assert_eq!(num_instances("test-named-pipe-client-drop")?, 1);
+
+ let client = ClientOptions::new().open(PIPE_NAME)?;
+
+ server.connect().await?;
+ drop(client);
+
+    // The pipe instance is now broken because the client is gone.
+ match server.write_all(b"ping").await {
+ Err(e) if e.raw_os_error() == Some(winerror::ERROR_NO_DATA as i32) => (),
+ x => panic!("{:?}", x),
+ }
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn test_named_pipe_single_client() -> io::Result<()> {
+ use tokio::io::{AsyncBufReadExt as _, BufReader};
+
+ const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-single-client";
+
+ let server = ServerOptions::new().create(PIPE_NAME)?;
+
+ let server = tokio::spawn(async move {
+ // Note: we wait for a client to connect.
+ server.connect().await?;
+
+ let mut server = BufReader::new(server);
+
+ let mut buf = String::new();
+ server.read_line(&mut buf).await?;
+ server.write_all(b"pong\n").await?;
+ Ok::<_, io::Error>(buf)
+ });
+
+ let client = tokio::spawn(async move {
+ let client = ClientOptions::new().open(PIPE_NAME)?;
+
+ let mut client = BufReader::new(client);
+
+ let mut buf = String::new();
+ client.write_all(b"ping\n").await?;
+ client.read_line(&mut buf).await?;
+ Ok::<_, io::Error>(buf)
+ });
+
+ let (server, client) = tokio::try_join!(server, client)?;
+
+ assert_eq!(server?, "ping\n");
+ assert_eq!(client?, "pong\n");
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn test_named_pipe_multi_client() -> io::Result<()> {
+ use tokio::io::{AsyncBufReadExt as _, BufReader};
+
+ const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-multi-client";
+ const N: usize = 10;
+
+    // The first server needs to be constructed early so that clients can
+    // be correctly connected. Otherwise a client that tries to open the pipe
+    // before any server instance exists will fail.
+ let mut server = ServerOptions::new().create(PIPE_NAME)?;
+
+ let server = tokio::spawn(async move {
+ for _ in 0..N {
+ // Wait for client to connect.
+ server.connect().await?;
+ let mut inner = BufReader::new(server);
+
+ // Construct the next server to be connected before sending the one
+            // we already have off onto a task. This ensures that the server
+ // isn't closed (after it's done in the task) before a new one is
+ // available. Otherwise the client might error with
+ // `io::ErrorKind::NotFound`.
+ server = ServerOptions::new().create(PIPE_NAME)?;
+
+ let _ = tokio::spawn(async move {
+ let mut buf = String::new();
+ inner.read_line(&mut buf).await?;
+ inner.write_all(b"pong\n").await?;
+ inner.flush().await?;
+ Ok::<_, io::Error>(())
+ });
+ }
+
+ Ok::<_, io::Error>(())
+ });
+
+ let mut clients = Vec::new();
+
+ for _ in 0..N {
+ clients.push(tokio::spawn(async move {
+ // This showcases a generic connect loop.
+ //
+ // We immediately try to open a client; if the pipe is not found or is
+ // busy, we back off briefly and retry until an instance becomes
+ // available.
+ let client = loop {
+ match ClientOptions::new().open(PIPE_NAME) {
+ Ok(client) => break client,
+ Err(e) if e.raw_os_error() == Some(winerror::ERROR_PIPE_BUSY as i32) => (),
+ Err(e) if e.kind() == io::ErrorKind::NotFound => (),
+ Err(e) => return Err(e),
+ }
+
+ // Wait for a named pipe to become available.
+ time::sleep(Duration::from_millis(10)).await;
+ };
+
+ let mut client = BufReader::new(client);
+
+ let mut buf = String::new();
+ client.write_all(b"ping\n").await?;
+ client.flush().await?;
+ client.read_line(&mut buf).await?;
+ Ok::<_, io::Error>(buf)
+ }));
+ }
+
+ for client in clients {
+ let result = client.await?;
+ assert_eq!(result?, "pong\n");
+ }
+
+ server.await??;
+ Ok(())
+}
+
+#[tokio::test]
+async fn test_named_pipe_multi_client_ready() -> io::Result<()> {
+ use tokio::io::Interest;
+
+ const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-multi-client-ready";
+ const N: usize = 10;
+
+ // The first server needs to be constructed early so that clients can
+ // be correctly connected. Otherwise calling .wait will cause the client to
+ // error.
+ let mut server = ServerOptions::new().create(PIPE_NAME)?;
+
+ let server = tokio::spawn(async move {
+ for _ in 0..N {
+ // Wait for client to connect.
+ server.connect().await?;
+
+ let inner_server = server;
+
+ // Construct the next server to be connected before sending the one
+ // we already have off onto a task. This ensures that the server
+ // isn't closed (after it's done in the task) before a new one is
+ // available. Otherwise the client might error with
+ // `io::ErrorKind::NotFound`.
+ server = ServerOptions::new().create(PIPE_NAME)?;
+
+ let _ = tokio::spawn(async move {
+ let server = inner_server;
+
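+ // Read exactly `read_buf.len()` bytes from the client using readiness
+ // events and `try_read`, retrying on `WouldBlock`.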
+ {
+ let mut read_buf = [0u8; 5];
+ let mut read_buf_cursor = 0;
+
+ loop {
+ server.readable().await?;
+
+ let buf = &mut read_buf[read_buf_cursor..];
+
+ match server.try_read(buf) {
+ Ok(n) => {
+ read_buf_cursor += n;
+
+ if read_buf_cursor == read_buf.len() {
+ break;
+ }
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ continue;
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ }
+ }
+ };
+
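+ // Write the full "pong\n" response using writability events and
+ // `try_write`, retrying on `WouldBlock`.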
+ {
+ let write_buf = b"pong\n";
+ let mut write_buf_cursor = 0;
+
+ loop {
+ server.writable().await?;
+ let buf = &write_buf[write_buf_cursor..];
+
+ match server.try_write(buf) {
+ Ok(n) => {
+ write_buf_cursor += n;
+
+ if write_buf_cursor == write_buf.len() {
+ break;
+ }
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ continue;
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ }
+ }
+ }
+
+ Ok::<_, io::Error>(())
+ });
+ }
+
+ Ok::<_, io::Error>(())
+ });
+
+ let mut clients = Vec::new();
+
+ for _ in 0..N {
+ clients.push(tokio::spawn(async move {
+ // This showcases a generic connect loop.
+ //
+ // We immediately try to open a client; if the pipe is not found or is
+ // busy, we back off briefly and retry until an instance becomes
+ // available.
+ let client = loop {
+ match ClientOptions::new().open(PIPE_NAME) {
+ Ok(client) => break client,
+ Err(e) if e.raw_os_error() == Some(winerror::ERROR_PIPE_BUSY as i32) => (),
+ Err(e) if e.kind() == io::ErrorKind::NotFound => (),
+ Err(e) => return Err(e),
+ }
+
+ // Wait for a named pipe to become available.
+ time::sleep(Duration::from_millis(10)).await;
+ };
+
+ let mut read_buf = [0u8; 5];
+ let mut read_buf_cursor = 0;
+ let write_buf = b"ping\n";
+ let mut write_buf_cursor = 0;
+
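+ // Drive the read and the write from a single `ready` call; write
+ // interest is only requested while there are bytes left to send.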
+ loop {
+ let mut interest = Interest::READABLE;
+ if write_buf_cursor < write_buf.len() {
+ interest |= Interest::WRITABLE;
+ }
+
+ let ready = client.ready(interest).await?;
+
+ if ready.is_readable() {
+ let buf = &mut read_buf[read_buf_cursor..];
+
+ match client.try_read(buf) {
+ Ok(n) => {
+ read_buf_cursor += n;
+
+ if read_buf_cursor == read_buf.len() {
+ break;
+ }
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ continue;
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ }
+ }
+
+ if ready.is_writable() {
+ let buf = &write_buf[write_buf_cursor..];
+
+ if buf.is_empty() {
+ continue;
+ }
+
+ match client.try_write(buf) {
+ Ok(n) => {
+ write_buf_cursor += n;
+ }
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ continue;
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ }
+ }
+ }
+
+ let buf = String::from_utf8_lossy(&read_buf).into_owned();
+
+ Ok::<_, io::Error>(buf)
+ }));
+ }
+
+ for client in clients {
+ let result = client.await?;
+ assert_eq!(result?, "pong\n");
+ }
+
+ server.await??;
+ Ok(())
+}
+
+// This tests that a server can be created with a message pipe mode and that a client can connect to it.
+#[tokio::test]
+async fn test_named_pipe_mode_message() -> io::Result<()> {
+ const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-mode-message";
+
+ let server = ServerOptions::new()
+ .pipe_mode(PipeMode::Message)
+ .create(PIPE_NAME)?;
+
+ let _ = ClientOptions::new().open(PIPE_NAME)?;
+ server.connect().await?;
+ Ok(())
+}
+
+fn num_instances(pipe_name: impl AsRef<str>) -> io::Result<u32> {
+ use ntapi::ntioapi;
+ use winapi::shared::ntdef;
+
+ let mut name = pipe_name.as_ref().encode_utf16().collect::<Vec<_>>();
+ let mut name = ntdef::UNICODE_STRING {
+ Length: (name.len() * mem::size_of::<u16>()) as u16,
+ MaximumLength: (name.len() * mem::size_of::<u16>()) as u16,
+ Buffer: name.as_mut_ptr(),
+ };
+ let root = std::fs::File::open(r"\\.\Pipe\")?;
+ let mut io_status_block = unsafe { mem::zeroed() };
+ let mut file_directory_information = [0_u8; 1024];
+
+ let status = unsafe {
+ ntioapi::NtQueryDirectoryFile(
+ root.as_raw_handle(),
+ std::ptr::null_mut(),
+ None,
+ std::ptr::null_mut(),
+ &mut io_status_block,
+ &mut file_directory_information as *mut _ as *mut _,
+ 1024,
+ ntioapi::FileDirectoryInformation,
+ 0,
+ &mut name,
+ 0,
+ )
+ };
+
+ if status as u32 != winerror::NO_ERROR {
+ return Err(io::Error::last_os_error());
+ }
+
+ let info = unsafe {
+ mem::transmute::<_, &ntioapi::FILE_DIRECTORY_INFORMATION>(&file_directory_information)
+ };
+ let raw_name = unsafe {
+ std::slice::from_raw_parts(
+ info.FileName.as_ptr(),
+ info.FileNameLength as usize / mem::size_of::<u16>(),
+ )
+ };
+ let name = String::from_utf16(raw_name).unwrap();
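+ // For entries in the named pipe filesystem, the `EndOfFile` field
+ // reports the number of currently active instances of that pipe.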
+ let num_instances = unsafe { *info.EndOfFile.QuadPart() };
+
+ assert_eq!(name, pipe_name.as_ref());
+
+ Ok(num_instances as u32)
+}
diff --git a/third_party/rust/tokio/tests/net_bind_resource.rs b/third_party/rust/tokio/tests/net_bind_resource.rs
new file mode 100644
index 0000000000..d4a0b8dab0
--- /dev/null
+++ b/third_party/rust/tokio/tests/net_bind_resource.rs
@@ -0,0 +1,14 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+
+use std::convert::TryFrom;
+use std::net;
+
+#[test]
+#[should_panic]
+fn no_runtime_panics_binding_net_tcp_listener() {
+ let listener = net::TcpListener::bind("127.0.0.1:0").expect("failed to bind listener");
+ let _ = TcpListener::try_from(listener);
+}
diff --git a/third_party/rust/tokio/tests/net_lookup_host.rs b/third_party/rust/tokio/tests/net_lookup_host.rs
new file mode 100644
index 0000000000..44c8e19e0d
--- /dev/null
+++ b/third_party/rust/tokio/tests/net_lookup_host.rs
@@ -0,0 +1,38 @@
+#![cfg(feature = "full")]
+
+use tokio::net;
+use tokio_test::assert_ok;
+
+use std::io;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+#[tokio::test]
+async fn lookup_socket_addr() {
+ let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
+
+ let actual = assert_ok!(net::lookup_host(addr).await).collect::<Vec<_>>();
+ assert_eq!(vec![addr], actual);
+}
+
+#[tokio::test]
+async fn lookup_str_socket_addr() {
+ let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
+
+ let actual = assert_ok!(net::lookup_host("127.0.0.1:8000").await).collect::<Vec<_>>();
+ assert_eq!(vec![addr], actual);
+}
+
+#[tokio::test]
+async fn resolve_dns() -> io::Result<()> {
+ let mut hosts = net::lookup_host("localhost:3000").await?;
+ let host = hosts.next().unwrap();
+
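+ // Depending on the system resolver, `localhost` may yield either the
+ // IPv4 or the IPv6 loopback address first; accept both.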
+ let expected = if host.is_ipv4() {
+ SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000)
+ } else {
+ SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 3000)
+ };
+ assert_eq!(host, expected);
+
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/no_rt.rs b/third_party/rust/tokio/tests/no_rt.rs
new file mode 100644
index 0000000000..64e56f4d43
--- /dev/null
+++ b/third_party/rust/tokio/tests/no_rt.rs
@@ -0,0 +1,41 @@
+#![cfg(feature = "full")]
+
+use tokio::net::TcpStream;
+use tokio::sync::oneshot;
+use tokio::time::{timeout, Duration};
+
+use futures::executor::block_on;
+
+use std::net::TcpListener;
+
+#[test]
+#[should_panic(
+ expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
+)]
+fn timeout_panics_when_no_tokio_context() {
+ block_on(timeout_value());
+}
+
+#[test]
+#[should_panic(
+ expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
+)]
+fn panics_when_no_reactor() {
+ let srv = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = srv.local_addr().unwrap();
+ block_on(TcpStream::connect(&addr)).unwrap();
+}
+
+async fn timeout_value() {
+ let (_tx, rx) = oneshot::channel::<()>();
+ let dur = Duration::from_millis(10);
+ let _ = timeout(dur, rx).await;
+}
+
+#[test]
+#[should_panic(
+ expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
+)]
+fn io_panics_when_no_tokio_context() {
+ let _ = tokio::net::TcpListener::from_std(std::net::TcpListener::bind("127.0.0.1:0").unwrap());
+}
diff --git a/third_party/rust/tokio/tests/process_arg0.rs b/third_party/rust/tokio/tests/process_arg0.rs
new file mode 100644
index 0000000000..4fabea0fe1
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_arg0.rs
@@ -0,0 +1,13 @@
+#![warn(rust_2018_idioms)]
+#![cfg(all(feature = "full", unix))]
+
+use tokio::process::Command;
+
+#[tokio::test]
+async fn arg0() {
+ let mut cmd = Command::new("sh");
+ cmd.arg0("test_string").arg("-c").arg("echo $0");
+
+ let output = cmd.output().await.unwrap();
+ assert_eq!(output.stdout, b"test_string\n");
+}
diff --git a/third_party/rust/tokio/tests/process_issue_2174.rs b/third_party/rust/tokio/tests/process_issue_2174.rs
new file mode 100644
index 0000000000..5ee9dc0a4b
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_issue_2174.rs
@@ -0,0 +1,45 @@
+#![cfg(feature = "process")]
+#![warn(rust_2018_idioms)]
+// This test reveals a difference in behavior of kqueue on FreeBSD. When the
+// reader disconnects, there does not seem to be an `EVFILT_WRITE` filter that
+// is returned.
+//
+// It is expected that `EVFILT_WRITE` would be returned with either the
+// `EV_EOF` or `EV_ERROR` flag set. If either flag is set a write would be
+// attempted, but that does not seem to occur.
+#![cfg(all(unix, not(target_os = "freebsd")))]
+
+use std::process::Stdio;
+use std::time::Duration;
+use tokio::io::AsyncWriteExt;
+use tokio::process::Command;
+use tokio::time;
+use tokio_test::assert_err;
+
+#[tokio::test]
+async fn issue_2174() {
+ let mut child = Command::new("sleep")
+ .arg("2")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::null())
+ .spawn()
+ .unwrap();
+ let mut input = child.stdin.take().unwrap();
+
+ // Writes will buffer up to 65_536 bytes. This *should* loop at least 8
+ // times and then register interest.
+ let handle = tokio::spawn(async move {
+ let data = [0u8; 8192];
+ loop {
+ input.write_all(&data).await.unwrap();
+ }
+ });
+
+ // Sleep enough time so that the child process's stdin's buffer fills.
+ time::sleep(Duration::from_secs(1)).await;
+
+ // Kill the child process.
+ child.kill().await.unwrap();
+
+ assert_err!(handle.await);
+}
diff --git a/third_party/rust/tokio/tests/process_issue_42.rs b/third_party/rust/tokio/tests/process_issue_42.rs
new file mode 100644
index 0000000000..569c122e36
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_issue_42.rs
@@ -0,0 +1,38 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use futures::future::join_all;
+use std::process::Stdio;
+use tokio::process::Command;
+use tokio::task;
+
+#[tokio::test]
+async fn issue_42() {
+ // We spawn many batches of processes which should exit at roughly the
+ // same time (modulo OS scheduling delays), to make sure that consuming
+ // a readiness event for one process doesn't inadvertently starve another.
+ // We then do this many times (in parallel) in an effort to stress test the
+ // implementation to ensure there are no race conditions.
+ // See alexcrichton/tokio-process#42 for background.
+ let join_handles = (0..10usize).map(|_| {
+ task::spawn(async {
+ let processes = (0..10usize).map(|i| {
+ let mut child = Command::new("echo")
+ .arg(format!("I am spawned process #{}", i))
+ .stdin(Stdio::null())
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .kill_on_drop(true)
+ .spawn()
+ .unwrap();
+
+ async move { child.wait().await }
+ });
+
+ join_all(processes).await;
+ })
+ });
+
+ join_all(join_handles).await;
+}
diff --git a/third_party/rust/tokio/tests/process_kill_on_drop.rs b/third_party/rust/tokio/tests/process_kill_on_drop.rs
new file mode 100644
index 0000000000..658e4addd6
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_kill_on_drop.rs
@@ -0,0 +1,44 @@
+#![cfg(all(unix, feature = "process"))]
+#![warn(rust_2018_idioms)]
+
+use std::io::ErrorKind;
+use std::process::Stdio;
+use std::time::Duration;
+use tokio::io::AsyncReadExt;
+use tokio::process::Command;
+use tokio::time::sleep;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn kill_on_drop() {
+ let mut cmd = Command::new("bash");
+ cmd.args(&[
+ "-c",
+ "
+ # Fork another child that won't get killed
+ sh -c 'sleep 1; echo child ran' &
+ disown -a
+
+ # Await our death
+ sleep 5
+ echo hello from beyond the grave
+ ",
+ ]);
+
+ let e = cmd.kill_on_drop(true).stdout(Stdio::piped()).spawn();
+ if e.is_err() && e.as_ref().unwrap_err().kind() == ErrorKind::NotFound {
+ println!("bash not available; skipping test");
+ return;
+ }
+ let mut child = e.unwrap();
+
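+ // Sleep long enough for the disowned grandchild to print "child ran",
+ // but not long enough for the parent shell to reach its second echo.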
+ sleep(Duration::from_secs(2)).await;
+
+ let mut out = child.stdout.take().unwrap();
+ drop(child);
+
+ let mut msg = String::new();
+ assert_ok!(out.read_to_string(&mut msg).await);
+
+ assert_eq!("child ran\n", msg);
+}
diff --git a/third_party/rust/tokio/tests/process_raw_handle.rs b/third_party/rust/tokio/tests/process_raw_handle.rs
new file mode 100644
index 0000000000..727e66d65e
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_raw_handle.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(windows)]
+
+use tokio::process::Command;
+use winapi::um::processthreadsapi::GetProcessId;
+
+#[tokio::test]
+async fn obtain_raw_handle() {
+ let mut cmd = Command::new("cmd");
+ cmd.kill_on_drop(true);
+ cmd.arg("/c");
+ cmd.arg("pause");
+
+ let child = cmd.spawn().unwrap();
+
+ let orig_id = child.id().expect("missing id");
+ assert!(orig_id > 0);
+
+ let handle = child.raw_handle().expect("process stopped");
+ let handled_id = unsafe { GetProcessId(handle as _) };
+ assert_eq!(handled_id, orig_id);
+}
diff --git a/third_party/rust/tokio/tests/process_smoke.rs b/third_party/rust/tokio/tests/process_smoke.rs
new file mode 100644
index 0000000000..fae5793fab
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_smoke.rs
@@ -0,0 +1,34 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::process::Command;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn simple() {
+ let mut cmd;
+
+ if cfg!(windows) {
+ cmd = Command::new("cmd");
+ cmd.arg("/c");
+ } else {
+ cmd = Command::new("sh");
+ cmd.arg("-c");
+ }
+
+ let mut child = cmd.arg("exit 2").spawn().unwrap();
+
+ let id = child.id().expect("missing id");
+ assert!(id > 0);
+
+ let status = assert_ok!(child.wait().await);
+ assert_eq!(status.code(), Some(2));
+
+ // test that the `.wait()` method is fused just like the stdlib
+ let status = assert_ok!(child.wait().await);
+ assert_eq!(status.code(), Some(2));
+
+ // Can't get id after process has exited
+ assert_eq!(child.id(), None);
+ drop(child.kill());
+}
diff --git a/third_party/rust/tokio/tests/rt_basic.rs b/third_party/rust/tokio/tests/rt_basic.rs
new file mode 100644
index 0000000000..cc6ac67728
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_basic.rs
@@ -0,0 +1,296 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::runtime::Runtime;
+use tokio::sync::oneshot;
+use tokio::time::{timeout, Duration};
+use tokio_test::{assert_err, assert_ok};
+
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::task::{Context, Poll};
+use std::thread;
+
+mod support {
+ pub(crate) mod mpsc_stream;
+}
+
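+// Expands its body only when the `tokio_unstable` cfg is set, since the
+// runtime metrics API is unstable.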
+macro_rules! cfg_metrics {
+ ($($t:tt)*) => {
+ #[cfg(tokio_unstable)]
+ {
+ $( $t )*
+ }
+ }
+}
+
+#[test]
+fn spawned_task_does_not_progress_without_block_on() {
+ let (tx, mut rx) = oneshot::channel();
+
+ let rt = rt();
+
+ rt.spawn(async move {
+ assert_ok!(tx.send("hello"));
+ });
+
+ thread::sleep(Duration::from_millis(50));
+
+ assert_err!(rx.try_recv());
+
+ let out = rt.block_on(async { assert_ok!(rx.await) });
+
+ assert_eq!(out, "hello");
+}
+
+#[test]
+fn no_extra_poll() {
+ use pin_project_lite::pin_project;
+ use std::pin::Pin;
+ use std::sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Arc,
+ };
+ use std::task::{Context, Poll};
+ use tokio_stream::{Stream, StreamExt};
+
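+ // Wrap a stream and count how many times `poll_next` is called.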
+ pin_project! {
+ struct TrackPolls<S> {
+ npolls: Arc<AtomicUsize>,
+ #[pin]
+ s: S,
+ }
+ }
+
+ impl<S> Stream for TrackPolls<S>
+ where
+ S: Stream,
+ {
+ type Item = S::Item;
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+ this.npolls.fetch_add(1, SeqCst);
+ this.s.poll_next(cx)
+ }
+ }
+
+ let (tx, rx) = support::mpsc_stream::unbounded_channel_stream::<()>();
+ let rx = TrackPolls {
+ npolls: Arc::new(AtomicUsize::new(0)),
+ s: rx,
+ };
+ let npolls = Arc::clone(&rx.npolls);
+
+ let rt = rt();
+
+ // TODO: could probably avoid this, but why not.
+ let mut rx = Box::pin(rx);
+
+ rt.spawn(async move { while rx.next().await.is_some() {} });
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled exactly once: the initial poll
+ assert_eq!(npolls.load(SeqCst), 1);
+
+ tx.send(()).unwrap();
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled twice more: once to yield Some(), then once to yield Pending
+ assert_eq!(npolls.load(SeqCst), 1 + 2);
+
+ drop(tx);
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled once more: to yield None
+ assert_eq!(npolls.load(SeqCst), 1 + 2 + 1);
+}
+
+#[test]
+fn acquire_mutex_in_drop() {
+ use futures::future::pending;
+ use tokio::task;
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ let rt = rt();
+
+ rt.spawn(async move {
+ let _ = rx2.await;
+ unreachable!();
+ });
+
+ rt.spawn(async move {
+ let _ = rx1.await;
+ tx2.send(()).unwrap();
+ unreachable!();
+ });
+
+ // Spawn a task that will never notify
+ rt.spawn(async move {
+ pending::<()>().await;
+ tx1.send(()).unwrap();
+ });
+
+ // Tick the loop
+ rt.block_on(async {
+ task::yield_now().await;
+ });
+
+ // Drop the rt
+ drop(rt);
+}
+
+#[test]
+fn drop_tasks_in_context() {
+ static SUCCESS: AtomicBool = AtomicBool::new(false);
+
+ struct ContextOnDrop;
+
+ impl Future for ContextOnDrop {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+ }
+
+ impl Drop for ContextOnDrop {
+ fn drop(&mut self) {
+ if tokio::runtime::Handle::try_current().is_ok() {
+ SUCCESS.store(true, Ordering::SeqCst);
+ }
+ }
+ }
+
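+ // The future never completes; it is dropped when the runtime is dropped,
+ // and its destructor should still observe an active runtime context.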
+ let rt = rt();
+ rt.spawn(ContextOnDrop);
+ drop(rt);
+
+ assert!(SUCCESS.load(Ordering::SeqCst));
+}
+
+#[test]
+#[should_panic(expected = "boom")]
+fn wake_in_drop_after_panic() {
+ let (tx, rx) = oneshot::channel::<()>();
+
+ struct WakeOnDrop(Option<oneshot::Sender<()>>);
+
+ impl Drop for WakeOnDrop {
+ fn drop(&mut self) {
+ self.0.take().unwrap().send(()).unwrap();
+ }
+ }
+
+ let rt = rt();
+
+ rt.spawn(async move {
+ let _wake_on_drop = WakeOnDrop(Some(tx));
+ // wait forever
+ futures::future::pending::<()>().await;
+ });
+
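+ // This task is woken by `WakeOnDrop` when the first task is dropped
+ // during the panic-driven runtime shutdown.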
+ let _join = rt.spawn(async move { rx.await });
+
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ panic!("boom");
+ });
+}
+
+#[test]
+fn spawn_two() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ });
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+
+ cfg_metrics! {
+ let metrics = rt.metrics();
+ drop(rt);
+ assert_eq!(0, metrics.remote_schedule_count());
+
+ let mut local = 0;
+ for i in 0..metrics.num_workers() {
+ local += metrics.worker_local_schedule_count(i);
+ }
+
+ assert_eq!(2, local);
+ }
+}
+
+#[test]
+fn spawn_remote() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ let handle = tokio::spawn(async move {
+ std::thread::spawn(move || {
+ std::thread::sleep(Duration::from_millis(10));
+ tx.send("ZOMG").unwrap();
+ });
+
+ rx.await.unwrap()
+ });
+
+ handle.await.unwrap()
+ });
+
+ assert_eq!(out, "ZOMG");
+
+ cfg_metrics! {
+ let metrics = rt.metrics();
+ drop(rt);
+ assert_eq!(1, metrics.remote_schedule_count());
+
+ let mut local = 0;
+ for i in 0..metrics.num_workers() {
+ local += metrics.worker_local_schedule_count(i);
+ }
+
+ assert_eq!(1, local);
+ }
+}
+
+#[test]
+#[should_panic(
+ expected = "A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers."
+)]
+fn timeout_panics_when_no_time_handle() {
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ rt.block_on(async {
+ let (_tx, rx) = oneshot::channel::<()>();
+ let dur = Duration::from_millis(20);
+ let _ = timeout(dur, rx).await;
+ });
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/rt_common.rs b/third_party/rust/tokio/tests/rt_common.rs
new file mode 100644
index 0000000000..cb1d0f6615
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_common.rs
@@ -0,0 +1,1109 @@
+#![allow(clippy::needless_range_loop)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// Tests to run on both current-thread & thread-pool runtime variants.
+
+macro_rules! rt_test {
+ ($($t:tt)*) => {
+ mod current_thread_scheduler {
+ $($t)*
+
+ fn rt() -> Arc<Runtime> {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+ .into()
+ }
+ }
+
+ mod threaded_scheduler_4_threads {
+ $($t)*
+
+ fn rt() -> Arc<Runtime> {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ .into()
+ }
+ }
+
+ mod threaded_scheduler_1_thread {
+ $($t)*
+
+ fn rt() -> Arc<Runtime> {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ .into()
+ }
+ }
+ }
+}
+
+#[test]
+fn send_sync_bound() {
+ use tokio::runtime::Runtime;
+ fn is_send<T: Send + Sync>() {}
+
+ is_send::<Runtime>();
+}
+
+rt_test! {
+ use tokio::net::{TcpListener, TcpStream, UdpSocket};
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+ use tokio::runtime::Runtime;
+ use tokio::sync::oneshot;
+ use tokio::{task, time};
+ use tokio_test::{assert_err, assert_ok};
+
+ use futures::future::poll_fn;
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::sync::{mpsc, Arc};
+ use std::task::{Context, Poll};
+ use std::thread;
+ use std::time::{Duration, Instant};
+
+ #[test]
+ fn block_on_sync() {
+ let rt = rt();
+
+ let mut win = false;
+ rt.block_on(async {
+ win = true;
+ });
+
+ assert!(win);
+ }
+
+
+ #[test]
+ fn block_on_async() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ tx.send("ZOMG").unwrap();
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_one_bg() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_one_join() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ let handle = tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ "DONE"
+ });
+
+ let msg = assert_ok!(rx.await);
+
+ let out = assert_ok!(handle.await);
+ assert_eq!(out, "DONE");
+
+ msg
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_two() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ tokio::spawn(async move {
+ assert_ok!(tx1.send("ZOMG"));
+ });
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx1.await);
+ assert_ok!(tx2.send(msg));
+ });
+
+ assert_ok!(rx2.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_many_from_block_on() {
+ use tokio::sync::mpsc;
+
+ const ITER: usize = 200;
+
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (done_tx, mut done_rx) = mpsc::unbounded_channel();
+
+ let mut txs = (0..ITER)
+ .map(|i| {
+ let (tx, rx) = oneshot::channel();
+ let done_tx = done_tx.clone();
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx.await);
+ assert_eq!(i, msg);
+ assert_ok!(done_tx.send(msg));
+ });
+
+ tx
+ })
+ .collect::<Vec<_>>();
+
+ drop(done_tx);
+
+ thread::spawn(move || {
+ for (i, tx) in txs.drain(..).enumerate() {
+ assert_ok!(tx.send(i));
+ }
+ });
+
+ let mut out = vec![];
+ while let Some(i) = done_rx.recv().await {
+ out.push(i);
+ }
+
+ out.sort_unstable();
+ out
+ });
+
+ assert_eq!(ITER, out.len());
+
+ for i in 0..ITER {
+ assert_eq!(i, out[i]);
+ }
+ }
+
+ #[test]
+ fn spawn_many_from_task() {
+ use tokio::sync::mpsc;
+
+ const ITER: usize = 500;
+
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ tokio::spawn(async move {
+ let (done_tx, mut done_rx) = mpsc::unbounded_channel();
+
+ /*
+ for _ in 0..100 {
+ tokio::spawn(async move { });
+ }
+
+ tokio::task::yield_now().await;
+ */
+
+ let mut txs = (0..ITER)
+ .map(|i| {
+ let (tx, rx) = oneshot::channel();
+ let done_tx = done_tx.clone();
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx.await);
+ assert_eq!(i, msg);
+ assert_ok!(done_tx.send(msg));
+ });
+
+ tx
+ })
+ .collect::<Vec<_>>();
+
+ drop(done_tx);
+
+ thread::spawn(move || {
+ for (i, tx) in txs.drain(..).enumerate() {
+ assert_ok!(tx.send(i));
+ }
+ });
+
+ let mut out = vec![];
+ while let Some(i) = done_rx.recv().await {
+ out.push(i);
+ }
+
+ out.sort_unstable();
+ out
+ }).await.unwrap()
+ });
+
+ assert_eq!(ITER, out.len());
+
+ for i in 0..ITER {
+ assert_eq!(i, out[i]);
+ }
+ }
+
+ #[test]
+ fn spawn_await_chain() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ assert_ok!(tokio::spawn(async {
+ assert_ok!(tokio::spawn(async {
+ "hello"
+ }).await)
+ }).await)
+ });
+
+ assert_eq!(out, "hello");
+ }
+
+ #[test]
+ fn outstanding_tasks_dropped() {
+ let rt = rt();
+
+ let cnt = Arc::new(());
+
+ rt.block_on(async {
+ let cnt = cnt.clone();
+
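+ // The spawned task owns a clone of `cnt` and never completes, so the
+ // strong count stays at 2 until the runtime (and the task) is dropped.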
+ tokio::spawn(poll_fn(move |_| {
+ assert_eq!(2, Arc::strong_count(&cnt));
+ Poll::<()>::Pending
+ }));
+ });
+
+ assert_eq!(2, Arc::strong_count(&cnt));
+
+ drop(rt);
+
+ assert_eq!(1, Arc::strong_count(&cnt));
+ }
+
+ #[test]
+ #[should_panic]
+ fn nested_rt() {
+ let rt1 = rt();
+ let rt2 = rt();
+
+ rt1.block_on(async { rt2.block_on(async { "hello" }) });
+ }
+
+ #[test]
+ fn create_rt_in_block_on() {
+ let rt1 = rt();
+ let rt2 = rt1.block_on(async { rt() });
+ let out = rt2.block_on(async { "ZOMG" });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn complete_block_on_under_load() {
+ let rt = rt();
+
+ rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn complete_task_under_load() {
+ let rt = rt();
+
+ rt.block_on(async {
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ assert_ok!(tx1.send(()));
+ });
+
+ tokio::spawn(async move {
+ assert_ok!(rx1.await);
+ assert_ok!(tx2.send(()));
+ });
+
+ assert_ok!(rx2.await);
+ });
+ }
+
+ #[test]
+ fn spawn_from_other_thread_idle() {
+ let rt = rt();
+ let handle = rt.clone();
+
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+
+ handle.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+ });
+
+ rt.block_on(async move {
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn spawn_from_other_thread_under_load() {
+ let rt = rt();
+ let handle = rt.clone();
+
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ handle.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+ });
+
+ rt.block_on(async move {
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn sleep_at_root() {
+ let rt = rt();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(50);
+
+ rt.block_on(async move {
+ time::sleep(dur).await;
+ });
+
+ assert!(now.elapsed() >= dur);
+ }
+
+ #[test]
+ fn sleep_in_spawn() {
+ let rt = rt();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(50);
+
+ rt.block_on(async move {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ time::sleep(dur).await;
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rx.await);
+ });
+
+ assert!(now.elapsed() >= dur);
+ }
+
+ #[test]
+ fn block_on_socket() {
+ let rt = rt();
+
+ rt.block_on(async move {
+ let (tx, rx) = oneshot::channel();
+
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ tokio::spawn(async move {
+ let _ = listener.accept().await;
+ tx.send(()).unwrap();
+ });
+
+ TcpStream::connect(&addr).await.unwrap();
+ rx.await.unwrap();
+ });
+ }
+
+ #[test]
+ fn spawn_from_blocking() {
+ let rt = rt();
+
+ let out = rt.block_on(async move {
+ let inner = assert_ok!(tokio::task::spawn_blocking(|| {
+ tokio::spawn(async move { "hello" })
+ }).await);
+
+ assert_ok!(inner.await)
+ });
+
+ assert_eq!(out, "hello")
+ }
+
+ #[test]
+ fn spawn_blocking_from_blocking() {
+ let rt = rt();
+
+ let out = rt.block_on(async move {
+ let inner = assert_ok!(tokio::task::spawn_blocking(|| {
+ tokio::task::spawn_blocking(|| "hello")
+ }).await);
+
+ assert_ok!(inner.await)
+ });
+
+ assert_eq!(out, "hello")
+ }
+
+ #[test]
+ fn sleep_from_blocking() {
+ let rt = rt();
+
+ rt.block_on(async move {
+ assert_ok!(tokio::task::spawn_blocking(|| {
+ let now = std::time::Instant::now();
+ let dur = Duration::from_millis(1);
+
+ // use the futures' block_on fn to make sure we aren't setting
+ // any Tokio context
+ futures::executor::block_on(async {
+ tokio::time::sleep(dur).await;
+ });
+
+ assert!(now.elapsed() >= dur);
+ }).await);
+ });
+ }
+
+ #[test]
+ fn socket_from_blocking() {
+ let rt = rt();
+
+ rt.block_on(async move {
+ let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(listener.local_addr());
+
+ let peer = tokio::task::spawn_blocking(move || {
+ // use the futures' block_on fn to make sure we aren't setting
+ // any Tokio context
+ futures::executor::block_on(async {
+ assert_ok!(TcpStream::connect(addr).await);
+ });
+ });
+
+ // Wait for the client to connect
+ let _ = assert_ok!(listener.accept().await);
+
+ assert_ok!(peer.await);
+ });
+ }
+
+ #[test]
+ fn always_active_parker() {
+ // This test is to show that we will always have
+ // an active parker even if we call block_on concurrently.
+
+ let rt = rt();
+ let rt2 = rt.clone();
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ let jh1 = thread::spawn(move || {
+ rt.block_on(async move {
+ rx2.await.unwrap();
+ time::sleep(Duration::from_millis(5)).await;
+ tx1.send(()).unwrap();
+ });
+ });
+
+ let jh2 = thread::spawn(move || {
+ rt2.block_on(async move {
+ tx2.send(()).unwrap();
+ time::sleep(Duration::from_millis(5)).await;
+ rx1.await.unwrap();
+ time::sleep(Duration::from_millis(5)).await;
+ });
+ });
+
+ jh1.join().unwrap();
+ jh2.join().unwrap();
+ }
+
+ #[test]
+ // IOCP requires setting the "max thread" concurrency value. The sane
+ // default is to set this to the number of cores. Threads that poll I/O
+ // become associated with the IOCP handle. Once those threads sleep for any
+ // reason (mutex), they yield their ownership.
+ //
+ // This test hits an edge case on windows where more threads than cores are
+ // created, none of those threads ever yield due to being at capacity, so
+ // IOCP gets "starved".
+ //
+ // For now, this is a rare edge case that is probably not a real production
+ // concern, and there isn't a great/obvious solution to take. For now, the
+ // test is disabled on Windows.
+ #[cfg(not(windows))]
+ fn io_driver_called_when_under_load() {
+ let rt = rt();
+
+ // Create a lot of constant load. The scheduler will always be busy.
+ for _ in 0..100 {
+ rt.spawn(async {
+ loop {
+ tokio::task::yield_now().await;
+ }
+ });
+ }
+
+ // Do some I/O work
+ rt.block_on(async {
+ let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(listener.local_addr());
+
+ let srv = tokio::spawn(async move {
+ let (mut stream, _) = assert_ok!(listener.accept().await);
+ assert_ok!(stream.write_all(b"hello world").await);
+ });
+
+ let cli = tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(addr).await);
+ let mut dst = vec![0; 11];
+
+ assert_ok!(stream.read_exact(&mut dst).await);
+ assert_eq!(dst, b"hello world");
+ });
+
+ assert_ok!(srv.await);
+ assert_ok!(cli.await);
+ });
+ }
+
+ #[test]
+ fn client_server_block_on() {
+ let rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async move { client_server(tx).await });
+
+ assert_ok!(rx.try_recv());
+ assert_err!(rx.try_recv());
+ }
+
+ #[test]
+ fn panic_in_task() {
+ let rt = rt();
+ let (tx, rx) = oneshot::channel();
+
+ struct Boom(Option<oneshot::Sender<()>>);
+
+ impl Future for Boom {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ panic!();
+ }
+ }
+
+ impl Drop for Boom {
+ fn drop(&mut self) {
+ assert!(std::thread::panicking());
+ self.0.take().unwrap().send(()).unwrap();
+ }
+ }
+
+ rt.spawn(Boom(Some(tx)));
+ assert_ok!(rt.block_on(rx));
+ }
+
+ #[test]
+ #[should_panic]
+ fn panic_in_block_on() {
+ let rt = rt();
+ rt.block_on(async { panic!() });
+ }
+
+ async fn yield_once() {
+ let mut yielded = false;
+ poll_fn(|cx| {
+ if yielded {
+ Poll::Ready(())
+ } else {
+ yielded = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ })
+ .await
+ }
+
+ #[test]
+ fn enter_and_spawn() {
+ let rt = rt();
+ let handle = {
+ let _enter = rt.enter();
+ tokio::spawn(async {})
+ };
+
+ assert_ok!(rt.block_on(handle));
+ }
+
+ #[test]
+ fn eagerly_drops_futures_on_shutdown() {
+ use std::sync::mpsc;
+
+ struct Never {
+ drop_tx: mpsc::Sender<()>,
+ }
+
+ impl Future for Never {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+ }
+
+ impl Drop for Never {
+ fn drop(&mut self) {
+ self.drop_tx.send(()).unwrap();
+ }
+ }
+
+ let rt = rt();
+
+ let (drop_tx, drop_rx) = mpsc::channel();
+ let (run_tx, run_rx) = oneshot::channel();
+
+ rt.block_on(async move {
+ tokio::spawn(async move {
+ assert_ok!(run_tx.send(()));
+
+ Never { drop_tx }.await
+ });
+
+ assert_ok!(run_rx.await);
+ });
+
+ drop(rt);
+
+ assert_ok!(drop_rx.recv());
+ }
+
+ #[test]
+ fn wake_while_rt_is_dropping() {
+ use tokio::task;
+
+ struct OnDrop<F: FnMut()>(F);
+
+ impl<F: FnMut()> Drop for OnDrop<F> {
+ fn drop(&mut self) {
+ (self.0)()
+ }
+ }
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let (tx3, rx3) = oneshot::channel();
+
+ let rt = rt();
+
+ let h1 = rt.clone();
+
+ rt.spawn(async move {
+ // Ensure a waker gets stored in oneshot 1.
+ let _ = rx1.await;
+ tx3.send(()).unwrap();
+ });
+
+ rt.spawn(async move {
+ // When this task is dropped, we'll be "closing remotes".
+ // We spawn a new task that owns the `tx1`, to move its Drop
+ // out of here.
+ //
+ // Importantly, the oneshot 1 has a waker already stored, so
+ // the eventual drop here will try to re-schedule again.
+ let mut opt_tx1 = Some(tx1);
+ let _d = OnDrop(move || {
+ let tx1 = opt_tx1.take().unwrap();
+ h1.spawn(async move {
+ tx1.send(()).unwrap();
+ });
+ });
+ let _ = rx2.await;
+ });
+
+ rt.spawn(async move {
+ let _ = rx3.await;
+ // We'll never get here, but once task 3 drops, this will
+ // force task 2 to re-schedule since it's waiting on oneshot 2.
+ tx2.send(()).unwrap();
+ });
+
+ // Tick the loop
+ rt.block_on(async {
+ task::yield_now().await;
+ });
+
+ // Drop the rt
+ drop(rt);
+ }
+
+ #[test]
+ fn io_notify_while_shutting_down() {
+ use std::net::Ipv6Addr;
+ use std::sync::Arc;
+
+ for _ in 1..10 {
+ let runtime = rt();
+
+ runtime.block_on(async {
+ let socket = UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)).await.unwrap();
+ let addr = socket.local_addr().unwrap();
+ let send_half = Arc::new(socket);
+ let recv_half = send_half.clone();
+
+ tokio::spawn(async move {
+ let mut buf = [0];
+ loop {
+ recv_half.recv_from(&mut buf).await.unwrap();
+ std::thread::sleep(Duration::from_millis(2));
+ }
+ });
+
+ tokio::spawn(async move {
+ let buf = [0];
+ loop {
+ send_half.send_to(&buf, &addr).await.unwrap();
+ tokio::time::sleep(Duration::from_millis(1)).await;
+ }
+ });
+
+ tokio::time::sleep(Duration::from_millis(5)).await;
+ });
+ }
+ }
+
+ #[test]
+ fn shutdown_timeout() {
+ let (tx, rx) = oneshot::channel();
+ let runtime = rt();
+
+ runtime.block_on(async move {
+ task::spawn_blocking(move || {
+ tx.send(()).unwrap();
+ thread::sleep(Duration::from_secs(10_000));
+ });
+
+ rx.await.unwrap();
+ });
+
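+ // The blocking task sleeps effectively forever, so shutdown must be cut
+ // short by the 100ms timeout instead of waiting for it.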
+ Arc::try_unwrap(runtime).unwrap().shutdown_timeout(Duration::from_millis(100));
+ }
+
+ #[test]
+ fn shutdown_timeout_0() {
+ let runtime = rt();
+
+ runtime.block_on(async move {
+ task::spawn_blocking(move || {
+ thread::sleep(Duration::from_secs(10_000));
+ });
+ });
+
+ let now = Instant::now();
+ Arc::try_unwrap(runtime).unwrap().shutdown_timeout(Duration::from_nanos(0));
+ assert!(now.elapsed().as_secs() < 1);
+ }
+
+ #[test]
+ fn shutdown_wakeup_time() {
+ let runtime = rt();
+
+ runtime.block_on(async move {
+ tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+ });
+
+ Arc::try_unwrap(runtime).unwrap().shutdown_timeout(Duration::from_secs(10_000));
+ }
+
+ // This test is currently ignored on Windows because of a
+ // rust-lang issue in thread local storage destructors.
+ // See https://github.com/rust-lang/rust/issues/74875
+ #[test]
+ #[cfg(not(windows))]
+ fn runtime_in_thread_local() {
+ use std::cell::RefCell;
+ use std::thread;
+
+ thread_local!(
+ static R: RefCell<Option<Runtime>> = RefCell::new(None);
+ );
+
+ thread::spawn(|| {
+ R.with(|cell| {
+ let rt = rt();
+ let rt = Arc::try_unwrap(rt).unwrap();
+ *cell.borrow_mut() = Some(rt);
+ });
+
+ let _rt = rt();
+ }).join().unwrap();
+ }
+
+ async fn client_server(tx: mpsc::Sender<()>) {
+ let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ tokio::spawn(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+ }
+
+ #[test]
+ fn local_set_block_on_socket() {
+ let rt = rt();
+ let local = task::LocalSet::new();
+
+ local.block_on(&rt, async move {
+ let (tx, rx) = oneshot::channel();
+
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ task::spawn_local(async move {
+ let _ = listener.accept().await;
+ tx.send(()).unwrap();
+ });
+
+ TcpStream::connect(&addr).await.unwrap();
+ rx.await.unwrap();
+ });
+ }
+
+ #[test]
+ fn local_set_client_server_block_on() {
+ let rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ let local = task::LocalSet::new();
+
+ local.block_on(&rt, async move { client_server_local(tx).await });
+
+ assert_ok!(rx.try_recv());
+ assert_err!(rx.try_recv());
+ }
+
+ async fn client_server_local(tx: mpsc::Sender<()>) {
+ let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ task::spawn_local(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+ }
+
+ #[test]
+ fn coop() {
+ use std::task::Poll::Ready;
+
+ let rt = rt();
+
+ rt.block_on(async {
+ // Create a bunch of tasks
+ let mut tasks = (0..1_000).map(|_| {
+ tokio::spawn(async { })
+ }).collect::<Vec<_>>();
+
+ // Hope that all the tasks complete...
+ time::sleep(Duration::from_millis(100)).await;
+
+ poll_fn(|cx| {
+ // At least one task should not be ready
+ for task in &mut tasks {
+ if Pin::new(task).poll(cx).is_pending() {
+ return Ready(());
+ }
+ }
+
+ panic!("did not yield");
+ }).await;
+ });
+ }
+
+ #[test]
+ fn coop_unconstrained() {
+ use std::task::Poll::Ready;
+
+ let rt = rt();
+
+ rt.block_on(async {
+ // Create a bunch of tasks
+ let mut tasks = (0..1_000).map(|_| {
+ tokio::spawn(async { })
+ }).collect::<Vec<_>>();
+
+ // Hope that all the tasks complete...
+ time::sleep(Duration::from_millis(100)).await;
+
+ tokio::task::unconstrained(poll_fn(|cx| {
+ // All the tasks should be ready
+ for task in &mut tasks {
+ assert!(Pin::new(task).poll(cx).is_ready());
+ }
+
+ Ready(())
+ })).await;
+ });
+ }
+
+ // Tests that the "next task" scheduler optimization is not able to starve
+ // other tasks.
+ #[test]
+ fn ping_pong_saturation() {
+ use std::sync::atomic::{Ordering, AtomicBool};
+ use tokio::sync::mpsc;
+
+ const NUM: usize = 100;
+
+ let rt = rt();
+
+ let running = Arc::new(AtomicBool::new(true));
+
+ rt.block_on(async {
+ let (spawned_tx, mut spawned_rx) = mpsc::unbounded_channel();
+
+ let mut tasks = vec![];
+ // Spawn a bunch of tasks that ping pong between each other to
+ // saturate the runtime.
+ for _ in 0..NUM {
+ let (tx1, mut rx1) = mpsc::unbounded_channel();
+ let (tx2, mut rx2) = mpsc::unbounded_channel();
+ let spawned_tx = spawned_tx.clone();
+ let running = running.clone();
+ tasks.push(task::spawn(async move {
+ spawned_tx.send(()).unwrap();
+
+
+ while running.load(Ordering::Relaxed) {
+ tx1.send(()).unwrap();
+ rx2.recv().await.unwrap();
+ }
+
+ // Close the channel and wait for the other task to exit.
+ drop(tx1);
+ assert!(rx2.recv().await.is_none());
+ }));
+
+ tasks.push(task::spawn(async move {
+ while rx1.recv().await.is_some() {
+ tx2.send(()).unwrap();
+ }
+ }));
+ }
+
+ for _ in 0..NUM {
+ spawned_rx.recv().await.unwrap();
+ }
+
+ // spawn another task and wait for it to complete
+ let handle = task::spawn(async {
+ for _ in 0..5 {
+ // Yielding forces it back into the local queue.
+ task::yield_now().await;
+ }
+ });
+ handle.await.unwrap();
+ running.store(false, Ordering::Relaxed);
+ for t in tasks {
+ t.await.unwrap();
+ }
+ });
+ }
+}
diff --git a/third_party/rust/tokio/tests/rt_handle_block_on.rs b/third_party/rust/tokio/tests/rt_handle_block_on.rs
new file mode 100644
index 0000000000..5c1d533a01
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_handle_block_on.rs
@@ -0,0 +1,533 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// All io tests that deal with shutdown are currently ignored because there are known bugs with
+// shutting down the io driver while concurrently registering new resources. See
+// https://github.com/tokio-rs/tokio/pull/3569#pullrequestreview-612703467 for more details.
+//
+// When this has been fixed we want to re-enable these tests.
+
+use std::time::Duration;
+use tokio::runtime::{Handle, Runtime};
+use tokio::sync::mpsc;
+use tokio::task::spawn_blocking;
+use tokio::{fs, net, time};
+
+macro_rules! multi_threaded_rt_test {
+ ($($t:tt)*) => {
+ mod threaded_scheduler_4_threads_only {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_1_thread_only {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+ }
+}
+
+macro_rules! rt_test {
+ ($($t:tt)*) => {
+ mod current_thread_scheduler {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_4_threads {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_1_thread {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+ }
+}
+
+// ==== runtime independent futures ======
+
+#[test]
+fn basic() {
+ test_with_runtimes(|| {
+ let one = Handle::current().block_on(async { 1 });
+ assert_eq!(1, one);
+ });
+}
+
+#[test]
+fn bounded_mpsc_channel() {
+ test_with_runtimes(|| {
+ let (tx, mut rx) = mpsc::channel(1024);
+
+ Handle::current().block_on(tx.send(42)).unwrap();
+
+ let value = Handle::current().block_on(rx.recv()).unwrap();
+ assert_eq!(value, 42);
+ });
+}
+
+#[test]
+fn unbounded_mpsc_channel() {
+ test_with_runtimes(|| {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ let _ = tx.send(42);
+
+ let value = Handle::current().block_on(rx.recv()).unwrap();
+ assert_eq!(value, 42);
+ })
+}
+
+rt_test! {
+ // ==== spawn blocking futures ======
+
+ #[test]
+ fn basic_fs() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let contents = Handle::current()
+ .block_on(fs::read_to_string("Cargo.toml"))
+ .unwrap();
+ assert!(contents.contains("https://tokio.rs"));
+ }
+
+ #[test]
+ fn fs_shutdown_before_started() {
+ let rt = rt();
+ let _enter = rt.enter();
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err: std::io::Error = Handle::current()
+ .block_on(fs::read_to_string("Cargo.toml"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+
+ let inner_err = err.get_ref().expect("no inner error");
+ assert_eq!(inner_err.to_string(), "background task failed");
+ }
+
+ #[test]
+ fn basic_spawn_blocking() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let answer = Handle::current()
+ .block_on(spawn_blocking(|| {
+ std::thread::sleep(Duration::from_millis(100));
+ 42
+ }))
+ .unwrap();
+
+ assert_eq!(answer, 42);
+ }
+
+ #[test]
+ fn spawn_blocking_after_shutdown_fails() {
+ let rt = rt();
+ let _enter = rt.enter();
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let join_err = Handle::current()
+ .block_on(spawn_blocking(|| {
+ std::thread::sleep(Duration::from_millis(100));
+ 42
+ }))
+ .unwrap_err();
+
+ assert!(join_err.is_cancelled());
+ }
+
+ #[test]
+ fn spawn_blocking_started_before_shutdown_continues() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let handle = spawn_blocking(|| {
+ std::thread::sleep(Duration::from_secs(1));
+ 42
+ });
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let answer = Handle::current().block_on(handle).unwrap();
+
+ assert_eq!(answer, 42);
+ }
+
+ // ==== net ======
+
+ #[test]
+ fn tcp_listener_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current()
+ .block_on(net::TcpListener::bind("127.0.0.1:0"))
+ .unwrap();
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn tcp_listener_connect_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current()
+ .block_on(net::TcpListener::bind("127.0.0.1:0"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn tcp_listener_connect_before_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let bind_future = net::TcpListener::bind("127.0.0.1:0");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current().block_on(bind_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ #[test]
+ fn udp_socket_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current()
+ .block_on(net::UdpSocket::bind("127.0.0.1:0"))
+ .unwrap();
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn udp_stream_bind_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current()
+ .block_on(net::UdpSocket::bind("127.0.0.1:0"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn udp_stream_bind_before_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let bind_future = net::UdpSocket::bind("127.0.0.1:0");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current().block_on(bind_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_bind_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = net::UnixListener::bind(path).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_shutdown_after_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ // this should not time out but fail immediately since the runtime has been shut down
+ let err = Handle::current().block_on(listener.accept()).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_shutdown_after_accept() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ let accept_future = listener.accept();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ // this should not time out but fail immediately since the runtime has been shut down
+ let err = Handle::current().block_on(accept_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
+ }
+
+ // ==== nesting ======
+
+ #[test]
+ #[should_panic(
+ expected = "Cannot start a runtime from within a runtime. This happens because a function (like `block_on`) attempted to block the current thread while the thread is being used to drive asynchronous tasks."
+ )]
+ fn nesting() {
+ fn some_non_async_function() -> i32 {
+ Handle::current().block_on(time::sleep(Duration::from_millis(10)));
+ 1
+ }
+
+ let rt = rt();
+
+ rt.block_on(async { some_non_async_function() });
+ }
+
+ #[test]
+ fn spawn_after_runtime_dropped() {
+ use futures::future::FutureExt;
+
+ let rt = rt();
+
+ let handle = rt.block_on(async move {
+ Handle::current()
+ });
+
+ let jh1 = handle.spawn(futures::future::pending::<()>());
+
+ drop(rt);
+
+ let jh2 = handle.spawn(futures::future::pending::<()>());
+
+ let err1 = jh1.now_or_never().unwrap().unwrap_err();
+ let err2 = jh2.now_or_never().unwrap().unwrap_err();
+ assert!(err1.is_cancelled());
+ assert!(err2.is_cancelled());
+ }
+}
+
+multi_threaded_rt_test! {
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ // this should time out and not fail immediately since the runtime has not been shut down
+ let _: tokio::time::error::Elapsed = Handle::current()
+ .block_on(tokio::time::timeout(
+ Duration::from_millis(10),
+ listener.accept(),
+ ))
+ .unwrap_err();
+ }
+
+ // ==== timers ======
+
+ // `Handle::block_on` doesn't work with timer futures on a current-thread runtime, as there is
+ // no one to drive the timers, so they would just hang forever. Therefore they are not tested.
+
+ #[test]
+ fn sleep() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current().block_on(time::sleep(Duration::from_millis(100)));
+ }
+
+ #[test]
+ #[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
+ fn sleep_before_shutdown_panics() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let f = time::sleep(Duration::from_millis(100));
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ Handle::current().block_on(f);
+ }
+
+ #[test]
+ #[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
+ fn sleep_after_shutdown_panics() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ Handle::current().block_on(time::sleep(Duration::from_millis(100)));
+ }
+}
+
+// ==== utils ======
+
+/// Create a new multi threaded runtime
+fn new_multi_thread(n: usize) -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(n)
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+/// Create a new single threaded runtime
+fn new_current_thread() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+/// Utility to test things on both kinds of runtimes both before and after shutting it down.
+fn test_with_runtimes<F>(f: F)
+where
+ F: Fn(),
+{
+ {
+ println!("current thread runtime");
+
+ let rt = new_current_thread();
+ let _enter = rt.enter();
+ f();
+
+ println!("current thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+
+ {
+ println!("multi thread (1 thread) runtime");
+
+ let rt = new_multi_thread(1);
+ let _enter = rt.enter();
+ f();
+
+ println!("multi thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+
+ {
+ println!("multi thread (4 threads) runtime");
+
+ let rt = new_multi_thread(4);
+ let _enter = rt.enter();
+ f();
+
+ println!("multi thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+}
diff --git a/third_party/rust/tokio/tests/rt_metrics.rs b/third_party/rust/tokio/tests/rt_metrics.rs
new file mode 100644
index 0000000000..0a26b80285
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_metrics.rs
@@ -0,0 +1,385 @@
+#![warn(rust_2018_idioms)]
+#![cfg(all(feature = "full", tokio_unstable))]
+
+use tokio::runtime::Runtime;
+use tokio::time::{self, Duration};
+
+#[test]
+fn num_workers() {
+ let rt = basic();
+ assert_eq!(1, rt.metrics().num_workers());
+
+ let rt = threaded();
+ assert_eq!(2, rt.metrics().num_workers());
+}
+
+#[test]
+fn remote_schedule_count() {
+ use std::thread;
+
+ let rt = basic();
+ let handle = rt.handle().clone();
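+    // Spawn from another thread so the task is scheduled remotely through the handle,
+    // which is what `remote_schedule_count` tracks.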
+ let task = thread::spawn(move || {
+ handle.spawn(async {
+            // Do nothing
+ })
+ })
+ .join()
+ .unwrap();
+
+ rt.block_on(task).unwrap();
+
+ assert_eq!(1, rt.metrics().remote_schedule_count());
+
+ let rt = threaded();
+ let handle = rt.handle().clone();
+ let task = thread::spawn(move || {
+ handle.spawn(async {
+            // Do nothing
+ })
+ })
+ .join()
+ .unwrap();
+
+ rt.block_on(task).unwrap();
+
+ assert_eq!(1, rt.metrics().remote_schedule_count());
+}
+
+#[test]
+fn worker_park_count() {
+ let rt = basic();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ time::sleep(Duration::from_millis(1)).await;
+ });
+ drop(rt);
+ assert!(2 <= metrics.worker_park_count(0));
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ time::sleep(Duration::from_millis(1)).await;
+ });
+ drop(rt);
+ assert!(1 <= metrics.worker_park_count(0));
+ assert!(1 <= metrics.worker_park_count(1));
+}
+
+#[test]
+fn worker_noop_count() {
+    // There isn't really a great way to generate no-op parks on demand, as they occur as
+    // false-positive events under concurrency.
+
+ let rt = basic();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ time::sleep(Duration::from_millis(1)).await;
+ });
+ drop(rt);
+ assert!(2 <= metrics.worker_noop_count(0));
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ time::sleep(Duration::from_millis(1)).await;
+ });
+ drop(rt);
+ assert!(1 <= metrics.worker_noop_count(0));
+ assert!(1 <= metrics.worker_noop_count(1));
+}
+
+#[test]
+fn worker_steal_count() {
+    // This metric only applies to the multi-threaded runtime.
+    //
+    // We use a blocking channel to back up one worker thread.
+ use std::sync::mpsc::channel;
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+
+ rt.block_on(async {
+ let (tx, rx) = channel();
+
+ // Move to the runtime.
+ tokio::spawn(async move {
+ // Spawn the task that sends to the channel
+ tokio::spawn(async move {
+ tx.send(()).unwrap();
+ });
+
+ // Spawn a task that bumps the previous task out of the "next
+ // scheduled" slot.
+ tokio::spawn(async {});
+
+            // Blocking receive on the channel.
+ rx.recv().unwrap();
+ })
+ .await
+ .unwrap();
+ });
+
+ drop(rt);
+
+ let n: u64 = (0..metrics.num_workers())
+ .map(|i| metrics.worker_steal_count(i))
+ .sum();
+
+ assert_eq!(1, n);
+}
+
+#[test]
+fn worker_poll_count() {
+ const N: u64 = 5;
+
+ let rt = basic();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ for _ in 0..N {
+ tokio::spawn(async {}).await.unwrap();
+ }
+ });
+ drop(rt);
+ assert_eq!(N, metrics.worker_poll_count(0));
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ for _ in 0..N {
+ tokio::spawn(async {}).await.unwrap();
+ }
+ });
+ drop(rt);
+ // Account for the `block_on` task
+ let n = (0..metrics.num_workers())
+ .map(|i| metrics.worker_poll_count(i))
+ .sum();
+
+ assert_eq!(N, n);
+}
+
+#[test]
+fn worker_total_busy_duration() {
+ const N: usize = 5;
+
+ let zero = Duration::from_millis(0);
+
+ let rt = basic();
+ let metrics = rt.metrics();
+
+ rt.block_on(async {
+ for _ in 0..N {
+ tokio::spawn(async {
+ tokio::task::yield_now().await;
+ })
+ .await
+ .unwrap();
+ }
+ });
+
+ drop(rt);
+
+ assert!(zero < metrics.worker_total_busy_duration(0));
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+
+ rt.block_on(async {
+ for _ in 0..N {
+ tokio::spawn(async {
+ tokio::task::yield_now().await;
+ })
+ .await
+ .unwrap();
+ }
+ });
+
+ drop(rt);
+
+ for i in 0..metrics.num_workers() {
+ assert!(zero < metrics.worker_total_busy_duration(i));
+ }
+}
+
+#[test]
+fn worker_local_schedule_count() {
+ let rt = basic();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ tokio::spawn(async {}).await.unwrap();
+ });
+ drop(rt);
+
+ assert_eq!(1, metrics.worker_local_schedule_count(0));
+ assert_eq!(0, metrics.remote_schedule_count());
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ // Move to the runtime
+ tokio::spawn(async {
+ tokio::spawn(async {}).await.unwrap();
+ })
+ .await
+ .unwrap();
+ });
+ drop(rt);
+
+ let n: u64 = (0..metrics.num_workers())
+ .map(|i| metrics.worker_local_schedule_count(i))
+ .sum();
+
+ assert_eq!(2, n);
+ assert_eq!(1, metrics.remote_schedule_count());
+}
+
+#[test]
+fn worker_overflow_count() {
+    // Only applies to the multi-threaded runtime
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ // Move to the runtime
+ tokio::spawn(async {
+ let (tx1, rx1) = std::sync::mpsc::channel();
+ let (tx2, rx2) = std::sync::mpsc::channel();
+
+ // First, we need to block the other worker until all tasks have
+ // been spawned.
+ tokio::spawn(async move {
+ tx1.send(()).unwrap();
+ rx2.recv().unwrap();
+ });
+
+ // Bump the next-run spawn
+ tokio::spawn(async {});
+
+ rx1.recv().unwrap();
+
+ // Spawn many tasks
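+            // (more than fit in a worker's local run queue, forcing an overflow into the
+            // injection queue)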
+ for _ in 0..300 {
+ tokio::spawn(async {});
+ }
+
+ tx2.send(()).unwrap();
+ })
+ .await
+ .unwrap();
+ });
+ drop(rt);
+
+ let n: u64 = (0..metrics.num_workers())
+ .map(|i| metrics.worker_overflow_count(i))
+ .sum();
+
+ assert_eq!(1, n);
+}
+
+#[test]
+fn injection_queue_depth() {
+ use std::thread;
+
+ let rt = basic();
+ let handle = rt.handle().clone();
+ let metrics = rt.metrics();
+
+ thread::spawn(move || {
+ handle.spawn(async {});
+ })
+ .join()
+ .unwrap();
+
+ assert_eq!(1, metrics.injection_queue_depth());
+
+ let rt = threaded();
+ let handle = rt.handle().clone();
+ let metrics = rt.metrics();
+
+ // First we need to block the runtime workers
+ let (tx1, rx1) = std::sync::mpsc::channel();
+ let (tx2, rx2) = std::sync::mpsc::channel();
+
+ rt.spawn(async move { rx1.recv().unwrap() });
+ rt.spawn(async move { rx2.recv().unwrap() });
+
+ thread::spawn(move || {
+ handle.spawn(async {});
+ })
+ .join()
+ .unwrap();
+
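+    // At least the remotely spawned task is still in the injection queue; the two blocker
+    // tasks may or may not have been picked up by the workers yet, hence the 1..=3 range.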
+ let n = metrics.injection_queue_depth();
+ assert!(1 <= n, "{}", n);
+ assert!(3 >= n, "{}", n);
+
+ tx1.send(()).unwrap();
+ tx2.send(()).unwrap();
+}
+
+#[test]
+fn worker_local_queue_depth() {
+ const N: usize = 100;
+
+ let rt = basic();
+ let metrics = rt.metrics();
+ rt.block_on(async {
+ for _ in 0..N {
+ tokio::spawn(async {});
+ }
+
+ assert_eq!(N, metrics.worker_local_queue_depth(0));
+ });
+
+ let rt = threaded();
+ let metrics = rt.metrics();
+ rt.block_on(async move {
+ // Move to the runtime
+ tokio::spawn(async move {
+ let (tx1, rx1) = std::sync::mpsc::channel();
+ let (tx2, rx2) = std::sync::mpsc::channel();
+
+ // First, we need to block the other worker until all tasks have
+ // been spawned.
+ tokio::spawn(async move {
+ tx1.send(()).unwrap();
+ rx2.recv().unwrap();
+ });
+
+ // Bump the next-run spawn
+ tokio::spawn(async {});
+
+ rx1.recv().unwrap();
+
+ // Spawn some tasks
+ for _ in 0..100 {
+ tokio::spawn(async {});
+ }
+
+ let n: usize = (0..metrics.num_workers())
+ .map(|i| metrics.worker_local_queue_depth(i))
+ .sum();
+
+ assert_eq!(n, N);
+
+ tx2.send(()).unwrap();
+ })
+ .await
+ .unwrap();
+ });
+}
+
+fn basic() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+fn threaded() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(2)
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/rt_threaded.rs b/third_party/rust/tokio/tests/rt_threaded.rs
new file mode 100644
index 0000000000..b2f84fd33f
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_threaded.rs
@@ -0,0 +1,544 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::runtime::{self, Runtime};
+use tokio::sync::oneshot;
+use tokio_test::{assert_err, assert_ok};
+
+use futures::future::poll_fn;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::Relaxed;
+use std::sync::{mpsc, Arc, Mutex};
+use std::task::{Context, Poll, Waker};
+
+macro_rules! cfg_metrics {
+ ($($t:tt)*) => {
+ #[cfg(tokio_unstable)]
+ {
+ $( $t )*
+ }
+ }
+}
+
+#[test]
+fn single_thread() {
+ // No panic when starting a runtime w/ a single thread
+ let _ = runtime::Builder::new_multi_thread()
+ .enable_all()
+ .worker_threads(1)
+ .build();
+}
+
+#[test]
+fn many_oneshot_futures() {
+ // used for notifying the main thread
+ const NUM: usize = 1_000;
+
+ for _ in 0..5 {
+ let (tx, rx) = mpsc::channel();
+
+ let rt = rt();
+ let cnt = Arc::new(AtomicUsize::new(0));
+
+ for _ in 0..NUM {
+ let cnt = cnt.clone();
+ let tx = tx.clone();
+
+ rt.spawn(async move {
+ let num = cnt.fetch_add(1, Relaxed) + 1;
+
+ if num == NUM {
+ tx.send(()).unwrap();
+ }
+ });
+ }
+
+ rx.recv().unwrap();
+
+        // Wait for the pool to shut down
+ drop(rt);
+ }
+}
+
+#[test]
+fn spawn_two() {
+ let rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ });
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+
+ cfg_metrics! {
+ let metrics = rt.metrics();
+ drop(rt);
+ assert_eq!(1, metrics.remote_schedule_count());
+
+ let mut local = 0;
+ for i in 0..metrics.num_workers() {
+ local += metrics.worker_local_schedule_count(i);
+ }
+
+ assert_eq!(1, local);
+ }
+}
+
+#[test]
+fn many_multishot_futures() {
+ const CHAIN: usize = 200;
+ const CYCLES: usize = 5;
+ const TRACKS: usize = 50;
+
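+    // Build TRACKS independent chains of CHAIN forwarding tasks; each message traverses
+    // its chain CYCLES times, being cycled back to the start until the last pass delivers it.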
+ for _ in 0..50 {
+ let rt = rt();
+ let mut start_txs = Vec::with_capacity(TRACKS);
+ let mut final_rxs = Vec::with_capacity(TRACKS);
+
+ for _ in 0..TRACKS {
+ let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);
+
+ for _ in 0..CHAIN {
+ let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);
+
+ // Forward all the messages
+ rt.spawn(async move {
+ while let Some(v) = chain_rx.recv().await {
+ next_tx.send(v).await.unwrap();
+ }
+ });
+
+ chain_rx = next_rx;
+ }
+
+ // This final task cycles if needed
+ let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
+ let cycle_tx = start_tx.clone();
+ let mut rem = CYCLES;
+
+ rt.spawn(async move {
+ for _ in 0..CYCLES {
+ let msg = chain_rx.recv().await.unwrap();
+
+ rem -= 1;
+
+ if rem == 0 {
+ final_tx.send(msg).await.unwrap();
+ } else {
+ cycle_tx.send(msg).await.unwrap();
+ }
+ }
+ });
+
+ start_txs.push(start_tx);
+ final_rxs.push(final_rx);
+ }
+
+ {
+ rt.block_on(async move {
+ for start_tx in start_txs {
+ start_tx.send("ping").await.unwrap();
+ }
+
+ for mut final_rx in final_rxs {
+ final_rx.recv().await.unwrap();
+ }
+ });
+ }
+ }
+}
+
+#[test]
+fn spawn_shutdown() {
+ let rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async {
+ tokio::spawn(client_server(tx.clone()));
+ });
+
+ // Use spawner
+ rt.spawn(client_server(tx));
+
+ assert_ok!(rx.recv());
+ assert_ok!(rx.recv());
+
+ drop(rt);
+ assert_err!(rx.try_recv());
+}
+
+async fn client_server(tx: mpsc::Sender<()>) {
+ let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ tokio::spawn(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+}
+
+#[test]
+fn drop_threadpool_drops_futures() {
+ for _ in 0..1_000 {
+ let num_inc = Arc::new(AtomicUsize::new(0));
+ let num_dec = Arc::new(AtomicUsize::new(0));
+ let num_drop = Arc::new(AtomicUsize::new(0));
+
+ struct Never(Arc<AtomicUsize>);
+
+ impl Future for Never {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+ }
+
+ impl Drop for Never {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Relaxed);
+ }
+ }
+
+ let a = num_inc.clone();
+ let b = num_dec.clone();
+
+ let rt = runtime::Builder::new_multi_thread()
+ .enable_all()
+ .on_thread_start(move || {
+ a.fetch_add(1, Relaxed);
+ })
+ .on_thread_stop(move || {
+ b.fetch_add(1, Relaxed);
+ })
+ .build()
+ .unwrap();
+
+ rt.spawn(Never(num_drop.clone()));
+
+        // Wait for the pool to shut down
+ drop(rt);
+
+        // Assert that at least one thread was spawned.
+ let a = num_inc.load(Relaxed);
+ assert!(a >= 1);
+
+        // Assert that all threads shut down
+ let b = num_dec.load(Relaxed);
+ assert_eq!(a, b);
+
+ // Assert that the future was dropped
+ let c = num_drop.load(Relaxed);
+ assert_eq!(c, 1);
+ }
+}
+
+#[test]
+fn start_stop_callbacks_called() {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ let after_start = Arc::new(AtomicUsize::new(0));
+ let before_stop = Arc::new(AtomicUsize::new(0));
+
+ let after_inner = after_start.clone();
+ let before_inner = before_stop.clone();
+ let rt = tokio::runtime::Builder::new_multi_thread()
+ .enable_all()
+ .on_thread_start(move || {
+ after_inner.clone().fetch_add(1, Ordering::Relaxed);
+ })
+ .on_thread_stop(move || {
+ before_inner.clone().fetch_add(1, Ordering::Relaxed);
+ })
+ .build()
+ .unwrap();
+
+ let (tx, rx) = oneshot::channel();
+
+ rt.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rt.block_on(rx));
+
+ drop(rt);
+
+ assert!(after_start.load(Ordering::Relaxed) > 0);
+ assert!(before_stop.load(Ordering::Relaxed) > 0);
+}
+
+#[test]
+fn blocking() {
+ // used for notifying the main thread
+ const NUM: usize = 1_000;
+
+ for _ in 0..10 {
+ let (tx, rx) = mpsc::channel();
+
+ let rt = rt();
+ let cnt = Arc::new(AtomicUsize::new(0));
+
+        // There are four workers in the pool, so if we run 4 blocking tasks we know
+        // that a handoff must have happened.
+ let block = Arc::new(std::sync::Barrier::new(5));
+ for _ in 0..4 {
+ let block = block.clone();
+ rt.spawn(async move {
+ tokio::task::block_in_place(move || {
+ block.wait();
+ block.wait();
+ })
+ });
+ }
+ block.wait();
+
+ for _ in 0..NUM {
+ let cnt = cnt.clone();
+ let tx = tx.clone();
+
+ rt.spawn(async move {
+ let num = cnt.fetch_add(1, Relaxed) + 1;
+
+ if num == NUM {
+ tx.send(()).unwrap();
+ }
+ });
+ }
+
+ rx.recv().unwrap();
+
+        // Wait for the pool to shut down
+ block.wait();
+ }
+}
+
+#[test]
+fn multi_threadpool() {
+ use tokio::sync::oneshot;
+
+ let rt1 = rt();
+ let rt2 = rt();
+
+ let (tx, rx) = oneshot::channel();
+ let (done_tx, done_rx) = mpsc::channel();
+
+ rt2.spawn(async move {
+ rx.await.unwrap();
+ done_tx.send(()).unwrap();
+ });
+
+ rt1.spawn(async move {
+ tx.send(()).unwrap();
+ });
+
+ done_rx.recv().unwrap();
+}
+
+// When `block_in_place` returns, it attempts to reclaim the yielded runtime
+// worker. In this case, the remainder of the task is on the runtime worker and
+// must take part in the cooperative task budgeting system.
+//
+// The test ensures that, when this happens, attempting to consume from a
+// channel yields occasionally even if there are values ready to receive.
+#[test]
+fn coop_and_block_in_place() {
+ let rt = tokio::runtime::Builder::new_multi_thread()
+ // Setting max threads to 1 prevents another thread from claiming the
+ // runtime worker yielded as part of `block_in_place` and guarantees the
+ // same thread will reclaim the worker at the end of the
+ // `block_in_place` call.
+ .max_blocking_threads(1)
+ .build()
+ .unwrap();
+
+ rt.block_on(async move {
+ let (tx, mut rx) = tokio::sync::mpsc::channel(1024);
+
+ // Fill the channel
+ for _ in 0..1024 {
+ tx.send(()).await.unwrap();
+ }
+
+ drop(tx);
+
+ tokio::spawn(async move {
+ // Block in place without doing anything
+ tokio::task::block_in_place(|| {});
+
+            // Receive all the values; this should trigger a `Pending` as the
+            // coop limit will be reached.
+ poll_fn(|cx| {
+ while let Poll::Ready(v) = {
+ tokio::pin! {
+ let fut = rx.recv();
+ }
+
+ Pin::new(&mut fut).poll(cx)
+ } {
+ if v.is_none() {
+ panic!("did not yield");
+ }
+ }
+
+ Poll::Ready(())
+ })
+ .await
+ })
+ .await
+ .unwrap();
+ });
+}
+
+// Testing this does not panic
+#[test]
+fn max_blocking_threads() {
+ let _rt = tokio::runtime::Builder::new_multi_thread()
+ .max_blocking_threads(1)
+ .build()
+ .unwrap();
+}
+
+#[test]
+#[should_panic]
+fn max_blocking_threads_set_to_zero() {
+ let _rt = tokio::runtime::Builder::new_multi_thread()
+ .max_blocking_threads(0)
+ .build()
+ .unwrap();
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn hang_on_shutdown() {
+ let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
+ tokio::spawn(async move {
+ tokio::task::block_in_place(|| sync_rx.recv().ok());
+ });
+
+ tokio::spawn(async {
+ tokio::time::sleep(std::time::Duration::from_secs(2)).await;
+ drop(sync_tx);
+ });
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+}
+
+/// Demonstrates tokio-rs/tokio#3869
+#[test]
+fn wake_during_shutdown() {
+ struct Shared {
+ waker: Option<Waker>,
+ }
+
+ struct MyFuture {
+ shared: Arc<Mutex<Shared>>,
+ put_waker: bool,
+ }
+
+ impl MyFuture {
+ fn new() -> (Self, Self) {
+ let shared = Arc::new(Mutex::new(Shared { waker: None }));
+ let f1 = MyFuture {
+ shared: shared.clone(),
+ put_waker: true,
+ };
+ let f2 = MyFuture {
+ shared,
+ put_waker: false,
+ };
+ (f1, f2)
+ }
+ }
+
+ impl Future for MyFuture {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let me = Pin::into_inner(self);
+ let mut lock = me.shared.lock().unwrap();
+ println!("poll {}", me.put_waker);
+ if me.put_waker {
+ println!("putting");
+ lock.waker = Some(cx.waker().clone());
+ }
+ Poll::Pending
+ }
+ }
+
+ impl Drop for MyFuture {
+ fn drop(&mut self) {
+ println!("drop {} start", self.put_waker);
+ let mut lock = self.shared.lock().unwrap();
+ if !self.put_waker {
+ lock.waker.take().unwrap().wake();
+ }
+ drop(lock);
+ println!("drop {} stop", self.put_waker);
+ }
+ }
+
+ let rt = tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap();
+
+ let (f1, f2) = MyFuture::new();
+
+ rt.spawn(f1);
+ rt.spawn(f2);
+
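+    // After the short sleep below, dropping the runtime drops `f2`, whose destructor wakes
+    // the waker stored by `f1`; this wake during shutdown must not panic (tokio-rs/tokio#3869).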
+ rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
+}
+
+#[should_panic]
+#[tokio::test]
+async fn test_block_in_place1() {
+ tokio::task::block_in_place(|| {});
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn test_block_in_place2() {
+ tokio::task::block_in_place(|| {});
+}
+
+#[should_panic]
+#[tokio::main(flavor = "current_thread")]
+#[test]
+async fn test_block_in_place3() {
+ tokio::task::block_in_place(|| {});
+}
+
+#[tokio::main]
+#[test]
+async fn test_block_in_place4() {
+ tokio::task::block_in_place(|| {});
+}
+
+fn rt() -> Runtime {
+ Runtime::new().unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_ctrl_c.rs b/third_party/rust/tokio/tests/signal_ctrl_c.rs
new file mode 100644
index 0000000000..4b057ee7e1
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_ctrl_c.rs
@@ -0,0 +1,30 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal;
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn ctrl_c() {
+ let ctrl_c = signal::ctrl_c();
+
+ let (fire, wait) = oneshot::channel();
+
+ // NB: simulate a signal coming in by exercising our signal handler
+ // to avoid complications with sending SIGINT to the test process
+ tokio::spawn(async {
+ wait.await.expect("wait failed");
+ send_signal(libc::SIGINT);
+ });
+
+ let _ = fire.send(());
+
+ assert_ok!(ctrl_c.await);
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_recv.rs b/third_party/rust/tokio/tests/signal_drop_recv.rs
new file mode 100644
index 0000000000..b0d9213e61
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_recv.rs
@@ -0,0 +1,22 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn drop_then_get_a_signal() {
+ let kind = SignalKind::user_defined1();
+ let sig = signal(kind).expect("failed to create first signal");
+ drop(sig);
+
+ send_signal(libc::SIGUSR1);
+ let mut sig = signal(kind).expect("failed to create second signal");
+
+ let _ = sig.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_rt.rs b/third_party/rust/tokio/tests/signal_drop_rt.rs
new file mode 100644
index 0000000000..b931d7a903
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_rt.rs
@@ -0,0 +1,44 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::runtime::Runtime;
+use tokio::signal::unix::{signal, SignalKind};
+
+#[test]
+fn dropping_loops_does_not_cause_starvation() {
+ let kind = SignalKind::user_defined1();
+
+ let first_rt = rt();
+ let mut first_signal =
+ first_rt.block_on(async { signal(kind).expect("failed to register first signal") });
+
+ let second_rt = rt();
+ let mut second_signal =
+ second_rt.block_on(async { signal(kind).expect("failed to register second signal") });
+
+ send_signal(libc::SIGUSR1);
+
+ first_rt
+ .block_on(first_signal.recv())
+ .expect("failed to await first signal");
+
+ drop(first_rt);
+ drop(first_signal);
+
+ send_signal(libc::SIGUSR1);
+
+ second_rt.block_on(second_signal.recv());
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_signal.rs b/third_party/rust/tokio/tests/signal_drop_signal.rs
new file mode 100644
index 0000000000..92ac4050d5
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_signal.rs
@@ -0,0 +1,26 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn dropping_signal_does_not_deregister_any_other_instances() {
+ let kind = SignalKind::user_defined1();
+
+ // Signals should not starve based on ordering
+ let first_duplicate_signal = signal(kind).expect("failed to register first duplicate signal");
+ let mut sig = signal(kind).expect("failed to register signal");
+ let second_duplicate_signal = signal(kind).expect("failed to register second duplicate signal");
+
+ drop(first_duplicate_signal);
+ drop(second_duplicate_signal);
+
+ send_signal(libc::SIGUSR1);
+ let _ = sig.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_multi_rt.rs b/third_party/rust/tokio/tests/signal_multi_rt.rs
new file mode 100644
index 0000000000..1e0402c479
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_multi_rt.rs
@@ -0,0 +1,54 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::runtime::Runtime;
+use tokio::signal::unix::{signal, SignalKind};
+
+use std::sync::mpsc::channel;
+use std::thread;
+
+#[test]
+fn multi_loop() {
+ // An "ordinary" (non-future) channel
+ let (sender, receiver) = channel();
+ // Run multiple times, to make sure there are no race conditions
+ for _ in 0..10 {
+ // Run multiple event loops, each one in its own thread
+ let threads: Vec<_> = (0..4)
+ .map(|_| {
+ let sender = sender.clone();
+ thread::spawn(move || {
+ let rt = rt();
+ let _ = rt.block_on(async {
+ let mut signal = signal(SignalKind::hangup()).unwrap();
+ sender.send(()).unwrap();
+ signal.recv().await
+ });
+ })
+ })
+ .collect();
+ // Wait for them to declare they're ready
+ for &_ in threads.iter() {
+ receiver.recv().unwrap();
+ }
+ // Send a signal
+ send_signal(libc::SIGHUP);
+ // Make sure the threads terminated correctly
+ for t in threads {
+ t.join().unwrap();
+ }
+ }
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_no_rt.rs b/third_party/rust/tokio/tests/signal_no_rt.rs
new file mode 100644
index 0000000000..b0f32b2d10
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_no_rt.rs
@@ -0,0 +1,11 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[test]
+#[should_panic]
+fn no_runtime_panics_creating_signals() {
+ let _ = signal(SignalKind::hangup());
+}
diff --git a/third_party/rust/tokio/tests/signal_notify_both.rs b/third_party/rust/tokio/tests/signal_notify_both.rs
new file mode 100644
index 0000000000..3481f808b3
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_notify_both.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn notify_both() {
+ let kind = SignalKind::user_defined2();
+
+ let mut signal1 = signal(kind).expect("failed to create signal1");
+ let mut signal2 = signal(kind).expect("failed to create signal2");
+
+ send_signal(libc::SIGUSR2);
+
+ signal1.recv().await;
+ signal2.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_twice.rs b/third_party/rust/tokio/tests/signal_twice.rs
new file mode 100644
index 0000000000..8f33d22a82
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_twice.rs
@@ -0,0 +1,22 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn twice() {
+ let kind = SignalKind::user_defined1();
+ let mut sig = signal(kind).expect("failed to get signal");
+
+ for _ in 0..2 {
+ send_signal(libc::SIGUSR1);
+
+ assert!(sig.recv().await.is_some());
+ }
+}
diff --git a/third_party/rust/tokio/tests/signal_usr1.rs b/third_party/rust/tokio/tests/signal_usr1.rs
new file mode 100644
index 0000000000..d74c7d31ab
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_usr1.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn signal_usr1() {
+ let mut signal = assert_ok!(
+ signal(SignalKind::user_defined1()),
+ "failed to create signal"
+ );
+
+ send_signal(libc::SIGUSR1);
+
+ signal.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/support/io_vec.rs b/third_party/rust/tokio/tests/support/io_vec.rs
new file mode 100644
index 0000000000..4ea47c748d
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/io_vec.rs
@@ -0,0 +1,45 @@
+use std::io::IoSlice;
+use std::ops::Deref;
+use std::slice;
+
+pub struct IoBufs<'a, 'b>(&'b mut [IoSlice<'a>]);
+
+impl<'a, 'b> IoBufs<'a, 'b> {
+ pub fn new(slices: &'b mut [IoSlice<'a>]) -> Self {
+ IoBufs(slices)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ pub fn advance(mut self, n: usize) -> IoBufs<'a, 'b> {
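+        // First drop any leading slices that are fully consumed by `n`, then trim the
+        // front of the first remaining slice by whatever is left over.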
+ let mut to_remove = 0;
+ let mut remaining_len = n;
+ for slice in self.0.iter() {
+ if remaining_len < slice.len() {
+ break;
+ } else {
+ remaining_len -= slice.len();
+ to_remove += 1;
+ }
+ }
+ self.0 = self.0.split_at_mut(to_remove).1;
+ if let Some(slice) = self.0.first_mut() {
+ let tail = &slice[remaining_len..];
+ // Safety: recasts slice to the original lifetime
+ let tail = unsafe { slice::from_raw_parts(tail.as_ptr(), tail.len()) };
+ *slice = IoSlice::new(tail);
+ } else if remaining_len != 0 {
+ panic!("advance past the end of the slice vector");
+ }
+ self
+ }
+}
+
+impl<'a, 'b> Deref for IoBufs<'a, 'b> {
+ type Target = [IoSlice<'a>];
+ fn deref(&self) -> &[IoSlice<'a>] {
+ self.0
+ }
+}
diff --git a/third_party/rust/tokio/tests/support/mpsc_stream.rs b/third_party/rust/tokio/tests/support/mpsc_stream.rs
new file mode 100644
index 0000000000..aa385a39dc
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/mpsc_stream.rs
@@ -0,0 +1,42 @@
+#![allow(dead_code)]
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use tokio::sync::mpsc::{self, Receiver, Sender, UnboundedReceiver, UnboundedSender};
+use tokio_stream::Stream;
+
+struct UnboundedStream<T> {
+ recv: UnboundedReceiver<T>,
+}
+impl<T> Stream for UnboundedStream<T> {
+ type Item = T;
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ Pin::into_inner(self).recv.poll_recv(cx)
+ }
+}
+
+pub fn unbounded_channel_stream<T: Unpin>() -> (UnboundedSender<T>, impl Stream<Item = T>) {
+ let (tx, rx) = mpsc::unbounded_channel();
+
+ let stream = UnboundedStream { recv: rx };
+
+ (tx, stream)
+}
+
+struct BoundedStream<T> {
+ recv: Receiver<T>,
+}
+impl<T> Stream for BoundedStream<T> {
+ type Item = T;
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ Pin::into_inner(self).recv.poll_recv(cx)
+ }
+}
+
+pub fn channel_stream<T: Unpin>(size: usize) -> (Sender<T>, impl Stream<Item = T>) {
+ let (tx, rx) = mpsc::channel(size);
+
+ let stream = BoundedStream { recv: rx };
+
+ (tx, stream)
+}
diff --git a/third_party/rust/tokio/tests/support/signal.rs b/third_party/rust/tokio/tests/support/signal.rs
new file mode 100644
index 0000000000..ea06058764
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/signal.rs
@@ -0,0 +1,7 @@
+pub fn send_signal(signal: libc::c_int) {
+ use libc::{getpid, kill};
+
+ unsafe {
+ assert_eq!(kill(getpid(), signal), 0);
+ }
+}
diff --git a/third_party/rust/tokio/tests/sync_barrier.rs b/third_party/rust/tokio/tests/sync_barrier.rs
new file mode 100644
index 0000000000..5fe7ba98c8
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_barrier.rs
@@ -0,0 +1,99 @@
+#![allow(clippy::unnecessary_operation)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use tokio::sync::Barrier;
+
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+struct IsSend<T: Send>(T);
+#[test]
+fn barrier_future_is_send() {
+ let b = Barrier::new(0);
+ IsSend(b.wait());
+}
+
+#[test]
+fn zero_does_not_block() {
+ let b = Barrier::new(0);
+
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+}
+
+#[test]
+fn single() {
+ let b = Barrier::new(1);
+
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+}
+
+#[test]
+fn tango() {
+ let b = Barrier::new(2);
+
+ let mut w1 = spawn(b.wait());
+ assert_pending!(w1.poll());
+
+ let mut w2 = spawn(b.wait());
+ let wr2 = assert_ready!(w2.poll());
+ let wr1 = assert_ready!(w1.poll());
+
+ assert!(wr1.is_leader() || wr2.is_leader());
+ assert!(!(wr1.is_leader() && wr2.is_leader()));
+}
+
+#[test]
+fn lots() {
+ let b = Barrier::new(100);
+
+ for _ in 0..10 {
+ let mut wait = Vec::new();
+ for _ in 0..99 {
+ let mut w = spawn(b.wait());
+ assert_pending!(w.poll());
+ wait.push(w);
+ }
+ for w in &mut wait {
+ assert_pending!(w.poll());
+ }
+
+ // pass the barrier
+ let mut w = spawn(b.wait());
+ let mut found_leader = assert_ready!(w.poll()).is_leader();
+ for mut w in wait {
+ let wr = assert_ready!(w.poll());
+ if wr.is_leader() {
+ assert!(!found_leader);
+ found_leader = true;
+ }
+ }
+ assert!(found_leader);
+ }
+}
diff --git a/third_party/rust/tokio/tests/sync_broadcast.rs b/third_party/rust/tokio/tests/sync_broadcast.rs
new file mode 100644
index 0000000000..1b68eb7edb
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_broadcast.rs
@@ -0,0 +1,462 @@
+#![allow(clippy::cognitive_complexity)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use tokio::sync::broadcast;
+use tokio_test::task;
+use tokio_test::{
+ assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok,
+};
+
+use std::sync::Arc;
+
+macro_rules! assert_recv {
+ ($e:expr) => {
+ match $e.try_recv() {
+ Ok(value) => value,
+ Err(e) => panic!("expected recv; got = {:?}", e),
+ }
+ };
+}
+
+macro_rules! assert_empty {
+ ($e:expr) => {
+ match $e.try_recv() {
+ Ok(value) => panic!("expected empty; got = {:?}", value),
+ Err(broadcast::error::TryRecvError::Empty) => {}
+ Err(e) => panic!("expected empty; got = {:?}", e),
+ }
+ };
+}
+
+macro_rules! assert_lagged {
+ ($e:expr, $n:expr) => {
+ match assert_err!($e) {
+ broadcast::error::TryRecvError::Lagged(n) => {
+ assert_eq!(n, $n);
+ }
+ _ => panic!("did not lag"),
+ }
+ };
+}
+
+macro_rules! assert_closed {
+ ($e:expr) => {
+ match assert_err!($e) {
+ broadcast::error::TryRecvError::Closed => {}
+            _ => panic!("did not close"),
+ }
+ };
+}
+
+trait AssertSend: Send + Sync {}
+impl AssertSend for broadcast::Sender<i32> {}
+impl AssertSend for broadcast::Receiver<i32> {}
+
+#[test]
+fn send_try_recv_bounded() {
+ let (tx, mut rx) = broadcast::channel(16);
+
+ assert_empty!(rx);
+
+ let n = assert_ok!(tx.send("hello"));
+ assert_eq!(n, 1);
+
+ let val = assert_recv!(rx);
+ assert_eq!(val, "hello");
+
+ assert_empty!(rx);
+}
+
+#[test]
+fn send_two_recv() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ assert_empty!(rx1);
+ assert_empty!(rx2);
+
+ let n = assert_ok!(tx.send("hello"));
+ assert_eq!(n, 2);
+
+ let val = assert_recv!(rx1);
+ assert_eq!(val, "hello");
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "hello");
+
+ assert_empty!(rx1);
+ assert_empty!(rx2);
+}
+
+#[test]
+fn send_recv_bounded() {
+ let (tx, mut rx) = broadcast::channel(16);
+
+ let mut recv = task::spawn(rx.recv());
+
+ assert_pending!(recv.poll());
+
+ assert_ok!(tx.send("hello"));
+
+ assert!(recv.is_woken());
+ let val = assert_ready_ok!(recv.poll());
+ assert_eq!(val, "hello");
+}
+
+#[test]
+fn send_two_recv_bounded() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ assert_ok!(tx.send("hello"));
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ let val1 = assert_ready_ok!(recv1.poll());
+ let val2 = assert_ready_ok!(recv2.poll());
+ assert_eq!(val1, "hello");
+ assert_eq!(val2, "hello");
+
+ drop((recv1, recv2));
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+
+ assert_ok!(tx.send("world"));
+
+ assert!(recv1.is_woken());
+ assert!(!recv2.is_woken());
+
+ let val1 = assert_ready_ok!(recv1.poll());
+ let val2 = assert_ready_ok!(recv2.poll());
+ assert_eq!(val1, "world");
+ assert_eq!(val2, "world");
+}
+
+#[test]
+fn change_tasks() {
+ let (tx, mut rx) = broadcast::channel(1);
+
+ let mut recv = Box::pin(rx.recv());
+
+ let mut task1 = task::spawn(&mut recv);
+ assert_pending!(task1.poll());
+
+ let mut task2 = task::spawn(&mut recv);
+ assert_pending!(task2.poll());
+
+ tx.send("hello").unwrap();
+
+ assert!(task2.is_woken());
+}
+
+#[test]
+fn send_slow_rx() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ {
+ let mut recv2 = task::spawn(rx2.recv());
+
+ {
+ let mut recv1 = task::spawn(rx1.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ assert_ok!(tx.send("one"));
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ assert_ok!(tx.send("two"));
+
+ let val = assert_ready_ok!(recv1.poll());
+ assert_eq!(val, "one");
+ }
+
+ let val = assert_ready_ok!(task::spawn(rx1.recv()).poll());
+ assert_eq!(val, "two");
+
+ let mut recv1 = task::spawn(rx1.recv());
+
+ assert_pending!(recv1.poll());
+
+ assert_ok!(tx.send("three"));
+
+ assert!(recv1.is_woken());
+
+ let val = assert_ready_ok!(recv1.poll());
+ assert_eq!(val, "three");
+
+ let val = assert_ready_ok!(recv2.poll());
+ assert_eq!(val, "one");
+ }
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "two");
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "three");
+}
+
+#[test]
+fn drop_rx_while_values_remain() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+
+ assert_recv!(rx1);
+ assert_recv!(rx2);
+
+ drop(rx2);
+ drop(rx1);
+}
+
+#[test]
+fn lagging_rx() {
+ let (tx, mut rx1) = broadcast::channel(2);
+ let mut rx2 = tx.subscribe();
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+
+ assert_eq!("one", assert_recv!(rx1));
+
+ assert_ok!(tx.send("three"));
+
+ // Lagged too far
+ let x = dbg!(rx2.try_recv());
+ assert_lagged!(x, 1);
+
+ // Calling again gets the next value
+ assert_eq!("two", assert_recv!(rx2));
+
+ assert_eq!("two", assert_recv!(rx1));
+ assert_eq!("three", assert_recv!(rx1));
+
+ assert_ok!(tx.send("four"));
+ assert_ok!(tx.send("five"));
+
+ assert_lagged!(rx2.try_recv(), 1);
+
+ assert_ok!(tx.send("six"));
+
+ assert_lagged!(rx2.try_recv(), 1);
+}
+
+#[test]
+fn send_no_rx() {
+ let (tx, _) = broadcast::channel(16);
+
+ assert_err!(tx.send("hello"));
+
+ let mut rx = tx.subscribe();
+
+ assert_ok!(tx.send("world"));
+
+ let val = assert_recv!(rx);
+ assert_eq!("world", val);
+}
+
+#[test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn zero_capacity() {
+ broadcast::channel::<()>(0);
+}
+
+#[test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn capacity_too_big() {
+ use std::usize;
+
+ broadcast::channel::<()>(1 + (usize::MAX >> 1));
+}
+
+#[test]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn panic_in_clone() {
+ use std::panic::{self, AssertUnwindSafe};
+
+ #[derive(Eq, PartialEq, Debug)]
+ struct MyVal(usize);
+
+ impl Clone for MyVal {
+ fn clone(&self) -> MyVal {
+ assert_ne!(0, self.0);
+ MyVal(self.0)
+ }
+ }
+
+ let (tx, mut rx) = broadcast::channel(16);
+
+ assert_ok!(tx.send(MyVal(0)));
+ assert_ok!(tx.send(MyVal(1)));
+
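+    // Receiving the first value clones `MyVal(0)`, which panics; the channel must
+    // survive the unwind and still deliver `MyVal(1)` afterwards.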
+ let res = panic::catch_unwind(AssertUnwindSafe(|| {
+ let _ = rx.try_recv();
+ }));
+
+ assert_err!(res);
+
+ let val = assert_recv!(rx);
+ assert_eq!(val, MyVal(1));
+}
+
+#[test]
+fn dropping_tx_notifies_rx() {
+ let (tx, mut rx1) = broadcast::channel::<()>(16);
+ let mut rx2 = tx.subscribe();
+
+ let tx2 = tx.clone();
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ drop(tx);
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ drop(tx2);
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ let err = assert_ready_err!(recv1.poll());
+ assert!(is_closed(err));
+
+ let err = assert_ready_err!(recv2.poll());
+ assert!(is_closed(err));
+}
+
+#[test]
+fn unconsumed_messages_are_dropped() {
+ let (tx, rx) = broadcast::channel(16);
+
+ let msg = Arc::new(());
+
+ assert_ok!(tx.send(msg.clone()));
+
+ assert_eq!(2, Arc::strong_count(&msg));
+
+ drop(rx);
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn single_capacity_recvs() {
+ let (tx, mut rx) = broadcast::channel(1);
+
+ assert_ok!(tx.send(1));
+
+ assert_eq!(assert_recv!(rx), 1);
+ assert_empty!(rx);
+}
+
+#[test]
+fn single_capacity_recvs_after_drop_1() {
+ let (tx, mut rx) = broadcast::channel(1);
+
+ assert_ok!(tx.send(1));
+ drop(tx);
+
+ assert_eq!(assert_recv!(rx), 1);
+ assert_closed!(rx.try_recv());
+}
+
+#[test]
+fn single_capacity_recvs_after_drop_2() {
+ let (tx, mut rx) = broadcast::channel(1);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ drop(tx);
+
+ assert_lagged!(rx.try_recv(), 1);
+ assert_eq!(assert_recv!(rx), 2);
+ assert_closed!(rx.try_recv());
+}
+
+#[test]
+fn dropping_sender_does_not_overwrite() {
+ let (tx, mut rx) = broadcast::channel(2);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ drop(tx);
+
+ assert_eq!(assert_recv!(rx), 1);
+ assert_eq!(assert_recv!(rx), 2);
+ assert_closed!(rx.try_recv());
+}
+
+#[test]
+fn lagging_receiver_recovers_after_wrap_closed_1() {
+ let (tx, mut rx) = broadcast::channel(2);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ assert_ok!(tx.send(3));
+ drop(tx);
+
+ assert_lagged!(rx.try_recv(), 1);
+ assert_eq!(assert_recv!(rx), 2);
+ assert_eq!(assert_recv!(rx), 3);
+ assert_closed!(rx.try_recv());
+}
+
+#[test]
+fn lagging_receiver_recovers_after_wrap_closed_2() {
+ let (tx, mut rx) = broadcast::channel(2);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ assert_ok!(tx.send(3));
+ assert_ok!(tx.send(4));
+ drop(tx);
+
+ assert_lagged!(rx.try_recv(), 2);
+ assert_eq!(assert_recv!(rx), 3);
+ assert_eq!(assert_recv!(rx), 4);
+ assert_closed!(rx.try_recv());
+}
+
+#[test]
+fn lagging_receiver_recovers_after_wrap_open() {
+ let (tx, mut rx) = broadcast::channel(2);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ assert_ok!(tx.send(3));
+
+ assert_lagged!(rx.try_recv(), 1);
+ assert_eq!(assert_recv!(rx), 2);
+ assert_eq!(assert_recv!(rx), 3);
+ assert_empty!(rx);
+}
+
+fn is_closed(err: broadcast::error::RecvError) -> bool {
+ matches!(err, broadcast::error::RecvError::Closed)
+}
diff --git a/third_party/rust/tokio/tests/sync_errors.rs b/third_party/rust/tokio/tests/sync_errors.rs
new file mode 100644
index 0000000000..2eac585d40
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_errors.rs
@@ -0,0 +1,30 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+fn is_error<T: std::error::Error + Send + Sync>() {}
+
+#[test]
+fn mpsc_error_bound() {
+ use tokio::sync::mpsc::error;
+
+ is_error::<error::SendError<()>>();
+ is_error::<error::TrySendError<()>>();
+}
+
+#[test]
+fn oneshot_error_bound() {
+ use tokio::sync::oneshot::error;
+
+ is_error::<error::RecvError>();
+ is_error::<error::TryRecvError>();
+}
+
+#[test]
+fn watch_error_bound() {
+ use tokio::sync::watch::error;
+
+ is_error::<error::SendError<()>>();
+}
diff --git a/third_party/rust/tokio/tests/sync_mpsc.rs b/third_party/rust/tokio/tests/sync_mpsc.rs
new file mode 100644
index 0000000000..abbfa9d7f4
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_mpsc.rs
@@ -0,0 +1,659 @@
+#![allow(clippy::redundant_clone)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::error::{TryRecvError, TrySendError};
+use tokio_test::*;
+
+use std::sync::Arc;
+
+#[cfg(not(target_arch = "wasm32"))]
+mod support {
+ pub(crate) mod mpsc_stream;
+}
+
+trait AssertSend: Send {}
+impl AssertSend for mpsc::Sender<i32> {}
+impl AssertSend for mpsc::Receiver<i32> {}
+
+#[maybe_tokio_test]
+async fn send_recv_with_buffer() {
+ let (tx, mut rx) = mpsc::channel::<i32>(16);
+
+ // Using poll_ready / try_send
+ // let permit assert_ready_ok!(tx.reserve());
+ let permit = tx.reserve().await.unwrap();
+ permit.send(1);
+
+ // Without poll_ready
+ tx.try_send(2).unwrap();
+
+ drop(tx);
+
+ let val = rx.recv().await;
+ assert_eq!(val, Some(1));
+
+ let val = rx.recv().await;
+ assert_eq!(val, Some(2));
+
+ let val = rx.recv().await;
+ assert!(val.is_none());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn reserve_disarm() {
+ let (tx, mut rx) = mpsc::channel::<i32>(2);
+ let tx1 = tx.clone();
+ let tx2 = tx.clone();
+ let tx3 = tx.clone();
+ let tx4 = tx;
+
+ // We should be able to `poll_ready` two handles without problem
+ let permit1 = assert_ok!(tx1.reserve().await);
+ let permit2 = assert_ok!(tx2.reserve().await);
+
+ // But a third should not be ready
+ let mut r3 = tokio_test::task::spawn(tx3.reserve());
+ assert_pending!(r3.poll());
+
+ let mut r4 = tokio_test::task::spawn(tx4.reserve());
+ assert_pending!(r4.poll());
+
+ // Using one of the reserved slots should allow a new handle to become ready
+ permit1.send(1);
+
+ // We also need to receive for the slot to be free
+ assert!(!r3.is_woken());
+ rx.recv().await.unwrap();
+ // Now there's a free slot!
+ assert!(r3.is_woken());
+ assert!(!r4.is_woken());
+
+ // Dropping a permit should also open up a slot
+ drop(permit2);
+ assert!(r4.is_woken());
+
+ let mut r1 = tokio_test::task::spawn(tx1.reserve());
+ assert_pending!(r1.poll());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn send_recv_stream_with_buffer() {
+ use tokio_stream::StreamExt;
+
+ let (tx, rx) = support::mpsc_stream::channel_stream::<i32>(16);
+ let mut rx = Box::pin(rx);
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1).await);
+ assert_ok!(tx.send(2).await);
+ });
+
+ assert_eq!(Some(1), rx.next().await);
+ assert_eq!(Some(2), rx.next().await);
+ assert_eq!(None, rx.next().await);
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn async_send_recv_with_buffer() {
+ let (tx, mut rx) = mpsc::channel(16);
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1).await);
+ assert_ok!(tx.send(2).await);
+ });
+
+ assert_eq!(Some(1), rx.recv().await);
+ assert_eq!(Some(2), rx.recv().await);
+ assert_eq!(None, rx.recv().await);
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn start_send_past_cap() {
+ use std::future::Future;
+
+ let mut t1 = tokio_test::task::spawn(());
+
+ let (tx1, mut rx) = mpsc::channel(1);
+ let tx2 = tx1.clone();
+
+ assert_ok!(tx1.try_send(()));
+
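+    // The single buffer slot is now occupied, so both `reserve()` calls below stay
+    // pending until the receiver makes capacity available.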
+ let mut r1 = Box::pin(tx1.reserve());
+ t1.enter(|cx, _| assert_pending!(r1.as_mut().poll(cx)));
+
+ {
+ let mut r2 = tokio_test::task::spawn(tx2.reserve());
+ assert_pending!(r2.poll());
+
+ drop(r1);
+
+ assert!(rx.recv().await.is_some());
+
+ assert!(r2.is_woken());
+ assert!(!t1.is_woken());
+ }
+
+ drop(tx1);
+ drop(tx2);
+
+ assert!(rx.recv().await.is_none());
+}
+
+#[test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn buffer_gteq_one() {
+ mpsc::channel::<i32>(0);
+}
+
+#[maybe_tokio_test]
+async fn send_recv_unbounded() {
+ let (tx, mut rx) = mpsc::unbounded_channel::<i32>();
+
+    // Using `send`
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+
+ assert_eq!(rx.recv().await, Some(1));
+ assert_eq!(rx.recv().await, Some(2));
+
+ drop(tx);
+
+ assert!(rx.recv().await.is_none());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn async_send_recv_unbounded() {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ });
+
+ assert_eq!(Some(1), rx.recv().await);
+ assert_eq!(Some(2), rx.recv().await);
+ assert_eq!(None, rx.recv().await);
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn send_recv_stream_unbounded() {
+ use tokio_stream::StreamExt;
+
+ let (tx, rx) = support::mpsc_stream::unbounded_channel_stream::<i32>();
+
+ let mut rx = Box::pin(rx);
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ });
+
+ assert_eq!(Some(1), rx.next().await);
+ assert_eq!(Some(2), rx.next().await);
+ assert_eq!(None, rx.next().await);
+}
+
+#[maybe_tokio_test]
+async fn no_t_bounds_buffer() {
+ struct NoImpls;
+
+ let (tx, mut rx) = mpsc::channel(100);
+
+ // sender should be Debug even though T isn't Debug
+ println!("{:?}", tx);
+ // same with Receiver
+ println!("{:?}", rx);
+ // and sender should be Clone even though T isn't Clone
+ assert!(tx.clone().try_send(NoImpls).is_ok());
+
+ assert!(rx.recv().await.is_some());
+}
+
+#[maybe_tokio_test]
+async fn no_t_bounds_unbounded() {
+ struct NoImpls;
+
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ // sender should be Debug even though T isn't Debug
+ println!("{:?}", tx);
+ // same with Receiver
+ println!("{:?}", rx);
+ // and sender should be Clone even though T isn't Clone
+ assert!(tx.clone().send(NoImpls).is_ok());
+
+ assert!(rx.recv().await.is_some());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn send_recv_buffer_limited() {
+ let (tx, mut rx) = mpsc::channel::<i32>(1);
+
+ // Reserve capacity
+ let p1 = assert_ok!(tx.reserve().await);
+
+ // Send first message
+ p1.send(1);
+
+ // Not ready
+ let mut p2 = tokio_test::task::spawn(tx.reserve());
+ assert_pending!(p2.poll());
+
+ // Take the value
+ assert!(rx.recv().await.is_some());
+
+ // Notified
+ assert!(p2.is_woken());
+
+ // Trying to send fails
+ assert_err!(tx.try_send(1337));
+
+ // Send second
+ let permit = assert_ready_ok!(p2.poll());
+ permit.send(2);
+
+ assert!(rx.recv().await.is_some());
+}
+
+#[maybe_tokio_test]
+async fn recv_close_gets_none_idle() {
+ let (tx, mut rx) = mpsc::channel::<i32>(10);
+
+ rx.close();
+
+ assert!(rx.recv().await.is_none());
+
+ assert_err!(tx.send(1).await);
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn recv_close_gets_none_reserved() {
+ let (tx1, mut rx) = mpsc::channel::<i32>(1);
+ let tx2 = tx1.clone();
+
+ let permit1 = assert_ok!(tx1.reserve().await);
+ let mut permit2 = tokio_test::task::spawn(tx2.reserve());
+ assert_pending!(permit2.poll());
+
+ rx.close();
+
+ assert!(permit2.is_woken());
+ assert_ready_err!(permit2.poll());
+
+ {
+ let mut recv = tokio_test::task::spawn(rx.recv());
+ assert_pending!(recv.poll());
+
+ permit1.send(123);
+ assert!(recv.is_woken());
+
+ let v = assert_ready!(recv.poll());
+ assert_eq!(v, Some(123));
+ }
+
+ assert!(rx.recv().await.is_none());
+}
+
+#[maybe_tokio_test]
+async fn tx_close_gets_none() {
+ let (_, mut rx) = mpsc::channel::<i32>(10);
+ assert!(rx.recv().await.is_none());
+}
+
+#[maybe_tokio_test]
+async fn try_send_fail() {
+ let (tx, mut rx) = mpsc::channel(1);
+
+ tx.try_send("hello").unwrap();
+
+ // This should fail
+ match assert_err!(tx.try_send("fail")) {
+ TrySendError::Full(..) => {}
+ _ => panic!(),
+ }
+
+ assert_eq!(rx.recv().await, Some("hello"));
+
+ assert_ok!(tx.try_send("goodbye"));
+ drop(tx);
+
+ assert_eq!(rx.recv().await, Some("goodbye"));
+ assert!(rx.recv().await.is_none());
+}
+
+#[maybe_tokio_test]
+async fn try_send_fail_with_try_recv() {
+ let (tx, mut rx) = mpsc::channel(1);
+
+ tx.try_send("hello").unwrap();
+
+ // This should fail
+ match assert_err!(tx.try_send("fail")) {
+ TrySendError::Full(..) => {}
+ _ => panic!(),
+ }
+
+ assert_eq!(rx.try_recv(), Ok("hello"));
+
+ assert_ok!(tx.try_send("goodbye"));
+ drop(tx);
+
+ assert_eq!(rx.try_recv(), Ok("goodbye"));
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+}
+
+#[maybe_tokio_test]
+async fn try_reserve_fails() {
+ let (tx, mut rx) = mpsc::channel(1);
+
+ let permit = tx.try_reserve().unwrap();
+
+ // This should fail
+ match assert_err!(tx.try_reserve()) {
+ TrySendError::Full(()) => {}
+ _ => panic!(),
+ }
+
+ permit.send("foo");
+
+ assert_eq!(rx.recv().await, Some("foo"));
+
+ // Dropping permit releases the slot.
+ let permit = tx.try_reserve().unwrap();
+ drop(permit);
+
+ let _permit = tx.try_reserve().unwrap();
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn drop_permit_releases_permit() {
+    // poll_ready reserves capacity; ensure that the capacity is released if tx
+    // is dropped w/o sending a value.
+ let (tx1, _rx) = mpsc::channel::<i32>(1);
+ let tx2 = tx1.clone();
+
+ let permit = assert_ok!(tx1.reserve().await);
+
+ let mut reserve2 = tokio_test::task::spawn(tx2.reserve());
+ assert_pending!(reserve2.poll());
+
+ drop(permit);
+
+ assert!(reserve2.is_woken());
+ assert_ready_ok!(reserve2.poll());
+}
+
+#[maybe_tokio_test]
+async fn dropping_rx_closes_channel() {
+ let (tx, rx) = mpsc::channel(100);
+
+ let msg = Arc::new(());
+ assert_ok!(tx.try_send(msg.clone()));
+
+ drop(rx);
+ assert_err!(tx.reserve().await);
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn dropping_rx_closes_channel_for_try() {
+ let (tx, rx) = mpsc::channel(100);
+
+ let msg = Arc::new(());
+ tx.try_send(msg.clone()).unwrap();
+
+ drop(rx);
+
+ assert!(matches!(
+ tx.try_send(msg.clone()),
+ Err(TrySendError::Closed(_))
+ ));
+ assert!(matches!(tx.try_reserve(), Err(TrySendError::Closed(_))));
+ assert!(matches!(
+ tx.try_reserve_owned(),
+ Err(TrySendError::Closed(_))
+ ));
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn unconsumed_messages_are_dropped() {
+ let msg = Arc::new(());
+
+ let (tx, rx) = mpsc::channel(100);
+
+ tx.try_send(msg.clone()).unwrap();
+
+ assert_eq!(2, Arc::strong_count(&msg));
+
+ drop((tx, rx));
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+#[cfg(feature = "full")]
+fn blocking_recv() {
+ let (tx, mut rx) = mpsc::channel::<u8>(1);
+
+ let sync_code = std::thread::spawn(move || {
+ assert_eq!(Some(10), rx.blocking_recv());
+ });
+
+ tokio::runtime::Runtime::new()
+ .unwrap()
+ .block_on(async move {
+ let _ = tx.send(10).await;
+ });
+ sync_code.join().unwrap()
+}
+
+#[tokio::test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+async fn blocking_recv_async() {
+ let (_tx, mut rx) = mpsc::channel::<()>(1);
+ let _ = rx.blocking_recv();
+}
+
+#[test]
+#[cfg(feature = "full")]
+fn blocking_send() {
+ let (tx, mut rx) = mpsc::channel::<u8>(1);
+
+ let sync_code = std::thread::spawn(move || {
+ tx.blocking_send(10).unwrap();
+ });
+
+ tokio::runtime::Runtime::new()
+ .unwrap()
+ .block_on(async move {
+ assert_eq!(Some(10), rx.recv().await);
+ });
+ sync_code.join().unwrap()
+}
+
+#[tokio::test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+async fn blocking_send_async() {
+ let (tx, _rx) = mpsc::channel::<()>(1);
+ let _ = tx.blocking_send(());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn ready_close_cancel_bounded() {
+ let (tx, mut rx) = mpsc::channel::<()>(100);
+ let _tx2 = tx.clone();
+
+ let permit = assert_ok!(tx.reserve().await);
+
+ rx.close();
+
+ let mut recv = tokio_test::task::spawn(rx.recv());
+ assert_pending!(recv.poll());
+
+ drop(permit);
+
+ assert!(recv.is_woken());
+ let val = assert_ready!(recv.poll());
+ assert!(val.is_none());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn permit_available_not_acquired_close() {
+ let (tx1, mut rx) = mpsc::channel::<()>(1);
+ let tx2 = tx1.clone();
+
+ let permit1 = assert_ok!(tx1.reserve().await);
+
+ let mut permit2 = tokio_test::task::spawn(tx2.reserve());
+ assert_pending!(permit2.poll());
+
+ rx.close();
+
+ drop(permit1);
+ assert!(permit2.is_woken());
+
+ drop(permit2);
+ assert!(rx.recv().await.is_none());
+}
+
+#[test]
+fn try_recv_bounded() {
+ let (tx, mut rx) = mpsc::channel(5);
+
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ assert!(tx.try_send("hello").is_err());
+
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
+
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ assert_eq!(Ok("hello"), rx.try_recv());
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ assert!(tx.try_send("hello").is_err());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
+
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ drop(tx);
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Ok("hello"), rx.try_recv());
+ assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
+}
+
+#[test]
+fn try_recv_unbounded() {
+ for num in 0..100 {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ for i in 0..num {
+ tx.send(i).unwrap();
+ }
+
+ for i in 0..num {
+ assert_eq!(rx.try_recv(), Ok(i));
+ }
+
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Empty));
+ drop(tx);
+ assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
+ }
+}
+
+#[test]
+fn try_recv_close_while_empty_bounded() {
+ let (tx, mut rx) = mpsc::channel::<()>(5);
+
+ assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
+ drop(tx);
+ assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
+}
+
+#[test]
+fn try_recv_close_while_empty_unbounded() {
+ let (tx, mut rx) = mpsc::unbounded_channel::<()>();
+
+ assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
+ drop(tx);
+ assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
+}
+
+#[tokio::test(start_paused = true)]
+#[cfg(feature = "full")]
+async fn recv_timeout() {
+ use tokio::sync::mpsc::error::SendTimeoutError::{Closed, Timeout};
+ use tokio::time::Duration;
+
+ let (tx, rx) = mpsc::channel(5);
+
+ assert_eq!(tx.send_timeout(10, Duration::from_secs(1)).await, Ok(()));
+ assert_eq!(tx.send_timeout(20, Duration::from_secs(1)).await, Ok(()));
+ assert_eq!(tx.send_timeout(30, Duration::from_secs(1)).await, Ok(()));
+ assert_eq!(tx.send_timeout(40, Duration::from_secs(1)).await, Ok(()));
+ assert_eq!(tx.send_timeout(50, Duration::from_secs(1)).await, Ok(()));
+ assert_eq!(
+ tx.send_timeout(60, Duration::from_secs(1)).await,
+ Err(Timeout(60))
+ );
+
+ drop(rx);
+ assert_eq!(
+ tx.send_timeout(70, Duration::from_secs(1)).await,
+ Err(Closed(70))
+ );
+}
+
+#[test]
+#[should_panic = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn recv_timeout_panic() {
+ use futures::future::FutureExt;
+ use tokio::time::Duration;
+
+ let (tx, _rx) = mpsc::channel(5);
+ tx.send_timeout(10, Duration::from_secs(1)).now_or_never();
+}
diff --git a/third_party/rust/tokio/tests/sync_mutex.rs b/third_party/rust/tokio/tests/sync_mutex.rs
new file mode 100644
index 0000000000..51dbe03dc7
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_mutex.rs
@@ -0,0 +1,178 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::Mutex;
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+use std::sync::Arc;
+
+#[test]
+fn straight_execution() {
+ let l = Mutex::new(100);
+
+ {
+ let mut t = spawn(l.lock());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &100);
+ *g = 99;
+ }
+ {
+ let mut t = spawn(l.lock());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &99);
+ *g = 98;
+ }
+ {
+ let mut t = spawn(l.lock());
+ let g = assert_ready!(t.poll());
+ assert_eq!(&*g, &98);
+ }
+}
+
+#[test]
+fn readiness() {
+ let l1 = Arc::new(Mutex::new(100));
+ let l2 = Arc::clone(&l1);
+ let mut t1 = spawn(l1.lock());
+ let mut t2 = spawn(l2.lock());
+
+ let g = assert_ready!(t1.poll());
+
+    // We can't acquire the lock now since it's already held by g
+ assert_pending!(t2.poll());
+
+ // But once g unlocks, we can acquire it
+ drop(g);
+ assert!(t2.is_woken());
+ assert_ready!(t2.poll());
+}
+
+/*
+#[test]
+#[ignore]
+fn lock() {
+ let mut lock = Mutex::new(false);
+
+ let mut lock2 = lock.clone();
+ std::thread::spawn(move || {
+ let l = lock2.lock();
+ pin_mut!(l);
+
+ let mut task = MockTask::new();
+ let mut g = assert_ready!(task.poll(&mut l));
+ std::thread::sleep(std::time::Duration::from_millis(500));
+ *g = true;
+ drop(g);
+ });
+
+ std::thread::sleep(std::time::Duration::from_millis(50));
+ let mut task = MockTask::new();
+ let l = lock.lock();
+ pin_mut!(l);
+
+ assert_pending!(task.poll(&mut l));
+
+ std::thread::sleep(std::time::Duration::from_millis(500));
+ assert!(task.is_woken());
+ let result = assert_ready!(task.poll(&mut l));
+ assert!(*result);
+}
+*/
+
+/// Ensure a mutex is unlocked if a future holding the lock
+/// is aborted prematurely.
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn aborted_future_1() {
+ use std::time::Duration;
+ use tokio::time::{interval, timeout};
+
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ let iv = interval(Duration::from_millis(1000));
+ tokio::pin!(iv);
+ m2.lock().await;
+ iv.as_mut().tick().await;
+ iv.as_mut().tick().await;
+ })
+ .await
+ .unwrap_err();
+ }
+    // This should succeed because the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+/// This test is similar to `aborted_future_1` but this time the
+/// aborted future is waiting for the lock.
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn aborted_future_2() {
+ use std::time::Duration;
+ use tokio::time::timeout;
+
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ // Lock mutex
+ let _lock = m1.lock().await;
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ m2.lock().await;
+ })
+ .await
+ .unwrap_err();
+ }
+ }
+    // This should succeed because the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+#[test]
+fn try_lock() {
+ let m: Mutex<usize> = Mutex::new(0);
+ {
+ let g1 = m.try_lock();
+ assert!(g1.is_ok());
+ let g2 = m.try_lock();
+ assert!(!g2.is_ok());
+ }
+ let g3 = m.try_lock();
+ assert!(g3.is_ok());
+}
+
+#[maybe_tokio_test]
+async fn debug_format() {
+ let s = "debug";
+ let m = Mutex::new(s.to_string());
+ assert_eq!(format!("{:?}", s), format!("{:?}", m.lock().await));
+}
+
+#[maybe_tokio_test]
+async fn mutex_debug() {
+ let s = "data";
+ let m = Mutex::new(s.to_string());
+ assert_eq!(format!("{:?}", m), r#"Mutex { data: "data" }"#);
+ let _guard = m.lock().await;
+ assert_eq!(format!("{:?}", m), r#"Mutex { data: <locked> }"#)
+}
diff --git a/third_party/rust/tokio/tests/sync_mutex_owned.rs b/third_party/rust/tokio/tests/sync_mutex_owned.rs
new file mode 100644
index 0000000000..2ce15de5b9
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_mutex_owned.rs
@@ -0,0 +1,136 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::Mutex;
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+use std::sync::Arc;
+
+#[test]
+fn straight_execution() {
+ let l = Arc::new(Mutex::new(100));
+
+ {
+ let mut t = spawn(l.clone().lock_owned());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &100);
+ *g = 99;
+ }
+ {
+ let mut t = spawn(l.clone().lock_owned());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &99);
+ *g = 98;
+ }
+ {
+ let mut t = spawn(l.lock_owned());
+ let g = assert_ready!(t.poll());
+ assert_eq!(&*g, &98);
+ }
+}
+
+#[test]
+fn readiness() {
+ let l = Arc::new(Mutex::new(100));
+ let mut t1 = spawn(l.clone().lock_owned());
+ let mut t2 = spawn(l.lock_owned());
+
+ let g = assert_ready!(t1.poll());
+
+    // We can't acquire the lock now since it's already held by g
+ assert_pending!(t2.poll());
+
+ // But once g unlocks, we can acquire it
+ drop(g);
+ assert!(t2.is_woken());
+ assert_ready!(t2.poll());
+}
+
+/// Ensure a mutex is unlocked if a future holding the lock
+/// is aborted prematurely.
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn aborted_future_1() {
+ use std::time::Duration;
+ use tokio::time::{interval, timeout};
+
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ let iv = interval(Duration::from_millis(1000));
+ tokio::pin!(iv);
+ m2.lock_owned().await;
+ iv.as_mut().tick().await;
+ iv.as_mut().tick().await;
+ })
+ .await
+ .unwrap_err();
+ }
+    // This should succeed because the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock_owned().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+/// This test is similar to `aborted_future_1` but this time the
+/// aborted future is waiting for the lock.
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn aborted_future_2() {
+ use std::time::Duration;
+ use tokio::time::timeout;
+
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ // Lock mutex
+ let _lock = m1.clone().lock_owned().await;
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ m2.lock_owned().await;
+ })
+ .await
+ .unwrap_err();
+ }
+ }
+    // This should succeed because the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock_owned().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+#[test]
+fn try_lock_owned() {
+ let m: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ let g1 = m.clone().try_lock_owned();
+ assert!(g1.is_ok());
+ let g2 = m.clone().try_lock_owned();
+ assert!(!g2.is_ok());
+ }
+ let g3 = m.try_lock_owned();
+ assert!(g3.is_ok());
+}
+
+#[maybe_tokio_test]
+async fn debug_format() {
+ let s = "debug";
+ let m = Arc::new(Mutex::new(s.to_string()));
+ assert_eq!(format!("{:?}", s), format!("{:?}", m.lock_owned().await));
+}
diff --git a/third_party/rust/tokio/tests/sync_notify.rs b/third_party/rust/tokio/tests/sync_notify.rs
new file mode 100644
index 0000000000..5318d131cf
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_notify.rs
@@ -0,0 +1,156 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use tokio::sync::Notify;
+use tokio_test::task::spawn;
+use tokio_test::*;
+
+trait AssertSend: Send + Sync {}
+impl AssertSend for Notify {}
+
+#[test]
+fn notify_notified_one() {
+ let notify = Notify::new();
+ let mut notified = spawn(async { notify.notified().await });
+
+ notify.notify_one();
+ assert_ready!(notified.poll());
+}
+
+#[test]
+fn notified_one_notify() {
+ let notify = Notify::new();
+ let mut notified = spawn(async { notify.notified().await });
+
+ assert_pending!(notified.poll());
+
+ notify.notify_one();
+ assert!(notified.is_woken());
+ assert_ready!(notified.poll());
+}
+
+#[test]
+fn notified_multi_notify() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify_one();
+ assert!(notified1.is_woken());
+ assert!(!notified2.is_woken());
+
+ assert_ready!(notified1.poll());
+ assert_pending!(notified2.poll());
+}
+
+#[test]
+fn notify_notified_multi() {
+ let notify = Notify::new();
+
+ notify.notify_one();
+
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_ready!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify_one();
+
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
+
+#[test]
+fn notified_drop_notified_notify() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+
+ drop(notified1);
+
+ assert_pending!(notified2.poll());
+
+ notify.notify_one();
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
+
+#[test]
+fn notified_multi_notify_drop_one() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify_one();
+
+ assert!(notified1.is_woken());
+ assert!(!notified2.is_woken());
+
+ drop(notified1);
+
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
+
+#[test]
+fn notify_in_drop_after_wake() {
+ use futures::task::ArcWake;
+ use std::future::Future;
+ use std::sync::Arc;
+
+ let notify = Arc::new(Notify::new());
+
+ struct NotifyOnDrop(Arc<Notify>);
+
+ impl ArcWake for NotifyOnDrop {
+ fn wake_by_ref(_arc_self: &Arc<Self>) {}
+ }
+
+ impl Drop for NotifyOnDrop {
+ fn drop(&mut self) {
+ self.0.notify_waiters();
+ }
+ }
+
+ let mut fut = Box::pin(async {
+ notify.notified().await;
+ });
+
+ {
+ let waker = futures::task::waker(Arc::new(NotifyOnDrop(notify.clone())));
+ let mut cx = std::task::Context::from_waker(&waker);
+ assert!(fut.as_mut().poll(&mut cx).is_pending());
+ }
+
+ // Now, notifying **should not** deadlock
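+    // Waking the stored waker causes it to be dropped, and its `Drop` impl calls
+    // `notify_waiters` again; this nested call must not deadlock.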
+ notify.notify_waiters();
+}
+
+#[test]
+fn notify_one_after_dropped_all() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+
+ notify.notify_waiters();
+ notify.notify_one();
+
+ drop(notified1);
+
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_ready!(notified2.poll());
+}
diff --git a/third_party/rust/tokio/tests/sync_once_cell.rs b/third_party/rust/tokio/tests/sync_once_cell.rs
new file mode 100644
index 0000000000..18eaf9382b
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_once_cell.rs
@@ -0,0 +1,274 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::mem;
+use std::ops::Drop;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::time::Duration;
+use tokio::runtime;
+use tokio::sync::{OnceCell, SetError};
+use tokio::time;
+
+async fn func1() -> u32 {
+ 5
+}
+
+async fn func2() -> u32 {
+ time::sleep(Duration::from_millis(1)).await;
+ 10
+}
+
+async fn func_err() -> Result<u32, ()> {
+ Err(())
+}
+
+async fn func_ok() -> Result<u32, ()> {
+ Ok(10)
+}
+
+async fn func_panic() -> u32 {
+ time::sleep(Duration::from_millis(1)).await;
+ panic!();
+}
+
+async fn sleep_and_set() -> u32 {
+    // Simulate a slow initialization: while time is paused, this sleep keeps
+    // the cell in the "initializing" state until another task advances the
+    // clock and attempts `set`, which fails because initialization is in progress.
+ time::sleep(Duration::from_millis(2)).await;
+ 5
+}
+
+async fn advance_time_and_set(cell: &'static OnceCell<u32>, v: u32) -> Result<(), SetError<u32>> {
+ time::advance(Duration::from_millis(1)).await;
+ cell.set(v)
+}
+
+#[test]
+fn get_or_init() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .start_paused(true)
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_init(func2).await });
+
+ time::advance(Duration::from_millis(1)).await;
+ time::resume();
+
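+        // `func1` resolves immediately and wins the initialization, setting the
+        // cell to 5, so both handles observe the same value.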
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert_eq!(*result2, 5);
+ });
+}
+
+#[test]
+fn get_or_init_panic() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ time::pause();
+
+ let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_init(func_panic).await });
+
+ time::advance(Duration::from_millis(1)).await;
+
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert_eq!(*result2, 5);
+ });
+}
+
+#[test]
+fn set_and_get() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let _ = rt.spawn(async { ONCE.set(5) }).await;
+ let value = ONCE.get().unwrap();
+ assert_eq!(*value, 5);
+ });
+}
+
+#[test]
+fn get_uninit() {
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+ let uninit = ONCE.get();
+ assert!(uninit.is_none());
+}
+
+#[test]
+fn set_twice() {
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ let first = ONCE.set(5);
+ assert_eq!(first, Ok(()));
+ let second = ONCE.set(6);
+ assert!(second.err().unwrap().is_already_init_err());
+}
+
+#[test]
+fn set_while_initializing() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ time::pause();
+
+ let handle1 = rt.spawn(async { ONCE.get_or_init(sleep_and_set).await });
+ let handle2 = rt.spawn(async { advance_time_and_set(&ONCE, 10).await });
+
+ time::advance(Duration::from_millis(2)).await;
+
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert!(result2.err().unwrap().is_initializing_err());
+ });
+}
+
+#[test]
+fn get_or_try_init() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .start_paused(true)
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let handle1 = rt.spawn(async { ONCE.get_or_try_init(func_err).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_try_init(func_ok).await });
+
+ time::advance(Duration::from_millis(1)).await;
+ time::resume();
+
+ let result1 = handle1.await.unwrap();
+ assert!(result1.is_err());
+
+ let result2 = handle2.await.unwrap();
+ assert_eq!(*result2.unwrap(), 10);
+ });
+}
+
+#[test]
+fn drop_cell() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ {
+ let once_cell = OnceCell::new();
+ let prev = once_cell.set(fooer);
+ assert!(prev.is_ok())
+ }
+ assert!(NUM_DROPS.load(Ordering::Acquire) == 1);
+}
+
+#[test]
+fn drop_cell_new_with() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ {
+ let once_cell = OnceCell::new_with(Some(fooer));
+ assert!(once_cell.initialized());
+ }
+ assert!(NUM_DROPS.load(Ordering::Acquire) == 1);
+}
+
+#[test]
+fn drop_into_inner() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ let once_cell = OnceCell::new();
+ assert!(once_cell.set(fooer).is_ok());
+ let fooer = once_cell.into_inner();
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 0);
+ drop(fooer);
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 1);
+}
+
+#[test]
+fn drop_into_inner_new_with() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ let once_cell = OnceCell::new_with(Some(fooer));
+ let fooer = once_cell.into_inner();
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 0);
+ mem::drop(fooer);
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 1);
+}
+
+#[test]
+fn from() {
+ let cell = OnceCell::from(2);
+ assert_eq!(*cell.get().unwrap(), 2);
+}
diff --git a/third_party/rust/tokio/tests/sync_oneshot.rs b/third_party/rust/tokio/tests/sync_oneshot.rs
new file mode 100644
index 0000000000..44d09e8ade
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_oneshot.rs
@@ -0,0 +1,279 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use tokio::sync::oneshot;
+use tokio::sync::oneshot::error::TryRecvError;
+use tokio_test::*;
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+trait AssertSend: Send {}
+impl AssertSend for oneshot::Sender<i32> {}
+impl AssertSend for oneshot::Receiver<i32> {}
+
+trait SenderExt {
+ fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()>;
+}
+impl<T> SenderExt for oneshot::Sender<T> {
+ fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ tokio::pin! {
+ let fut = self.closed();
+ }
+ fut.poll(cx)
+ }
+}
+
+#[test]
+fn send_recv() {
+ let (tx, rx) = oneshot::channel();
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(rx.poll());
+
+ assert_ok!(tx.send(1));
+
+ assert!(rx.is_woken());
+
+ let val = assert_ready_ok!(rx.poll());
+ assert_eq!(val, 1);
+}
+
+#[maybe_tokio_test]
+async fn async_send_recv() {
+ let (tx, rx) = oneshot::channel();
+
+ assert_ok!(tx.send(1));
+ assert_eq!(1, assert_ok!(rx.await));
+}
+
+#[test]
+fn close_tx() {
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(rx.poll());
+
+ drop(tx);
+
+ assert!(rx.is_woken());
+ assert_ready_err!(rx.poll());
+}
+
+#[test]
+fn close_rx() {
+ // First, without checking poll_closed()
+ //
+ let (tx, _) = oneshot::channel();
+
+ assert_err!(tx.send(1));
+
+ // Second, via poll_closed();
+
+ let (tx, rx) = oneshot::channel();
+ let mut tx = task::spawn(tx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ drop(rx);
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(tx.into_inner().send(1));
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn async_rx_closed() {
+ let (mut tx, rx) = oneshot::channel::<()>();
+
+ tokio::spawn(async move {
+ drop(rx);
+ });
+
+ tx.closed().await;
+}
+
+#[test]
+fn explicit_close_poll() {
+ // First, with message sent
+ let (tx, rx) = oneshot::channel();
+ let mut rx = task::spawn(rx);
+
+ assert_ok!(tx.send(1));
+
+ rx.close();
+
+ let value = assert_ready_ok!(rx.poll());
+ assert_eq!(value, 1);
+
+ // Second, without the message sent
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(tx.into_inner().send(1));
+ assert_ready_err!(rx.poll());
+
+ // Again, but without sending the value this time
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_ready_err!(rx.poll());
+}
+
+#[test]
+fn explicit_close_try_recv() {
+ // First, with message sent
+ let (tx, mut rx) = oneshot::channel();
+
+ assert_ok!(tx.send(1));
+
+ rx.close();
+
+ let val = assert_ok!(rx.try_recv());
+ assert_eq!(1, val);
+
+ // Second, without the message sent
+ let (tx, mut rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(rx.try_recv());
+}
+
+#[test]
+#[should_panic]
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+fn close_try_recv_poll() {
+ let (_tx, rx) = oneshot::channel::<i32>();
+ let mut rx = task::spawn(rx);
+
+ rx.close();
+
+ assert_err!(rx.try_recv());
+
+ let _ = rx.poll();
+}
+
+#[test]
+fn close_after_recv() {
+ let (tx, mut rx) = oneshot::channel::<i32>();
+
+ tx.send(17).unwrap();
+
+ assert_eq!(17, rx.try_recv().unwrap());
+ rx.close();
+}
+
+#[test]
+fn try_recv_after_completion() {
+ let (tx, mut rx) = oneshot::channel::<i32>();
+
+ tx.send(17).unwrap();
+
+ assert_eq!(17, rx.try_recv().unwrap());
+ assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
+ rx.close();
+}
+
+#[test]
+fn drops_tasks() {
+ let (mut tx, mut rx) = oneshot::channel::<i32>();
+ let mut tx_task = task::spawn(());
+ let mut rx_task = task::spawn(());
+
+ assert_pending!(tx_task.enter(|cx, _| tx.poll_closed(cx)));
+ assert_pending!(rx_task.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ drop(tx);
+ drop(rx);
+
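+    // Dropping both halves releases the waker clones stored by the channel,
+    // leaving only the mock task's own reference.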
+ assert_eq!(1, tx_task.waker_ref_count());
+ assert_eq!(1, rx_task.waker_ref_count());
+}
+
+#[test]
+fn receiver_changes_task() {
+ let (tx, mut rx) = oneshot::channel();
+
+ let mut task1 = task::spawn(());
+ let mut task2 = task::spawn(());
+
+ assert_pending!(task1.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ assert_eq!(2, task1.waker_ref_count());
+ assert_eq!(1, task2.waker_ref_count());
+
+ assert_pending!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ assert_eq!(1, task1.waker_ref_count());
+ assert_eq!(2, task2.waker_ref_count());
+
+ assert_ok!(tx.send(1));
+
+ assert!(!task1.is_woken());
+ assert!(task2.is_woken());
+
+ assert_ready_ok!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+}
+
+#[test]
+fn sender_changes_task() {
+ let (mut tx, rx) = oneshot::channel::<i32>();
+
+ let mut task1 = task::spawn(());
+ let mut task2 = task::spawn(());
+
+ assert_pending!(task1.enter(|cx, _| tx.poll_closed(cx)));
+
+ assert_eq!(2, task1.waker_ref_count());
+ assert_eq!(1, task2.waker_ref_count());
+
+ assert_pending!(task2.enter(|cx, _| tx.poll_closed(cx)));
+
+ assert_eq!(1, task1.waker_ref_count());
+ assert_eq!(2, task2.waker_ref_count());
+
+ drop(rx);
+
+ assert!(!task1.is_woken());
+ assert!(task2.is_woken());
+
+ assert_ready!(task2.enter(|cx, _| tx.poll_closed(cx)));
+}
diff --git a/third_party/rust/tokio/tests/sync_rwlock.rs b/third_party/rust/tokio/tests/sync_rwlock.rs
new file mode 100644
index 0000000000..01bbdbb5a5
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_rwlock.rs
@@ -0,0 +1,281 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
+
+#[cfg(not(target_arch = "wasm32"))]
+use tokio::test as maybe_tokio_test;
+
+use std::task::Poll;
+
+use futures::future::FutureExt;
+
+use tokio::sync::RwLock;
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+#[test]
+fn into_inner() {
+ let rwlock = RwLock::new(42);
+ assert_eq!(rwlock.into_inner(), 42);
+}
+
+// multiple reads should be Ready
+#[test]
+fn read_shared() {
+ let rwlock = RwLock::new(100);
+
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.read());
+ assert_ready!(t2.poll());
+}
+
+// When there is an active shared owner, exclusive access should not be possible
+#[test]
+fn write_shared_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+}
+
+// When there is an active exclusive owner, subsequent shared access should not be possible
+#[test]
+fn read_exclusive_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.write());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.read());
+ assert_pending!(t2.poll());
+}
+
+// If the maximum number of shared accesses is reached, a pending shared access
+// should be granted once one of the existing shared accesses is dropped
+#[test]
+fn exhaust_reading() {
+ let rwlock = RwLock::with_max_readers(100, 1024);
+ let mut reads = Vec::new();
+ loop {
+ let mut t = spawn(rwlock.read());
+ match t.poll() {
+ Poll::Ready(guard) => reads.push(guard),
+ Poll::Pending => break,
+ }
+ }
+
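+    // All 1024 read permits are now held, so another read must wait until one
+    // of the existing guards is dropped.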
+ let mut t1 = spawn(rwlock.read());
+ assert_pending!(t1.poll());
+ let g2 = reads.pop().unwrap();
+ drop(g2);
+ assert!(t1.is_woken());
+ assert_ready!(t1.poll());
+}
+
+// When there is an active exclusive owner, subsequent exclusive access should not be possible
+#[test]
+fn write_exclusive_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.write());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+}
+
+// When there is an active shared owner, exclusive access should be possible after the shared guard is dropped
+#[test]
+fn write_shared_drop() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+
+ let g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+ drop(g1);
+ assert!(t2.is_woken());
+ assert_ready!(t2.poll());
+}
+
+// When there is an active shared owner and exclusive access is requested,
+// subsequent shared access should not be possible, as the write acquires all available semaphore permits
+#[test]
+fn write_read_shared_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+
+ let mut t2 = spawn(rwlock.read());
+ assert_ready!(t2.poll());
+
+ let mut t3 = spawn(rwlock.write());
+ assert_pending!(t3.poll());
+
+ let mut t4 = spawn(rwlock.read());
+ assert_pending!(t4.poll());
+}
+
+// When there is an active shared owner and exclusive access is requested,
+// reading should become possible again once the pending exclusive access is dropped
+#[test]
+fn write_read_shared_drop_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+
+ let mut t3 = spawn(rwlock.read());
+ assert_pending!(t3.poll());
+ drop(t2);
+
+ assert!(t3.is_woken());
+ assert_ready!(t3.poll());
+}
+
+// Acquire an RwLock nonexclusively by a single task
+#[maybe_tokio_test]
+async fn read_uncontested() {
+ let rwlock = RwLock::new(100);
+ let result = *rwlock.read().await;
+
+ assert_eq!(result, 100);
+}
+
+// Acquire an uncontested RwLock in exclusive mode
+#[maybe_tokio_test]
+async fn write_uncontested() {
+ let rwlock = RwLock::new(100);
+ let mut result = rwlock.write().await;
+ *result += 50;
+ assert_eq!(*result, 150);
+}
+
+// RwLocks should be acquired in the order that their Futures are waited upon.
+#[maybe_tokio_test]
+async fn write_order() {
+ let rwlock = RwLock::<Vec<u32>>::new(vec![]);
+ let fut2 = rwlock.write().map(|mut guard| guard.push(2));
+ let fut1 = rwlock.write().map(|mut guard| guard.push(1));
+ fut1.await;
+ fut2.await;
+
+ let g = rwlock.read().await;
+ assert_eq!(*g, vec![1, 2]);
+}
+
+// A single RwLock is contested by tasks in multiple threads
+#[cfg(feature = "full")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
+async fn multithreaded() {
+ use futures::stream::{self, StreamExt};
+ use std::sync::Arc;
+ use tokio::sync::Barrier;
+
+ let barrier = Arc::new(Barrier::new(5));
+ let rwlock = Arc::new(RwLock::<u32>::new(0));
+ let rwclone1 = rwlock.clone();
+ let rwclone2 = rwlock.clone();
+ let rwclone3 = rwlock.clone();
+ let rwclone4 = rwlock.clone();
+
+ let b1 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone1.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 2;
+ }
+ })
+ .await;
+ b1.wait().await;
+ });
+
+ let b2 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone2.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 3;
+ }
+ })
+ .await;
+ b2.wait().await;
+ });
+
+ let b3 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone3.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 5;
+ }
+ })
+ .await;
+ b3.wait().await;
+ });
+
+ let b4 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone4.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 7;
+ }
+ })
+ .await;
+ b4.wait().await;
+ });
+
+ barrier.wait().await;
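+    // Each of the four tasks performed 1000 writes, adding 2, 3, 5, and 7
+    // respectively: 1000 * (2 + 3 + 5 + 7) = 17_000.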
+ let g = rwlock.read().await;
+ assert_eq!(*g, 17_000);
+}
+
+#[maybe_tokio_test]
+async fn try_write() {
+ let lock = RwLock::new(0);
+ let read_guard = lock.read().await;
+ assert!(lock.try_write().is_err());
+ drop(read_guard);
+ assert!(lock.try_write().is_ok());
+}
+
+#[test]
+fn try_read_try_write() {
+ let lock: RwLock<usize> = RwLock::new(15);
+
+ {
+ let rg1 = lock.try_read().unwrap();
+ assert_eq!(*rg1, 15);
+
+ assert!(lock.try_write().is_err());
+
+ let rg2 = lock.try_read().unwrap();
+ assert_eq!(*rg2, 15)
+ }
+
+ {
+ let mut wg = lock.try_write().unwrap();
+ *wg = 1515;
+
+ assert!(lock.try_read().is_err())
+ }
+
+ assert_eq!(*lock.try_read().unwrap(), 1515);
+}
diff --git a/third_party/rust/tokio/tests/sync_semaphore.rs b/third_party/rust/tokio/tests/sync_semaphore.rs
new file mode 100644
index 0000000000..d926665008
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_semaphore.rs
@@ -0,0 +1,102 @@
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use std::sync::Arc;
+use tokio::sync::Semaphore;
+
+#[test]
+fn no_permits() {
+ // this should not panic
+ Semaphore::new(0);
+}
+
+#[test]
+fn try_acquire() {
+ let sem = Semaphore::new(1);
+ {
+ let p1 = sem.try_acquire();
+ assert!(p1.is_ok());
+ let p2 = sem.try_acquire();
+ assert!(p2.is_err());
+ }
+ let p3 = sem.try_acquire();
+ assert!(p3.is_ok());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn acquire() {
+ let sem = Arc::new(Semaphore::new(1));
+ let p1 = sem.try_acquire().unwrap();
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire().await;
+ });
+ drop(p1);
+ j.await.unwrap();
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn add_permits() {
+ let sem = Arc::new(Semaphore::new(0));
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire().await;
+ });
+ sem.add_permits(1);
+ j.await.unwrap();
+}
+
+#[test]
+fn forget() {
+ let sem = Arc::new(Semaphore::new(1));
+ {
+ let p = sem.try_acquire().unwrap();
+ assert_eq!(sem.available_permits(), 0);
+ p.forget();
+ assert_eq!(sem.available_permits(), 0);
+ }
+ assert_eq!(sem.available_permits(), 0);
+ assert!(sem.try_acquire().is_err());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn stresstest() {
+ let sem = Arc::new(Semaphore::new(5));
+ let mut join_handles = Vec::new();
+ for _ in 0..1000 {
+ let sem_clone = sem.clone();
+ join_handles.push(tokio::spawn(async move {
+ let _p = sem_clone.acquire().await;
+ }));
+ }
+ for j in join_handles {
+ j.await.unwrap();
+ }
+    // there should be exactly 5 permits available now
+ let _p1 = sem.try_acquire().unwrap();
+ let _p2 = sem.try_acquire().unwrap();
+ let _p3 = sem.try_acquire().unwrap();
+ let _p4 = sem.try_acquire().unwrap();
+ let _p5 = sem.try_acquire().unwrap();
+ assert!(sem.try_acquire().is_err());
+}
+
+#[test]
+fn add_max_amount_permits() {
+ let s = tokio::sync::Semaphore::new(0);
+ s.add_permits(usize::MAX >> 3);
+ assert_eq!(s.available_permits(), usize::MAX >> 3);
+}
+
+#[cfg(not(target_arch = "wasm32"))] // wasm currently doesn't support unwinding
+#[test]
+#[should_panic]
+fn add_more_than_max_amount_permits() {
+ let s = tokio::sync::Semaphore::new(1);
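+    // The semaphore already holds one permit, so adding `usize::MAX >> 3` more
+    // exceeds the maximum permit count (`usize::MAX >> 3`) and panics.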
+ s.add_permits(usize::MAX >> 3);
+}
diff --git a/third_party/rust/tokio/tests/sync_semaphore_owned.rs b/third_party/rust/tokio/tests/sync_semaphore_owned.rs
new file mode 100644
index 0000000000..98c20d7b03
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_semaphore_owned.rs
@@ -0,0 +1,113 @@
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use std::sync::Arc;
+use tokio::sync::Semaphore;
+
+#[test]
+fn try_acquire() {
+ let sem = Arc::new(Semaphore::new(1));
+ {
+ let p1 = sem.clone().try_acquire_owned();
+ assert!(p1.is_ok());
+ let p2 = sem.clone().try_acquire_owned();
+ assert!(p2.is_err());
+ }
+ let p3 = sem.try_acquire_owned();
+ assert!(p3.is_ok());
+}
+
+#[test]
+fn try_acquire_many() {
+ let sem = Arc::new(Semaphore::new(42));
+ {
+ let p1 = sem.clone().try_acquire_many_owned(42);
+ assert!(p1.is_ok());
+ let p2 = sem.clone().try_acquire_owned();
+ assert!(p2.is_err());
+ }
+ let p3 = sem.clone().try_acquire_many_owned(32);
+ assert!(p3.is_ok());
+ let p4 = sem.clone().try_acquire_many_owned(10);
+ assert!(p4.is_ok());
+ assert!(sem.try_acquire_owned().is_err());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn acquire() {
+ let sem = Arc::new(Semaphore::new(1));
+ let p1 = sem.clone().try_acquire_owned().unwrap();
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire_owned().await;
+ });
+ drop(p1);
+ j.await.unwrap();
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn acquire_many() {
+ let semaphore = Arc::new(Semaphore::new(42));
+ let permit32 = semaphore.clone().try_acquire_many_owned(32).unwrap();
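+    // 32 of the 42 permits are now held, so the spawned task can immediately
+    // take 10 more but must wait for `permit32` to drop before acquiring
+    // another 32.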
+ let (sender, receiver) = tokio::sync::oneshot::channel();
+ let join_handle = tokio::spawn(async move {
+ let _permit10 = semaphore.clone().acquire_many_owned(10).await.unwrap();
+ sender.send(()).unwrap();
+ let _permit32 = semaphore.acquire_many_owned(32).await.unwrap();
+ });
+ receiver.await.unwrap();
+ drop(permit32);
+ join_handle.await.unwrap();
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn add_permits() {
+ let sem = Arc::new(Semaphore::new(0));
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire_owned().await;
+ });
+ sem.add_permits(1);
+ j.await.unwrap();
+}
+
+#[test]
+fn forget() {
+ let sem = Arc::new(Semaphore::new(1));
+ {
+ let p = sem.clone().try_acquire_owned().unwrap();
+ assert_eq!(sem.available_permits(), 0);
+ p.forget();
+ assert_eq!(sem.available_permits(), 0);
+ }
+ assert_eq!(sem.available_permits(), 0);
+ assert!(sem.try_acquire_owned().is_err());
+}
+
+#[tokio::test]
+#[cfg(feature = "full")]
+async fn stresstest() {
+ let sem = Arc::new(Semaphore::new(5));
+ let mut join_handles = Vec::new();
+ for _ in 0..1000 {
+ let sem_clone = sem.clone();
+ join_handles.push(tokio::spawn(async move {
+ let _p = sem_clone.acquire_owned().await;
+ }));
+ }
+ for j in join_handles {
+ j.await.unwrap();
+ }
+    // there should be exactly 5 permits available now
+ let _p1 = sem.clone().try_acquire_owned().unwrap();
+ let _p2 = sem.clone().try_acquire_owned().unwrap();
+ let _p3 = sem.clone().try_acquire_owned().unwrap();
+ let _p4 = sem.clone().try_acquire_owned().unwrap();
+ let _p5 = sem.clone().try_acquire_owned().unwrap();
+ assert!(sem.try_acquire_owned().is_err());
+}
diff --git a/third_party/rust/tokio/tests/sync_watch.rs b/third_party/rust/tokio/tests/sync_watch.rs
new file mode 100644
index 0000000000..8b9ea81bb8
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_watch.rs
@@ -0,0 +1,213 @@
+#![allow(clippy::cognitive_complexity)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+#[cfg(target_arch = "wasm32")]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use tokio::sync::watch;
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok};
+
+#[test]
+fn single_rx_recv() {
+ let (tx, mut rx) = watch::channel("one");
+
+ {
+ // Not initially notified
+ let mut t = spawn(rx.changed());
+ assert_pending!(t.poll());
+ }
+ assert_eq!(*rx.borrow(), "one");
+
+ {
+ let mut t = spawn(rx.changed());
+ assert_pending!(t.poll());
+
+ tx.send("two").unwrap();
+
+ assert!(t.is_woken());
+
+ assert_ready_ok!(t.poll());
+ }
+ assert_eq!(*rx.borrow(), "two");
+
+ {
+ let mut t = spawn(rx.changed());
+ assert_pending!(t.poll());
+
+ drop(tx);
+
+ assert!(t.is_woken());
+ assert_ready_err!(t.poll());
+ }
+ assert_eq!(*rx.borrow(), "two");
+}
+
+#[test]
+fn multi_rx() {
+ let (tx, mut rx1) = watch::channel("one");
+ let mut rx2 = rx1.clone();
+
+ {
+ let mut t1 = spawn(rx1.changed());
+ let mut t2 = spawn(rx2.changed());
+
+ assert_pending!(t1.poll());
+ assert_pending!(t2.poll());
+ }
+ assert_eq!(*rx1.borrow(), "one");
+ assert_eq!(*rx2.borrow(), "one");
+
+ let mut t2 = spawn(rx2.changed());
+
+ {
+ let mut t1 = spawn(rx1.changed());
+
+ assert_pending!(t1.poll());
+ assert_pending!(t2.poll());
+
+ tx.send("two").unwrap();
+
+ assert!(t1.is_woken());
+ assert!(t2.is_woken());
+
+ assert_ready_ok!(t1.poll());
+ }
+ assert_eq!(*rx1.borrow(), "two");
+
+ {
+ let mut t1 = spawn(rx1.changed());
+
+ assert_pending!(t1.poll());
+
+ tx.send("three").unwrap();
+
+ assert!(t1.is_woken());
+ assert!(t2.is_woken());
+
+ assert_ready_ok!(t1.poll());
+ assert_ready_ok!(t2.poll());
+ }
+ assert_eq!(*rx1.borrow(), "three");
+
+ drop(t2);
+
+ assert_eq!(*rx2.borrow(), "three");
+
+ {
+ let mut t1 = spawn(rx1.changed());
+ let mut t2 = spawn(rx2.changed());
+
+ assert_pending!(t1.poll());
+ assert_pending!(t2.poll());
+
+ tx.send("four").unwrap();
+
+ assert_ready_ok!(t1.poll());
+ assert_ready_ok!(t2.poll());
+ }
+ assert_eq!(*rx1.borrow(), "four");
+ assert_eq!(*rx2.borrow(), "four");
+}
+
+#[test]
+fn rx_observes_final_value() {
+ // Initial value
+
+ let (tx, mut rx) = watch::channel("one");
+ drop(tx);
+
+ {
+ let mut t1 = spawn(rx.changed());
+ assert_ready_err!(t1.poll());
+ }
+ assert_eq!(*rx.borrow(), "one");
+
+ // Sending a value
+
+ let (tx, mut rx) = watch::channel("one");
+
+ tx.send("two").unwrap();
+
+ {
+ let mut t1 = spawn(rx.changed());
+ assert_ready_ok!(t1.poll());
+ }
+ assert_eq!(*rx.borrow(), "two");
+
+ {
+ let mut t1 = spawn(rx.changed());
+ assert_pending!(t1.poll());
+
+ tx.send("three").unwrap();
+ drop(tx);
+
+ assert!(t1.is_woken());
+
+ assert_ready_ok!(t1.poll());
+ }
+ assert_eq!(*rx.borrow(), "three");
+
+ {
+ let mut t1 = spawn(rx.changed());
+ assert_ready_err!(t1.poll());
+ }
+ assert_eq!(*rx.borrow(), "three");
+}
+
+#[test]
+fn poll_close() {
+ let (tx, rx) = watch::channel("one");
+
+ {
+ let mut t = spawn(tx.closed());
+ assert_pending!(t.poll());
+
+ drop(rx);
+
+ assert!(t.is_woken());
+ assert_ready!(t.poll());
+ }
+
+ assert!(tx.send("two").is_err());
+}
+
+#[test]
+fn borrow_and_update() {
+ let (tx, mut rx) = watch::channel("one");
+
+ assert!(!rx.has_changed().unwrap());
+
+ tx.send("two").unwrap();
+ assert!(rx.has_changed().unwrap());
+ assert_ready!(spawn(rx.changed()).poll()).unwrap();
+ assert_pending!(spawn(rx.changed()).poll());
+ assert!(!rx.has_changed().unwrap());
+
+ tx.send("three").unwrap();
+ assert!(rx.has_changed().unwrap());
+ assert_eq!(*rx.borrow_and_update(), "three");
+ assert_pending!(spawn(rx.changed()).poll());
+ assert!(!rx.has_changed().unwrap());
+
+ drop(tx);
+ assert_eq!(*rx.borrow_and_update(), "three");
+ assert_ready!(spawn(rx.changed()).poll()).unwrap_err();
+ assert!(rx.has_changed().is_err());
+}
+
+#[test]
+fn reopened_after_subscribe() {
+ let (tx, rx) = watch::channel("one");
+ assert!(!tx.is_closed());
+
+ drop(rx);
+ assert!(tx.is_closed());
+
+ let rx = tx.subscribe();
+ assert!(!tx.is_closed());
+
+ drop(rx);
+ assert!(tx.is_closed());
+}
diff --git a/third_party/rust/tokio/tests/task_abort.rs b/third_party/rust/tokio/tests/task_abort.rs
new file mode 100644
index 0000000000..fe6b50cd46
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_abort.rs
@@ -0,0 +1,224 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::sync::Arc;
+use std::thread::sleep;
+use tokio::time::Duration;
+
+use tokio::runtime::Builder;
+
+struct PanicOnDrop;
+
+impl Drop for PanicOnDrop {
+ fn drop(&mut self) {
+ panic!("Well what did you expect would happen...");
+ }
+}
+
+/// Checks that a suspended task can be aborted without panicking as reported in
+/// issue #3157: <https://github.com/tokio-rs/tokio/issues/3157>.
+#[test]
+fn test_abort_without_panic_3157() {
+ let rt = Builder::new_multi_thread()
+ .enable_time()
+ .worker_threads(1)
+ .build()
+ .unwrap();
+
+ rt.block_on(async move {
+ let handle = tokio::spawn(async move {
+ println!("task started");
+ tokio::time::sleep(Duration::new(100, 0)).await
+ });
+
+ // wait for task to sleep.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ handle.abort();
+ let _ = handle.await;
+ });
+}
+
+/// Checks that a suspended task can be aborted inside of a current_thread
+/// executor without panicking as reported in issue #3662:
+/// <https://github.com/tokio-rs/tokio/issues/3662>.
+#[test]
+fn test_abort_without_panic_3662() {
+ use std::sync::atomic::{AtomicBool, Ordering};
+ use std::sync::Arc;
+
+ struct DropCheck(Arc<AtomicBool>);
+
+ impl Drop for DropCheck {
+ fn drop(&mut self) {
+ self.0.store(true, Ordering::SeqCst);
+ }
+ }
+
+ let rt = Builder::new_current_thread().build().unwrap();
+
+ rt.block_on(async move {
+ let drop_flag = Arc::new(AtomicBool::new(false));
+ let drop_check = DropCheck(drop_flag.clone());
+
+ let j = tokio::spawn(async move {
+ // NB: just grab the drop check here so that it becomes part of the
+ // task.
+ let _drop_check = drop_check;
+ futures::future::pending::<()>().await;
+ });
+
+ let drop_flag2 = drop_flag.clone();
+
+ let task = std::thread::spawn(move || {
+ // This runs in a separate thread so it doesn't have immediate
+ // thread-local access to the executor. It does however transition
+ // the underlying task to be completed, which will cause it to be
+ // dropped (but not in this thread).
+ assert!(!drop_flag2.load(Ordering::SeqCst));
+ j.abort();
+ j
+ })
+ .join()
+ .unwrap();
+
+ let result = task.await;
+ assert!(drop_flag.load(Ordering::SeqCst));
+ assert!(result.unwrap_err().is_cancelled());
+
+ // Note: We do the following to trigger a deferred task cleanup.
+ //
+ // The relevant piece of code you want to look at is in:
+ // `Inner::block_on` of `basic_scheduler.rs`.
+ //
+ // We cause the cleanup to happen by having a poll return Pending once
+ // so that the scheduler can go into the "auxiliary tasks" mode, at
+ // which point the task is removed from the scheduler.
+ let i = tokio::spawn(async move {
+ tokio::task::yield_now().await;
+ });
+
+ i.await.unwrap();
+ });
+}
+
+/// Checks that a suspended LocalSet task can be aborted from a remote thread
+/// without panicking and without running the task's destructor on the wrong thread.
+/// <https://github.com/tokio-rs/tokio/issues/3929>
+#[test]
+fn remote_abort_local_set_3929() {
+ struct DropCheck {
+ created_on: std::thread::ThreadId,
+ not_send: std::marker::PhantomData<*const ()>,
+ }
+
+ impl DropCheck {
+ fn new() -> Self {
+ Self {
+ created_on: std::thread::current().id(),
+ not_send: std::marker::PhantomData,
+ }
+ }
+ }
+ impl Drop for DropCheck {
+ fn drop(&mut self) {
+ if std::thread::current().id() != self.created_on {
+ panic!("non-Send value dropped in another thread!");
+ }
+ }
+ }
+
+ let rt = Builder::new_current_thread().build().unwrap();
+ let local = tokio::task::LocalSet::new();
+
+ let check = DropCheck::new();
+ let jh = local.spawn_local(async move {
+ futures::future::pending::<()>().await;
+ drop(check);
+ });
+
+ let jh2 = std::thread::spawn(move || {
+ sleep(Duration::from_millis(10));
+ jh.abort();
+ });
+
+ rt.block_on(local);
+ jh2.join().unwrap();
+}
+
+/// Checks that a suspended task can be aborted even if the `JoinHandle` is immediately dropped.
+/// See issue #3964: <https://github.com/tokio-rs/tokio/issues/3964>.
+#[test]
+fn test_abort_wakes_task_3964() {
+ let rt = Builder::new_current_thread().enable_time().build().unwrap();
+
+ rt.block_on(async move {
+ let notify_dropped = Arc::new(());
+ let weak_notify_dropped = Arc::downgrade(&notify_dropped);
+
+ let handle = tokio::spawn(async move {
+ // Make sure the Arc is moved into the task
+ let _notify_dropped = notify_dropped;
+ println!("task started");
+ tokio::time::sleep(Duration::new(100, 0)).await
+ });
+
+ // wait for task to sleep.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ handle.abort();
+ drop(handle);
+
+ // wait for task to abort.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ // Check that the Arc has been dropped.
+ assert!(weak_notify_dropped.upgrade().is_none());
+ });
+}
+
+/// Checks that aborting a task whose destructor panics does not allow the
+/// panic to escape the task.
+#[test]
+fn test_abort_task_that_panics_on_drop_contained() {
+ let rt = Builder::new_current_thread().enable_time().build().unwrap();
+
+ rt.block_on(async move {
+ let handle = tokio::spawn(async move {
+        // Make sure `PanicOnDrop` is moved into the task
+ let _panic_dropped = PanicOnDrop;
+ println!("task started");
+ tokio::time::sleep(Duration::new(100, 0)).await
+ });
+
+ // wait for task to sleep.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ handle.abort();
+ drop(handle);
+
+ // wait for task to abort.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ });
+}
+
+/// Checks that aborting a task whose destructor panics has the expected result.
+#[test]
+fn test_abort_task_that_panics_on_drop_returned() {
+ let rt = Builder::new_current_thread().enable_time().build().unwrap();
+
+ rt.block_on(async move {
+ let handle = tokio::spawn(async move {
+        // Make sure `PanicOnDrop` is moved into the task
+ let _panic_dropped = PanicOnDrop;
+ println!("task started");
+ tokio::time::sleep(Duration::new(100, 0)).await
+ });
+
+ // wait for task to sleep.
+ tokio::time::sleep(Duration::from_millis(10)).await;
+
+ handle.abort();
+ assert!(handle.await.unwrap_err().is_panic());
+ });
+}
diff --git a/third_party/rust/tokio/tests/task_blocking.rs b/third_party/rust/tokio/tests/task_blocking.rs
new file mode 100644
index 0000000000..ee7e78ad48
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_blocking.rs
@@ -0,0 +1,228 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::{runtime, task};
+use tokio_test::assert_ok;
+
+use std::thread;
+use std::time::Duration;
+
+mod support {
+ pub(crate) mod mpsc_stream;
+}
+
+#[tokio::test]
+async fn basic_blocking() {
+ // Run a few times
+ for _ in 0..100 {
+ let out = assert_ok!(
+ tokio::spawn(async {
+ assert_ok!(
+ task::spawn_blocking(|| {
+ thread::sleep(Duration::from_millis(5));
+ "hello"
+ })
+ .await
+ )
+ })
+ .await
+ );
+
+ assert_eq!(out, "hello");
+ }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn block_in_blocking() {
+ // Run a few times
+ for _ in 0..100 {
+ let out = assert_ok!(
+ tokio::spawn(async {
+ assert_ok!(
+ task::spawn_blocking(|| {
+ task::block_in_place(|| {
+ thread::sleep(Duration::from_millis(5));
+ });
+ "hello"
+ })
+ .await
+ )
+ })
+ .await
+ );
+
+ assert_eq!(out, "hello");
+ }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn block_in_block() {
+ // Run a few times
+ for _ in 0..100 {
+ let out = assert_ok!(
+ tokio::spawn(async {
+ task::block_in_place(|| {
+ task::block_in_place(|| {
+ thread::sleep(Duration::from_millis(5));
+ });
+ "hello"
+ })
+ })
+ .await
+ );
+
+ assert_eq!(out, "hello");
+ }
+}
+
+#[tokio::test(flavor = "current_thread")]
+#[should_panic]
+async fn no_block_in_basic_scheduler() {
+ task::block_in_place(|| {});
+}
+
+#[test]
+fn yes_block_in_threaded_block_on() {
+ let rt = runtime::Runtime::new().unwrap();
+ rt.block_on(async {
+ task::block_in_place(|| {});
+ });
+}
+
+#[test]
+#[should_panic]
+fn no_block_in_basic_block_on() {
+ let rt = runtime::Builder::new_current_thread().build().unwrap();
+ rt.block_on(async {
+ task::block_in_place(|| {});
+ });
+}
+
+#[test]
+fn can_enter_basic_rt_from_within_block_in_place() {
+ let outer = tokio::runtime::Runtime::new().unwrap();
+
+ outer.block_on(async {
+ tokio::task::block_in_place(|| {
+ let inner = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+
+ inner.block_on(async {})
+ })
+ });
+}
+
+#[test]
+fn useful_panic_message_when_dropping_rt_in_rt() {
+ use std::panic::{catch_unwind, AssertUnwindSafe};
+
+ let outer = tokio::runtime::Runtime::new().unwrap();
+
+ let result = catch_unwind(AssertUnwindSafe(|| {
+ outer.block_on(async {
+ let _ = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ });
+ }));
+
+ assert!(result.is_err());
+ let err = result.unwrap_err();
+ let err: &'static str = err.downcast_ref::<&'static str>().unwrap();
+
+ assert!(
+ err.contains("Cannot drop a runtime"),
+ "Wrong panic message: {:?}",
+ err
+ );
+}
+
+#[test]
+fn can_shutdown_with_zero_timeout_in_runtime() {
+ let outer = tokio::runtime::Runtime::new().unwrap();
+
+ outer.block_on(async {
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ rt.shutdown_timeout(Duration::from_nanos(0));
+ });
+}
+
+#[test]
+fn can_shutdown_now_in_runtime() {
+ let outer = tokio::runtime::Runtime::new().unwrap();
+
+ outer.block_on(async {
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+ rt.shutdown_background();
+ });
+}
+
+#[test]
+fn coop_disabled_in_block_in_place() {
+ let outer = tokio::runtime::Builder::new_multi_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ let (tx, rx) = support::mpsc_stream::unbounded_channel_stream();
+
+ for i in 0..200 {
+ tx.send(i).unwrap();
+ }
+ drop(tx);
+
+ outer.block_on(async move {
+ let jh = tokio::spawn(async move {
+ tokio::task::block_in_place(move || {
+ futures::executor::block_on(async move {
+ use tokio_stream::StreamExt;
+ assert_eq!(rx.fold(0, |n, _| n + 1).await, 200);
+ })
+ })
+ });
+
+ tokio::time::timeout(Duration::from_secs(1), jh)
+ .await
+ .expect("timed out (probably hanging)")
+ .unwrap()
+ });
+}
+
+#[test]
+fn coop_disabled_in_block_in_place_in_block_on() {
+ let (done_tx, done_rx) = std::sync::mpsc::channel();
+ let done = done_tx.clone();
+ thread::spawn(move || {
+ let outer = tokio::runtime::Runtime::new().unwrap();
+
+ let (tx, rx) = support::mpsc_stream::unbounded_channel_stream();
+
+ for i in 0..200 {
+ tx.send(i).unwrap();
+ }
+ drop(tx);
+
+ outer.block_on(async move {
+ tokio::task::block_in_place(move || {
+ futures::executor::block_on(async move {
+ use tokio_stream::StreamExt;
+ assert_eq!(rx.fold(0, |n, _| n + 1).await, 200);
+ })
+ })
+ });
+
+ let _ = done.send(Ok(()));
+ });
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_secs(1));
+ let _ = done_tx.send(Err("timed out (probably hanging)"));
+ });
+
+ done_rx.recv().unwrap().unwrap();
+}
diff --git a/third_party/rust/tokio/tests/task_builder.rs b/third_party/rust/tokio/tests/task_builder.rs
new file mode 100644
index 0000000000..1499abf19e
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_builder.rs
@@ -0,0 +1,67 @@
+#[cfg(all(tokio_unstable, feature = "tracing"))]
+mod tests {
+ use std::rc::Rc;
+ use tokio::{
+ task::{Builder, LocalSet},
+ test,
+ };
+
+ #[test]
+ async fn spawn_with_name() {
+ let result = Builder::new()
+ .name("name")
+ .spawn(async { "task executed" })
+ .await;
+
+ assert_eq!(result.unwrap(), "task executed");
+ }
+
+ #[test]
+ async fn spawn_blocking_with_name() {
+ let result = Builder::new()
+ .name("name")
+ .spawn_blocking(|| "task executed")
+ .await;
+
+ assert_eq!(result.unwrap(), "task executed");
+ }
+
+ #[test]
+ async fn spawn_local_with_name() {
+ let unsend_data = Rc::new("task executed");
+ let result = LocalSet::new()
+ .run_until(async move {
+ Builder::new()
+ .name("name")
+ .spawn_local(async move { unsend_data })
+ .await
+ })
+ .await;
+
+ assert_eq!(*result.unwrap(), "task executed");
+ }
+
+ #[test]
+ async fn spawn_without_name() {
+ let result = Builder::new().spawn(async { "task executed" }).await;
+
+ assert_eq!(result.unwrap(), "task executed");
+ }
+
+ #[test]
+ async fn spawn_blocking_without_name() {
+ let result = Builder::new().spawn_blocking(|| "task executed").await;
+
+ assert_eq!(result.unwrap(), "task executed");
+ }
+
+ #[test]
+ async fn spawn_local_without_name() {
+ let unsend_data = Rc::new("task executed");
+ let result = LocalSet::new()
+ .run_until(async move { Builder::new().spawn_local(async move { unsend_data }).await })
+ .await;
+
+ assert_eq!(*result.unwrap(), "task executed");
+ }
+}
diff --git a/third_party/rust/tokio/tests/task_join_set.rs b/third_party/rust/tokio/tests/task_join_set.rs
new file mode 100644
index 0000000000..66a2fbb021
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_join_set.rs
@@ -0,0 +1,192 @@
+#![warn(rust_2018_idioms)]
+#![cfg(all(feature = "full", tokio_unstable))]
+
+use tokio::sync::oneshot;
+use tokio::task::JoinSet;
+use tokio::time::Duration;
+
+use futures::future::FutureExt;
+
+fn rt() -> tokio::runtime::Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap()
+}
+
+#[tokio::test(start_paused = true)]
+async fn test_with_sleep() {
+ let mut set = JoinSet::new();
+
+ for i in 0..10 {
+ set.spawn(async move { i });
+ assert_eq!(set.len(), 1 + i);
+ }
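+    // Detaching lets the spawned tasks keep running but removes them from the
+    // set, so the set is empty and `join_one` reports `Ok(None)` below.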
+ set.detach_all();
+ assert_eq!(set.len(), 0);
+
+ assert!(matches!(set.join_one().await, Ok(None)));
+
+ for i in 0..10 {
+ set.spawn(async move {
+ tokio::time::sleep(Duration::from_secs(i as u64)).await;
+ i
+ });
+ assert_eq!(set.len(), 1 + i);
+ }
+
+ let mut seen = [false; 10];
+ while let Some(res) = set.join_one().await.unwrap() {
+ seen[res] = true;
+ }
+
+ for was_seen in &seen {
+ assert!(was_seen);
+ }
+ assert!(matches!(set.join_one().await, Ok(None)));
+
+ // Do it again.
+ for i in 0..10 {
+ set.spawn(async move {
+ tokio::time::sleep(Duration::from_secs(i as u64)).await;
+ i
+ });
+ }
+
+ let mut seen = [false; 10];
+ while let Some(res) = set.join_one().await.unwrap() {
+ seen[res] = true;
+ }
+
+ for was_seen in &seen {
+ assert!(was_seen);
+ }
+ assert!(matches!(set.join_one().await, Ok(None)));
+}
+
+#[tokio::test]
+async fn test_abort_on_drop() {
+ let mut set = JoinSet::new();
+
+ let mut recvs = Vec::new();
+
+ for _ in 0..16 {
+ let (send, recv) = oneshot::channel::<()>();
+ recvs.push(recv);
+
+ set.spawn(async {
+ // This task will never complete on its own.
+ futures::future::pending::<()>().await;
+ drop(send);
+ });
+ }
+
+ drop(set);
+
+ for recv in recvs {
+        // Dropping the `JoinSet` aborts the task, so the sender is dropped
+        // without sending and the receiver sees an error.
+ assert!(recv.await.is_err());
+ }
+}
+
+#[tokio::test]
+async fn alternating() {
+ let mut set = JoinSet::new();
+
+ assert_eq!(set.len(), 0);
+ set.spawn(async {});
+ assert_eq!(set.len(), 1);
+ set.spawn(async {});
+ assert_eq!(set.len(), 2);
+
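+    // Join one task and spawn a replacement on every iteration, so the set
+    // alternates between one and two tasks.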
+ for _ in 0..16 {
+ let () = set.join_one().await.unwrap().unwrap();
+ assert_eq!(set.len(), 1);
+ set.spawn(async {});
+ assert_eq!(set.len(), 2);
+ }
+}
+
+#[test]
+fn runtime_gone() {
+ let mut set = JoinSet::new();
+ {
+ let rt = rt();
+ set.spawn_on(async { 1 }, rt.handle());
+ drop(rt);
+ }
+
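+    // The spawned task was cancelled when its runtime was dropped, so
+    // joining it yields a cancellation error.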
+ assert!(rt().block_on(set.join_one()).unwrap_err().is_cancelled());
+}
+
+// This ensures that `join_one` works correctly when the coop budget is
+// exhausted.
+#[tokio::test(flavor = "current_thread")]
+async fn join_set_coop() {
+ // Large enough to trigger coop.
+ const TASK_NUM: u32 = 1000;
+
+ static SEM: tokio::sync::Semaphore = tokio::sync::Semaphore::const_new(0);
+
+ let mut set = JoinSet::new();
+
+ for _ in 0..TASK_NUM {
+ set.spawn(async {
+ SEM.add_permits(1);
+ });
+ }
+
+ // Wait for all tasks to complete.
+ //
+ // Since this is a `current_thread` runtime, there's no race condition
+ // between the last permit being added and the task completing.
+ let _ = SEM.acquire_many(TASK_NUM).await.unwrap();
+
+ let mut count = 0;
+ let mut coop_count = 0;
+ loop {
+ match set.join_one().now_or_never() {
+ Some(Ok(Some(()))) => {}
+ Some(Err(err)) => panic!("failed: {}", err),
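+            // `now_or_never` returns `None` when `join_one` is still pending,
+            // which here means the coop budget was exhausted rather than the
+            // set being empty.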
+ None => {
+ coop_count += 1;
+ tokio::task::yield_now().await;
+ continue;
+ }
+ Some(Ok(None)) => break,
+ }
+
+ count += 1;
+ }
+ assert!(coop_count >= 1);
+ assert_eq!(count, TASK_NUM);
+}
+
+#[tokio::test(start_paused = true)]
+async fn abort_all() {
+ let mut set: JoinSet<()> = JoinSet::new();
+
+ for _ in 0..5 {
+ set.spawn(futures::future::pending());
+ }
+ for _ in 0..5 {
+ set.spawn(async {
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ });
+ }
+
+    // After this sleep, the join set will have 5 pending tasks and 5
+    // completed tasks.
+ tokio::time::sleep(Duration::from_secs(2)).await;
+
+ set.abort_all();
+ assert_eq!(set.len(), 10);
+
+ let mut count = 0;
+ while let Some(res) = set.join_one().await.transpose() {
+ if let Err(err) = res {
+ assert!(err.is_cancelled());
+ }
+ count += 1;
+ }
+ assert_eq!(count, 10);
+ assert_eq!(set.len(), 0);
+}
diff --git a/third_party/rust/tokio/tests/task_local.rs b/third_party/rust/tokio/tests/task_local.rs
new file mode 100644
index 0000000000..811d63ea0f
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_local.rs
@@ -0,0 +1,33 @@
+#![cfg(feature = "full")]
+
+tokio::task_local! {
+ static REQ_ID: u32;
+ pub static FOO: bool;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn local() {
+ let j1 = tokio::spawn(REQ_ID.scope(1, async move {
+ assert_eq!(REQ_ID.get(), 1);
+ assert_eq!(REQ_ID.get(), 1);
+ }));
+
+ let j2 = tokio::spawn(REQ_ID.scope(2, async move {
+ REQ_ID.with(|v| {
+ assert_eq!(REQ_ID.get(), 2);
+ assert_eq!(*v, 2);
+ });
+
+ tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+
+ assert_eq!(REQ_ID.get(), 2);
+ }));
+
+ let j3 = tokio::spawn(FOO.scope(true, async move {
+ assert!(FOO.get());
+ }));
+
+ j1.await.unwrap();
+ j2.await.unwrap();
+ j3.await.unwrap();
+}
diff --git a/third_party/rust/tokio/tests/task_local_set.rs b/third_party/rust/tokio/tests/task_local_set.rs
new file mode 100644
index 0000000000..f8a35d0ede
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_local_set.rs
@@ -0,0 +1,525 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use futures::{
+ future::{pending, ready},
+ FutureExt,
+};
+
+use tokio::runtime::{self, Runtime};
+use tokio::sync::{mpsc, oneshot};
+use tokio::task::{self, LocalSet};
+use tokio::time;
+
+use std::cell::Cell;
+use std::sync::atomic::Ordering::{self, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::time::Duration;
+
+#[tokio::test(flavor = "current_thread")]
+async fn local_basic_scheduler() {
+ LocalSet::new()
+ .run_until(async {
+ task::spawn_local(async {}).await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn local_threadpool() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ .await
+ .unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn localset_future_threadpool() {
+ thread_local! {
+ static ON_LOCAL_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_LOCAL_THREAD.with(|cell| cell.set(true));
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ assert!(ON_LOCAL_THREAD.with(|cell| cell.get()));
+ });
+ local.await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn localset_future_timers() {
+ static RAN1: AtomicBool = AtomicBool::new(false);
+ static RAN2: AtomicBool = AtomicBool::new(false);
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ time::sleep(Duration::from_millis(5)).await;
+ RAN1.store(true, Ordering::SeqCst);
+ });
+ local.spawn_local(async move {
+ time::sleep(Duration::from_millis(10)).await;
+ RAN2.store(true, Ordering::SeqCst);
+ });
+ local.await;
+ assert!(RAN1.load(Ordering::SeqCst));
+ assert!(RAN2.load(Ordering::SeqCst));
+}
+
+#[tokio::test]
+async fn localset_future_drives_all_local_futs() {
+ static RAN1: AtomicBool = AtomicBool::new(false);
+ static RAN2: AtomicBool = AtomicBool::new(false);
+ static RAN3: AtomicBool = AtomicBool::new(false);
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ task::spawn_local(async {
+ task::yield_now().await;
+ RAN3.store(true, Ordering::SeqCst);
+ });
+ task::yield_now().await;
+ RAN1.store(true, Ordering::SeqCst);
+ });
+ local.spawn_local(async move {
+ task::yield_now().await;
+ RAN2.store(true, Ordering::SeqCst);
+ });
+ local.await;
+ assert!(RAN1.load(Ordering::SeqCst));
+ assert!(RAN2.load(Ordering::SeqCst));
+ assert!(RAN3.load(Ordering::SeqCst));
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn local_threadpool_timer() {
+ // This test ensures that runtime services like the timer are properly
+ // set for the local task set.
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ time::sleep(Duration::from_millis(10)).await;
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ })
+ .await;
+}
+
+#[test]
+// This will panic, since the thread that calls `block_on` cannot use
+// in-place blocking inside of `block_on`.
+#[should_panic]
+fn local_threadpool_blocking_in_place() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ let rt = runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap();
+ LocalSet::new().block_on(&rt, async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::block_in_place(|| {});
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ });
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn local_threadpool_blocking_run() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_blocking(|| {
+ assert!(
+ !ON_RT_THREAD.with(|cell| cell.get()),
+ "blocking must not run on the local task set's thread"
+ );
+ })
+ .await
+ .unwrap();
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn all_spawns_are_local() {
+ use futures::future;
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let handles = (0..128)
+ .map(|_| {
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ })
+ .collect::<Vec<_>>();
+ for joined in future::join_all(handles).await {
+ joined.unwrap();
+ }
+ })
+ .await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn nested_spawn_is_local() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await;
+}
+
+#[test]
+fn join_local_future_elsewhere() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ let rt = runtime::Runtime::new().unwrap();
+ let local = LocalSet::new();
+ local.block_on(&rt, async move {
+ let (tx, rx) = oneshot::channel();
+ let join = task::spawn_local(async move {
+ println!("hello world running...");
+ assert!(
+ ON_RT_THREAD.with(|cell| cell.get()),
+ "local task must run on local thread, no matter where it is awaited"
+ );
+ rx.await.unwrap();
+
+ println!("hello world task done");
+ "hello world"
+ });
+ let join2 = task::spawn(async move {
+ assert!(
+ !ON_RT_THREAD.with(|cell| cell.get()),
+ "spawned task should be on a worker"
+ );
+
+ tx.send(()).expect("task shouldn't have ended yet");
+ println!("waking up hello world...");
+
+ join.await.expect("task should complete successfully");
+
+ println!("hello world task joined");
+ });
+ join2.await.unwrap()
+ });
+}
+
+#[test]
+fn drop_cancels_tasks() {
+ use std::rc::Rc;
+
+ // This test reproduces issue #1842
+ let rt = rt();
+ let rc1 = Rc::new(());
+ let rc2 = rc1.clone();
+
+ let (started_tx, started_rx) = oneshot::channel();
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ // Move this in
+ let _rc2 = rc2;
+
+ started_tx.send(()).unwrap();
+ futures::future::pending::<()>().await;
+ });
+
+ local.block_on(&rt, async {
+ started_rx.await.unwrap();
+ });
+ drop(local);
+ drop(rt);
+
+ assert_eq!(1, Rc::strong_count(&rc1));
+}
+
+/// Runs a test function in a separate thread, and panics if the test does not
+/// complete within the specified timeout, or if the test function panics.
+///
+/// This is intended for running tests whose failure mode is a hang or infinite
+/// loop that cannot be detected otherwise.
+fn with_timeout(timeout: Duration, f: impl FnOnce() + Send + 'static) {
+ use std::sync::mpsc::RecvTimeoutError;
+
+ let (done_tx, done_rx) = std::sync::mpsc::channel();
+ let thread = std::thread::spawn(move || {
+ f();
+
+ // Send a message on the channel so that the test thread can
+ // determine if we have entered an infinite loop:
+ done_tx.send(()).unwrap();
+ });
+
+ // Since the failure mode of this test is an infinite loop, rather than
+ // something we can easily make assertions about, we'll run it in a
+ // thread. When the test thread finishes, it will send a message on a
+ // channel to this thread. We'll wait for that message with a fairly
+ // generous timeout, and if we don't receive it, we assume the test
+ // thread has hung.
+ //
+ // Note that it should definitely complete in under a minute, but just
+ // in case CI is slow, we'll give it a long timeout.
+ match done_rx.recv_timeout(timeout) {
+ Err(RecvTimeoutError::Timeout) => panic!(
+ "test did not complete within {:?} seconds, \
+ we have (probably) entered an infinite loop!",
+ timeout,
+ ),
+ // Did the test thread panic? We'll find out for sure when we `join`
+ // with it.
+ Err(RecvTimeoutError::Disconnected) => {
+ println!("done_rx dropped, did the test thread panic?");
+ }
+ // Test completed successfully!
+ Ok(()) => {}
+ }
+
+ thread.join().expect("test thread should not panic!")
+}
+
+#[test]
+fn drop_cancels_remote_tasks() {
+ // This test reproduces issue #1885.
+ with_timeout(Duration::from_secs(60), || {
+ let (tx, mut rx) = mpsc::channel::<()>(1024);
+
+ let rt = rt();
+
+ let local = LocalSet::new();
+ local.spawn_local(async move { while rx.recv().await.is_some() {} });
+ local.block_on(&rt, async {
+ time::sleep(Duration::from_millis(1)).await;
+ });
+
+ drop(tx);
+
+ // This enters an infinite loop if the remote notified tasks are not
+ // properly cancelled.
+ drop(local);
+ });
+}
+
+#[test]
+fn local_tasks_wake_join_all() {
+ // This test reproduces issue #2460.
+ with_timeout(Duration::from_secs(60), || {
+ use futures::future::join_all;
+ use tokio::task::LocalSet;
+
+ let rt = rt();
+ let set = LocalSet::new();
+ let mut handles = Vec::new();
+
+ for _ in 1..=128 {
+ handles.push(set.spawn_local(async move {
+ tokio::task::spawn_local(async move {}).await.unwrap();
+ }));
+ }
+
+ rt.block_on(set.run_until(join_all(handles)));
+ });
+}
+
+#[test]
+fn local_tasks_are_polled_after_tick() {
+ // This test depends on timing, so we run it up to five times.
+ for _ in 0..4 {
+ let res = std::panic::catch_unwind(local_tasks_are_polled_after_tick_inner);
+ if res.is_ok() {
+ // success
+ return;
+ }
+ }
+
+ // Test failed 4 times. Try one more time without catching panics. If it
+ // fails again, the test fails.
+ local_tasks_are_polled_after_tick_inner();
+}
+
+#[tokio::main(flavor = "current_thread")]
+async fn local_tasks_are_polled_after_tick_inner() {
+ // Reproduces issues #1899 and #1900
+
+ static RX1: AtomicUsize = AtomicUsize::new(0);
+ static RX2: AtomicUsize = AtomicUsize::new(0);
+ const EXPECTED: usize = 500;
+
+ RX1.store(0, SeqCst);
+ RX2.store(0, SeqCst);
+
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ let local = LocalSet::new();
+
+ local
+ .run_until(async {
+ let task2 = task::spawn(async move {
+ // Wait a bit
+ time::sleep(Duration::from_millis(10)).await;
+
+ let mut oneshots = Vec::with_capacity(EXPECTED);
+
+ // Send values
+ for _ in 0..EXPECTED {
+ let (oneshot_tx, oneshot_rx) = oneshot::channel();
+ oneshots.push(oneshot_tx);
+ tx.send(oneshot_rx).unwrap();
+ }
+
+ time::sleep(Duration::from_millis(10)).await;
+
+ for tx in oneshots.drain(..) {
+ tx.send(()).unwrap();
+ }
+
+ time::sleep(Duration::from_millis(20)).await;
+ let rx1 = RX1.load(SeqCst);
+ let rx2 = RX2.load(SeqCst);
+ println!("EXPECT = {}; RX1 = {}; RX2 = {}", EXPECTED, rx1, rx2);
+ assert_eq!(EXPECTED, rx1);
+ assert_eq!(EXPECTED, rx2);
+ });
+
+ while let Some(oneshot) = rx.recv().await {
+ RX1.fetch_add(1, SeqCst);
+
+ task::spawn_local(async move {
+ oneshot.await.unwrap();
+ RX2.fetch_add(1, SeqCst);
+ });
+ }
+
+ task2.await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test]
+async fn acquire_mutex_in_drop() {
+ use futures::future::pending;
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let local = LocalSet::new();
+
+ local.spawn_local(async move {
+ let _ = rx2.await;
+ unreachable!();
+ });
+
+ local.spawn_local(async move {
+ let _ = rx1.await;
+ tx2.send(()).unwrap();
+ unreachable!();
+ });
+
+ // Spawn a task that will never notify
+ local.spawn_local(async move {
+ pending::<()>().await;
+ tx1.send(()).unwrap();
+ });
+
+ // Tick the loop
+ local
+ .run_until(async {
+ task::yield_now().await;
+ })
+ .await;
+
+ // Drop the LocalSet
+ drop(local);
+}
+
+#[tokio::test]
+async fn spawn_wakes_localset() {
+ let local = LocalSet::new();
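+    // Spawning onto the `LocalSet` must wake it, otherwise the `run_until`
+    // branch is never re-polled and the spawned task is never driven to
+    // completion.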
+ futures::select! {
+ _ = local.run_until(pending::<()>()).fuse() => unreachable!(),
+ ret = async { local.spawn_local(ready(())).await.unwrap()}.fuse() => ret
+ }
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/tcp_accept.rs b/third_party/rust/tokio/tests/tcp_accept.rs
new file mode 100644
index 0000000000..5ffb946f34
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_accept.rs
@@ -0,0 +1,157 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::{mpsc, oneshot};
+use tokio_test::assert_ok;
+
+use std::io;
+use std::net::{IpAddr, SocketAddr};
+
+macro_rules! test_accept {
+ ($(($ident:ident, $target:expr),)*) => {
+ $(
+ #[tokio::test]
+ async fn $ident() {
+ let listener = assert_ok!(TcpListener::bind($target).await);
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, _) = assert_ok!(listener.accept().await);
+ assert_ok!(tx.send(socket));
+ });
+
+ let cli = assert_ok!(TcpStream::connect(&addr).await);
+ let srv = assert_ok!(rx.await);
+
+ assert_eq!(cli.local_addr().unwrap(), srv.peer_addr().unwrap());
+ }
+ )*
+ }
+}
+
+test_accept! {
+ (ip_str, "127.0.0.1:0"),
+ (host_str, "localhost:0"),
+ (socket_addr, "127.0.0.1:0".parse::<SocketAddr>().unwrap()),
+ (str_port_tuple, ("127.0.0.1", 0)),
+ (ip_port_tuple, ("127.0.0.1".parse::<IpAddr>().unwrap(), 0)),
+}
+
+use std::pin::Pin;
+use std::sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Arc,
+};
+use std::task::{Context, Poll};
+use tokio_stream::{Stream, StreamExt};
+
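+/// Wraps a `TcpListener` as a stream of accepted connections while counting
+/// how many times it has been polled.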
+struct TrackPolls<'a> {
+ npolls: Arc<AtomicUsize>,
+ listener: &'a mut TcpListener,
+}
+
+impl<'a> Stream for TrackPolls<'a> {
+ type Item = io::Result<(TcpStream, SocketAddr)>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.npolls.fetch_add(1, SeqCst);
+ self.listener.poll_accept(cx).map(Some)
+ }
+}
+
+#[tokio::test]
+async fn no_extra_poll() {
+ let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = oneshot::channel();
+ let (accepted_tx, mut accepted_rx) = mpsc::unbounded_channel();
+
+ tokio::spawn(async move {
+ let mut incoming = TrackPolls {
+ npolls: Arc::new(AtomicUsize::new(0)),
+ listener: &mut listener,
+ };
+ assert_ok!(tx.send(Arc::clone(&incoming.npolls)));
+ while incoming.next().await.is_some() {
+ accepted_tx.send(()).unwrap();
+ }
+ });
+
+ let npolls = assert_ok!(rx.await);
+ tokio::task::yield_now().await;
+
+ // should have been polled exactly once: the initial poll
+ assert_eq!(npolls.load(SeqCst), 1);
+
+ let _ = assert_ok!(TcpStream::connect(&addr).await);
+ accepted_rx.recv().await.unwrap();
+
+ // should have been polled twice more: once to yield Some(), then once to yield Pending
+ assert_eq!(npolls.load(SeqCst), 1 + 2);
+}
+
+#[tokio::test]
+async fn accept_many() {
+ use futures::future::poll_fn;
+ use std::future::Future;
+ use std::sync::atomic::AtomicBool;
+
+ const N: usize = 50;
+
+ let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let listener = Arc::new(listener);
+ let addr = listener.local_addr().unwrap();
+ let connected = Arc::new(AtomicBool::new(false));
+
+ let (pending_tx, mut pending_rx) = mpsc::unbounded_channel();
+ let (notified_tx, mut notified_rx) = mpsc::unbounded_channel();
+
+ for _ in 0..N {
+ let listener = listener.clone();
+ let connected = connected.clone();
+ let pending_tx = pending_tx.clone();
+ let notified_tx = notified_tx.clone();
+
+ tokio::spawn(async move {
+ let accept = listener.accept();
+ tokio::pin!(accept);
+
+ let mut polled = false;
+
+ poll_fn(|cx| {
+ if !polled {
+ polled = true;
+ assert!(Pin::new(&mut accept).poll(cx).is_pending());
+ pending_tx.send(()).unwrap();
+ Poll::Pending
+ } else if connected.load(SeqCst) {
+ notified_tx.send(()).unwrap();
+ Poll::Ready(())
+ } else {
+ Poll::Pending
+ }
+ })
+ .await;
+
+ pending_tx.send(()).unwrap();
+ });
+ }
+
+ // Wait for all tasks to have polled at least once
+ for _ in 0..N {
+ pending_rx.recv().await.unwrap();
+ }
+
+ // Establish a TCP connection
+ connected.store(true, SeqCst);
+ let _sock = TcpStream::connect(addr).await.unwrap();
+
+ // Wait for all notifications
+ for _ in 0..N {
+ notified_rx.recv().await.unwrap();
+ }
+}
diff --git a/third_party/rust/tokio/tests/tcp_connect.rs b/third_party/rust/tokio/tests/tcp_connect.rs
new file mode 100644
index 0000000000..cbe68fa276
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_connect.rs
@@ -0,0 +1,229 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+use futures::join;
+
+#[tokio::test]
+async fn connect_v4() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ assert!(addr.is_ipv4());
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, addr) = assert_ok!(srv.accept().await);
+ assert_eq!(addr, assert_ok!(socket.peer_addr()));
+ assert_ok!(tx.send(socket));
+ });
+
+ let mine = assert_ok!(TcpStream::connect(&addr).await);
+ let theirs = assert_ok!(rx.await);
+
+ assert_eq!(
+ assert_ok!(mine.local_addr()),
+ assert_ok!(theirs.peer_addr())
+ );
+ assert_eq!(
+ assert_ok!(theirs.local_addr()),
+ assert_ok!(mine.peer_addr())
+ );
+}
+
+#[tokio::test]
+async fn connect_v6() {
+ let srv = assert_ok!(TcpListener::bind("[::1]:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ assert!(addr.is_ipv6());
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, addr) = assert_ok!(srv.accept().await);
+ assert_eq!(addr, assert_ok!(socket.peer_addr()));
+ assert_ok!(tx.send(socket));
+ });
+
+ let mine = assert_ok!(TcpStream::connect(&addr).await);
+ let theirs = assert_ok!(rx.await);
+
+ assert_eq!(
+ assert_ok!(mine.local_addr()),
+ assert_ok!(theirs.peer_addr())
+ );
+ assert_eq!(
+ assert_ok!(theirs.local_addr()),
+ assert_ok!(mine.peer_addr())
+ );
+}
+
+#[tokio::test]
+async fn connect_addr_ip_string() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("127.0.0.1:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_str_slice() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("127.0.0.1:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr[..]).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_host_string() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("localhost:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_port_tuple() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = (addr.ip(), addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_str_port_tuple() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = ("127.0.0.1", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_host_str_port_tuple() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = ("localhost", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+/*
+ * TODO: bring this back once TCP exposes HUP again
+ *
+#[cfg(target_os = "linux")]
+mod linux {
+ use tokio::net::{TcpListener, TcpStream};
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+ use tokio_test::assert_ok;
+
+ use mio::unix::UnixReady;
+
+ use futures_util::future::poll_fn;
+ use std::io::Write;
+ use std::time::Duration;
+ use std::{net, thread};
+
+ #[tokio::test]
+ fn poll_hup() {
+ let addr = assert_ok!("127.0.0.1:0".parse());
+ let mut srv = assert_ok!(TcpListener::bind(&addr));
+ let addr = assert_ok!(srv.local_addr());
+
+ tokio::spawn(async move {
+ let (mut client, _) = assert_ok!(srv.accept().await);
+ assert_ok!(client.set_linger(Some(Duration::from_millis(0))));
+ assert_ok!(client.write_all(b"hello world").await);
+
+ // TODO: Drop?
+ });
+
+ /*
+ let t = thread::spawn(move || {
+ let mut client = assert_ok!(srv.accept()).0;
+ client.set_linger(Some(Duration::from_millis(0))).unwrap();
+ client.write(b"hello world").unwrap();
+ thread::sleep(Duration::from_millis(200));
+ });
+ */
+
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ // Poll for HUP before reading.
+ future::poll_fn(|| stream.poll_read_ready(UnixReady::hup().into()))
+ .wait()
+ .unwrap();
+
+ // Same for write half
+ future::poll_fn(|| stream.poll_write_ready())
+ .wait()
+ .unwrap();
+
+ let mut buf = vec![0; 11];
+
+ // Read the data
+ future::poll_fn(|| stream.poll_read(&mut buf))
+ .wait()
+ .unwrap();
+
+ assert_eq!(b"hello world", &buf[..]);
+
+ t.join().unwrap();
+ }
+}
+*/
diff --git a/third_party/rust/tokio/tests/tcp_echo.rs b/third_party/rust/tokio/tests/tcp_echo.rs
new file mode 100644
index 0000000000..5bb7ff0acc
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_echo.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{self, AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn echo_server() {
+ const ITER: usize = 1024;
+
+ let (tx, rx) = oneshot::channel();
+
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ let msg = "foo bar baz";
+ tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ for _ in 0..ITER {
+ // write
+ assert_ok!(stream.write_all(msg.as_bytes()).await);
+
+ // read
+ let mut buf = [0; 11];
+ assert_ok!(stream.read_exact(&mut buf).await);
+ assert_eq!(&buf[..], msg.as_bytes());
+ }
+
+ assert_ok!(tx.send(()));
+ });
+
+ let (mut stream, _) = assert_ok!(srv.accept().await);
+ let (mut rd, mut wr) = stream.split();
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, (ITER * msg.len()) as u64);
+
+ assert_ok!(rx.await);
+}
diff --git a/third_party/rust/tokio/tests/tcp_into_split.rs b/third_party/rust/tokio/tests/tcp_into_split.rs
new file mode 100644
index 0000000000..2e06643a02
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_into_split.rs
@@ -0,0 +1,131 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::{Error, ErrorKind, Result};
+use std::io::{Read, Write};
+use std::{net, thread};
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::try_join;
+
+#[tokio::test]
+async fn split() -> Result<()> {
+ const MSG: &[u8] = b"split";
+
+ let listener = TcpListener::bind("127.0.0.1:0").await?;
+ let addr = listener.local_addr()?;
+
+ let (stream1, (mut stream2, _)) = try_join! {
+ TcpStream::connect(&addr),
+ listener.accept(),
+ }?;
+ let (mut read_half, mut write_half) = stream1.into_split();
+
+ let ((), (), ()) = try_join! {
+ async {
+ let len = stream2.write(MSG).await?;
+ assert_eq!(len, MSG.len());
+
+ let mut read_buf = vec![0u8; 32];
+ let read_len = stream2.read(&mut read_buf).await?;
+ assert_eq!(&read_buf[..read_len], MSG);
+ Result::Ok(())
+ },
+ async {
+ let len = write_half.write(MSG).await?;
+ assert_eq!(len, MSG.len());
+ Ok(())
+ },
+ async {
+ let mut read_buf = vec![0u8; 32];
+ let peek_len1 = read_half.peek(&mut read_buf[..]).await?;
+ let peek_len2 = read_half.peek(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, peek_len2);
+
+ let read_len = read_half.read(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, read_len);
+ assert_eq!(&read_buf[..read_len], MSG);
+ Ok(())
+ },
+ }?;
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn reunite() -> Result<()> {
+ let listener = net::TcpListener::bind("127.0.0.1:0")?;
+ let addr = listener.local_addr()?;
+
+ let handle = thread::spawn(move || {
+ drop(listener.accept().unwrap());
+ drop(listener.accept().unwrap());
+ });
+
+ let stream1 = TcpStream::connect(&addr).await?;
+ let (read1, write1) = stream1.into_split();
+
+ let stream2 = TcpStream::connect(&addr).await?;
+ let (_, write2) = stream2.into_split();
+
+ let read1 = match read1.reunite(write2) {
+ Ok(_) => panic!("Reunite should not succeed"),
+ Err(err) => err.0,
+ };
+
+ read1.reunite(write1).expect("Reunite should succeed");
+
+ handle.join().unwrap();
+ Ok(())
+}
+
+/// Test that dropping the write half actually closes the stream.
+#[tokio::test]
+async fn drop_write() -> Result<()> {
+ const MSG: &[u8] = b"split";
+
+ let listener = net::TcpListener::bind("127.0.0.1:0")?;
+ let addr = listener.local_addr()?;
+
+ let handle = thread::spawn(move || {
+ let (mut stream, _) = listener.accept().unwrap();
+ stream.write_all(MSG).unwrap();
+
+ let mut read_buf = [0u8; 32];
+ let res = match stream.read(&mut read_buf) {
+ Ok(0) => Ok(()),
+ Ok(len) => Err(Error::new(
+ ErrorKind::Other,
+ format!("Unexpected read: {} bytes.", len),
+ )),
+ Err(err) => Err(err),
+ };
+
+ drop(stream);
+
+ res
+ });
+
+ let stream = TcpStream::connect(&addr).await?;
+ let (mut read_half, write_half) = stream.into_split();
+
+ let mut read_buf = [0u8; 32];
+ let read_len = read_half.read(&mut read_buf[..]).await?;
+ assert_eq!(&read_buf[..read_len], MSG);
+
+ // drop it while the read is in progress
+ std::thread::spawn(move || {
+ thread::sleep(std::time::Duration::from_millis(10));
+ drop(write_half);
+ });
+
+ match read_half.read(&mut read_buf[..]).await {
+ Ok(0) => {}
+ Ok(len) => panic!("Unexpected read: {} bytes.", len),
+ Err(err) => panic!("Unexpected error: {}.", err),
+ }
+
+ handle.join().unwrap().unwrap();
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/tcp_into_std.rs b/third_party/rust/tokio/tests/tcp_into_std.rs
new file mode 100644
index 0000000000..4bf24c14dd
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_into_std.rs
@@ -0,0 +1,45 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::Read;
+use std::io::Result;
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::TcpListener;
+use tokio::net::TcpStream;
+
+#[tokio::test]
+async fn tcp_into_std() -> Result<()> {
+ let mut data = [0u8; 12];
+ let listener = TcpListener::bind("127.0.0.1:0").await?;
+ let addr = listener.local_addr().unwrap().to_string();
+
+ let handle = tokio::spawn(async {
+ let stream: TcpStream = TcpStream::connect(addr).await.unwrap();
+ stream
+ });
+
+ let (tokio_tcp_stream, _) = listener.accept().await?;
+ let mut std_tcp_stream = tokio_tcp_stream.into_std()?;
+ std_tcp_stream
+ .set_nonblocking(false)
+ .expect("set_nonblocking call failed");
+
+ let mut client = handle.await.expect("The task being joined has panicked");
+ client.write_all(b"Hello world!").await?;
+
+ std_tcp_stream
+ .read_exact(&mut data)
+ .expect("std TcpStream read failed!");
+ assert_eq!(b"Hello world!", &data);
+
+ // test back to tokio stream
+ std_tcp_stream
+ .set_nonblocking(true)
+ .expect("set_nonblocking call failed");
+ let mut tokio_tcp_stream = TcpStream::from_std(std_tcp_stream)?;
+ client.write_all(b"Hello tokio!").await?;
+ let _size = tokio_tcp_stream.read_exact(&mut data).await?;
+ assert_eq!(b"Hello tokio!", &data);
+
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/tcp_peek.rs b/third_party/rust/tokio/tests/tcp_peek.rs
new file mode 100644
index 0000000000..aecc0ac19c
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_peek.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio::net::TcpStream;
+
+use tokio_test::assert_ok;
+
+use std::thread;
+use std::{convert::TryInto, io::Write, net};
+
+#[tokio::test]
+async fn peek() {
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+ let t = thread::spawn(move || assert_ok!(listener.accept()).0);
+
+ let left = net::TcpStream::connect(&addr).unwrap();
+ let mut right = t.join().unwrap();
+ let _ = right.write(&[1, 2, 3, 4]).unwrap();
+
+ let mut left: TcpStream = left.try_into().unwrap();
+ let mut buf = [0u8; 16];
+ let n = assert_ok!(left.peek(&mut buf).await);
+ assert_eq!([1, 2, 3, 4], buf[..n]);
+
+ let n = assert_ok!(left.read(&mut buf).await);
+ assert_eq!([1, 2, 3, 4], buf[..n]);
+}
diff --git a/third_party/rust/tokio/tests/tcp_shutdown.rs b/third_party/rust/tokio/tests/tcp_shutdown.rs
new file mode 100644
index 0000000000..536a16130a
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_shutdown.rs
@@ -0,0 +1,28 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{self, AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn shutdown() {
+ let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ assert_ok!(AsyncWriteExt::shutdown(&mut stream).await);
+
+ let mut buf = [0u8; 1];
+ let n = assert_ok!(stream.read(&mut buf).await);
+ assert_eq!(n, 0);
+ });
+
+ let (mut stream, _) = assert_ok!(srv.accept().await);
+ let (mut rd, mut wr) = stream.split();
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, 0);
+}
diff --git a/third_party/rust/tokio/tests/tcp_socket.rs b/third_party/rust/tokio/tests/tcp_socket.rs
new file mode 100644
index 0000000000..3030416502
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_socket.rs
@@ -0,0 +1,74 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::time::Duration;
+use tokio::net::TcpSocket;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn basic_usage_v4() {
+ // Create server
+ let addr = assert_ok!("127.0.0.1:0".parse());
+ let srv = assert_ok!(TcpSocket::new_v4());
+ assert_ok!(srv.bind(addr));
+
+ let srv = assert_ok!(srv.listen(128));
+
+ // Create client & connect
+ let addr = srv.local_addr().unwrap();
+ let cli = assert_ok!(TcpSocket::new_v4());
+ let _cli = assert_ok!(cli.connect(addr).await);
+
+ // Accept
+ let _ = assert_ok!(srv.accept().await);
+}
+
+#[tokio::test]
+async fn basic_usage_v6() {
+ // Create server
+ let addr = assert_ok!("[::1]:0".parse());
+ let srv = assert_ok!(TcpSocket::new_v6());
+ assert_ok!(srv.bind(addr));
+
+ let srv = assert_ok!(srv.listen(128));
+
+ // Create client & connect
+ let addr = srv.local_addr().unwrap();
+ let cli = assert_ok!(TcpSocket::new_v6());
+ let _cli = assert_ok!(cli.connect(addr).await);
+
+ // Accept
+ let _ = assert_ok!(srv.accept().await);
+}
+
+#[tokio::test]
+async fn bind_before_connect() {
+ // Create server
+ let any_addr = assert_ok!("127.0.0.1:0".parse());
+ let srv = assert_ok!(TcpSocket::new_v4());
+ assert_ok!(srv.bind(any_addr));
+
+ let srv = assert_ok!(srv.listen(128));
+
+ // Create client & connect
+ let addr = srv.local_addr().unwrap();
+ let cli = assert_ok!(TcpSocket::new_v4());
+ assert_ok!(cli.bind(any_addr));
+ let _cli = assert_ok!(cli.connect(addr).await);
+
+ // Accept
+ let _ = assert_ok!(srv.accept().await);
+}
+
+#[tokio::test]
+async fn basic_linger() {
+ // Create server
+ let addr = assert_ok!("127.0.0.1:0".parse());
+ let srv = assert_ok!(TcpSocket::new_v4());
+ assert_ok!(srv.bind(addr));
+
+ assert!(srv.linger().unwrap().is_none());
+
+ srv.set_linger(Some(Duration::new(0, 0))).unwrap();
+ assert_eq!(srv.linger().unwrap(), Some(Duration::new(0, 0)));
+}
diff --git a/third_party/rust/tokio/tests/tcp_split.rs b/third_party/rust/tokio/tests/tcp_split.rs
new file mode 100644
index 0000000000..7171dac463
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_split.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::Result;
+use std::io::{Read, Write};
+use std::{net, thread};
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::TcpStream;
+
+#[tokio::test]
+async fn split() -> Result<()> {
+ const MSG: &[u8] = b"split";
+
+ let listener = net::TcpListener::bind("127.0.0.1:0")?;
+ let addr = listener.local_addr()?;
+
+ let handle = thread::spawn(move || {
+ let (mut stream, _) = listener.accept().unwrap();
+ stream.write_all(MSG).unwrap();
+
+ let mut read_buf = [0u8; 32];
+ let read_len = stream.read(&mut read_buf).unwrap();
+ assert_eq!(&read_buf[..read_len], MSG);
+ });
+
+ let mut stream = TcpStream::connect(&addr).await?;
+ let (mut read_half, mut write_half) = stream.split();
+
+ let mut read_buf = [0u8; 32];
+ let peek_len1 = read_half.peek(&mut read_buf[..]).await?;
+ let peek_len2 = read_half.peek(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, peek_len2);
+
+ let read_len = read_half.read(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, read_len);
+ assert_eq!(&read_buf[..read_len], MSG);
+
+ write_half.write(MSG).await?;
+ handle.join().unwrap();
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/tcp_stream.rs b/third_party/rust/tokio/tests/tcp_stream.rs
new file mode 100644
index 0000000000..0b5d12ae8e
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_stream.rs
@@ -0,0 +1,359 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt, Interest};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::try_join;
+use tokio_test::task;
+use tokio_test::{assert_ok, assert_pending, assert_ready_ok};
+
+use std::io;
+use std::task::Poll;
+use std::time::Duration;
+
+use futures::future::poll_fn;
+
+#[tokio::test]
+async fn set_linger() {
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+
+ let stream = TcpStream::connect(listener.local_addr().unwrap())
+ .await
+ .unwrap();
+
+ assert_ok!(stream.set_linger(Some(Duration::from_secs(1))));
+ assert_eq!(stream.linger().unwrap().unwrap().as_secs(), 1);
+
+ assert_ok!(stream.set_linger(None));
+ assert!(stream.linger().unwrap().is_none());
+}
+
+#[tokio::test]
+async fn try_read_write() {
+ const DATA: &[u8] = b"this is some data to write to the socket";
+
+ // Create listener
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+
+ // Create socket pair
+ let client = TcpStream::connect(listener.local_addr().unwrap())
+ .await
+ .unwrap();
+ let (server, _) = listener.accept().await.unwrap();
+ let mut written = DATA.to_vec();
+
+ // Track the server receiving data
+ let mut readable = task::spawn(server.readable());
+ assert_pending!(readable.poll());
+
+ // Write data.
+ client.writable().await.unwrap();
+ assert_eq!(DATA.len(), client.try_write(DATA).unwrap());
+
+ // The task should be notified
+ while !readable.is_woken() {
+ tokio::task::yield_now().await;
+ }
+
+ // Fill the write buffer using non-vectored I/O
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write(DATA) {
+ Ok(n) => written.extend(&DATA[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end using non-vectored I/O
+ let mut read = vec![0; written.len()];
+ let mut i = 0;
+
+ while i < read.len() {
+ server.readable().await.unwrap();
+
+ match server.try_read(&mut read[i..]) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ written.clear();
+ client.writable().await.unwrap();
+
+ // Fill the write buffer using vectored I/O
+ let data_bufs: Vec<_> = DATA.chunks(10).map(io::IoSlice::new).collect();
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write_vectored(&data_bufs) {
+ Ok(n) => written.extend(&DATA[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end using vectored I/O
+ let mut read = vec![0; written.len()];
+ let mut i = 0;
+
+ while i < read.len() {
+ server.readable().await.unwrap();
+
+ let mut bufs: Vec<_> = read[i..]
+ .chunks_mut(0x10000)
+ .map(io::IoSliceMut::new)
+ .collect();
+ match server.try_read_vectored(&mut bufs) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ // Now, we listen for shutdown
+ drop(client);
+
+ loop {
+ let ready = server.ready(Interest::READABLE).await.unwrap();
+
+ if ready.is_read_closed() {
+ return;
+ } else {
+ tokio::task::yield_now().await;
+ }
+ }
+}
+
+#[test]
+fn buffer_not_included_in_future() {
+ use std::mem;
+
+ const N: usize = 4096;
+
+ let fut = async {
+ let stream = TcpStream::connect("127.0.0.1:8080").await.unwrap();
+
+ loop {
+ stream.readable().await.unwrap();
+
+ let mut buf = [0; N];
+ let n = stream.try_read(&mut buf[..]).unwrap();
+
+ if n == 0 {
+ break;
+ }
+ }
+ };
+
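+    // The 4096-byte read buffer is never held across an `.await` point, so
+    // it should not be stored in the future's state.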
+ let n = mem::size_of_val(&fut);
+ assert!(n < 1000);
+}
+
+macro_rules! assert_readable_by_polling {
+ ($stream:expr) => {
+ assert_ok!(poll_fn(|cx| $stream.poll_read_ready(cx)).await);
+ };
+}
+
+macro_rules! assert_not_readable_by_polling {
+ ($stream:expr) => {
+ poll_fn(|cx| {
+ assert_pending!($stream.poll_read_ready(cx));
+ Poll::Ready(())
+ })
+ .await;
+ };
+}
+
+macro_rules! assert_writable_by_polling {
+ ($stream:expr) => {
+ assert_ok!(poll_fn(|cx| $stream.poll_write_ready(cx)).await);
+ };
+}
+
+macro_rules! assert_not_writable_by_polling {
+ ($stream:expr) => {
+ poll_fn(|cx| {
+ assert_pending!($stream.poll_write_ready(cx));
+ Poll::Ready(())
+ })
+ .await;
+ };
+}
+
+#[tokio::test]
+async fn poll_read_ready() {
+ let (mut client, mut server) = create_pair().await;
+
+ // Initial state - not readable.
+ assert_not_readable_by_polling!(server);
+
+ // There is data in the buffer - readable.
+ assert_ok!(client.write_all(b"ping").await);
+ assert_readable_by_polling!(server);
+
+ // Readable until calls to `poll_read` return `Poll::Pending`.
+ let mut buf = [0u8; 4];
+ assert_ok!(server.read_exact(&mut buf).await);
+ assert_readable_by_polling!(server);
+ read_until_pending(&mut server);
+ assert_not_readable_by_polling!(server);
+
+ // Detect the client disconnect.
+ drop(client);
+ assert_readable_by_polling!(server);
+}
+
+#[tokio::test]
+async fn poll_write_ready() {
+ let (mut client, server) = create_pair().await;
+
+ // Initial state - writable.
+ assert_writable_by_polling!(client);
+
+ // No space to write - not writable.
+ write_until_pending(&mut client);
+ assert_not_writable_by_polling!(client);
+
+ // Detect the server disconnect.
+ drop(server);
+ assert_writable_by_polling!(client);
+}
+
+async fn create_pair() -> (TcpStream, TcpStream) {
+ let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(listener.local_addr());
+ let (client, (server, _)) = assert_ok!(try_join!(TcpStream::connect(&addr), listener.accept()));
+ (client, server)
+}
+
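+/// Reads from the stream until `try_read` returns `WouldBlock`.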
+fn read_until_pending(stream: &mut TcpStream) {
+ let mut buf = vec![0u8; 1024 * 1024];
+ loop {
+ match stream.try_read(&mut buf) {
+ Ok(_) => (),
+ Err(err) => {
+ assert_eq!(err.kind(), io::ErrorKind::WouldBlock);
+ break;
+ }
+ }
+ }
+}
+
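+/// Writes zeroes to the stream until `try_write` returns `WouldBlock`.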
+fn write_until_pending(stream: &mut TcpStream) {
+ let buf = vec![0u8; 1024 * 1024];
+ loop {
+ match stream.try_write(&buf) {
+ Ok(_) => (),
+ Err(err) => {
+ assert_eq!(err.kind(), io::ErrorKind::WouldBlock);
+ break;
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn try_read_buf() {
+ const DATA: &[u8] = b"this is some data to write to the socket";
+
+ // Create listener
+ let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+
+ // Create socket pair
+ let client = TcpStream::connect(listener.local_addr().unwrap())
+ .await
+ .unwrap();
+ let (server, _) = listener.accept().await.unwrap();
+ let mut written = DATA.to_vec();
+
+ // Track the server receiving data
+ let mut readable = task::spawn(server.readable());
+ assert_pending!(readable.poll());
+
+ // Write data.
+ client.writable().await.unwrap();
+ assert_eq!(DATA.len(), client.try_write(DATA).unwrap());
+
+ // The task should be notified
+ while !readable.is_woken() {
+ tokio::task::yield_now().await;
+ }
+
+ // Fill the write buffer
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write(DATA) {
+ Ok(n) => written.extend(&DATA[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end
+ let mut read = Vec::with_capacity(written.len());
+ let mut i = 0;
+
+ while i < read.capacity() {
+ server.readable().await.unwrap();
+
+ match server.try_read_buf(&mut read) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ // Now, we listen for shutdown
+ drop(client);
+
+ loop {
+ let ready = server.ready(Interest::READABLE).await.unwrap();
+
+ if ready.is_read_closed() {
+ return;
+ } else {
+ tokio::task::yield_now().await;
+ }
+ }
+}
diff --git a/third_party/rust/tokio/tests/test_clock.rs b/third_party/rust/tokio/tests/test_clock.rs
new file mode 100644
index 0000000000..891636fdb2
--- /dev/null
+++ b/third_party/rust/tokio/tests/test_clock.rs
@@ -0,0 +1,50 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, Duration, Instant};
+
+#[tokio::test]
+async fn resume_lets_time_move_forward_instead_of_resetting_it() {
+ let start = Instant::now();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ let advanced_by_ten_secs = Instant::now();
+ assert!(advanced_by_ten_secs - start > Duration::from_secs(10));
+ assert!(advanced_by_ten_secs - start < Duration::from_secs(11));
+ time::resume();
+ assert!(advanced_by_ten_secs < Instant::now());
+ assert!(Instant::now() - advanced_by_ten_secs < Duration::from_secs(1));
+}
+
+#[tokio::test]
+async fn can_pause_after_resume() {
+ let start = Instant::now();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ time::resume();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ assert!(Instant::now() - start > Duration::from_secs(20));
+ assert!(Instant::now() - start < Duration::from_secs(21));
+}
+
+#[tokio::test]
+#[should_panic]
+async fn freezing_time_while_frozen_panics() {
+ time::pause();
+ time::pause();
+}
+
+#[tokio::test]
+#[should_panic]
+async fn advancing_time_when_time_is_not_frozen_panics() {
+ time::advance(Duration::from_secs(1)).await;
+}
+
+#[tokio::test]
+#[should_panic]
+async fn resuming_time_when_not_frozen_panics() {
+ time::pause();
+ time::resume();
+ time::resume();
+}
diff --git a/third_party/rust/tokio/tests/time_interval.rs b/third_party/rust/tokio/tests/time_interval.rs
new file mode 100644
index 0000000000..186582e2e5
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_interval.rs
@@ -0,0 +1,211 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, Duration, Instant, MissedTickBehavior};
+use tokio_test::{assert_pending, assert_ready_eq, task};
+
+use std::task::Poll;
+
+// Takes the `Interval` task, `start` variable, and optional time deltas
+// For each time delta, it polls the `Interval` and asserts that the result is
+// equal to `start` + the specific time delta. Then it asserts that the
+// `Interval` is pending.
+macro_rules! check_interval_poll {
+ ($i:ident, $start:ident, $($delta:expr),*$(,)?) => {
+ $(
+ assert_ready_eq!(poll_next(&mut $i), $start + ms($delta));
+ )*
+ assert_pending!(poll_next(&mut $i));
+ };
+ ($i:ident, $start:ident) => {
+ check_interval_poll!($i, $start,);
+ };
+}
+
+#[tokio::test]
+#[should_panic]
+async fn interval_zero_duration() {
+ let _ = time::interval_at(Instant::now(), ms(0));
+}
+
+// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
+// Actual ticks: | work -----| delay | work | work | work -| work -----|
+// Poll behavior: | | | | | | | |
+// | | | | | | | |
+// Ready(s) | | Ready(s + 2p) | | | |
+// Pending | Ready(s + 3p) | | |
+// Ready(s + p) Ready(s + 4p) | |
+// Ready(s + 5p) |
+// Ready(s + 6p)
+#[tokio::test(start_paused = true)]
+async fn burst() {
+ let start = Instant::now();
+
+ // This is necessary because the timer is only so granular, and in order for
+ // all our ticks to resolve, the time needs to be 1ms ahead of what we
+ // expect, so that the runtime will see that it is time to resolve the timer
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+
+ check_interval_poll!(i, start, 0);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(200)).await;
+ check_interval_poll!(i, start, 300);
+
+ time::advance(ms(650)).await;
+ check_interval_poll!(i, start, 600, 900);
+
+ time::advance(ms(200)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start, 1200);
+
+ time::advance(ms(250)).await;
+ check_interval_poll!(i, start, 1500);
+
+ time::advance(ms(300)).await;
+ check_interval_poll!(i, start, 1800);
+}
+
+// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
+// Actual ticks: | work -----| delay | work -----| work -----| work -----|
+// Poll behavior: | | | | | | | |
+// | | | | | | | |
+// Ready(s) | | Ready(s + 2p) | | | |
+// Pending | Pending | | |
+// Ready(s + p) Ready(s + 2p + d) | |
+// Ready(s + 3p + d) |
+// Ready(s + 4p + d)
+#[tokio::test(start_paused = true)]
+async fn delay() {
+ let start = Instant::now();
+
+ // This is necessary because the timer is only so granular, and in order for
+ // all our ticks to resolve, the time needs to be 1ms ahead of what we
+ // expect, so that the runtime will see that it is time to resolve the timer
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+ i.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+ check_interval_poll!(i, start, 0);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(200)).await;
+ check_interval_poll!(i, start, 300);
+
+ time::advance(ms(650)).await;
+ check_interval_poll!(i, start, 600);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+    // We have to add one here for the same reason as above.
+ // Because `Interval` has reset its timer according to `Instant::now()`,
+ // we have to go forward 1 more millisecond than is expected so that the
+ // runtime realizes that it's time to resolve the timer.
+ time::advance(ms(201)).await;
+ // We add one because when using the `Delay` behavior, `Interval`
+ // adds the `period` from `Instant::now()`, which will always be off by one
+ // because we have to advance time by 1 (see above).
+ check_interval_poll!(i, start, 1251);
+
+ time::advance(ms(300)).await;
+ // Again, we add one.
+ check_interval_poll!(i, start, 1551);
+
+ time::advance(ms(300)).await;
+ check_interval_poll!(i, start, 1851);
+}
+
+// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
+// Actual ticks: | work -----| delay | work ---| work -----| work -----|
+// Poll behavior: | | | | | | |
+// | | | | | | |
+// Ready(s) | | Ready(s + 2p) | | |
+// Pending | Ready(s + 4p) | |
+// Ready(s + p) Ready(s + 5p) |
+// Ready(s + 6p)
+#[tokio::test(start_paused = true)]
+async fn skip() {
+ let start = Instant::now();
+
+ // This is necessary because the timer is only so granular, and in order for
+ // all our ticks to resolve, the time needs to be 1ms ahead of what we
+ // expect, so that the runtime will see that it is time to resolve the timer
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+ i.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+ check_interval_poll!(i, start, 0);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(200)).await;
+ check_interval_poll!(i, start, 300);
+
+ time::advance(ms(650)).await;
+ check_interval_poll!(i, start, 600);
+
+ time::advance(ms(250)).await;
+ check_interval_poll!(i, start, 1200);
+
+ time::advance(ms(300)).await;
+ check_interval_poll!(i, start, 1500);
+
+ time::advance(ms(300)).await;
+ check_interval_poll!(i, start, 1800);
+}
+
+#[tokio::test(start_paused = true)]
+async fn reset() {
+ let start = Instant::now();
+
+ // This is necessary because the timer is only so granular, and in order for
+ // all our ticks to resolve, the time needs to be 1ms ahead of what we
+ // expect, so that the runtime will see that it is time to resolve the timer
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+
+ check_interval_poll!(i, start, 0);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(200)).await;
+ check_interval_poll!(i, start, 300);
+
+ time::advance(ms(100)).await;
+ check_interval_poll!(i, start);
+
+ i.reset();
+
+ time::advance(ms(250)).await;
+ check_interval_poll!(i, start);
+
+ time::advance(ms(50)).await;
+    // We add one because when using the `reset` method, `Interval` adds the
+    // `period` from `Instant::now()`, which will always be off by one
+    // because we have to advance time by 1 (see above).
+ check_interval_poll!(i, start, 701);
+
+ time::advance(ms(300)).await;
+ check_interval_poll!(i, start, 1001);
+}
+
+fn poll_next(interval: &mut task::Spawn<time::Interval>) -> Poll<Instant> {
+ interval.enter(|cx, mut interval| interval.poll_tick(cx))
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/time_pause.rs b/third_party/rust/tokio/tests/time_pause.rs
new file mode 100644
index 0000000000..02e050a2dc
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_pause.rs
@@ -0,0 +1,326 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use rand::SeedableRng;
+use rand::{rngs::StdRng, Rng};
+use tokio::time::{self, Duration, Instant, Sleep};
+use tokio_test::{assert_elapsed, assert_err, assert_pending, assert_ready, assert_ready_eq, task};
+
+use std::{
+ future::Future,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
+#[tokio::test]
+async fn pause_time_in_main() {
+ tokio::time::pause();
+}
+
+#[tokio::test]
+async fn pause_time_in_task() {
+ let t = tokio::spawn(async {
+ tokio::time::pause();
+ });
+
+ t.await.unwrap();
+}
+
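+// `time::pause` is only supported on the current-thread runtime; calling it on
+// a multi-threaded runtime panics, which the next two tests rely on.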
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+#[should_panic]
+async fn pause_time_in_main_threads() {
+ tokio::time::pause();
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn pause_time_in_spawn_threads() {
+ let t = tokio::spawn(async {
+ tokio::time::pause();
+ });
+
+ assert_err!(t.await);
+}
+
+#[test]
+fn paused_time_is_deterministic() {
+ let run_1 = paused_time_stress_run();
+ let run_2 = paused_time_stress_run();
+
+ assert_eq!(run_1, run_2);
+}
+
+#[tokio::main(flavor = "current_thread", start_paused = true)]
+async fn paused_time_stress_run() -> Vec<Duration> {
+ let mut rng = StdRng::seed_from_u64(1);
+
+ let mut times = vec![];
+ let start = Instant::now();
+ for _ in 0..10_000 {
+ let sleep = rng.gen_range(Duration::from_secs(0)..Duration::from_secs(1));
+ time::sleep(sleep).await;
+ times.push(start.elapsed());
+ }
+
+ times
+}
+
+#[tokio::test(start_paused = true)]
+async fn advance_after_poll() {
+ time::sleep(ms(1)).await;
+
+ let start = Instant::now();
+
+ let mut sleep = task::spawn(time::sleep_until(start + ms(300)));
+
+ assert_pending!(sleep.poll());
+
+ let before = Instant::now();
+ time::advance(ms(100)).await;
+ assert_elapsed!(before, ms(100));
+
+ assert_pending!(sleep.poll());
+}
+
+#[tokio::test(start_paused = true)]
+async fn sleep_no_poll() {
+ let start = Instant::now();
+
+ // TODO: Skip this
+ time::advance(ms(1)).await;
+
+ let mut sleep = task::spawn(time::sleep_until(start + ms(300)));
+
+ let before = Instant::now();
+ time::advance(ms(100)).await;
+ assert_elapsed!(before, ms(100));
+
+ assert_pending!(sleep.poll());
+}
+
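+// The `State`/`Tester` machinery below drives a `Sleep` and a call to
+// `time::advance` from within a single future: it optionally polls the sleep
+// first, awaits the 100ms advance, and then asserts that ~100ms elapsed while
+// the sleep (due at +300ms) is still pending.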
+enum State {
+ Begin,
+ AwaitingAdvance(Pin<Box<dyn Future<Output = ()>>>),
+ AfterAdvance,
+}
+
+struct Tester {
+ sleep: Pin<Box<Sleep>>,
+ state: State,
+ before: Option<Instant>,
+ poll: bool,
+}
+
+impl Future for Tester {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match &mut self.state {
+ State::Begin => {
+ if self.poll {
+ assert_pending!(self.sleep.as_mut().poll(cx));
+ }
+ self.before = Some(Instant::now());
+ let advance_fut = Box::pin(time::advance(ms(100)));
+ self.state = State::AwaitingAdvance(advance_fut);
+ self.poll(cx)
+ }
+ State::AwaitingAdvance(ref mut advance_fut) => match advance_fut.as_mut().poll(cx) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(()) => {
+ self.state = State::AfterAdvance;
+ self.poll(cx)
+ }
+ },
+ State::AfterAdvance => {
+ assert_elapsed!(self.before.unwrap(), ms(100));
+
+ assert_pending!(self.sleep.as_mut().poll(cx));
+
+ Poll::Ready(())
+ }
+ }
+ }
+}
+
+#[tokio::test(start_paused = true)]
+async fn sleep_same_task() {
+ let start = Instant::now();
+
+ // TODO: Skip this
+ time::advance(ms(1)).await;
+
+ let sleep = Box::pin(time::sleep_until(start + ms(300)));
+
+ Tester {
+ sleep,
+ state: State::Begin,
+ before: None,
+ poll: true,
+ }
+ .await;
+}
+
+#[tokio::test(start_paused = true)]
+async fn sleep_same_task_no_poll() {
+ let start = Instant::now();
+
+ // TODO: Skip this
+ time::advance(ms(1)).await;
+
+ let sleep = Box::pin(time::sleep_until(start + ms(300)));
+
+ Tester {
+ sleep,
+ state: State::Begin,
+ before: None,
+ poll: false,
+ }
+ .await;
+}
+
+#[tokio::test(start_paused = true)]
+async fn interval() {
+ let start = Instant::now();
+
+ // TODO: Skip this
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+
+ assert_ready_eq!(poll_next(&mut i), start);
+ assert_pending!(poll_next(&mut i));
+
+ let before = Instant::now();
+ time::advance(ms(100)).await;
+ assert_elapsed!(before, ms(100));
+ assert_pending!(poll_next(&mut i));
+
+ let before = Instant::now();
+ time::advance(ms(200)).await;
+ assert_elapsed!(before, ms(200));
+ assert_ready_eq!(poll_next(&mut i), start + ms(300));
+ assert_pending!(poll_next(&mut i));
+
+ let before = Instant::now();
+ time::advance(ms(400)).await;
+ assert_elapsed!(before, ms(400));
+ assert_ready_eq!(poll_next(&mut i), start + ms(600));
+ assert_pending!(poll_next(&mut i));
+
+ let before = Instant::now();
+ time::advance(ms(500)).await;
+ assert_elapsed!(before, ms(500));
+ assert_ready_eq!(poll_next(&mut i), start + ms(900));
+ assert_ready_eq!(poll_next(&mut i), start + ms(1200));
+ assert_pending!(poll_next(&mut i));
+}
+
+#[tokio::test(start_paused = true)]
+async fn test_time_advance_sub_ms() {
+ let now = Instant::now();
+
+ let dur = Duration::from_micros(51_592);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+
+ let now = Instant::now();
+ let dur = Duration::from_micros(1);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+}
+
+#[tokio::test(start_paused = true)]
+async fn test_time_advance_3ms_and_change() {
+ let now = Instant::now();
+
+ let dur = Duration::from_micros(3_141_592);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+
+ let now = Instant::now();
+ let dur = Duration::from_micros(3_123_456);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+}
+
+#[tokio::test(start_paused = true)]
+async fn regression_3710_with_submillis_advance() {
+ let start = Instant::now();
+
+ time::advance(Duration::from_millis(1)).await;
+
+ let mut sleep = task::spawn(time::sleep_until(start + Duration::from_secs(60)));
+
+ assert_pending!(sleep.poll());
+
+ let before = Instant::now();
+ let dur = Duration::from_micros(51_592);
+ time::advance(dur).await;
+ assert_eq!(before.elapsed(), dur);
+
+ assert_pending!(sleep.poll());
+}
+
+#[tokio::test(start_paused = true)]
+async fn exact_1ms_advance() {
+ let now = Instant::now();
+
+ let dur = Duration::from_millis(1);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(1);
+ time::advance(dur).await;
+
+ assert_eq!(now.elapsed(), dur);
+}
+
+#[tokio::test(start_paused = true)]
+async fn advance_once_with_timer() {
+ let mut sleep = task::spawn(time::sleep(Duration::from_millis(1)));
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(250)).await;
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(1500)).await;
+
+ assert!(sleep.is_woken());
+ assert_ready!(sleep.poll());
+}
+
+#[tokio::test(start_paused = true)]
+async fn advance_multi_with_timer() {
+ // Round to the nearest ms
+ // time::sleep(Duration::from_millis(1)).await;
+
+ let mut sleep = task::spawn(time::sleep(Duration::from_millis(1)));
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(250)).await;
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(250)).await;
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(250)).await;
+ assert_pending!(sleep.poll());
+
+ time::advance(Duration::from_micros(250)).await;
+ assert!(sleep.is_woken());
+ assert_ready!(sleep.poll());
+}
+
+fn poll_next(interval: &mut task::Spawn<time::Interval>) -> Poll<Instant> {
+ interval.enter(|cx, mut interval| interval.poll_tick(cx))
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/time_rt.rs b/third_party/rust/tokio/tests/time_rt.rs
new file mode 100644
index 0000000000..23367be418
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_rt.rs
@@ -0,0 +1,89 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::*;
+
+use std::sync::mpsc;
+
+#[test]
+fn timer_with_threaded_runtime() {
+ use tokio::runtime::Runtime;
+
+ let rt = Runtime::new().unwrap();
+ let (tx, rx) = mpsc::channel();
+
+ rt.spawn(async move {
+ let when = Instant::now() + Duration::from_millis(10);
+
+ sleep_until(when).await;
+ assert!(Instant::now() >= when);
+
+ tx.send(()).unwrap();
+ });
+
+ rx.recv().unwrap();
+}
+
+#[test]
+fn timer_with_basic_scheduler() {
+ use tokio::runtime::Builder;
+
+ let rt = Builder::new_current_thread().enable_all().build().unwrap();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async move {
+ let when = Instant::now() + Duration::from_millis(10);
+
+ sleep_until(when).await;
+ assert!(Instant::now() >= when);
+
+ tx.send(()).unwrap();
+ });
+
+ rx.recv().unwrap();
+}
+
+#[tokio::test]
+async fn starving() {
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
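+ // `Starve` wraps the sleep and, whenever the inner future is not ready,
+ // increments a counter and immediately wakes itself again, so the task never
+ // goes idle; the test checks that the timer still fires under this pressure.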
+ struct Starve<T: Future<Output = ()> + Unpin>(T, u64);
+
+ impl<T: Future<Output = ()> + Unpin> Future for Starve<T> {
+ type Output = u64;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u64> {
+ if Pin::new(&mut self.0).poll(cx).is_ready() {
+ return Poll::Ready(self.1);
+ }
+
+ self.1 += 1;
+
+ cx.waker().wake_by_ref();
+
+ Poll::Pending
+ }
+ }
+
+ let when = Instant::now() + Duration::from_millis(10);
+ let starve = Starve(Box::pin(sleep_until(when)), 0);
+
+ starve.await;
+ assert!(Instant::now() >= when);
+}
+
+#[tokio::test]
+async fn timeout_value() {
+ use tokio::sync::oneshot;
+
+ let (_tx, rx) = oneshot::channel::<()>();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(10);
+
+ let res = timeout(dur, rx).await;
+ assert!(res.is_err());
+ assert!(Instant::now() >= now + dur);
+}
diff --git a/third_party/rust/tokio/tests/time_sleep.rs b/third_party/rust/tokio/tests/time_sleep.rs
new file mode 100644
index 0000000000..97cadecb0a
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_sleep.rs
@@ -0,0 +1,356 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::future::Future;
+use std::task::Context;
+
+use futures::task::noop_waker_ref;
+
+use tokio::time::{self, Duration, Instant};
+use tokio_test::{assert_elapsed, assert_pending, assert_ready, task};
+
+#[tokio::test]
+async fn immediate_sleep() {
+ time::pause();
+
+ let now = Instant::now();
+
+ // Ready!
+ time::sleep_until(now).await;
+ assert_elapsed!(now, ms(1));
+}
+
+#[tokio::test]
+async fn is_elapsed() {
+ time::pause();
+
+ let sleep = time::sleep(Duration::from_millis(10));
+
+ tokio::pin!(sleep);
+
+ assert!(!sleep.is_elapsed());
+
+ assert!(futures::poll!(sleep.as_mut()).is_pending());
+
+ assert!(!sleep.is_elapsed());
+
+ sleep.as_mut().await;
+
+ assert!(sleep.is_elapsed());
+}
+
+#[tokio::test]
+async fn delayed_sleep_level_0() {
+ time::pause();
+
+ for &i in &[1, 10, 60] {
+ let now = Instant::now();
+ let dur = ms(i);
+
+ time::sleep_until(now + dur).await;
+
+ assert_elapsed!(now, dur);
+ }
+}
+
+#[tokio::test]
+async fn sub_ms_delayed_sleep() {
+ time::pause();
+
+ for _ in 0..5 {
+ let now = Instant::now();
+ let deadline = now + ms(1) + Duration::new(0, 1);
+
+ time::sleep_until(deadline).await;
+
+ assert_elapsed!(now, ms(1));
+ }
+}
+
+#[tokio::test]
+async fn delayed_sleep_wrapping_level_0() {
+ time::pause();
+
+ time::sleep(ms(5)).await;
+
+ let now = Instant::now();
+ time::sleep_until(now + ms(60)).await;
+
+ assert_elapsed!(now, ms(60));
+}
+
+#[tokio::test]
+async fn reset_future_sleep_before_fire() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut sleep = task::spawn(Box::pin(time::sleep_until(now + ms(100))));
+ assert_pending!(sleep.poll());
+
+ let mut sleep = sleep.into_inner();
+
+ sleep.as_mut().reset(Instant::now() + ms(200));
+ sleep.await;
+
+ assert_elapsed!(now, ms(200));
+}
+
+#[tokio::test]
+async fn reset_past_sleep_before_turn() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut sleep = task::spawn(Box::pin(time::sleep_until(now + ms(100))));
+ assert_pending!(sleep.poll());
+
+ let mut sleep = sleep.into_inner();
+
+ sleep.as_mut().reset(now + ms(80));
+ sleep.await;
+
+ assert_elapsed!(now, ms(80));
+}
+
+#[tokio::test]
+async fn reset_past_sleep_before_fire() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut sleep = task::spawn(Box::pin(time::sleep_until(now + ms(100))));
+ assert_pending!(sleep.poll());
+
+ let mut sleep = sleep.into_inner();
+
+ time::sleep(ms(10)).await;
+
+ sleep.as_mut().reset(now + ms(80));
+ sleep.await;
+
+ assert_elapsed!(now, ms(80));
+}
+
+#[tokio::test]
+async fn reset_future_sleep_after_fire() {
+ time::pause();
+
+ let now = Instant::now();
+ let mut sleep = Box::pin(time::sleep_until(now + ms(100)));
+
+ sleep.as_mut().await;
+ assert_elapsed!(now, ms(100));
+
+ sleep.as_mut().reset(now + ms(110));
+ sleep.await;
+ assert_elapsed!(now, ms(110));
+}
+
+#[tokio::test]
+async fn reset_sleep_to_past() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut sleep = task::spawn(Box::pin(time::sleep_until(now + ms(100))));
+ assert_pending!(sleep.poll());
+
+ time::sleep(ms(50)).await;
+
+ assert!(!sleep.is_woken());
+
+ sleep.as_mut().reset(now + ms(40));
+
+ // TODO: is this required?
+ //assert!(sleep.is_woken());
+
+ assert_ready!(sleep.poll());
+}
+
+#[test]
+#[should_panic]
+fn creating_sleep_outside_of_context() {
+ let now = Instant::now();
+
+ // This creates a sleep outside of the context of a mock timer. This tests
+ // that it will panic.
+ let _fut = time::sleep_until(now + ms(500));
+}
+
+#[tokio::test]
+async fn greater_than_max() {
+ const YR_5: u64 = 5 * 365 * 24 * 60 * 60 * 1000;
+
+ time::pause();
+ time::sleep_until(Instant::now() + ms(YR_5)).await;
+}
+
+#[tokio::test]
+async fn short_sleeps() {
+ for i in 0..10000 {
+ if (i % 10) == 0 {
+ eprintln!("=== {}", i);
+ }
+ tokio::time::sleep(std::time::Duration::from_millis(0)).await;
+ }
+}
+
+#[tokio::test]
+async fn multi_long_sleeps() {
+ tokio::time::pause();
+
+ for _ in 0..5u32 {
+ tokio::time::sleep(Duration::from_secs(
+ // about a year
+ 365 * 24 * 3600,
+ ))
+ .await;
+ }
+
+ let deadline = tokio::time::Instant::now()
+ + Duration::from_secs(
+ // about 10 years
+ 10 * 365 * 24 * 3600,
+ );
+
+ tokio::time::sleep_until(deadline).await;
+
+ assert!(tokio::time::Instant::now() >= deadline);
+}
+
+#[tokio::test]
+async fn long_sleeps() {
+ tokio::time::pause();
+
+ let deadline = tokio::time::Instant::now()
+ + Duration::from_secs(
+ // about 10 years
+ 10 * 365 * 24 * 3600,
+ );
+
+ tokio::time::sleep_until(deadline).await;
+
+ assert!(tokio::time::Instant::now() >= deadline);
+ assert!(tokio::time::Instant::now() <= deadline + Duration::from_millis(1));
+}
+
+#[tokio::test]
+async fn reset_after_firing() {
+ let timer = tokio::time::sleep(std::time::Duration::from_millis(1));
+ tokio::pin!(timer);
+
+ let deadline = timer.deadline();
+
+ timer.as_mut().await;
+ assert_ready!(timer
+ .as_mut()
+ .poll(&mut Context::from_waker(noop_waker_ref())));
+ timer
+ .as_mut()
+ .reset(tokio::time::Instant::now() + std::time::Duration::from_secs(600));
+
+ assert_ne!(deadline, timer.deadline());
+
+ assert_pending!(timer
+ .as_mut()
+ .poll(&mut Context::from_waker(noop_waker_ref())));
+ assert_pending!(timer
+ .as_mut()
+ .poll(&mut Context::from_waker(noop_waker_ref())));
+}
+
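+// The timer wheel has `NUM_LEVELS` levels of 64 (2^6) slots each, so the
+// largest duration the driver can represent is 2^(6 * NUM_LEVELS) - 1
+// milliseconds (roughly 2.2 years).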
+const NUM_LEVELS: usize = 6;
+const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;
+
+#[tokio::test]
+async fn exactly_max() {
+ time::pause();
+ time::sleep(ms(MAX_DURATION)).await;
+}
+
+#[tokio::test]
+async fn no_out_of_bounds_close_to_max() {
+ time::pause();
+ time::sleep(ms(MAX_DURATION - 1)).await;
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
+
+#[tokio::test]
+async fn drop_after_reschedule_at_new_scheduled_time() {
+ use futures::poll;
+
+ tokio::time::pause();
+
+ let start = tokio::time::Instant::now();
+
+ let mut a = Box::pin(tokio::time::sleep(Duration::from_millis(5)));
+ let mut b = Box::pin(tokio::time::sleep(Duration::from_millis(5)));
+ let mut c = Box::pin(tokio::time::sleep(Duration::from_millis(10)));
+
+ let _ = poll!(&mut a);
+ let _ = poll!(&mut b);
+ let _ = poll!(&mut c);
+
+ b.as_mut().reset(start + Duration::from_millis(10));
+ a.await;
+
+ drop(b);
+}
+
+#[tokio::test]
+async fn drop_from_wake() {
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::sync::atomic::{AtomicBool, Ordering};
+ use std::sync::{Arc, Mutex};
+ use std::task::Context;
+
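+ // The waker below clears the shared list of timers from inside
+ // `wake_by_ref`, so every `Sleep` is dropped re-entrantly while the driver
+ // is firing them; the test asserts that this does not panic.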
+ let panicked = Arc::new(AtomicBool::new(false));
+ let list: Arc<Mutex<Vec<Pin<Box<tokio::time::Sleep>>>>> = Arc::new(Mutex::new(Vec::new()));
+
+ let arc_wake = Arc::new(DropWaker(panicked.clone(), list.clone()));
+ let arc_wake = futures::task::waker(arc_wake);
+
+ tokio::time::pause();
+
+ let mut lock = list.lock().unwrap();
+
+ for _ in 0..100 {
+ let mut timer = Box::pin(tokio::time::sleep(Duration::from_millis(10)));
+
+ let _ = timer.as_mut().poll(&mut Context::from_waker(&arc_wake));
+
+ lock.push(timer);
+ }
+
+ drop(lock);
+
+ tokio::time::sleep(Duration::from_millis(11)).await;
+
+ assert!(
+ !panicked.load(Ordering::SeqCst),
+ "panicked when dropping timers"
+ );
+
+ #[derive(Clone)]
+ struct DropWaker(
+ Arc<AtomicBool>,
+ Arc<Mutex<Vec<Pin<Box<tokio::time::Sleep>>>>>,
+ );
+
+ impl futures::task::ArcWake for DropWaker {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
+ *arc_self.1.lock().expect("panic in lock") = Vec::new()
+ }));
+
+ if result.is_err() {
+ arc_self.0.store(true, Ordering::SeqCst);
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/tests/time_timeout.rs b/third_party/rust/tokio/tests/time_timeout.rs
new file mode 100644
index 0000000000..a1ff51e7d2
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_timeout.rs
@@ -0,0 +1,150 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::oneshot;
+use tokio::time::{self, timeout, timeout_at, Instant};
+use tokio_test::*;
+
+use futures::future::pending;
+use std::time::Duration;
+
+#[tokio::test]
+async fn simultaneous_deadline_future_completion() {
+ // Create a future that is immediately ready
+ let mut fut = task::spawn(timeout_at(Instant::now(), async {}));
+
+ // Ready!
+ assert_ready_ok!(fut.poll());
+}
+
+#[tokio::test]
+async fn completed_future_past_deadline() {
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout_at(Instant::now() - ms(1000), async {}));
+
+ // Ready!
+ assert_ready_ok!(fut.poll());
+}
+
+#[tokio::test]
+async fn future_and_deadline_in_future() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout_at(Instant::now() + ms(100), rx));
+
+ assert_pending!(fut.poll());
+
+ // Turn the timer; it runs for the elapsed time
+ time::advance(ms(90)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+ assert!(fut.is_woken());
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
+async fn future_and_timeout_in_future() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout(ms(100), rx));
+
+ // Not ready yet
+ assert_pending!(fut.poll());
+
+ // Turn the timer; it runs for the elapsed time
+ time::advance(ms(90)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
+async fn very_large_timeout() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // copy-paste unstable `Duration::MAX`
+ let duration_max = Duration::from_secs(u64::MAX) + Duration::from_nanos(999_999_999);
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout(duration_max, rx));
+
+ // Not ready yet
+ assert_pending!(fut.poll());
+
+ // Turn the timer; it runs for the elapsed time
+ time::advance(Duration::from_secs(86400 * 365 * 10)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
+async fn deadline_now_elapses() {
+ use futures::future::pending;
+
+ time::pause();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout_at(Instant::now(), pending::<()>()));
+
+ // Factor in jitter
+ // TODO: don't require this
+ time::advance(ms(1)).await;
+
+ assert_ready_err!(fut.poll());
+}
+
+#[tokio::test]
+async fn deadline_future_elapses() {
+ time::pause();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout_at(Instant::now() + ms(300), pending::<()>()));
+
+ assert_pending!(fut.poll());
+
+ time::advance(ms(301)).await;
+
+ assert!(fut.is_woken());
+ assert_ready_err!(fut.poll());
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
+
+#[tokio::test]
+async fn timeout_is_not_exhausted_by_future() {
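+ // The wrapped future loops forever on reads from `empty()`, each of which
+ // returns immediately; the test asserts that the 1ms timeout still fires
+ // instead of being starved by an always-ready future.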
+ let fut = timeout(ms(1), async {
+ let mut buffer = [0u8; 1];
+ loop {
+ use tokio::io::AsyncReadExt;
+ let _ = tokio::io::empty().read(&mut buffer).await;
+ }
+ });
+
+ assert!(fut.await.is_err());
+}
diff --git a/third_party/rust/tokio/tests/udp.rs b/third_party/rust/tokio/tests/udp.rs
new file mode 100644
index 0000000000..ec2a1e9610
--- /dev/null
+++ b/third_party/rust/tokio/tests/udp.rs
@@ -0,0 +1,486 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use futures::future::poll_fn;
+use std::io;
+use std::sync::Arc;
+use tokio::{io::ReadBuf, net::UdpSocket};
+use tokio_test::assert_ok;
+
+const MSG: &[u8] = b"hello";
+const MSG_LEN: usize = MSG.len();
+
+#[tokio::test]
+async fn send_recv() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ sender.connect(receiver.local_addr()?).await?;
+ receiver.connect(sender.local_addr()?).await?;
+
+ sender.send(MSG).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let len = receiver.recv(&mut recv_buf[..]).await?;
+
+ assert_eq!(&recv_buf[..len], MSG);
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_recv_poll() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ sender.connect(receiver.local_addr()?).await?;
+ receiver.connect(sender.local_addr()?).await?;
+
+ poll_fn(|cx| sender.poll_send(cx, MSG)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let _len = poll_fn(|cx| receiver.poll_recv(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), MSG);
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_recv_from() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ let receiver_addr = receiver.local_addr()?;
+ sender.send_to(MSG, &receiver_addr).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let (len, addr) = receiver.recv_from(&mut recv_buf[..]).await?;
+
+ assert_eq!(&recv_buf[..len], MSG);
+ assert_eq!(addr, sender.local_addr()?);
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_recv_from_poll() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ let receiver_addr = receiver.local_addr()?;
+ poll_fn(|cx| sender.poll_send_to(cx, MSG, receiver_addr)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let addr = poll_fn(|cx| receiver.poll_recv_from(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), MSG);
+ assert_eq!(addr, sender.local_addr()?);
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_peek_from() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ let receiver_addr = receiver.local_addr()?;
+ poll_fn(|cx| sender.poll_send_to(cx, MSG, receiver_addr)).await?;
+
+ // peek
+ let mut recv_buf = [0u8; 32];
+ let (n, addr) = receiver.peek_from(&mut recv_buf).await?;
+ assert_eq!(&recv_buf[..n], MSG);
+ assert_eq!(addr, sender.local_addr()?);
+
+ // peek
+ let mut recv_buf = [0u8; 32];
+ let (n, addr) = receiver.peek_from(&mut recv_buf).await?;
+ assert_eq!(&recv_buf[..n], MSG);
+ assert_eq!(addr, sender.local_addr()?);
+
+ let mut recv_buf = [0u8; 32];
+ let (n, addr) = receiver.recv_from(&mut recv_buf).await?;
+ assert_eq!(&recv_buf[..n], MSG);
+ assert_eq!(addr, sender.local_addr()?);
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_peek_from_poll() -> std::io::Result<()> {
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ let receiver_addr = receiver.local_addr()?;
+ poll_fn(|cx| sender.poll_send_to(cx, MSG, receiver_addr)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let addr = poll_fn(|cx| receiver.poll_peek_from(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), MSG);
+ assert_eq!(addr, sender.local_addr()?);
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ poll_fn(|cx| receiver.poll_peek_from(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), MSG);
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+
+ poll_fn(|cx| receiver.poll_recv_from(cx, &mut read)).await?;
+ assert_eq!(read.filled(), MSG);
+ Ok(())
+}
+
+#[tokio::test]
+async fn split() -> std::io::Result<()> {
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let s = Arc::new(socket);
+ let r = s.clone();
+
+ let addr = s.local_addr()?;
+ tokio::spawn(async move {
+ s.send_to(MSG, &addr).await.unwrap();
+ });
+ let mut recv_buf = [0u8; 32];
+ let (len, _) = r.recv_from(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], MSG);
+ Ok(())
+}
+
+#[tokio::test]
+async fn split_chan() -> std::io::Result<()> {
+ // setup UdpSocket that will echo all sent items
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let addr = socket.local_addr().unwrap();
+ let s = Arc::new(socket);
+ let r = s.clone();
+
+ let (tx, mut rx) = tokio::sync::mpsc::channel::<(Vec<u8>, std::net::SocketAddr)>(1_000);
+ tokio::spawn(async move {
+ while let Some((bytes, addr)) = rx.recv().await {
+ s.send_to(&bytes, &addr).await.unwrap();
+ }
+ });
+
+ tokio::spawn(async move {
+ let mut buf = [0u8; 32];
+ loop {
+ let (len, addr) = r.recv_from(&mut buf).await.unwrap();
+ tx.send((buf[..len].to_vec(), addr)).await.unwrap();
+ }
+ });
+
+ // test that we can send a value and get back some response
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ sender.send_to(MSG, addr).await?;
+ let mut recv_buf = [0u8; 32];
+ let (len, _) = sender.recv_from(&mut recv_buf).await?;
+ assert_eq!(&recv_buf[..len], MSG);
+ Ok(())
+}
+
+#[tokio::test]
+async fn split_chan_poll() -> std::io::Result<()> {
+ // setup UdpSocket that will echo all sent items
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let addr = socket.local_addr().unwrap();
+ let s = Arc::new(socket);
+ let r = s.clone();
+
+ let (tx, mut rx) = tokio::sync::mpsc::channel::<(Vec<u8>, std::net::SocketAddr)>(1_000);
+ tokio::spawn(async move {
+ while let Some((bytes, addr)) = rx.recv().await {
+ poll_fn(|cx| s.poll_send_to(cx, &bytes, addr))
+ .await
+ .unwrap();
+ }
+ });
+
+ tokio::spawn(async move {
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ loop {
+ let addr = poll_fn(|cx| r.poll_recv_from(cx, &mut read)).await.unwrap();
+ tx.send((read.filled().to_vec(), addr)).await.unwrap();
+ }
+ });
+
+ // test that we can send a value and get back some response
+ let sender = UdpSocket::bind("127.0.0.1:0").await?;
+ poll_fn(|cx| sender.poll_send_to(cx, MSG, addr)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let _ = poll_fn(|cx| sender.poll_recv_from(cx, &mut read)).await?;
+ assert_eq!(read.filled(), MSG);
+ Ok(())
+}
+
+// # Note
+//
+// This test is purposely written such that each time `sender` sends data on
+// the socket, `receiver` awaits the data. On Unix, it would be okay waiting
+// until the end of the test to receive all the data. On Windows, this would
+// **not** be okay because its resources are completion-based (via IOCP).
+// If data is sent and not yet received, attempting to send more data will
+// result in `ErrorKind::WouldBlock` until the first operation completes.
+#[tokio::test]
+async fn try_send_spawn() {
+ const MSG2: &[u8] = b"world!";
+ const MSG2_LEN: usize = MSG2.len();
+
+ let sender = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let receiver = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+
+ receiver
+ .connect(sender.local_addr().unwrap())
+ .await
+ .unwrap();
+
+ sender.writable().await.unwrap();
+
+ let sent = &sender
+ .try_send_to(MSG, receiver.local_addr().unwrap())
+ .unwrap();
+ assert_eq!(sent, &MSG_LEN);
+ let mut buf = [0u8; 32];
+ let mut received = receiver.recv(&mut buf[..]).await.unwrap();
+
+ sender
+ .connect(receiver.local_addr().unwrap())
+ .await
+ .unwrap();
+ let sent = &sender.try_send(MSG2).unwrap();
+ assert_eq!(sent, &MSG2_LEN);
+ received += receiver.recv(&mut buf[..]).await.unwrap();
+
+ std::thread::spawn(move || {
+ let sent = &sender.try_send(MSG).unwrap();
+ assert_eq!(sent, &MSG_LEN);
+ })
+ .join()
+ .unwrap();
+ received += receiver.recv(&mut buf[..]).await.unwrap();
+
+ assert_eq!(received, MSG_LEN * 2 + MSG2_LEN);
+}
+
+#[tokio::test]
+async fn try_send_recv() {
+ // Create listener
+ let server = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+
+ // Create socket pair
+ let client = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+
+ // Connect the two
+ client.connect(server.local_addr().unwrap()).await.unwrap();
+ server.connect(client.local_addr().unwrap()).await.unwrap();
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await.unwrap();
+
+ match client.try_send(b"hello world") {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await.unwrap();
+
+ let mut buf = [0; 512];
+
+ match server.try_recv(&mut buf) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn try_send_to_recv_from() {
+ // Create listener
+ let server = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let saddr = server.local_addr().unwrap();
+
+ // Create socket pair
+ let client = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let caddr = client.local_addr().unwrap();
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await.unwrap();
+
+ match client.try_send_to(b"hello world", saddr) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await.unwrap();
+
+ let mut buf = [0; 512];
+
+ match server.try_recv_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr, caddr);
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn try_recv_buf() {
+ // Create listener
+ let server = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+
+ // Create socket pair
+ let client = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+
+ // Connect the two
+ client.connect(server.local_addr().unwrap()).await.unwrap();
+ server.connect(client.local_addr().unwrap()).await.unwrap();
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await.unwrap();
+
+ match client.try_send(b"hello world") {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await.unwrap();
+
+ let mut buf = Vec::with_capacity(512);
+
+ match server.try_recv_buf(&mut buf) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn try_recv_buf_from() {
+ // Create listener
+ let server = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let saddr = server.local_addr().unwrap();
+
+ // Create socket pair
+ let client = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let caddr = client.local_addr().unwrap();
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await.unwrap();
+
+ match client.try_send_to(b"hello world", saddr) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await.unwrap();
+
+ let mut buf = Vec::with_capacity(512);
+
+ match server.try_recv_buf_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr, caddr);
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn poll_ready() {
+ // Create listener
+ let server = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let saddr = server.local_addr().unwrap();
+
+ // Create socket pair
+ let client = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+ let caddr = client.local_addr().unwrap();
+
+ for _ in 0..5 {
+ loop {
+ assert_ok!(poll_fn(|cx| client.poll_send_ready(cx)).await);
+
+ match client.try_send_to(b"hello world", saddr) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ assert_ok!(poll_fn(|cx| server.poll_recv_ready(cx)).await);
+
+ let mut buf = Vec::with_capacity(512);
+
+ match server.try_recv_buf_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr, caddr);
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/tests/uds_cred.rs b/third_party/rust/tokio/tests/uds_cred.rs
new file mode 100644
index 0000000000..c2b3914dfe
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_cred.rs
@@ -0,0 +1,26 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(all(unix, not(target_os = "dragonfly")))]
+
+use tokio::net::UnixStream;
+
+use libc::getegid;
+use libc::geteuid;
+
+#[tokio::test]
+#[cfg_attr(
+ target_os = "netbsd",
+ ignore = "NetBSD does not support getpeereid() for sockets created by socketpair()"
+)]
+async fn test_socket_pair() {
+ let (a, b) = UnixStream::pair().unwrap();
+ let cred_a = a.peer_cred().unwrap();
+ let cred_b = b.peer_cred().unwrap();
+ assert_eq!(cred_a, cred_b);
+
+ let uid = unsafe { geteuid() };
+ let gid = unsafe { getegid() };
+
+ assert_eq!(cred_a.uid(), uid);
+ assert_eq!(cred_a.gid(), gid);
+}
diff --git a/third_party/rust/tokio/tests/uds_datagram.rs b/third_party/rust/tokio/tests/uds_datagram.rs
new file mode 100644
index 0000000000..5e5486ba33
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_datagram.rs
@@ -0,0 +1,377 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use futures::future::poll_fn;
+use tokio::io::ReadBuf;
+use tokio::net::UnixDatagram;
+use tokio::try_join;
+
+use std::io;
+use std::sync::Arc;
+
+async fn echo_server(socket: UnixDatagram) -> io::Result<()> {
+ let mut recv_buf = vec![0u8; 1024];
+ loop {
+ let (len, peer_addr) = socket.recv_from(&mut recv_buf[..]).await?;
+ if let Some(path) = peer_addr.as_pathname() {
+ socket.send_to(&recv_buf[..len], path).await?;
+ }
+ }
+}
+
+#[tokio::test]
+async fn echo() -> io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ let server_socket = UnixDatagram::bind(server_path.clone())?;
+
+ tokio::spawn(async move {
+ if let Err(e) = echo_server(server_socket).await {
+ eprintln!("Error in echo server: {}", e);
+ }
+ });
+
+ {
+ let socket = UnixDatagram::bind(&client_path).unwrap();
+ socket.connect(server_path)?;
+ socket.send(b"ECHO").await?;
+ let mut recv_buf = [0u8; 16];
+ let len = socket.recv(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], b"ECHO");
+ }
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn echo_from() -> io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ let server_socket = UnixDatagram::bind(server_path.clone())?;
+
+ tokio::spawn(async move {
+ if let Err(e) = echo_server(server_socket).await {
+ eprintln!("Error in echo server: {}", e);
+ }
+ });
+
+ {
+ let socket = UnixDatagram::bind(&client_path).unwrap();
+ socket.connect(&server_path)?;
+ socket.send(b"ECHO").await?;
+ let mut recv_buf = [0u8; 16];
+ let (len, addr) = socket.recv_from(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], b"ECHO");
+ assert_eq!(addr.as_pathname(), Some(server_path.as_path()));
+ }
+
+ Ok(())
+}
+
+// Even though we use sync non-blocking I/O, we still need a reactor.
+#[tokio::test]
+async fn try_send_recv_never_block() -> io::Result<()> {
+ let mut recv_buf = [0u8; 16];
+ let payload = b"PAYLOAD";
+ let mut count = 0;
+
+ let (dgram1, dgram2) = UnixDatagram::pair()?;
+
+ // Send until we hit the OS `net.unix.max_dgram_qlen`.
+ loop {
+ dgram1.writable().await.unwrap();
+
+ match dgram1.try_send(payload) {
+ Err(err) => match (err.kind(), err.raw_os_error()) {
+ (io::ErrorKind::WouldBlock, _) => break,
+ (_, Some(libc::ENOBUFS)) => break,
+ _ => {
+ panic!("unexpected error {:?}", err);
+ }
+ },
+ Ok(len) => {
+ assert_eq!(len, payload.len());
+ }
+ }
+ count += 1;
+ }
+
+ // Read every dgram we sent.
+ while count > 0 {
+ dgram2.readable().await.unwrap();
+ let len = dgram2.try_recv(&mut recv_buf[..])?;
+ assert_eq!(len, payload.len());
+ assert_eq!(payload, &recv_buf[..len]);
+ count -= 1;
+ }
+
+ let err = dgram2.try_recv(&mut recv_buf[..]).unwrap_err();
+ match err.kind() {
+ io::ErrorKind::WouldBlock => (),
+ _ => unreachable!("unexpected error {:?}", err),
+ }
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn split() -> std::io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("split.sock");
+ let s = Arc::new(UnixDatagram::bind(path.clone())?);
+ let r = s.clone();
+
+ let msg = b"hello";
+ let ((), ()) = try_join! {
+ async {
+ s.send_to(msg, path).await?;
+ io::Result::Ok(())
+ },
+ async {
+ let mut recv_buf = [0u8; 32];
+ let (len, _) = r.recv_from(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], msg);
+ Ok(())
+ },
+ }?;
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_recv_from_poll() -> std::io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let sender_path = dir.path().join("sender.sock");
+ let receiver_path = dir.path().join("receiver.sock");
+
+ let sender = UnixDatagram::bind(&sender_path)?;
+ let receiver = UnixDatagram::bind(&receiver_path)?;
+
+ let msg = b"hello";
+ poll_fn(|cx| sender.poll_send_to(cx, msg, &receiver_path)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let addr = poll_fn(|cx| receiver.poll_recv_from(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), msg);
+ assert_eq!(addr.as_pathname(), Some(sender_path.as_ref()));
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_recv_poll() -> std::io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let sender_path = dir.path().join("sender.sock");
+ let receiver_path = dir.path().join("receiver.sock");
+
+ let sender = UnixDatagram::bind(&sender_path)?;
+ let receiver = UnixDatagram::bind(&receiver_path)?;
+
+ sender.connect(&receiver_path)?;
+ receiver.connect(&sender_path)?;
+
+ let msg = b"hello";
+ poll_fn(|cx| sender.poll_send(cx, msg)).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let mut read = ReadBuf::new(&mut recv_buf);
+ let _len = poll_fn(|cx| receiver.poll_recv(cx, &mut read)).await?;
+
+ assert_eq!(read.filled(), msg);
+ Ok(())
+}
+
+#[tokio::test]
+async fn try_send_to_recv_from() -> std::io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ // Create listener
+ let server = UnixDatagram::bind(&server_path)?;
+
+ // Create socket pair
+ let client = UnixDatagram::bind(&client_path)?;
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await?;
+
+ match client.try_send_to(b"hello world", &server_path) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await?;
+
+ let mut buf = [0; 512];
+
+ match server.try_recv_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr.as_pathname(), Some(client_path.as_ref()));
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn try_recv_buf_from() -> std::io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ // Create listener
+ let server = UnixDatagram::bind(&server_path)?;
+
+ // Create socket pair
+ let client = UnixDatagram::bind(&client_path)?;
+
+ for _ in 0..5 {
+ loop {
+ client.writable().await?;
+
+ match client.try_send_to(b"hello world", &server_path) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ server.readable().await?;
+
+ let mut buf = Vec::with_capacity(512);
+
+ match server.try_recv_buf_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr.as_pathname(), Some(client_path.as_ref()));
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+
+ Ok(())
+}
+
+// Even though we use sync non-blocking I/O, we still need a reactor.
+#[tokio::test]
+async fn try_recv_buf_never_block() -> io::Result<()> {
+ let payload = b"PAYLOAD";
+ let mut count = 0;
+
+ let (dgram1, dgram2) = UnixDatagram::pair()?;
+
+ // Send until we hit the OS `net.unix.max_dgram_qlen`.
+ loop {
+ dgram1.writable().await.unwrap();
+
+ match dgram1.try_send(payload) {
+ Err(err) => match (err.kind(), err.raw_os_error()) {
+ (io::ErrorKind::WouldBlock, _) => break,
+ (_, Some(libc::ENOBUFS)) => break,
+ _ => {
+ panic!("unexpected error {:?}", err);
+ }
+ },
+ Ok(len) => {
+ assert_eq!(len, payload.len());
+ }
+ }
+ count += 1;
+ }
+
+ // Read every dgram we sent.
+ while count > 0 {
+ let mut recv_buf = Vec::with_capacity(16);
+
+ dgram2.readable().await.unwrap();
+ let len = dgram2.try_recv_buf(&mut recv_buf)?;
+ assert_eq!(len, payload.len());
+ assert_eq!(payload, &recv_buf[..len]);
+ count -= 1;
+ }
+
+ let mut recv_buf = vec![0; 16];
+ let err = dgram2.try_recv_from(&mut recv_buf).unwrap_err();
+ match err.kind() {
+ io::ErrorKind::WouldBlock => (),
+ _ => unreachable!("unexpected error {:?}", err),
+ }
+
+ Ok(())
+}
+
+#[tokio::test]
+async fn poll_ready() -> io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ // Create listener
+ let server = UnixDatagram::bind(&server_path)?;
+
+ // Create socket pair
+ let client = UnixDatagram::bind(&client_path)?;
+
+ for _ in 0..5 {
+ loop {
+ poll_fn(|cx| client.poll_send_ready(cx)).await?;
+
+ match client.try_send_to(b"hello world", &server_path) {
+ Ok(n) => {
+ assert_eq!(n, 11);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+
+ loop {
+ poll_fn(|cx| server.poll_recv_ready(cx)).await?;
+
+ let mut buf = Vec::with_capacity(512);
+
+ match server.try_recv_buf_from(&mut buf) {
+ Ok((n, addr)) => {
+ assert_eq!(n, 11);
+ assert_eq!(addr.as_pathname(), Some(client_path.as_ref()));
+ assert_eq!(&buf[0..11], &b"hello world"[..]);
+ break;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("{:?}", e),
+ }
+ }
+ }
+
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/uds_split.rs b/third_party/rust/tokio/tests/uds_split.rs
new file mode 100644
index 0000000000..81614237ec
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_split.rs
@@ -0,0 +1,43 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use tokio::net::UnixStream;
+
+/// Checks that `UnixStream` can be split into a read half and a write half using
+/// `UnixStream::split`.
+///
+/// Verifies that the implementation of `AsyncWrite::poll_shutdown` shuts down the stream
+/// for writing, which is observed by reading to the end of the stream on the other side
+/// of the connection.
+#[tokio::test]
+async fn split() -> std::io::Result<()> {
+ let (mut a, mut b) = UnixStream::pair()?;
+
+ let (mut a_read, mut a_write) = a.split();
+ let (mut b_read, mut b_write) = b.split();
+
+ let (a_response, b_response) = futures::future::try_join(
+ send_recv_all(&mut a_read, &mut a_write, b"A"),
+ send_recv_all(&mut b_read, &mut b_write, b"B"),
+ )
+ .await?;
+
+ assert_eq!(a_response, b"B");
+ assert_eq!(b_response, b"A");
+
+ Ok(())
+}
+
+async fn send_recv_all(
+ read: &mut (dyn AsyncRead + Unpin),
+ write: &mut (dyn AsyncWrite + Unpin),
+ input: &[u8],
+) -> std::io::Result<Vec<u8>> {
+ write.write_all(input).await?;
+ write.shutdown().await?;
+
+ let mut output = Vec::new();
+ read.read_to_end(&mut output).await?;
+ Ok(output)
+}
diff --git a/third_party/rust/tokio/tests/uds_stream.rs b/third_party/rust/tokio/tests/uds_stream.rs
new file mode 100644
index 0000000000..5f1b4cffbc
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_stream.rs
@@ -0,0 +1,411 @@
+#![cfg(feature = "full")]
+#![warn(rust_2018_idioms)]
+#![cfg(unix)]
+
+use std::io;
+use std::task::Poll;
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt, Interest};
+use tokio::net::{UnixListener, UnixStream};
+use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task};
+
+use futures::future::{poll_fn, try_join};
+
+#[tokio::test]
+async fn accept_read_write() -> std::io::Result<()> {
+ let dir = tempfile::Builder::new()
+ .prefix("tokio-uds-tests")
+ .tempdir()
+ .unwrap();
+ let sock_path = dir.path().join("connect.sock");
+
+ let listener = UnixListener::bind(&sock_path)?;
+
+ let accept = listener.accept();
+ let connect = UnixStream::connect(&sock_path);
+ let ((mut server, _), mut client) = try_join(accept, connect).await?;
+
+ // Write from the client. TODO: Switch to write_all.
+ let write_len = client.write(b"hello").await?;
+ assert_eq!(write_len, 5);
+ drop(client);
+ // Read on the server. TODO: Switch to read_to_end.
+ let mut buf = [0u8; 5];
+ server.read_exact(&mut buf).await?;
+ assert_eq!(&buf, b"hello");
+ let len = server.read(&mut buf).await?;
+ assert_eq!(len, 0);
+ Ok(())
+}
+
+#[tokio::test]
+async fn shutdown() -> std::io::Result<()> {
+ let dir = tempfile::Builder::new()
+ .prefix("tokio-uds-tests")
+ .tempdir()
+ .unwrap();
+ let sock_path = dir.path().join("connect.sock");
+
+ let listener = UnixListener::bind(&sock_path)?;
+
+ let accept = listener.accept();
+ let connect = UnixStream::connect(&sock_path);
+ let ((mut server, _), mut client) = try_join(accept, connect).await?;
+
+ // Shut down the client
+ AsyncWriteExt::shutdown(&mut client).await?;
+ // A read on the server should return 0 to indicate the stream has been closed.
+ let mut buf = [0u8; 1];
+ let n = server.read(&mut buf).await?;
+ assert_eq!(n, 0);
+ Ok(())
+}
+
+#[tokio::test]
+async fn try_read_write() -> std::io::Result<()> {
+ let msg = b"hello world";
+
+ let dir = tempfile::tempdir()?;
+ let bind_path = dir.path().join("bind.sock");
+
+ // Create listener
+ let listener = UnixListener::bind(&bind_path)?;
+
+ // Create socket pair
+ let client = UnixStream::connect(&bind_path).await?;
+
+ let (server, _) = listener.accept().await?;
+ let mut written = msg.to_vec();
+
+ // Track the server receiving data
+ let mut readable = task::spawn(server.readable());
+ assert_pending!(readable.poll());
+
+ // Write data.
+ client.writable().await?;
+ assert_eq!(msg.len(), client.try_write(msg)?);
+
+ // The task should be notified
+ while !readable.is_woken() {
+ tokio::task::yield_now().await;
+ }
+
+ // Fill the write buffer using non-vectored I/O
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write(msg) {
+ Ok(n) => written.extend(&msg[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end using non-vectored I/O
+ let mut read = vec![0; written.len()];
+ let mut i = 0;
+
+ while i < read.len() {
+ server.readable().await?;
+
+ match server.try_read(&mut read[i..]) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ written.clear();
+ client.writable().await.unwrap();
+
+ // Fill the write buffer using vectored I/O
+ let msg_bufs: Vec<_> = msg.chunks(3).map(io::IoSlice::new).collect();
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write_vectored(&msg_bufs) {
+ Ok(n) => written.extend(&msg[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end using vectored I/O
+ let mut read = vec![0; written.len()];
+ let mut i = 0;
+
+ while i < read.len() {
+ server.readable().await?;
+
+ let mut bufs: Vec<_> = read[i..]
+ .chunks_mut(0x10000)
+ .map(io::IoSliceMut::new)
+ .collect();
+ match server.try_read_vectored(&mut bufs) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ // Now, we listen for shutdown
+ drop(client);
+
+ loop {
+ let ready = server.ready(Interest::READABLE).await?;
+
+ if ready.is_read_closed() {
+ break;
+ } else {
+ tokio::task::yield_now().await;
+ }
+ }
+
+ Ok(())
+}
+
+async fn create_pair() -> (UnixStream, UnixStream) {
+ let dir = assert_ok!(tempfile::tempdir());
+ let bind_path = dir.path().join("bind.sock");
+
+ let listener = assert_ok!(UnixListener::bind(&bind_path));
+
+ let accept = listener.accept();
+ let connect = UnixStream::connect(&bind_path);
+ let ((server, _), client) = assert_ok!(try_join(accept, connect).await);
+
+ (client, server)
+}
+
+macro_rules! assert_readable_by_polling {
+ ($stream:expr) => {
+ assert_ok!(poll_fn(|cx| $stream.poll_read_ready(cx)).await);
+ };
+}
+
+macro_rules! assert_not_readable_by_polling {
+ ($stream:expr) => {
+ poll_fn(|cx| {
+ assert_pending!($stream.poll_read_ready(cx));
+ Poll::Ready(())
+ })
+ .await;
+ };
+}
+
+macro_rules! assert_writable_by_polling {
+ ($stream:expr) => {
+ assert_ok!(poll_fn(|cx| $stream.poll_write_ready(cx)).await);
+ };
+}
+
+macro_rules! assert_not_writable_by_polling {
+ ($stream:expr) => {
+ poll_fn(|cx| {
+ assert_pending!($stream.poll_write_ready(cx));
+ Poll::Ready(())
+ })
+ .await;
+ };
+}
+
+#[tokio::test]
+async fn poll_read_ready() {
+ let (mut client, mut server) = create_pair().await;
+
+ // Initial state - not readable.
+ assert_not_readable_by_polling!(server);
+
+ // There is data in the buffer - readable.
+ assert_ok!(client.write_all(b"ping").await);
+ assert_readable_by_polling!(server);
+
+ // Readable until calls to `poll_read` return `Poll::Pending`.
+ let mut buf = [0u8; 4];
+ assert_ok!(server.read_exact(&mut buf).await);
+ assert_readable_by_polling!(server);
+ read_until_pending(&mut server);
+ assert_not_readable_by_polling!(server);
+
+ // Detect the client disconnect.
+ drop(client);
+ assert_readable_by_polling!(server);
+}
+
+#[tokio::test]
+async fn poll_write_ready() {
+ let (mut client, server) = create_pair().await;
+
+ // Initial state - writable.
+ assert_writable_by_polling!(client);
+
+ // No space to write - not writable.
+ write_until_pending(&mut client);
+ assert_not_writable_by_polling!(client);
+
+ // Detect the server disconnect.
+ drop(server);
+ assert_writable_by_polling!(client);
+}
+
+fn read_until_pending(stream: &mut UnixStream) {
+ let mut buf = vec![0u8; 1024 * 1024];
+ loop {
+ match stream.try_read(&mut buf) {
+ Ok(_) => (),
+ Err(err) => {
+ assert_eq!(err.kind(), io::ErrorKind::WouldBlock);
+ break;
+ }
+ }
+ }
+}
+
+fn write_until_pending(stream: &mut UnixStream) {
+ let buf = vec![0u8; 1024 * 1024];
+ loop {
+ match stream.try_write(&buf) {
+ Ok(_) => (),
+ Err(err) => {
+ assert_eq!(err.kind(), io::ErrorKind::WouldBlock);
+ break;
+ }
+ }
+ }
+}
+
+#[tokio::test]
+async fn try_read_buf() -> std::io::Result<()> {
+ let msg = b"hello world";
+
+ let dir = tempfile::tempdir()?;
+ let bind_path = dir.path().join("bind.sock");
+
+ // Create listener
+ let listener = UnixListener::bind(&bind_path)?;
+
+ // Create socket pair
+ let client = UnixStream::connect(&bind_path).await?;
+
+ let (server, _) = listener.accept().await?;
+ let mut written = msg.to_vec();
+
+ // Track the server receiving data
+ let mut readable = task::spawn(server.readable());
+ assert_pending!(readable.poll());
+
+ // Write data.
+ client.writable().await?;
+ assert_eq!(msg.len(), client.try_write(msg)?);
+
+ // The task should be notified
+ while !readable.is_woken() {
+ tokio::task::yield_now().await;
+ }
+
+ // Fill the write buffer
+ loop {
+ // Still ready
+ let mut writable = task::spawn(client.writable());
+ assert_ready_ok!(writable.poll());
+
+ match client.try_write(msg) {
+ Ok(n) => written.extend(&msg[..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ break;
+ }
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ {
+ // Write buffer full
+ let mut writable = task::spawn(client.writable());
+ assert_pending!(writable.poll());
+
+ // Drain the socket from the server end
+ let mut read = Vec::with_capacity(written.len());
+ let mut i = 0;
+
+ while i < read.capacity() {
+ server.readable().await?;
+
+ match server.try_read_buf(&mut read) {
+ Ok(n) => i += n,
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => continue,
+ Err(e) => panic!("error = {:?}", e),
+ }
+ }
+
+ assert_eq!(read, written);
+ }
+
+ // Now, we listen for shutdown
+ drop(client);
+
+ loop {
+ let ready = server.ready(Interest::READABLE).await?;
+
+ if ready.is_read_closed() {
+ break;
+ } else {
+ tokio::task::yield_now().await;
+ }
+ }
+
+ Ok(())
+}
+
+// https://github.com/tokio-rs/tokio/issues/3879
+#[tokio::test]
+#[cfg(not(target_os = "macos"))]
+async fn epollhup() -> io::Result<()> {
+ let dir = tempfile::Builder::new()
+ .prefix("tokio-uds-tests")
+ .tempdir()
+ .unwrap();
+ let sock_path = dir.path().join("connect.sock");
+
+ let listener = UnixListener::bind(&sock_path)?;
+ let connect = UnixStream::connect(&sock_path);
+ tokio::pin!(connect);
+
+ // Poll `connect` once.
+ poll_fn(|cx| {
+ use std::future::Future;
+
+ assert_pending!(connect.as_mut().poll(cx));
+ Poll::Ready(())
+ })
+ .await;
+
+ drop(listener);
+
+ let err = connect.await.unwrap_err();
+ assert_eq!(err.kind(), io::ErrorKind::ConnectionReset);
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/unwindsafe.rs b/third_party/rust/tokio/tests/unwindsafe.rs
new file mode 100644
index 0000000000..09fe839456
--- /dev/null
+++ b/third_party/rust/tokio/tests/unwindsafe.rs
@@ -0,0 +1,37 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::panic::{RefUnwindSafe, UnwindSafe};
+
+#[test]
+fn join_handle_is_unwind_safe() {
+ is_unwind_safe::<tokio::task::JoinHandle<()>>();
+}
+
+#[test]
+fn net_types_are_unwind_safe() {
+ is_unwind_safe::<tokio::net::TcpListener>();
+ is_unwind_safe::<tokio::net::TcpSocket>();
+ is_unwind_safe::<tokio::net::TcpStream>();
+ is_unwind_safe::<tokio::net::UdpSocket>();
+}
+
+#[test]
+#[cfg(unix)]
+fn unix_net_types_are_unwind_safe() {
+ is_unwind_safe::<tokio::net::UnixDatagram>();
+ is_unwind_safe::<tokio::net::UnixListener>();
+ is_unwind_safe::<tokio::net::UnixStream>();
+}
+
+#[test]
+#[cfg(windows)]
+fn windows_net_types_are_unwind_safe() {
+ use tokio::net::windows::named_pipe::NamedPipeClient;
+ use tokio::net::windows::named_pipe::NamedPipeServer;
+
+ is_unwind_safe::<NamedPipeClient>();
+ is_unwind_safe::<NamedPipeServer>();
+}
+
+fn is_unwind_safe<T: UnwindSafe + RefUnwindSafe>() {}