author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit    2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree      b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/tokio
parent    Initial commit. (diff)
Adding upstream version 86.0.1. (upstream/86.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/tokio')
-rw-r--r--  third_party/rust/tokio/.cargo-checksum.json | 1
-rw-r--r--  third_party/rust/tokio/CHANGELOG.md | 409
-rw-r--r--  third_party/rust/tokio/Cargo.toml | 134
-rw-r--r--  third_party/rust/tokio/LICENSE | 25
-rw-r--r--  third_party/rust/tokio/README.md | 156
-rw-r--r--  third_party/rust/tokio/src/coop.rs | 379
-rw-r--r--  third_party/rust/tokio/src/fs/canonicalize.rs | 51
-rw-r--r--  third_party/rust/tokio/src/fs/copy.rs | 24
-rw-r--r--  third_party/rust/tokio/src/fs/create_dir.rs | 52
-rw-r--r--  third_party/rust/tokio/src/fs/create_dir_all.rs | 53
-rw-r--r--  third_party/rust/tokio/src/fs/file.rs | 739
-rw-r--r--  third_party/rust/tokio/src/fs/hard_link.rs | 46
-rw-r--r--  third_party/rust/tokio/src/fs/metadata.rs | 47
-rw-r--r--  third_party/rust/tokio/src/fs/mod.rs | 109
-rw-r--r--  third_party/rust/tokio/src/fs/open_options.rs | 397
-rw-r--r--  third_party/rust/tokio/src/fs/os/mod.rs | 7
-rw-r--r--  third_party/rust/tokio/src/fs/os/unix/mod.rs | 4
-rw-r--r--  third_party/rust/tokio/src/fs/os/unix/symlink.rs | 18
-rw-r--r--  third_party/rust/tokio/src/fs/os/windows/mod.rs | 7
-rw-r--r--  third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs | 19
-rw-r--r--  third_party/rust/tokio/src/fs/os/windows/symlink_file.rs | 19
-rw-r--r--  third_party/rust/tokio/src/fs/read.rs | 47
-rw-r--r--  third_party/rust/tokio/src/fs/read_dir.rs | 244
-rw-r--r--  third_party/rust/tokio/src/fs/read_link.rs | 14
-rw-r--r--  third_party/rust/tokio/src/fs/read_to_string.rs | 24
-rw-r--r--  third_party/rust/tokio/src/fs/remove_dir.rs | 12
-rw-r--r--  third_party/rust/tokio/src/fs/remove_dir_all.rs | 14
-rw-r--r--  third_party/rust/tokio/src/fs/remove_file.rs | 18
-rw-r--r--  third_party/rust/tokio/src/fs/rename.rs | 17
-rw-r--r--  third_party/rust/tokio/src/fs/set_permissions.rs | 15
-rw-r--r--  third_party/rust/tokio/src/fs/symlink_metadata.rs | 15
-rw-r--r--  third_party/rust/tokio/src/fs/write.rs | 25
-rw-r--r--  third_party/rust/tokio/src/future/maybe_done.rs | 76
-rw-r--r--  third_party/rust/tokio/src/future/mod.rs | 15
-rw-r--r--  third_party/rust/tokio/src/future/pending.rs | 44
-rw-r--r--  third_party/rust/tokio/src/future/poll_fn.rs | 38
-rw-r--r--  third_party/rust/tokio/src/future/ready.rs | 27
-rw-r--r--  third_party/rust/tokio/src/future/try_join.rs | 82
-rw-r--r--  third_party/rust/tokio/src/io/async_buf_read.rs | 115
-rw-r--r--  third_party/rust/tokio/src/io/async_read.rs | 203
-rw-r--r--  third_party/rust/tokio/src/io/async_seek.rs | 104
-rw-r--r--  third_party/rust/tokio/src/io/async_write.rs | 291
-rw-r--r--  third_party/rust/tokio/src/io/blocking.rs | 279
-rw-r--r--  third_party/rust/tokio/src/io/driver/mod.rs | 396
-rw-r--r--  third_party/rust/tokio/src/io/driver/platform.rs | 44
-rw-r--r--  third_party/rust/tokio/src/io/driver/scheduled_io.rs | 141
-rw-r--r--  third_party/rust/tokio/src/io/mod.rs | 229
-rw-r--r--  third_party/rust/tokio/src/io/poll_evented.rs | 423
-rw-r--r--  third_party/rust/tokio/src/io/registration.rs | 299
-rw-r--r--  third_party/rust/tokio/src/io/seek.rs | 56
-rw-r--r--  third_party/rust/tokio/src/io/split.rs | 195
-rw-r--r--  third_party/rust/tokio/src/io/stderr.rs | 108
-rw-r--r--  third_party/rust/tokio/src/io/stdin.rs | 70
-rw-r--r--  third_party/rust/tokio/src/io/stdout.rs | 108
-rw-r--r--  third_party/rust/tokio/src/io/util/async_buf_read_ext.rs | 258
-rw-r--r--  third_party/rust/tokio/src/io/util/async_read_ext.rs | 807
-rw-r--r--  third_party/rust/tokio/src/io/util/async_seek_ext.rs | 60
-rw-r--r--  third_party/rust/tokio/src/io/util/async_write_ext.rs | 689
-rw-r--r--  third_party/rust/tokio/src/io/util/buf_reader.rs | 194
-rw-r--r--  third_party/rust/tokio/src/io/util/buf_stream.rs | 169
-rw-r--r--  third_party/rust/tokio/src/io/util/buf_writer.rs | 192
-rw-r--r--  third_party/rust/tokio/src/io/util/chain.rs | 141
-rw-r--r--  third_party/rust/tokio/src/io/util/copy.rs | 135
-rw-r--r--  third_party/rust/tokio/src/io/util/empty.rs | 84
-rw-r--r--  third_party/rust/tokio/src/io/util/flush.rs | 47
-rw-r--r--  third_party/rust/tokio/src/io/util/lines.rs | 114
-rw-r--r--  third_party/rust/tokio/src/io/util/mod.rs | 88
-rw-r--r--  third_party/rust/tokio/src/io/util/read.rs | 55
-rw-r--r--  third_party/rust/tokio/src/io/util/read_buf.rs | 41
-rw-r--r--  third_party/rust/tokio/src/io/util/read_exact.rs | 76
-rw-r--r--  third_party/rust/tokio/src/io/util/read_int.rs | 123
-rw-r--r--  third_party/rust/tokio/src/io/util/read_line.rs | 82
-rw-r--r--  third_party/rust/tokio/src/io/util/read_to_end.rs | 113
-rw-r--r--  third_party/rust/tokio/src/io/util/read_to_string.rs | 83
-rw-r--r--  third_party/rust/tokio/src/io/util/read_until.rs | 86
-rw-r--r--  third_party/rust/tokio/src/io/util/repeat.rs | 71
-rw-r--r--  third_party/rust/tokio/src/io/util/shutdown.rs | 47
-rw-r--r--  third_party/rust/tokio/src/io/util/sink.rs | 87
-rw-r--r--  third_party/rust/tokio/src/io/util/split.rs | 112
-rw-r--r--  third_party/rust/tokio/src/io/util/stream_reader.rs | 184
-rw-r--r--  third_party/rust/tokio/src/io/util/take.rs | 131
-rw-r--r--  third_party/rust/tokio/src/io/util/write.rs | 37
-rw-r--r--  third_party/rust/tokio/src/io/util/write_all.rs | 57
-rw-r--r--  third_party/rust/tokio/src/io/util/write_buf.rs | 43
-rw-r--r--  third_party/rust/tokio/src/io/util/write_int.rs | 122
-rw-r--r--  third_party/rust/tokio/src/lib.rs | 390
-rw-r--r--  third_party/rust/tokio/src/loom/mocked.rs | 13
-rw-r--r--  third_party/rust/tokio/src/loom/mod.rs | 12
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_ptr.rs | 32
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_u16.rs | 44
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_u32.rs | 34
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_u64.rs | 60
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_u8.rs | 34
-rw-r--r--  third_party/rust/tokio/src/loom/std/atomic_usize.rs | 56
-rw-r--r--  third_party/rust/tokio/src/loom/std/mod.rs | 87
-rw-r--r--  third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs | 79
-rw-r--r--  third_party/rust/tokio/src/loom/std/unsafe_cell.rs | 16
-rw-r--r--  third_party/rust/tokio/src/macros/cfg.rs | 322
-rw-r--r--  third_party/rust/tokio/src/macros/join.rs | 119
-rw-r--r--  third_party/rust/tokio/src/macros/loom.rs | 12
-rw-r--r--  third_party/rust/tokio/src/macros/mod.rs | 35
-rw-r--r--  third_party/rust/tokio/src/macros/pin.rs | 144
-rw-r--r--  third_party/rust/tokio/src/macros/ready.rs | 8
-rw-r--r--  third_party/rust/tokio/src/macros/scoped_tls.rs | 80
-rw-r--r--  third_party/rust/tokio/src/macros/select.rs | 876
-rw-r--r--  third_party/rust/tokio/src/macros/support.rs | 8
-rw-r--r--  third_party/rust/tokio/src/macros/thread_local.rs | 4
-rw-r--r--  third_party/rust/tokio/src/macros/try_join.rs | 132
-rw-r--r--  third_party/rust/tokio/src/net/addr.rs | 281
-rw-r--r--  third_party/rust/tokio/src/net/lookup_host.rs | 38
-rw-r--r--  third_party/rust/tokio/src/net/mod.rs | 49
-rw-r--r--  third_party/rust/tokio/src/net/tcp/incoming.rs | 42
-rw-r--r--  third_party/rust/tokio/src/net/tcp/listener.rs | 441
-rw-r--r--  third_party/rust/tokio/src/net/tcp/mod.rs | 13
-rw-r--r--  third_party/rust/tokio/src/net/tcp/split.rs | 163
-rw-r--r--  third_party/rust/tokio/src/net/tcp/stream.rs | 869
-rw-r--r--  third_party/rust/tokio/src/net/udp/mod.rs | 7
-rw-r--r--  third_party/rust/tokio/src/net/udp/socket.rs | 425
-rw-r--r--  third_party/rust/tokio/src/net/udp/split.rs | 148
-rw-r--r--  third_party/rust/tokio/src/net/unix/datagram.rs | 242
-rw-r--r--  third_party/rust/tokio/src/net/unix/incoming.rs | 42
-rw-r--r--  third_party/rust/tokio/src/net/unix/listener.rs | 229
-rw-r--r--  third_party/rust/tokio/src/net/unix/mod.rs | 18
-rw-r--r--  third_party/rust/tokio/src/net/unix/split.rs | 74
-rw-r--r--  third_party/rust/tokio/src/net/unix/stream.rs | 233
-rw-r--r--  third_party/rust/tokio/src/net/unix/ucred.rs | 151
-rw-r--r--  third_party/rust/tokio/src/park/either.rs | 65
-rw-r--r--  third_party/rust/tokio/src/park/mod.rs | 118
-rw-r--r--  third_party/rust/tokio/src/park/thread.rs | 317
-rw-r--r--  third_party/rust/tokio/src/prelude.rs | 21
-rw-r--r--  third_party/rust/tokio/src/process/kill.rs | 13
-rw-r--r--  third_party/rust/tokio/src/process/mod.rs | 1078
-rw-r--r--  third_party/rust/tokio/src/process/unix/mod.rs | 227
-rw-r--r--  third_party/rust/tokio/src/process/unix/orphan.rs | 191
-rw-r--r--  third_party/rust/tokio/src/process/unix/reap.rs | 342
-rw-r--r--  third_party/rust/tokio/src/process/windows.rs | 191
-rw-r--r--  third_party/rust/tokio/src/runtime/basic_scheduler.rs | 326
-rw-r--r--  third_party/rust/tokio/src/runtime/blocking/mod.rs | 43
-rw-r--r--  third_party/rust/tokio/src/runtime/blocking/pool.rs | 307
-rw-r--r--  third_party/rust/tokio/src/runtime/blocking/schedule.rs | 24
-rw-r--r--  third_party/rust/tokio/src/runtime/blocking/shutdown.rs | 58
-rw-r--r--  third_party/rust/tokio/src/runtime/blocking/task.rs | 40
-rw-r--r--  third_party/rust/tokio/src/runtime/builder.rs | 519
-rw-r--r--  third_party/rust/tokio/src/runtime/context.rs | 73
-rw-r--r--  third_party/rust/tokio/src/runtime/enter.rs | 162
-rw-r--r--  third_party/rust/tokio/src/runtime/handle.rs | 140
-rw-r--r--  third_party/rust/tokio/src/runtime/io.rs | 63
-rw-r--r--  third_party/rust/tokio/src/runtime/mod.rs | 494
-rw-r--r--  third_party/rust/tokio/src/runtime/park.rs | 245
-rw-r--r--  third_party/rust/tokio/src/runtime/queue.rs | 630
-rw-r--r--  third_party/rust/tokio/src/runtime/shell.rs | 62
-rw-r--r--  third_party/rust/tokio/src/runtime/spawner.rs | 37
-rw-r--r--  third_party/rust/tokio/src/runtime/task/core.rs | 279
-rw-r--r--  third_party/rust/tokio/src/runtime/task/error.rs | 163
-rw-r--r--  third_party/rust/tokio/src/runtime/task/harness.rs | 372
-rw-r--r--  third_party/rust/tokio/src/runtime/task/join.rs | 152
-rw-r--r--  third_party/rust/tokio/src/runtime/task/mod.rs | 220
-rw-r--r--  third_party/rust/tokio/src/runtime/task/raw.rs | 131
-rw-r--r--  third_party/rust/tokio/src/runtime/task/stack.rs | 83
-rw-r--r--  third_party/rust/tokio/src/runtime/task/state.rs | 446
-rw-r--r--  third_party/rust/tokio/src/runtime/task/waker.rs | 101
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/loom_blocking.rs | 31
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs | 49
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/loom_pool.rs | 380
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/loom_queue.rs | 216
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/mod.rs | 13
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/queue.rs | 202
-rw-r--r--  third_party/rust/tokio/src/runtime/tests/task.rs | 159
-rw-r--r--  third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs | 52
-rw-r--r--  third_party/rust/tokio/src/runtime/thread_pool/idle.rs | 222
-rw-r--r--  third_party/rust/tokio/src/runtime/thread_pool/mod.rs | 117
-rw-r--r--  third_party/rust/tokio/src/runtime/thread_pool/worker.rs | 761
-rw-r--r--  third_party/rust/tokio/src/runtime/time.rs | 59
-rw-r--r--  third_party/rust/tokio/src/signal/ctrl_c.rs | 53
-rw-r--r--  third_party/rust/tokio/src/signal/mod.rs | 60
-rw-r--r--  third_party/rust/tokio/src/signal/registry.rs | 321
-rw-r--r--  third_party/rust/tokio/src/signal/unix.rs | 513
-rw-r--r--  third_party/rust/tokio/src/signal/windows.rs | 297
-rw-r--r--  third_party/rust/tokio/src/stream/all.rs | 45
-rw-r--r--  third_party/rust/tokio/src/stream/any.rs | 45
-rw-r--r--  third_party/rust/tokio/src/stream/chain.rs | 57
-rw-r--r--  third_party/rust/tokio/src/stream/collect.rs | 246
-rw-r--r--  third_party/rust/tokio/src/stream/empty.rs | 50
-rw-r--r--  third_party/rust/tokio/src/stream/filter.rs | 58
-rw-r--r--  third_party/rust/tokio/src/stream/filter_map.rs | 58
-rw-r--r--  third_party/rust/tokio/src/stream/fold.rs | 51
-rw-r--r--  third_party/rust/tokio/src/stream/fuse.rs | 53
-rw-r--r--  third_party/rust/tokio/src/stream/iter.rs | 55
-rw-r--r--  third_party/rust/tokio/src/stream/map.rs | 51
-rw-r--r--  third_party/rust/tokio/src/stream/merge.rs | 97
-rw-r--r--  third_party/rust/tokio/src/stream/mod.rs | 819
-rw-r--r--  third_party/rust/tokio/src/stream/next.rs | 28
-rw-r--r--  third_party/rust/tokio/src/stream/once.rs | 52
-rw-r--r--  third_party/rust/tokio/src/stream/pending.rs | 54
-rw-r--r--  third_party/rust/tokio/src/stream/skip.rs | 63
-rw-r--r--  third_party/rust/tokio/src/stream/skip_while.rs | 73
-rw-r--r--  third_party/rust/tokio/src/stream/stream_map.rs | 503
-rw-r--r--  third_party/rust/tokio/src/stream/take.rs | 76
-rw-r--r--  third_party/rust/tokio/src/stream/take_while.rs | 79
-rw-r--r--  third_party/rust/tokio/src/stream/timeout.rs | 65
-rw-r--r--  third_party/rust/tokio/src/stream/try_next.rs | 30
-rw-r--r--  third_party/rust/tokio/src/sync/barrier.rs | 136
-rw-r--r--  third_party/rust/tokio/src/sync/batch_semaphore.rs | 547
-rw-r--r--  third_party/rust/tokio/src/sync/broadcast.rs | 1046
-rw-r--r--  third_party/rust/tokio/src/sync/mod.rs | 472
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/block.rs | 387
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/bounded.rs | 479
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/chan.rs | 524
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/error.rs | 146
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/list.rs | 341
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/mod.rs | 64
-rw-r--r--  third_party/rust/tokio/src/sync/mpsc/unbounded.rs | 176
-rw-r--r--  third_party/rust/tokio/src/sync/mutex.rs | 228
-rw-r--r--  third_party/rust/tokio/src/sync/notify.rs | 556
-rw-r--r--  third_party/rust/tokio/src/sync/oneshot.rs | 784
-rw-r--r--  third_party/rust/tokio/src/sync/rwlock.rs | 294
-rw-r--r--  third_party/rust/tokio/src/sync/semaphore.rs | 105
-rw-r--r--  third_party/rust/tokio/src/sync/semaphore_ll.rs | 1220
-rw-r--r--  third_party/rust/tokio/src/sync/task/atomic_waker.rs | 318
-rw-r--r--  third_party/rust/tokio/src/sync/task/mod.rs | 4
-rw-r--r--  third_party/rust/tokio/src/sync/tests/atomic_waker.rs | 34
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs | 45
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_broadcast.rs | 180
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_list.rs | 48
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_mpsc.rs | 77
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_notify.rs | 90
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_oneshot.rs | 109
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_rwlock.rs | 78
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs | 215
-rw-r--r--  third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs | 192
-rw-r--r--  third_party/rust/tokio/src/sync/tests/mod.rs | 16
-rw-r--r--  third_party/rust/tokio/src/sync/tests/semaphore_batch.rs | 250
-rw-r--r--  third_party/rust/tokio/src/sync/tests/semaphore_ll.rs | 470
-rw-r--r--  third_party/rust/tokio/src/sync/watch.rs | 432
-rw-r--r--  third_party/rust/tokio/src/task/blocking.rs | 71
-rw-r--r--  third_party/rust/tokio/src/task/local.rs | 584
-rw-r--r--  third_party/rust/tokio/src/task/mod.rs | 242
-rw-r--r--  third_party/rust/tokio/src/task/spawn.rs | 134
-rw-r--r--  third_party/rust/tokio/src/task/task_local.rs | 240
-rw-r--r--  third_party/rust/tokio/src/task/yield_now.rs | 38
-rw-r--r--  third_party/rust/tokio/src/time/clock.rs | 164
-rw-r--r--  third_party/rust/tokio/src/time/delay.rs | 99
-rw-r--r--  third_party/rust/tokio/src/time/delay_queue.rs | 887
-rw-r--r--  third_party/rust/tokio/src/time/driver/atomic_stack.rs | 124
-rw-r--r--  third_party/rust/tokio/src/time/driver/entry.rs | 345
-rw-r--r--  third_party/rust/tokio/src/time/driver/handle.rs | 38
-rw-r--r--  third_party/rust/tokio/src/time/driver/mod.rs | 391
-rw-r--r--  third_party/rust/tokio/src/time/driver/registration.rs | 53
-rw-r--r--  third_party/rust/tokio/src/time/driver/stack.rs | 121
-rw-r--r--  third_party/rust/tokio/src/time/driver/tests/mod.rs | 55
-rw-r--r--  third_party/rust/tokio/src/time/error.rs | 72
-rw-r--r--  third_party/rust/tokio/src/time/instant.rs | 199
-rw-r--r--  third_party/rust/tokio/src/time/interval.rs | 139
-rw-r--r--  third_party/rust/tokio/src/time/mod.rs | 130
-rw-r--r--  third_party/rust/tokio/src/time/tests/mod.rs | 22
-rw-r--r--  third_party/rust/tokio/src/time/tests/test_delay.rs | 447
-rw-r--r--  third_party/rust/tokio/src/time/throttle.rs | 117
-rw-r--r--  third_party/rust/tokio/src/time/timeout.rs | 185
-rw-r--r--  third_party/rust/tokio/src/time/wheel/level.rs | 255
-rw-r--r--  third_party/rust/tokio/src/time/wheel/mod.rs | 314
-rw-r--r--  third_party/rust/tokio/src/time/wheel/stack.rs | 26
-rw-r--r--  third_party/rust/tokio/src/util/bit.rs | 85
-rw-r--r--  third_party/rust/tokio/src/util/linked_list.rs | 585
-rw-r--r--  third_party/rust/tokio/src/util/mod.rs | 24
-rw-r--r--  third_party/rust/tokio/src/util/pad.rs | 52
-rw-r--r--  third_party/rust/tokio/src/util/rand.rs | 64
-rw-r--r--  third_party/rust/tokio/src/util/slab/addr.rs | 154
-rw-r--r--  third_party/rust/tokio/src/util/slab/entry.rs | 7
-rw-r--r--  third_party/rust/tokio/src/util/slab/generation.rs | 32
-rw-r--r--  third_party/rust/tokio/src/util/slab/mod.rs | 107
-rw-r--r--  third_party/rust/tokio/src/util/slab/page.rs | 187
-rw-r--r--  third_party/rust/tokio/src/util/slab/shard.rs | 105
-rw-r--r--  third_party/rust/tokio/src/util/slab/slot.rs | 42
-rw-r--r--  third_party/rust/tokio/src/util/slab/stack.rs | 58
-rw-r--r--  third_party/rust/tokio/src/util/slab/tests/loom_slab.rs | 327
-rw-r--r--  third_party/rust/tokio/src/util/slab/tests/loom_stack.rs | 88
-rw-r--r--  third_party/rust/tokio/src/util/slab/tests/mod.rs | 2
-rw-r--r--  third_party/rust/tokio/src/util/try_lock.rs | 80
-rw-r--r--  third_party/rust/tokio/src/util/wake.rs | 83
-rw-r--r--  third_party/rust/tokio/tests/_require_full.rs | 2
-rw-r--r--  third_party/rust/tokio/tests/async_send_sync.rs | 258
-rw-r--r--  third_party/rust/tokio/tests/buffered.rs | 51
-rw-r--r--  third_party/rust/tokio/tests/fs.rs | 20
-rw-r--r--  third_party/rust/tokio/tests/fs_copy.rs | 39
-rw-r--r--  third_party/rust/tokio/tests/fs_dir.rs | 102
-rw-r--r--  third_party/rust/tokio/tests/fs_file.rs | 87
-rw-r--r--  third_party/rust/tokio/tests/fs_file_mocked.rs | 777
-rw-r--r--  third_party/rust/tokio/tests/fs_link.rs | 70
-rw-r--r--  third_party/rust/tokio/tests/io_async_read.rs | 148
-rw-r--r--  third_party/rust/tokio/tests/io_chain.rs | 16
-rw-r--r--  third_party/rust/tokio/tests/io_copy.rs | 36
-rw-r--r--  third_party/rust/tokio/tests/io_driver.rs | 88
-rw-r--r--  third_party/rust/tokio/tests/io_driver_drop.rs | 53
-rw-r--r--  third_party/rust/tokio/tests/io_lines.rs | 35
-rw-r--r--  third_party/rust/tokio/tests/io_read.rs | 60
-rw-r--r--  third_party/rust/tokio/tests/io_read_exact.rs | 15
-rw-r--r--  third_party/rust/tokio/tests/io_read_line.rs | 29
-rw-r--r--  third_party/rust/tokio/tests/io_read_to_end.rs | 15
-rw-r--r--  third_party/rust/tokio/tests/io_read_to_string.rs | 15
-rw-r--r--  third_party/rust/tokio/tests/io_read_until.rs | 23
-rw-r--r--  third_party/rust/tokio/tests/io_split.rs | 78
-rw-r--r--  third_party/rust/tokio/tests/io_take.rs | 16
-rw-r--r--  third_party/rust/tokio/tests/io_write.rs | 58
-rw-r--r--  third_party/rust/tokio/tests/io_write_all.rs | 51
-rw-r--r--  third_party/rust/tokio/tests/io_write_int.rs | 37
-rw-r--r--  third_party/rust/tokio/tests/macros_join.rs | 71
-rw-r--r--  third_party/rust/tokio/tests/macros_pin.rs | 13
-rw-r--r--  third_party/rust/tokio/tests/macros_select.rs | 447
-rw-r--r--  third_party/rust/tokio/tests/macros_try_join.rs | 100
-rw-r--r--  third_party/rust/tokio/tests/net_bind_resource.rs | 14
-rw-r--r--  third_party/rust/tokio/tests/net_lookup_host.rs | 36
-rw-r--r--  third_party/rust/tokio/tests/no_rt.rs | 27
-rw-r--r--  third_party/rust/tokio/tests/process_issue_2174.rs | 46
-rw-r--r--  third_party/rust/tokio/tests/process_issue_42.rs | 36
-rw-r--r--  third_party/rust/tokio/tests/process_kill_on_drop.rs | 42
-rw-r--r--  third_party/rust/tokio/tests/process_smoke.rs | 29
-rw-r--r--  third_party/rust/tokio/tests/rt_basic.rs | 135
-rw-r--r--  third_party/rust/tokio/tests/rt_common.rs | 1009
-rw-r--r--  third_party/rust/tokio/tests/rt_threaded.rs | 327
-rw-r--r--  third_party/rust/tokio/tests/signal_ctrl_c.rs | 30
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_recv.rs | 22
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_rt.rs | 45
-rw-r--r--  third_party/rust/tokio/tests/signal_drop_signal.rs | 26
-rw-r--r--  third_party/rust/tokio/tests/signal_multi_rt.rs | 55
-rw-r--r--  third_party/rust/tokio/tests/signal_no_rt.rs | 11
-rw-r--r--  third_party/rust/tokio/tests/signal_notify_both.rs | 23
-rw-r--r--  third_party/rust/tokio/tests/signal_twice.rs | 22
-rw-r--r--  third_party/rust/tokio/tests/signal_usr1.rs | 23
-rw-r--r--  third_party/rust/tokio/tests/stream_chain.rs | 71
-rw-r--r--  third_party/rust/tokio/tests/stream_collect.rs | 172
-rw-r--r--  third_party/rust/tokio/tests/stream_empty.rs | 11
-rw-r--r--  third_party/rust/tokio/tests/stream_fuse.rs | 50
-rw-r--r--  third_party/rust/tokio/tests/stream_iter.rs | 18
-rw-r--r--  third_party/rust/tokio/tests/stream_merge.rs | 54
-rw-r--r--  third_party/rust/tokio/tests/stream_once.rs | 12
-rw-r--r--  third_party/rust/tokio/tests/stream_pending.rs | 14
-rw-r--r--  third_party/rust/tokio/tests/stream_reader.rs | 35
-rw-r--r--  third_party/rust/tokio/tests/stream_stream_map.rs | 374
-rw-r--r--  third_party/rust/tokio/tests/stream_timeout.rs | 109
-rw-r--r--  third_party/rust/tokio/tests/support/mock_file.rs | 281
-rw-r--r--  third_party/rust/tokio/tests/support/mock_pool.rs | 66
-rw-r--r--  third_party/rust/tokio/tests/support/signal.rs | 7
-rw-r--r--  third_party/rust/tokio/tests/sync_barrier.rs | 96
-rw-r--r--  third_party/rust/tokio/tests/sync_broadcast.rs | 357
-rw-r--r--  third_party/rust/tokio/tests/sync_errors.rs | 27
-rw-r--r--  third_party/rust/tokio/tests/sync_mpsc.rs | 492
-rw-r--r--  third_party/rust/tokio/tests/sync_mutex.rs | 154
-rw-r--r--  third_party/rust/tokio/tests/sync_notify.rs | 102
-rw-r--r--  third_party/rust/tokio/tests/sync_oneshot.rs | 234
-rw-r--r--  third_party/rust/tokio/tests/sync_rwlock.rs | 237
-rw-r--r--  third_party/rust/tokio/tests/sync_semaphore.rs | 81
-rw-r--r--  third_party/rust/tokio/tests/sync_watch.rs | 231
-rw-r--r--  third_party/rust/tokio/tests/task_blocking.rs | 29
-rw-r--r--  third_party/rust/tokio/tests/task_local.rs | 31
-rw-r--r--  third_party/rust/tokio/tests/task_local_set.rs | 466
-rw-r--r--  third_party/rust/tokio/tests/tcp_accept.rs | 99
-rw-r--r--  third_party/rust/tokio/tests/tcp_connect.rs | 229
-rw-r--r--  third_party/rust/tokio/tests/tcp_echo.rs | 42
-rw-r--r--  third_party/rust/tokio/tests/tcp_peek.rs | 29
-rw-r--r--  third_party/rust/tokio/tests/tcp_shutdown.rs | 29
-rw-r--r--  third_party/rust/tokio/tests/tcp_split.rs | 42
-rw-r--r--  third_party/rust/tokio/tests/test_clock.rs | 50
-rw-r--r--  third_party/rust/tokio/tests/time_delay.rs | 176
-rw-r--r--  third_party/rust/tokio/tests/time_delay_queue.rs | 448
-rw-r--r--  third_party/rust/tokio/tests/time_interval.rs | 66
-rw-r--r--  third_party/rust/tokio/tests/time_rt.rs | 93
-rw-r--r--  third_party/rust/tokio/tests/time_throttle.rs | 30
-rw-r--r--  third_party/rust/tokio/tests/time_timeout.rs | 110
-rw-r--r--  third_party/rust/tokio/tests/udp.rs | 73
-rw-r--r--  third_party/rust/tokio/tests/uds_cred.rs | 30
-rw-r--r--  third_party/rust/tokio/tests/uds_datagram.rs | 43
-rw-r--r--  third_party/rust/tokio/tests/uds_split.rs | 43
-rw-r--r--  third_party/rust/tokio/tests/uds_stream.rs | 58
373 files changed, 58926 insertions, 0 deletions
diff --git a/third_party/rust/tokio/.cargo-checksum.json b/third_party/rust/tokio/.cargo-checksum.json
new file mode 100644
index 0000000000..c42614815a
--- /dev/null
+++ b/third_party/rust/tokio/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"667ed24c8858176592e538a5553ec9717c23f478139213f32504138b198b6201","Cargo.toml":"5e5b11a16f09ee040e5c1ece4cda5405f690c40674dad4d7f851f425f8635617","LICENSE":"898b1ae9821e98daf8964c8d6c7f61641f5f5aa78ad500020771c0939ee0dea1","README.md":"05c73061b7882e8361a47590dcca13ef1527d3ad5b36bbd902f921fb982e7364","src/coop.rs":"94609c99d552e1cb56bcf6bba4c600a8a51abe53abdc71af0e788006c640150f","src/fs/canonicalize.rs":"93c64b72abdca17877d6ab61d50a43765d6aef9e0a9f7aaf41b6b0b7d9a8a380","src/fs/copy.rs":"6c9ba20cba87eea6806cff998a3326f4f505f0f8eddda50363cd005da9e149fa","src/fs/create_dir.rs":"b279bf045c5168eb14ad04c91732f575fecd96190c74706989980e6484ba52e6","src/fs/create_dir_all.rs":"56081d541caadca0fc59e84d55e78e702fe9373679598016224ad0b072b189a7","src/fs/file.rs":"c4e98ffcd82b8269cae60e467aaf767be4f2dc2b4f81484201c04febd78a71f0","src/fs/hard_link.rs":"98cccbbb3719baee11c232e79723ab1cb3d6c8056bddb109c4990fe2c236c1fb","src/fs/metadata.rs":"782a1a5dbc2cd6c40e928579fbfcf39e5f1de28def78781590c0280acdf02960","src/fs/mod.rs":"e28959095794d5dfa2e663c779700ca2a5c0ae554529800f04aaec31ca6f1b85","src/fs/open_options.rs":"6f4fa79d8cf973fddc89653ff985091245fda43267e4b00e1756baf2f760f44a","src/fs/os/mod.rs":"65d0bd0e2b2c7e142daf6e0f24f3f7fe964dd0fb222a320dfb54435ef05d70ee","src/fs/os/unix/mod.rs":"0b380eb8ff196d4882a6fafb46912ed0a0e435ca1d2440aa74968e177b0dd026","src/fs/os/unix/symlink.rs":"32cf3e906531d30ebe6d8be7ee3bfe049949759b566015b56d0851f51abcff50","src/fs/os/windows/mod.rs":"ca6b3a8a287fdeeff2468e29ae38822f3d0ac23e46b88246536dc85a46250335","src/fs/os/windows/symlink_dir.rs":"5fbd05365555ba7942ffc1c2dfddf201ddad2cf9b005be2ea99849a473fe982b","src/fs/os/windows/symlink_file.rs":"a1170fd40a000dc449de972267f579a0d14f50dbb39466f985f183fdcd1d3438","src/fs/read.rs":"d5b9261d4deba4e43fd6002c12eff7ddd813319c23634795f4551e3c3f89756b","src/fs/read_dir.rs":"251be63189e9d287597591fda447a498ffad56e522e1de226167592e84e0d66b","src/fs/read_link.rs":"93c104a21253372fef7056ab82e6065b0a1a8fc9be8b7329dfd5a8dd07c618b0","src/fs/read_to_string.rs":"7b4c1760bda91a294da19f2154c39a866a761a494af411258df5d29e6fe98601","src/fs/remove_dir.rs":"96475771e9c52678768288d8df6814216e0801bebc848481597ad34e829a5854","src/fs/remove_dir_all.rs":"fbd5499360545d101b4e8b18f08230133996228be6f5aa3d38b8156521378a70","src/fs/remove_file.rs":"1cdf8bf16b3a164c594dac8773b7d1f9ebb28de169343184d34d6aac3b3a7eaa","src/fs/rename.rs":"a97875e92626fa46e23fece7b8698c9c4cea2bae8f1be8726f30ae6fe80ae0c7","src/fs/set_permissions.rs":"353be1c55addde39ea078d86034416ea857c8278c9953fc6704306e57449756a","src/fs/symlink_metadata.rs":"763404b781d073fabc9ef8fabe8a9e3b596cad4cd77c99b2bb214a13d3888740","src/fs/write.rs":"062aa6c0c4862ca7071171f505182202b88d5e919806e61ef83b24a4a4bda36c","src/future/maybe_done.rs":"8098e69168044c2bbdba95a3a3da339fbb2d8f95edd9e74db3ae744a4b6c379f","src/future/mod.rs":"f83ace028a163a352e32ab38a8065886421179a9cd5edba0aece889af9a3145f","src/future/pending.rs":"2bfa584d25299f8d610cae8ebd652f1840dace5086b6d50c64400f4d8680ddd6","src/future/poll_fn.rs":"1ace3c3f0f392589d6454b56218c825cdbf11d72d95b170986782a413a929627","src/future/ready.rs":"56a939d2ad5c9925e073fb4286bf105327fcc9b387b998dcab14df0a50d2b14f","src/future/try_join.rs":"a12b2c22ef92b7887a7e30410a8947fa0f03e3bccf0c754a1f1e4e3c89e23599","src/io/async_buf_read.rs":"938e371350c6e077783cd76063f5fbaf750b2f3f7c7ff521b057e012a7b6f40c","src/io/async_read.rs":"130e3f1c13df0e19f74a0aa85739e250cc11428a0cea49b7f83a85f7ac689c71","src/io/async_seek.rs":"dd04411de11a1a10ac8295440cf31c1f5d
6e50325e6676561b330cfc2113cf10","src/io/async_write.rs":"46408699ad19c0aa123d18130d3c875f5997e28558b912c2644cff47559cc1cb","src/io/blocking.rs":"30378c4d6806047ac0f34cb333f158c3dc147d65757bd4452a5fe580c6d9c3c0","src/io/driver/mod.rs":"81191ae2ee401359d6168190f87858cd5ead324a95fd062b53128e0f995d8f03","src/io/driver/platform.rs":"023acd3f2703d241b3e91ab6e4d4c0bc5ccc3451655fffd9f37938224f915494","src/io/driver/scheduled_io.rs":"f8270a5274314cf131cd2ac07b77cbd0a6f500f07a8de1248dbe7974d1be307b","src/io/mod.rs":"66d330ac7b6baef17c627d6ed7f271d6a7d5b8e7abdab63ec5b306b248d1f78c","src/io/poll_evented.rs":"0b12aef3a94256edaf9e7f369c95be1c731372b33dc021fd752cb99b160f82b5","src/io/registration.rs":"8538079a5772be35a87d03973871cc2ba872ff6090b4d57b6bcb2593770eb33f","src/io/seek.rs":"5a33c2d4ef28fbd8e409603222462da88a2c21c8d1419be3ac16c95aac1b2259","src/io/split.rs":"da44112c70436841d8ce04695ba61aec2c1fe41d449bb6c098f7d40c875bb32b","src/io/stderr.rs":"b9a5a87305740bc2220f2838168dda6ed10390a2f990ddcdc61eb79040547265","src/io/stdin.rs":"2f8ab17661b68e9e0be0bb0a8ff990ee5bd0eb51d0aac7466b773b96b0b6be6d","src/io/stdout.rs":"35e751837fd6311180d360b007e7cb77cf314118441347c1037aeca59bdbf53f","src/io/util/async_buf_read_ext.rs":"9ba5c10964369415cdb58891ee2a883faa31b62a56657c6dfea4226c8e06cbed","src/io/util/async_read_ext.rs":"da579c1deef3332f74e1ba7f354786cef4142458ae7b26c4a5804abc1cb3659e","src/io/util/async_seek_ext.rs":"feebd37564f45281b4e5ae0276d02ecdec1b874e82fd58b68a5cf91979e9d5f3","src/io/util/async_write_ext.rs":"a07d4435ca831c7cae97a13e1a2d81f5bc962380617048b3789fe6fc06039931","src/io/util/buf_reader.rs":"c39eeed7a4c8fcd6c2e88aaa828b33b9c77e06b82654288f03c5797e68ada7ff","src/io/util/buf_stream.rs":"405557d37265e4459fac7d57be9cc634fcf083dba9cc098436f916c750f53661","src/io/util/buf_writer.rs":"d31562223daac0c3992b5e1ee1ab247040410d99c046a83e15d06817b13d08ee","src/io/util/chain.rs":"bff98b85745c234c4f25abdf95ae951cbf82a400e672939941eae85a2f0b811e","src/io/util/copy.rs":"71d26e90117f79dac28dd68ef02ecab9a4b6f53fb76f6b596df4c5a5afb52f9c","src/io/util/empty.rs":"a1037dda7fd857ac28064f71d817e5a6b4583fcbd364172310446dba2d3cb3fe","src/io/util/flush.rs":"0c1385ca2f4b1320e78946234b5c2a73d4fbf8c50cfb54b6525de48d1cdd8531","src/io/util/lines.rs":"bb842e3c3601b096d35f55d175d68f6267c51fc2532f87dbb09435f3964d2a23","src/io/util/mod.rs":"e0a146eb248217408e72ef0bde4aa95fa105bf78b0d138a02a885ba8243a7098","src/io/util/read.rs":"0d79b5939801dd5ed54332432d55a965e5e99f898cb1955d30a36b12bd0cccda","src/io/util/read_buf.rs":"8c7e9b0837db44fa2265f8c8d9a3f68d4aec9dccc29b15e6d982d4bd334624b5","src/io/util/read_exact.rs":"6d58c50f2579abbe0a4b30a50f028542b7c9e0618c7d6e62f64b8e3096c53847","src/io/util/read_int.rs":"46a913e2c806a21c205a67b6428ad5fcde00593af14e1f70abd73fc108b51ce7","src/io/util/read_line.rs":"3bc67395b063ee7fdadccba81ba8e3118b6cd2b142afee06c609b5d8ab17709e","src/io/util/read_to_end.rs":"5b52e3e2f8b668e40e4f8abfd7f40efa09427dbfba652886977249848160b83e","src/io/util/read_to_string.rs":"86a05b50addb1f6a6d11ddc1fa9693fcc8baee365a48f9f2410cb70a04ce6d13","src/io/util/read_until.rs":"567e5e7c702895903ac3a0bb70caeafbcd286b869a06dfb7806a57909a99f47c","src/io/util/repeat.rs":"1b9a91fc5a9f2bdba24f3a1ece48f123eeb60e48fc713a5edc88b1698e920675","src/io/util/shutdown.rs":"31df78d39b21b6c4b92ee0d39f304a83a8f8a60cfa8f14de73e7a9b6c7918239","src/io/util/sink.rs":"0dcb794e48ca9b1c28e5f9f2051073ea0951a54c9c7dfc903ce9e5489d3d8cd7","src/io/util/split.rs":"945d7a3a2f678471fd1e8277279be7652770294fbf0c34a2e10bde038af61bae","src/io/util/stream_reader.
rs":"853bdd3518eae4612231d68301a51f27abd8a23000a05ff0b87e2c087af415b2","src/io/util/take.rs":"836051a1976ef3aea336005b883c3c787059038e82b4291898af93e16de8e6cb","src/io/util/write.rs":"566fa287807cbb760660cb4979d975aa742673bc5a384df0b4420e753093b22c","src/io/util/write_all.rs":"ff9ea6e45ce6ee23507c8d01e746cf304fd6d3c96a9f4329501f7f06c17ce874","src/io/util/write_buf.rs":"401bf6e657dd36b3780417afb5188bd5eefa6095d38820152ac976b76f26002a","src/io/util/write_int.rs":"59634c4991685dff6e500ae9c02cf541ff3b2bb589cc2d2fada3ae6e84320bda","src/lib.rs":"008f6eaf2c2fc761497a90db834c2a9d4b5cce6d186722384d478493948de682","src/loom/mocked.rs":"7b0cc296fbbc42ffe5fec908cf75ded902dea3be7041f511f15c9a695a4de292","src/loom/mod.rs":"1e4d6445ed9c33495086dbfa640ab8626d86b71927e102c8aa97932c3635eb56","src/loom/std/atomic_ptr.rs":"68925e66a31f2040ed81d9e26358a4eec80c4764cc77003177a9c1f2b1df294a","src/loom/std/atomic_u16.rs":"70a016e77d99248ca7fc2b60352ff13ca9747b88fde15cfc30dcd03074aa7259","src/loom/std/atomic_u32.rs":"9fdc8d389f156510b43274b3230cbc6659aa7497f504cf51190c9400f43d981a","src/loom/std/atomic_u64.rs":"ebcc370af4116e4ad0a3bfb33f693cbc4ce5b254ff270eae26670d971e10cee2","src/loom/std/atomic_u8.rs":"07730a5eeea1034cbba4fcec0a7fcf5a91a40bdc9b5f68e9c12df7867707b564","src/loom/std/atomic_usize.rs":"cdaa249cfb53d42421bbfd3f491e294b24edfe55ee26ddf511935765f2d63c91","src/loom/std/mod.rs":"d5525e2b0e86c9018b9c883600cd8af2b41fc1b815185d58b114f44b93bd20e6","src/loom/std/sync/pl_wrappers.rs":"dbd3adc3cee887143670b8f1729a7cdd410037287b354ad13c702a8077fcf223","src/loom/std/unsafe_cell.rs":"5a153493edc71b28a2f6867349e5f9903b198bc2faf7ae61528f08b09bf9fe04","src/macros/cfg.rs":"5e6c5ee52e72e2ab1e954bf2552bf84a4699e8c7cf81cc06fea8bff41b19ab2a","src/macros/join.rs":"ab10af3f968d26ca82257c2cccf8efa9c4ea21077682660a2c49b88d45152580","src/macros/loom.rs":"80d2e4af9fc50d0bda1b20b95f8873b2f59c3c0e70f2e812a6207855df76204e","src/macros/mod.rs":"f46caf9b639449db666d8537932660179269284840214201017e84fce7b4eac2","src/macros/pin.rs":"680a87c043b176a9c48e24a25f5edafee3a7bb99f7dcf2b251f7bac2de293541","src/macros/ready.rs":"6efd4c866c4718c3a9a7b5564b435e2d13e9c1ae91fd98b1313d5e7c182942d6","src/macros/scoped_tls.rs":"f8ead3b64978b8ed3e9c8a73ab3151921296ebf6335af35edc29d4268ffb0a3a","src/macros/select.rs":"3f4bc371672235b9089b504d98e92e52a3fb4bf2f1dbc4b50f2796df9f193398","src/macros/support.rs":"b25ae54e6a21180d1c11b19899ad64f51e4f95026df443e1019b52708c4d9762","src/macros/thread_local.rs":"8602495ed102b63e3048a261eda7483dc9a24b15a74d7059c31635e8f45de19a","src/macros/try_join.rs":"8e87155b3ef4717783128b5af1c1d4fb6f883ed70df7adec1dce75f1ad5734b1","src/net/addr.rs":"555edd9f20be41f4a035cdb395b6170baf5321c9b6ccd52b50ebfb9324f8229c","src/net/lookup_host.rs":"178ec6165315702fce5aad5ef5e715959945cf33ca9d57c1f3fbc454694a8b05","src/net/mod.rs":"2e6a8af8e6dca7bb6bec13cc558fa2c63d93ce0c1437d750b193e250ab067016","src/net/tcp/incoming.rs":"aefda2a41273a833faec35e4e064f21bdf79811d269a5d419ad0d4984827eb7e","src/net/tcp/listener.rs":"e8b77b0eb1302e9696eef2ebcf1f3ebf99b8c5d25720180a5c839086bc0b591f","src/net/tcp/mod.rs":"28dc77696122eae60c70bfbc7d8abbc795b847d82f497b5a31ff4e396b0a50c9","src/net/tcp/split.rs":"33d207a82fd88fb450c2e2bc96f6fead26ffd969aaf4501a8141dfc941ba925e","src/net/tcp/stream.rs":"0a94895f9b3f8f450e2cfd212f784a6044236fd0b9ed125db56b146a35bcc46b","src/net/udp/mod.rs":"eecf3139b6a5f8cd8e02c5db18d4f7ce3cb005448e490b48f88c0e80e19c5ee0","src/net/udp/socket.rs":"82630bb7a47b088b7376b8870a5c83f75f6f0189bb645886575efe3848ad901a","src/net/udp/split.rs
":"a63c51abd3a3dbd7fd1939815eb2e1d26205ce087018c598cf54aea448532951","src/net/unix/datagram.rs":"a3f0a00727cc40ea0b03816616de98b540d2b39dff610fa44223e06d2fb70ba2","src/net/unix/incoming.rs":"597cf15c347f4a48285cc20db5c2a82c46fd033e529c30378988ffd27b68616e","src/net/unix/listener.rs":"34d3230cd5eadf1a73e11f75eabf0cdbc6d49ed14ac0ebaf63663f94d599871b","src/net/unix/mod.rs":"15b1e40d34d0be75b378333c6954ae337dd40ab8e2ccf9355c062a8b77d5459e","src/net/unix/split.rs":"94beded00a3eae157a728259b9b4b89e373b24d64bec0d805c12f4a72a119ef9","src/net/unix/stream.rs":"015b64d039b178ce927910c16f117d3813714cf20123ed7c917ccd785ea1c4a3","src/net/unix/ucred.rs":"e5883de81e1078c0b7373a0f1394632082ba43c031fe9b8081a795194ec40d14","src/park/either.rs":"48411722555ee7584a941151c9a383bd45523b14a83526dc4c3d4d4bc374f5d7","src/park/mod.rs":"c46f06cecc24e948687cb6b633b069288766a5ee0d06df1c720942885ce9c889","src/park/thread.rs":"8dded924c590e05c6349fb51503382b8394f8c332bf96ce18cb7ebad3a342c69","src/prelude.rs":"939b8305ef2c1b74425b1a50c6f25b0a5bed14215fc296385777e60d42d6361d","src/process/kill.rs":"2f98bd1bd28ab37bedc34ab7b737760407ab5315420538acbd18da31d2662d94","src/process/mod.rs":"d74a287bfdbdeceaf427e50fd7953582eec59f7f4a839d4cfef0b337943c0fda","src/process/unix/mod.rs":"1c4e2c8c2bb0a34fb584ca3553f97d25ca3cee25adf7f6239efe38742e2fc22b","src/process/unix/orphan.rs":"54c137c389ec981d59975721d52b7e01cc51100522759fa0fb5bb9c252b937b9","src/process/unix/reap.rs":"9b1c7c5d2f50f7e7d2ce4f4ef9e662a01a577b63c45ba428fc4bd088ec92a75a","src/process/windows.rs":"1ed0999f5e7e3d8c081d55b1f1dd977bca6f20024c1a55de49d374c0dce25605","src/runtime/basic_scheduler.rs":"6cb20803ff59ac8f7b2aa1592febed64311e71e3cda9c5dbc6184e962058b268","src/runtime/blocking/mod.rs":"26e98c01023341fcab14d9eefd2212c80587430fd1fce1a396deea187fe60d1f","src/runtime/blocking/pool.rs":"a57a04e5cb9587665172140f3c4811a2cc7e2430d18c5ba0817f785c482d3569","src/runtime/blocking/schedule.rs":"e7e89a11210048d95292b5c963487393c88aa30f42c5b66f612114d382629aaa","src/runtime/blocking/shutdown.rs":"8350c80fccd40aaed83bcb92be1e495e431bbb2daaa285ac3fc243e0bca40d4a","src/runtime/blocking/task.rs":"0341b38cdb9015d14759b4c724949b37daca1def6c0128c55b0456b91fb5e3ff","src/runtime/builder.rs":"181f882a435973f86b04db6e9c2580e6f9ed4a7ed1047b841aa04b5d186614ba","src/runtime/context.rs":"37547c9238abb6a021d25e56db06e39a1d6d007627ca2b55dbb657bc649039f4","src/runtime/enter.rs":"f1b9579bdbb36fe2f0700013733f4617476df8fc365e3640e89f1b492125661e","src/runtime/handle.rs":"cb70ba21112c2b0dd2d375ee9ea6e22666df9af4c70f46fd4f88eb364d4b2acf","src/runtime/io.rs":"647031bab22ecc9db5eaacdd77d83748cd34f56f49040fe6280db6d90f6851ca","src/runtime/mod.rs":"d21d760d554a26e8f70e04e9efa1ff0417a2294e949c4f6a0cce2bb86a09c31a","src/runtime/park.rs":"e9aad15db4c77677ec4c2e8f8b3b51d726b31203cbcf29125968741855877ae2","src/runtime/queue.rs":"0fe349c5efaa038064fc65ccc67b376b31f4b6df5dd797cca754d25ad65a0da2","src/runtime/shell.rs":"0cfd943c7828d1f9c81c496766651150bcf94050bb828c9b0389adef2233a592","src/runtime/spawner.rs":"f5056374336bd21a10e0e4301f99125bd745fa0d8898c85cfd867e8e1e82999b","src/runtime/task/core.rs":"ee2ddfe828433bc56b580e4ac3a3d7b6cb7d06117603a9af6258616cc325860f","src/runtime/task/error.rs":"c50e7e0945ccd046803d745655566c8f5b83438b10168e01e9e972146941214b","src/runtime/task/harness.rs":"2fc02df67445871f28fa09eba2ea81241f2422f2d7033aaeca4e19bb4d8f011c","src/runtime/task/join.rs":"fe1d232b12715e716baba243414c26d53a0382327eb994e633c34c0af1fbe467","src/runtime/task/mod.rs":"b7c258ef949f2146714c20c2df75f90a04de7b8
2722a98cc7d99b15d917bd738","src/runtime/task/raw.rs":"8ca0d5e199a0c87ad8dbe5fe1ec7dca38ac18f5088f37e99587e752bb2945083","src/runtime/task/stack.rs":"6f1204e3f96f0b9375c53dce0f061dff7a91e9f111e0be24fe07a5490f6f1720","src/runtime/task/state.rs":"e7c9577e66990704e36f8ccad9b3db41a30cf42aaa74611d648541895fac5146","src/runtime/task/waker.rs":"06d937d78301589f7b6baccdb84bce1266edf0e97462719d8816fdcecb0ebb42","src/runtime/tests/loom_blocking.rs":"38ea6c220fab212a3436c682375e19bbbbf2a49bcd9aeb1adfa200fd205f82f3","src/runtime/tests/loom_oneshot.rs":"82e21a3ae98f937e64c5f5c7357537f60ba2738f312feed5e1c9ff727ee3ee39","src/runtime/tests/loom_pool.rs":"a42836021d5843be97cbd88bd9a8e561426004b15655391395bc54026f433e54","src/runtime/tests/loom_queue.rs":"022527b961e310b6c624a118b71c1f69b3b3cb7b179005b3ccf55b330b0c0a8e","src/runtime/tests/mod.rs":"8d73d4e756ee657bc0ee0f123a9ea0c007a42b9f1a96a521528ab14d51a8c1f1","src/runtime/tests/queue.rs":"4ebc75356991564bc083fbd7b6e823bcce2c9c6ba30bff13eeb1b8afd718de77","src/runtime/tests/task.rs":"a013950824ed3ad940d9a8929b3cf3f914115ed21393e16cc366a7fdd87ee907","src/runtime/thread_pool/atomic_cell.rs":"caa3b2262f68a46a8d9be7f3a2e52025b2595767fa6f8ff2b511504ac32cb246","src/runtime/thread_pool/idle.rs":"c8a7864b00c75baada20a8b57df732f23337428d958da8837951d76af577bb98","src/runtime/thread_pool/mod.rs":"030ba4331bdf00144bb64ff5cca4d8e6d4bec362f0013261c3bf7abfb4e703a3","src/runtime/thread_pool/worker.rs":"6ad71535235d3c6a142a74af7a6d3fc588e5ec28af712275c6591416e27f590d","src/runtime/time.rs":"ef51c298a438fadead903a97b9e508ea3013af8ca8198db95922ed787a19b5cc","src/signal/ctrl_c.rs":"d58397d8057c86408156a5f2e7322de2d99995aba54cdc185bcfff1a702f84e9","src/signal/mod.rs":"d2094a189aa9c7a4b450adaa78b5b6df5f73fb8f474fc3aec04b9338fcc60133","src/signal/registry.rs":"24ce538cf5393cfe34aaba46ab9c081f01803dc7431b9b5da0acab89fa7fb991","src/signal/unix.rs":"00f212b1851c022c18ca02fb784c3b932a18851f7211f9d51cf121fa715a7593","src/signal/windows.rs":"7cdebd8924aaa81ba98e047db08a1bca29f844974f2a1feb59a1ebaf3cbd0434","src/stream/all.rs":"124d4f3846e9a79dbc128c37f2180ea72e6d48ca760e22eb42bf835e31638fa8","src/stream/any.rs":"cc6638cc89c20969ff8df7cde21d5158c1d4f64135cddd56dcc178f7d56ccc8a","src/stream/chain.rs":"c0f775327bd92f6d636c7160d72738736c243a52b45cdb312deb4fe4b022e174","src/stream/collect.rs":"a0e24ad80bc3f3e8ba419a5df5ea7f93209f6eb0dac85f4d2be0e353821ba7a5","src/stream/empty.rs":"fdd02721f67bfb819971ce50d5bcf598a9bbf1dd265d308dc16cd0704ac3e7b8","src/stream/filter.rs":"2191e2c734ec97c9b70da380c808483d62e1099d7feccdb8449192a09dc72ac4","src/stream/filter_map.rs":"70934237d4acc3b21393ff73876dd6c28fb17d9a350c9827fe19261438970a7f","src/stream/fold.rs":"f81518d6f7058f249889fe107f7bf322436f28ff99317682f093ccb41094affe","src/stream/fuse.rs":"a374a9c268cea72a787dde18193f3420133a2acc80e9a242f766430ac72d06cc","src/stream/iter.rs":"512482ce3e54616e8f92d301fed98532b9e372733c86541b0a86e04d3fd1c853","src/stream/map.rs":"c8c84658bd00f1e5a32d2d7b2066f7b9c0f3a5ef76cfccc0a40afde3732b139c","src/stream/merge.rs":"66c9c624244039e6b7816cfd8b6b589ab37f82605177035d5967193d0643ef50","src/stream/mod.rs":"2c27e8ed384964043a0e02524ffd0b5577ff119d9f8b4905dc8f87443f59cdc2","src/stream/next.rs":"b2cadcfd467a8673e6de5023b56131ff23ee5508847c806b778cf48123664407","src/stream/once.rs":"f61e205581443c5f569137591244686fa551b9703aa36fad4219e4f2a8ccdfed","src/stream/pending.rs":"a518fdcb48c1edc911942fbab29fae3507dae355f8bef8f6da842c356891bfda","src/stream/skip.rs":"f3f54b1afc5d353f9480f3e0ad0272f82da458610f772c4becead03983d34b87"
,"src/stream/skip_while.rs":"1a6d9332ff7e5d530e442a2661bd0a30a75674b7bb2c16fb5fdfdb671f42a1de","src/stream/stream_map.rs":"4fa77eebcb58cc79976dd9d04bd497f733b526736bd1e19d4a8a6130d3ee0291","src/stream/take.rs":"4bfbb36f50cc1b90ecc49494e063e113a366468e7daa7643781821668a8fb2ec","src/stream/take_while.rs":"35523b54651a31de22cabdffe09a4ce1cbb548ea121f76625744dad3c0366d97","src/stream/timeout.rs":"28e4d7a18cbf6291f9dedaeac47439db0d0756c3295307a145f494561ab53697","src/stream/try_next.rs":"2a26cd73d624bc936840f8cb2c894ac69dd47ae612f45bfcd47c36447aa1318f","src/sync/barrier.rs":"f5953c6117ec0b0813639d636e4a8e9ece87854f496711605e8dccc7aa7331b3","src/sync/batch_semaphore.rs":"967d65eeb7b8f99dec916cd5f2a5d3e19afde5f0ffb3156fccf1805d1c665be7","src/sync/broadcast.rs":"3f22bdd0903d2fc0a593b676b729b908d910304c44166ac49b78fb871f4d2987","src/sync/mod.rs":"85f1a28d2931e617edd1ce4aac7e33403d87f0acd641cd0142d7bc5d2750b3c9","src/sync/mpsc/block.rs":"afb7210cac1bbdc6940b5f64376692152c769989c40028d7ce5480ffbcda816e","src/sync/mpsc/bounded.rs":"a6abf8814b4bd222c54e29b27d4ed511c9376a416375fd3d652f460d41b471f1","src/sync/mpsc/chan.rs":"9b29da521b191cc3c31627896dad3196e172271aab35ec61c2146b57fb727c8e","src/sync/mpsc/error.rs":"74e5cb0f3d037b3cecd42c59260bca806a2fdee230e5af150e0fee95ead6b6ef","src/sync/mpsc/list.rs":"8a186b3f3cdfd39f4c9af7079104a0daf1bf08eb07dd638ecffc53494cf25db5","src/sync/mpsc/mod.rs":"2b959230525be1dea4c50c78ba2d0182457338fc9cbdeb9ac33ec77c249f171d","src/sync/mpsc/unbounded.rs":"dfad18114ee6029d65a1a9d510c6dc67a2ece6ec2d887c43e0524686f1dd3d79","src/sync/mutex.rs":"34faf060da9b74833c795d3fa86080d0b20161370f82ad18ad22cf6ac7c772da","src/sync/notify.rs":"6ec7e93d8ce4f1fb413aceb84c22ca1541074fc3e92a24cf69d68bf98a63bc58","src/sync/oneshot.rs":"df47a8dc96c29d653603f20ff082bfd1c9782302df96cc07c6733c039f1e5f02","src/sync/rwlock.rs":"9c270e79a141cf422f339fa8ef0516f1e37a9bc6c674e09d8e6101ae9652c411","src/sync/semaphore.rs":"b65b7f9a3d9c5306508764bb920206ff59e9f5a0e56371ab67dfb79ca24084e1","src/sync/semaphore_ll.rs":"d14428776a85fe857af6e44bfe7a7b9d27bb350afd1bccabbdcbf3f90eaf2ea3","src/sync/task/atomic_waker.rs":"ee1191ce709e650a7a9ae43cf5e2529e82013460c1a50fb34dd4d1d9522caf5d","src/sync/task/mod.rs":"f5e38105c7f8a942c0e49b973bad0a8c2a1df81deea19f3c5228edc4896c1725","src/sync/tests/atomic_waker.rs":"aa0184eaef7fb5098d9120c941c282e5400dcce8ad0880d3568644eff134ee94","src/sync/tests/loom_atomic_waker.rs":"d6c110bd8cc99989f1f4160cea929ee6faca695d0bac1a71d257898faaad5d27","src/sync/tests/loom_broadcast.rs":"f251c32d8f5c959707c36b29ab4b0bde518b45205d2e02512a3d2963ae452549","src/sync/tests/loom_list.rs":"f0ce15a0f965fe558a21bca24863c712156eaeb10feb8ef91031a6d6e3cc5dba","src/sync/tests/loom_mpsc.rs":"94a62f2c727b1dce8cbfbcd65303550fffcb5a302359b6f51cd475ba3f5d97b7","src/sync/tests/loom_notify.rs":"36d918142044b2e617330ab3d156d0c3f8f0398bf5d534683428e30f8115fec7","src/sync/tests/loom_oneshot.rs":"32583f9b711b79a74886a23241773819a473ba7e95abfb7d6531f8ddff18bbec","src/sync/tests/loom_rwlock.rs":"a1aca5bb8bdd073d8f8ae1dd921c95f301e6d4aa26eb31332c3c10a93fe2dbdf","src/sync/tests/loom_semaphore_batch.rs":"c6f69b8d5b2e6842287ed34638a9045095d9f94c86ba6bb84c1224bbe10026ff","src/sync/tests/loom_semaphore_ll.rs":"d56bc913f1bce3ee1254cea943dfb6b202ead552fd6fa69418e78cbcc36cfd9c","src/sync/tests/mod.rs":"582ba16eeb98db8ede7c52355c771de206d0c5a1690da5aaeea62353910c2391","src/sync/tests/semaphore_batch.rs":"24b6434fa9b70cfc7e77e079b8609f7c44b3856894479e9eab8c36c2e4326ead","src/sync/tests/semaphore_ll.rs":"9f00c8cc55ab788f63765e11f9d
76f10a9a11186d8dd5cf585eaff42415e743c","src/sync/watch.rs":"6b7368c8794e646b06cc7de7df2daf23522b3b1f112858959594de368560f16a","src/task/blocking.rs":"bc6198e4aebb801d1e816be208bf54927a8b60f0f4961641cafa9ad8020365ae","src/task/local.rs":"0bcf136f4ef010d6c360d89f40b73d79865f7f6c74fd1a25bb7faa3677e24323","src/task/mod.rs":"41117cf8803340679aa07ff853c94456e41b2f6f6c373c052463f330e2dfcaaf","src/task/spawn.rs":"05a697fe15a9b8cac2149d875aacd5ae81d0bd334dae6c12e289c9304a1d4e0a","src/task/task_local.rs":"3fedce25b286a0c7359f272f8aaee3409d517442dede604b6cfd1da5272d1313","src/task/yield_now.rs":"d8cb414972c7e867ce44a854dfd799e965868390c63e003616e4b132b38211a9","src/time/clock.rs":"9ef10d3a0696b3b57094c8b462322f47f4864d1fca53794fa12c871c9f891478","src/time/delay.rs":"68634bb8cc82d5403c1f8e02fa9eabc29cd0e502cf5339d5b469d246de5977ff","src/time/delay_queue.rs":"2bbf02ebfe88c3b789b87dafda6824e51775632c40f43ef74890f3ff54387232","src/time/driver/atomic_stack.rs":"3e56ab77ad2ec7c9cc6bbc6e9b3ed108731a80c6828e99bfc5244feed228cc5e","src/time/driver/entry.rs":"352ba5ce477829967760819b1fcd9af4f4934969992dbdf386b70fcf104e4d1a","src/time/driver/handle.rs":"eac61ad62c42fe62462cd9876ebcf4c56db80ed451da96e3243946c7e43457e0","src/time/driver/mod.rs":"662081b388fe694d7a9b5159030e2a9bbed9357989a4833912c8270be60c9f6d","src/time/driver/registration.rs":"82b04bb501ecf9728534efc1ae79fef9f4712fb8eae13a3d9160bc061c8deea4","src/time/driver/stack.rs":"ebfeaa2bc3fbb2ebd0dea0b0e7faccce2786e79a48c81e45809a29142e150260","src/time/driver/tests/mod.rs":"26da5216d385f3d4bedfb0228e698f3a41a92c2f58dec0e371077e6bfc73fd7c","src/time/error.rs":"7615d635036fe9e18d3058bff3b1b95792bcf1b0a35bb6769ec6072f1ba1603f","src/time/instant.rs":"406116d79117135b9717f71d585e3e92139d615d5cd03071beaa9dc14ac913ea","src/time/interval.rs":"52e779ac647b6bb1fee970c367a6390d8f2763258d17c487b37f002852345adf","src/time/mod.rs":"d552fdcf70e8f2b867f78e463a061dc4b21fb3c69b7fe58f90a45a06a80236b4","src/time/tests/mod.rs":"bcbf2373c1b950da5b1418b7e2100f6df492c081cde606e132edae5b22ef37a8","src/time/tests/test_delay.rs":"818e87d5761a360f224deebc70f32dde3bc4787e30aeb612f27351d5a56060ee","src/time/throttle.rs":"4d961fd9304ee4345ac7add488aef7f44fd3910e39e01ee2bbc1ea33ce25a6c4","src/time/timeout.rs":"275936a47f3a074ab036bd6ce8e65965cac721d2214650044693c6e50d689cfb","src/time/wheel/level.rs":"bfa1096cc91b9fa7c58328179194d606c1cc850f953a3ea344f8517c4cb25d72","src/time/wheel/mod.rs":"e49465f9ffbfa61283ce6eb766cf70edc62721cb033d8b98915e61d3814dd316","src/time/wheel/stack.rs":"ac5fd0f6cac1bfdd9497b6eaa4f64beb816bc5605ecae463bd10a5e4c48e78c0","src/util/bit.rs":"df2987358940db917437ac5269d38dce75837cdaae3b5f124cb7cf5fbd806fa8","src/util/linked_list.rs":"5d4023dfbe9410190c7176b66883550813bcf299d146c51646631f4c5974233b","src/util/mod.rs":"f706c2bf08edb1d642cee060b7af5ced7f4abcfc03c6d29ca82fe5ab3ec907d1","src/util/pad.rs":"5dc99dbb3d3d16fecd6228fd2e2f67e5301b2d426e6149f79f93c1af1b4d1d90","src/util/rand.rs":"df99cdedfdc191b9b138c0b8f2bf7b200da09e646f7977706b918593d7f08335","src/util/slab/addr.rs":"1df854b698a1124cad1111113900755ebd8ccdfc282558ff85dec66403cb69d4","src/util/slab/entry.rs":"718e2a7e19c78fe5693376d9152bb16435e5cb898e5191aa7265646e2f19a1d6","src/util/slab/generation.rs":"4cbe46a38c41a3c670ebef7d18862ab00a03b36a2c2597467e68cfa7af5a2374","src/util/slab/mod.rs":"27f921fc35aab54fc9affe7093a0bcace4763c907d105a0d3cfad3961f8aa6ff","src/util/slab/page.rs":"13ca549b1ee909d0dd575fbd3981b1a639663f2f43b3867fd790cdc0b57c459c","src/util/slab/shard.rs":"491db737b4033ae21a3f44d2ed437f4a2aa9f1
fd52df3999acb0d27a6b06cba8","src/util/slab/slot.rs":"49e83978919c7fdb9a6fe3cdcd03e83114de6e926c409046b279a2ee95fa3d11","src/util/slab/stack.rs":"82e3c8bd907ae25d05a51a638c48cb2a0cf39312198c5c32498fbcb325654e06","src/util/slab/tests/loom_slab.rs":"f8c787e4ea2976889a355b91c9e594dbacbcecc26dfdda94767e927dfefed6c7","src/util/slab/tests/loom_stack.rs":"d29db751ba9c48908ded814ef957456a054988136b07cd26c97f62d3ba5745e6","src/util/slab/tests/mod.rs":"0423b219f919b8ad7f0c9b6978aaebd7d01139063d68445c8383ddca011cec3f","src/util/try_lock.rs":"c4ee49e1751ee0a7df1a8cbd4f8d36ea1d7355e3ac584fdb8697a94cd7a7a8f8","src/util/wake.rs":"3bdc5724211c852ed19a9ac1676ac8e12dd46381fa5bcc899cd5ecdc33528a79","tests/_require_full.rs":"a7d828d85aee8507a0292fe74ced5f4e75b0aaf3a2789cf0bddd276e9fa12dca","tests/async_send_sync.rs":"6f93c020af4abfdafd095a769419277978032c32ac7a0b60089629aa1af5c489","tests/buffered.rs":"001ffebf7a37cfc718a6e73838e6199000ff710ce387def7f4407b733998bf4e","tests/fs.rs":"b4902aaff2c28ef4d2676462381b04559fb4f7cdc0ecf46c46bccbb6276feb5d","tests/fs_copy.rs":"83448b19bdc332ec315024d4903b0a2ae81221895725a8b750025b47a43b0e79","tests/fs_dir.rs":"429b0903d8fd9c034d523135f337c0be64c9283d80313ea21499a629aba0aa61","tests/fs_file.rs":"9804b09f7c824f58ad3f06db7c769e8f0808d297ac8d78f68b32503eb40b3036","tests/fs_file_mocked.rs":"56bfca35f71f61432d607a71b1b817fcaf0ca76a725f391e86278161c33ed83f","tests/fs_link.rs":"f4cc85530965d97916073fbcaa81c65832b9a29ac4261e086546a11a89364052","tests/io_async_read.rs":"5dfbbb40cd9fb20a8b7af60a62126d34a218062f8f6117b2dc1282b6e2fb2538","tests/io_chain.rs":"f5d3ddc9f6e8152ceb08b5dda2ca3168b174f1f67ff28a4c5983bcbad69d8af6","tests/io_copy.rs":"6b94cd77e6034f39865c93611d65870a70dbae9d3b8e33253095b9d5d16410ad","tests/io_driver.rs":"e945fd55ece9fe5d3c2ef695d151e9799fee645f18b8eddde72c2a9d3336eb3c","tests/io_driver_drop.rs":"168417ec9f256d2604eb022a73a64a88ab5b6fe8d0f4be1d3460dc41d5083571","tests/io_lines.rs":"6660dfaf3d789ed383be6b54bd19c7fcc8a6b58cf685ec17a0d9588333a5099a","tests/io_read.rs":"900e56103d816575bf5647188823bf2ca66e7c9cad29154a9f0b3e9c280c611b","tests/io_read_exact.rs":"b6387dbeb0baceb7a1f74a9a3a8b4a654894465368be27c3bbf4352b79fc4314","tests/io_read_line.rs":"db4ed7dde08b8fb7c839cb3a5d711c053eb74ba7905a93f336dcdccf9a43636c","tests/io_read_to_end.rs":"7d50b76452c84822650225095d2cc83c8a162973470418819ae89268056c8523","tests/io_read_to_string.rs":"4a19e1696f535adb4bf208f84a690c61ca0045db5bb9d0eb1f4752da99dd2986","tests/io_read_until.rs":"39b5f147d276f5df80fcbe66a211a3a50cf3e467dcd295a753f654ea438d3f1f","tests/io_split.rs":"35b3de189ff171d69715fc929b74be0f181cb6dbe4b2392272ff5172e012548c","tests/io_take.rs":"8f4bfc9182539335704b6370a66998ef2a75f508fcdb73a7f8aa50baf0f4aea6","tests/io_write.rs":"98668a8c8feae0f85714df1dfecfcd94fba4ba347bdc3d8aaa4ea8b175055c69","tests/io_write_all.rs":"e171af1ecab45a439b384c3bae7198959c3f5e2e998967dbd9296760b52951b7","tests/io_write_int.rs":"3f4b50345f7d7d558e71ac7f2a8c1c4b7b771dad09fe2e1fbf9a17d4fb93c001","tests/macros_join.rs":"609c8e45d4152404b724c9dc448675c2a51f89619bbfbca9e82a7ef6f517c4ed","tests/macros_pin.rs":"572d65b3894858ad8c2491b6a5f8ffdb3b37ec71d2996831b2ad929c4e47d067","tests/macros_select.rs":"48e3fb1bcac8bf1e876b18b4a7d933f4e5949c3239ff2fdd2b054c475316962c","tests/macros_try_join.rs":"37e354a7802946bdf1a77d43f27aaba9767b71bebfa3b1e40a25ae4c84ce8a61","tests/net_bind_resource.rs":"3abdf9457ebc9f8262c03fa5834f1ceb6312d4a1573b6bdd4e2f584e3cf76b66","tests/net_lookup_host.rs":"fa43c0127277dd0cf750b17157fdc1d438a8009dd890458f9c566b3a1302a46
1","tests/no_rt.rs":"3f5be6964c08f850b565e9d14ca698395e72d607da69f34aa1ea1d127f5b5f42","tests/process_issue_2174.rs":"2ca9267533ec563df9b8a9572b21f557d38e1ec95a5c4ab8a0c77269b89a832a","tests/process_issue_42.rs":"b2a491c9093f67df000fcaf550ad06a5b09247a61255c7e708418ad7bb0500fa","tests/process_kill_on_drop.rs":"f3c67649fdf69ed83a81906b93774f5c529f2196b4f8b781870f51d5d9160223","tests/process_smoke.rs":"032ff4ab64ffd36da41a453d1d22ecd5dc9f2294048cdf94c62f015e22ce608e","tests/rt_basic.rs":"7de10f04ebc59d48eee1f667d6936cabc89c2969116e081cbcb983a0741605e2","tests/rt_common.rs":"6ad4e73d82cc078878c62a680fcb2230f1453196699b970a844b30cb8206e820","tests/rt_threaded.rs":"6ab471949c158b5de1dbbcdfaf14c5494c9c9fe03a18e9dce902c16f1ca13927","tests/signal_ctrl_c.rs":"9b53065781b37f3db5f7c67938239b0f3b0ebbc5938c14a5b730ad7ec07415d2","tests/signal_drop_recv.rs":"d1ec97213d9c6fd9fb25ea8c2b015c9e9ee1a62fe0853fc558bc8801e5a3a841","tests/signal_drop_rt.rs":"afd272df50241c16c72d0e3cbd26a6d90e94420ceb90314008ee5fd53d95da2a","tests/signal_drop_signal.rs":"041940550863250f359630dc67ef133874d809ddaf0a6c1238cee1565a19efec","tests/signal_multi_rt.rs":"52a964ebb7963f8b84816c2e8019056901abf7cb42a9f9c612611d8327091f05","tests/signal_no_rt.rs":"99714bf488a26b6b394d93e61639c4b6807f9d756c8d5836f31111a30d42609b","tests/signal_notify_both.rs":"bf0b9def20f530d146ee865305833d8e9bee07a0515e66573d7ff30e2c631123","tests/signal_twice.rs":"bce33093eed151955d13c334d6d8a5bc5ca67cf5b37c246e435a24c15bc166a0","tests/signal_usr1.rs":"86ad07594b09d35e71011d1e12a1fa2c477bfbc4a2a36df1421b6594a0930074","tests/stream_chain.rs":"fead3499b449a92efccb92e2c38f7e8ea9f54c3103a69a27fd3f3872fb2c0bd4","tests/stream_collect.rs":"27edfaea1c757dd29e53c253f639fe6d78244042721e1a571bb4e88c6b35ea28","tests/stream_empty.rs":"71c9e7f6040f128341b49a39c08b9616afc6137e764de2f6ab5b4d8f2c601873","tests/stream_fuse.rs":"13af891fa873ec942d8c87decad9419ed5f22a7903de6af1382e8be1dc1a0df6","tests/stream_iter.rs":"621f6b99e39e1e1c42220b04862b7d24584deb251e4de163d1f429a69873b0b9","tests/stream_merge.rs":"199219bc9736c1272588c8969233d915c963c7d8ef50a555fff6e40dae96fa00","tests/stream_once.rs":"58fcd541bfa4ae6d4df1b83c6aa1f303cf3b7410a5fdc7a7049d4682edb9d7d3","tests/stream_pending.rs":"2c1e798dfd15f69b7fd2518999473f835fab77d36c504fee573f23ca65351522","tests/stream_reader.rs":"2d5a02f00e10804452016455f56e6321bbfe7f0c5ee6179aad592c8e17db6b8a","tests/stream_stream_map.rs":"0c6ec9b18965f756ee684cd1fd679deda0302b610ad5f9c1baace1ef32599e80","tests/stream_timeout.rs":"69d440f463c4031a9447409ba72839f1ff826679696dfbfdd863616020ca0894","tests/support/mock_file.rs":"9373a85e4dee4c5aa380e8a499402db357e92a5783c82a2d3ac348afbdc78abb","tests/support/mock_pool.rs":"1ddbf09d10787c5e413880322b70ec5d01ed4ee4b866c14a888d3cabc0275ea5","tests/support/signal.rs":"83531afa2e8e71cfd90cd4e1fc821490ffa824f0f9f0c9c4a027c08fed6b8712","tests/sync_barrier.rs":"7771f9e75ecf24d1a8ff0a8731a6bfced34cc129aba0e6f7b9b73d6a1d6df999","tests/sync_broadcast.rs":"40388d29cd18167b0764f4ca4d63ba2b1fc64832447db5cdcb9a9b68e6d2ed65","tests/sync_errors.rs":"d75d041f946ef8b0a726f4fcb3da29d7248485b5a28f0fd68adcffadd8852549","tests/sync_mpsc.rs":"f216ded9d509222bfd403306c565b81ecc25b07528f16b06b56050d6009019d0","tests/sync_mutex.rs":"0ad4cef283ef47fe938609a8142bfbb21547205cb064494b74ed4a6dd0c21a74","tests/sync_notify.rs":"ea0da778622c8105dbcdefc709984f61fd36e54f39e3c80d9eaedcf036ef3086","tests/sync_oneshot.rs":"da46ed4514aa465e832c12dc00223e2da6a2eec7d0b3bfa7d1186f4d4741b627","tests/sync_rwlock.rs":"1a54885310a87a2f7b2408732530a
db0ead97775ebeea336b614ba3960e9e6ed","tests/sync_semaphore.rs":"a86b2839571490b8a83db2fc727ecfe85c8c1a11baee92e7f212ff128f73400c","tests/sync_watch.rs":"51a84de8da804e883ddc97ccbea222168be1dcd9e0a54263839a5b7850dfd050","tests/task_blocking.rs":"9291d0a080f937ddae37fcc25ac9aeb507e937db22117e418d3820c951c15cb7","tests/task_local.rs":"09ecfac3dab2d92d443f0267aebbd484c99474617174459621692185ad366d6d","tests/task_local_set.rs":"2abe0474caba4e6b67d90e6e369c31625b6ca3559f3dde763fdf0950b54e4298","tests/tcp_accept.rs":"043372df97c322d87974928434cf7a2b99ad423b8e2716deb6b7723dd0b5c222","tests/tcp_connect.rs":"bcdefff6cec1f8accca910c27af6bf999265c10629287678f8a89ce08f203b05","tests/tcp_echo.rs":"4943257ba23c326a52b4f6f984259b471165d07a302c4dd92ccc90d2694e78a3","tests/tcp_peek.rs":"ea904d05f9684e6108a698bdbbd856c9849c1a51eb334cf5bd45ef74c8fe585c","tests/tcp_shutdown.rs":"48c626375cb25c77cda4f0893f066348ee000969f0e5fd4e6adf2b56619c4dea","tests/tcp_split.rs":"39c18c414ac25b45e4be7bcee754c4007a869c68bf14cc9d9b1ef8542ee78a2b","tests/test_clock.rs":"d5c9bf7bb5d926e2b29b43e47b5bb051b0f761bfe44d5fef349ed442ea7b416f","tests/time_delay.rs":"3375f76a6bd856d3aae8772732cd86d59ef0ba20318849dad8e2482f9d4ca294","tests/time_delay_queue.rs":"9ee02154378cf29ca3b5682213c60e5ad1e9c1ad8c21f1af37430338c4fca170","tests/time_interval.rs":"71e262819ff32948e7131ae2a945b2dbf053a5a3ae3c7f30a1cf8f2bc4db843a","tests/time_rt.rs":"730728a3206d5f8ed28dd656bd225dcadda478f5ae773e789336814dca638845","tests/time_throttle.rs":"de3e7374076ab3c05d3deb1af10911f236e39537a254f78ccbcb542bd7b998b1","tests/time_timeout.rs":"cb0bc0f4312040fa0d8f10a12bfca081355e22cb6e490577f942a8de4aed43b8","tests/udp.rs":"6509c46ddd398db4f52d43e75757580dc24cf4eb0d638c26aea2ed8e36da122c","tests/uds_cred.rs":"e57a552dd9e70e9ad71b930f6d130e6df939b48b899384f7434cfb7a2539713f","tests/uds_datagram.rs":"6daceccb2bc72f0cbf15c05b9e2455079086a990e0221b6b2edeb6d17c104e5f","tests/uds_split.rs":"9b7cb3b0fde549279133367003606485058b9f8505d7f956eb26e7792dc32402","tests/uds_stream.rs":"af789c0198ee71749178c5784cf21762c1324f2b1888449da0d15e846571b8d7"},"package":"34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713"} \ No newline at end of file
diff --git a/third_party/rust/tokio/CHANGELOG.md b/third_party/rust/tokio/CHANGELOG.md
new file mode 100644
index 0000000000..84e4486dff
--- /dev/null
+++ b/third_party/rust/tokio/CHANGELOG.md
@@ -0,0 +1,409 @@
+# 0.2.18 (April 12, 2020)
+
+### Fixes
+- task: `LocalSet` was incorrectly marked as `Send` (#2398)
+- io: correctly report `WriteZero` failure in `write_int` (#2334)
+
+# 0.2.17 (April 9, 2020)
+
+### Fixes
+- rt: bug in work-stealing queue (#2387)
+
+### Changes
+- rt: threadpool uses logical CPU count instead of physical by default (#2391)
+
+# 0.2.16 (April 3, 2020)
+
+### Fixes
+
+- sync: fix a regression where `Mutex`, `Semaphore`, and `RwLock` futures no
+ longer implement `Sync` (#2375)
+- fs: fix `fs::copy` not copying file permissions (#2354)
+
+### Added
+
+- time: added `deadline` method to `delay_queue::Expired` (#2300)
+- io: added `StreamReader` (#2052)
+
+# 0.2.15 (April 2, 2020)
+
+### Fixes
+
+- rt: fix queue regression (#2362).
+
+### Added
+
+- sync: Add disarm to `mpsc::Sender` (#2358).
+
+# 0.2.14 (April 1, 2020)
+
+### Fixes
+- rt: concurrency bug in scheduler (#2273).
+- rt: concurrency bug with shell runtime (#2333).
+- test-util: correct pause/resume of time (#2253).
+- time: `DelayQueue` correct wakeup after `insert` (#2285).
+
+### Added
+- io: impl `AsRawFd`, `AsRawHandle` for std io types (#2335).
+- rt: automatic cooperative task yielding (#2160, #2343, #2349).
+- sync: `RwLock::into_inner` (#2321).
+
+### Changed
+- sync: semaphore, mutex internals rewritten to avoid allocations (#2325).
+
+# 0.2.13 (February 28, 2020)
+
+### Fixes
+- macros: unresolved import in `pin!` (#2281).
+
+# 0.2.12 (February 27, 2020)
+
+### Fixes
+- net: `UnixStream::poll_shutdown` should call `shutdown(Write)` (#2245).
+- process: Wake up read and write on `EPOLLERR` (#2218).
+- rt: potential deadlock when using `block_in_place` and shutting down the
+ runtime (#2119).
+- rt: only detect number of CPUs if `core_threads` not specified (#2238).
+- sync: reduce `watch::Receiver` struct size (#2191).
+- time: succeed when setting delay of `$MAX-1` (#2184).
+- time: avoid having to poll `DelayQueue` after inserting new delay (#2217).
+
+### Added
+- macros: `pin!` variant that assigns to identifier and pins (#2274).
+- net: impl `Stream` for `Listener` types (#2275).
+- rt: `Runtime::shutdown_timeout` waits for runtime to shutdown for specified
+ duration (#2186).
+- stream: `StreamMap` merges streams and can insert / remove streams at
+ runtime (#2185).
+- stream: `StreamExt::skip()` skips a fixed number of items (#2204).
+- stream: `StreamExt::skip_while()` skips items based on a predicate (#2205).
+- sync: `Notify` provides basic `async` / `await` task notification (#2210).
+- sync: `Mutex::into_inner` retrieves guarded data (#2250).
+- sync: `mpsc::Sender::send_timeout` sends, waiting for up to specified duration
+ for channel capacity (#2227).
+- time: impl `Ord` and `Hash` for `Instant` (#2239).
+
+# 0.2.11 (January 27, 2020)
+
+### Fixes
+- docs: misc fixes and tweaks (#2155, #2103, #2027, #2167, #2175).
+- macros: handle generics in `#[tokio::main]` method (#2177).
+- sync: `broadcast` potential lost notifications (#2135).
+- rt: improve "no runtime" panic messages (#2145).
+
+### Added
+- optional support for using `parking_lot` internally (#2164).
+- fs: `fs::copy`, an async version of `std::fs::copy` (#2079).
+- macros: `select!` waits for the first branch to complete (#2152).
+- macros: `join!` waits for all branches to complete (#2158).
+- macros: `try_join!` waits for all branches to complete or the first error (#2169).
+- macros: `pin!` pins a value to the stack (#2163).
+- net: `ReadHalf::poll()` and `ReadHalf::poll_peek()` (#2151).
+- stream: `StreamExt::timeout()` sets a per-item max duration (#2149).
+- stream: `StreamExt::fold()` applies a function, producing a single value (#2122).
+- sync: impl `Eq`, `PartialEq` for `oneshot::RecvError` (#2168).
+- task: methods for inspecting the `JoinError` cause (#2051).
+
+# 0.2.10 (January 21, 2020)
+
+### Fixes
+- `#[tokio::main]` when `rt-core` feature flag is not enabled (#2139).
+- remove `AsyncBufRead` from `BufStream` impl block (#2108).
+- potential undefined behavior when implementing `AsyncRead` incorrectly (#2030).
+
+### Added
+- `BufStream::with_capacity` (#2125).
+- impl `From` and `Default` for `RwLock` (#2089).
+- `io::ReadHalf::is_pair_of` checks if the provided `WriteHalf` is for the same
+ underlying object (#1762, #2144).
+- `runtime::Handle::try_current()` returns a handle to the current runtime (#2118).
+- `stream::empty()` returns an immediately ready empty stream (#2092).
+- `stream::once(val)` returns a stream that yields a single value: `val` (#2094).
+- `stream::pending()` returns a stream that never becomes ready (#2092).
+- `StreamExt::chain()` sequences a second stream after the first completes (#2093).
+- `StreamExt::collect()` transforms a stream into a collection (#2109).
+- `StreamExt::fuse` ends the stream after the first `None` (#2085).
+- `StreamExt::merge` combines two streams, yielding values as they become ready (#2091).
+- Task-local storage (#2126).
+
+# 0.2.9 (January 9, 2020)
+
+### Fixes
+- `AsyncSeek` impl for `File` (#1986).
+- rt: shutdown deadlock in `threaded_scheduler` (#2074, #2082).
+- rt: memory ordering when dropping `JoinHandle` (#2044).
+- docs: misc API documentation fixes and improvements.
+
+# 0.2.8 (January 7, 2020)
+
+### Fixes
+- depend on new version of `tokio-macros`.
+
+# 0.2.7 (January 7, 2020)
+
+### Fixes
+- potential deadlock when dropping `basic_scheduler` Runtime.
+- calling `spawn_blocking` from within a `spawn_blocking` (#2006).
+- storing a `Runtime` instance in a thread-local (#2011).
+- miscellaneous documentation fixes.
+- rt: fix `Waker::will_wake` to return true when tasks match (#2045).
+- test-util: `time::advance` runs pending tasks before changing the time (#2059).
+
+### Added
+- `net::lookup_host` maps a `T: ToSocketAddrs` to a stream of `SocketAddrs` (#1870).
+- `process::Child` fields are made public to match `std` (#2014).
+- impl `Stream` for `sync::broadcast::Receiver` (#2012).
+- `sync::RwLock` provides an asynchronous read-write lock (#1699).
+- `runtime::Handle::current` returns the handle for the current runtime (#2040).
+- `StreamExt::filter` filters stream values according to a predicate (#2001).
+- `StreamExt::filter_map` simultaneously filters and maps stream values (#2001).
+- `StreamExt::try_next`, a convenience for streams of `Result<T, E>` (#2005).
+- `StreamExt::take` limits a stream to a specified number of values (#2025).
+- `StreamExt::take_while` limits a stream based on a predicate (#2029).
+- `StreamExt::all` tests if every element of the stream matches a predicate (#2035).
+- `StreamExt::any` tests if any element of the stream matches a predicate (#2034).
+- `task::LocalSet.await` runs spawned tasks until the set is idle (#1971).
+- `time::DelayQueue::len` returns the number of entries in the queue (#1755).
+- expose runtime options from the `#[tokio::main]` and `#[tokio::test]` (#2022).
+
+# 0.2.6 (December 19, 2019)
+
+### Fixes
+- `fs::File::seek` API regression (#1991).
+
+# 0.2.5 (December 18, 2019)
+
+### Added
+- `io::AsyncSeek` trait (#1924).
+- `Mutex::try_lock` (#1939)
+- `mpsc::Receiver::try_recv` and `mpsc::UnboundedReceiver::try_recv` (#1939).
+- `writev` support for `TcpStream` (#1956).
+- `time::throttle` for throttling streams (#1949).
+- implement `Stream` for `time::DelayQueue` (#1975).
+- `sync::broadcast` provides a fan-out channel (#1943).
+- `sync::Semaphore` provides an async semaphore (#1973).
+- `stream::StreamExt` provides stream utilities (#1962).
+
+### Fixes
+- deadlock risk while shutting down the runtime (#1972).
+- panic while shutting down the runtime (#1978).
+- `sync::MutexGuard` debug output (#1961).
+- misc doc improvements (#1933, #1934, #1940, #1942).
+
+### Changes
+- runtime threads are configured with `runtime::Builder::core_threads` and
+ `runtime::Builder::max_threads`. `runtime::Builder::num_threads` is
+ deprecated (#1977).
+
+# 0.2.4 (December 6, 2019)
+
+### Fixes
+- `sync::Mutex` deadlock when `lock()` future is dropped early (#1898).
+
+# 0.2.3 (December 6, 2019)
+
+### Added
+- read / write integers using `AsyncReadExt` and `AsyncWriteExt` (#1863).
+- `read_buf` / `write_buf` for reading / writing `Buf` / `BufMut` (#1881).
+- `TcpStream::poll_peek` - pollable API for performing TCP peek (#1864).
+- `sync::oneshot::error::TryRecvError` provides variants to detect the error
+ kind (#1874).
+- `LocalSet::block_on` accepts `!'static` task (#1882).
+- `task::JoinError` is now `Sync` (#1888).
+- impl conversions between `tokio::time::Instant` and
+ `std::time::Instant` (#1904).
+
+### Fixes
+- calling `spawn_blocking` after runtime shutdown (#1875).
+- `LocalSet` drop infinite loop (#1892).
+- `LocalSet` hang under load (#1905).
+- improved documentation (#1865, #1866, #1868, #1874, #1876, #1911).
+
+# 0.2.2 (November 29, 2019)
+
+### Fixes
+- scheduling with `basic_scheduler` (#1861).
+- update `spawn` panic message to specify that a task scheduler is required (#1839).
+- API docs example for `runtime::Builder` to include a task scheduler (#1841).
+- general documentation (#1834).
+- building on illumos/solaris (#1772).
+- panic when dropping `LocalSet` (#1843).
+- API docs mention the required Cargo features for `Builder::{basic, threaded}_scheduler` (#1858).
+
+### Added
+- impl `Stream` for `signal::unix::Signal` (#1849).
+- API docs for platform specific behavior of `signal::ctrl_c` and `signal::unix::Signal` (#1854).
+- API docs for `signal::unix::Signal::{recv, poll_recv}` and `signal::windows::CtrlBreak::{recv, poll_recv}` (#1854).
+- `File::into_std` and `File::try_into_std` methods (#1856).
+
+# 0.2.1 (November 26, 2019)
+
+### Fixes
+- API docs for `TcpListener::incoming`, `UnixListener::incoming` (#1831).
+
+### Added
+- `tokio::task::LocalSet` provides a strategy for spawning `!Send` tasks (#1733).
+- export `tokio::time::Elapsed` (#1826).
+- impl `AsRawFd`, `AsRawHandle` for `tokio::fs::File` (#1827).
+
+# 0.2.0 (November 26, 2019)
+
+A major breaking change. Most of the implementation and APIs have changed in
+one way or another. This changelog entry contains only the highlights.
+
+### Changed
+- APIs are updated to use `async / await`.
+- most `tokio-*` crates are collapsed into this crate.
+- Scheduler is rewritten.
+- `tokio::spawn` returns a `JoinHandle`.
+- A single I/O / timer driver is used per runtime.
+- I/O driver uses a concurrent slab for allocating state.
+- components are made available via feature flags.
+- Use `bytes` 0.5.
+- `tokio::codec` is moved to `tokio-util`.
+
+### Removed
+- Standalone `timer` and `net` drivers are removed; use `Runtime` instead.
+- `current_thread` runtime is removed, use `tokio::runtime::Runtime` with
+ `basic_scheduler` instead.
+
+# 0.1.21 (May 30, 2019)
+
+### Changed
+- Bump `tokio-trace-core` version to 0.2 (#1111).
+
+# 0.1.20 (May 14, 2019)
+
+### Added
+- `tokio::runtime::Builder::panic_handler` allows configuring handling
+ panics on the runtime (#1055).
+
+# 0.1.19 (April 22, 2019)
+
+### Added
+- Re-export `tokio::sync::Mutex` primitive (#964).
+
+# 0.1.18 (March 22, 2019)
+
+### Added
+- `TypedExecutor` re-export and implementations (#993).
+
+# 0.1.17 (March 13, 2019)
+
+### Added
+- Propagate trace subscriber in the runtime (#966).
+
+# 0.1.16 (March 1, 2019)
+
+### Fixed
+- async-await: track latest nightly changes (#940).
+
+### Added
+- `sync::Watch`, a single-value broadcast channel (#922).
+- Async equivalents of the read / write file helpers being added to `std` (#896).
+
+# 0.1.15 (January 24, 2019)
+
+### Added
+- Re-export tokio-sync APIs (#839).
+- Stream enumerate combinator (#832).
+
+# 0.1.14 (January 6, 2019)
+
+* Use feature flags to break up the crate, allowing users to pick & choose
+ components (#808).
+* Export `UnixDatagram` and `UnixDatagramFramed` (#772).
+
+# 0.1.13 (November 21, 2018)
+
+* Fix `Runtime::reactor()` when no tasks are spawned (#721).
+* `runtime::Builder` no longer uses deprecated methods (#749).
+* Provide `after_start` and `before_stop` configuration settings for
+ `Runtime` (#756).
+* Implement throttle stream combinator (#736).
+
+# 0.1.12 (October 23, 2018)
+
+* runtime: expose `keep_alive` on runtime builder (#676).
+* runtime: create a reactor per worker thread (#660).
+* codec: fix panic in `LengthDelimitedCodec` (#682).
+* io: re-export `tokio_io::io::read` function (#689).
+* runtime: check for executor re-entry in more places (#708).
+
+# 0.1.11 (September 28, 2018)
+
+* Fix `tokio-async-await` dependency (#675).
+
+# 0.1.10 (September 27, 2018)
+
+* Fix minimal versions
+
+# 0.1.9 (September 27, 2018)
+
+* Experimental async/await improvements (#661).
+* Re-export `TaskExecutor` from `tokio-current-thread` (#652).
+* Improve `Runtime` builder API (#645).
+* `tokio::run` panics when called from the context of an executor
+ (#646).
+* Introduce `StreamExt` with a `timeout` helper (#573).
+* Move `length_delimited` into `tokio` (#575).
+* Re-organize `tokio::net` module (#548).
+* Re-export `tokio-current-thread::spawn` in current_thread runtime
+ (#579).
+
+# 0.1.8 (August 23, 2018)
+
+* Extract tokio::executor::current_thread to a sub crate (#370)
+* Add `Runtime::block_on` (#398)
+* Add `runtime::current_thread::block_on_all` (#477)
+* Misc documentation improvements (#450)
+* Implement `std::error::Error` for error types (#501)
+
+# 0.1.7 (June 6, 2018)
+
+* Add `Runtime::block_on` for concurrent runtime (#391).
+* Provide handle to `current_thread::Runtime` that allows spawning tasks from
+ other threads (#340).
+* Provide `clock::now()`, a configurable source of time (#381).
+
+# 0.1.6 (May 2, 2018)
+
+* Add asynchronous filesystem APIs (#323).
+* Add "current thread" runtime variant (#308).
+* `CurrentThread`: Expose inner `Park` instance.
+* Improve fairness of `CurrentThread` executor (#313).
+
+# 0.1.5 (March 30, 2018)
+
+* Provide timer API (#266)
+
+# 0.1.4 (March 22, 2018)
+
+* Fix build on FreeBSD (#218)
+* Shutdown the Runtime when the handle is dropped (#214)
+* Set Runtime thread name prefix for worker threads (#232)
+* Add builder for Runtime (#234)
+* Extract TCP and UDP types into separate crates (#224)
+* Optionally support futures 0.2.
+
+# 0.1.3 (March 09, 2018)
+
+* Fix `CurrentThread::turn` to block on idle (#212).
+
+# 0.1.2 (March 09, 2018)
+
+* Introduce Tokio Runtime (#141)
+* Provide `CurrentThread` for more flexible usage of current thread executor (#141).
+* Add Lio for platforms that support it (#142).
+* I/O resources now lazily bind to the reactor (#160).
+* Extract Reactor to dedicated crate (#169)
+* Add facade to sub crates and add prelude (#166).
+* Switch TCP/UDP fns to poll_ -> Poll<...> style (#175)
+
+# 0.1.1 (February 09, 2018)
+
+* Doc fixes
+
+# 0.1.0 (February 07, 2018)
+
+* Initial crate released based on [RFC](https://github.com/tokio-rs/tokio-rfcs/pull/3).
diff --git a/third_party/rust/tokio/Cargo.toml b/third_party/rust/tokio/Cargo.toml
new file mode 100644
index 0000000000..b87e25c4cc
--- /dev/null
+++ b/third_party/rust/tokio/Cargo.toml
@@ -0,0 +1,134 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "tokio"
+version = "0.2.18"
+authors = ["Tokio Contributors <team@tokio.rs>"]
+description = "An event-driven, non-blocking I/O platform for writing asynchronous I/O\nbacked applications.\n"
+homepage = "https://tokio.rs"
+documentation = "https://docs.rs/tokio/0.2.18/tokio/"
+readme = "README.md"
+keywords = ["io", "async", "non-blocking", "futures"]
+categories = ["asynchronous", "network-programming"]
+license = "MIT"
+repository = "https://github.com/tokio-rs/tokio"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[package.metadata.playground]
+features = ["full"]
+[dependencies.bytes]
+version = "0.5.0"
+
+[dependencies.fnv]
+version = "1.0.6"
+optional = true
+
+[dependencies.futures-core]
+version = "0.3.0"
+optional = true
+
+[dependencies.iovec]
+version = "0.1.4"
+optional = true
+
+[dependencies.lazy_static]
+version = "1.0.2"
+optional = true
+
+[dependencies.memchr]
+version = "2.2"
+optional = true
+
+[dependencies.mio]
+version = "0.6.20"
+optional = true
+
+[dependencies.num_cpus]
+version = "1.8.0"
+optional = true
+
+[dependencies.parking_lot]
+version = "0.10.0"
+optional = true
+
+[dependencies.pin-project-lite]
+version = "0.1.1"
+
+[dependencies.slab]
+version = "0.4.1"
+optional = true
+
+[dependencies.tokio-macros]
+version = "0.2.4"
+optional = true
+[dev-dependencies.futures]
+version = "0.3.0"
+features = ["async-await"]
+
+[dev-dependencies.proptest]
+version = "0.9.4"
+
+[dev-dependencies.tempfile]
+version = "3.1.0"
+
+[dev-dependencies.tokio-test]
+version = "0.2.0"
+
+[features]
+blocking = ["rt-core"]
+default = []
+dns = ["rt-core"]
+fs = ["rt-core", "io-util"]
+full = ["blocking", "dns", "fs", "io-driver", "io-util", "io-std", "macros", "net", "process", "rt-core", "rt-util", "rt-threaded", "signal", "stream", "sync", "time"]
+io-driver = ["mio", "lazy_static"]
+io-std = ["rt-core"]
+io-util = ["memchr"]
+macros = ["tokio-macros"]
+net = ["dns", "tcp", "udp", "uds"]
+process = ["io-driver", "libc", "mio-named-pipes", "signal", "winapi/consoleapi", "winapi/minwindef", "winapi/threadpoollegacyapiset", "winapi/winerror"]
+rt-core = []
+rt-threaded = ["num_cpus", "rt-core"]
+rt-util = []
+signal = ["io-driver", "lazy_static", "libc", "mio-uds", "signal-hook-registry", "winapi/consoleapi", "winapi/minwindef"]
+stream = ["futures-core"]
+sync = ["fnv"]
+tcp = ["io-driver", "iovec"]
+test-util = []
+time = ["slab"]
+udp = ["io-driver"]
+uds = ["io-driver", "mio-uds", "libc"]
+[target."cfg(not(windows))".dev-dependencies.loom]
+version = "0.3.1"
+features = ["futures", "checkpoint"]
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.42"
+optional = true
+
+[target."cfg(unix)".dependencies.mio-uds]
+version = "0.6.5"
+optional = true
+
+[target."cfg(unix)".dependencies.signal-hook-registry]
+version = "1.1.1"
+optional = true
+[target."cfg(windows)".dependencies.mio-named-pipes]
+version = "0.1.6"
+optional = true
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.3.8"
+optional = true
+default-features = false
diff --git a/third_party/rust/tokio/LICENSE b/third_party/rust/tokio/LICENSE
new file mode 100644
index 0000000000..cdb28b4b56
--- /dev/null
+++ b/third_party/rust/tokio/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2019 Tokio Contributors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/tokio/README.md b/third_party/rust/tokio/README.md
new file mode 100644
index 0000000000..080181f897
--- /dev/null
+++ b/third_party/rust/tokio/README.md
@@ -0,0 +1,156 @@
+# Tokio
+
+A runtime for writing reliable, asynchronous, and slim applications with
+the Rust programming language. It is:
+
+* **Fast**: Tokio's zero-cost abstractions give you bare-metal
+ performance.
+
+* **Reliable**: Tokio leverages Rust's ownership, type system, and
+ concurrency model to reduce bugs and ensure thread safety.
+
+* **Scalable**: Tokio has a minimal footprint, and handles backpressure
+ and cancellation naturally.
+
+[![Crates.io][crates-badge]][crates-url]
+[![MIT licensed][mit-badge]][mit-url]
+[![Build Status][azure-badge]][azure-url]
+[![Discord chat][discord-badge]][discord-url]
+
+[crates-badge]: https://img.shields.io/crates/v/tokio.svg
+[crates-url]: https://crates.io/crates/tokio
+[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
+[mit-url]: LICENSE
+[azure-badge]: https://dev.azure.com/tokio-rs/Tokio/_apis/build/status/tokio-rs.tokio?branchName=master
+[azure-url]: https://dev.azure.com/tokio-rs/Tokio/_build/latest?definitionId=1&branchName=master
+[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord&style=flat-square
+[discord-url]: https://discord.gg/6yGkFeN
+
+[Website](https://tokio.rs) |
+[Guides](https://tokio.rs/docs/) |
+[API Docs](https://docs.rs/tokio/0.2/tokio) |
+[Chat](https://discord.gg/6yGkFeN)
+
+## Overview
+
+Tokio is an event-driven, non-blocking I/O platform for writing
+asynchronous applications with the Rust programming language. At a high
+level, it provides a few major components:
+
+* A multithreaded, work-stealing based task [scheduler].
+* A reactor backed by the operating system's event queue (epoll, kqueue,
+ IOCP, etc...).
+* Asynchronous [TCP and UDP][net] sockets.
+
+These provide the runtime components necessary for building an asynchronous
+application.
+
+[net]: https://docs.rs/tokio/0.2/tokio/net/index.html
+[scheduler]: https://docs.rs/tokio/0.2/tokio/runtime/index.html
+
+## Example
+
+To get started, add the following to `Cargo.toml`.
+
+```toml
+tokio = { version = "0.2", features = ["full"] }
+```
+
+Tokio requires components to be explicitly enabled using feature flags. As a
+shorthand, the `full` feature enables all components.
+
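+If you depend on only a few components, you can instead enable just the
+matching feature flags. A hypothetical selection (names taken from this
+crate's `Cargo.toml`; adjust to what your application actually uses):
+
+```toml
+tokio = { version = "0.2", features = ["rt-threaded", "tcp", "io-util", "macros"] }
+```
+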
+A basic TCP echo server with Tokio:
+
+```rust,no_run
+use tokio::net::TcpListener;
+use tokio::prelude::*;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+ let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+
+ loop {
+ let (mut socket, _) = listener.accept().await?;
+
+ tokio::spawn(async move {
+ let mut buf = [0; 1024];
+
+ // In a loop, read data from the socket and write the data back.
+ loop {
+ let n = match socket.read(&mut buf).await {
+ // socket closed
+ Ok(n) if n == 0 => return,
+ Ok(n) => n,
+ Err(e) => {
+ eprintln!("failed to read from socket; err = {:?}", e);
+ return;
+ }
+ };
+
+ // Write the data back
+ if let Err(e) = socket.write_all(&buf[0..n]).await {
+ eprintln!("failed to write to socket; err = {:?}", e);
+ return;
+ }
+ }
+ });
+ }
+}
+```
+
+More examples can be found [here](../examples).
+
+## Getting Help
+
+First, see if the answer to your question can be found in the [Guides] or the
+[API documentation]. If the answer is not there, there is an active community in
+the [Tokio Discord server][chat]. We would be happy to try to answer your
+question. Lastly, if that doesn't work, try opening an [issue] with the question.
+
+[Guides]: https://tokio.rs/docs/
+[API documentation]: https://docs.rs/tokio/0.2
+[chat]: https://discord.gg/6yGkFeN
+[issue]: https://github.com/tokio-rs/tokio/issues/new
+
+## Contributing
+
+:balloon: Thanks for your help improving the project! We are so happy to have
+you! We have a [contributing guide][guide] to help you get involved in the Tokio
+project.
+
+[guide]: CONTRIBUTING.md
+
+## Related Projects
+
+In addition to the crates in this repository, the Tokio project also maintains
+several other libraries, including:
+
+* [`tracing`] (formerly `tokio-trace`): A framework for application-level
+ tracing and async-aware diagnostics.
+
+* [`mio`]: A low-level, cross-platform abstraction over OS I/O APIs that powers
+ `tokio`.
+
+* [`bytes`]: Utilities for working with bytes, including efficient byte buffers.
+
+[`tracing`]: https://github.com/tokio-rs/tracing
+[`mio`]: https://github.com/tokio-rs/mio
+[`bytes`]: https://github.com/tokio-rs/bytes
+
+## Supported Rust Versions
+
+Tokio is built against the latest stable, nightly, and beta Rust releases. The
+minimum version supported is the stable release from three months before the
+current stable release version. For example, if the latest stable Rust is 1.29,
+the minimum version supported is 1.26. The current Tokio version is not
+guaranteed to build on Rust versions earlier than the minimum supported version.
+
+## License
+
+This project is licensed under the [MIT license](LICENSE).
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in Tokio by you, shall be licensed as MIT, without any additional
+terms or conditions.
diff --git a/third_party/rust/tokio/src/coop.rs b/third_party/rust/tokio/src/coop.rs
new file mode 100644
index 0000000000..1d62459166
--- /dev/null
+++ b/third_party/rust/tokio/src/coop.rs
@@ -0,0 +1,379 @@
+//! Opt-in yield points for improved cooperative scheduling.
+//!
+//! A single call to [`poll`] on a top-level task may do a lot of work before it
+//! returns `Poll::Pending`. If a task runs for a long period of time without yielding back to the
+//! executor, it can starve other tasks that are waiting on that executor to run them or to drive
+//! underlying resources. Since Rust does not have a runtime, it is difficult to forcibly preempt a
+//! long-running task. Instead, this module provides an opt-in mechanism for futures to collaborate
+//! with the executor to avoid starvation.
+//!
+//! Consider a future like this one:
+//!
+//! ```
+//! # use tokio::stream::{Stream, StreamExt};
+//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
+//! while let Some(_) = input.next().await {}
+//! }
+//! ```
+//!
+//! It may look harmless, but consider what happens under heavy load if the input stream is
+//! _always_ ready. If we spawn `drop_all`, the task will never yield, and will starve other tasks
+//! and resources on the same executor. With opt-in yield points, this problem is alleviated:
+//!
+//! ```ignore
+//! # use tokio::stream::{Stream, StreamExt};
+//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
+//! while let Some(_) = input.next().await {
+//! tokio::coop::proceed().await;
+//! }
+//! }
+//! ```
+//!
+//! The `proceed` future will coordinate with the executor to make sure that every so often control
+//! is yielded back to the executor so it can run other tasks.
+//!
+//! # Placing yield points
+//!
+//! Voluntary yield points should be placed _after_ at least some work has been done. If they are
+//! not, a future sufficiently deep in the task hierarchy may end up _never_ getting to run because
+//! of the number of yield points that inevitably appear before it is reached. In general, you will
+//! want yield points to only appear in "leaf" futures -- those that do not themselves poll other
+//! futures. By doing this, you avoid double-counting each iteration of the outer future against
+//! the cooperating budget.
+//!
+//! [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#tymethod.poll
+
+// NOTE: The doctests in this module are ignored since the whole module is (currently) private.
+
+use std::cell::Cell;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Constant used to determine how much "work" a task is allowed to do without yielding.
+///
+/// The value itself is chosen somewhat arbitrarily. It needs to be high enough to amortize wakeup
+/// and scheduling costs, but low enough that we do not starve other tasks for too long. The value
+/// also needs to be high enough that particularly deep tasks are able to do at least some useful
+/// work.
+///
+/// Note that as more yield points are added in the ecosystem, this value will probably also have
+/// to be raised.
+const BUDGET: usize = 128;
+
+/// Constant used to determine if budgeting has been disabled.
+const UNCONSTRAINED: usize = usize::max_value();
+
+thread_local! {
+ static HITS: Cell<usize> = Cell::new(UNCONSTRAINED);
+}
+
+/// Run the given closure with a cooperative task budget.
+///
+/// Enabling budgeting when it is already enabled is a no-op.
+#[inline(always)]
+pub(crate) fn budget<F, R>(f: F) -> R
+where
+ F: FnOnce() -> R,
+{
+ HITS.with(move |hits| {
+ if hits.get() != UNCONSTRAINED {
+ // We are already being budgeted.
+ //
+ // Arguably this should be an error, but it can happen "correctly"
+ // such as with block_on + LocalSet, so we make it a no-op.
+ return f();
+ }
+
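+        // Restore the unconstrained state when the closure returns (or
+        // panics), so the budget does not leak past this call.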
+ struct Guard<'a>(&'a Cell<usize>);
+ impl<'a> Drop for Guard<'a> {
+ fn drop(&mut self) {
+ self.0.set(UNCONSTRAINED);
+ }
+ }
+
+ hits.set(BUDGET);
+ let _guard = Guard(hits);
+ f()
+ })
+}
+
+cfg_rt_threaded! {
+ #[inline(always)]
+ pub(crate) fn has_budget_remaining() -> bool {
+ HITS.with(|hits| hits.get() > 0)
+ }
+}
+
+cfg_blocking_impl! {
+ /// Forcibly remove the budgeting constraints early.
+ pub(crate) fn stop() {
+ HITS.with(|hits| {
+ hits.set(UNCONSTRAINED);
+ });
+ }
+}
+
+/// Invoke `f` with a subset of the remaining budget.
+///
+/// This is useful if you have sub-futures that you need to poll, but that you want to restrict
+/// from using up your entire budget. For example, imagine the following future:
+///
+/// ```rust
+/// # use std::{future::Future, pin::Pin, task::{Context, Poll}};
+/// use futures::stream::FuturesUnordered;
+/// struct MyFuture<F1, F2> {
+/// big: FuturesUnordered<F1>,
+/// small: F2,
+/// }
+///
+/// use tokio::stream::Stream;
+/// impl<F1, F2> Future for MyFuture<F1, F2>
+/// where F1: Future, F2: Future
+/// # , F1: Unpin, F2: Unpin
+/// {
+/// type Output = F2::Output;
+///
+/// // fn poll(...)
+/// # fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F2::Output> {
+/// # let this = &mut *self;
+/// let mut big = // something to pin self.big
+/// # Pin::new(&mut this.big);
+/// let small = // something to pin self.small
+/// # Pin::new(&mut this.small);
+///
+/// // see if any of the big futures have finished
+/// while let Some(e) = futures::ready!(big.as_mut().poll_next(cx)) {
+/// // do something with e
+/// # let _ = e;
+/// }
+///
+/// // see if the small future has finished
+/// small.poll(cx)
+/// }
+/// # }
+/// ```
+///
+/// It could be that every time `poll` gets called, `big` ends up spending the entire budget, and
+/// `small` never gets polled. That would be sad. If you want to stick up for the little future,
+/// that's what `limit` is for. It lets you portion out a smaller part of the yield budget to a
+/// particular segment of your code. In the code above, you would write
+///
+/// ```rust,ignore
+/// # use std::{future::Future, pin::Pin, task::{Context, Poll}};
+/// # use futures::stream::FuturesUnordered;
+/// # struct MyFuture<F1, F2> {
+/// # big: FuturesUnordered<F1>,
+/// # small: F2,
+/// # }
+/// #
+/// # use tokio::stream::Stream;
+/// # impl<F1, F2> Future for MyFuture<F1, F2>
+/// # where F1: Future, F2: Future
+/// # , F1: Unpin, F2: Unpin
+/// # {
+/// # type Output = F2::Output;
+/// # fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F2::Output> {
+/// # let this = &mut *self;
+/// # let mut big = Pin::new(&mut this.big);
+/// # let small = Pin::new(&mut this.small);
+/// #
+/// // see if any of the big futures have finished
+/// while let Some(e) = futures::ready!(tokio::coop::limit(64, || big.as_mut().poll_next(cx))) {
+/// # // do something with e
+/// # let _ = e;
+/// # }
+/// # small.poll(cx)
+/// # }
+/// # }
+/// ```
+///
+/// Now, even if `big` spends its entire budget, `small` will likely be left with some budget left
+/// to also do useful work. In particular, if the remaining budget was `N` at the start of `poll`,
+/// `small` will have at least a budget of `N - 64`. It may be more if `big` did not spend its
+/// entire budget.
+///
+/// Note that you cannot _increase_ your budget by calling `limit`. The budget provided to the code
+/// inside `limit` is the _minimum_ of the _current_ budget and the bound.
+///
+#[allow(unreachable_pub, dead_code)]
+pub fn limit<R>(bound: usize, f: impl FnOnce() -> R) -> R {
+ HITS.with(|hits| {
+ let budget = hits.get();
+ // with_bound cannot _increase_ the remaining budget
+ let bound = std::cmp::min(budget, bound);
+ // When f() exits, how much should we add to what is left?
+ let floor = budget.saturating_sub(bound);
+ // Make sure we restore the remaining budget even on panic
+ struct RestoreBudget<'a>(&'a Cell<usize>, usize);
+ impl<'a> Drop for RestoreBudget<'a> {
+ fn drop(&mut self) {
+ let left = self.0.get();
+ self.0.set(self.1 + left);
+ }
+ }
+ // Time to restrict!
+ hits.set(bound);
+ let _restore = RestoreBudget(&hits, floor);
+ f()
+ })
+}
+
+/// Returns `Poll::Pending` if the current task has exceeded its budget and should yield.
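+///
+/// A minimal sketch of a "leaf" future charging the budget on every poll
+/// (an `ignore`d doctest like the others in this private module; the `Tick`
+/// type here is hypothetical):
+///
+/// ```ignore
+/// use std::future::Future;
+/// use std::pin::Pin;
+/// use std::task::{Context, Poll};
+///
+/// struct Tick;
+///
+/// impl Future for Tick {
+///     type Output = ();
+///
+///     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+///         // Consume one unit of budget; if it is exhausted, this returns
+///         // `Poll::Pending` after scheduling a self-wakeup.
+///         futures::ready!(tokio::coop::poll_proceed(cx));
+///         Poll::Ready(())
+///     }
+/// }
+/// ```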
+#[allow(unreachable_pub, dead_code)]
+#[inline]
+pub fn poll_proceed(cx: &mut Context<'_>) -> Poll<()> {
+ HITS.with(|hits| {
+ let n = hits.get();
+ if n == UNCONSTRAINED {
+ // opted out of budgeting
+ Poll::Ready(())
+ } else if n == 0 {
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ } else {
+ hits.set(n.saturating_sub(1));
+ Poll::Ready(())
+ }
+ })
+}
+
+/// Resolves immediately unless the current task has already exceeded its budget.
+///
+/// This should be placed after at least some work has been done. Otherwise a future sufficiently
+/// deep in the task hierarchy may end up never getting to run because of the number of yield
+/// points that inevitably appear before it is even reached. For example:
+///
+/// ```ignore
+/// # use tokio::stream::{Stream, StreamExt};
+/// async fn drop_all<I: Stream + Unpin>(mut input: I) {
+/// while let Some(_) = input.next().await {
+/// tokio::coop::proceed().await;
+/// }
+/// }
+/// ```
+#[allow(unreachable_pub, dead_code)]
+#[inline]
+pub async fn proceed() {
+ use crate::future::poll_fn;
+ poll_fn(|cx| poll_proceed(cx)).await;
+}
+
+pin_project_lite::pin_project! {
+ /// A future that cooperatively yields to the task scheduler when polling,
+ /// if the task's budget is exhausted.
+ ///
+ /// Internally, this is simply a future combinator which calls
+ /// [`poll_proceed`] in its `poll` implementation before polling the wrapped
+ /// future.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::coop::CoopFutureExt;
+ ///
+ /// async { /* ... */ }
+ /// .cooperate()
+ /// .await;
+ /// # }
+ /// ```
+ ///
+ /// [`poll_proceed`]: fn@poll_proceed
+ #[derive(Debug)]
+ #[allow(unreachable_pub, dead_code)]
+ pub struct CoopFuture<F> {
+ #[pin]
+ future: F,
+ }
+}
+
+impl<F: Future> Future for CoopFuture<F> {
+ type Output = F::Output;
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ ready!(poll_proceed(cx));
+ self.project().future.poll(cx)
+ }
+}
+
+impl<F: Future> CoopFuture<F> {
+ /// Returns a new `CoopFuture` wrapping the given future.
+ ///
+ #[allow(unreachable_pub, dead_code)]
+ pub fn new(future: F) -> Self {
+ Self { future }
+ }
+}
+
+// Currently only used by `tokio::sync`; and if we make this combinator public,
+// it should probably be on the `FutureExt` trait instead.
+cfg_sync! {
+ /// Extension trait providing `Future::cooperate` extension method.
+ ///
+ /// Note: if/when the co-op API becomes public, this method should probably be
+ /// provided by `FutureExt`, instead.
+ pub(crate) trait CoopFutureExt: Future {
+ /// Wrap `self` to cooperatively yield to the scheduler when polling, if the
+ /// task's budget is exhausted.
+ fn cooperate(self) -> CoopFuture<Self>
+ where
+ Self: Sized,
+ {
+ CoopFuture::new(self)
+ }
+ }
+
+ impl<F> CoopFutureExt for F where F: Future {}
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::*;
+
+ fn get() -> usize {
+ HITS.with(|hits| hits.get())
+ }
+
+ #[test]
+    fn budgeting() {
+ use tokio_test::*;
+
+ assert_eq!(get(), UNCONSTRAINED);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), UNCONSTRAINED);
+ budget(|| {
+ assert_eq!(get(), BUDGET);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), BUDGET - 1);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), BUDGET - 2);
+ });
+ assert_eq!(get(), UNCONSTRAINED);
+
+ budget(|| {
+ limit(3, || {
+ assert_eq!(get(), 3);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), 2);
+ limit(4, || {
+ assert_eq!(get(), 2);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), 1);
+ });
+ assert_eq!(get(), 1);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), 0);
+ assert_pending!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), 0);
+ assert_pending!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), 0);
+ });
+ assert_eq!(get(), BUDGET - 3);
+ assert_ready!(task::spawn(()).enter(|cx, _| poll_proceed(cx)));
+ assert_eq!(get(), BUDGET - 4);
+ assert_ready!(task::spawn(proceed()).poll());
+ assert_eq!(get(), BUDGET - 5);
+ });
+ }
+}
diff --git a/third_party/rust/tokio/src/fs/canonicalize.rs b/third_party/rust/tokio/src/fs/canonicalize.rs
new file mode 100644
index 0000000000..403662685c
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/canonicalize.rs
@@ -0,0 +1,51 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::{Path, PathBuf};
+
+/// Returns the canonical, absolute form of a path with all intermediate
+/// components normalized and symbolic links resolved.
+///
+/// This is an async version of [`std::fs::canonicalize`][std]
+///
+/// [std]: std::fs::canonicalize
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `realpath` function on Unix
+/// and the `CreateFile` and `GetFinalPathNameByHandle` functions on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// On Windows, this converts the path to use [extended length path][path]
+/// syntax, which allows your program to use longer path names, but means you
+/// can only join backslash-delimited paths to it, and it may be incompatible
+/// with other applications (if passed to the application on the command-line,
+/// or written to a file another application may read).
+///
+/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+/// [path]: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * `path` does not exist.
+/// * A non-final component in path is not a directory.
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+/// use std::io;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+/// let path = fs::canonicalize("../a/../foo.txt").await?;
+/// Ok(())
+/// }
+/// ```
+pub async fn canonicalize(path: impl AsRef<Path>) -> io::Result<PathBuf> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::canonicalize(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/copy.rs b/third_party/rust/tokio/src/fs/copy.rs
new file mode 100644
index 0000000000..d4d4d29c85
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/copy.rs
@@ -0,0 +1,24 @@
+use crate::fs::asyncify;
+use std::path::Path;
+
+/// Copies the contents of one file to another. This function will also copy
+/// the permission bits of the original file to the destination file. This
+/// function will overwrite the contents of `to`.
+///
+/// This is the async equivalent of `std::fs::copy`.
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// fs::copy("foo.txt", "bar.txt").await?;
+/// # Ok(())
+/// # }
+/// ```
+pub async fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64, std::io::Error> {
+ let from = from.as_ref().to_owned();
+ let to = to.as_ref().to_owned();
+ asyncify(|| std::fs::copy(from, to)).await
+}
diff --git a/third_party/rust/tokio/src/fs/create_dir.rs b/third_party/rust/tokio/src/fs/create_dir.rs
new file mode 100644
index 0000000000..e03b04dc4b
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/create_dir.rs
@@ -0,0 +1,52 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new, empty directory at the provided path
+///
+/// This is an async version of [`std::fs::create_dir`][std]
+///
+/// [std]: std::fs::create_dir
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `mkdir` function on Unix
+/// and the `CreateDirectory` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+///
+/// **NOTE**: If a parent of the given path doesn't exist, this function will
+/// return an error. To create a directory and all its missing parents at the
+/// same time, use the [`create_dir_all`] function.
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * User lacks permissions to create directory at `path`.
+/// * A parent of the given path doesn't exist. (To create a directory and all
+/// its missing parents at the same time, use the [`create_dir_all`]
+/// function.)
+/// * `path` already exists.
+///
+/// [`create_dir_all`]: super::create_dir_all()
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+/// use std::io;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+/// fs::create_dir("/some/dir").await?;
+/// Ok(())
+/// }
+/// ```
+pub async fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::create_dir(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/create_dir_all.rs b/third_party/rust/tokio/src/fs/create_dir_all.rs
new file mode 100644
index 0000000000..21f0c82d11
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/create_dir_all.rs
@@ -0,0 +1,53 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Recursively creates a directory and all of its parent components if they
+/// are missing.
+///
+/// This is an async version of [`std::fs::create_dir_all`][std]
+///
+/// [std]: std::fs::create_dir_all
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `mkdir` function on Unix
+/// and the `CreateDirectory` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * If any directory in the path specified by `path` does not already exist
+/// and could not be created. The specific error conditions for when a
+/// directory is being created (after it is determined to not exist) are
+/// outlined by [`fs::create_dir`].
+///
+/// A notable exception is made for situations where any of the directories
+/// specified in `path` could not be created because it was being created concurrently.
+/// Such cases are considered to be successful. That is, calling `create_dir_all`
+/// concurrently from multiple threads or processes is guaranteed not to fail
+/// due to a race condition with itself.
+///
+/// [`fs::create_dir`]: std::fs::create_dir
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// fs::create_dir_all("/some/dir").await?;
+/// Ok(())
+/// }
+/// ```
+pub async fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::create_dir_all(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/file.rs b/third_party/rust/tokio/src/fs/file.rs
new file mode 100644
index 0000000000..a1f22fc9b6
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/file.rs
@@ -0,0 +1,739 @@
+//! Types for working with [`File`].
+//!
+//! [`File`]: File
+
+use self::State::*;
+use crate::fs::{asyncify, sys};
+use crate::io::blocking::Buf;
+use crate::io::{AsyncRead, AsyncSeek, AsyncWrite};
+
+use std::fmt;
+use std::fs::{Metadata, Permissions};
+use std::future::Future;
+use std::io::{self, Seek, SeekFrom};
+use std::path::Path;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::Context;
+use std::task::Poll;
+use std::task::Poll::*;
+
+/// A reference to an open file on the filesystem.
+///
+/// This is a specialized version of [`std::fs::File`][std] for usage from the
+/// Tokio runtime.
+///
+/// An instance of a `File` can be read and/or written depending on what options
+/// it was opened with. Files also implement `AsyncSeek` to alter the logical cursor
+/// that the file contains internally.
+///
+/// Files are automatically closed when they go out of scope.
+///
+/// [std]: std::fs::File
+///
+/// # Examples
+///
+/// Create a new file and asynchronously write bytes to it:
+///
+/// ```no_run
+/// use tokio::fs::File;
+/// use tokio::prelude::*;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let mut file = File::create("foo.txt").await?;
+/// file.write_all(b"hello, world!").await?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Read the contents of a file into a buffer
+///
+/// ```no_run
+/// use tokio::fs::File;
+/// use tokio::prelude::*;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let mut file = File::open("foo.txt").await?;
+///
+/// let mut contents = vec![];
+/// file.read_to_end(&mut contents).await?;
+///
+/// println!("len = {}", contents.len());
+/// # Ok(())
+/// # }
+/// ```
+pub struct File {
+ std: Arc<sys::File>,
+ state: State,
+
+ /// Errors from writes/flushes are returned in write/flush calls. If a write
+ /// error is observed while performing a read, it is saved until the next
+ /// write / flush call.
+ last_write_err: Option<io::ErrorKind>,
+}
+
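+/// Whether the file is idle, holding a reusable I/O buffer, or has a blocking
+/// operation in flight on the backing thread pool.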
+#[derive(Debug)]
+enum State {
+ Idle(Option<Buf>),
+ Busy(sys::Blocking<(Operation, Buf)>),
+}
+
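+/// The result carried back from the blocking thread when a `Busy` operation
+/// completes.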
+#[derive(Debug)]
+enum Operation {
+ Read(io::Result<usize>),
+ Write(io::Result<()>),
+ Seek(io::Result<u64>),
+}
+
+impl File {
+ /// Attempts to open a file in read-only mode.
+ ///
+ /// See [`OpenOptions`] for more details.
+ ///
+ /// [`OpenOptions`]: super::OpenOptions
+ ///
+ /// # Errors
+ ///
+    /// This function will return an error if called from outside of the Tokio
+    /// runtime or if `path` does not already exist. Other errors may also be
+    /// returned according to `OpenOptions::open`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt").await?;
+ ///
+ /// let mut contents = vec![];
+ /// file.read_to_end(&mut contents).await?;
+ ///
+ /// println!("len = {}", contents.len());
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn open(path: impl AsRef<Path>) -> io::Result<File> {
+ let path = path.as_ref().to_owned();
+ let std = asyncify(|| sys::File::open(path)).await?;
+
+ Ok(File::from_std(std))
+ }
+
+ /// Opens a file in write-only mode.
+ ///
+ /// This function will create a file if it does not exist, and will truncate
+ /// it if it does.
+ ///
+ /// See [`OpenOptions`] for more details.
+ ///
+ /// [`OpenOptions`]: super::OpenOptions
+ ///
+ /// # Errors
+ ///
+ /// Results in an error if called from outside of the Tokio runtime or if
+ /// the underlying [`create`] call results in an error.
+ ///
+ /// [`create`]: std::fs::File::create
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ /// file.write_all(b"hello, world!").await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn create(path: impl AsRef<Path>) -> io::Result<File> {
+ let path = path.as_ref().to_owned();
+ let std_file = asyncify(move || sys::File::create(path)).await?;
+ Ok(File::from_std(std_file))
+ }
+
+ /// Converts a [`std::fs::File`][std] to a [`tokio::fs::File`][file].
+ ///
+ /// [std]: std::fs::File
+ /// [file]: File
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// // This line could block. It is not recommended to do this on the Tokio
+ /// // runtime.
+ /// let std_file = std::fs::File::open("foo.txt").unwrap();
+ /// let file = tokio::fs::File::from_std(std_file);
+ /// ```
+ pub fn from_std(std: sys::File) -> File {
+ File {
+ std: Arc::new(std),
+ state: State::Idle(Some(Buf::with_capacity(0))),
+ last_write_err: None,
+ }
+ }
+
+ /// Seeks to an offset, in bytes, in a stream.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// use std::io::SeekFrom;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt").await?;
+ /// file.seek(SeekFrom::Start(6)).await?;
+ ///
+ /// let mut contents = vec![0u8; 10];
+ /// file.read_exact(&mut contents).await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn seek(&mut self, mut pos: SeekFrom) -> io::Result<u64> {
+ self.complete_inflight().await;
+
+ let mut buf = match self.state {
+ Idle(ref mut buf_cell) => buf_cell.take().unwrap(),
+ _ => unreachable!(),
+ };
+
+ // Factor in any unread data from the buf
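+        // (`discard_read` drops the buffered-but-unread bytes and returns the
+        // offset correction to apply, since the OS cursor is ahead of the
+        // logical read position by that amount)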
+ if !buf.is_empty() {
+ let n = buf.discard_read();
+
+ if let SeekFrom::Current(ref mut offset) = pos {
+ *offset += n;
+ }
+ }
+
+ let std = self.std.clone();
+
+ // Start the operation
+ self.state = Busy(sys::run(move || {
+ let res = (&*std).seek(pos);
+ (Operation::Seek(res), buf)
+ }));
+
+ let (op, buf) = match self.state {
+ Idle(_) => unreachable!(),
+ Busy(ref mut rx) => rx.await.unwrap(),
+ };
+
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Seek(res) => res,
+ _ => unreachable!(),
+ }
+ }
+
+ /// Attempts to sync all OS-internal metadata to disk.
+ ///
+ /// This function will attempt to ensure that all in-core data reaches the
+ /// filesystem before returning.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ /// file.write_all(b"hello, world!").await?;
+ /// file.sync_all().await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn sync_all(&mut self) -> io::Result<()> {
+ self.complete_inflight().await;
+
+ let std = self.std.clone();
+ asyncify(move || std.sync_all()).await
+ }
+
+ /// This function is similar to `sync_all`, except that it may not
+ /// synchronize file metadata to the filesystem.
+ ///
+ /// This is intended for use cases that must synchronize content, but don't
+ /// need the metadata on disk. The goal of this method is to reduce disk
+ /// operations.
+ ///
+ /// Note that some platforms may simply implement this in terms of `sync_all`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ /// file.write_all(b"hello, world!").await?;
+ /// file.sync_data().await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn sync_data(&mut self) -> io::Result<()> {
+ self.complete_inflight().await;
+
+ let std = self.std.clone();
+ asyncify(move || std.sync_data()).await
+ }
+
+    /// Truncates or extends the underlying file, updating the size of this file to become `size`.
+ ///
+ /// If the size is less than the current file's size, then the file will be
+ /// shrunk. If it is greater than the current file's size, then the file
+    /// will be extended to `size` and have all of the intermediate data filled in
+ /// with 0s.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the file is not opened for
+ /// writing.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ /// file.write_all(b"hello, world!").await?;
+ /// file.set_len(10).await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn set_len(&mut self, size: u64) -> io::Result<()> {
+ self.complete_inflight().await;
+
+ let mut buf = match self.state {
+ Idle(ref mut buf_cell) => buf_cell.take().unwrap(),
+ _ => unreachable!(),
+ };
+
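+        // If unread data is still buffered, seek the OS cursor back to the
+        // logical position before changing the length (same correction as in
+        // `seek` above).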
+ let seek = if !buf.is_empty() {
+ Some(SeekFrom::Current(buf.discard_read()))
+ } else {
+ None
+ };
+
+ let std = self.std.clone();
+
+ self.state = Busy(sys::run(move || {
+ let res = if let Some(seek) = seek {
+ (&*std).seek(seek).and_then(|_| std.set_len(size))
+ } else {
+ std.set_len(size)
+ }
+ .map(|_| 0); // the value is discarded later
+
+ // Return the result as a seek
+ (Operation::Seek(res), buf)
+ }));
+
+ let (op, buf) = match self.state {
+ Idle(_) => unreachable!(),
+ Busy(ref mut rx) => rx.await?,
+ };
+
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Seek(res) => res.map(|_| ()),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Queries metadata about the underlying file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let file = File::open("foo.txt").await?;
+ /// let metadata = file.metadata().await?;
+ ///
+ /// println!("{:?}", metadata);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn metadata(&self) -> io::Result<Metadata> {
+ let std = self.std.clone();
+ asyncify(move || std.metadata()).await
+ }
+
+    /// Creates a new `File` instance that shares the same underlying file handle
+    /// as the existing `File` instance. Reads, writes, and seeks will affect both
+    /// `File` instances simultaneously.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let file = File::open("foo.txt").await?;
+ /// let file_clone = file.try_clone().await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn try_clone(&self) -> io::Result<File> {
+ let std = self.std.clone();
+ let std_file = asyncify(move || std.try_clone()).await?;
+ Ok(File::from_std(std_file))
+ }
+
+ /// Destructures `File` into a [`std::fs::File`][std]. This function is
+ /// async to allow any in-flight operations to complete.
+ ///
+ /// Use `File::try_into_std` to attempt conversion immediately.
+ ///
+ /// [std]: std::fs::File
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let tokio_file = File::open("foo.txt").await?;
+ /// let std_file = tokio_file.into_std().await;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn into_std(mut self) -> sys::File {
+ self.complete_inflight().await;
+ Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed")
+ }
+
+ /// Tries to immediately destructure `File` into a [`std::fs::File`][std].
+ ///
+ /// [std]: std::fs::File
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error containing the file if some
+ /// operation is in-flight.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let tokio_file = File::open("foo.txt").await?;
+ /// let std_file = tokio_file.try_into_std().unwrap();
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn try_into_std(mut self) -> Result<sys::File, Self> {
+ match Arc::try_unwrap(self.std) {
+ Ok(file) => Ok(file),
+ Err(std_file_arc) => {
+ self.std = std_file_arc;
+ Err(self)
+ }
+ }
+ }
+
+ /// Changes the permissions on the underlying file.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// This function currently corresponds to the `fchmod` function on Unix and
+    /// the `SetFileInformationByHandle` function on Windows. Note that this
+ /// [may change in the future][changes].
+ ///
+ /// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+ ///
+ /// # Errors
+ ///
+    /// This function will return an error if the user lacks permission to
+    /// change attributes on the underlying file. It may also return an error
+    /// in other OS-specific unspecified cases.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let file = File::open("foo.txt").await?;
+ /// let mut perms = file.metadata().await?.permissions();
+ /// perms.set_readonly(true);
+ /// file.set_permissions(perms).await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> {
+ let std = self.std.clone();
+ asyncify(move || std.set_permissions(perm)).await
+ }
+
+ async fn complete_inflight(&mut self) {
+ use crate::future::poll_fn;
+
+ if let Err(e) = poll_fn(|cx| Pin::new(&mut *self).poll_flush(cx)).await {
+ self.last_write_err = Some(e.kind());
+ }
+ }
+}
+
+impl AsyncRead for File {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ dst: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ loop {
+ match self.state {
+ Idle(ref mut buf_cell) => {
+ let mut buf = buf_cell.take().unwrap();
+
+ if !buf.is_empty() {
+ let n = buf.copy_to(dst);
+ *buf_cell = Some(buf);
+ return Ready(Ok(n));
+ }
+
+ buf.ensure_capacity_for(dst);
+ let std = self.std.clone();
+
+ self.state = Busy(sys::run(move || {
+ let res = buf.read_from(&mut &*std);
+ (Operation::Read(res), buf)
+ }));
+ }
+ Busy(ref mut rx) => {
+ let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?;
+
+ match op {
+ Operation::Read(Ok(_)) => {
+ let n = buf.copy_to(dst);
+ self.state = Idle(Some(buf));
+ return Ready(Ok(n));
+ }
+ Operation::Read(Err(e)) => {
+ assert!(buf.is_empty());
+
+ self.state = Idle(Some(buf));
+ return Ready(Err(e));
+ }
+ Operation::Write(Ok(_)) => {
+ assert!(buf.is_empty());
+ self.state = Idle(Some(buf));
+ continue;
+ }
+ Operation::Write(Err(e)) => {
+ assert!(self.last_write_err.is_none());
+ self.last_write_err = Some(e.kind());
+ self.state = Idle(Some(buf));
+ }
+ Operation::Seek(_) => {
+ assert!(buf.is_empty());
+ self.state = Idle(Some(buf));
+ continue;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+impl AsyncSeek for File {
+ fn start_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ mut pos: SeekFrom,
+ ) -> Poll<io::Result<()>> {
+ loop {
+ match self.state {
+ Idle(ref mut buf_cell) => {
+ let mut buf = buf_cell.take().unwrap();
+
+ // Factor in any unread data from the buf
+ if !buf.is_empty() {
+ let n = buf.discard_read();
+
+ if let SeekFrom::Current(ref mut offset) = pos {
+ *offset += n;
+ }
+ }
+
+ let std = self.std.clone();
+
+ self.state = Busy(sys::run(move || {
+ let res = (&*std).seek(pos);
+ (Operation::Seek(res), buf)
+ }));
+
+ return Ready(Ok(()));
+ }
+ Busy(ref mut rx) => {
+ let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Read(_) => {}
+ Operation::Write(Err(e)) => {
+ assert!(self.last_write_err.is_none());
+ self.last_write_err = Some(e.kind());
+ }
+ Operation::Write(_) => {}
+ Operation::Seek(_) => {}
+ }
+ }
+ }
+ }
+ }
+
+ fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ loop {
+ match self.state {
+ Idle(_) => panic!("must call start_seek before calling poll_complete"),
+ Busy(ref mut rx) => {
+ let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Read(_) => {}
+ Operation::Write(Err(e)) => {
+ assert!(self.last_write_err.is_none());
+ self.last_write_err = Some(e.kind());
+ }
+ Operation::Write(_) => {}
+ Operation::Seek(res) => return Ready(res),
+ }
+ }
+ }
+ }
+ }
+}
+
+impl AsyncWrite for File {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ src: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if let Some(e) = self.last_write_err.take() {
+ return Ready(Err(e.into()));
+ }
+
+ loop {
+ match self.state {
+ Idle(ref mut buf_cell) => {
+ let mut buf = buf_cell.take().unwrap();
+
+ let seek = if !buf.is_empty() {
+ Some(SeekFrom::Current(buf.discard_read()))
+ } else {
+ None
+ };
+
+ let n = buf.copy_from(src);
+ let std = self.std.clone();
+
+ self.state = Busy(sys::run(move || {
+ let res = if let Some(seek) = seek {
+ (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std))
+ } else {
+ buf.write_to(&mut &*std)
+ };
+
+ (Operation::Write(res), buf)
+ }));
+
+ return Ready(Ok(n));
+ }
+ Busy(ref mut rx) => {
+ let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Read(_) => {
+ // We don't care about the result here. The fact
+ // that the cursor has advanced will be reflected in
+ // the next iteration of the loop
+ continue;
+ }
+ Operation::Write(res) => {
+ // If the previous write was successful, continue.
+ // Otherwise, error.
+ res?;
+ continue;
+ }
+ Operation::Seek(_) => {
+ // Ignore the seek
+ continue;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ if let Some(e) = self.last_write_err.take() {
+ return Ready(Err(e.into()));
+ }
+
+ let (op, buf) = match self.state {
+ Idle(_) => return Ready(Ok(())),
+ Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?,
+ };
+
+ // The buffer is not used here
+ self.state = Idle(Some(buf));
+
+ match op {
+ Operation::Read(_) => Ready(Ok(())),
+ Operation::Write(res) => Ready(res),
+ Operation::Seek(_) => Ready(Ok(())),
+ }
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl From<sys::File> for File {
+ fn from(std: sys::File) -> Self {
+ Self::from_std(std)
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("tokio::fs::File")
+ .field("std", &self.std)
+ .finish()
+ }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for File {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ self.std.as_raw_fd()
+ }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for File {
+ fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+ self.std.as_raw_handle()
+ }
+}
diff --git a/third_party/rust/tokio/src/fs/hard_link.rs b/third_party/rust/tokio/src/fs/hard_link.rs
new file mode 100644
index 0000000000..50cc17d286
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/hard_link.rs
@@ -0,0 +1,46 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new hard link on the filesystem.
+///
+/// This is an async version of [`std::fs::hard_link`][std]
+///
+/// [std]: std::fs::hard_link
+///
+/// The `dst` path will be a link pointing to the `src` path. Note that systems
+/// often require these two paths to both be located on the same filesystem.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `link` function on Unix
+/// and the `CreateHardLink` function on Windows.
+/// Note that this [may change in the future][changes].
+///
+/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The `src` path is not a file or doesn't exist.
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt
+/// Ok(())
+/// }
+/// ```
+pub async fn hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+ let src = src.as_ref().to_owned();
+ let dst = dst.as_ref().to_owned();
+
+ asyncify(move || std::fs::hard_link(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/metadata.rs b/third_party/rust/tokio/src/fs/metadata.rs
new file mode 100644
index 0000000000..ff9cded79a
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/metadata.rs
@@ -0,0 +1,47 @@
+use crate::fs::asyncify;
+
+use std::fs::Metadata;
+use std::io;
+use std::path::Path;
+
+/// Given a path, queries the file system to get information about a file,
+/// directory, etc.
+///
+/// This is an async version of [`std::fs::metadata`][std]
+///
+/// This function will traverse symbolic links to query information about the
+/// destination file.
+///
+/// # Platform-specific behavior
+///
+/// This function currently corresponds to the `stat` function on Unix and the
+/// `GetFileAttributesEx` function on Windows. Note that this [may change in
+/// the future][changes].
+///
+/// [std]: std::fs::metadata
+/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
+///
+/// # Errors
+///
+/// This function will return an error in the following situations, but is not
+/// limited to just these cases:
+///
+/// * The user lacks permissions to perform `metadata` call on `path`.
+/// * `path` does not exist.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use tokio::fs;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// let attr = fs::metadata("/some/file/path.txt").await?;
+/// // inspect attr ...
+/// Ok(())
+/// }
+/// ```
+pub async fn metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
+ let path = path.as_ref().to_owned();
+ asyncify(|| std::fs::metadata(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/mod.rs b/third_party/rust/tokio/src/fs/mod.rs
new file mode 100644
index 0000000000..3eb0376463
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/mod.rs
@@ -0,0 +1,109 @@
+#![cfg(not(loom))]
+
+//! Asynchronous file and standard stream adaptation.
+//!
+//! This module contains utility methods and adapter types for input/output to
+//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and
+//! filesystem manipulation, for use within (and only within) a Tokio runtime.
+//!
+//! Tasks run by *worker* threads should not block, as this could delay
+//! servicing reactor events. Portable filesystem operations are blocking,
+//! however. This module offers adapters which use a `blocking` annotation
+//! to inform the runtime that a blocking operation is required. When
+//! necessary, this allows the runtime to convert the current thread from a
+//! *worker* to a *backup* thread, where blocking is acceptable.
+//!
+//! ## Usage
+//!
+//! Where possible, users should prefer the provided asynchronous-specific
+//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll`
+//! type. Adaptations also extend to traits like `std::io::Read` where methods
+//! return `std::io::Result`. Be warned that these adapted methods may return
+//! `std::io::ErrorKind::WouldBlock` if a *worker* thread cannot be converted
+//! to a *backup* thread immediately.
+//!
+//! [`AsyncRead`]: crate::io::AsyncRead
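+//!
+//! A small usage sketch (the file name is illustrative):
+//!
+//! ```no_run
+//! use tokio::fs;
+//!
+//! # async fn dox() -> std::io::Result<()> {
+//! let contents = fs::read_to_string("foo.txt").await?;
+//! println!("foo.txt contains {} bytes", contents.len());
+//! # Ok(())
+//! # }
+//! ```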
+
+mod canonicalize;
+pub use self::canonicalize::canonicalize;
+
+mod create_dir;
+pub use self::create_dir::create_dir;
+
+mod create_dir_all;
+pub use self::create_dir_all::create_dir_all;
+
+mod file;
+pub use self::file::File;
+
+mod hard_link;
+pub use self::hard_link::hard_link;
+
+mod metadata;
+pub use self::metadata::metadata;
+
+mod open_options;
+pub use self::open_options::OpenOptions;
+
+pub mod os;
+
+mod read;
+pub use self::read::read;
+
+mod read_dir;
+pub use self::read_dir::{read_dir, DirEntry, ReadDir};
+
+mod read_link;
+pub use self::read_link::read_link;
+
+mod read_to_string;
+pub use self::read_to_string::read_to_string;
+
+mod remove_dir;
+pub use self::remove_dir::remove_dir;
+
+mod remove_dir_all;
+pub use self::remove_dir_all::remove_dir_all;
+
+mod remove_file;
+pub use self::remove_file::remove_file;
+
+mod rename;
+pub use self::rename::rename;
+
+mod set_permissions;
+pub use self::set_permissions::set_permissions;
+
+mod symlink_metadata;
+pub use self::symlink_metadata::symlink_metadata;
+
+mod write;
+pub use self::write::write;
+
+mod copy;
+pub use self::copy::copy;
+
+use std::io;
+
+pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T>
+where
+ F: FnOnce() -> io::Result<T> + Send + 'static,
+ T: Send + 'static,
+{
+ match sys::run(f).await {
+ Ok(res) => res,
+ Err(_) => Err(io::Error::new(
+ io::ErrorKind::Other,
+ "background task failed",
+ )),
+ }
+}
+
+/// Types in this module can be mocked out in tests.
+mod sys {
+ pub(crate) use std::fs::File;
+
+ // TODO: don't rename
+ pub(crate) use crate::runtime::spawn_blocking as run;
+ pub(crate) use crate::task::JoinHandle as Blocking;
+}
diff --git a/third_party/rust/tokio/src/fs/open_options.rs b/third_party/rust/tokio/src/fs/open_options.rs
new file mode 100644
index 0000000000..3210f4b7b5
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/open_options.rs
@@ -0,0 +1,397 @@
+use crate::fs::{asyncify, File};
+
+use std::io;
+use std::path::Path;
+
+/// Options and flags which can be used to configure how a file is opened.
+///
+/// This builder exposes the ability to configure how a [`File`] is opened and
+/// what operations are permitted on the open file. The [`File::open`] and
+/// [`File::create`] methods are aliases for commonly used options using this
+/// builder.
+///
+/// Generally speaking, when using `OpenOptions`, you'll first call [`new`],
+/// then chain calls to methods to set each option, then call [`open`], passing
+/// the path of the file you're trying to open. This will give you a
+/// [`io::Result`][result] with a [`File`] inside that you can further operate
+/// on.
+///
+/// This is a specialized version of [`std::fs::OpenOptions`] for usage from
+/// the Tokio runtime.
+///
+/// `From<std::fs::OpenOptions>` is implemented for more advanced configuration
+/// than the methods provided here.
+///
+/// [`new`]: OpenOptions::new
+/// [`open`]: OpenOptions::open
+/// [result]: std::io::Result
+/// [`File`]: File
+/// [`File::open`]: File::open
+/// [`File::create`]: File::create
+/// [`std::fs::OpenOptions`]: std::fs::OpenOptions
+///
+/// # Examples
+///
+/// Opening a file to read:
+///
+/// ```no_run
+/// use tokio::fs::OpenOptions;
+/// use std::io;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+/// let file = OpenOptions::new()
+/// .read(true)
+/// .open("foo.txt")
+/// .await?;
+///
+/// Ok(())
+/// }
+/// ```
+///
+/// Opening a file for both reading and writing, as well as creating it if it
+/// doesn't exist:
+///
+/// ```no_run
+/// use tokio::fs::OpenOptions;
+/// use std::io;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+/// let file = OpenOptions::new()
+/// .read(true)
+/// .write(true)
+/// .create(true)
+/// .open("foo.txt")
+/// .await?;
+///
+/// Ok(())
+/// }
+/// ```
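+///
+/// Converting an already-configured [`std::fs::OpenOptions`] via the `From`
+/// implementation mentioned above (a minimal sketch):
+///
+/// ```no_run
+/// use tokio::fs::OpenOptions;
+/// use std::io;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+///     let mut std_options = std::fs::OpenOptions::new();
+///     std_options.read(true);
+///
+///     let file = OpenOptions::from(std_options).open("foo.txt").await?;
+///
+///     Ok(())
+/// }
+/// ```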
+#[derive(Clone, Debug)]
+pub struct OpenOptions(std::fs::OpenOptions);
+
+impl OpenOptions {
+ /// Creates a blank new set of options ready for configuration.
+ ///
+ /// All options are initially set to `false`.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::new`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::new
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ ///
+ /// let mut options = OpenOptions::new();
+ /// let future = options.read(true).open("foo.txt");
+ /// ```
+ pub fn new() -> OpenOptions {
+ OpenOptions(std::fs::OpenOptions::new())
+ }
+
+ /// Sets the option for read access.
+ ///
+ /// This option, when true, will indicate that the file should be
+ /// `read`-able if opened.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::read`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::read
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .read(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn read(&mut self, read: bool) -> &mut OpenOptions {
+ self.0.read(read);
+ self
+ }
+
+ /// Sets the option for write access.
+ ///
+ /// This option, when true, will indicate that the file should be
+ /// `write`-able if opened.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::write`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::write
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn write(&mut self, write: bool) -> &mut OpenOptions {
+ self.0.write(write);
+ self
+ }
+
+ /// Sets the option for the append mode.
+ ///
+ /// This option, when true, means that writes will append to a file instead
+ /// of overwriting previous contents. Note that setting
+ /// `.write(true).append(true)` has the same effect as setting only
+ /// `.append(true)`.
+ ///
+ /// For most filesystems, the operating system guarantees that all writes are
+ /// atomic: no writes get mangled because another process writes at the same
+ /// time.
+ ///
+    /// One possibly obvious note when using append mode: make sure that all data
+ /// that belongs together is written to the file in one operation. This
+ /// can be done by concatenating strings before passing them to [`write()`],
+ /// or using a buffered writer (with a buffer of adequate size),
+ /// and calling [`flush()`] when the message is complete.
+ ///
+ /// If a file is opened with both read and append access, beware that after
+ /// opening, and after every write, the position for reading may be set at the
+ /// end of the file. So, before writing, save the current position (using
+ /// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::append`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::append
+ ///
+ /// ## Note
+ ///
+ /// This function doesn't create the file if it doesn't exist. Use the [`create`]
+ /// method to do so.
+ ///
+ /// [`write()`]: crate::io::AsyncWriteExt::write
+ /// [`flush()`]: crate::io::AsyncWriteExt::flush
+ /// [`seek`]: crate::io::AsyncSeekExt::seek
+ /// [`SeekFrom`]: std::io::SeekFrom
+ /// [`Current`]: std::io::SeekFrom::Current
+ /// [`create`]: OpenOptions::create
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .append(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn append(&mut self, append: bool) -> &mut OpenOptions {
+ self.0.append(append);
+ self
+ }
+
+ /// Sets the option for truncating a previous file.
+ ///
+ /// If a file is successfully opened with this option set it will truncate
+ /// the file to 0 length if it already exists.
+ ///
+ /// The file must be opened with write access for truncate to work.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::truncate`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::truncate
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .truncate(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
+ self.0.truncate(truncate);
+ self
+ }
+
+ /// Sets the option for creating a new file.
+ ///
+ /// This option indicates whether a new file will be created if the file
+    /// does not already exist.
+ ///
+ /// In order for the file to be created, [`write`] or [`append`] access must
+ /// be used.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::create`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::create
+ /// [`write`]: OpenOptions::write
+ /// [`append`]: OpenOptions::append
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .create(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn create(&mut self, create: bool) -> &mut OpenOptions {
+ self.0.create(create);
+ self
+ }
+
+ /// Sets the option to always create a new file.
+ ///
+ /// This option indicates whether a new file will be created. No file is
+    /// allowed to exist at the target location, not even a (dangling) symlink.
+ ///
+    /// This option is useful because it is atomic. Otherwise, between checking
+ /// whether a file exists and creating a new one, the file may have been
+ /// created by another process (a TOCTOU race condition / attack).
+ ///
+ /// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are
+ /// ignored.
+ ///
+ /// The file must be opened with write or append access in order to create a
+ /// new file.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::create_new`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::create_new
+ /// [`.create()`]: OpenOptions::create
+ /// [`.truncate()`]: OpenOptions::truncate
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new()
+ /// .write(true)
+ /// .create_new(true)
+ /// .open("foo.txt")
+ /// .await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions {
+ self.0.create_new(create_new);
+ self
+ }
+
+ /// Opens a file at `path` with the options specified by `self`.
+ ///
+ /// This is an async version of [`std::fs::OpenOptions::open`][std]
+ ///
+ /// [std]: std::fs::OpenOptions::open
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error under a number of different
+ /// circumstances. Some of these error conditions are listed here, together
+ /// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of
+    /// the compatibility contract of the function; in particular, the `Other` kind
+    /// might change to more specific kinds in the future.
+ ///
+    /// * [`NotFound`]: The specified file does not exist and neither `create`
+    /// nor `create_new` is set.
+ /// * [`NotFound`]: One of the directory components of the file path does
+ /// not exist.
+ /// * [`PermissionDenied`]: The user lacks permission to get the specified
+ /// access rights for the file.
+ /// * [`PermissionDenied`]: The user lacks permission to open one of the
+ /// directory components of the specified path.
+ /// * [`AlreadyExists`]: `create_new` was specified and the file already
+ /// exists.
+ /// * [`InvalidInput`]: Invalid combinations of open options (truncate
+ /// without write access, no access mode set, etc.).
+ /// * [`Other`]: One of the directory components of the specified file path
+ /// was not, in fact, a directory.
+ /// * [`Other`]: Filesystem-level errors: full disk, write permission
+ /// requested on a read-only file system, exceeded disk quota, too many
+ /// open files, too long filename, too many symbolic links in the
+ /// specified path (Unix-like systems only), etc.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::OpenOptions;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let file = OpenOptions::new().open("foo.txt").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`ErrorKind`]: std::io::ErrorKind
+ /// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists
+ /// [`InvalidInput`]: std::io::ErrorKind::InvalidInput
+ /// [`NotFound`]: std::io::ErrorKind::NotFound
+ /// [`Other`]: std::io::ErrorKind::Other
+ /// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied
+ pub async fn open(&self, path: impl AsRef<Path>) -> io::Result<File> {
+ let path = path.as_ref().to_owned();
+ let opts = self.0.clone();
+
+ let std = asyncify(move || opts.open(path)).await?;
+ Ok(File::from_std(std))
+ }
+}
+
+impl From<std::fs::OpenOptions> for OpenOptions {
+ fn from(options: std::fs::OpenOptions) -> OpenOptions {
+ OpenOptions(options)
+ }
+}
+
+impl Default for OpenOptions {
+ fn default() -> Self {
+ Self::new()
+ }
+}
diff --git a/third_party/rust/tokio/src/fs/os/mod.rs b/third_party/rust/tokio/src/fs/os/mod.rs
new file mode 100644
index 0000000000..f4b8bfb617
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/mod.rs
@@ -0,0 +1,7 @@
+//! OS-specific functionality.
+
+#[cfg(unix)]
+pub mod unix;
+
+#[cfg(windows)]
+pub mod windows;
diff --git a/third_party/rust/tokio/src/fs/os/unix/mod.rs b/third_party/rust/tokio/src/fs/os/unix/mod.rs
new file mode 100644
index 0000000000..3b0bec38bd
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/unix/mod.rs
@@ -0,0 +1,4 @@
+//! Unix-specific extensions to primitives in the `tokio::fs` module.
+
+mod symlink;
+pub use self::symlink::symlink;
diff --git a/third_party/rust/tokio/src/fs/os/unix/symlink.rs b/third_party/rust/tokio/src/fs/os/unix/symlink.rs
new file mode 100644
index 0000000000..22ece7250f
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/unix/symlink.rs
@@ -0,0 +1,18 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new symbolic link on the filesystem.
+///
+/// The `dst` path will be a symbolic link pointing to the `src` path.
+///
+/// This is an async version of [`std::os::unix::fs::symlink`][std]
+///
+/// [std]: std::os::unix::fs::symlink
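+///
+/// # Examples
+///
+/// A minimal sketch (the paths are illustrative):
+///
+/// ```no_run
+/// use tokio::fs::os::unix::symlink;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Make "b.txt" a symbolic link pointing at "a.txt".
+/// symlink("a.txt", "b.txt").await?;
+/// # Ok(())
+/// # }
+/// ```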
+pub async fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+ let src = src.as_ref().to_owned();
+ let dst = dst.as_ref().to_owned();
+
+ asyncify(move || std::os::unix::fs::symlink(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/os/windows/mod.rs b/third_party/rust/tokio/src/fs/os/windows/mod.rs
new file mode 100644
index 0000000000..42eb7bdb92
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/windows/mod.rs
@@ -0,0 +1,7 @@
+//! Windows-specific extensions for the primitives in the `tokio::fs` module.
+
+mod symlink_dir;
+pub use self::symlink_dir::symlink_dir;
+
+mod symlink_file;
+pub use self::symlink_file::symlink_file;
diff --git a/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs b/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs
new file mode 100644
index 0000000000..736e762b48
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/windows/symlink_dir.rs
@@ -0,0 +1,19 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new directory symlink on the filesystem.
+///
+/// The `dst` path will be a directory symbolic link pointing to the `src`
+/// path.
+///
+/// This is an async version of [`std::os::windows::fs::symlink_dir`][std]
+///
+/// [std]: std::os::windows::fs::symlink_dir
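+///
+/// # Examples
+///
+/// A minimal sketch (the paths are illustrative):
+///
+/// ```no_run
+/// use tokio::fs::os::windows::symlink_dir;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Make "logs_link" a directory symlink pointing at "logs".
+/// symlink_dir("logs", "logs_link").await?;
+/// # Ok(())
+/// # }
+/// ```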
+pub async fn symlink_dir(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+ let src = src.as_ref().to_owned();
+ let dst = dst.as_ref().to_owned();
+
+ asyncify(move || std::os::windows::fs::symlink_dir(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs b/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs
new file mode 100644
index 0000000000..07d8e60419
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/os/windows/symlink_file.rs
@@ -0,0 +1,19 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Creates a new file symbolic link on the filesystem.
+///
+/// The `dst` path will be a file symbolic link pointing to the `src`
+/// path.
+///
+/// This is an async version of [`std::os::windows::fs::symlink_file`][std]
+///
+/// [std]: std::os::windows::fs::symlink_file
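+///
+/// # Examples
+///
+/// A minimal sketch (the paths are illustrative):
+///
+/// ```no_run
+/// use tokio::fs::os::windows::symlink_file;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Make "b.txt" a file symlink pointing at "a.txt".
+/// symlink_file("a.txt", "b.txt").await?;
+/// # Ok(())
+/// # }
+/// ```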
+pub async fn symlink_file(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
+ let src = src.as_ref().to_owned();
+ let dst = dst.as_ref().to_owned();
+
+ asyncify(move || std::os::windows::fs::symlink_file(src, dst)).await
+}
diff --git a/third_party/rust/tokio/src/fs/read.rs b/third_party/rust/tokio/src/fs/read.rs
new file mode 100644
index 0000000000..2d80eb5bd3
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/read.rs
@@ -0,0 +1,47 @@
+use crate::fs::asyncify;
+
+use std::{io, path::Path};
+
+/// Reads the entire contents of a file into a bytes vector.
+///
+/// This is an async version of [`std::fs::read`][std]
+///
+/// [std]: std::fs::read
+///
+/// This is a convenience function for using [`File::open`] and [`read_to_end`]
+/// with fewer imports and without an intermediate variable. It pre-allocates a
+/// buffer based on the file size when available, so it is generally faster than
+/// reading into a vector created with `Vec::new()`.
+///
+/// [`File::open`]: super::File::open
+/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end
+///
+/// # Errors
+///
+/// This function will return an error if `path` does not already exist.
+/// Other errors may also be returned according to [`OpenOptions::open`].
+///
+/// [`OpenOptions::open`]: super::OpenOptions::open
+///
+/// It will also return an error if, while reading, it encounters an error
+/// of a kind other than [`ErrorKind::Interrupted`].
+///
+/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+/// use std::net::SocketAddr;
+///
+/// #[tokio::main]
+/// async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
+/// let contents = fs::read("address.txt").await?;
+/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?;
+/// Ok(())
+/// }
+/// ```
+pub async fn read(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::read(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/read_dir.rs b/third_party/rust/tokio/src/fs/read_dir.rs
new file mode 100644
index 0000000000..f9b16c66c5
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/read_dir.rs
@@ -0,0 +1,244 @@
+use crate::fs::{asyncify, sys};
+
+use std::ffi::OsString;
+use std::fs::{FileType, Metadata};
+use std::future::Future;
+use std::io;
+#[cfg(unix)]
+use std::os::unix::fs::DirEntryExt;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::Context;
+use std::task::Poll;
+
+/// Returns a stream over the entries within a directory.
+///
+/// This is an async version of [`std::fs::read_dir`](std::fs::read_dir)
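+///
+/// # Examples
+///
+/// A minimal sketch that lists the entries of the current directory:
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let mut entries = fs::read_dir(".").await?;
+///
+/// while let Some(entry) = entries.next_entry().await? {
+///     println!("{:?}", entry.file_name());
+/// }
+/// # Ok(())
+/// # }
+/// ```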
+pub async fn read_dir(path: impl AsRef<Path>) -> io::Result<ReadDir> {
+ let path = path.as_ref().to_owned();
+ let std = asyncify(|| std::fs::read_dir(path)).await?;
+
+ Ok(ReadDir(State::Idle(Some(std))))
+}
+
+/// Stream of the entries in a directory.
+///
+/// This stream is returned from the [`read_dir`] function of this module and
+/// will yield instances of [`DirEntry`]. Through a [`DirEntry`],
+/// information such as the entry's path and possibly other metadata can be
+/// learned.
+///
+/// # Errors
+///
+/// This [`Stream`] will return an [`Err`] if there's some sort of intermittent
+/// IO error during iteration.
+///
+/// [`read_dir`]: read_dir
+/// [`DirEntry`]: DirEntry
+/// [`Stream`]: crate::stream::Stream
+/// [`Err`]: std::result::Result::Err
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct ReadDir(State);
+
+#[derive(Debug)]
+enum State {
+ Idle(Option<std::fs::ReadDir>),
+ Pending(sys::Blocking<(Option<io::Result<std::fs::DirEntry>>, std::fs::ReadDir)>),
+}
+
+impl ReadDir {
+ /// Returns the next entry in the directory stream.
+ pub async fn next_entry(&mut self) -> io::Result<Option<DirEntry>> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.poll_next_entry(cx)).await
+ }
+
+ #[doc(hidden)]
+ pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<Option<DirEntry>>> {
+ loop {
+ match self.0 {
+ State::Idle(ref mut std) => {
+ let mut std = std.take().unwrap();
+
+ self.0 = State::Pending(sys::run(move || {
+ let ret = std.next();
+ (ret, std)
+ }));
+ }
+ State::Pending(ref mut rx) => {
+ let (ret, std) = ready!(Pin::new(rx).poll(cx))?;
+ self.0 = State::Idle(Some(std));
+
+ let ret = match ret {
+ Some(Ok(std)) => Ok(Some(DirEntry(Arc::new(std)))),
+ Some(Err(e)) => Err(e),
+ None => Ok(None),
+ };
+
+ return Poll::Ready(ret);
+ }
+ }
+ }
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for ReadDir {
+ type Item = io::Result<DirEntry>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(match ready!(self.poll_next_entry(cx)) {
+ Ok(Some(entry)) => Some(Ok(entry)),
+ Ok(None) => None,
+ Err(err) => Some(Err(err)),
+ })
+ }
+}
+
+/// Entries returned by the [`ReadDir`] stream.
+///
+/// [`ReadDir`]: struct@ReadDir
+///
+/// This is a specialized version of [`std::fs::DirEntry`] for usage from the
+/// Tokio runtime.
+///
+/// An instance of `DirEntry` represents an entry inside of a directory on the
+/// filesystem. Each entry can be inspected via methods to learn about the full
+/// path or possibly other metadata through per-platform extension traits.
+#[derive(Debug)]
+pub struct DirEntry(Arc<std::fs::DirEntry>);
+
+impl DirEntry {
+ /// Returns the full path to the file that this entry represents.
+ ///
+ /// The full path is created by joining the original path to `read_dir`
+ /// with the filename of this entry.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut entries = fs::read_dir(".").await?;
+ ///
+ /// while let Some(entry) = entries.next_entry().await? {
+ /// println!("{:?}", entry.path());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// This prints output like:
+ ///
+ /// ```text
+ /// "./whatever.txt"
+ /// "./foo.html"
+ /// "./hello_world.rs"
+ /// ```
+ ///
+ /// The exact text, of course, depends on what files you have in `.`.
+ pub fn path(&self) -> PathBuf {
+ self.0.path()
+ }
+
+ /// Returns the bare file name of this directory entry without any other
+ /// leading path component.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::fs;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut entries = fs::read_dir(".").await?;
+ ///
+ /// while let Some(entry) = entries.next_entry().await? {
+ /// println!("{:?}", entry.file_name());
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn file_name(&self) -> OsString {
+ self.0.file_name()
+ }
+
+ /// Returns the metadata for the file that this entry points at.
+ ///
+ /// This function will not traverse symlinks if this entry points at a
+ /// symlink.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// On Windows this function is cheap to call (no extra system calls
+ /// needed), but on Unix platforms this function is the equivalent of
+ /// calling `symlink_metadata` on the path.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::fs;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut entries = fs::read_dir(".").await?;
+ ///
+ /// while let Some(entry) = entries.next_entry().await? {
+ /// if let Ok(metadata) = entry.metadata().await {
+ /// // Now let's show our entry's permissions!
+ /// println!("{:?}: {:?}", entry.path(), metadata.permissions());
+ /// } else {
+ /// println!("Couldn't get file type for {:?}", entry.path());
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn metadata(&self) -> io::Result<Metadata> {
+ let std = self.0.clone();
+ asyncify(move || std.metadata()).await
+ }
+
+ /// Returns the file type for the file that this entry points at.
+ ///
+ /// This function will not traverse symlinks if this entry points at a
+ /// symlink.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// On Windows and most Unix platforms this function is free (no extra
+ /// system calls needed), but some Unix platforms may require the equivalent
+ /// call to `symlink_metadata` to learn about the target file type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::fs;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut entries = fs::read_dir(".").await?;
+ ///
+ /// while let Some(entry) = entries.next_entry().await? {
+ /// if let Ok(file_type) = entry.file_type().await {
+ /// // Now let's show our entry's file type!
+ /// println!("{:?}: {:?}", entry.path(), file_type);
+ /// } else {
+ /// println!("Couldn't get file type for {:?}", entry.path());
+ /// }
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn file_type(&self) -> io::Result<FileType> {
+ let std = self.0.clone();
+ asyncify(move || std.file_type()).await
+ }
+}
+
+#[cfg(unix)]
+impl DirEntryExt for DirEntry {
+ fn ino(&self) -> u64 {
+ self.0.ino()
+ }
+}
diff --git a/third_party/rust/tokio/src/fs/read_link.rs b/third_party/rust/tokio/src/fs/read_link.rs
new file mode 100644
index 0000000000..6c48c5e156
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/read_link.rs
@@ -0,0 +1,14 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::{Path, PathBuf};
+
+/// Reads a symbolic link, returning the file that the link points to.
+///
+/// This is an async version of [`std::fs::read_link`][std]
+///
+/// [std]: std::fs::read_link
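+///
+/// # Examples
+///
+/// A minimal sketch (the link name is illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let target = fs::read_link("foo_link").await?;
+/// println!("foo_link points at {:?}", target);
+/// # Ok(())
+/// # }
+/// ```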
+pub async fn read_link(path: impl AsRef<Path>) -> io::Result<PathBuf> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::read_link(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/read_to_string.rs b/third_party/rust/tokio/src/fs/read_to_string.rs
new file mode 100644
index 0000000000..c743bb4ddc
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/read_to_string.rs
@@ -0,0 +1,24 @@
+use crate::fs::asyncify;
+
+use std::{io, path::Path};
+
+/// Creates a future which will open a file for reading, read the entire
+/// contents into a string, and return that string.
+///
+/// This is the async equivalent of `std::fs::read_to_string`.
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let contents = fs::read_to_string("foo.txt").await?;
+/// println!("foo.txt contains {} bytes", contents.len());
+/// # Ok(())
+/// # }
+/// ```
+pub async fn read_to_string(path: impl AsRef<Path>) -> io::Result<String> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::read_to_string(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/remove_dir.rs b/third_party/rust/tokio/src/fs/remove_dir.rs
new file mode 100644
index 0000000000..6e7cbd08f6
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/remove_dir.rs
@@ -0,0 +1,12 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Removes an existing, empty directory.
+///
+/// This is an async version of [`std::fs::remove_dir`](std::fs::remove_dir)
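+///
+/// # Examples
+///
+/// A minimal sketch (the directory name is illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // The directory must be empty for this to succeed.
+/// fs::remove_dir("empty_dir").await?;
+/// # Ok(())
+/// # }
+/// ```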
+pub async fn remove_dir(path: impl AsRef<Path>) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::remove_dir(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/remove_dir_all.rs b/third_party/rust/tokio/src/fs/remove_dir_all.rs
new file mode 100644
index 0000000000..3b2b2e0453
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/remove_dir_all.rs
@@ -0,0 +1,14 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Removes a directory at this path, after removing all its contents. Use carefully!
+///
+/// This is an async version of [`std::fs::remove_dir_all`][std]
+///
+/// [std]: https://doc.rust-lang.org/std/fs/fn.remove_dir_all.html
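+///
+/// # Examples
+///
+/// A minimal sketch (the directory name is illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Removes "some_dir" and everything inside it.
+/// fs::remove_dir_all("some_dir").await?;
+/// # Ok(())
+/// # }
+/// ```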
+pub async fn remove_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::remove_dir_all(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/remove_file.rs b/third_party/rust/tokio/src/fs/remove_file.rs
new file mode 100644
index 0000000000..d22a5bfc88
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/remove_file.rs
@@ -0,0 +1,18 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Removes a file from the filesystem.
+///
+/// Note that there is no guarantee that the file is immediately deleted (e.g.
+/// depending on platform, other open file descriptors may prevent immediate
+/// removal).
+///
+/// This is an async version of [`std::fs::remove_file`][std]
+///
+/// [std]: std::fs::remove_file
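+///
+/// # Examples
+///
+/// A minimal sketch (the file name is illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// fs::remove_file("foo.txt").await?;
+/// # Ok(())
+/// # }
+/// ```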
+pub async fn remove_file(path: impl AsRef<Path>) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(move || std::fs::remove_file(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/rename.rs b/third_party/rust/tokio/src/fs/rename.rs
new file mode 100644
index 0000000000..4f980821d2
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/rename.rs
@@ -0,0 +1,17 @@
+use crate::fs::asyncify;
+
+use std::io;
+use std::path::Path;
+
+/// Renames a file or directory to a new name, replacing the original file if
+/// `to` already exists.
+///
+/// This will not work if the new name is on a different mount point.
+///
+/// This is an async version of [`std::fs::rename`](std::fs::rename)
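+///
+/// # Examples
+///
+/// A minimal sketch (the paths are illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// fs::rename("a.txt", "b.txt").await?; // Replaces b.txt if it exists
+/// # Ok(())
+/// # }
+/// ```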
+pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
+ let from = from.as_ref().to_owned();
+ let to = to.as_ref().to_owned();
+
+ asyncify(move || std::fs::rename(from, to)).await
+}
diff --git a/third_party/rust/tokio/src/fs/set_permissions.rs b/third_party/rust/tokio/src/fs/set_permissions.rs
new file mode 100644
index 0000000000..b6249d13f0
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/set_permissions.rs
@@ -0,0 +1,15 @@
+use crate::fs::asyncify;
+
+use std::fs::Permissions;
+use std::io;
+use std::path::Path;
+
+/// Changes the permissions found on a file or a directory.
+///
+/// This is an async version of [`std::fs::set_permissions`][std]
+///
+/// [std]: https://doc.rust-lang.org/std/fs/fn.set_permissions.html
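+///
+/// # Examples
+///
+/// A minimal sketch that marks a file read-only (the file name is
+/// illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// let mut perms = fs::metadata("foo.txt").await?.permissions();
+/// perms.set_readonly(true);
+/// fs::set_permissions("foo.txt", perms).await?;
+/// # Ok(())
+/// # }
+/// ```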
+pub async fn set_permissions(path: impl AsRef<Path>, perm: Permissions) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ asyncify(|| std::fs::set_permissions(path, perm)).await
+}
diff --git a/third_party/rust/tokio/src/fs/symlink_metadata.rs b/third_party/rust/tokio/src/fs/symlink_metadata.rs
new file mode 100644
index 0000000000..682b43a70e
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/symlink_metadata.rs
@@ -0,0 +1,15 @@
+use crate::fs::asyncify;
+
+use std::fs::Metadata;
+use std::io;
+use std::path::Path;
+
+/// Queries the file system metadata for a path.
+///
+/// This is an async version of [`std::fs::symlink_metadata`][std]
+///
+/// [std]: https://doc.rust-lang.org/std/fs/fn.symlink_metadata.html
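+///
+/// # Examples
+///
+/// A minimal sketch (the path is illustrative):
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // Unlike `metadata`, this does not follow the link itself.
+/// let attr = fs::symlink_metadata("foo_link").await?;
+/// println!("is symlink: {}", attr.file_type().is_symlink());
+/// # Ok(())
+/// # }
+/// ```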
+pub async fn symlink_metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
+ let path = path.as_ref().to_owned();
+ asyncify(|| std::fs::symlink_metadata(path)).await
+}
diff --git a/third_party/rust/tokio/src/fs/write.rs b/third_party/rust/tokio/src/fs/write.rs
new file mode 100644
index 0000000000..0114cab8a8
--- /dev/null
+++ b/third_party/rust/tokio/src/fs/write.rs
@@ -0,0 +1,25 @@
+use crate::fs::asyncify;
+
+use std::{io, path::Path};
+
+/// Creates a future that will open a file for writing and write the entire
+/// contents of `contents` to it.
+///
+/// This is the async equivalent of `std::fs::write`.
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::fs;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// fs::write("foo.txt", b"Hello world!").await?;
+/// # Ok(())
+/// # }
+/// ```
+pub async fn write<C: AsRef<[u8]> + Unpin>(path: impl AsRef<Path>, contents: C) -> io::Result<()> {
+ let path = path.as_ref().to_owned();
+ let contents = contents.as_ref().to_owned();
+
+ asyncify(move || std::fs::write(path, contents)).await
+}
diff --git a/third_party/rust/tokio/src/future/maybe_done.rs b/third_party/rust/tokio/src/future/maybe_done.rs
new file mode 100644
index 0000000000..1e083ad7fd
--- /dev/null
+++ b/third_party/rust/tokio/src/future/maybe_done.rs
@@ -0,0 +1,76 @@
+//! Definition of the `MaybeDone` combinator
+
+use std::future::Future;
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// A future that may have completed.
+#[derive(Debug)]
+pub enum MaybeDone<Fut: Future> {
+ /// A not-yet-completed future
+ Future(Fut),
+ /// The output of the completed future
+ Done(Fut::Output),
+ /// The empty variant after the result of a [`MaybeDone`] has been
+ /// taken using the [`take_output`](MaybeDone::take_output) method.
+ Gone,
+}
+
+// Safe because we never generate `Pin<&mut Fut::Output>`
+impl<Fut: Future + Unpin> Unpin for MaybeDone<Fut> {}
+
+/// Wraps a future into a `MaybeDone`
+pub fn maybe_done<Fut: Future>(future: Fut) -> MaybeDone<Fut> {
+ MaybeDone::Future(future)
+}
+
+impl<Fut: Future> MaybeDone<Fut> {
+ /// Returns an [`Option`] containing a mutable reference to the output of the future.
+ /// The output of this method will be [`Some`] if and only if the inner
+ /// future has been completed and [`take_output`](MaybeDone::take_output)
+ /// has not yet been called.
+ pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> {
+ unsafe {
+ let this = self.get_unchecked_mut();
+ match this {
+ MaybeDone::Done(res) => Some(res),
+ _ => None,
+ }
+ }
+ }
+
+ /// Attempts to take the output of a `MaybeDone` without driving it
+ /// towards completion.
+ #[inline]
+ pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Output> {
+ unsafe {
+ let this = self.get_unchecked_mut();
+ match this {
+ MaybeDone::Done(_) => {}
+ MaybeDone::Future(_) | MaybeDone::Gone => return None,
+ };
+ if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) {
+ Some(output)
+ } else {
+ unreachable!()
+ }
+ }
+ }
+}
+
+impl<Fut: Future> Future for MaybeDone<Fut> {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let res = unsafe {
+ match self.as_mut().get_unchecked_mut() {
+ MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)),
+ MaybeDone::Done(_) => return Poll::Ready(()),
+ MaybeDone::Gone => panic!("MaybeDone polled after value taken"),
+ }
+ };
+ self.set(MaybeDone::Done(res));
+ Poll::Ready(())
+ }
+}
diff --git a/third_party/rust/tokio/src/future/mod.rs b/third_party/rust/tokio/src/future/mod.rs
new file mode 100644
index 0000000000..770753f319
--- /dev/null
+++ b/third_party/rust/tokio/src/future/mod.rs
@@ -0,0 +1,15 @@
+#![allow(unused_imports, dead_code)]
+
+//! Asynchronous values.
+
+mod maybe_done;
+pub use maybe_done::{maybe_done, MaybeDone};
+
+mod poll_fn;
+pub use poll_fn::poll_fn;
+
+mod ready;
+pub(crate) use ready::{ok, Ready};
+
+mod try_join;
+pub(crate) use try_join::try_join3;
diff --git a/third_party/rust/tokio/src/future/pending.rs b/third_party/rust/tokio/src/future/pending.rs
new file mode 100644
index 0000000000..287e836fd3
--- /dev/null
+++ b/third_party/rust/tokio/src/future/pending.rs
@@ -0,0 +1,44 @@
+use std::pin::Pin;
+use std::future::Future;
+use std::marker;
+use std::task::{Context, Poll};
+
+/// Future for the [`pending()`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+struct Pending<T> {
+ _data: marker::PhantomData<T>,
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// The returned future will forever return [`Poll::Pending`].
+///
+/// # Examples
+///
+/// ```no_run
+/// use tokio::future;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// future::pending().await;
+/// unreachable!();
+/// }
+/// ```
+pub async fn pending() -> ! {
+ Pending {
+ _data: marker::PhantomData,
+ }
+ .await
+}
+
+impl<T> Future for Pending<T> {
+ type Output = !;
+
+    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
+ Poll::Pending
+ }
+}
+
+impl<T> Unpin for Pending<T> {}
diff --git a/third_party/rust/tokio/src/future/poll_fn.rs b/third_party/rust/tokio/src/future/poll_fn.rs
new file mode 100644
index 0000000000..9b3d1370ea
--- /dev/null
+++ b/third_party/rust/tokio/src/future/poll_fn.rs
@@ -0,0 +1,38 @@
+//! Definition of the `PollFn` adapter combinator
+
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`poll_fn`] function.
+pub struct PollFn<F> {
+ f: F,
+}
+
+impl<F> Unpin for PollFn<F> {}
+
+/// Creates a new future wrapping around a function returning [`Poll`].
+pub fn poll_fn<T, F>(f: F) -> PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ PollFn { f }
+}
+
+impl<F> fmt::Debug for PollFn<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PollFn").finish()
+ }
+}
+
+impl<T, F> Future for PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ type Output = T;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ (&mut self.f)(cx)
+ }
+}
diff --git a/third_party/rust/tokio/src/future/ready.rs b/third_party/rust/tokio/src/future/ready.rs
new file mode 100644
index 0000000000..d74f999e5d
--- /dev/null
+++ b/third_party/rust/tokio/src/future/ready.rs
@@ -0,0 +1,27 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`ready`](ready()) function.
+///
+/// `pub` in order to use the future as an associated type in a sealed trait.
+#[derive(Debug)]
+// Used as an associated type in a "sealed" trait.
+#[allow(unreachable_pub)]
+pub struct Ready<T>(Option<T>);
+
+impl<T> Unpin for Ready<T> {}
+
+impl<T> Future for Ready<T> {
+ type Output = T;
+
+ #[inline]
+ fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
+ Poll::Ready(self.0.take().unwrap())
+ }
+}
+
+/// Creates a future that is immediately ready with a success value.
+pub(crate) fn ok<T, E>(t: T) -> Ready<Result<T, E>> {
+ Ready(Some(Ok(t)))
+}
diff --git a/third_party/rust/tokio/src/future/try_join.rs b/third_party/rust/tokio/src/future/try_join.rs
new file mode 100644
index 0000000000..5bd80dc89a
--- /dev/null
+++ b/third_party/rust/tokio/src/future/try_join.rs
@@ -0,0 +1,82 @@
+use crate::future::{maybe_done, MaybeDone};
+
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pub(crate) fn try_join3<T1, F1, T2, F2, T3, F3, E>(
+ future1: F1,
+ future2: F2,
+ future3: F3,
+) -> TryJoin3<F1, F2, F3>
+where
+ F1: Future<Output = Result<T1, E>>,
+ F2: Future<Output = Result<T2, E>>,
+ F3: Future<Output = Result<T3, E>>,
+{
+ TryJoin3 {
+ future1: maybe_done(future1),
+ future2: maybe_done(future2),
+ future3: maybe_done(future3),
+ }
+}
+
+pin_project! {
+ pub(crate) struct TryJoin3<F1, F2, F3>
+ where
+ F1: Future,
+ F2: Future,
+ F3: Future,
+ {
+ #[pin]
+ future1: MaybeDone<F1>,
+ #[pin]
+ future2: MaybeDone<F2>,
+ #[pin]
+ future3: MaybeDone<F3>,
+ }
+}
+
+impl<T1, F1, T2, F2, T3, F3, E> Future for TryJoin3<F1, F2, F3>
+where
+ F1: Future<Output = Result<T1, E>>,
+ F2: Future<Output = Result<T2, E>>,
+ F3: Future<Output = Result<T3, E>>,
+{
+ type Output = Result<(T1, T2, T3), E>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut all_done = true;
+
+ let mut me = self.project();
+
+ if me.future1.as_mut().poll(cx).is_pending() {
+ all_done = false;
+ } else if me.future1.as_mut().output_mut().unwrap().is_err() {
+ return Poll::Ready(Err(me.future1.take_output().unwrap().err().unwrap()));
+ }
+
+ if me.future2.as_mut().poll(cx).is_pending() {
+ all_done = false;
+ } else if me.future2.as_mut().output_mut().unwrap().is_err() {
+ return Poll::Ready(Err(me.future2.take_output().unwrap().err().unwrap()));
+ }
+
+ if me.future3.as_mut().poll(cx).is_pending() {
+ all_done = false;
+ } else if me.future3.as_mut().output_mut().unwrap().is_err() {
+ return Poll::Ready(Err(me.future3.take_output().unwrap().err().unwrap()));
+ }
+
+ if all_done {
+ Poll::Ready(Ok((
+ me.future1.take_output().unwrap().ok().unwrap(),
+ me.future2.take_output().unwrap().ok().unwrap(),
+ me.future3.take_output().unwrap().ok().unwrap(),
+ )))
+ } else {
+ Poll::Pending
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/async_buf_read.rs b/third_party/rust/tokio/src/io/async_buf_read.rs
new file mode 100644
index 0000000000..1ab73cd9b7
--- /dev/null
+++ b/third_party/rust/tokio/src/io/async_buf_read.rs
@@ -0,0 +1,115 @@
+use crate::io::AsyncRead;
+
+use std::io;
+use std::ops::DerefMut;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Reads bytes asynchronously.
+///
+/// This trait is the asynchronous counterpart of [`std::io::BufRead`] and indicates that an I/O object is
+/// **non-blocking**. All non-blocking I/O objects must return an error when
+/// bytes are unavailable instead of blocking the current thread.
+///
+/// Utilities for working with `AsyncBufRead` values are provided by
+/// [`AsyncBufReadExt`].
+///
+/// [`std::io::BufRead`]: std::io::BufRead
+/// [`AsyncBufReadExt`]: crate::io::AsyncBufReadExt
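+///
+/// # Examples
+///
+/// A small sketch using the `&[u8]` implementation below together with
+/// [`AsyncBufReadExt::read_line`] (this assumes the `io-util` feature, which
+/// provides the extension trait):
+///
+/// ```
+/// use tokio::io::AsyncBufReadExt;
+///
+/// # async fn dox() -> std::io::Result<()> {
+/// // `&[u8]` implements `AsyncBufRead`.
+/// let mut reader: &[u8] = b"hello\nworld\n";
+///
+/// let mut line = String::new();
+/// reader.read_line(&mut line).await?;
+/// assert_eq!(line, "hello\n");
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`AsyncBufReadExt::read_line`]: crate::io::AsyncBufReadExt::read_line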
+pub trait AsyncBufRead: AsyncRead {
+ /// Attempts to return the contents of the internal buffer, filling it with more data
+ /// from the inner reader if it is empty.
+ ///
+ /// On success, returns `Poll::Ready(Ok(buf))`.
+ ///
+ /// If no data is available for reading, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// readable or is closed.
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`consume`] method to function properly. When calling this
+ /// method, none of the contents will be "read" in the sense that later
+ /// calling [`poll_read`] may return the same contents. As such, [`consume`] must
+ /// be called with the number of bytes that are consumed from this buffer to
+ /// ensure that the bytes are never returned twice.
+ ///
+ /// An empty buffer returned indicates that the stream has reached EOF.
+ ///
+ /// [`poll_read`]: AsyncRead::poll_read
+ /// [`consume`]: AsyncBufRead::consume
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>>;
+
+ /// Tells this buffer that `amt` bytes have been consumed from the buffer,
+ /// so they should no longer be returned in calls to [`poll_read`].
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`poll_fill_buf`] method to function properly. This function does
+    /// not perform any I/O; it simply informs this object that some amount of
+ /// its buffer, returned from [`poll_fill_buf`], has been consumed and should
+ /// no longer be returned. As such, this function may do odd things if
+ /// [`poll_fill_buf`] isn't called before calling it.
+ ///
+ /// The `amt` must be `<=` the number of bytes in the buffer returned by
+ /// [`poll_fill_buf`].
+ ///
+ /// [`poll_read`]: AsyncRead::poll_read
+ /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf
+ fn consume(self: Pin<&mut Self>, amt: usize);
+}
+
+macro_rules! deref_async_buf_read {
+ () => {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>)
+ -> Poll<io::Result<&[u8]>>
+ {
+ Pin::new(&mut **self.get_mut()).poll_fill_buf(cx)
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ Pin::new(&mut **self).consume(amt)
+ }
+ }
+}
+
+impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for Box<T> {
+ deref_async_buf_read!();
+}
+
+impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for &mut T {
+ deref_async_buf_read!();
+}
+
+impl<P> AsyncBufRead for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: AsyncBufRead,
+{
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ self.get_mut().as_mut().poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.get_mut().as_mut().consume(amt)
+ }
+}
+
+impl AsyncBufRead for &[u8] {
+ fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ Poll::Ready(Ok(*self))
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ *self = &self[amt..];
+ }
+}
+
+impl<T: AsRef<[u8]> + Unpin> AsyncBufRead for io::Cursor<T> {
+ fn poll_fill_buf(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ Poll::Ready(io::BufRead::fill_buf(self.get_mut()))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ io::BufRead::consume(self.get_mut(), amt)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/async_read.rs b/third_party/rust/tokio/src/io/async_read.rs
new file mode 100644
index 0000000000..de08d65810
--- /dev/null
+++ b/third_party/rust/tokio/src/io/async_read.rs
@@ -0,0 +1,203 @@
+use bytes::BufMut;
+use std::io;
+use std::mem::MaybeUninit;
+use std::ops::DerefMut;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Reads bytes from a source.
+///
+/// This trait is analogous to the [`std::io::Read`] trait, but integrates with
+/// the asynchronous task system. In particular, the [`poll_read`] method,
+/// unlike [`Read::read`], will automatically queue the current task for wakeup
+/// and return if data is not yet available, rather than blocking the calling
+/// thread.
+///
+/// Specifically, this means that the `poll_read` function will return one of
+/// the following:
+///
+/// * `Poll::Ready(Ok(n))` means that `n` bytes of data was immediately read
+/// and placed into the output buffer, where `n` == 0 implies that EOF has
+/// been reached.
+///
+/// * `Poll::Pending` means that no data was read into the buffer
+/// provided. The I/O object is not currently readable but may become readable
+/// in the future. Most importantly, **the current future's task is scheduled
+/// to get unparked when the object is readable**. This means that like
+/// `Future::poll` you'll receive a notification when the I/O object is
+/// readable again.
+///
+/// * `Poll::Ready(Err(e))` means that a standard I/O error occurred on the
+/// underlying object.
+///
+/// This trait importantly means that the `read` method only works in the
+/// context of a future's task. The object may panic if used outside of a task.
+///
+/// Utilities for working with `AsyncRead` values are provided by
+/// [`AsyncReadExt`].
+///
+/// [`poll_read`]: AsyncRead::poll_read
+/// [`std::io::Read`]: std::io::Read
+/// [`Read::read`]: std::io::Read::read
+/// [`AsyncReadExt`]: crate::io::AsyncReadExt
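+///
+/// # Examples
+///
+/// A minimal sketch of driving `poll_read` by hand with `poll_fn` (assuming
+/// the `futures` crate), using the `&[u8]` implementation provided below;
+/// most code would use [`AsyncReadExt`] methods instead:
+///
+/// ```
+/// use tokio::io::AsyncRead;
+/// use futures::future::poll_fn;
+/// use std::pin::Pin;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// let mut reader: &[u8] = b"hello";
+/// let mut buf = [0u8; 8];
+/// // An in-memory reader is always ready, so this completes immediately.
+/// let n = poll_fn(|cx| Pin::new(&mut reader).poll_read(cx, &mut buf)).await?;
+/// assert_eq!(&buf[..n], &b"hello"[..]);
+/// Ok(())
+/// }
+/// ```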
+pub trait AsyncRead {
+ /// Prepares an uninitialized buffer to be safe to pass to `read`. Returns
+ /// `true` if the supplied buffer was zeroed out.
+ ///
+ /// While it would be highly unusual, implementations of [`io::Read`] are
+ /// able to read data from the buffer passed as an argument. Because of
+ /// this, the buffer passed to [`io::Read`] must be initialized memory. In
+ /// situations where large numbers of buffers are used, constantly having to
+ /// zero out buffers can be expensive.
+ ///
+ /// This function does any necessary work to prepare an uninitialized buffer
+ /// to be safe to pass to `read`. If `read` guarantees to never attempt to
+ /// read data out of the supplied buffer, then `prepare_uninitialized_buffer`
+ /// doesn't need to do any work.
+ ///
+ /// If this function returns `true`, then the memory has been zeroed out.
+ /// This allows implementations of `AsyncRead` which are composed of
+ /// multiple subimplementations to efficiently implement
+ /// `prepare_uninitialized_buffer`.
+ ///
+ /// This function isn't actually `unsafe` to call but is `unsafe` to
+ /// implement. The implementer must ensure that either the whole `buf` has
+ /// been zeroed or that `poll_read_buf()` overwrites the buffer without
+ /// reading it and returns a correct value.
+ ///
+ /// This function is called from [`poll_read_buf`].
+ ///
+ /// # Safety
+ ///
+ /// Implementations that return `false` must never read from data slices
+ /// that they did not write to.
+ ///
+ /// [`io::Read`]: std::io::Read
+ /// [`poll_read_buf`]: #method.poll_read_buf
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ for x in buf {
+ *x.as_mut_ptr() = 0;
+ }
+
+ true
+ }
+
+ /// Attempts to read from the `AsyncRead` into `buf`.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_read))`.
+ ///
+ /// If no data is available for reading, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker()`) to receive a notification when the object becomes
+ /// readable or is closed.
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>>;
+
+ /// Pulls some bytes from this source into the specified `BufMut`, returning
+ /// how many bytes were read.
+ ///
+ /// The `buf` provided will have bytes read into it and the internal cursor
+ /// will be advanced if any bytes were read. Note that this method typically
+ /// will not reallocate the buffer provided.
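+ ///
+ /// A minimal sketch (assuming the `bytes` and `futures` crates) of reading
+ /// into a `BytesMut` with `poll_read_buf`:
+ ///
+ /// ```
+ /// use tokio::io::AsyncRead;
+ /// use bytes::BytesMut;
+ /// use futures::future::poll_fn;
+ /// use std::pin::Pin;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> std::io::Result<()> {
+ /// let mut reader: &[u8] = b"hello";
+ /// let mut buf = BytesMut::with_capacity(16);
+ /// // The internal cursor of `buf` is advanced by the bytes read.
+ /// let n = poll_fn(|cx| Pin::new(&mut reader).poll_read_buf(cx, &mut buf)).await?;
+ /// assert_eq!(&buf[..n], &b"hello"[..]);
+ /// Ok(())
+ /// }
+ /// ```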
+ fn poll_read_buf<B: BufMut>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<io::Result<usize>>
+ where
+ Self: Sized,
+ {
+ if !buf.has_remaining_mut() {
+ return Poll::Ready(Ok(0));
+ }
+
+ unsafe {
+ let n = {
+ let b = buf.bytes_mut();
+
+ self.prepare_uninitialized_buffer(b);
+
+ // Convert to `&mut [u8]`
+ let b = &mut *(b as *mut [MaybeUninit<u8>] as *mut [u8]);
+
+ let n = ready!(self.poll_read(cx, b))?;
+ assert!(n <= b.len(), "Bad AsyncRead implementation, more bytes were reported as read than the buffer can hold");
+ n
+ };
+
+ buf.advance_mut(n);
+ Poll::Ready(Ok(n))
+ }
+ }
+}
+
+macro_rules! deref_async_read {
+ () => {
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ (**self).prepare_uninitialized_buffer(buf)
+ }
+
+ fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8])
+ -> Poll<io::Result<usize>>
+ {
+ Pin::new(&mut **self).poll_read(cx, buf)
+ }
+ }
+}
+
+impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for Box<T> {
+ deref_async_read!();
+}
+
+impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for &mut T {
+ deref_async_read!();
+}
+
+impl<P> AsyncRead for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: AsyncRead,
+{
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ (**self).prepare_uninitialized_buffer(buf)
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_mut().as_mut().poll_read(cx, buf)
+ }
+}
+
+impl AsyncRead for &[u8] {
+ unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Read::read(self.get_mut(), buf))
+ }
+}
+
+impl<T: AsRef<[u8]> + Unpin> AsyncRead for io::Cursor<T> {
+ unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Read::read(self.get_mut(), buf))
+ }
+}
diff --git a/third_party/rust/tokio/src/io/async_seek.rs b/third_party/rust/tokio/src/io/async_seek.rs
new file mode 100644
index 0000000000..0be9c90d56
--- /dev/null
+++ b/third_party/rust/tokio/src/io/async_seek.rs
@@ -0,0 +1,104 @@
+use std::io::{self, SeekFrom};
+use std::ops::DerefMut;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Seeks bytes asynchronously.
+///
+/// This trait is analogous to the [`std::io::Seek`] trait, but integrates
+/// with the asynchronous task system. In particular, the `start_seek`
+/// method, unlike [`Seek::seek`], will not block the calling thread.
+///
+/// Utilities for working with `AsyncSeek` values are provided by
+/// [`AsyncSeekExt`].
+///
+/// [`std::io::Seek`]: std::io::Seek
+/// [`Seek::seek`]: std::io::Seek::seek()
+/// [`AsyncSeekExt`]: crate::io::AsyncSeekExt
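+///
+/// # Examples
+///
+/// A minimal sketch of the two-step seek protocol, driven by hand with
+/// `poll_fn` (assuming the `futures` crate) on the `std::io::Cursor`
+/// implementation provided below:
+///
+/// ```
+/// use tokio::io::AsyncSeek;
+/// use futures::future::poll_fn;
+/// use std::io::{Cursor, SeekFrom};
+/// use std::pin::Pin;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// let mut cursor = Cursor::new(vec![0u8; 16]);
+/// // Submit the seek, then wait for it to complete.
+/// poll_fn(|cx| Pin::new(&mut cursor).start_seek(cx, SeekFrom::Start(4))).await?;
+/// let pos = poll_fn(|cx| Pin::new(&mut cursor).poll_complete(cx)).await?;
+/// assert_eq!(pos, 4);
+/// Ok(())
+/// }
+/// ```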
+pub trait AsyncSeek {
+ /// Attempts to seek to an offset, in bytes, in a stream.
+ ///
+ /// A seek beyond the end of a stream is allowed, but behavior is defined
+ /// by the implementation.
+ ///
+ /// If this function returns successfully, then the job has been submitted.
+ /// To find out when it completes, call `poll_complete`.
+ fn start_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ position: SeekFrom,
+ ) -> Poll<io::Result<()>>;
+
+ /// Waits for a seek operation to complete.
+ ///
+ /// If the seek operation completed successfully,
+ /// this method returns the new position from the start of the stream.
+ /// That position can be used later with [`SeekFrom::Start`].
+ ///
+ /// # Errors
+ ///
+ /// Seeking to a negative offset is considered an error.
+ ///
+ /// # Panics
+ ///
+ /// Calling this method without calling `start_seek` first is an error and
+ /// implementations may panic.
+ fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>>;
+}
+
+macro_rules! deref_async_seek {
+ () => {
+ fn start_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<()>> {
+ Pin::new(&mut **self).start_seek(cx, pos)
+ }
+
+ fn poll_complete(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<u64>> {
+ Pin::new(&mut **self).poll_complete(cx)
+ }
+ }
+}
+
+impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for Box<T> {
+ deref_async_seek!();
+}
+
+impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for &mut T {
+ deref_async_seek!();
+}
+
+impl<P> AsyncSeek for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: AsyncSeek,
+{
+ fn start_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<()>> {
+ self.get_mut().as_mut().start_seek(cx, pos)
+ }
+
+ fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ self.get_mut().as_mut().poll_complete(cx)
+ }
+}
+
+impl<T: AsRef<[u8]> + Unpin> AsyncSeek for io::Cursor<T> {
+ fn start_seek(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Seek::seek(&mut *self, pos).map(drop))
+ }
+
+ fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ Poll::Ready(Ok(self.get_mut().position()))
+ }
+}
diff --git a/third_party/rust/tokio/src/io/async_write.rs b/third_party/rust/tokio/src/io/async_write.rs
new file mode 100644
index 0000000000..0bfed056ef
--- /dev/null
+++ b/third_party/rust/tokio/src/io/async_write.rs
@@ -0,0 +1,291 @@
+use bytes::Buf;
+use std::io;
+use std::ops::DerefMut;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Writes bytes asynchronously.
+///
+/// This trait is analogous to [`std::io::Write`] and indicates that an I/O
+/// object is **nonblocking**. All non-blocking I/O objects must return an
+/// error when bytes cannot be written instead of blocking the current thread.
+///
+/// Specifically, this means that the [`poll_write`] function will return one of
+/// the following:
+///
+/// * `Poll::Ready(Ok(n))` means that `n` bytes of data were immediately
+/// written.
+///
+/// * `Poll::Pending` means that no data was written from the buffer
+/// provided. The I/O object is not currently writable but may become writable
+/// in the future. Most importantly, **the current future's task is scheduled
+/// to get unparked when the object is writable**. This means that like
+/// `Future::poll` you'll receive a notification when the I/O object is
+/// writable again.
+///
+/// * `Poll::Ready(Err(e))` means that a standard I/O error occurred on the
+/// underlying object.
+///
+/// This trait importantly means that the [`write`][stdwrite] method only works in
+/// the context of a future's task. The object may panic if used outside of a task.
+///
+/// Note that this trait also represents that the [`Write::flush`][stdflush] method
+/// works very similarly to the `write` method, notably that `Ok(())` means that the
+/// writer has successfully been flushed, a "would block" error means that the
+/// current task is ready to receive a notification when flushing can make more
+/// progress, and otherwise normal errors can happen as well.
+///
+/// Utilities for working with `AsyncWrite` values are provided by
+/// [`AsyncWriteExt`].
+///
+/// [`std::io::Write`]: std::io::Write
+/// [`poll_write`]: AsyncWrite::poll_write()
+/// [stdwrite]: std::io::Write::write()
+/// [stdflush]: std::io::Write::flush()
+/// [`AsyncWriteExt`]: crate::io::AsyncWriteExt
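+///
+/// # Examples
+///
+/// A minimal sketch of driving the three core operations by hand with
+/// `poll_fn` (assuming the `futures` crate), using the `Vec<u8>`
+/// implementation provided below; most code would use [`AsyncWriteExt`]
+/// methods instead:
+///
+/// ```
+/// use tokio::io::AsyncWrite;
+/// use futures::future::poll_fn;
+/// use std::pin::Pin;
+///
+/// #[tokio::main]
+/// async fn main() -> std::io::Result<()> {
+/// let mut writer: Vec<u8> = vec![];
+/// let n = poll_fn(|cx| Pin::new(&mut writer).poll_write(cx, b"hello")).await?;
+/// poll_fn(|cx| Pin::new(&mut writer).poll_flush(cx)).await?;
+/// poll_fn(|cx| Pin::new(&mut writer).poll_shutdown(cx)).await?;
+/// assert_eq!(&writer[..n], &b"hello"[..]);
+/// Ok(())
+/// }
+/// ```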
+pub trait AsyncWrite {
+ /// Attempts to write bytes from `buf` into the object.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
+ ///
+ /// If the object is not ready for writing, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker()`) to receive a notification when the object becomes
+ /// writable or is closed.
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>>;
+
+ /// Attempts to flush the object, ensuring that any buffered data reaches
+ /// its destination.
+ ///
+ /// On success, returns `Poll::Ready(Ok(()))`.
+ ///
+ /// If flushing cannot immediately complete, this method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker()`) to receive a notification when the object can make
+ /// progress towards flushing.
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>>;
+
+ /// Initiates or attempts to shut down this writer, returning success when
+ /// the I/O connection has completely shut down.
+ ///
+ /// This method is intended to be used for asynchronous shutdown of I/O
+ /// connections. For example this is suitable for implementing shutdown of a
+ /// TLS connection or calling `TcpStream::shutdown` on a proxied connection.
+ /// Protocols sometimes need to flush out final pieces of data or otherwise
+ /// perform a graceful shutdown handshake, reading/writing more data as
+ /// appropriate. This method is the hook for such protocols to implement the
+ /// graceful shutdown logic.
+ ///
+ /// This `shutdown` method is required by implementers of the
+ /// `AsyncWrite` trait. Wrappers typically just want to proxy this call
+ /// through to the wrapped type, and base types will typically implement
+ /// shutdown logic here or just return `Poll::Ready(Ok(()))`. Note that if
+ /// you're wrapping an underlying `AsyncWrite`, a call to `shutdown` implies
+ /// that transitively the entire stream has been shut down. After your
+ /// wrapper's shutdown logic has been executed, you should shut down the
+ /// underlying stream.
+ ///
+ /// Invocation of `shutdown` implies an invocation of `flush`. Once this
+ /// method returns `Ready`, a flush has successfully completed before the
+ /// shutdown. That is, callers don't need to call `flush` before calling
+ /// `shutdown`; they can rely on `shutdown` to write out any pending
+ /// buffered data.
+ ///
+ /// # Return value
+ ///
+ /// This function returns a `Poll<io::Result<()>>` classified as such:
+ ///
+ /// * `Poll::Ready(Ok(()))` - indicates that the connection was
+ /// successfully shut down and it is now safe to deallocate/drop/close
+ /// resources associated with it. The current task will no longer receive
+ /// any notifications from this method, and the I/O object itself is
+ /// likely no longer usable.
+ ///
+ /// * `Poll::Pending` - indicates that shutdown is initiated but could
+ /// not complete just yet. This may mean that more I/O needs to happen to
+ /// continue this shutdown operation. The current task is scheduled to
+ /// receive a notification when it's otherwise ready to continue the
+ /// shutdown operation. When woken up this method should be called again.
+ ///
+ /// * `Poll::Ready(Err(e))` - indicates that a fatal error happened during
+ /// shutdown and the operation did not complete successfully. This
+ /// typically means that the I/O object is no longer usable.
+ ///
+ /// # Errors
+ ///
+ /// This function can return normal I/O errors through `Err`, described
+ /// above. Additionally this method may also render the underlying
+ /// `Write::write` method no longer usable (e.g. will return errors in the
+ /// future). It's recommended that once `shutdown` is called, the
+ /// `write` method no longer be called.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if not called within the context of a future's
+ /// task.
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>>;
+
+ /// Writes a `Buf` into this value, returning how many bytes were written.
+ ///
+ /// Note that this method will advance the `buf` provided automatically by
+ /// the number of bytes written.
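+ ///
+ /// A minimal sketch (assuming the `bytes` and `futures` crates) of writing
+ /// a `Bytes` value with `poll_write_buf`:
+ ///
+ /// ```
+ /// use tokio::io::AsyncWrite;
+ /// use bytes::Bytes;
+ /// use futures::future::poll_fn;
+ /// use std::pin::Pin;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> std::io::Result<()> {
+ /// let mut writer: Vec<u8> = vec![];
+ /// let mut data = Bytes::from_static(b"hello");
+ /// // `data` is advanced automatically by the number of bytes written.
+ /// let n = poll_fn(|cx| Pin::new(&mut writer).poll_write_buf(cx, &mut data)).await?;
+ /// assert_eq!(n, 5);
+ /// assert!(data.is_empty());
+ /// Ok(())
+ /// }
+ /// ```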
+ fn poll_write_buf<B: Buf>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<Result<usize, io::Error>>
+ where
+ Self: Sized,
+ {
+ if !buf.has_remaining() {
+ return Poll::Ready(Ok(0));
+ }
+
+ let n = ready!(self.poll_write(cx, buf.bytes()))?;
+ buf.advance(n);
+ Poll::Ready(Ok(n))
+ }
+}
+
+macro_rules! deref_async_write {
+ () => {
+ fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8])
+ -> Poll<io::Result<usize>>
+ {
+ Pin::new(&mut **self).poll_write(cx, buf)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut **self).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut **self).poll_shutdown(cx)
+ }
+ }
+}
+
+impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> {
+ deref_async_write!();
+}
+
+impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T {
+ deref_async_write!();
+}
+
+impl<P> AsyncWrite for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: AsyncWrite,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_mut().as_mut().poll_write(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.get_mut().as_mut().poll_flush(cx)
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.get_mut().as_mut().poll_shutdown(cx)
+ }
+}
+
+impl AsyncWrite for Vec<u8> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_mut().extend_from_slice(buf);
+ Poll::Ready(Ok(buf.len()))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl AsyncWrite for io::Cursor<&mut [u8]> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write(&mut *self, buf))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Write::flush(&mut *self))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+}
+
+impl AsyncWrite for io::Cursor<&mut Vec<u8>> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write(&mut *self, buf))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Write::flush(&mut *self))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+}
+
+impl AsyncWrite for io::Cursor<Vec<u8>> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write(&mut *self, buf))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Write::flush(&mut *self))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+}
+
+impl AsyncWrite for io::Cursor<Box<[u8]>> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write(&mut *self, buf))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Write::flush(&mut *self))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/blocking.rs b/third_party/rust/tokio/src/io/blocking.rs
new file mode 100644
index 0000000000..2491039a3f
--- /dev/null
+++ b/third_party/rust/tokio/src/io/blocking.rs
@@ -0,0 +1,279 @@
+use crate::io::sys;
+use crate::io::{AsyncRead, AsyncWrite};
+
+use std::cmp;
+use std::future::Future;
+use std::io;
+use std::io::prelude::*;
+use std::pin::Pin;
+use std::task::Poll::*;
+use std::task::{Context, Poll};
+
+use self::State::*;
+
+/// `T` should not implement _both_ Read and Write.
+#[derive(Debug)]
+pub(crate) struct Blocking<T> {
+ inner: Option<T>,
+ state: State<T>,
+ /// `true` if the lower IO layer needs flushing
+ need_flush: bool,
+}
+
+#[derive(Debug)]
+pub(crate) struct Buf {
+ buf: Vec<u8>,
+ pos: usize,
+}
+
+pub(crate) const MAX_BUF: usize = 16 * 1024;
+
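+/// State of the worker: either idle and holding a reusable buffer, or busy
+/// running a blocking operation that hands the buffer (and the inner I/O
+/// object) back on completion.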
+#[derive(Debug)]
+enum State<T> {
+ Idle(Option<Buf>),
+ Busy(sys::Blocking<(io::Result<usize>, Buf, T)>),
+}
+
+cfg_io_std! {
+ impl<T> Blocking<T> {
+ pub(crate) fn new(inner: T) -> Blocking<T> {
+ Blocking {
+ inner: Some(inner),
+ state: State::Idle(Some(Buf::with_capacity(0))),
+ need_flush: false,
+ }
+ }
+ }
+}
+
+impl<T> AsyncRead for Blocking<T>
+where
+ T: Read + Unpin + Send + 'static,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ dst: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ loop {
+ match self.state {
+ Idle(ref mut buf_cell) => {
+ let mut buf = buf_cell.take().unwrap();
+
+ if !buf.is_empty() {
+ let n = buf.copy_to(dst);
+ *buf_cell = Some(buf);
+ return Ready(Ok(n));
+ }
+
+ buf.ensure_capacity_for(dst);
+ let mut inner = self.inner.take().unwrap();
+
+ self.state = Busy(sys::run(move || {
+ let res = buf.read_from(&mut inner);
+ (res, buf, inner)
+ }));
+ }
+ Busy(ref mut rx) => {
+ let (res, mut buf, inner) = ready!(Pin::new(rx).poll(cx))?;
+ self.inner = Some(inner);
+
+ match res {
+ Ok(_) => {
+ let n = buf.copy_to(dst);
+ self.state = Idle(Some(buf));
+ return Ready(Ok(n));
+ }
+ Err(e) => {
+ assert!(buf.is_empty());
+
+ self.state = Idle(Some(buf));
+ return Ready(Err(e));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<T> AsyncWrite for Blocking<T>
+where
+ T: Write + Unpin + Send + 'static,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ src: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ loop {
+ match self.state {
+ Idle(ref mut buf_cell) => {
+ let mut buf = buf_cell.take().unwrap();
+
+ assert!(buf.is_empty());
+
+ let n = buf.copy_from(src);
+ let mut inner = self.inner.take().unwrap();
+
+ self.state = Busy(sys::run(move || {
+ let n = buf.len();
+ let res = buf.write_to(&mut inner).map(|_| n);
+
+ (res, buf, inner)
+ }));
+ self.need_flush = true;
+
+ return Ready(Ok(n));
+ }
+ Busy(ref mut rx) => {
+ let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?;
+ self.state = Idle(Some(buf));
+ self.inner = Some(inner);
+
+ // If error, return
+ res?;
+ }
+ }
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ loop {
+ let need_flush = self.need_flush;
+ match self.state {
+ // The buffer is not used here
+ Idle(ref mut buf_cell) => {
+ if need_flush {
+ let buf = buf_cell.take().unwrap();
+ let mut inner = self.inner.take().unwrap();
+
+ self.state = Busy(sys::run(move || {
+ let res = inner.flush().map(|_| 0);
+ (res, buf, inner)
+ }));
+
+ self.need_flush = false;
+ } else {
+ return Ready(Ok(()));
+ }
+ }
+ Busy(ref mut rx) => {
+ let (res, buf, inner) = ready!(Pin::new(rx).poll(cx))?;
+ self.state = Idle(Some(buf));
+ self.inner = Some(inner);
+
+ // If error, return
+ res?;
+ }
+ }
+ }
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+/// Repeats operations that are interrupted
+macro_rules! uninterruptibly {
+ ($e:expr) => {{
+ loop {
+ match $e {
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ res => break res,
+ }
+ }
+ }};
+}
+
+impl Buf {
+ pub(crate) fn with_capacity(n: usize) -> Buf {
+ Buf {
+ buf: Vec::with_capacity(n),
+ pos: 0,
+ }
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ pub(crate) fn len(&self) -> usize {
+ self.buf.len() - self.pos
+ }
+
+ pub(crate) fn copy_to(&mut self, dst: &mut [u8]) -> usize {
+ let n = cmp::min(self.len(), dst.len());
+ dst[..n].copy_from_slice(&self.bytes()[..n]);
+ self.pos += n;
+
+ if self.pos == self.buf.len() {
+ self.buf.truncate(0);
+ self.pos = 0;
+ }
+
+ n
+ }
+
+ pub(crate) fn copy_from(&mut self, src: &[u8]) -> usize {
+ assert!(self.is_empty());
+
+ let n = cmp::min(src.len(), MAX_BUF);
+
+ self.buf.extend_from_slice(&src[..n]);
+ n
+ }
+
+ pub(crate) fn bytes(&self) -> &[u8] {
+ &self.buf[self.pos..]
+ }
+
+ pub(crate) fn ensure_capacity_for(&mut self, bytes: &[u8]) {
+ assert!(self.is_empty());
+
+ let len = cmp::min(bytes.len(), MAX_BUF);
+
+ if self.buf.len() < len {
+ self.buf.reserve(len - self.buf.len());
+ }
+
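+ // Safety: capacity has been reserved for `len` bytes. The (possibly
+ // uninitialized) tail is only ever passed to `read` by `read_from`,
+ // which then truncates the buffer to the bytes actually read.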
+ unsafe {
+ self.buf.set_len(len);
+ }
+ }
+
+ pub(crate) fn read_from<T: Read>(&mut self, rd: &mut T) -> io::Result<usize> {
+ let res = uninterruptibly!(rd.read(&mut self.buf));
+
+ if let Ok(n) = res {
+ self.buf.truncate(n);
+ } else {
+ self.buf.clear();
+ }
+
+ assert_eq!(self.pos, 0);
+
+ res
+ }
+
+ pub(crate) fn write_to<T: Write>(&mut self, wr: &mut T) -> io::Result<()> {
+ assert_eq!(self.pos, 0);
+
+ // `write_all` already ignores interrupts
+ let res = wr.write_all(&self.buf);
+ self.buf.clear();
+ res
+ }
+}
+
+cfg_fs! {
+ impl Buf {
+ pub(crate) fn discard_read(&mut self) -> i64 {
+ let ret = -(self.bytes().len() as i64);
+ self.pos = 0;
+ self.buf.truncate(0);
+ ret
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/driver/mod.rs b/third_party/rust/tokio/src/io/driver/mod.rs
new file mode 100644
index 0000000000..d8535d9ab2
--- /dev/null
+++ b/third_party/rust/tokio/src/io/driver/mod.rs
@@ -0,0 +1,396 @@
+pub(crate) mod platform;
+
+mod scheduled_io;
+pub(crate) use scheduled_io::ScheduledIo; // pub(crate) for tests
+
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::park::{Park, Unpark};
+use crate::runtime::context;
+use crate::util::slab::{Address, Slab};
+
+use mio::event::Evented;
+use std::fmt;
+use std::io;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Weak};
+use std::task::Waker;
+use std::time::Duration;
+
+/// I/O driver, backed by Mio
+pub(crate) struct Driver {
+ /// Reuse the `mio::Events` value across calls to poll.
+ events: mio::Events,
+
+ /// State shared between the reactor and the handles.
+ inner: Arc<Inner>,
+
+ _wakeup_registration: mio::Registration,
+}
+
+/// A reference to an I/O driver
+#[derive(Clone)]
+pub(crate) struct Handle {
+ inner: Weak<Inner>,
+}
+
+pub(super) struct Inner {
+ /// The underlying system event queue.
+ io: mio::Poll,
+
+ /// Dispatch slabs for I/O and futures events
+ pub(super) io_dispatch: Slab<ScheduledIo>,
+
+ /// The number of sources in `io_dispatch`.
+ n_sources: AtomicUsize,
+
+ /// Used to wake up the reactor from a call to `turn`
+ wakeup: mio::SetReadiness,
+}
+
+#[derive(Debug, Eq, PartialEq, Clone, Copy)]
+pub(super) enum Direction {
+ Read,
+ Write,
+}
+
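+// Token for the wakeup registration. `Address::NULL` should never be handed
+// out by the slab, so it does not collide with a resource token.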
+const TOKEN_WAKEUP: mio::Token = mio::Token(Address::NULL);
+
+fn _assert_kinds() {
+ fn _assert<T: Send + Sync>() {}
+
+ _assert::<Handle>();
+}
+
+// ===== impl Driver =====
+
+impl Driver {
+ /// Creates a new event loop, returning any error that happened during the
+ /// creation.
+ pub(crate) fn new() -> io::Result<Driver> {
+ let io = mio::Poll::new()?;
+ let wakeup_pair = mio::Registration::new2();
+
+ io.register(
+ &wakeup_pair.0,
+ TOKEN_WAKEUP,
+ mio::Ready::readable(),
+ mio::PollOpt::level(),
+ )?;
+
+ Ok(Driver {
+ events: mio::Events::with_capacity(1024),
+ _wakeup_registration: wakeup_pair.0,
+ inner: Arc::new(Inner {
+ io,
+ io_dispatch: Slab::new(),
+ n_sources: AtomicUsize::new(0),
+ wakeup: wakeup_pair.1,
+ }),
+ })
+ }
+
+ /// Returns a handle to this event loop which can be sent across threads
+ /// and can be used as a proxy to the event loop itself.
+ ///
+ /// Handles are cloneable and clones always refer to the same event loop.
+ /// This handle is typically passed into functions that create I/O objects
+ /// to bind them to this event loop.
+ pub(crate) fn handle(&self) -> Handle {
+ Handle {
+ inner: Arc::downgrade(&self.inner),
+ }
+ }
+
+ fn turn(&mut self, max_wait: Option<Duration>) -> io::Result<()> {
+ // Block waiting for an event to happen, peeling out how many events
+ // happened.
+ match self.inner.io.poll(&mut self.events, max_wait) {
+ Ok(_) => {}
+ Err(e) => return Err(e),
+ }
+
+ // Process all the events that came in, dispatching appropriately
+
+ for event in self.events.iter() {
+ let token = event.token();
+
+ if token == TOKEN_WAKEUP {
+ self.inner
+ .wakeup
+ .set_readiness(mio::Ready::empty())
+ .unwrap();
+ } else {
+ self.dispatch(token, event.readiness());
+ }
+ }
+
+ Ok(())
+ }
+
+ fn dispatch(&self, token: mio::Token, ready: mio::Ready) {
+ let mut rd = None;
+ let mut wr = None;
+
+ let address = Address::from_usize(token.0);
+
+ let io = match self.inner.io_dispatch.get(address) {
+ Some(io) => io,
+ None => return,
+ };
+
+ if io
+ .set_readiness(address, |curr| curr | ready.as_usize())
+ .is_err()
+ {
+ // token no longer valid!
+ return;
+ }
+
+ if ready.is_writable() || platform::is_hup(ready) || platform::is_error(ready) {
+ wr = io.writer.take_waker();
+ }
+
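+ // Everything except writable readiness is routed to the reader; this
+ // mirrors `Direction::mask` below.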
+ if !(ready & (!mio::Ready::writable())).is_empty() {
+ rd = io.reader.take_waker();
+ }
+
+ if let Some(w) = rd {
+ w.wake();
+ }
+
+ if let Some(w) = wr {
+ w.wake();
+ }
+ }
+}
+
+impl Park for Driver {
+ type Unpark = Handle;
+ type Error = io::Error;
+
+ fn unpark(&self) -> Self::Unpark {
+ self.handle()
+ }
+
+ fn park(&mut self) -> io::Result<()> {
+ self.turn(None)?;
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> io::Result<()> {
+ self.turn(Some(duration))?;
+ Ok(())
+ }
+}
+
+impl fmt::Debug for Driver {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Driver")
+ }
+}
+
+// ===== impl Handle =====
+
+impl Handle {
+ /// Returns a handle to the current reactor
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is no current reactor set.
+ pub(super) fn current() -> Self {
+ context::io_handle()
+ .expect("there is no reactor running, must be called from the context of Tokio runtime")
+ }
+
+ /// Forces a reactor blocked in a call to `turn` to wake up, or otherwise
+ /// makes the next call to `turn` return immediately.
+ ///
+ /// This method is intended to be used in situations where a notification
+ /// needs to otherwise be sent to the main reactor. If the reactor is
+ /// currently blocked inside of `turn` then it will wake up and soon return
+ /// after this method has been called. If the reactor is not currently
+ /// blocked in `turn`, then the next call to `turn` will not block and
+ /// return immediately.
+ fn wakeup(&self) {
+ if let Some(inner) = self.inner() {
+ inner.wakeup.set_readiness(mio::Ready::readable()).unwrap();
+ }
+ }
+
+ pub(super) fn inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+impl Unpark for Handle {
+ fn unpark(&self) {
+ self.wakeup();
+ }
+}
+
+impl fmt::Debug for Handle {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Handle")
+ }
+}
+
+// ===== impl Inner =====
+
+impl Inner {
+ /// Registers an I/O resource with the reactor.
+ ///
+ /// The registration token is returned.
+ pub(super) fn add_source(&self, source: &dyn Evented) -> io::Result<Address> {
+ let address = self.io_dispatch.alloc().ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::Other,
+ "reactor at max registered I/O resources",
+ )
+ })?;
+
+ self.n_sources.fetch_add(1, SeqCst);
+
+ self.io.register(
+ source,
+ mio::Token(address.to_usize()),
+ mio::Ready::all(),
+ mio::PollOpt::edge(),
+ )?;
+
+ Ok(address)
+ }
+
+ /// Deregisters an I/O resource from the reactor.
+ pub(super) fn deregister_source(&self, source: &dyn Evented) -> io::Result<()> {
+ self.io.deregister(source)
+ }
+
+ pub(super) fn drop_source(&self, address: Address) {
+ self.io_dispatch.remove(address);
+ self.n_sources.fetch_sub(1, SeqCst);
+ }
+
+ /// Registers interest in the I/O resource associated with `token`.
+ pub(super) fn register(&self, token: Address, dir: Direction, w: Waker) {
+ let sched = self
+ .io_dispatch
+ .get(token)
+ .unwrap_or_else(|| panic!("IO resource for token {:?} does not exist!", token));
+
+ let waker = match dir {
+ Direction::Read => &sched.reader,
+ Direction::Write => &sched.writer,
+ };
+
+ waker.register(w);
+ }
+}
+
+impl Direction {
+ pub(super) fn mask(self) -> mio::Ready {
+ match self {
+ Direction::Read => {
+ // Everything except writable is signaled through read.
+ mio::Ready::all() - mio::Ready::writable()
+ }
+ Direction::Write => mio::Ready::writable() | platform::hup() | platform::error(),
+ }
+ }
+}
+
+#[cfg(all(test, loom))]
+mod tests {
+ use super::*;
+ use loom::thread;
+
+ // No-op `Evented` impl just so we can have something to pass to `add_source`.
+ struct NotEvented;
+
+ impl Evented for NotEvented {
+ fn register(
+ &self,
+ _: &mio::Poll,
+ _: mio::Token,
+ _: mio::Ready,
+ _: mio::PollOpt,
+ ) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn reregister(
+ &self,
+ _: &mio::Poll,
+ _: mio::Token,
+ _: mio::Ready,
+ _: mio::PollOpt,
+ ) -> io::Result<()> {
+ Ok(())
+ }
+
+ fn deregister(&self, _: &mio::Poll) -> io::Result<()> {
+ Ok(())
+ }
+ }
+
+ #[test]
+ fn tokens_unique_when_dropped() {
+ loom::model(|| {
+ let reactor = Driver::new().unwrap();
+ let inner = reactor.inner;
+ let inner2 = inner.clone();
+
+ let token_1 = inner.add_source(&NotEvented).unwrap();
+ let thread = thread::spawn(move || {
+ inner2.drop_source(token_1);
+ });
+
+ let token_2 = inner.add_source(&NotEvented).unwrap();
+ thread.join().unwrap();
+
+ assert!(token_1 != token_2);
+ })
+ }
+
+ #[test]
+ fn tokens_unique_when_dropped_on_full_page() {
+ loom::model(|| {
+ let reactor = Driver::new().unwrap();
+ let inner = reactor.inner;
+ let inner2 = inner.clone();
+ // add sources to fill up the first page so that the dropped index
+ // may be reused.
+ for _ in 0..31 {
+ inner.add_source(&NotEvented).unwrap();
+ }
+
+ let token_1 = inner.add_source(&NotEvented).unwrap();
+ let thread = thread::spawn(move || {
+ inner2.drop_source(token_1);
+ });
+
+ let token_2 = inner.add_source(&NotEvented).unwrap();
+ thread.join().unwrap();
+
+ assert!(token_1 != token_2);
+ })
+ }
+
+ #[test]
+ fn tokens_unique_concurrent_add() {
+ loom::model(|| {
+ let reactor = Driver::new().unwrap();
+ let inner = reactor.inner;
+ let inner2 = inner.clone();
+
+ let thread = thread::spawn(move || {
+ let token_2 = inner2.add_source(&NotEvented).unwrap();
+ token_2
+ });
+
+ let token_1 = inner.add_source(&NotEvented).unwrap();
+ let token_2 = thread.join().unwrap();
+
+ assert!(token_1 != token_2);
+ })
+ }
+}
diff --git a/third_party/rust/tokio/src/io/driver/platform.rs b/third_party/rust/tokio/src/io/driver/platform.rs
new file mode 100644
index 0000000000..6b27988ce6
--- /dev/null
+++ b/third_party/rust/tokio/src/io/driver/platform.rs
@@ -0,0 +1,44 @@
+pub(crate) use self::sys::*;
+
+#[cfg(unix)]
+mod sys {
+ use mio::unix::UnixReady;
+ use mio::Ready;
+
+ pub(crate) fn hup() -> Ready {
+ UnixReady::hup().into()
+ }
+
+ pub(crate) fn is_hup(ready: Ready) -> bool {
+ UnixReady::from(ready).is_hup()
+ }
+
+ pub(crate) fn error() -> Ready {
+ UnixReady::error().into()
+ }
+
+ pub(crate) fn is_error(ready: Ready) -> bool {
+ UnixReady::from(ready).is_error()
+ }
+}
+
+#[cfg(windows)]
+mod sys {
+ use mio::Ready;
+
+ pub(crate) fn hup() -> Ready {
+ Ready::empty()
+ }
+
+ pub(crate) fn is_hup(_: Ready) -> bool {
+ false
+ }
+
+ pub(crate) fn error() -> Ready {
+ Ready::empty()
+ }
+
+ pub(crate) fn is_error(_: Ready) -> bool {
+ false
+ }
+}
diff --git a/third_party/rust/tokio/src/io/driver/scheduled_io.rs b/third_party/rust/tokio/src/io/driver/scheduled_io.rs
new file mode 100644
index 0000000000..7f6446e3f5
--- /dev/null
+++ b/third_party/rust/tokio/src/io/driver/scheduled_io.rs
@@ -0,0 +1,141 @@
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::util::bit;
+use crate::util::slab::{Address, Entry, Generation};
+
+use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};
+
+#[derive(Debug)]
+pub(crate) struct ScheduledIo {
+ readiness: AtomicUsize,
+ pub(crate) reader: AtomicWaker,
+ pub(crate) writer: AtomicWaker,
+}
+
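+// The readiness word packs the slot generation into the `Generation::WIDTH`
+// most significant bits; the remaining low bits hold the readiness value.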
+const PACK: bit::Pack = bit::Pack::most_significant(Generation::WIDTH);
+
+impl Entry for ScheduledIo {
+ fn generation(&self) -> Generation {
+ unpack_generation(self.readiness.load(SeqCst))
+ }
+
+ fn reset(&self, generation: Generation) -> bool {
+ let mut current = self.readiness.load(Acquire);
+
+ loop {
+ if unpack_generation(current) != generation {
+ return false;
+ }
+
+ let next = PACK.pack(generation.next().to_usize(), 0);
+
+ match self
+ .readiness
+ .compare_exchange(current, next, AcqRel, Acquire)
+ {
+ Ok(_) => break,
+ Err(actual) => current = actual,
+ }
+ }
+
+ drop(self.reader.take_waker());
+ drop(self.writer.take_waker());
+
+ true
+ }
+}
+
+impl Default for ScheduledIo {
+ fn default() -> ScheduledIo {
+ ScheduledIo {
+ readiness: AtomicUsize::new(0),
+ reader: AtomicWaker::new(),
+ writer: AtomicWaker::new(),
+ }
+ }
+}
+
+impl ScheduledIo {
+ #[cfg(all(test, loom))]
+ /// Returns the current readiness value of this `ScheduledIo`, if the
+ /// generation in the provided `address` is still current.
+ ///
+ /// # Returns
+ ///
+ /// If the given token's generation no longer matches the `ScheduledIo`'s
+ /// generation, then the corresponding IO resource has been removed and
+ /// replaced with a new resource. In that case, this method returns `None`.
+ /// Otherwise, this returns the current readiness.
+ pub(crate) fn get_readiness(&self, address: Address) -> Option<usize> {
+ let ready = self.readiness.load(Acquire);
+
+ if unpack_generation(ready) != address.generation() {
+ return None;
+ }
+
+ Some(ready & !PACK.mask())
+ }
+
+ /// Sets the readiness on this `ScheduledIo` by invoking the given closure on
+ /// the current value, returning the previous readiness value.
+ ///
+ /// # Arguments
+ /// - `address`: the address (and generation) for this `ScheduledIo`.
+ /// - `f`: a closure returning a new readiness value given the previous
+ /// readiness.
+ ///
+ /// # Returns
+ ///
+ /// If the given token's generation no longer matches the `ScheduledIo`'s
+ /// generation, then the corresponding IO resource has been removed and
+ /// replaced with a new resource. In that case, this method returns `Err`.
+ /// Otherwise, this returns the previous readiness.
+ pub(crate) fn set_readiness(
+ &self,
+ address: Address,
+ f: impl Fn(usize) -> usize,
+ ) -> Result<usize, ()> {
+ let generation = address.generation();
+
+ let mut current = self.readiness.load(Acquire);
+
+ loop {
+ // Check that the generation for this access is still the current
+ // one.
+ if unpack_generation(current) != generation {
+ return Err(());
+ }
+ // Mask out the generation bits so that the modifying function
+ // doesn't see them.
+ let current_readiness = current & mio::Ready::all().as_usize();
+ let new = f(current_readiness);
+
+ debug_assert!(
+ new <= !PACK.max_value(),
+ "new readiness value would overwrite generation bits!"
+ );
+
+ match self.readiness.compare_exchange(
+ current,
+ PACK.pack(generation.to_usize(), new),
+ AcqRel,
+ Acquire,
+ ) {
+ Ok(_) => return Ok(current),
+ // we lost the race, retry!
+ Err(actual) => current = actual,
+ }
+ }
+ }
+}
+
+impl Drop for ScheduledIo {
+ fn drop(&mut self) {
+ self.writer.wake();
+ self.reader.wake();
+ }
+}
+
+fn unpack_generation(src: usize) -> Generation {
+ Generation::new(PACK.unpack(src))
+}
diff --git a/third_party/rust/tokio/src/io/mod.rs b/third_party/rust/tokio/src/io/mod.rs
new file mode 100644
index 0000000000..29d8bc5554
--- /dev/null
+++ b/third_party/rust/tokio/src/io/mod.rs
@@ -0,0 +1,229 @@
+#![cfg_attr(loom, allow(dead_code, unreachable_pub))]
+
+//! Traits, helpers, and type definitions for asynchronous I/O functionality.
+//!
+//! This module is the asynchronous version of `std::io`. Primarily, it
+//! defines two traits, [`AsyncRead`] and [`AsyncWrite`], which are asynchronous
+//! versions of the [`Read`] and [`Write`] traits in the standard library.
+//!
+//! # AsyncRead and AsyncWrite
+//!
+//! Like the standard library's [`Read`] and [`Write`] traits, [`AsyncRead`] and
+//! [`AsyncWrite`] provide the most general interface for reading and writing
+//! input and output. Unlike the standard library's traits, however, they are
+//! _asynchronous_ &mdash; meaning that reading from or writing to a `tokio::io`
+//! type will _yield_ to the Tokio scheduler when IO is not ready, rather than
+//! blocking. This allows other tasks to run while waiting on IO.
+//!
+//! Another difference is that [`AsyncRead`] and [`AsyncWrite`] only contain
+//! core methods needed to provide asynchronous reading and writing
+//! functionality. Instead, utility methods are defined in the [`AsyncReadExt`]
+//! and [`AsyncWriteExt`] extension traits. These traits are automatically
+//! implemented for all values that implement [`AsyncRead`] and [`AsyncWrite`]
+//! respectively.
+//!
+//! End users will rarely interact directly with [`AsyncRead`] and
+//! [`AsyncWrite`]. Instead, they will use the async functions defined in the
+//! extension traits. Library authors are expected to implement [`AsyncRead`]
+//! and [`AsyncWrite`] in order to provide types that behave like byte streams.
+//!
+//! Even with these differences, Tokio's [`AsyncRead`] and [`AsyncWrite`] traits
+//! can be used in almost exactly the same manner as the standard library's
+//! `Read` and `Write`. Most types in the standard library that implement `Read`
+//! and `Write` have asynchronous equivalents in `tokio` that implement
+//! `AsyncRead` and `AsyncWrite`, such as [`File`] and [`TcpStream`].
+//!
+//! For example, the standard library documentation introduces `Read` by
+//! [demonstrating][std_example] reading some bytes from a [`std::fs::File`]. We
+//! can do the same with [`tokio::fs::File`][`File`]:
+//!
+//! ```no_run
+//! use tokio::io::{self, AsyncReadExt};
+//! use tokio::fs::File;
+//!
+//! #[tokio::main]
+//! async fn main() -> io::Result<()> {
+//! let mut f = File::open("foo.txt").await?;
+//! let mut buffer = [0; 10];
+//!
+//! // read up to 10 bytes
+//! let n = f.read(&mut buffer).await?;
+//!
+//! println!("The bytes: {:?}", &buffer[..n]);
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`File`]: crate::fs::File
+//! [`TcpStream`]: crate::net::TcpStream
+//! [`std::fs::File`]: std::fs::File
+//! [std_example]: https://doc.rust-lang.org/std/io/index.html#read-and-write
+//!
+//! ## Buffered Readers and Writers
+//!
+//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be
+//! making near-constant calls to the operating system. To help with this,
+//! `std::io` comes with [support for _buffered_ readers and writers][stdbuf],
+//! and therefore, `tokio::io` does as well.
+//!
+//! Tokio provides an async version of the [`std::io::BufRead`] trait,
+//! [`AsyncBufRead`]; and async [`BufReader`] and [`BufWriter`] structs, which
+//! wrap readers and writers. These wrappers use a buffer, reducing the number
+//! of calls and providing nicer methods for accessing exactly what you want.
+//!
+//! For example, [`BufReader`] works with the [`AsyncBufRead`] trait to add
+//! extra methods to any async reader:
+//!
+//! ```no_run
+//! use tokio::io::{self, BufReader, AsyncBufReadExt};
+//! use tokio::fs::File;
+//!
+//! #[tokio::main]
+//! async fn main() -> io::Result<()> {
+//! let f = File::open("foo.txt").await?;
+//! let mut reader = BufReader::new(f);
+//! let mut buffer = String::new();
+//!
+//! // read a line into buffer
+//! reader.read_line(&mut buffer).await?;
+//!
+//! println!("{}", buffer);
+//! Ok(())
+//! }
+//! ```
+//!
+//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call
+//! to [`write`](crate::io::AsyncWriteExt::write):
+//!
+//! ```no_run
+//! use tokio::io::{self, BufWriter, AsyncWriteExt};
+//! use tokio::fs::File;
+//!
+//! #[tokio::main]
+//! async fn main() -> io::Result<()> {
+//! let f = File::create("foo.txt").await?;
+//! {
+//! let mut writer = BufWriter::new(f);
+//!
+//! // write a byte to the buffer
+//! writer.write(&[42u8]).await?;
+//!
+//! } // the buffer is flushed once writer goes out of scope
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! [stdbuf]: https://doc.rust-lang.org/std/io/index.html#bufreader-and-bufwriter
+//! [`std::io::BufRead`]: std::io::BufRead
+//! [`AsyncBufRead`]: crate::io::AsyncBufRead
+//! [`BufReader`]: crate::io::BufReader
+//! [`BufWriter`]: crate::io::BufWriter
+//!
+//! ## Implementing AsyncRead and AsyncWrite
+//!
+//! Because they are traits, we can implement `AsyncRead` and `AsyncWrite` for
+//! our own types, as well. Note that these traits must only be implemented for
+//! non-blocking I/O types that integrate with the futures type system. In
+//! other words, these types must never block the thread, and instead the
+//! current task is notified when the I/O resource is ready.
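+//!
+//! As a minimal sketch, an in-memory reader that is always ready (the
+//! `Memory` type here is purely illustrative) could be written as:
+//!
+//! ```
+//! use tokio::io::AsyncRead;
+//! use std::pin::Pin;
+//! use std::task::{Context, Poll};
+//!
+//! struct Memory {
+//! data: Vec<u8>,
+//! }
+//!
+//! impl AsyncRead for Memory {
+//! fn poll_read(
+//! self: Pin<&mut Self>,
+//! _cx: &mut Context<'_>,
+//! buf: &mut [u8],
+//! ) -> Poll<std::io::Result<usize>> {
+//! // This source never blocks, so the waker in `_cx` is never
+//! // registered; a resource that could block would register it
+//! // before returning `Poll::Pending`.
+//! let this = self.get_mut(); // `Memory` is `Unpin`
+//! let n = std::cmp::min(this.data.len(), buf.len());
+//! buf[..n].copy_from_slice(&this.data[..n]);
+//! this.data.drain(..n);
+//! Poll::Ready(Ok(n))
+//! }
+//! }
+//! ```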
+//!
+//! # Standard input and output
+//!
+//! Tokio provides asynchronous APIs to standard [input], [output], and [error].
+//! These APIs are very similar to the ones provided by `std`, but they also
+//! implement [`AsyncRead`] and [`AsyncWrite`].
+//!
+//! Note that the standard input / output APIs **must** be used from the
+//! context of the Tokio runtime, as they require Tokio-specific features to
+//! function. Calling these functions outside of a Tokio runtime will panic.
+//!
+//! [input]: fn@stdin
+//! [output]: fn@stdout
+//! [error]: fn@stderr
+//!
+//! # `std` re-exports
+//!
+//! Additionally, [`Error`], [`ErrorKind`], and [`Result`] are re-exported
+//! from `std::io` for ease of use.
+//!
+//! [`AsyncRead`]: trait@AsyncRead
+//! [`AsyncWrite`]: trait@AsyncWrite
+//! [`Error`]: struct@Error
+//! [`ErrorKind`]: enum@ErrorKind
+//! [`Result`]: type@Result
+//! [`Read`]: std::io::Read
+//! [`Write`]: std::io::Write
+cfg_io_blocking! {
+ pub(crate) mod blocking;
+}
+
+mod async_buf_read;
+pub use self::async_buf_read::AsyncBufRead;
+
+mod async_read;
+pub use self::async_read::AsyncRead;
+
+mod async_seek;
+pub use self::async_seek::AsyncSeek;
+
+mod async_write;
+pub use self::async_write::AsyncWrite;
+
+cfg_io_driver! {
+ pub(crate) mod driver;
+
+ mod poll_evented;
+ pub use poll_evented::PollEvented;
+
+ mod registration;
+ pub use registration::Registration;
+}
+
+cfg_io_std! {
+ mod stderr;
+ pub use stderr::{stderr, Stderr};
+
+ mod stdin;
+ pub use stdin::{stdin, Stdin};
+
+ mod stdout;
+ pub use stdout::{stdout, Stdout};
+}
+
+cfg_io_util! {
+ mod split;
+ pub use split::{split, ReadHalf, WriteHalf};
+
+ pub(crate) mod seek;
+ pub use self::seek::Seek;
+
+ pub(crate) mod util;
+ pub use util::{
+ copy, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt,
+ BufReader, BufStream, BufWriter, Copy, Empty, Lines, Repeat, Sink, Split, Take,
+ };
+
+ cfg_stream! {
+ pub use util::{stream_reader, StreamReader};
+ }
+
+ // Re-export io::Error so that users don't have to deal with conflicts when
+ // `use`ing `tokio::io` and `std::io`.
+ pub use std::io::{Error, ErrorKind, Result};
+}
+
+cfg_not_io_util! {
+ cfg_process! {
+ pub(crate) mod util;
+ }
+}
+
+cfg_io_blocking! {
+ /// Types in this module can be mocked out in tests.
+ mod sys {
+ // TODO: don't rename
+ pub(crate) use crate::runtime::spawn_blocking as run;
+ pub(crate) use crate::task::JoinHandle as Blocking;
+ }
+}
diff --git a/third_party/rust/tokio/src/io/poll_evented.rs b/third_party/rust/tokio/src/io/poll_evented.rs
new file mode 100644
index 0000000000..298e6e58cf
--- /dev/null
+++ b/third_party/rust/tokio/src/io/poll_evented.rs
@@ -0,0 +1,423 @@
+use crate::io::driver::platform;
+use crate::io::{AsyncRead, AsyncWrite, Registration};
+
+use mio::event::Evented;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::Relaxed;
+use std::task::{Context, Poll};
+
+cfg_io_driver! {
+ /// Associates an I/O resource that implements the [`std::io::Read`] and/or
+ /// [`std::io::Write`] traits with the reactor that drives it.
+ ///
+ /// `PollEvented` uses [`Registration`] internally to take a type that
+ /// implements [`mio::Evented`] as well as [`std::io::Read`] and or
+ /// [`std::io::Write`] and associate it with a reactor that will drive it.
+ ///
+ /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be
+ /// used from within the future's execution model. As such, the
+ /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`]
+ /// implementations using the underlying I/O resource as well as readiness
+ /// events provided by the reactor.
+ ///
+ /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is
+ /// `Sync`), the caller must ensure that there are at most two tasks that
+ /// use a `PollEvented` instance concurrently. One for reading and one for
+ /// writing. While violating this requirement is "safe" from a Rust memory
+ /// model point of view, it will result in unexpected behavior in the form
+ /// of lost notifications and tasks hanging.
+ ///
+ /// ## Readiness events
+ ///
+ /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations,
+ /// this type also supports access to the underlying readiness event stream.
+ /// While similar in function to what [`Registration`] provides, the
+ /// semantics are a bit different.
+ ///
+ /// Two functions are provided to access the readiness events:
+ /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the
+ /// current readiness state of the `PollEvented` instance. If
+ /// [`poll_read_ready`] indicates read readiness, immediately calling
+ /// [`poll_read_ready`] again will also indicate read readiness.
+ ///
+ /// When the operation is attempted and is unable to succeed due to the I/O
+ /// resource not being ready, the caller must call [`clear_read_ready`] or
+ /// [`clear_write_ready`]. This clears the readiness state until a new
+ /// readiness event is received.
+ ///
+ /// This allows the caller to implement additional functions. For example,
+ /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and
+ /// [`clear_read_ready`].
+ ///
+ /// ```rust
+ /// use tokio::io::PollEvented;
+ ///
+ /// use futures::ready;
+ /// use mio::Ready;
+ /// use mio::net::{TcpStream, TcpListener};
+ /// use std::io;
+ /// use std::task::{Context, Poll};
+ ///
+ /// struct MyListener {
+ /// poll_evented: PollEvented<TcpListener>,
+ /// }
+ ///
+ /// impl MyListener {
+ /// pub fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<Result<TcpStream, io::Error>> {
+ /// let ready = Ready::readable();
+ ///
+ /// ready!(self.poll_evented.poll_read_ready(cx, ready))?;
+ ///
+ /// match self.poll_evented.get_ref().accept() {
+ /// Ok((socket, _)) => Poll::Ready(Ok(socket)),
+ /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ /// self.poll_evented.clear_read_ready(cx, ready)?;
+ /// Poll::Pending
+ /// }
+ /// Err(e) => Poll::Ready(Err(e)),
+ /// }
+ /// }
+ /// }
+ /// ```
+ ///
+ /// ## Platform-specific events
+ ///
+ /// `PollEvented` also allows receiving platform-specific `mio::Ready` events.
+ /// These events are included as part of the read readiness event stream. The
+ /// write readiness event stream is only for `Ready::writable()` events.
+ ///
+ /// [`std::io::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
+ /// [`std::io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
+ /// [`AsyncRead`]: ../io/trait.AsyncRead.html
+ /// [`AsyncWrite`]: ../io/trait.AsyncWrite.html
+ /// [`mio::Evented`]: https://docs.rs/mio/0.6/mio/trait.Evented.html
+ /// [`Registration`]: struct@Registration
+ /// [`TcpListener`]: ../net/struct.TcpListener.html
+ /// [`clear_read_ready`]: #method.clear_read_ready
+ /// [`clear_write_ready`]: #method.clear_write_ready
+ /// [`poll_read_ready`]: #method.poll_read_ready
+ /// [`poll_write_ready`]: #method.poll_write_ready
+ pub struct PollEvented<E: Evented> {
+ io: Option<E>,
+ inner: Inner,
+ }
+}
+
+struct Inner {
+ registration: Registration,
+
+ /// Currently visible read readiness
+ read_readiness: AtomicUsize,
+
+ /// Currently visible write readiness
+ write_readiness: AtomicUsize,
+}
+
+// ===== impl PollEvented =====
+
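+// Arguments: `$mask` is the readiness of interest, `$cache` the atomic field
+// caching readiness bits, `$take` the non-registering "take" method on the
+// registration, and `$poll` the expression that polls (and registers) for
+// new readiness.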
+macro_rules! poll_ready {
+ ($me:expr, $mask:expr, $cache:ident, $take:ident, $poll:expr) => {{
+ // Load cached & encoded readiness.
+ let mut cached = $me.inner.$cache.load(Relaxed);
+ let mask = $mask | platform::hup() | platform::error();
+
+ // See if the current readiness matches any bits.
+ let mut ret = mio::Ready::from_usize(cached) & $mask;
+
+ if ret.is_empty() {
+ // Readiness does not match, consume the registration's readiness
+ // stream. This happens in a loop to ensure that the stream gets
+ // drained.
+ loop {
+ let ready = match $poll? {
+ Poll::Ready(v) => v,
+ Poll::Pending => return Poll::Pending,
+ };
+ cached |= ready.as_usize();
+
+ // Update the cache store
+ $me.inner.$cache.store(cached, Relaxed);
+
+ ret |= ready & mask;
+
+ if !ret.is_empty() {
+ return Poll::Ready(Ok(ret));
+ }
+ }
+ } else {
+ // Check what's new with the registration stream. This will not
+ // request to be notified
+ if let Some(ready) = $me.inner.registration.$take()? {
+ cached |= ready.as_usize();
+ $me.inner.$cache.store(cached, Relaxed);
+ }
+
+ Poll::Ready(Ok(mio::Ready::from_usize(cached)))
+ }
+ }};
+}
+
+impl<E> PollEvented<E>
+where
+ E: Evented,
+{
+ /// Creates a new `PollEvented` associated with the default reactor.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a Tokio runtime; otherwise, the runtime can be
+ /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+ /// function.
+ pub fn new(io: E) -> io::Result<Self> {
+ let registration = Registration::new(&io)?;
+ Ok(Self {
+ io: Some(io),
+ inner: Inner {
+ registration,
+ read_readiness: AtomicUsize::new(0),
+ write_readiness: AtomicUsize::new(0),
+ },
+ })
+ }
+
+ /// Returns a shared reference to the underlying I/O object this readiness
+ /// stream is wrapping.
+ pub fn get_ref(&self) -> &E {
+ self.io.as_ref().unwrap()
+ }
+
+ /// Returns a mutable reference to the underlying I/O object this readiness
+ /// stream is wrapping.
+ pub fn get_mut(&mut self) -> &mut E {
+ self.io.as_mut().unwrap()
+ }
+
+ /// Consumes self, returning the inner I/O object
+ ///
+ /// This function will deregister the I/O resource from the reactor before
+ /// returning. If the deregistration operation fails, an error is returned.
+ ///
+ /// Note that deregistering does not guarantee that the I/O resource can be
+ /// registered with a different reactor. Some I/O resource types can only be
+ /// associated with a single reactor instance for their lifetime.
+ pub fn into_inner(mut self) -> io::Result<E> {
+ let io = self.io.take().unwrap();
+ self.inner.registration.deregister(&io)?;
+ Ok(io)
+ }
+
+ /// Checks the I/O resource's read readiness state.
+ ///
+ /// The mask argument allows specifying what readiness to notify on. This
+ /// can be any value, including platform specific readiness, **except**
+ /// `writable`. HUP is always implicitly included on platforms that support
+ /// it.
+ ///
+ /// If the resource is not ready for a read then `Poll::Pending` is returned
+ /// and the current task is notified once a new event is received.
+ ///
+ /// The I/O resource will remain in a read-ready state until readiness is
+ /// cleared by calling [`clear_read_ready`].
+ ///
+ /// [`clear_read_ready`]: #method.clear_read_ready
+ ///
+ /// # Panics
+ ///
+ /// This function panics if:
+ ///
+ /// * `mask` includes writable.
+ /// * called from outside of a task context.
+ pub fn poll_read_ready(
+ &self,
+ cx: &mut Context<'_>,
+ mask: mio::Ready,
+ ) -> Poll<io::Result<mio::Ready>> {
+ assert!(!mask.is_writable(), "cannot poll for write readiness");
+ poll_ready!(
+ self,
+ mask,
+ read_readiness,
+ take_read_ready,
+ self.inner.registration.poll_read_ready(cx)
+ )
+ }
+
+ /// Clears the I/O resource's read readiness state and registers the current
+ /// task to be notified once a read readiness event is received.
+ ///
+ /// After calling this function, `poll_read_ready` will return
+ /// `Poll::Pending` until a new read readiness event has been received.
+ ///
+ /// The `ready` argument specifies the readiness bits to clear. This may not
+ /// include `writable` or `hup`.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if:
+ ///
+ /// * `ready` includes writable or HUP
+ /// * called from outside of a task context.
+ pub fn clear_read_ready(&self, cx: &mut Context<'_>, ready: mio::Ready) -> io::Result<()> {
+ // Cannot clear write readiness
+ assert!(!ready.is_writable(), "cannot clear write readiness");
+ assert!(!platform::is_hup(ready), "cannot clear HUP readiness");
+
+ self.inner
+ .read_readiness
+ .fetch_and(!ready.as_usize(), Relaxed);
+
+ if self.poll_read_ready(cx, ready)?.is_ready() {
+ // Notify the current task
+ cx.waker().wake_by_ref();
+ }
+
+ Ok(())
+ }
+
+ /// Checks the I/O resource's write readiness state.
+ ///
+ /// This always checks for writable readiness and also checks for HUP
+ /// readiness on platforms that support it.
+ ///
+ /// If the resource is not ready for a write then `Poll::Pending` is
+ /// returned and the current task is notified once a new event is received.
+ ///
+ /// The I/O resource will remain in a write-ready state until readiness is
+ /// cleared by calling [`clear_write_ready`].
+ ///
+ /// [`clear_write_ready`]: #method.clear_write_ready
+ ///
+ /// # Panics
+ ///
+    /// This function will panic if called from outside of a task context.
+ pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
+ poll_ready!(
+ self,
+ mio::Ready::writable(),
+ write_readiness,
+ take_write_ready,
+ self.inner.registration.poll_write_ready(cx)
+ )
+ }
+
+ /// Resets the I/O resource's write readiness state and registers the current
+ /// task to be notified once a write readiness event is received.
+ ///
+ /// This only clears writable readiness. HUP (on platforms that support HUP)
+ /// cannot be cleared as it is a final state.
+ ///
+    /// After calling this function, `poll_write_ready` will return
+    /// `Poll::Pending` until a new write readiness event has been received.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called from outside of a task context.
+ pub fn clear_write_ready(&self, cx: &mut Context<'_>) -> io::Result<()> {
+ let ready = mio::Ready::writable();
+
+ self.inner
+ .write_readiness
+ .fetch_and(!ready.as_usize(), Relaxed);
+
+ if self.poll_write_ready(cx)?.is_ready() {
+ // Notify the current task
+ cx.waker().wake_by_ref();
+ }
+
+ Ok(())
+ }
+}
+
+// ===== Read / Write impls =====
+
+impl<E> AsyncRead for PollEvented<E>
+where
+ E: Evented + Read + Unpin,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
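+        // The edge-triggered readiness pattern: wait for read readiness,
+        // attempt the non-blocking read, and on `WouldBlock` clear the
+        // cached readiness so the next poll waits for a fresh event.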
+ ready!(self.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ let r = (*self).get_mut().read(buf);
+
+ if is_wouldblock(&r) {
+ self.clear_read_ready(cx, mio::Ready::readable())?;
+ return Poll::Pending;
+ }
+
+ Poll::Ready(r)
+ }
+}
+
+impl<E> AsyncWrite for PollEvented<E>
+where
+ E: Evented + Write + Unpin,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.poll_write_ready(cx))?;
+
+ let r = (*self).get_mut().write(buf);
+
+ if is_wouldblock(&r) {
+ self.clear_write_ready(cx)?;
+ return Poll::Pending;
+ }
+
+ Poll::Ready(r)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ ready!(self.poll_write_ready(cx))?;
+
+ let r = (*self).get_mut().flush();
+
+ if is_wouldblock(&r) {
+ self.clear_write_ready(cx)?;
+ return Poll::Pending;
+ }
+
+ Poll::Ready(r)
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
+ match *r {
+ Ok(_) => false,
+ Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
+ }
+}
+
+impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PollEvented").field("io", &self.io).finish()
+ }
+}
+
+impl<E: Evented> Drop for PollEvented<E> {
+ fn drop(&mut self) {
+ if let Some(io) = self.io.take() {
+ // Ignore errors
+ let _ = self.inner.registration.deregister(&io);
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/registration.rs b/third_party/rust/tokio/src/io/registration.rs
new file mode 100644
index 0000000000..4df11999f5
--- /dev/null
+++ b/third_party/rust/tokio/src/io/registration.rs
@@ -0,0 +1,299 @@
+use crate::io::driver::{platform, Direction, Handle};
+use crate::util::slab::Address;
+
+use mio::{self, Evented};
+use std::io;
+use std::task::{Context, Poll};
+
+cfg_io_driver! {
+ /// Associates an I/O resource with the reactor instance that drives it.
+ ///
+ /// A registration represents an I/O resource registered with a Reactor such
+ /// that it will receive task notifications on readiness. This is the lowest
+ /// level API for integrating with a reactor.
+ ///
+    /// The association between an I/O resource and a reactor is made by
+    /// calling [`new`]. Once the association is established, it remains
+    /// established until the registration instance is dropped.
+ ///
+ /// A registration instance represents two separate readiness streams. One
+ /// for the read readiness and one for write readiness. These streams are
+ /// independent and can be consumed from separate tasks.
+ ///
+ /// **Note**: while `Registration` is `Sync`, the caller must ensure that
+ /// there are at most two tasks that use a registration instance
+ /// concurrently. One task for [`poll_read_ready`] and one task for
+ /// [`poll_write_ready`]. While violating this requirement is "safe" from a
+ /// Rust memory safety point of view, it will result in unexpected behavior
+ /// in the form of lost notifications and tasks hanging.
+ ///
+ /// ## Platform-specific events
+ ///
+ /// `Registration` also allows receiving platform-specific `mio::Ready`
+ /// events. These events are included as part of the read readiness event
+ /// stream. The write readiness event stream is only for `Ready::writable()`
+ /// events.
+ ///
+ /// [`new`]: #method.new
+    /// [`poll_read_ready`]: #method.poll_read_ready
+    /// [`poll_write_ready`]: #method.poll_write_ready
+ #[derive(Debug)]
+ pub struct Registration {
+ handle: Handle,
+ address: Address,
+ }
+}
+
+// ===== impl Registration =====
+
+impl Registration {
+ /// Registers the I/O resource with the default reactor.
+ ///
+ /// # Return
+ ///
+ /// - `Ok` if the registration happened successfully
+ /// - `Err` if an error was encountered during registration
+    ///
+ /// # Panics
+ ///
+    /// This function panics if the thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+    /// from a future driven by a tokio runtime; otherwise, the runtime can be
+    /// set explicitly with the [`Handle::enter`](crate::runtime::Handle::enter)
+    /// function.
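+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, marked `ignore` because `mio` is not directly
+    /// available to doctests; the bind address is a placeholder:
+    ///
+    /// ```ignore
+    /// use tokio::io::Registration;
+    ///
+    /// let addr = "127.0.0.1:0".parse().unwrap();
+    /// let listener = mio::net::TcpListener::bind(&addr)?;
+    ///
+    /// // Register the listener with the reactor of the current runtime.
+    /// let registration = Registration::new(&listener)?;
+    /// ```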
+ pub fn new<T>(io: &T) -> io::Result<Registration>
+ where
+ T: Evented,
+ {
+ let handle = Handle::current();
+ let address = if let Some(inner) = handle.inner() {
+ inner.add_source(io)?
+ } else {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "failed to find event loop",
+ ));
+ };
+
+ Ok(Registration { handle, address })
+ }
+
+ /// Deregisters the I/O resource from the reactor it is associated with.
+ ///
+ /// This function must be called before the I/O resource associated with the
+ /// registration is dropped.
+ ///
+ /// Note that deregistering does not guarantee that the I/O resource can be
+ /// registered with a different reactor. Some I/O resource types can only be
+ /// associated with a single reactor instance for their lifetime.
+ ///
+ /// # Return
+ ///
+ /// If the deregistration was successful, `Ok` is returned. Any calls to
+ /// `Reactor::turn` that happen after a successful call to `deregister` will
+ /// no longer result in notifications getting sent for this registration.
+ ///
+ /// `Err` is returned if an error is encountered.
+ pub fn deregister<T>(&mut self, io: &T) -> io::Result<()>
+ where
+ T: Evented,
+ {
+ let inner = match self.handle.inner() {
+ Some(inner) => inner,
+ None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")),
+ };
+ inner.deregister_source(io)
+ }
+
+ /// Polls for events on the I/O resource's read readiness stream.
+ ///
+ /// If the I/O resource receives a new read readiness event since the last
+ /// call to `poll_read_ready`, it is returned. If it has not, the current
+ /// task is notified once a new event is received.
+ ///
+ /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned,
+ /// the function will always return `Ready(HUP)`. This should be treated as
+ /// the end of the readiness stream.
+ ///
+    /// Ensure that the resource has been registered via [`new`] first.
+ ///
+ /// # Return value
+ ///
+ /// There are several possible return values:
+ ///
+ /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received
+ /// a new readiness event. The readiness value is included.
+ ///
+ /// * `Poll::Pending` means that no new readiness events have been received
+ /// since the last call to `poll_read_ready`.
+ ///
+ /// * `Poll::Ready(Err(err))` means that the registration has encountered an
+ /// error. This error either represents a permanent internal error **or**
+    ///   the fact that the resource was never registered via [`new`].
+ ///
+    /// [`new`]: #method.new
+ /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called from outside of a task context.
+ pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ let v = self.poll_ready(Direction::Read, Some(cx))?;
+ match v {
+ Some(v) => Poll::Ready(Ok(v)),
+ None => Poll::Pending,
+ }
+ }
+
+    /// Consumes any pending read readiness event.
+ ///
+ /// This function is identical to [`poll_read_ready`] **except** that it
+ /// will not notify the current task when a new event is received. As such,
+ /// it is safe to call this function from outside of a task context.
+ ///
+ /// [`poll_read_ready`]: #method.poll_read_ready
+ pub fn take_read_ready(&self) -> io::Result<Option<mio::Ready>> {
+ self.poll_ready(Direction::Read, None)
+ }
+
+ /// Polls for events on the I/O resource's write readiness stream.
+ ///
+ /// If the I/O resource receives a new write readiness event since the last
+ /// call to `poll_write_ready`, it is returned. If it has not, the current
+ /// task is notified once a new event is received.
+ ///
+ /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned,
+ /// the function will always return `Ready(HUP)`. This should be treated as
+ /// the end of the readiness stream.
+ ///
+    /// Ensure that the resource has been registered via [`new`] first.
+ ///
+ /// # Return value
+ ///
+ /// There are several possible return values:
+ ///
+ /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received
+ /// a new readiness event. The readiness value is included.
+ ///
+ /// * `Poll::Pending` means that no new readiness events have been received
+ /// since the last call to `poll_write_ready`.
+ ///
+ /// * `Poll::Ready(Err(err))` means that the registration has encountered an
+ /// error. This error either represents a permanent internal error **or**
+    ///   the fact that the resource was never registered via [`new`].
+ ///
+    /// [`new`]: #method.new
+ /// [edge-triggered]: https://docs.rs/mio/0.6/mio/struct.Poll.html#edge-triggered-and-level-triggered
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called from outside of a task context.
+ pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ let v = self.poll_ready(Direction::Write, Some(cx))?;
+ match v {
+ Some(v) => Poll::Ready(Ok(v)),
+ None => Poll::Pending,
+ }
+ }
+
+ /// Consumes any pending write readiness event.
+ ///
+ /// This function is identical to [`poll_write_ready`] **except** that it
+ /// will not notify the current task when a new event is received. As such,
+ /// it is safe to call this function from outside of a task context.
+ ///
+ /// [`poll_write_ready`]: #method.poll_write_ready
+ pub fn take_write_ready(&self) -> io::Result<Option<mio::Ready>> {
+ self.poll_ready(Direction::Write, None)
+ }
+
+ /// Polls for events on the I/O resource's `direction` readiness stream.
+ ///
+    /// If called with a task context, the task will be notified when a new
+    /// event is received.
+ fn poll_ready(
+ &self,
+ direction: Direction,
+ cx: Option<&mut Context<'_>>,
+ ) -> io::Result<Option<mio::Ready>> {
+ let inner = match self.handle.inner() {
+ Some(inner) => inner,
+ None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")),
+ };
+
+ // If the task should be notified about new events, ensure that it has
+ // been registered
+ if let Some(ref cx) = cx {
+ inner.register(self.address, direction, cx.waker().clone())
+ }
+
+ let mask = direction.mask();
+ let mask_no_hup = (mask - platform::hup() - platform::error()).as_usize();
+
+ let sched = inner.io_dispatch.get(self.address).unwrap();
+
+ // This consumes the current readiness state **except** for HUP and
+ // error. HUP and error are excluded because a) they are final states
+        // and never transition out and b) both the read AND the write
+        // directions need to be able to observe these states.
+ //
+ // # Platform-specific behavior
+ //
+ // HUP and error readiness are platform-specific. On epoll platforms,
+ // HUP has specific conditions that must be met by both peers of a
+ // connection in order to be triggered.
+ //
+ // On epoll platforms, `EPOLLERR` is signaled through
+ // `UnixReady::error()` and is important to be observable by both read
+ // AND write. A specific case that `EPOLLERR` occurs is when the read
+ // end of a pipe is closed. When this occurs, a peer blocked by
+ // writing to the pipe should be notified.
+ let curr_ready = sched
+ .set_readiness(self.address, |curr| curr & (!mask_no_hup))
+ .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address));
+
+ let mut ready = mask & mio::Ready::from_usize(curr_ready);
+
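+        // If no readiness was consumed, register the waker and check once
+        // more: an event may have arrived between the first check and the
+        // waker registration, and re-checking prevents a lost wakeup.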
+ if ready.is_empty() {
+ if let Some(cx) = cx {
+ // Update the task info
+ match direction {
+ Direction::Read => sched.reader.register_by_ref(cx.waker()),
+ Direction::Write => sched.writer.register_by_ref(cx.waker()),
+ }
+
+ // Try again
+ let curr_ready = sched
+ .set_readiness(self.address, |curr| curr & (!mask_no_hup))
+ .unwrap_or_else(|_| panic!("address {:?} no longer valid!", self.address));
+ ready = mask & mio::Ready::from_usize(curr_ready);
+ }
+ }
+
+ if ready.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(ready))
+ }
+ }
+}
+
+unsafe impl Send for Registration {}
+unsafe impl Sync for Registration {}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ let inner = match self.handle.inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+ inner.drop_source(self.address);
+ }
+}
diff --git a/third_party/rust/tokio/src/io/seek.rs b/third_party/rust/tokio/src/io/seek.rs
new file mode 100644
index 0000000000..e3b5bf6b6f
--- /dev/null
+++ b/third_party/rust/tokio/src/io/seek.rs
@@ -0,0 +1,56 @@
+use crate::io::AsyncSeek;
+use std::future::Future;
+use std::io::{self, SeekFrom};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Seek<'a, S: ?Sized> {
+ seek: &'a mut S,
+ pos: Option<SeekFrom>,
+}
+
+pub(crate) fn seek<S>(seek: &mut S, pos: SeekFrom) -> Seek<'_, S>
+where
+ S: AsyncSeek + ?Sized + Unpin,
+{
+ Seek {
+ seek,
+ pos: Some(pos),
+ }
+}
+
+impl<S> Future for Seek<'_, S>
+where
+ S: AsyncSeek + ?Sized + Unpin,
+{
+ type Output = io::Result<u64>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = &mut *self;
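+        // A seek runs in two phases: `start_seek` submits the target
+        // position, then `poll_complete` drives the operation until the new
+        // position is available. `pos` is `None` once the seek has started.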
+ match me.pos {
+ Some(pos) => match Pin::new(&mut me.seek).start_seek(cx, pos) {
+ Poll::Ready(Ok(())) => {
+ me.pos = None;
+ Pin::new(&mut me.seek).poll_complete(cx)
+ }
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Pending => Poll::Pending,
+ },
+ None => Pin::new(&mut me.seek).poll_complete(cx),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<Seek<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/split.rs b/third_party/rust/tokio/src/io/split.rs
new file mode 100644
index 0000000000..134b937a5f
--- /dev/null
+++ b/third_party/rust/tokio/src/io/split.rs
@@ -0,0 +1,195 @@
+//! Split a single value implementing `AsyncRead + AsyncWrite` into separate
+//! `AsyncRead` and `AsyncWrite` handles.
+//!
+//! To restore this read/write object from its `split::ReadHalf` and
+//! `split::WriteHalf`, use `unsplit`.
+
+use crate::io::{AsyncRead, AsyncWrite};
+
+use bytes::{Buf, BufMut};
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering::{Acquire, Release};
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// The readable half of a value returned from [`split`](split()).
+ pub struct ReadHalf<T> {
+ inner: Arc<Inner<T>>,
+ }
+
+ /// The writable half of a value returned from [`split`](split()).
+ pub struct WriteHalf<T> {
+ inner: Arc<Inner<T>>,
+ }
+
+ /// Splits a single value implementing `AsyncRead + AsyncWrite` into separate
+ /// `AsyncRead` and `AsyncWrite` handles.
+ ///
+ /// To restore this read/write object from its `ReadHalf` and
+    /// `WriteHalf`, use [`unsplit`](ReadHalf::unsplit()).
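+    ///
+    /// # Examples
+    ///
+    /// A short sketch, marked `ignore` since it assumes the `tcp` feature
+    /// and a reachable placeholder address:
+    ///
+    /// ```ignore
+    /// use tokio::io::split;
+    /// use tokio::net::TcpStream;
+    ///
+    /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+    /// let (rd, wr) = split(stream);
+    ///
+    /// // The two halves can now be moved to separate tasks and later
+    /// // recombined into the original stream.
+    /// let stream = rd.unsplit(wr);
+    /// ```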
+ pub fn split<T>(stream: T) -> (ReadHalf<T>, WriteHalf<T>)
+ where
+ T: AsyncRead + AsyncWrite,
+ {
+ let inner = Arc::new(Inner {
+ locked: AtomicBool::new(false),
+ stream: UnsafeCell::new(stream),
+ });
+
+ let rd = ReadHalf {
+ inner: inner.clone(),
+ };
+
+ let wr = WriteHalf { inner };
+
+ (rd, wr)
+ }
+}
+
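+// `locked` plus the `UnsafeCell` form a minimal spinlock around the stream:
+// `poll_lock` acquires the flag and `Guard` releases it on drop.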
+struct Inner<T> {
+ locked: AtomicBool,
+ stream: UnsafeCell<T>,
+}
+
+struct Guard<'a, T> {
+ inner: &'a Inner<T>,
+}
+
+impl<T> ReadHalf<T> {
+ /// Checks if this `ReadHalf` and some `WriteHalf` were split from the same
+ /// stream.
+ pub fn is_pair_of(&self, other: &WriteHalf<T>) -> bool {
+ other.is_pair_of(&self)
+ }
+
+ /// Reunites with a previously split `WriteHalf`.
+ ///
+ /// # Panics
+ ///
+    /// If this `ReadHalf` and the given `WriteHalf` do not originate from
+    /// the same `split` operation, this method will panic. This can be
+    /// checked ahead of time by calling [`is_pair_of`](Self::is_pair_of).
+ pub fn unsplit(self, wr: WriteHalf<T>) -> T {
+ if self.is_pair_of(&wr) {
+ drop(wr);
+
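+            // `wr` was dropped above, so this `Arc` holds the only remaining
+            // reference and `try_unwrap` cannot fail.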
+ let inner = Arc::try_unwrap(self.inner)
+ .ok()
+ .expect("`Arc::try_unwrap` failed");
+
+ inner.stream.into_inner()
+ } else {
+            panic!("Unrelated `split::WriteHalf` passed to `split::ReadHalf::unsplit`.")
+ }
+ }
+}
+
+impl<T> WriteHalf<T> {
+    /// Checks if this `WriteHalf` and some `ReadHalf` were split from the same
+ /// stream.
+ pub fn is_pair_of(&self, other: &ReadHalf<T>) -> bool {
+ Arc::ptr_eq(&self.inner, &other.inner)
+ }
+}
+
+impl<T: AsyncRead> AsyncRead for ReadHalf<T> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_read(cx, buf)
+ }
+
+ fn poll_read_buf<B: BufMut>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<io::Result<usize>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_read_buf(cx, buf)
+ }
+}
+
+impl<T: AsyncWrite> AsyncWrite for WriteHalf<T> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_write(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_flush(cx)
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_shutdown(cx)
+ }
+
+ fn poll_write_buf<B: Buf>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<Result<usize, io::Error>> {
+ let mut inner = ready!(self.inner.poll_lock(cx));
+ inner.stream_pin().poll_write_buf(cx, buf)
+ }
+}
+
+impl<T> Inner<T> {
+ fn poll_lock(&self, cx: &mut Context<'_>) -> Poll<Guard<'_, T>> {
+ if !self.locked.compare_and_swap(false, true, Acquire) {
+ Poll::Ready(Guard { inner: self })
+ } else {
+ // Spin... but investigate a better strategy
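+            //
+            // Waking the task right after yielding causes the executor to
+            // poll again soon, so the halves effectively spin until the
+            // other side releases the lock.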
+
+ std::thread::yield_now();
+ cx.waker().wake_by_ref();
+
+ Poll::Pending
+ }
+ }
+}
+
+impl<T> Guard<'_, T> {
+ fn stream_pin(&mut self) -> Pin<&mut T> {
+ // safety: the stream is pinned in `Arc` and the `Guard` ensures mutual
+ // exclusion.
+ unsafe { Pin::new_unchecked(&mut *self.inner.stream.get()) }
+ }
+}
+
+impl<T> Drop for Guard<'_, T> {
+ fn drop(&mut self) {
+ self.inner.locked.store(false, Release);
+ }
+}
+
+unsafe impl<T: Send> Send for ReadHalf<T> {}
+unsafe impl<T: Send> Send for WriteHalf<T> {}
+unsafe impl<T: Sync> Sync for ReadHalf<T> {}
+unsafe impl<T: Sync> Sync for WriteHalf<T> {}
+
+impl<T: fmt::Debug> fmt::Debug for ReadHalf<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("split::ReadHalf").finish()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for WriteHalf<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("split::WriteHalf").finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/io/stderr.rs b/third_party/rust/tokio/src/io/stderr.rs
new file mode 100644
index 0000000000..99607dc604
--- /dev/null
+++ b/third_party/rust/tokio/src/io/stderr.rs
@@ -0,0 +1,108 @@
+use crate::io::blocking::Blocking;
+use crate::io::AsyncWrite;
+
+use std::io;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+cfg_io_std! {
+ /// A handle to the standard error stream of a process.
+ ///
+ /// Concurrent writes to stderr must be executed with care: Only individual
+ /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+ /// you should be aware that writes using [`write_all`] are not guaranteed
+ /// to occur as a single write, so multiple threads writing data with
+ /// [`write_all`] may result in interleaved output.
+ ///
+ /// Created by the [`stderr`] function.
+ ///
+ /// [`stderr`]: stderr()
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+    ///     let mut stderr = io::stderr();
+ /// stderr.write_all(b"Print some error here.").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[derive(Debug)]
+ pub struct Stderr {
+ std: Blocking<std::io::Stderr>,
+ }
+
+ /// Constructs a new handle to the standard error of the current process.
+ ///
+    /// The returned handle allows writing to standard error from within the
+ /// Tokio runtime.
+ ///
+ /// Concurrent writes to stderr must be executed with care: Only individual
+ /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+ /// you should be aware that writes using [`write_all`] are not guaranteed
+ /// to occur as a single write, so multiple threads writing data with
+ /// [`write_all`] may result in interleaved output.
+ ///
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+    ///     let mut stderr = io::stderr();
+ /// stderr.write_all(b"Print some error here.").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn stderr() -> Stderr {
+ let std = io::stderr();
+ Stderr {
+ std: Blocking::new(std),
+ }
+ }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stderr {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ std::io::stderr().as_raw_fd()
+ }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stderr {
+ fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+ std::io::stderr().as_raw_handle()
+ }
+}
+
+impl AsyncWrite for Stderr {
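+    // Each operation is forwarded to the wrapped `Blocking` handle, which
+    // runs the actual blocking `std::io::Stderr` call off the async threads.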
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.std).poll_write(cx, buf)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Pin::new(&mut self.std).poll_flush(cx)
+ }
+
+ fn poll_shutdown(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ Pin::new(&mut self.std).poll_shutdown(cx)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/stdin.rs b/third_party/rust/tokio/src/io/stdin.rs
new file mode 100644
index 0000000000..214c4d0564
--- /dev/null
+++ b/third_party/rust/tokio/src/io/stdin.rs
@@ -0,0 +1,70 @@
+use crate::io::blocking::Blocking;
+use crate::io::AsyncRead;
+
+use std::io;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+cfg_io_std! {
+ /// A handle to the standard input stream of a process.
+ ///
+    /// The handle implements the [`AsyncRead`] trait, but concurrent reads
+    /// of `Stdin` must be executed with care.
+ ///
+ /// As an additional caveat, reading from the handle may block the calling
+ /// future indefinitely if there is not enough data available. This makes this
+ /// handle unsuitable for use in any circumstance where immediate reaction to
+ /// available data is required, e.g. interactive use or when implementing a
+ /// subprocess driven by requests on the standard input.
+ ///
+ /// Created by the [`stdin`] function.
+ ///
+ /// [`stdin`]: fn@stdin
+ /// [`AsyncRead`]: trait@AsyncRead
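+    ///
+    /// # Examples
+    ///
+    /// A small sketch reading a single chunk from standard input (not
+    /// suitable for interactive prompts, per the caveat above):
+    ///
+    /// ```no_run
+    /// use tokio::io::{self, AsyncReadExt};
+    ///
+    /// #[tokio::main]
+    /// async fn main() -> io::Result<()> {
+    ///     let mut stdin = io::stdin();
+    ///     let mut buf = [0u8; 1024];
+    ///
+    ///     // A single read: resolves once some input is available or EOF.
+    ///     let n = stdin.read(&mut buf).await?;
+    ///     println!("read {} bytes", n);
+    ///     Ok(())
+    /// }
+    /// ```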
+ #[derive(Debug)]
+ pub struct Stdin {
+ std: Blocking<std::io::Stdin>,
+ }
+
+ /// Constructs a new handle to the standard input of the current process.
+ ///
+    /// The returned handle allows reading from standard input from within the
+ /// Tokio runtime.
+ ///
+    /// Note that reading from the handle may block the calling
+ /// future indefinitely if there is not enough data available. This makes this
+ /// handle unsuitable for use in any circumstance where immediate reaction to
+ /// available data is required, e.g. interactive use or when implementing a
+ /// subprocess driven by requests on the standard input.
+ pub fn stdin() -> Stdin {
+ let std = io::stdin();
+ Stdin {
+ std: Blocking::new(std),
+ }
+ }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stdin {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ std::io::stdin().as_raw_fd()
+ }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stdin {
+ fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+ std::io::stdin().as_raw_handle()
+ }
+}
+
+impl AsyncRead for Stdin {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.std).poll_read(cx, buf)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/stdout.rs b/third_party/rust/tokio/src/io/stdout.rs
new file mode 100644
index 0000000000..5377993a46
--- /dev/null
+++ b/third_party/rust/tokio/src/io/stdout.rs
@@ -0,0 +1,108 @@
+use crate::io::blocking::Blocking;
+use crate::io::AsyncWrite;
+
+use std::io;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+cfg_io_std! {
+ /// A handle to the standard output stream of a process.
+ ///
+ /// Concurrent writes to stdout must be executed with care: Only individual
+ /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+ /// you should be aware that writes using [`write_all`] are not guaranteed
+ /// to occur as a single write, so multiple threads writing data with
+ /// [`write_all`] may result in interleaved output.
+ ///
+ /// Created by the [`stdout`] function.
+ ///
+ /// [`stdout`]: stdout()
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut stdout = io::stdout();
+ /// stdout.write_all(b"Hello world!").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ #[derive(Debug)]
+ pub struct Stdout {
+ std: Blocking<std::io::Stdout>,
+ }
+
+ /// Constructs a new handle to the standard output of the current process.
+ ///
+    /// The returned handle allows writing to standard out from within the
+ /// Tokio runtime.
+ ///
+ /// Concurrent writes to stdout must be executed with care: Only individual
+ /// writes to this [`AsyncWrite`] are guaranteed to be intact. In particular
+ /// you should be aware that writes using [`write_all`] are not guaranteed
+ /// to occur as a single write, so multiple threads writing data with
+ /// [`write_all`] may result in interleaved output.
+ ///
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`write_all`]: crate::io::AsyncWriteExt::write_all()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut stdout = io::stdout();
+ /// stdout.write_all(b"Hello world!").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn stdout() -> Stdout {
+ let std = io::stdout();
+ Stdout {
+ std: Blocking::new(std),
+ }
+ }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for Stdout {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ std::io::stdout().as_raw_fd()
+ }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for Stdout {
+ fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+ std::io::stdout().as_raw_handle()
+ }
+}
+
+impl AsyncWrite for Stdout {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.std).poll_write(cx, buf)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Pin::new(&mut self.std).poll_flush(cx)
+ }
+
+ fn poll_shutdown(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ Pin::new(&mut self.std).poll_shutdown(cx)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs b/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs
new file mode 100644
index 0000000000..1bfab90220
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/async_buf_read_ext.rs
@@ -0,0 +1,258 @@
+use crate::io::util::lines::{lines, Lines};
+use crate::io::util::read_line::{read_line, ReadLine};
+use crate::io::util::read_until::{read_until, ReadUntil};
+use crate::io::util::split::{split, Split};
+use crate::io::AsyncBufRead;
+
+cfg_io_util! {
+ /// An extension trait which adds utility methods to [`AsyncBufRead`] types.
+ ///
+ /// [`AsyncBufRead`]: crate::io::AsyncBufRead
+ pub trait AsyncBufReadExt: AsyncBufRead {
+ /// Reads all bytes into `buf` until the delimiter `byte` or EOF is reached.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+        /// async fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> io::Result<usize>;
+ /// ```
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// delimiter or EOF is found. Once found, all bytes up to, and including,
+ /// the delimiter (if found) will be appended to `buf`.
+ ///
+ /// If successful, this function will return the total number of bytes read.
+ ///
+ /// # Errors
+ ///
+ /// This function will ignore all instances of [`ErrorKind::Interrupted`] and
+ /// will otherwise return any errors returned by [`fill_buf`].
+ ///
+ /// If an I/O error is encountered then all bytes read so far will be
+ /// present in `buf` and its length will have been adjusted appropriately.
+ ///
+ /// [`fill_buf`]: AsyncBufRead::poll_fill_buf
+ /// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
+ ///
+ /// # Examples
+ ///
+        /// [`std::io::Cursor`][`Cursor`] is a type that implements
+        /// `AsyncBufRead`. In this example, we use [`Cursor`] to read all the
+        /// bytes in a byte slice in hyphen-delimited segments:
+ ///
+ /// [`Cursor`]: std::io::Cursor
+ ///
+ /// ```
+ /// use tokio::io::AsyncBufReadExt;
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut cursor = Cursor::new(b"lorem-ipsum");
+ /// let mut buf = vec![];
+ ///
+ /// // cursor is at 'l'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ ///
+ /// assert_eq!(num_bytes, 6);
+ /// assert_eq!(buf, b"lorem-");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'i'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ ///
+ /// assert_eq!(num_bytes, 5);
+ /// assert_eq!(buf, b"ipsum");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_until(b'-', &mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, b"");
+ /// }
+ /// ```
+ fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec<u8>) -> ReadUntil<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read_until(self, byte, buf)
+ }
+
+        /// Reads all bytes until a newline (the 0xA byte) is reached, and appends
+ /// them to the provided buffer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_line(&mut self, buf: &mut String) -> io::Result<usize>;
+ /// ```
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes
+ /// up to, and including, the delimiter (if found) will be appended to
+ /// `buf`.
+ ///
+ /// If successful, this function will return the total number of bytes read.
+ ///
+ /// If this function returns `Ok(0)`, the stream has reached EOF.
+ ///
+ /// # Errors
+ ///
+ /// This function has the same error semantics as [`read_until`] and will
+ /// also return an error if the read bytes are not valid UTF-8. If an I/O
+ /// error is encountered then `buf` may contain some bytes already read in
+ /// the event that all data read so far was valid UTF-8.
+ ///
+ /// [`read_until`]: AsyncBufReadExt::read_until
+ ///
+ /// # Examples
+ ///
+ /// [`std::io::Cursor`][`Cursor`] is a type that implements
+ /// `AsyncBufRead`. In this example, we use [`Cursor`] to read all the
+ /// lines in a byte slice:
+ ///
+ /// [`Cursor`]: std::io::Cursor
+ ///
+ /// ```
+ /// use tokio::io::AsyncBufReadExt;
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut cursor = Cursor::new(b"foo\nbar");
+ /// let mut buf = String::new();
+ ///
+ /// // cursor is at 'f'
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ ///
+ /// assert_eq!(num_bytes, 4);
+ /// assert_eq!(buf, "foo\n");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'b'
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ ///
+ /// assert_eq!(num_bytes, 3);
+ /// assert_eq!(buf, "bar");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_line(&mut buf)
+ /// .await
+ /// .expect("reading from cursor won't fail");
+ ///
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, "");
+ /// }
+ /// ```
+ fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read_line(self, buf)
+ }
+
+ /// Returns a stream of the contents of this reader split on the byte
+ /// `byte`.
+ ///
+ /// This method is the asynchronous equivalent to
+ /// [`BufRead::split`](std::io::BufRead::split).
+ ///
+ /// The stream returned from this function will yield instances of
+ /// [`io::Result`]`<`[`Vec<u8>`]`>`. Each vector returned will *not* have
+ /// the delimiter byte at the end.
+ ///
+ /// [`io::Result`]: std::io::Result
+ /// [`Vec<u8>`]: std::vec::Vec
+ ///
+ /// # Errors
+ ///
+ /// Each item of the stream has the same error semantics as
+ /// [`AsyncBufReadExt::read_until`](AsyncBufReadExt::read_until).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::io::AsyncBufRead;
+ /// use tokio::io::AsyncBufReadExt;
+ ///
+ /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
+ /// let mut segments = my_buf_read.split(b'f');
+ ///
+ /// while let Some(segment) = segments.next_segment().await? {
+ /// println!("length = {}", segment.len())
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ fn split(self, byte: u8) -> Split<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ split(self, byte)
+ }
+
+        /// Returns a stream over the lines of this reader.
+        ///
+        /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines).
+ ///
+ /// The stream returned from this function will yield instances of
+ /// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline
+ /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end.
+ ///
+ /// [`io::Result`]: std::io::Result
+ /// [`String`]: String
+ ///
+ /// # Errors
+ ///
+ /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`].
+ ///
+ /// # Examples
+ ///
+        /// [`std::io::Cursor`][`Cursor`] is a type that implements
+        /// `AsyncBufRead`. In this example, we use [`Cursor`] to iterate over
+        /// all the lines in a byte slice.
+ ///
+ /// [`Cursor`]: std::io::Cursor
+ ///
+ /// ```
+ /// use tokio::io::AsyncBufReadExt;
+ /// use tokio::stream::StreamExt;
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor");
+ ///
+ /// let mut lines = cursor.lines().map(|res| res.unwrap());
+ ///
+ /// assert_eq!(lines.next().await, Some(String::from("lorem")));
+ /// assert_eq!(lines.next().await, Some(String::from("ipsum")));
+ /// assert_eq!(lines.next().await, Some(String::from("dolor")));
+ /// assert_eq!(lines.next().await, None);
+ /// }
+ /// ```
+ ///
+ /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line
+ fn lines(self) -> Lines<Self>
+ where
+ Self: Sized,
+ {
+ lines(self)
+ }
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized> AsyncBufReadExt for R {}
diff --git a/third_party/rust/tokio/src/io/util/async_read_ext.rs b/third_party/rust/tokio/src/io/util/async_read_ext.rs
new file mode 100644
index 0000000000..d4402db621
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/async_read_ext.rs
@@ -0,0 +1,807 @@
+use crate::io::util::chain::{chain, Chain};
+use crate::io::util::read::{read, Read};
+use crate::io::util::read_buf::{read_buf, ReadBuf};
+use crate::io::util::read_exact::{read_exact, ReadExact};
+use crate::io::util::read_int::{ReadI128, ReadI16, ReadI32, ReadI64, ReadI8};
+use crate::io::util::read_int::{ReadU128, ReadU16, ReadU32, ReadU64, ReadU8};
+use crate::io::util::read_to_end::{read_to_end, ReadToEnd};
+use crate::io::util::read_to_string::{read_to_string, ReadToString};
+use crate::io::util::take::{take, Take};
+use crate::io::AsyncRead;
+
+use bytes::BufMut;
+
+cfg_io_util! {
+    /// Defines a numeric reader method for each supported integer type.
+ macro_rules! read_impl {
+ (
+ $(
+ $(#[$outer:meta])*
+ fn $name:ident(&mut self) -> $($fut:ident)*;
+ )*
+ ) => {
+ $(
+ $(#[$outer])*
+ fn $name<'a>(&'a mut self) -> $($fut)*<&'a mut Self> where Self: Unpin {
+ $($fut)*::new(self)
+ }
+ )*
+ }
+ }
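+
+    // Items passed to `read_impl!`, e.g. `fn read_u8(&mut self) -> ReadU8;`,
+    // expand to `fn read_u8<'a>(&'a mut self) -> ReadU8<&'a mut Self>`, a
+    // future that resolves to `io::Result<u8>`.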
+
+ /// Reads bytes from a source.
+ ///
+    /// Implemented as an extension trait, adding utility methods to all
+ /// [`AsyncRead`] types. Callers will tend to import this trait instead of
+ /// [`AsyncRead`].
+ ///
+ /// As a convenience, this trait may be imported using the [`prelude`]:
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // The `read` method is defined by this trait.
+ /// let n = f.read(&mut buffer[..]).await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+    /// See the [module][crate::io] documentation for more details.
+ ///
+ /// [`AsyncRead`]: AsyncRead
+ /// [`prelude`]: crate::prelude
+ pub trait AsyncReadExt: AsyncRead {
+ /// Creates a new `AsyncRead` instance that chains this stream with
+ /// `next`.
+ ///
+ /// The returned `AsyncRead` instance will first read all bytes from this object
+ /// until EOF is encountered. Afterwards the output is equivalent to the
+ /// output of `next`.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let f1 = File::open("foo.txt").await?;
+ /// let f2 = File::open("bar.txt").await?;
+ ///
+ /// let mut handle = f1.chain(f2);
+ /// let mut buffer = String::new();
+ ///
+ /// // read the value into a String. We could use any AsyncRead
+ /// // method here, this is just one example.
+ /// handle.read_to_string(&mut buffer).await?;
+ /// Ok(())
+ /// }
+ /// ```
+ fn chain<R>(self, next: R) -> Chain<Self, R>
+ where
+ Self: Sized,
+ R: AsyncRead,
+ {
+ chain(self, next)
+ }
+
+ /// Pulls some bytes from this source into the specified buffer,
+ /// returning how many bytes were read.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read(&mut self, buf: &mut [u8]) -> io::Result<usize>;
+ /// ```
+ ///
+        /// This function does not provide any guarantees about whether it
+        /// completes immediately or asynchronously.
+ ///
+ /// If the return value of this method is `Ok(n)`, then it must be
+ /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates
+ /// that the buffer `buf` has been filled in with `n` bytes of data from
+ /// this source. If `n` is `0`, then it can indicate one of two
+ /// scenarios:
+ ///
+ /// 1. This reader has reached its "end of file" and will likely no longer
+ /// be able to produce bytes. Note that this does not mean that the
+ /// reader will *always* no longer be able to produce bytes.
+ /// 2. The buffer specified was 0 bytes in length.
+ ///
+ /// No guarantees are provided about the contents of `buf` when this
+        /// function is called; implementations cannot rely on any property of the
+ /// contents of `buf` being `true`. It is recommended that *implementations*
+ /// only write data to `buf` instead of reading its contents.
+ ///
+ /// Correspondingly, however, *callers* of this method may not assume
+ /// any guarantees about how the implementation uses `buf`. It is
+ /// possible that the code that's supposed to write to the buffer might
+ /// also read from it. It is your responsibility to make sure that `buf`
+ /// is initialized before calling `read`.
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters any form of I/O or other error, an error
+ /// variant will be returned. If an error is returned then it must be
+ /// guaranteed that no bytes were read.
+ ///
+ /// # Examples
+ ///
+        /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // read up to 10 bytes
+ /// let n = f.read(&mut buffer[..]).await?;
+ ///
+ /// println!("The bytes: {:?}", &buffer[..n]);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read(self, buf)
+ }
+
+ /// Pulls some bytes from this source into the specified buffer,
+ /// advancing the buffer's internal cursor.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> io::Result<usize>;
+ /// ```
+ ///
+ /// Usually, only a single `read` syscall is issued, even if there is
+ /// more space in the supplied buffer.
+ ///
+        /// This function does not provide any guarantees about whether it
+        /// completes immediately or asynchronously.
+ ///
+ /// # Return
+ ///
+ /// On a successful read, the number of read bytes is returned. If the
+ /// supplied buffer is not empty and the function returns `Ok(0)` then
+        /// the source has reached an "end-of-file" event.
+ ///
+ /// # Errors
+ ///
+ /// If this function encounters any form of I/O or other error, an error
+ /// variant will be returned. If an error is returned then it must be
+ /// guaranteed that no bytes were read.
+ ///
+ /// # Examples
+ ///
+        /// [`File`] implements `AsyncRead` and [`BytesMut`] implements [`BufMut`]:
+ ///
+ /// [`File`]: crate::fs::File
+ /// [`BytesMut`]: bytes::BytesMut
+ /// [`BufMut`]: bytes::BufMut
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use bytes::BytesMut;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = BytesMut::with_capacity(10);
+ ///
+ /// assert!(buffer.is_empty());
+ ///
+ /// // read up to 10 bytes, note that the return value is not needed
+ /// // to access the data that was read as `buffer`'s internal
+ /// // cursor is updated.
+ /// f.read_buf(&mut buffer).await?;
+ ///
+ /// println!("The bytes: {:?}", &buffer[..]);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_buf<'a, B>(&'a mut self, buf: &'a mut B) -> ReadBuf<'a, Self, B>
+ where
+ Self: Sized,
+ B: BufMut,
+ {
+ read_buf(self, buf)
+ }
+
+ /// Reads the exact number of bytes required to fill `buf`.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<usize>;
+ /// ```
+ ///
+ /// This function reads as many bytes as necessary to completely fill
+ /// the specified buffer `buf`.
+ ///
+ /// No guarantees are provided about the contents of `buf` when this
+        /// function is called; implementations cannot rely on any property of
+ /// the contents of `buf` being `true`. It is recommended that
+ /// implementations only write data to `buf` instead of reading its
+ /// contents.
+ ///
+ /// # Errors
+ ///
+ /// If the operation encounters an "end of file" before completely
+ /// filling the buffer, it returns an error of the kind
+ /// [`ErrorKind::UnexpectedEof`]. The contents of `buf` are unspecified
+ /// in this case.
+ ///
+ /// If any other read error is encountered then the operation
+ /// immediately returns. The contents of `buf` are unspecified in this
+ /// case.
+ ///
+ /// If this operation returns an error, it is unspecified how many bytes
+ /// it has read, but it will never read more than would be necessary to
+ /// completely fill the buffer.
+ ///
+ /// # Examples
+ ///
+        /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = [0; 10];
+ ///
+ /// // read exactly 10 bytes
+ /// f.read_exact(&mut buffer).await?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`ErrorKind::UnexpectedEof`]: std::io::ErrorKind::UnexpectedEof
+ fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read_exact(self, buf)
+ }
+
+ read_impl! {
+ /// Reads an unsigned 8 bit integer from the underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_u8(&mut self) -> io::Result<u8>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+ /// Read unsigned 8 bit integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![2, 5]);
+ ///
+ /// assert_eq!(2, reader.read_u8().await?);
+ /// assert_eq!(5, reader.read_u8().await?);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_u8(&mut self) -> ReadU8;
+
+ /// Reads a signed 8 bit integer from the underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_i8(&mut self) -> io::Result<i8>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read signed 8 bit integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![0x02, 0xfb]);
+ ///
+ /// assert_eq!(2, reader.read_i8().await?);
+ /// assert_eq!(-5, reader.read_i8().await?);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_i8(&mut self) -> ReadI8;
+
+ /// Reads an unsigned 16-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_u16(&mut self) -> io::Result<u16>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read unsigned 16 bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![2, 5, 3, 0]);
+ ///
+ /// assert_eq!(517, reader.read_u16().await?);
+ /// assert_eq!(768, reader.read_u16().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_u16(&mut self) -> ReadU16;
+
+ /// Reads a signed 16-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_i16(&mut self) -> io::Result<i16>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read signed 16 bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]);
+ ///
+ /// assert_eq!(193, reader.read_i16().await?);
+ /// assert_eq!(-132, reader.read_i16().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_i16(&mut self) -> ReadI16;
+
+ /// Reads an unsigned 32-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_u32(&mut self) -> io::Result<u32>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read unsigned 32-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]);
+ ///
+ /// assert_eq!(267, reader.read_u32().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_u32(&mut self) -> ReadU32;
+
+ /// Reads a signed 32-bit integer in big-endian order from the
+ /// underlying reader.
+            ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_i32(&mut self) -> io::Result<i32>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read signed 32-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]);
+ ///
+ /// assert_eq!(-34253, reader.read_i32().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_i32(&mut self) -> ReadI32;
+
+ /// Reads an unsigned 64-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_u64(&mut self) -> io::Result<u64>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read unsigned 64-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![
+ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83
+ /// ]);
+ ///
+ /// assert_eq!(918733457491587, reader.read_u64().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_u64(&mut self) -> ReadU64;
+
+            /// Reads a signed 64-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_i64(&mut self) -> io::Result<i64>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read signed 64-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]);
+ ///
+ /// assert_eq!(i64::min_value(), reader.read_i64().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_i64(&mut self) -> ReadI64;
+
+ /// Reads an unsigned 128-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_u128(&mut self) -> io::Result<u128>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read unsigned 128-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![
+ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83,
+ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83
+ /// ]);
+ ///
+ /// assert_eq!(16947640962301618749969007319746179, reader.read_u128().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_u128(&mut self) -> ReadU128;
+
+            /// Reads a signed 128-bit integer in big-endian order from the
+ /// underlying reader.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_i128(&mut self) -> io::Result<i128>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered reader to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncReadExt::read_exact`].
+ ///
+ /// [`AsyncReadExt::read_exact`]: AsyncReadExt::read_exact
+ ///
+ /// # Examples
+ ///
+            /// Read signed 128-bit big-endian integers from an `AsyncRead`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut reader = Cursor::new(vec![
+ /// 0x80, 0, 0, 0, 0, 0, 0, 0,
+ /// 0, 0, 0, 0, 0, 0, 0, 0
+ /// ]);
+ ///
+ /// assert_eq!(i128::min_value(), reader.read_i128().await?);
+ /// Ok(())
+ /// }
+ /// ```
+ fn read_i128(&mut self) -> ReadI128;
+ }
+
+ /// Reads all bytes until EOF in this source, placing them into `buf`.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize>;
+ /// ```
+ ///
+ /// All bytes read from this source will be appended to the specified
+ /// buffer `buf`. This function will continuously call [`read()`] to
+        /// append more data to `buf` until [`read()`] returns `Ok(0)`.
+ ///
+ /// If successful, the total number of bytes read is returned.
+ ///
+ /// # Errors
+ ///
+ /// If a read error is encountered then the `read_to_end` operation
+ /// immediately completes. Any bytes which have already been read will
+ /// be appended to `buf`.
+ ///
+ /// # Examples
+ ///
+        /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncReadExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = Vec::new();
+ ///
+ /// // read the whole file
+ /// f.read_to_end(&mut buffer).await?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// (See also the [`tokio::fs::read`] convenience function for reading from a
+ /// file.)
+ ///
+ /// [`tokio::fs::read`]: crate::fs::read::read
+ fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read_to_end(self, buf)
+ }
+
+ /// Reads all bytes until EOF in this source, appending them to `buf`.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize>;
+ /// ```
+ ///
+ /// If successful, the number of bytes which were read and appended to
+ /// `buf` is returned.
+ ///
+ /// # Errors
+ ///
+ /// If the data in this stream is *not* valid UTF-8 then an error is
+ /// returned and `buf` is unchanged.
+ ///
+ /// See [`read_to_end`][AsyncReadExt::read_to_end] for other error semantics.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncReadExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut f = File::open("foo.txt").await?;
+ /// let mut buffer = String::new();
+ ///
+ /// f.read_to_string(&mut buffer).await?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// (See also the [`crate::fs::read_to_string`] convenience function for
+ /// reading from a file.)
+ ///
+ /// [`crate::fs::read_to_string`]: crate::fs::read_to_string::read_to_string
+ fn read_to_string<'a>(&'a mut self, dst: &'a mut String) -> ReadToString<'a, Self>
+ where
+ Self: Unpin,
+ {
+ read_to_string(self, dst)
+ }
+
+ /// Creates an adaptor which reads at most `limit` bytes from the
+ /// underlying reader.
+ ///
+ /// This function returns a new instance of `AsyncRead` which will read
+ /// at most `limit` bytes, after which it will always return EOF
+ /// (`Ok(0)`). Any read errors will not count towards the number of
+ /// bytes read and future calls to [`read()`][read] may succeed.
+ ///
+ /// # Examples
+ ///
+ /// [`File`][crate::fs::File]s implement `AsyncRead`:
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncReadExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let f = File::open("foo.txt").await?;
+ /// let mut buffer = [0; 5];
+ ///
+ /// // read at most five bytes
+ /// let mut handle = f.take(5);
+ ///
+ /// handle.read(&mut buffer).await?;
+ /// Ok(())
+ /// }
+ /// ```
+ fn take(self, limit: u64) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ take(self, limit)
+ }
+ }
+}
+
+impl<R: AsyncRead + ?Sized> AsyncReadExt for R {}
diff --git a/third_party/rust/tokio/src/io/util/async_seek_ext.rs b/third_party/rust/tokio/src/io/util/async_seek_ext.rs
new file mode 100644
index 0000000000..c7243c7f3e
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/async_seek_ext.rs
@@ -0,0 +1,60 @@
+use crate::io::seek::{seek, Seek};
+use crate::io::AsyncSeek;
+use std::io::SeekFrom;
+
+/// An extension trait which adds utility methods to `AsyncSeek` types.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::{Cursor, SeekFrom};
+/// use tokio::prelude::*;
+///
+/// #[tokio::main]
+/// async fn main() -> io::Result<()> {
+/// let mut cursor = Cursor::new(b"abcdefg");
+///
+/// // the `seek` method is defined by this trait
+/// cursor.seek(SeekFrom::Start(3)).await?;
+///
+/// let mut buf = [0; 1];
+/// let n = cursor.read(&mut buf).await?;
+/// assert_eq!(n, 1);
+/// assert_eq!(buf, [b'd']);
+///
+/// Ok(())
+/// }
+/// ```
+pub trait AsyncSeekExt: AsyncSeek {
+ /// Creates a future which will seek an IO object, and then yield the
+ /// new position in the object.
+ ///
+ /// In the case of an error the position of the object is unspecified,
+ /// with the error yielded.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::fs::File;
+ /// use tokio::prelude::*;
+ ///
+ /// use std::io::SeekFrom;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut file = File::open("foo.txt").await?;
+ /// file.seek(SeekFrom::Start(6)).await?;
+ ///
+ /// let mut contents = vec![0u8; 10];
+ /// file.read_exact(&mut contents).await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self>
+ where
+ Self: Unpin,
+ {
+ seek(self, pos)
+ }
+}
+
+impl<S: AsyncSeek + ?Sized> AsyncSeekExt for S {}
diff --git a/third_party/rust/tokio/src/io/util/async_write_ext.rs b/third_party/rust/tokio/src/io/util/async_write_ext.rs
new file mode 100644
index 0000000000..377f4ecaf8
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/async_write_ext.rs
@@ -0,0 +1,689 @@
+use crate::io::util::flush::{flush, Flush};
+use crate::io::util::shutdown::{shutdown, Shutdown};
+use crate::io::util::write::{write, Write};
+use crate::io::util::write_all::{write_all, WriteAll};
+use crate::io::util::write_buf::{write_buf, WriteBuf};
+use crate::io::util::write_int::{WriteI128, WriteI16, WriteI32, WriteI64, WriteI8};
+use crate::io::util::write_int::{WriteU128, WriteU16, WriteU32, WriteU64, WriteU8};
+use crate::io::AsyncWrite;
+
+use bytes::Buf;
+
+cfg_io_util! {
+ /// Defines numeric writer methods.
+ macro_rules! write_impl {
+ (
+ $(
+ $(#[$outer:meta])*
+ fn $name:ident(&mut self, n: $ty:ty) -> $($fut:ident)*;
+ )*
+ ) => {
+ $(
+ $(#[$outer])*
+ fn $name<'a>(&'a mut self, n: $ty) -> $($fut)*<&'a mut Self> where Self: Unpin {
+ $($fut)*::new(self, n)
+ }
+ )*
+ }
+ }
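+
+ // For reference, each generated `write_*` method below expands to roughly:
+ //
+ //     fn write_u8<'a>(&'a mut self, n: u8) -> WriteU8<&'a mut Self>
+ //     where
+ //         Self: Unpin,
+ //     {
+ //         WriteU8::new(self, n)
+ //     }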
+
+ /// Writes bytes to a sink.
+ ///
+ /// Implemented as an extension trait, adding utility methods to all
+ /// [`AsyncWrite`] types. Callers will tend to import this trait instead of
+ /// [`AsyncWrite`].
+ ///
+ /// As a convenience, this trait may be imported using the [`prelude`]:
+ ///
+ /// ```no_run
+ /// use tokio::prelude::*;
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let data = b"some bytes";
+ ///
+ /// let mut pos = 0;
+ /// let mut buffer = File::create("foo.txt").await?;
+ ///
+ /// while pos < data.len() {
+ /// let bytes_written = buffer.write(&data[pos..]).await?;
+ /// pos += bytes_written;
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// See [module][crate::io] documentation for more details.
+ ///
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`prelude`]: crate::prelude
+ pub trait AsyncWriteExt: AsyncWrite {
+ /// Writes a buffer into this writer, returning how many bytes were
+ /// written.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write(&mut self, buf: &[u8]) -> io::Result<usize>;
+ /// ```
+ ///
+ /// This function will attempt to write the entire contents of `buf`, but
+ /// the entire write may not succeed, or the write may also generate an
+ /// error. A call to `write` represents *at most one* attempt to write to
+ /// any wrapped object.
+ ///
+ /// # Return
+ ///
+ /// If the return value is `Ok(n)` then it must be guaranteed that `n <=
+ /// buf.len()`. A return value of `0` typically means that the
+ /// underlying object is no longer able to accept bytes and will likely
+ /// not be able to in the future as well, or that the buffer provided is
+ /// empty.
+ ///
+ /// # Errors
+ ///
+ /// Each call to `write` may generate an I/O error indicating that the
+ /// operation could not be completed. If an error is returned then no bytes
+ /// in the buffer were written to this writer.
+ ///
+ /// It is **not** considered an error if the entire buffer could not be
+ /// written to this writer.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ ///
+ /// // Writes some prefix of the byte string, not necessarily all of it.
+ /// file.write(b"some bytes").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ fn write<'a>(&'a mut self, src: &'a [u8]) -> Write<'a, Self>
+ where
+ Self: Unpin,
+ {
+ write(self, src)
+ }
+
+ /// Writes a buffer into this writer, advancing the buffer's internal
+ /// cursor.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_buf<B: Buf>(&mut self, buf: &mut B) -> io::Result<usize>;
+ /// ```
+ ///
+ /// This function will attempt to write the entire contents of `buf`, but
+ /// the entire write may not succeed, or the write may also generate an
+ /// error. After the operation completes, the buffer's
+ /// internal cursor is advanced by the number of bytes written. A
+ /// subsequent call to `write_buf` using the **same** `buf` value will
+ /// resume from the point that the first call to `write_buf` completed.
+ /// A call to `write_buf` represents *at most one* attempt to write to any
+ /// wrapped object.
+ ///
+ /// # Return
+ ///
+ /// If the return value is `Ok(n)` then it must be guaranteed that `n <=
+ /// buf.remaining()`. A return value of `0` typically means that the
+ /// underlying object is no longer able to accept bytes and will likely
+ /// not be able to in the future as well, or that the buffer provided is
+ /// empty.
+ ///
+ /// # Errors
+ ///
+ /// Each call to `write_buf` may generate an I/O error indicating that the
+ /// operation could not be completed. If an error is returned then no bytes
+ /// in the buffer were written to this writer.
+ ///
+ /// It is **not** considered an error if the entire buffer could not be
+ /// written to this writer.
+ ///
+ /// # Examples
+ ///
+ /// [`File`] implements `AsyncWrite` and [`Cursor<&[u8]>`] implements [`Buf`]:
+ ///
+ /// [`File`]: crate::fs::File
+ /// [`Buf`]: bytes::Buf
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt};
+ /// use tokio::fs::File;
+ ///
+ /// use bytes::Buf;
+ /// use std::io::Cursor;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ /// let mut buffer = Cursor::new(b"data to write");
+ ///
+ /// // Loop until the entire contents of the buffer are written to
+ /// // the file.
+ /// while buffer.has_remaining() {
+ /// // Writes some prefix of the byte string, not necessarily
+ /// // all of it.
+ /// file.write_buf(&mut buffer).await?;
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_buf<'a, B>(&'a mut self, src: &'a mut B) -> WriteBuf<'a, Self, B>
+ where
+ Self: Sized,
+ B: Buf,
+ {
+ write_buf(self, src)
+ }
+
+ /// Attempts to write an entire buffer into this writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_all(&mut self, buf: &[u8]) -> io::Result<()>;
+ /// ```
+ ///
+ /// This method will continuously call [`write`] until there is no more data
+ /// to be written. This method will not return until the entire buffer
+ /// has been successfully written or an error occurs. The first
+ /// error generated from this method will be returned.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the first error that [`write`] returns.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut buffer = File::create("foo.txt").await?;
+ ///
+ /// buffer.write_all(b"some bytes").await?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`write`]: AsyncWriteExt::write
+ fn write_all<'a>(&'a mut self, src: &'a [u8]) -> WriteAll<'a, Self>
+ where
+ Self: Unpin,
+ {
+ write_all(self, src)
+ }
+
+ write_impl! {
+ /// Writes an unsigned 8-bit integer to the underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_u8(&mut self, n: u8) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write unsigned 8-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_u8(2).await?;
+ /// writer.write_u8(5).await?;
+ ///
+ /// assert_eq!(writer, b"\x02\x05");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_u8(&mut self, n: u8) -> WriteU8;
+
+ /// Writes a signed 8-bit integer to the underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_i8(&mut self, n: i8) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write signed 8-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ ///     writer.write_i8(-2).await?;
+ ///     writer.write_i8(126).await?;
+ ///
+ ///     assert_eq!(writer, b"\xfe\x7e");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_i8(&mut self, n: i8) -> WriteI8;
+
+ /// Writes an unsigned 16-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_u16(&mut self, n: u16) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write unsigned 16-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_u16(517).await?;
+ /// writer.write_u16(768).await?;
+ ///
+ /// assert_eq!(writer, b"\x02\x05\x03\x00");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_u16(&mut self, n: u16) -> WriteU16;
+
+ /// Writes a signed 16-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_i16(&mut self, n: i16) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write signed 16-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_i16(193).await?;
+ /// writer.write_i16(-132).await?;
+ ///
+ /// assert_eq!(writer, b"\x00\xc1\xff\x7c");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_i16(&mut self, n: i16) -> WriteI16;
+
+ /// Writes an unsigned 32-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_u32(&mut self, n: u32) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write unsigned 32-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_u32(267).await?;
+ /// writer.write_u32(1205419366).await?;
+ ///
+ /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_u32(&mut self, n: u32) -> WriteU32;
+
+ /// Writes a signed 32-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_i32(&mut self, n: i32) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write signed 32-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_i32(267).await?;
+ /// writer.write_i32(1205419366).await?;
+ ///
+ /// assert_eq!(writer, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_i32(&mut self, n: i32) -> WriteI32;
+
+ /// Writes an unsigned 64-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_u64(&mut self, n: u64) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write unsigned 64-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_u64(918733457491587).await?;
+ /// writer.write_u64(143).await?;
+ ///
+ /// assert_eq!(writer, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_u64(&mut self, n: u64) -> WriteU64;
+
+ /// Writes a signed 64-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_i64(&mut self, n: i64) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write signed 64-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_i64(i64::min_value()).await?;
+ /// writer.write_i64(i64::max_value()).await?;
+ ///
+ /// assert_eq!(writer, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff");
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_i64(&mut self, n: i64) -> WriteI64;
+
+ /// Writes an unsigned 128-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_u128(&mut self, n: u128) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write unsigned 128-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_u128(16947640962301618749969007319746179).await?;
+ ///
+ /// assert_eq!(writer, vec![
+ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83,
+ /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83
+ /// ]);
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_u128(&mut self, n: u128) -> WriteU128;
+
+ /// Writes a signed 128-bit integer in big-endian order to the
+ /// underlying writer.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_i128(&mut self, n: i128) -> io::Result<()>;
+ /// ```
+ ///
+ /// It is recommended to use a buffered writer to avoid excessive
+ /// syscalls.
+ ///
+ /// # Errors
+ ///
+ /// This method returns the same errors as [`AsyncWriteExt::write_all`].
+ ///
+ /// [`AsyncWriteExt::write_all`]: AsyncWriteExt::write_all
+ ///
+ /// # Examples
+ ///
+ /// Write signed 128-bit integers to an `AsyncWrite`:
+ ///
+ /// ```rust
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut writer = Vec::new();
+ ///
+ /// writer.write_i128(i128::min_value()).await?;
+ ///
+ /// assert_eq!(writer, vec![
+ /// 0x80, 0, 0, 0, 0, 0, 0, 0,
+ /// 0, 0, 0, 0, 0, 0, 0, 0
+ /// ]);
+ /// Ok(())
+ /// }
+ /// ```
+ fn write_i128(&mut self, n: i128) -> WriteI128;
+ }
+
+ /// Flushes this output stream, ensuring that all intermediately buffered
+ /// contents reach their destination.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn flush(&mut self) -> io::Result<()>;
+ /// ```
+ ///
+ /// # Errors
+ ///
+ /// It is considered an error if not all bytes could be written due to
+ /// I/O errors or EOF being reached.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, BufWriter, AsyncWriteExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let f = File::create("foo.txt").await?;
+ /// let mut buffer = BufWriter::new(f);
+ ///
+ /// buffer.write_all(b"some bytes").await?;
+ /// buffer.flush().await?;
+ /// Ok(())
+ /// }
+ /// ```
+ fn flush(&mut self) -> Flush<'_, Self>
+ where
+ Self: Unpin,
+ {
+ flush(self)
+ }
+
+ /// Shuts down the output stream, ensuring that the value can be dropped
+ /// cleanly.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn shutdown(&mut self) -> io::Result<()>;
+ /// ```
+ ///
+ /// Similar to [`flush`], all intermediately buffered data is written
+ /// to the underlying stream. Once the operation completes, the caller
+ /// should no longer attempt to write to the stream. For example, the
+ /// `TcpStream` implementation will issue a `shutdown(Write)` syscall.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, BufWriter, AsyncWriteExt};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let f = File::create("foo.txt").await?;
+ /// let mut buffer = BufWriter::new(f);
+ ///
+ /// buffer.write_all(b"some bytes").await?;
+ /// buffer.shutdown().await?;
+ /// Ok(())
+ /// }
+ /// ```
+ fn shutdown(&mut self) -> Shutdown<'_, Self>
+ where
+ Self: Unpin,
+ {
+ shutdown(self)
+ }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized> AsyncWriteExt for W {}
diff --git a/third_party/rust/tokio/src/io/util/buf_reader.rs b/third_party/rust/tokio/src/io/util/buf_reader.rs
new file mode 100644
index 0000000000..0177c0e344
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/buf_reader.rs
@@ -0,0 +1,194 @@
+use crate::io::util::DEFAULT_BUF_SIZE;
+use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite};
+
+use pin_project_lite::pin_project;
+use std::io::{self, Read};
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{cmp, fmt};
+
+pin_project! {
+ /// The `BufReader` struct adds buffering to any reader.
+ ///
+ /// It can be excessively inefficient to work directly with an [`AsyncRead`]
+ /// instance. A `BufReader` performs large, infrequent reads on the underlying
+ /// [`AsyncRead`] and maintains an in-memory buffer of the results.
+ ///
+ /// `BufReader` can improve the speed of programs that make *small* and
+ /// *repeated* read calls to the same file or network socket. It does not
+ /// help when reading very large amounts at once, or reading just one or a few
+ /// times. It also provides no advantage when reading from a source that is
+ /// already in memory, like a `Vec<u8>`.
+ ///
+ /// When the `BufReader` is dropped, the contents of its buffer will be
+ /// discarded. Creating multiple instances of a `BufReader` on the same
+ /// stream can cause data loss.
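+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of buffering line-oriented reads from a file
+ /// (assuming a readable `foo.txt`, as in the other examples in this
+ /// module):
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncBufReadExt, BufReader};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ ///     let f = File::open("foo.txt").await?;
+ ///     let mut reader = BufReader::new(f);
+ ///     let mut line = String::new();
+ ///
+ ///     // `read_line` issues many small reads; the buffer absorbs them.
+ ///     reader.read_line(&mut line).await?;
+ ///     Ok(())
+ /// }
+ /// ```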
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct BufReader<R> {
+ #[pin]
+ pub(super) inner: R,
+ pub(super) buf: Box<[u8]>,
+ pub(super) pos: usize,
+ pub(super) cap: usize,
+ }
+}
+
+impl<R: AsyncRead> BufReader<R> {
+ /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ pub fn new(inner: R) -> Self {
+ Self::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufReader` with the specified buffer capacity.
+ pub fn with_capacity(capacity: usize, inner: R) -> Self {
+ unsafe {
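+ // `set_len` exposes uninitialized memory; it is either zeroed by
+ // `prepare_uninitialized_buffer` below, or the reader guarantees
+ // never to read from the buffer it is handed.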
+ let mut buffer = Vec::with_capacity(capacity);
+ buffer.set_len(capacity);
+
+ {
+ // Convert to MaybeUninit
+ let b = &mut *(&mut buffer[..] as *mut [u8] as *mut [MaybeUninit<u8>]);
+ inner.prepare_uninitialized_buffer(b);
+ }
+ Self {
+ inner,
+ buf: buffer.into_boxed_slice(),
+ pos: 0,
+ cap: 0,
+ }
+ }
+ }
+
+ /// Gets a reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ pub fn get_ref(&self) -> &R {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ pub fn get_mut(&mut self) -> &mut R {
+ &mut self.inner
+ }
+
+ /// Gets a pinned mutable reference to the underlying reader.
+ ///
+ /// It is inadvisable to directly read from the underlying reader.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
+ self.project().inner
+ }
+
+ /// Consumes this `BufReader`, returning the underlying reader.
+ ///
+ /// Note that any leftover data in the internal buffer is lost.
+ pub fn into_inner(self) -> R {
+ self.inner
+ }
+
+ /// Returns a reference to the internally buffered data.
+ ///
+ /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
+ pub fn buffer(&self) -> &[u8] {
+ &self.buf[self.pos..self.cap]
+ }
+
+ /// Invalidates all data in the internal buffer.
+ #[inline]
+ fn discard_buffer(self: Pin<&mut Self>) {
+ let me = self.project();
+ *me.pos = 0;
+ *me.cap = 0;
+ }
+}
+
+impl<R: AsyncRead> AsyncRead for BufReader<R> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ // If we don't have any buffered data and we're doing a massive read
+ // (larger than our internal buffer), bypass our internal buffer
+ // entirely.
+ if self.pos == self.cap && buf.len() >= self.buf.len() {
+ let res = ready!(self.as_mut().get_pin_mut().poll_read(cx, buf));
+ self.discard_buffer();
+ return Poll::Ready(res);
+ }
+ let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?;
+ let nread = rem.read(buf)?;
+ self.consume(nread);
+ Poll::Ready(Ok(nread))
+ }
+
+ // we can't skip unconditionally because of the large buffer case in read.
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ self.inner.prepare_uninitialized_buffer(buf)
+ }
+}
+
+impl<R: AsyncRead> AsyncBufRead for BufReader<R> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let me = self.project();
+
+ // If we've reached the end of our internal buffer then we need to fetch
+ // some more data from the underlying reader.
+ // Branch using `>=` instead of the more correct `==`
+ // to tell the compiler that the pos..cap slice is always valid.
+ if *me.pos >= *me.cap {
+ debug_assert!(*me.pos == *me.cap);
+ *me.cap = ready!(me.inner.poll_read(cx, me.buf))?;
+ *me.pos = 0;
+ }
+ Poll::Ready(Ok(&me.buf[*me.pos..*me.cap]))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ let me = self.project();
+ *me.pos = cmp::min(*me.pos + amt, *me.cap);
+ }
+}
+
+impl<R: AsyncRead + AsyncWrite> AsyncWrite for BufReader<R> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_pin_mut().poll_write(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.get_pin_mut().poll_flush(cx)
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.get_pin_mut().poll_shutdown(cx)
+ }
+}
+
+impl<R: fmt::Debug> fmt::Debug for BufReader<R> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BufReader")
+ .field("reader", &self.inner)
+ .field(
+ "buffer",
+ &format_args!("{}/{}", self.cap - self.pos, self.buf.len()),
+ )
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<BufReader<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/buf_stream.rs b/third_party/rust/tokio/src/io/util/buf_stream.rs
new file mode 100644
index 0000000000..a56a4517fa
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/buf_stream.rs
@@ -0,0 +1,169 @@
+use crate::io::util::{BufReader, BufWriter};
+use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite};
+
+use pin_project_lite::pin_project;
+use std::io;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Wraps a type that is [`AsyncWrite`] and [`AsyncRead`], and buffers its input and output.
+ ///
+ /// It can be excessively inefficient to work directly with something that implements [`AsyncWrite`]
+ /// and [`AsyncRead`]. For example, every `write`, however small, has to traverse the syscall
+ /// interface, and similarly, every read has to do the same. The [`BufWriter`] and [`BufReader`]
+ /// types aid with these problems respectively, but do so in only one direction. `BufStream` wraps
+ /// one in the other so that both directions are buffered. See their documentation for details.
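+ ///
+ /// # Examples
+ ///
+ /// A small sketch of buffering a TCP stream in both directions (the
+ /// address is illustrative):
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt, BufStream};
+ /// use tokio::net::TcpStream;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ ///     let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///     let mut stream = BufStream::new(stream);
+ ///
+ ///     stream.write_all(b"ping").await?;
+ ///     // Writes sit in the in-memory buffer until flushed.
+ ///     stream.flush().await?;
+ ///     Ok(())
+ /// }
+ /// ```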
+ #[derive(Debug)]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct BufStream<RW> {
+ #[pin]
+ inner: BufReader<BufWriter<RW>>,
+ }
+}
+
+impl<RW: AsyncRead + AsyncWrite> BufStream<RW> {
+ /// Wraps a type in both [`BufWriter`] and [`BufReader`].
+ ///
+ /// See the documentation for those types and [`BufStream`] for details.
+ pub fn new(stream: RW) -> BufStream<RW> {
+ BufStream {
+ inner: BufReader::new(BufWriter::new(stream)),
+ }
+ }
+
+ /// Creates a `BufStream` with the specified [`BufReader`] capacity and [`BufWriter`]
+ /// capacity.
+ ///
+ /// See the documentation for those types and [`BufStream`] for details.
+ pub fn with_capacity(
+ reader_capacity: usize,
+ writer_capacity: usize,
+ stream: RW,
+ ) -> BufStream<RW> {
+ BufStream {
+ inner: BufReader::with_capacity(
+ reader_capacity,
+ BufWriter::with_capacity(writer_capacity, stream),
+ ),
+ }
+ }
+
+ /// Gets a reference to the underlying I/O object.
+ ///
+ /// It is inadvisable to directly read from the underlying I/O object.
+ pub fn get_ref(&self) -> &RW {
+ self.inner.get_ref().get_ref()
+ }
+
+ /// Gets a mutable reference to the underlying I/O object.
+ ///
+ /// It is inadvisable to directly read from the underlying I/O object.
+ pub fn get_mut(&mut self) -> &mut RW {
+ self.inner.get_mut().get_mut()
+ }
+
+ /// Gets a pinned mutable reference to the underlying I/O object.
+ ///
+ /// It is inadvisable to directly read from the underlying I/O object.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut RW> {
+ self.project().inner.get_pin_mut().get_pin_mut()
+ }
+
+ /// Consumes this `BufStream`, returning the underlying I/O object.
+ ///
+ /// Note that any leftover data in the internal buffer is lost.
+ pub fn into_inner(self) -> RW {
+ self.inner.into_inner().into_inner()
+ }
+}
+
+impl<RW> From<BufReader<BufWriter<RW>>> for BufStream<RW> {
+ fn from(b: BufReader<BufWriter<RW>>) -> Self {
+ BufStream { inner: b }
+ }
+}
+
+impl<RW> From<BufWriter<BufReader<RW>>> for BufStream<RW> {
+ fn from(b: BufWriter<BufReader<RW>>) -> Self {
+ // we need to "invert" the reader and writer
+ let BufWriter {
+ inner:
+ BufReader {
+ inner,
+ buf: rbuf,
+ pos,
+ cap,
+ },
+ buf: wbuf,
+ written,
+ } = b;
+
+ BufStream {
+ inner: BufReader {
+ inner: BufWriter {
+ inner,
+ buf: wbuf,
+ written,
+ },
+ buf: rbuf,
+ pos,
+ cap,
+ },
+ }
+ }
+}
+
+impl<RW: AsyncRead + AsyncWrite> AsyncWrite for BufStream<RW> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_write(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.project().inner.poll_flush(cx)
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.project().inner.poll_shutdown(cx)
+ }
+}
+
+impl<RW: AsyncRead + AsyncWrite> AsyncRead for BufStream<RW> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_read(cx, buf)
+ }
+
+ // we can't skip unconditionally because of the large buffer case in read.
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ self.inner.prepare_uninitialized_buffer(buf)
+ }
+}
+
+impl<RW: AsyncRead + AsyncWrite> AsyncBufRead for BufStream<RW> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ self.project().inner.poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.project().inner.consume(amt)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<BufStream<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/buf_writer.rs b/third_party/rust/tokio/src/io/util/buf_writer.rs
new file mode 100644
index 0000000000..efd053ebac
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/buf_writer.rs
@@ -0,0 +1,192 @@
+use crate::io::util::DEFAULT_BUF_SIZE;
+use crate::io::{AsyncBufRead, AsyncRead, AsyncWrite};
+
+use pin_project_lite::pin_project;
+use std::fmt;
+use std::io::{self, Write};
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Wraps a writer and buffers its output.
+ ///
+ /// It can be excessively inefficient to work directly with something that
+ /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
+ /// writes it to an underlying writer in large, infrequent batches.
+ ///
+ /// `BufWriter` can improve the speed of programs that make *small* and
+ /// *repeated* write calls to the same file or network socket. It does not
+ /// help when writing very large amounts at once, or writing just one or a few
+ /// times. It also provides no advantage when writing to a destination that is
+ /// in memory, like a `Vec<u8>`.
+ ///
+ /// When the `BufWriter` is dropped, the contents of its buffer will be
+ /// discarded. Creating multiple instances of a `BufWriter` on the same
+ /// stream can cause data loss. If you need to write out the contents of its
+ /// buffer, you must manually call flush before the writer is dropped.
+ ///
+ /// [`AsyncWrite`]: AsyncWrite
+ /// [`flush`]: super::AsyncWriteExt::flush
+ ///
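+ /// # Examples
+ ///
+ /// A minimal sketch of batching small writes to a file (assuming a
+ /// writable `foo.txt`, as in the other examples in this module):
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt, BufWriter};
+ /// use tokio::fs::File;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ ///     let f = File::create("foo.txt").await?;
+ ///     let mut writer = BufWriter::new(f);
+ ///
+ ///     // These writes land in the in-memory buffer.
+ ///     for i in 0..10u8 {
+ ///         writer.write_u8(i).await?;
+ ///     }
+ ///
+ ///     // Nothing is guaranteed to reach the file until we flush.
+ ///     writer.flush().await?;
+ ///     Ok(())
+ /// }
+ /// ```
+ ///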
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct BufWriter<W> {
+ #[pin]
+ pub(super) inner: W,
+ pub(super) buf: Vec<u8>,
+ pub(super) written: usize,
+ }
+}
+
+impl<W: AsyncWrite> BufWriter<W> {
+ /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ pub fn new(inner: W) -> Self {
+ Self::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufWriter` with the specified buffer capacity.
+ pub fn with_capacity(cap: usize, inner: W) -> Self {
+ Self {
+ inner,
+ buf: Vec::with_capacity(cap),
+ written: 0,
+ }
+ }
+
+ fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let mut me = self.project();
+
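+ // `written` tracks how much of `buf` has already reached the inner
+ // writer, so a flush interrupted by `Poll::Pending` can resume where
+ // it left off.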
+ let len = me.buf.len();
+ let mut ret = Ok(());
+ while *me.written < len {
+ match ready!(me.inner.as_mut().poll_write(cx, &me.buf[*me.written..])) {
+ Ok(0) => {
+ ret = Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "failed to write the buffered data",
+ ));
+ break;
+ }
+ Ok(n) => *me.written += n,
+ Err(e) => {
+ ret = Err(e);
+ break;
+ }
+ }
+ }
+ if *me.written > 0 {
+ me.buf.drain(..*me.written);
+ }
+ *me.written = 0;
+ Poll::Ready(ret)
+ }
+
+ /// Gets a reference to the underlying writer.
+ pub fn get_ref(&self) -> &W {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying writer.
+ ///
+ /// It is inadvisable to directly write to the underlying writer.
+ pub fn get_mut(&mut self) -> &mut W {
+ &mut self.inner
+ }
+
+ /// Gets a pinned mutable reference to the underlying writer.
+ ///
+ /// It is inadvisable to directly write to the underlying writer.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
+ self.project().inner
+ }
+
+ /// Consumes this `BufWriter`, returning the underlying writer.
+ ///
+ /// Note that any leftover data in the internal buffer is lost.
+ pub fn into_inner(self) -> W {
+ self.inner
+ }
+
+ /// Returns a reference to the internally buffered data.
+ pub fn buffer(&self) -> &[u8] {
+ &self.buf
+ }
+}
+
+impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.buf.len() + buf.len() > self.buf.capacity() {
+ ready!(self.as_mut().flush_buf(cx))?;
+ }
+
+ let me = self.project();
+ if buf.len() >= me.buf.capacity() {
+ me.inner.poll_write(cx, buf)
+ } else {
+ Poll::Ready(me.buf.write(buf))
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ ready!(self.as_mut().flush_buf(cx))?;
+ self.get_pin_mut().poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ ready!(self.as_mut().flush_buf(cx))?;
+ self.get_pin_mut().poll_shutdown(cx)
+ }
+}
+
+impl<W: AsyncWrite + AsyncRead> AsyncRead for BufWriter<W> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.get_pin_mut().poll_read(cx, buf)
+ }
+
+ // we can't skip unconditionally because of the large buffer case in read.
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ self.get_ref().prepare_uninitialized_buffer(buf)
+ }
+}
+
+impl<W: AsyncWrite + AsyncBufRead> AsyncBufRead for BufWriter<W> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ self.get_pin_mut().poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.get_pin_mut().consume(amt)
+ }
+}
+
+impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BufWriter")
+ .field("writer", &self.inner)
+ .field(
+ "buffer",
+ &format_args!("{}/{}", self.buf.len(), self.buf.capacity()),
+ )
+ .field("written", &self.written)
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<BufWriter<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/chain.rs b/third_party/rust/tokio/src/io/util/chain.rs
new file mode 100644
index 0000000000..bc76af341d
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/chain.rs
@@ -0,0 +1,141 @@
+use crate::io::{AsyncBufRead, AsyncRead};
+
+use pin_project_lite::pin_project;
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Stream for the [`chain`](super::AsyncReadExt::chain) method.
+ #[must_use = "streams do nothing unless polled"]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct Chain<T, U> {
+ #[pin]
+ first: T,
+ #[pin]
+ second: U,
+ done_first: bool,
+ }
+}
+
+pub(super) fn chain<T, U>(first: T, second: U) -> Chain<T, U>
+where
+ T: AsyncRead,
+ U: AsyncRead,
+{
+ Chain {
+ first,
+ second,
+ done_first: false,
+ }
+}
+
+impl<T, U> Chain<T, U>
+where
+ T: AsyncRead,
+ U: AsyncRead,
+{
+ /// Gets references to the underlying readers in this `Chain`.
+ pub fn get_ref(&self) -> (&T, &U) {
+ (&self.first, &self.second)
+ }
+
+ /// Gets mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ pub fn get_mut(&mut self) -> (&mut T, &mut U) {
+ (&mut self.first, &mut self.second)
+ }
+
+ /// Gets pinned mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) {
+ let me = self.project();
+ (me.first, me.second)
+ }
+
+ /// Consumes the `Chain`, returning the wrapped readers.
+ pub fn into_inner(self) -> (T, U) {
+ (self.first, self.second)
+ }
+}
+
+impl<T, U> fmt::Debug for Chain<T, U>
+where
+ T: fmt::Debug,
+ U: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Chain")
+ .field("t", &self.first)
+ .field("u", &self.second)
+ .finish()
+ }
+}
+
+impl<T, U> AsyncRead for Chain<T, U>
+where
+ T: AsyncRead,
+ U: AsyncRead,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ let me = self.project();
+
+ if !*me.done_first {
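+ // A zero-byte read only signals EOF when the destination buffer
+ // is itself non-empty; reading into an empty buffer always
+ // returns 0.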
+ match ready!(me.first.poll_read(cx, buf)?) {
+ 0 if !buf.is_empty() => *me.done_first = true,
+ n => return Poll::Ready(Ok(n)),
+ }
+ }
+ me.second.poll_read(cx, buf)
+ }
+}
+
+impl<T, U> AsyncBufRead for Chain<T, U>
+where
+ T: AsyncBufRead,
+ U: AsyncBufRead,
+{
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let me = self.project();
+
+ if !*me.done_first {
+ match ready!(me.first.poll_fill_buf(cx)?) {
+ buf if buf.is_empty() => {
+ *me.done_first = true;
+ }
+ buf => return Poll::Ready(Ok(buf)),
+ }
+ }
+ me.second.poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ let me = self.project();
+ if !*me.done_first {
+ me.first.consume(amt)
+ } else {
+ me.second.consume(amt)
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Chain<(), ()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/copy.rs b/third_party/rust/tokio/src/io/util/copy.rs
new file mode 100644
index 0000000000..8e0058c1c2
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/copy.rs
@@ -0,0 +1,135 @@
+use crate::io::{AsyncRead, AsyncWrite};
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// A future that asynchronously copies the entire contents of a reader into a
+ /// writer.
+ ///
+ /// This struct is generally created by calling [`copy`][copy]. Please
+ /// see the documentation of `copy()` for more details.
+ ///
+ /// [copy]: copy()
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Copy<'a, R: ?Sized, W: ?Sized> {
+ reader: &'a mut R,
+ read_done: bool,
+ writer: &'a mut W,
+ pos: usize,
+ cap: usize,
+ amt: u64,
+ buf: Box<[u8]>,
+ }
+
+ /// Asynchronously copies the entire contents of a reader into a writer.
+ ///
+ /// This function returns a future that will continuously read data from
+ /// `reader` and then write it into `writer` in a streaming fashion until
+ /// `reader` returns EOF.
+ ///
+ /// On success, the total number of bytes that were copied from `reader` to
+ /// `writer` is returned.
+ ///
+ /// This is an asynchronous version of [`std::io::copy`][std].
+ ///
+ /// [std]: std::io::copy
+ ///
+ /// # Errors
+ ///
+ /// The returned future will return an error immediately if any call to
+ /// `poll_read` or `poll_write` returns an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut reader: &[u8] = b"hello";
+ /// let mut writer: Vec<u8> = vec![];
+ ///
+ /// io::copy(&mut reader, &mut writer).await?;
+ ///
+ /// assert_eq!(&b"hello"[..], &writer[..]);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn copy<'a, R, W>(reader: &'a mut R, writer: &'a mut W) -> Copy<'a, R, W>
+ where
+ R: AsyncRead + Unpin + ?Sized,
+ W: AsyncWrite + Unpin + ?Sized,
+ {
+ Copy {
+ reader,
+ read_done: false,
+ writer,
+ amt: 0,
+ pos: 0,
+ cap: 0,
+ buf: Box::new([0; 2048]),
+ }
+ }
+}
+
+impl<R, W> Future for Copy<'_, R, W>
+where
+ R: AsyncRead + Unpin + ?Sized,
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<u64>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
+ loop {
+ // If our buffer is empty, then we need to read some data to
+ // continue.
+ if self.pos == self.cap && !self.read_done {
+ let me = &mut *self;
+ let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf))?;
+ if n == 0 {
+ self.read_done = true;
+ } else {
+ self.pos = 0;
+ self.cap = n;
+ }
+ }
+
+ // If our buffer has some data, let's write it out!
+ while self.pos < self.cap {
+ let me = &mut *self;
+ let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, &me.buf[me.pos..me.cap]))?;
+ if i == 0 {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "write zero byte into writer",
+ )));
+ } else {
+ self.pos += i;
+ self.amt += i as u64;
+ }
+ }
+
+ // If we've written all the data and we've seen EOF, flush out the
+ // data and finish the transfer.
+ if self.pos == self.cap && self.read_done {
+ let me = &mut *self;
+ ready!(Pin::new(&mut *me.writer).poll_flush(cx))?;
+ return Poll::Ready(Ok(self.amt));
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<Copy<'_, PhantomPinned, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/empty.rs b/third_party/rust/tokio/src/io/util/empty.rs
new file mode 100644
index 0000000000..121102c78f
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/empty.rs
@@ -0,0 +1,84 @@
+use crate::io::{AsyncBufRead, AsyncRead};
+
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// An async reader which is always at EOF.
+ ///
+ /// This struct is generally created by calling [`empty`]. Please see
+ /// the documentation of [`empty()`][`empty`] for more details.
+ ///
+ /// This is an asynchronous version of [`std::io::empty`][std].
+ ///
+ /// [`empty`]: fn@empty
+ /// [std]: std::io::empty
+ pub struct Empty {
+ _p: (),
+ }
+
+ /// Creates a new empty async reader.
+ ///
+ /// All reads from the returned reader will return `Poll::Ready(Ok(0))`.
+ ///
+ /// This is an asynchronous version of [`std::io::empty`][std].
+ ///
+ /// [std]: std::io::empty
+ ///
+ /// # Examples
+ ///
+ /// A slightly sad example of not reading anything into a buffer:
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut buffer = String::new();
+ /// io::empty().read_to_string(&mut buffer).await.unwrap();
+ /// assert!(buffer.is_empty());
+ /// }
+ /// ```
+ pub fn empty() -> Empty {
+ Empty { _p: () }
+ }
+}
+
+impl AsyncRead for Empty {
+ #[inline]
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ _: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(0))
+ }
+}
+
+impl AsyncBufRead for Empty {
+ #[inline]
+ fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ Poll::Ready(Ok(&[]))
+ }
+
+ #[inline]
+ fn consume(self: Pin<&mut Self>, _: usize) {}
+}
+
+impl fmt::Debug for Empty {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Empty { .. }")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Empty>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/flush.rs b/third_party/rust/tokio/src/io/util/flush.rs
new file mode 100644
index 0000000000..1465f30448
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/flush.rs
@@ -0,0 +1,47 @@
+use crate::io::AsyncWrite;
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// A future used to fully flush an I/O object.
+ ///
+ /// Created by the [`AsyncWriteExt::flush`] function.
+ #[derive(Debug)]
+ pub struct Flush<'a, A: ?Sized> {
+ a: &'a mut A,
+ }
+}
+
+/// Creates a future which will entirely flush an I/O object.
+pub(super) fn flush<A>(a: &mut A) -> Flush<'_, A>
+where
+ A: AsyncWrite + Unpin + ?Sized,
+{
+ Flush { a }
+}
+
+impl<A> Future for Flush<'_, A>
+where
+ A: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = &mut *self;
+ Pin::new(&mut *me.a).poll_flush(cx)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<Flush<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/lines.rs b/third_party/rust/tokio/src/io/util/lines.rs
new file mode 100644
index 0000000000..f0e75de4b1
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/lines.rs
@@ -0,0 +1,114 @@
+use crate::io::util::read_line::read_line_internal;
+use crate::io::AsyncBufRead;
+
+use pin_project_lite::pin_project;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Stream for the [`lines`](crate::io::AsyncBufReadExt::lines) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct Lines<R> {
+ #[pin]
+ reader: R,
+ buf: String,
+ bytes: Vec<u8>,
+ read: usize,
+ }
+}
+
+pub(crate) fn lines<R>(reader: R) -> Lines<R>
+where
+ R: AsyncBufRead,
+{
+ Lines {
+ reader,
+ buf: String::new(),
+ bytes: Vec::new(),
+ read: 0,
+ }
+}
+
+impl<R> Lines<R>
+where
+ R: AsyncBufRead + Unpin,
+{
+ /// Returns the next line in the stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::io::AsyncBufRead;
+ /// use tokio::io::AsyncBufReadExt;
+ ///
+ /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
+ /// let mut lines = my_buf_read.lines();
+ ///
+ /// while let Some(line) = lines.next_line().await? {
+ /// println!("length = {}", line.len())
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn next_line(&mut self) -> io::Result<Option<String>> {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| Pin::new(&mut *self).poll_next_line(cx)).await
+ }
+}
+
+impl<R> Lines<R>
+where
+ R: AsyncBufRead,
+{
+ #[doc(hidden)]
+ pub fn poll_next_line(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<Option<String>>> {
+ let me = self.project();
+
+ let n = ready!(read_line_internal(me.reader, cx, me.buf, me.bytes, me.read))?;
+
+ if n == 0 && me.buf.is_empty() {
+ return Poll::Ready(Ok(None));
+ }
+
+ if me.buf.ends_with('\n') {
+ me.buf.pop();
+
+ if me.buf.ends_with('\r') {
+ me.buf.pop();
+ }
+ }
+
+ Poll::Ready(Ok(Some(mem::replace(me.buf, String::new()))))
+ }
+}
+
+#[cfg(feature = "stream")]
+impl<R: AsyncBufRead> crate::stream::Stream for Lines<R> {
+ type Item = io::Result<String>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(match ready!(self.poll_next_line(cx)) {
+ Ok(Some(line)) => Some(Ok(line)),
+ Ok(None) => None,
+ Err(err) => Some(Err(err)),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Lines<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/mod.rs b/third_party/rust/tokio/src/io/util/mod.rs
new file mode 100644
index 0000000000..c4754abf05
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/mod.rs
@@ -0,0 +1,88 @@
+#![allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+
+cfg_io_util! {
+ mod async_buf_read_ext;
+ pub use async_buf_read_ext::AsyncBufReadExt;
+
+ mod async_read_ext;
+ pub use async_read_ext::AsyncReadExt;
+
+ mod async_seek_ext;
+ pub use async_seek_ext::AsyncSeekExt;
+
+ mod async_write_ext;
+ pub use async_write_ext::AsyncWriteExt;
+
+ mod buf_reader;
+ pub use buf_reader::BufReader;
+
+ mod buf_stream;
+ pub use buf_stream::BufStream;
+
+ mod buf_writer;
+ pub use buf_writer::BufWriter;
+
+ mod chain;
+
+ mod copy;
+ pub use copy::{copy, Copy};
+
+ mod empty;
+ pub use empty::{empty, Empty};
+
+ mod flush;
+
+ mod lines;
+ pub use lines::Lines;
+
+ mod read;
+ mod read_buf;
+ mod read_exact;
+ mod read_int;
+ mod read_line;
+
+ mod read_to_end;
+ cfg_process! {
+ pub(crate) use read_to_end::read_to_end;
+ }
+
+ mod read_to_string;
+ mod read_until;
+
+ mod repeat;
+ pub use repeat::{repeat, Repeat};
+
+ mod shutdown;
+
+ mod sink;
+ pub use sink::{sink, Sink};
+
+ mod split;
+ pub use split::Split;
+
+ cfg_stream! {
+ mod stream_reader;
+ pub use stream_reader::{stream_reader, StreamReader};
+ }
+
+ mod take;
+ pub use take::Take;
+
+ mod write;
+ mod write_all;
+ mod write_buf;
+ mod write_int;
+
+ // used by `BufReader` and `BufWriter`
+ // https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1
+ const DEFAULT_BUF_SIZE: usize = 8 * 1024;
+}
+
+cfg_not_io_util! {
+ cfg_process! {
+ mod read_to_end;
+ // Used by process
+ pub(crate) use read_to_end::read_to_end;
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read.rs b/third_party/rust/tokio/src/io/util/read.rs
new file mode 100644
index 0000000000..a8ca370ea8
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read.rs
@@ -0,0 +1,55 @@
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::io;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Tries to read some bytes directly into the given `buf` in an
+/// asynchronous manner, returning a future type.
+///
+/// The returned future will resolve to the number of bytes read once the
+/// read operation is completed.
+pub(crate) fn read<'a, R>(reader: &'a mut R, buf: &'a mut [u8]) -> Read<'a, R>
+where
+ R: AsyncRead + Unpin + ?Sized,
+{
+ Read { reader, buf }
+}
+
+cfg_io_util! {
+ /// A future which can be used to easily read the available bytes into
+ /// a buffer.
+ ///
+ /// Created by the [`read`] function.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Read<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut [u8],
+ }
+}
+
+impl<R> Future for Read<'_, R>
+where
+ R: AsyncRead + Unpin + ?Sized,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ let me = &mut *self;
+ Pin::new(&mut *me.reader).poll_read(cx, me.buf)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<Read<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_buf.rs b/third_party/rust/tokio/src/io/util/read_buf.rs
new file mode 100644
index 0000000000..550499b933
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_buf.rs
@@ -0,0 +1,41 @@
+use crate::io::AsyncRead;
+
+use bytes::BufMut;
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pub(crate) fn read_buf<'a, R, B>(reader: &'a mut R, buf: &'a mut B) -> ReadBuf<'a, R, B>
+where
+ R: AsyncRead,
+ B: BufMut,
+{
+ ReadBuf { reader, buf }
+}
+
+cfg_io_util! {
+ /// Future returned by [`read_buf`](AsyncReadExt::read_buf).
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ReadBuf<'a, R, B> {
+ reader: &'a mut R,
+ buf: &'a mut B,
+ }
+}
+
+impl<R, B> Future for ReadBuf<'_, R, B>
+where
+ R: AsyncRead,
+ B: BufMut,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ // safety: no data is moved from self
+ unsafe {
+ let me = self.get_unchecked_mut();
+ Pin::new_unchecked(&mut *me.reader).poll_read_buf(cx, &mut me.buf)
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_exact.rs b/third_party/rust/tokio/src/io/util/read_exact.rs
new file mode 100644
index 0000000000..d6983c9953
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_exact.rs
@@ -0,0 +1,76 @@
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::io;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// A future which can be used to easily read exactly enough bytes to fill
+/// a buffer.
+///
+/// Created by the [`AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact) method.
+pub(crate) fn read_exact<'a, A>(reader: &'a mut A, buf: &'a mut [u8]) -> ReadExact<'a, A>
+where
+ A: AsyncRead + Unpin + ?Sized,
+{
+ ReadExact {
+ reader,
+ buf,
+ pos: 0,
+ }
+}
+
+cfg_io_util! {
+    /// A future which reads exactly enough bytes to fill `buf`, returning an
+    /// error if end of file (EOF) is hit sooner.
+    ///
+    /// On success, the number of bytes read is returned.
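+    ///
+    /// # Examples
+    ///
+    /// A sketch using [`AsyncReadExt::read_exact`](crate::io::AsyncReadExt::read_exact)
+    /// (assumes the `io-util` feature and a reader with at least four bytes left):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncRead;
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox(mut reader: impl AsyncRead + Unpin) -> std::io::Result<()> {
+    /// let mut buf = [0u8; 4];
+    /// // Fails with `UnexpectedEof` if fewer than 4 bytes are available.
+    /// reader.read_exact(&mut buf).await?;
+    /// # Ok(())
+    /// # }
+    /// ```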
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ReadExact<'a, A: ?Sized> {
+ reader: &'a mut A,
+ buf: &'a mut [u8],
+ pos: usize,
+ }
+}
+
+fn eof() -> io::Error {
+ io::Error::new(io::ErrorKind::UnexpectedEof, "early eof")
+}
+
+impl<A> Future for ReadExact<'_, A>
+where
+ A: AsyncRead + Unpin + ?Sized,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ loop {
+            // If the buffer is not yet full, read more data into it.
+ if self.pos < self.buf.len() {
+ let me = &mut *self;
+ let n = ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut me.buf[me.pos..]))?;
+ me.pos += n;
+ if n == 0 {
+ return Err(eof()).into();
+ }
+ }
+
+ if self.pos >= self.buf.len() {
+ return Poll::Ready(Ok(self.pos));
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<ReadExact<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_int.rs b/third_party/rust/tokio/src/io/util/read_int.rs
new file mode 100644
index 0000000000..9dc4402f88
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_int.rs
@@ -0,0 +1,123 @@
+use crate::io::AsyncRead;
+
+use bytes::Buf;
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::io;
+use std::io::ErrorKind::UnexpectedEof;
+use std::mem::size_of;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+macro_rules! reader {
+ ($name:ident, $ty:ty, $reader:ident) => {
+ reader!($name, $ty, $reader, size_of::<$ty>());
+ };
+ ($name:ident, $ty:ty, $reader:ident, $bytes:expr) => {
+ pin_project! {
+ #[doc(hidden)]
+ pub struct $name<R> {
+ #[pin]
+ src: R,
+ buf: [u8; $bytes],
+ read: u8,
+ }
+ }
+
+ impl<R> $name<R> {
+ pub(crate) fn new(src: R) -> Self {
+ $name {
+ src,
+ buf: [0; $bytes],
+ read: 0,
+ }
+ }
+ }
+
+ impl<R> Future for $name<R>
+ where
+ R: AsyncRead,
+ {
+ type Output = io::Result<$ty>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut me = self.project();
+
+ if *me.read == $bytes as u8 {
+ return Poll::Ready(Ok(Buf::$reader(&mut &me.buf[..])));
+ }
+
+ while *me.read < $bytes as u8 {
+ *me.read += match me
+ .src
+ .as_mut()
+ .poll_read(cx, &mut me.buf[*me.read as usize..])
+ {
+ Poll::Pending => return Poll::Pending,
+ Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
+ Poll::Ready(Ok(0)) => {
+ return Poll::Ready(Err(UnexpectedEof.into()));
+ }
+ Poll::Ready(Ok(n)) => n as u8,
+ };
+ }
+
+ let num = Buf::$reader(&mut &me.buf[..]);
+
+ Poll::Ready(Ok(num))
+ }
+ }
+ };
+}
+
+macro_rules! reader8 {
+ ($name:ident, $ty:ty) => {
+ pin_project! {
+            /// Future returned from `read_u8` and `read_i8`.
+ #[doc(hidden)]
+ pub struct $name<R> {
+ #[pin]
+ reader: R,
+ }
+ }
+
+ impl<R> $name<R> {
+ pub(crate) fn new(reader: R) -> $name<R> {
+ $name { reader }
+ }
+ }
+
+ impl<R> Future for $name<R>
+ where
+ R: AsyncRead,
+ {
+ type Output = io::Result<$ty>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = self.project();
+
+ let mut buf = [0; 1];
+ match me.reader.poll_read(cx, &mut buf[..]) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
+ Poll::Ready(Ok(0)) => Poll::Ready(Err(UnexpectedEof.into())),
+ Poll::Ready(Ok(1)) => Poll::Ready(Ok(buf[0] as $ty)),
+ Poll::Ready(Ok(_)) => unreachable!(),
+ }
+ }
+ }
+ };
+}
+
+reader8!(ReadU8, u8);
+reader8!(ReadI8, i8);
+
+reader!(ReadU16, u16, get_u16);
+reader!(ReadU32, u32, get_u32);
+reader!(ReadU64, u64, get_u64);
+reader!(ReadU128, u128, get_u128);
+
+reader!(ReadI16, i16, get_i16);
+reader!(ReadI32, i32, get_i32);
+reader!(ReadI64, i64, get_i64);
+reader!(ReadI128, i128, get_i128);
diff --git a/third_party/rust/tokio/src/io/util/read_line.rs b/third_party/rust/tokio/src/io/util/read_line.rs
new file mode 100644
index 0000000000..c5ee597486
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_line.rs
@@ -0,0 +1,82 @@
+use crate::io::util::read_until::read_until_internal;
+use crate::io::AsyncBufRead;
+
+use std::future::Future;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::str;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// Future for the [`read_line`](crate::io::AsyncBufReadExt::read_line) method.
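+    ///
+    /// # Examples
+    ///
+    /// A sketch of reading one line (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncBufRead;
+    /// use tokio::io::AsyncBufReadExt;
+    ///
+    /// # async fn dox(mut reader: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
+    /// let mut line = String::new();
+    /// // Reads up to and including the newline; returns the byte count.
+    /// let n = reader.read_line(&mut line).await?;
+    /// # Ok(())
+    /// # }
+    /// ```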
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ReadLine<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut String,
+ bytes: Vec<u8>,
+ read: usize,
+ }
+}
+
+pub(crate) fn read_line<'a, R>(reader: &'a mut R, buf: &'a mut String) -> ReadLine<'a, R>
+where
+ R: AsyncBufRead + ?Sized + Unpin,
+{
+ ReadLine {
+ reader,
+ bytes: unsafe { mem::replace(buf.as_mut_vec(), Vec::new()) },
+ buf,
+ read: 0,
+ }
+}
+
+pub(super) fn read_line_internal<R: AsyncBufRead + ?Sized>(
+ reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut String,
+ bytes: &mut Vec<u8>,
+ read: &mut usize,
+) -> Poll<io::Result<usize>> {
+ let ret = ready!(read_until_internal(reader, cx, b'\n', bytes, read));
+ if str::from_utf8(&bytes).is_err() {
+ Poll::Ready(ret.and_then(|_| {
+ Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "stream did not contain valid UTF-8",
+ ))
+ }))
+ } else {
+ debug_assert!(buf.is_empty());
+ debug_assert_eq!(*read, 0);
+ // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`.
+ mem::swap(unsafe { buf.as_mut_vec() }, bytes);
+ Poll::Ready(ret)
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadLine<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self {
+ reader,
+ buf,
+ bytes,
+ read,
+ } = &mut *self;
+ read_line_internal(Pin::new(reader), cx, buf, bytes, read)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<ReadLine<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_to_end.rs b/third_party/rust/tokio/src/io/util/read_to_end.rs
new file mode 100644
index 0000000000..a2cd99bed0
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_to_end.rs
@@ -0,0 +1,113 @@
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::io;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
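+/// Future for the [`read_to_end`](crate::io::AsyncReadExt::read_to_end) method.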
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+pub struct ReadToEnd<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut Vec<u8>,
+ start_len: usize,
+}
+
+pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, R>
+where
+ R: AsyncRead + Unpin + ?Sized,
+{
+ let start_len = buf.len();
+ ReadToEnd {
+ reader,
+ buf,
+ start_len,
+ }
+}
+
+struct Guard<'a> {
+ buf: &'a mut Vec<u8>,
+ len: usize,
+}
+
+impl Drop for Guard<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ self.buf.set_len(self.len);
+ }
+ }
+}
+
+// This uses an adaptive system to extend the vector when it fills. We want to
+// avoid paying to allocate and zero a huge chunk of memory if the reader only
+// has 4 bytes while still making large reads if the reader does have a ton
+// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
+// time is 4,500 times (!) slower than this if the reader has a very small
+// amount of data to return.
+//
+// Because we're extending the buffer with uninitialized data for trusted
+// readers, we need to make sure to truncate that if any of this panics.
+pub(super) fn read_to_end_internal<R: AsyncRead + ?Sized>(
+ mut rd: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut Vec<u8>,
+ start_len: usize,
+) -> Poll<io::Result<usize>> {
+ let mut g = Guard {
+ len: buf.len(),
+ buf,
+ };
+ let ret;
+ loop {
+ if g.len == g.buf.len() {
+ unsafe {
+ g.buf.reserve(32);
+ let capacity = g.buf.capacity();
+ g.buf.set_len(capacity);
+
+ let b = &mut *(&mut g.buf[g.len..] as *mut [u8] as *mut [MaybeUninit<u8>]);
+
+ rd.prepare_uninitialized_buffer(b);
+ }
+ }
+
+ match ready!(rd.as_mut().poll_read(cx, &mut g.buf[g.len..])) {
+ Ok(0) => {
+ ret = Poll::Ready(Ok(g.len - start_len));
+ break;
+ }
+ Ok(n) => g.len += n,
+ Err(e) => {
+ ret = Poll::Ready(Err(e));
+ break;
+ }
+ }
+ }
+
+ ret
+}
+
+impl<A> Future for ReadToEnd<'_, A>
+where
+ A: AsyncRead + ?Sized + Unpin,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ read_to_end_internal(Pin::new(&mut this.reader), cx, this.buf, this.start_len)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<ReadToEnd<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_to_string.rs b/third_party/rust/tokio/src/io/util/read_to_string.rs
new file mode 100644
index 0000000000..e77d836dee
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_to_string.rs
@@ -0,0 +1,83 @@
+use crate::io::util::read_to_end::read_to_end_internal;
+use crate::io::AsyncRead;
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{io, mem, str};
+
+cfg_io_util! {
+ /// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method.
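+    ///
+    /// # Examples
+    ///
+    /// A sketch (assumes the `io-util` feature and UTF-8 input):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncRead;
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox(mut reader: impl AsyncRead + Unpin) -> std::io::Result<()> {
+    /// let mut out = String::new();
+    /// // Appends everything until EOF; errors if the data is not valid UTF-8.
+    /// let n = reader.read_to_string(&mut out).await?;
+    /// # Ok(())
+    /// # }
+    /// ```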
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ReadToString<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut String,
+ bytes: Vec<u8>,
+ start_len: usize,
+ }
+}
+
+pub(crate) fn read_to_string<'a, R>(reader: &'a mut R, buf: &'a mut String) -> ReadToString<'a, R>
+where
+ R: AsyncRead + ?Sized + Unpin,
+{
+ let start_len = buf.len();
+ ReadToString {
+ reader,
+ bytes: unsafe { mem::replace(buf.as_mut_vec(), Vec::new()) },
+ buf,
+ start_len,
+ }
+}
+
+fn read_to_string_internal<R: AsyncRead + ?Sized>(
+ reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut String,
+ bytes: &mut Vec<u8>,
+ start_len: usize,
+) -> Poll<io::Result<usize>> {
+ let ret = ready!(read_to_end_internal(reader, cx, bytes, start_len));
+ if str::from_utf8(&bytes).is_err() {
+ Poll::Ready(ret.and_then(|_| {
+ Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "stream did not contain valid UTF-8",
+ ))
+ }))
+ } else {
+ debug_assert!(buf.is_empty());
+ // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`.
+ mem::swap(unsafe { buf.as_mut_vec() }, bytes);
+ Poll::Ready(ret)
+ }
+}
+
+impl<A> Future for ReadToString<'_, A>
+where
+ A: AsyncRead + ?Sized + Unpin,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self {
+ reader,
+ buf,
+ bytes,
+ start_len,
+ } = &mut *self;
+ read_to_string_internal(Pin::new(reader), cx, buf, bytes, *start_len)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<ReadToString<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/read_until.rs b/third_party/rust/tokio/src/io/util/read_until.rs
new file mode 100644
index 0000000000..1adeda66f0
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/read_until.rs
@@ -0,0 +1,86 @@
+use crate::io::AsyncBufRead;
+
+use std::future::Future;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// Future for the [`read_until`](crate::io::AsyncBufReadExt::read_until) method.
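+    ///
+    /// # Examples
+    ///
+    /// A sketch of reading up to a delimiter (assumes the `io-util` feature is
+    /// enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncBufRead;
+    /// use tokio::io::AsyncBufReadExt;
+    ///
+    /// # async fn dox(mut reader: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
+    /// let mut buf = Vec::new();
+    /// // Reads up to and including the first `b','`, or to EOF.
+    /// let n = reader.read_until(b',', &mut buf).await?;
+    /// # Ok(())
+    /// # }
+    /// ```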
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ReadUntil<'a, R: ?Sized> {
+ reader: &'a mut R,
+ byte: u8,
+ buf: &'a mut Vec<u8>,
+ read: usize,
+ }
+}
+
+pub(crate) fn read_until<'a, R>(
+ reader: &'a mut R,
+ byte: u8,
+ buf: &'a mut Vec<u8>,
+) -> ReadUntil<'a, R>
+where
+ R: AsyncBufRead + ?Sized + Unpin,
+{
+ ReadUntil {
+ reader,
+ byte,
+ buf,
+ read: 0,
+ }
+}
+
+pub(super) fn read_until_internal<R: AsyncBufRead + ?Sized>(
+ mut reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ byte: u8,
+ buf: &mut Vec<u8>,
+ read: &mut usize,
+) -> Poll<io::Result<usize>> {
+ loop {
+ let (done, used) = {
+ let available = ready!(reader.as_mut().poll_fill_buf(cx))?;
+ if let Some(i) = memchr::memchr(byte, available) {
+ buf.extend_from_slice(&available[..=i]);
+ (true, i + 1)
+ } else {
+ buf.extend_from_slice(available);
+ (false, available.len())
+ }
+ };
+ reader.as_mut().consume(used);
+ *read += used;
+ if done || used == 0 {
+ return Poll::Ready(Ok(mem::replace(read, 0)));
+ }
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadUntil<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self {
+ reader,
+ byte,
+ buf,
+ read,
+ } = &mut *self;
+ read_until_internal(Pin::new(reader), cx, *byte, buf, read)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<ReadUntil<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/repeat.rs b/third_party/rust/tokio/src/io/util/repeat.rs
new file mode 100644
index 0000000000..6b9067e853
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/repeat.rs
@@ -0,0 +1,71 @@
+use crate::io::AsyncRead;
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// An async reader which yields one byte over and over and over and over and
+ /// over and...
+ ///
+ /// This struct is generally created by calling [`repeat`][repeat]. Please
+ /// see the documentation of `repeat()` for more details.
+ ///
+ /// This is an asynchronous version of [`std::io::Repeat`][std].
+ ///
+ /// [repeat]: fn@repeat
+ /// [std]: std::io::Repeat
+ #[derive(Debug)]
+ pub struct Repeat {
+ byte: u8,
+ }
+
+ /// Creates an instance of an async reader that infinitely repeats one byte.
+ ///
+ /// All reads from this reader will succeed by filling the specified buffer with
+ /// the given byte.
+ ///
+ /// This is an asynchronous version of [`std::io::repeat`][std].
+ ///
+ /// [std]: std::io::repeat
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncReadExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut buffer = [0; 3];
+ /// io::repeat(0b101).read_exact(&mut buffer).await.unwrap();
+ /// assert_eq!(buffer, [0b101, 0b101, 0b101]);
+ /// }
+ /// ```
+ pub fn repeat(byte: u8) -> Repeat {
+ Repeat { byte }
+ }
+}
+
+impl AsyncRead for Repeat {
+ #[inline]
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ for byte in &mut *buf {
+ *byte = self.byte;
+ }
+ Poll::Ready(Ok(buf.len()))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Repeat>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/shutdown.rs b/third_party/rust/tokio/src/io/util/shutdown.rs
new file mode 100644
index 0000000000..f24e288541
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/shutdown.rs
@@ -0,0 +1,47 @@
+use crate::io::AsyncWrite;
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+    /// A future used to shut down an I/O object.
+ ///
+ /// Created by the [`AsyncWriteExt::shutdown`] function.
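+    ///
+    /// # Examples
+    ///
+    /// A sketch using [`AsyncWriteExt::shutdown`](crate::io::AsyncWriteExt::shutdown)
+    /// (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncWrite;
+    /// use tokio::io::AsyncWriteExt;
+    ///
+    /// # async fn dox(mut writer: impl AsyncWrite + Unpin) -> std::io::Result<()> {
+    /// // Flushes buffered data and performs any protocol-level shutdown.
+    /// writer.shutdown().await?;
+    /// # Ok(())
+    /// # }
+    /// ```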
+ #[derive(Debug)]
+ pub struct Shutdown<'a, A: ?Sized> {
+ a: &'a mut A,
+ }
+}
+
+/// Creates a future which will shut down an I/O object.
+pub(super) fn shutdown<A>(a: &mut A) -> Shutdown<'_, A>
+where
+ A: AsyncWrite + Unpin + ?Sized,
+{
+ Shutdown { a }
+}
+
+impl<A> Future for Shutdown<'_, A>
+where
+ A: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = &mut *self;
+ Pin::new(&mut *me.a).poll_shutdown(cx)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<Shutdown<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/sink.rs b/third_party/rust/tokio/src/io/util/sink.rs
new file mode 100644
index 0000000000..05ee773fa3
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/sink.rs
@@ -0,0 +1,87 @@
+use crate::io::AsyncWrite;
+
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// An async writer which will move data into the void.
+ ///
+ /// This struct is generally created by calling [`sink`][sink]. Please
+ /// see the documentation of `sink()` for more details.
+ ///
+ /// This is an asynchronous version of [`std::io::Sink`][std].
+ ///
+ /// [sink]: sink()
+ /// [std]: std::io::Sink
+ pub struct Sink {
+ _p: (),
+ }
+
+ /// Creates an instance of an async writer which will successfully consume all
+ /// data.
+ ///
+ /// All calls to [`poll_write`] on the returned instance will return
+ /// `Poll::Ready(Ok(buf.len()))` and the contents of the buffer will not be
+ /// inspected.
+ ///
+ /// This is an asynchronous version of [`std::io::sink`][std].
+ ///
+ /// [`poll_write`]: crate::io::AsyncWrite::poll_write()
+ /// [std]: std::io::sink
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::io::{self, AsyncWriteExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let buffer = vec![1, 2, 3, 5, 8];
+ /// let num_bytes = io::sink().write(&buffer).await?;
+ /// assert_eq!(num_bytes, 5);
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn sink() -> Sink {
+ Sink { _p: () }
+ }
+}
+
+impl AsyncWrite for Sink {
+ #[inline]
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ Poll::Ready(Ok(buf.len()))
+ }
+
+ #[inline]
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ #[inline]
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl fmt::Debug for Sink {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Sink { .. }")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Sink>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/split.rs b/third_party/rust/tokio/src/io/util/split.rs
new file mode 100644
index 0000000000..f1ed2fd89d
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/split.rs
@@ -0,0 +1,112 @@
+use crate::io::util::read_until::read_until_internal;
+use crate::io::AsyncBufRead;
+
+use pin_project_lite::pin_project;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Stream for the [`split`](crate::io::AsyncBufReadExt::split) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct Split<R> {
+ #[pin]
+ reader: R,
+ buf: Vec<u8>,
+ delim: u8,
+ read: usize,
+ }
+}
+
+pub(crate) fn split<R>(reader: R, delim: u8) -> Split<R>
+where
+ R: AsyncBufRead,
+{
+ Split {
+ reader,
+ buf: Vec::new(),
+ delim,
+ read: 0,
+ }
+}
+
+impl<R> Split<R>
+where
+ R: AsyncBufRead + Unpin,
+{
+ /// Returns the next segment in the stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::io::AsyncBufRead;
+ /// use tokio::io::AsyncBufReadExt;
+ ///
+ /// # async fn dox(my_buf_read: impl AsyncBufRead + Unpin) -> std::io::Result<()> {
+ /// let mut segments = my_buf_read.split(b'f');
+ ///
+ /// while let Some(segment) = segments.next_segment().await? {
+ /// println!("length = {}", segment.len())
+ /// }
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub async fn next_segment(&mut self) -> io::Result<Option<Vec<u8>>> {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| Pin::new(&mut *self).poll_next_segment(cx)).await
+ }
+}
+
+impl<R> Split<R>
+where
+ R: AsyncBufRead,
+{
+ #[doc(hidden)]
+ pub fn poll_next_segment(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<Option<Vec<u8>>>> {
+ let me = self.project();
+
+ let n = ready!(read_until_internal(
+ me.reader, cx, *me.delim, me.buf, me.read,
+ ))?;
+
+ if n == 0 && me.buf.is_empty() {
+ return Poll::Ready(Ok(None));
+ }
+
+ if me.buf.last() == Some(me.delim) {
+ me.buf.pop();
+ }
+
+ Poll::Ready(Ok(Some(mem::replace(me.buf, Vec::new()))))
+ }
+}
+
+#[cfg(feature = "stream")]
+impl<R: AsyncBufRead> crate::stream::Stream for Split<R> {
+ type Item = io::Result<Vec<u8>>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(match ready!(self.poll_next_segment(cx)) {
+ Ok(Some(segment)) => Some(Ok(segment)),
+ Ok(None) => None,
+ Err(err) => Some(Err(err)),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Split<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/stream_reader.rs b/third_party/rust/tokio/src/io/util/stream_reader.rs
new file mode 100644
index 0000000000..b98f8bdfc2
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/stream_reader.rs
@@ -0,0 +1,184 @@
+use crate::io::{AsyncBufRead, AsyncRead};
+use crate::stream::Stream;
+use bytes::{Buf, BufMut};
+use pin_project_lite::pin_project;
+use std::io;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Convert a stream of byte chunks into an [`AsyncRead`].
+ ///
+ /// This type is usually created using the [`stream_reader`] function.
+ ///
+ /// [`AsyncRead`]: crate::io::AsyncRead
+ /// [`stream_reader`]: crate::io::stream_reader
+ #[derive(Debug)]
+ #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct StreamReader<S, B> {
+ #[pin]
+ inner: S,
+ chunk: Option<B>,
+ }
+}
+
+/// Convert a stream of byte chunks into an [`AsyncRead`](crate::io::AsyncRead).
+///
+/// # Example
+///
+/// ```
+/// use bytes::Bytes;
+/// use tokio::io::{stream_reader, AsyncReadExt};
+/// # #[tokio::main]
+/// # async fn main() -> std::io::Result<()> {
+///
+/// // Create a stream from an iterator.
+/// let stream = tokio::stream::iter(vec![
+/// Ok(Bytes::from_static(&[0, 1, 2, 3])),
+/// Ok(Bytes::from_static(&[4, 5, 6, 7])),
+/// Ok(Bytes::from_static(&[8, 9, 10, 11])),
+/// ]);
+///
+/// // Convert it to an AsyncRead.
+/// let mut read = stream_reader(stream);
+///
+/// // Read five bytes from the stream.
+/// let mut buf = [0; 5];
+/// read.read_exact(&mut buf).await?;
+/// assert_eq!(buf, [0, 1, 2, 3, 4]);
+///
+/// // Read the rest of the current chunk.
+/// assert_eq!(read.read(&mut buf).await?, 3);
+/// assert_eq!(&buf[..3], [5, 6, 7]);
+///
+/// // Read the next chunk.
+/// assert_eq!(read.read(&mut buf).await?, 4);
+/// assert_eq!(&buf[..4], [8, 9, 10, 11]);
+///
+/// // We have now reached the end.
+/// assert_eq!(read.read(&mut buf).await?, 0);
+///
+/// # Ok(())
+/// # }
+/// ```
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+pub fn stream_reader<S, B>(stream: S) -> StreamReader<S, B>
+where
+ S: Stream<Item = Result<B, io::Error>>,
+ B: Buf,
+{
+ StreamReader::new(stream)
+}
+
+impl<S, B> StreamReader<S, B>
+where
+ S: Stream<Item = Result<B, io::Error>>,
+ B: Buf,
+{
+ /// Convert the provided stream into an `AsyncRead`.
+ fn new(stream: S) -> Self {
+ Self {
+ inner: stream,
+ chunk: None,
+ }
+ }
+ /// Do we have a chunk and is it non-empty?
+ fn has_chunk(self: Pin<&mut Self>) -> bool {
+ if let Some(chunk) = self.project().chunk {
+ chunk.remaining() > 0
+ } else {
+ false
+ }
+ }
+}
+
+impl<S, B> AsyncRead for StreamReader<S, B>
+where
+ S: Stream<Item = Result<B, io::Error>>,
+ B: Buf,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ if buf.is_empty() {
+ return Poll::Ready(Ok(0));
+ }
+
+ let inner_buf = match self.as_mut().poll_fill_buf(cx) {
+ Poll::Ready(Ok(buf)) => buf,
+ Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
+ Poll::Pending => return Poll::Pending,
+ };
+ let len = std::cmp::min(inner_buf.len(), buf.len());
+ (&mut buf[..len]).copy_from_slice(&inner_buf[..len]);
+
+ self.consume(len);
+ Poll::Ready(Ok(len))
+ }
+ fn poll_read_buf<BM: BufMut>(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut BM,
+ ) -> Poll<io::Result<usize>>
+ where
+ Self: Sized,
+ {
+ if !buf.has_remaining_mut() {
+ return Poll::Ready(Ok(0));
+ }
+
+ let inner_buf = match self.as_mut().poll_fill_buf(cx) {
+ Poll::Ready(Ok(buf)) => buf,
+ Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
+ Poll::Pending => return Poll::Pending,
+ };
+ let len = std::cmp::min(inner_buf.len(), buf.remaining_mut());
+ buf.put_slice(&inner_buf[..len]);
+
+ self.consume(len);
+ Poll::Ready(Ok(len))
+ }
+ unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+}
+
+impl<S, B> AsyncBufRead for StreamReader<S, B>
+where
+ S: Stream<Item = Result<B, io::Error>>,
+ B: Buf,
+{
+ fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ loop {
+ if self.as_mut().has_chunk() {
+ // This unwrap is very sad, but it can't be avoided.
+ let buf = self.project().chunk.as_ref().unwrap().bytes();
+ return Poll::Ready(Ok(buf));
+ } else {
+ match self.as_mut().project().inner.poll_next(cx) {
+ Poll::Ready(Some(Ok(chunk))) => {
+ // Go around the loop in case the chunk is empty.
+ *self.as_mut().project().chunk = Some(chunk);
+ }
+ Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err)),
+ Poll::Ready(None) => return Poll::Ready(Ok(&[])),
+ Poll::Pending => return Poll::Pending,
+ }
+ }
+ }
+ }
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ if amt > 0 {
+ self.project()
+ .chunk
+ .as_mut()
+ .expect("No chunk present")
+ .advance(amt);
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/take.rs b/third_party/rust/tokio/src/io/util/take.rs
new file mode 100644
index 0000000000..5d6bd90aa3
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/take.rs
@@ -0,0 +1,131 @@
+use crate::io::{AsyncBufRead, AsyncRead};
+
+use pin_project_lite::pin_project;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{cmp, io};
+
+pin_project! {
+    /// Reader for the [`take`](super::AsyncReadExt::take) method.
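+    ///
+    /// # Examples
+    ///
+    /// A sketch of limiting a reader (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncRead;
+    /// use tokio::io::AsyncReadExt;
+    ///
+    /// # async fn dox(reader: impl AsyncRead + Unpin) -> std::io::Result<()> {
+    /// let mut limited = reader.take(16);
+    /// let mut buf = Vec::new();
+    /// // Stops after at most 16 bytes, even if more are available.
+    /// limited.read_to_end(&mut buf).await?;
+    /// assert!(buf.len() <= 16);
+    /// # Ok(())
+    /// # }
+    /// ```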
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless you `.await` or poll them"]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ pub struct Take<R> {
+ #[pin]
+ inner: R,
+ // Add '_' to avoid conflicts with `limit` method.
+ limit_: u64,
+ }
+}
+
+pub(super) fn take<R: AsyncRead>(inner: R, limit: u64) -> Take<R> {
+ Take {
+ inner,
+ limit_: limit,
+ }
+}
+
+impl<R: AsyncRead> Take<R> {
+ /// Returns the remaining number of bytes that can be
+ /// read before this instance will return EOF.
+ ///
+ /// # Note
+ ///
+ /// This instance may reach `EOF` after reading fewer bytes than indicated by
+ /// this method if the underlying [`AsyncRead`] instance reaches EOF.
+ pub fn limit(&self) -> u64 {
+ self.limit_
+ }
+
+ /// Sets the number of bytes that can be read before this instance will
+ /// return EOF. This is the same as constructing a new `Take` instance, so
+    /// the number of bytes read and the previous limit value don't matter when
+ /// calling this method.
+ pub fn set_limit(&mut self, limit: u64) {
+ self.limit_ = limit
+ }
+
+ /// Gets a reference to the underlying reader.
+ pub fn get_ref(&self) -> &R {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying reader.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying reader as doing so may corrupt the internal limit of this
+ /// `Take`.
+ pub fn get_mut(&mut self) -> &mut R {
+ &mut self.inner
+ }
+
+ /// Gets a pinned mutable reference to the underlying reader.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying reader as doing so may corrupt the internal limit of this
+ /// `Take`.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
+ self.project().inner
+ }
+
+ /// Consumes the `Take`, returning the wrapped reader.
+ pub fn into_inner(self) -> R {
+ self.inner
+ }
+}
+
+impl<R: AsyncRead> AsyncRead for Take<R> {
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
+ self.inner.prepare_uninitialized_buffer(buf)
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ if self.limit_ == 0 {
+ return Poll::Ready(Ok(0));
+ }
+
+ let me = self.project();
+ let max = std::cmp::min(buf.len() as u64, *me.limit_) as usize;
+ let n = ready!(me.inner.poll_read(cx, &mut buf[..max]))?;
+ *me.limit_ -= n as u64;
+ Poll::Ready(Ok(n))
+ }
+}
+
+impl<R: AsyncBufRead> AsyncBufRead for Take<R> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let me = self.project();
+
+ // Don't call into inner reader at all at EOF because it may still block
+ if *me.limit_ == 0 {
+ return Poll::Ready(Ok(&[]));
+ }
+
+ let buf = ready!(me.inner.poll_fill_buf(cx)?);
+ let cap = cmp::min(buf.len() as u64, *me.limit_) as usize;
+ Poll::Ready(Ok(&buf[..cap]))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ let me = self.project();
+ // Don't let callers reset the limit by passing an overlarge value
+ let amt = cmp::min(amt as u64, *me.limit_) as usize;
+ *me.limit_ -= amt as u64;
+ me.inner.consume(amt);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ crate::is_unpin::<Take<()>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/write.rs b/third_party/rust/tokio/src/io/util/write.rs
new file mode 100644
index 0000000000..433a421d34
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/write.rs
@@ -0,0 +1,37 @@
+use crate::io::AsyncWrite;
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// A future to write some of the buffer to an `AsyncWrite`.
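+    ///
+    /// # Examples
+    ///
+    /// A sketch using [`AsyncWriteExt::write`](crate::io::AsyncWriteExt::write)
+    /// (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncWrite;
+    /// use tokio::io::AsyncWriteExt;
+    ///
+    /// # async fn dox(mut writer: impl AsyncWrite + Unpin) -> std::io::Result<()> {
+    /// // May write fewer bytes than requested; `n` is the count accepted.
+    /// let n = writer.write(b"some bytes").await?;
+    /// # Ok(())
+    /// # }
+    /// ```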
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Write<'a, W: ?Sized> {
+ writer: &'a mut W,
+ buf: &'a [u8],
+ }
+}
+
+/// Tries to write some bytes from the given `buf` to the writer in an
+/// asynchronous manner, returning a future.
+pub(crate) fn write<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> Write<'a, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ Write { writer, buf }
+}
+
+impl<W> Future for Write<'_, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ let me = &mut *self;
+ Pin::new(&mut *me.writer).poll_write(cx, me.buf)
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/write_all.rs b/third_party/rust/tokio/src/io/util/write_all.rs
new file mode 100644
index 0000000000..898006c56c
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/write_all.rs
@@ -0,0 +1,57 @@
+use crate::io::AsyncWrite;
+
+use std::future::Future;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
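+    /// A future to write an entire buffer to an `AsyncWrite`.
+    ///
+    /// A sketch using [`AsyncWriteExt::write_all`](crate::io::AsyncWriteExt::write_all)
+    /// (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncWrite;
+    /// use tokio::io::AsyncWriteExt;
+    ///
+    /// # async fn dox(mut writer: impl AsyncWrite + Unpin) -> std::io::Result<()> {
+    /// // Keeps writing until every byte of the buffer has been accepted.
+    /// writer.write_all(b"some bytes").await?;
+    /// # Ok(())
+    /// # }
+    /// ```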
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct WriteAll<'a, W: ?Sized> {
+ writer: &'a mut W,
+ buf: &'a [u8],
+ }
+}
+
+pub(crate) fn write_all<'a, W>(writer: &'a mut W, buf: &'a [u8]) -> WriteAll<'a, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ WriteAll { writer, buf }
+}
+
+impl<W> Future for WriteAll<'_, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let me = &mut *self;
+ while !me.buf.is_empty() {
+ let n = ready!(Pin::new(&mut me.writer).poll_write(cx, me.buf))?;
+ {
+ let (_, rest) = mem::replace(&mut me.buf, &[]).split_at(n);
+ me.buf = rest;
+ }
+ if n == 0 {
+ return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+ }
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn assert_unpin() {
+ use std::marker::PhantomPinned;
+ crate::is_unpin::<WriteAll<'_, PhantomPinned>>();
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/write_buf.rs b/third_party/rust/tokio/src/io/util/write_buf.rs
new file mode 100644
index 0000000000..e49282fe0c
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/write_buf.rs
@@ -0,0 +1,43 @@
+use crate::io::AsyncWrite;
+
+use bytes::Buf;
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_io_util! {
+ /// A future to write some of the buffer to an `AsyncWrite`.
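+    ///
+    /// # Examples
+    ///
+    /// A sketch using [`AsyncWriteExt::write_buf`](crate::io::AsyncWriteExt::write_buf)
+    /// with a `bytes::Bytes` source (assumes the `io-util` feature is enabled):
+    ///
+    /// ```no_run
+    /// # use tokio::io::AsyncWrite;
+    /// use bytes::Bytes;
+    /// use tokio::io::AsyncWriteExt;
+    ///
+    /// # async fn dox(mut writer: impl AsyncWrite + Unpin) -> std::io::Result<()> {
+    /// let mut buf = Bytes::from_static(b"data");
+    /// // Writes from the front of `buf` and advances its internal cursor.
+    /// let n = writer.write_buf(&mut buf).await?;
+    /// # Ok(())
+    /// # }
+    /// ```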
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct WriteBuf<'a, W, B> {
+ writer: &'a mut W,
+ buf: &'a mut B,
+ }
+}
+
+/// Tries to write some bytes from the given `buf` to the writer in an
+/// asynchronous manner, returning a future.
+pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B>
+where
+ W: AsyncWrite,
+ B: Buf,
+{
+ WriteBuf { writer, buf }
+}
+
+impl<W, B> Future for WriteBuf<'_, W, B>
+where
+ W: AsyncWrite,
+ B: Buf,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ // safety: no data is moved from self
+ unsafe {
+ let me = self.get_unchecked_mut();
+ Pin::new_unchecked(&mut *me.writer).poll_write_buf(cx, &mut me.buf)
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/io/util/write_int.rs b/third_party/rust/tokio/src/io/util/write_int.rs
new file mode 100644
index 0000000000..672c35f076
--- /dev/null
+++ b/third_party/rust/tokio/src/io/util/write_int.rs
@@ -0,0 +1,122 @@
+use crate::io::AsyncWrite;
+
+use bytes::BufMut;
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::io;
+use std::mem::size_of;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+macro_rules! writer {
+ ($name:ident, $ty:ty, $writer:ident) => {
+ writer!($name, $ty, $writer, size_of::<$ty>());
+ };
+ ($name:ident, $ty:ty, $writer:ident, $bytes:expr) => {
+ pin_project! {
+ #[doc(hidden)]
+ pub struct $name<W> {
+ #[pin]
+ dst: W,
+ buf: [u8; $bytes],
+ written: u8,
+ }
+ }
+
+ impl<W> $name<W> {
+ pub(crate) fn new(w: W, value: $ty) -> Self {
+ let mut writer = $name {
+ buf: [0; $bytes],
+ written: 0,
+ dst: w,
+ };
+ BufMut::$writer(&mut &mut writer.buf[..], value);
+ writer
+ }
+ }
+
+ impl<W> Future for $name<W>
+ where
+ W: AsyncWrite,
+ {
+ type Output = io::Result<()>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut me = self.project();
+
+ if *me.written == $bytes as u8 {
+ return Poll::Ready(Ok(()));
+ }
+
+ while *me.written < $bytes as u8 {
+ *me.written += match me
+ .dst
+ .as_mut()
+ .poll_write(cx, &me.buf[*me.written as usize..])
+ {
+ Poll::Pending => return Poll::Pending,
+ Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
+ Poll::Ready(Ok(0)) => {
+ return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+ }
+ Poll::Ready(Ok(n)) => n as u8,
+ };
+ }
+ Poll::Ready(Ok(()))
+ }
+ }
+ };
+}
+
+macro_rules! writer8 {
+ ($name:ident, $ty:ty) => {
+ pin_project! {
+ #[doc(hidden)]
+ pub struct $name<W> {
+ #[pin]
+ dst: W,
+ byte: $ty,
+ }
+ }
+
+ impl<W> $name<W> {
+ pub(crate) fn new(dst: W, byte: $ty) -> Self {
+ Self { dst, byte }
+ }
+ }
+
+ impl<W> Future for $name<W>
+ where
+ W: AsyncWrite,
+ {
+ type Output = io::Result<()>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = self.project();
+
+ let buf = [*me.byte as u8];
+
+ match me.dst.poll_write(cx, &buf[..]) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
+ Poll::Ready(Ok(0)) => Poll::Ready(Err(io::ErrorKind::WriteZero.into())),
+ Poll::Ready(Ok(1)) => Poll::Ready(Ok(())),
+ Poll::Ready(Ok(_)) => unreachable!(),
+ }
+ }
+ }
+ };
+}
+
+writer8!(WriteU8, u8);
+writer8!(WriteI8, i8);
+
+writer!(WriteU16, u16, put_u16);
+writer!(WriteU32, u32, put_u32);
+writer!(WriteU64, u64, put_u64);
+writer!(WriteU128, u128, put_u128);
+
+writer!(WriteI16, i16, put_i16);
+writer!(WriteI32, i32, put_i32);
+writer!(WriteI64, i64, put_i64);
+writer!(WriteI128, i128, put_i128);
diff --git a/third_party/rust/tokio/src/lib.rs b/third_party/rust/tokio/src/lib.rs
new file mode 100644
index 0000000000..8172f40a3b
--- /dev/null
+++ b/third_party/rust/tokio/src/lib.rs
@@ -0,0 +1,390 @@
+#![doc(html_root_url = "https://docs.rs/tokio/0.2.18")]
+#![allow(
+ clippy::cognitive_complexity,
+ clippy::large_enum_variant,
+ clippy::needless_doctest_main
+)]
+#![warn(
+ missing_debug_implementations,
+ missing_docs,
+ rust_2018_idioms,
+ unreachable_pub
+)]
+#![deny(intra_doc_link_resolution_failure)]
+#![doc(test(
+ no_crate_inject,
+ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+//! A runtime for writing reliable, asynchronous, and slim applications.
+//!
+//! Tokio is an event-driven, non-blocking I/O platform for writing asynchronous
+//! applications with the Rust programming language. At a high level, it
+//! provides a few major components:
+//!
+//! * Tools for [working with asynchronous tasks][tasks], including
+//! [synchronization primitives and channels][sync] and [timeouts, delays, and
+//! intervals][time].
+//! * APIs for [performing asynchronous I/O][io], including [TCP and UDP][net] sockets,
+//! [filesystem][fs] operations, and [process] and [signal] management.
+//! * A [runtime] for executing asynchronous code, including a task scheduler,
+//! an I/O driver backed by the operating system's event queue (epoll, kqueue,
+//! IOCP, etc...), and a high performance timer.
+//!
+//! Guide level documentation is found on the [website].
+//!
+//! [tasks]: #working-with-tasks
+//! [sync]: crate::sync
+//! [time]: crate::time
+//! [io]: #asynchronous-io
+//! [net]: crate::net
+//! [fs]: crate::fs
+//! [process]: crate::process
+//! [signal]: crate::signal
+//! [runtime]: crate::runtime
+//! [website]: https://tokio.rs/docs/overview/
+//!
+//! # A Tour of Tokio
+//!
+//! Tokio consists of a number of modules that provide a range of functionality
+//! essential for implementing asynchronous applications in Rust. In this
+//! section, we will take a brief tour of Tokio, summarizing the major APIs and
+//! their uses.
+//!
+//! The easiest way to get started is to enable all features. Do this by
+//! enabling the `full` feature flag:
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["full"] }
+//! ```
+//!
+//! ## Feature flags
+//!
+//! Tokio uses a set of [feature flags] to reduce the amount of compiled code.
+//! It is possible to enable only the features you need. By default, Tokio
+//! enables no features, so you can pick the subset that fits your use
+//! case. Below is a list of the available feature flags. You may also notice
+//! above each function, struct and trait there is listed one or more feature flags
+//! that are required for that item to be used. If you are new to Tokio it is
+//! recommended that you use the `full` feature flag which will enable all public APIs.
+//! Beware though that this will pull in many extra dependencies that you may not
+//! need.
+//!
+//! - `full`: Enables all Tokio public API features listed below.
+//! - `rt-core`: Enables `tokio::spawn` and the basic (single-threaded) scheduler.
+//! - `rt-threaded`: Enables the heavier, multi-threaded, work-stealing scheduler.
+//! - `rt-util`: Enables non-scheduler utilities.
+//! - `io-driver`: Enables the `mio` based IO driver.
+//! - `io-util`: Enables the IO based `Ext` traits.
+//! - `io-std`: Enables `Stdout`, `Stdin` and `Stderr` types.
+//! - `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and `UdpSocket`.
+//! - `tcp`: Enables all `tokio::net::tcp` types.
+//! - `udp`: Enables all `tokio::net::udp` types.
+//! - `uds`: Enables all `tokio::net::unix` types.
+//! - `time`: Enables `tokio::time` types and allows the schedulers to enable
+//! the built-in timer.
+//! - `process`: Enables `tokio::process` types.
+//! - `macros`: Enables `#[tokio::main]` and `#[tokio::test]` macros.
+//! - `sync`: Enables all `tokio::sync` types.
+//! - `stream`: Enables optional `Stream` implementations for types within Tokio.
+//! - `signal`: Enables all `tokio::signal` types.
+//! - `fs`: Enables `tokio::fs` types.
+//! - `dns`: Enables async `tokio::net::ToSocketAddrs`.
+//! - `test-util`: Enables testing based infrastructure for the Tokio runtime.
+//! - `blocking`: Enables `block_in_place` and `spawn_blocking`.
+//!
+//! _Note: `AsyncRead` and `AsyncWrite` traits do not require any features and are
+//! always available._
+//!
+//! ### Internal features
+//!
+//! These features do not expose any new API, but influence internal
+//! implementation aspects of Tokio, and can pull in additional
+//! dependencies. They are not included in `full`:
+//!
+//! - `parking_lot`: As a potential optimization, use the _parking_lot_ crate's
+//! synchronization primitives internally. MSRV may increase according to the
+//! _parking_lot_ release in use.
+//!
+//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
+//!
+//! ### Authoring applications
+//!
+//! Tokio is great for writing applications and most users in this case shouldn't
+//! worry too much about what features they should pick. If you're unsure, we suggest
+//! going with `full` to ensure that you don't run into any roadblocks while you're
+//! building your application.
+//!
+//! #### Example
+//!
+//! This example shows the quickest way to get started with Tokio.
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["full"] }
+//! ```
+//!
+//! ### Authoring libraries
+//!
+//! As a library author, your goal should be to provide the lightest weight crate
+//! that is based on Tokio. To achieve this you should ensure that you only enable
+//! the features you need. This allows users to pick up your crate without having
+//! to enable unnecessary features.
+//!
+//! #### Example
+//!
+//! This example shows how you may want to import features for a library that just
+//! needs to call `tokio::spawn` and use a `TcpStream`.
+//!
+//! ```toml
+//! tokio = { version = "0.2", features = ["rt-core", "tcp"] }
+//! ```
+//!
+//! ## Working With Tasks
+//!
+//! Asynchronous programs in Rust are based around lightweight, non-blocking
+//! units of execution called [_tasks_][tasks]. The [`tokio::task`] module provides
+//! important tools for working with tasks:
+//!
+//! * The [`spawn`] function and [`JoinHandle`] type, for scheduling a new task
+//! on the Tokio runtime and awaiting the output of a spawned task, respectively,
+//! * Functions for [running blocking operations][blocking] in an asynchronous
+//! task context.
+//!
+//! The [`tokio::task`] module is present only when the "rt-core" feature flag
+//! is enabled.
+//!
+//! [tasks]: task/index.html#what-are-tasks
+//! [`tokio::task`]: crate::task
+//! [`spawn`]: crate::task::spawn()
+//! [`JoinHandle`]: crate::task::JoinHandle
+//! [blocking]: task/index.html#blocking-and-yielding
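+//!
+//! As a minimal sketch (assuming the "rt-core" and "macros" features are
+//! enabled), a task can be spawned and its output awaited through the
+//! returned [`JoinHandle`]:
+//!
+//! ```
+//! #[tokio::main]
+//! async fn main() {
+//!     // `spawn` schedules the task; the handle resolves to its output.
+//!     let handle = tokio::spawn(async { 1 + 2 });
+//!     assert_eq!(handle.await.unwrap(), 3);
+//! }
+//! ```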
+//!
+//! The [`tokio::sync`] module contains synchronization primitives to use when
+//! needing to communicate or share data. These include:
+//!
+//! * channels ([`oneshot`], [`mpsc`], and [`watch`]), for sending values
+//! between tasks,
+//! * a non-blocking [`Mutex`], for controlling access to a shared, mutable
+//! value,
+//! * an asynchronous [`Barrier`] type, for multiple tasks to synchronize before
+//! beginning a computation.
+//!
+//! The `tokio::sync` module is present only when the "sync" feature flag is
+//! enabled.
+//!
+//! [`tokio::sync`]: crate::sync
+//! [`Mutex`]: crate::sync::Mutex
+//! [`Barrier`]: crate::sync::Barrier
+//! [`oneshot`]: crate::sync::oneshot
+//! [`mpsc`]: crate::sync::mpsc
+//! [`watch`]: crate::sync::watch
+//!
+//! The [`tokio::time`] module provides utilities for tracking time and
+//! scheduling work. This includes functions for setting [timeouts][timeout] for
+//! tasks, [delaying][delay] work to run in the future, or [repeating an operation at an
+//! interval][interval].
+//!
+//! In order to use `tokio::time`, the "time" feature flag must be enabled.
+//!
+//! [`tokio::time`]: crate::time
+//! [delay]: crate::time::delay_for()
+//! [interval]: crate::time::interval()
+//! [timeout]: crate::time::timeout()
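+//!
+//! A sketch of delaying work (assuming the "time", "rt-core" and "macros"
+//! features are enabled):
+//!
+//! ```
+//! use std::time::Duration;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     // Suspends the task for at least 10ms without blocking the thread.
+//!     tokio::time::delay_for(Duration::from_millis(10)).await;
+//! }
+//! ```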
+//!
+//! Finally, Tokio provides a _runtime_ for executing asynchronous tasks. Most
+//! applications can use the [`#[tokio::main]`][main] macro to run their code on the
+//! Tokio runtime. In use-cases where manual control over the runtime is
+//! required, the [`tokio::runtime`] module provides APIs for configuring and
+//! managing runtimes.
+//!
+//! Using the runtime requires the "rt-core" or "rt-threaded" feature flags, to
+//! enable the basic [single-threaded scheduler][rt-core] and the [thread-pool
+//! scheduler][rt-threaded], respectively. See the [`runtime` module
+//! documentation][rt-features] for details. In addition, the "macros" feature
+//! flag enables the `#[tokio::main]` and `#[tokio::test]` attributes.
+//!
+//! [main]: attr.main.html
+//! [`tokio::runtime`]: crate::runtime
+//! [`Builder`]: crate::runtime::Builder
+//! [`Runtime`]: crate::runtime::Runtime
+//! [rt-core]: runtime/index.html#basic-scheduler
+//! [rt-threaded]: runtime/index.html#threaded-scheduler
+//! [rt-features]: runtime/index.html#runtime-scheduler
+//!
+//! ## Asynchronous IO
+//!
+//! As well as scheduling and running tasks, Tokio provides everything you need
+//! to perform input and output asynchronously.
+//!
+//! The [`tokio::io`] module provides Tokio's asynchronous core I/O primitives,
+//! the [`AsyncRead`], [`AsyncWrite`], and [`AsyncBufRead`] traits. In addition,
+//! when the "io-util" feature flag is enabled, it also provides combinators and
+//! functions for working with these traits, forming an asynchronous
+//! counterpart to [`std::io`]. When the "io-driver" feature flag is enabled, it
+//! also provides utilities for library authors implementing I/O resources.
+//!
+//! Tokio also includes APIs for performing various kinds of I/O and interacting
+//! with the operating system asynchronously. These include:
+//!
+//! * [`tokio::net`], which contains non-blocking versions of [TCP], [UDP], and
+//! [Unix Domain Sockets][UDS] (enabled by the "net" feature flag),
+//! * [`tokio::fs`], similar to [`std::fs`] but for performing filesystem I/O
+//! asynchronously (enabled by the "fs" feature flag),
+//! * [`tokio::signal`], for asynchronously handling Unix and Windows OS signals
+//! (enabled by the "signal" feature flag),
+//! * [`tokio::process`], for spawning and managing child processes (enabled by
+//! the "process" feature flag).
+//!
+//! [`tokio::io`]: crate::io
+//! [`AsyncRead`]: crate::io::AsyncRead
+//! [`AsyncWrite`]: crate::io::AsyncWrite
+//! [`AsyncBufRead`]: crate::io::AsyncBufRead
+//! [`std::io`]: std::io
+//! [`tokio::net`]: crate::net
+//! [TCP]: crate::net::tcp
+//! [UDP]: crate::net::udp
+//! [UDS]: crate::net::unix
+//! [`tokio::fs`]: crate::fs
+//! [`std::fs`]: std::fs
+//! [`tokio::signal`]: crate::signal
+//! [`tokio::process`]: crate::process
+//!
+//! # Examples
+//!
+//! A simple TCP echo server:
+//!
+//! ```no_run
+//! use tokio::net::TcpListener;
+//! use tokio::prelude::*;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+//!
+//! loop {
+//! let (mut socket, _) = listener.accept().await?;
+//!
+//! tokio::spawn(async move {
+//! let mut buf = [0; 1024];
+//!
+//! // In a loop, read data from the socket and write the data back.
+//! loop {
+//! let n = match socket.read(&mut buf).await {
+//! // socket closed
+//! Ok(n) if n == 0 => return,
+//! Ok(n) => n,
+//! Err(e) => {
+//! eprintln!("failed to read from socket; err = {:?}", e);
+//! return;
+//! }
+//! };
+//!
+//! // Write the data back
+//! if let Err(e) = socket.write_all(&buf[0..n]).await {
+//! eprintln!("failed to write to socket; err = {:?}", e);
+//! return;
+//! }
+//! }
+//! });
+//! }
+//! }
+//! ```
+
+// Includes re-exports used by macros.
+//
+// This module is not intended to be part of the public API. In general, any
+// `doc(hidden)` code is not part of Tokio's public and stable API.
+#[macro_use]
+#[doc(hidden)]
+pub mod macros;
+
+cfg_fs! {
+ pub mod fs;
+}
+
+#[doc(hidden)]
+pub mod future;
+
+pub mod io;
+pub mod net;
+
+mod loom;
+mod park;
+
+pub mod prelude;
+
+cfg_process! {
+ pub mod process;
+}
+
+pub mod runtime;
+
+pub(crate) mod coop;
+
+cfg_signal! {
+ pub mod signal;
+}
+
+cfg_stream! {
+ pub mod stream;
+}
+
+cfg_sync! {
+ pub mod sync;
+}
+cfg_not_sync! {
+ mod sync;
+}
+
+cfg_rt_core! {
+ pub mod task;
+ pub use task::spawn;
+}
+
+cfg_time! {
+ pub mod time;
+}
+
+mod util;
+
+cfg_macros! {
+ /// Implementation detail of the `select!` macro. This macro is **not**
+ /// intended to be used as part of the public API and is permitted to
+ /// change.
+ #[doc(hidden)]
+ pub use tokio_macros::select_priv_declare_output_enum;
+
+ doc_rt_core! {
+ cfg_rt_threaded! {
+ // This is the docs.rs case (with all features) so make sure macros
+ // is included in doc(cfg).
+
+ #[cfg(not(test))] // Work around for rust-lang/rust#62127
+ #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+ pub use tokio_macros::main_threaded as main;
+
+ #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+ pub use tokio_macros::test_threaded as test;
+ }
+
+ cfg_not_rt_threaded! {
+ #[cfg(not(test))] // Work around for rust-lang/rust#62127
+ pub use tokio_macros::main_basic as main;
+ pub use tokio_macros::test_basic as test;
+ }
+ }
+
+ // Maintains old behavior
+ cfg_not_rt_core! {
+ #[cfg(not(test))]
+ pub use tokio_macros::main;
+ pub use tokio_macros::test;
+ }
+}
+
+// TODO: rm
+#[cfg(feature = "io-util")]
+#[cfg(test)]
+fn is_unpin<T: Unpin>() {}
diff --git a/third_party/rust/tokio/src/loom/mocked.rs b/third_party/rust/tokio/src/loom/mocked.rs
new file mode 100644
index 0000000000..7891395225
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/mocked.rs
@@ -0,0 +1,13 @@
+pub(crate) use loom::*;
+
+pub(crate) mod rand {
+ pub(crate) fn seed() -> u64 {
+ 1
+ }
+}
+
+pub(crate) mod sys {
+ pub(crate) fn num_cpus() -> usize {
+ 2
+ }
+}
diff --git a/third_party/rust/tokio/src/loom/mod.rs b/third_party/rust/tokio/src/loom/mod.rs
new file mode 100644
index 0000000000..56a41f25a0
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/mod.rs
@@ -0,0 +1,12 @@
+//! This module abstracts over `loom` and `std::sync` depending on whether we
+//! are running tests or not.
+
+#[cfg(not(all(test, loom)))]
+mod std;
+#[cfg(not(all(test, loom)))]
+pub(crate) use self::std::*;
+
+#[cfg(all(test, loom))]
+mod mocked;
+#[cfg(all(test, loom))]
+pub(crate) use self::mocked::*;
diff --git a/third_party/rust/tokio/src/loom/std/atomic_ptr.rs b/third_party/rust/tokio/src/loom/std/atomic_ptr.rs
new file mode 100644
index 0000000000..eb8e47557a
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_ptr.rs
@@ -0,0 +1,32 @@
+use std::fmt;
+use std::ops::Deref;
+
+/// `AtomicPtr` providing an additional `with_mut` function.
+pub(crate) struct AtomicPtr<T> {
+ inner: std::sync::atomic::AtomicPtr<T>,
+}
+
+impl<T> AtomicPtr<T> {
+ pub(crate) fn new(ptr: *mut T) -> AtomicPtr<T> {
+ let inner = std::sync::atomic::AtomicPtr::new(ptr);
+ AtomicPtr { inner }
+ }
+
+ pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut *mut T) -> R) -> R {
+ f(self.inner.get_mut())
+ }
+}
+
+impl<T> Deref for AtomicPtr<T> {
+ type Target = std::sync::atomic::AtomicPtr<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T> fmt::Debug for AtomicPtr<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.deref().fmt(fmt)
+ }
+}
diff --git a/third_party/rust/tokio/src/loom/std/atomic_u16.rs b/third_party/rust/tokio/src/loom/std/atomic_u16.rs
new file mode 100644
index 0000000000..70390972b4
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_u16.rs
@@ -0,0 +1,44 @@
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::ops::Deref;
+
+/// `AtomicU16` providing an additional `unsync_load` function.
+pub(crate) struct AtomicU16 {
+ inner: UnsafeCell<std::sync::atomic::AtomicU16>,
+}
+
+unsafe impl Send for AtomicU16 {}
+unsafe impl Sync for AtomicU16 {}
+
+impl AtomicU16 {
+ pub(crate) fn new(val: u16) -> AtomicU16 {
+ let inner = UnsafeCell::new(std::sync::atomic::AtomicU16::new(val));
+ AtomicU16 { inner }
+ }
+
+ /// Performs an unsynchronized load.
+ ///
+ /// # Safety
+ ///
+ /// All mutations must have happened before the unsynchronized load.
+ /// Additionally, there must be no concurrent mutations.
+ pub(crate) unsafe fn unsync_load(&self) -> u16 {
+ *(*self.inner.get()).get_mut()
+ }
+}
+
+impl Deref for AtomicU16 {
+ type Target = std::sync::atomic::AtomicU16;
+
+ fn deref(&self) -> &Self::Target {
+ // safety: it is always safe to access `&self` fns on the inner value as
+ // we never perform unsafe mutations.
+ unsafe { &*self.inner.get() }
+ }
+}
+
+impl fmt::Debug for AtomicU16 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.deref().fmt(fmt)
+ }
+}
diff --git a/third_party/rust/tokio/src/loom/std/atomic_u32.rs b/third_party/rust/tokio/src/loom/std/atomic_u32.rs
new file mode 100644
index 0000000000..6f786c519f
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_u32.rs
@@ -0,0 +1,34 @@
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::ops::Deref;
+
+/// `AtomicU32` wrapper that mirrors the `loom` API.
+pub(crate) struct AtomicU32 {
+ inner: UnsafeCell<std::sync::atomic::AtomicU32>,
+}
+
+unsafe impl Send for AtomicU32 {}
+unsafe impl Sync for AtomicU32 {}
+
+impl AtomicU32 {
+ pub(crate) fn new(val: u32) -> AtomicU32 {
+ let inner = UnsafeCell::new(std::sync::atomic::AtomicU32::new(val));
+ AtomicU32 { inner }
+ }
+}
+
+impl Deref for AtomicU32 {
+ type Target = std::sync::atomic::AtomicU32;
+
+ fn deref(&self) -> &Self::Target {
+ // safety: it is always safe to access `&self` fns on the inner value as
+ // we never perform unsafe mutations.
+ unsafe { &*self.inner.get() }
+ }
+}
+
+impl fmt::Debug for AtomicU32 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.deref().fmt(fmt)
+ }
+}
diff --git a/third_party/rust/tokio/src/loom/std/atomic_u64.rs b/third_party/rust/tokio/src/loom/std/atomic_u64.rs
new file mode 100644
index 0000000000..206954fcc3
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_u64.rs
@@ -0,0 +1,60 @@
+//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a
+//! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a
+//! `Mutex`.
+
+pub(crate) use self::imp::AtomicU64;
+
+// `AtomicU64` can only be used on targets where `target_has_atomic` is 64 or greater.
+// Once `cfg_target_has_atomic` feature is stable, we can replace it with
+// `#[cfg(target_has_atomic = "64")]`.
+// Refs: https://github.com/rust-lang/rust/tree/master/src/librustc_target
+#[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))]
+mod imp {
+ pub(crate) use std::sync::atomic::AtomicU64;
+}
+
+#[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))]
+mod imp {
+ use std::sync::atomic::Ordering;
+ use std::sync::Mutex;
+
+ #[derive(Debug)]
+ pub(crate) struct AtomicU64 {
+ inner: Mutex<u64>,
+ }
+
+ impl AtomicU64 {
+ pub(crate) fn new(val: u64) -> AtomicU64 {
+ AtomicU64 {
+ inner: Mutex::new(val),
+ }
+ }
+
+ pub(crate) fn load(&self, _: Ordering) -> u64 {
+ *self.inner.lock().unwrap()
+ }
+
+ pub(crate) fn store(&self, val: u64, _: Ordering) {
+ *self.inner.lock().unwrap() = val;
+ }
+
+ pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
+ let mut lock = self.inner.lock().unwrap();
+ let prev = *lock;
+ *lock = prev | val;
+ prev
+ }
+
+ pub(crate) fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 {
+ let mut lock = self.inner.lock().unwrap();
+ let prev = *lock;
+
+ if prev != old {
+ return prev;
+ }
+
+ *lock = new;
+ prev
+ }
+ }
+}
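
The `compare_and_swap` above is worth a second look: it must return the previous value whether or not the swap happened, exactly like a hardware CAS. A std-only sketch of the same contract (the `MutexU64` name is illustrative; the `Ordering` argument is omitted since the lock already serializes everything):

```rust
use std::sync::Mutex;

struct MutexU64 {
    inner: Mutex<u64>,
}

impl MutexU64 {
    fn new(val: u64) -> MutexU64 {
        MutexU64 { inner: Mutex::new(val) }
    }

    // Swap only when the current value equals `old`, but always
    // return the previous value so callers can detect failure.
    fn compare_and_swap(&self, old: u64, new: u64) -> u64 {
        let mut lock = self.inner.lock().unwrap();
        let prev = *lock;
        if prev == old {
            *lock = new;
        }
        prev
    }
}

fn main() {
    let v = MutexU64::new(1);
    assert_eq!(v.compare_and_swap(1, 2), 1); // swapped: 1 -> 2
    assert_eq!(v.compare_and_swap(1, 3), 2); // no swap: current is 2
}
```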
diff --git a/third_party/rust/tokio/src/loom/std/atomic_u8.rs b/third_party/rust/tokio/src/loom/std/atomic_u8.rs
new file mode 100644
index 0000000000..4fcd0df3d4
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_u8.rs
@@ -0,0 +1,34 @@
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::ops::Deref;
+
+/// `AtomicU8` wrapped in an `UnsafeCell`, mirroring the `AtomicU16` wrapper above (minus `unsync_load`).
+pub(crate) struct AtomicU8 {
+ inner: UnsafeCell<std::sync::atomic::AtomicU8>,
+}
+
+unsafe impl Send for AtomicU8 {}
+unsafe impl Sync for AtomicU8 {}
+
+impl AtomicU8 {
+ pub(crate) fn new(val: u8) -> AtomicU8 {
+ let inner = UnsafeCell::new(std::sync::atomic::AtomicU8::new(val));
+ AtomicU8 { inner }
+ }
+}
+
+impl Deref for AtomicU8 {
+ type Target = std::sync::atomic::AtomicU8;
+
+ fn deref(&self) -> &Self::Target {
+ // safety: it is always safe to access `&self` fns on the inner value as
+ // we never perform unsafe mutations.
+ unsafe { &*self.inner.get() }
+ }
+}
+
+impl fmt::Debug for AtomicU8 {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.deref().fmt(fmt)
+ }
+}
diff --git a/third_party/rust/tokio/src/loom/std/atomic_usize.rs b/third_party/rust/tokio/src/loom/std/atomic_usize.rs
new file mode 100644
index 0000000000..0fe998f1f9
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/atomic_usize.rs
@@ -0,0 +1,56 @@
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::ops;
+
+/// `AtomicUsize` providing an additional `unsync_load` function.
+pub(crate) struct AtomicUsize {
+ inner: UnsafeCell<std::sync::atomic::AtomicUsize>,
+}
+
+unsafe impl Send for AtomicUsize {}
+unsafe impl Sync for AtomicUsize {}
+
+impl AtomicUsize {
+ pub(crate) fn new(val: usize) -> AtomicUsize {
+ let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val));
+ AtomicUsize { inner }
+ }
+
+ /// Performs an unsynchronized load.
+ ///
+ /// # Safety
+ ///
+ /// All mutations must have happened before the unsynchronized load.
+ /// Additionally, there must be no concurrent mutations.
+ pub(crate) unsafe fn unsync_load(&self) -> usize {
+ *(*self.inner.get()).get_mut()
+ }
+
+ pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R {
+ // safety: we have mutable access
+ f(unsafe { (*self.inner.get()).get_mut() })
+ }
+}
+
+impl ops::Deref for AtomicUsize {
+ type Target = std::sync::atomic::AtomicUsize;
+
+ fn deref(&self) -> &Self::Target {
+ // safety: it is always safe to access `&self` fns on the inner value as
+ // we never perform unsafe mutations.
+ unsafe { &*self.inner.get() }
+ }
+}
+
+impl ops::DerefMut for AtomicUsize {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // safety: we hold `&mut self`
+ unsafe { &mut *self.inner.get() }
+ }
+}
+
+impl fmt::Debug for AtomicUsize {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(fmt)
+ }
+}
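
A sketch of why `with_mut` above is sound: `&mut self` proves there is no other reference, so the closure can be handed a plain `&mut usize` with no atomic instruction. Names below are illustrative, not tokio API:

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::AtomicUsize;

struct Counter {
    inner: UnsafeCell<AtomicUsize>,
}

impl Counter {
    fn new(val: usize) -> Counter {
        Counter { inner: UnsafeCell::new(AtomicUsize::new(val)) }
    }

    fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R {
        // safety: `&mut self` guarantees exclusive access, so
        // `get_mut` can bypass the atomic entirely.
        f(unsafe { (*self.inner.get()).get_mut() })
    }
}

fn main() {
    let mut c = Counter::new(0);
    c.with_mut(|v| *v += 10);
    assert_eq!(c.with_mut(|v| *v), 10);
}
```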
diff --git a/third_party/rust/tokio/src/loom/std/mod.rs b/third_party/rust/tokio/src/loom/std/mod.rs
new file mode 100644
index 0000000000..595bdf60ed
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/mod.rs
@@ -0,0 +1,87 @@
+#![cfg_attr(any(not(feature = "full"), loom), allow(unused_imports, dead_code))]
+
+mod atomic_ptr;
+mod atomic_u16;
+mod atomic_u32;
+mod atomic_u64;
+mod atomic_u8;
+mod atomic_usize;
+mod unsafe_cell;
+
+pub(crate) mod cell {
+ pub(crate) use super::unsafe_cell::UnsafeCell;
+}
+
+#[cfg(any(feature = "sync", feature = "io-driver"))]
+pub(crate) mod future {
+ pub(crate) use crate::sync::AtomicWaker;
+}
+
+pub(crate) mod rand {
+ use std::collections::hash_map::RandomState;
+ use std::hash::{BuildHasher, Hash, Hasher};
+ use std::sync::atomic::AtomicU32;
+ use std::sync::atomic::Ordering::Relaxed;
+
+ static COUNTER: AtomicU32 = AtomicU32::new(1);
+
+ pub(crate) fn seed() -> u64 {
+ let rand_state = RandomState::new();
+
+ let mut hasher = rand_state.build_hasher();
+
+ // Hash some unique-ish data to generate some new state
+ COUNTER.fetch_add(1, Relaxed).hash(&mut hasher);
+
+ // Get the seed
+ hasher.finish()
+ }
+}
+
+pub(crate) mod sync {
+ pub(crate) use std::sync::Arc;
+
+ #[cfg(feature = "parking_lot")]
+ mod pl_wrappers;
+
+ // Below, make sure all the feature-influenced types are exported for
+ // internal use. Note however that some are not _currently_ named by
+ // consuming code.
+
+ #[cfg(feature = "parking_lot")]
+ #[allow(unused_imports)]
+ pub(crate) use pl_wrappers::{Condvar, Mutex};
+
+ #[cfg(feature = "parking_lot")]
+ #[allow(unused_imports)]
+ pub(crate) use parking_lot::{MutexGuard, WaitTimeoutResult};
+
+ #[cfg(not(feature = "parking_lot"))]
+ #[allow(unused_imports)]
+ pub(crate) use std::sync::{Condvar, Mutex, MutexGuard, WaitTimeoutResult};
+
+ pub(crate) mod atomic {
+ pub(crate) use crate::loom::std::atomic_ptr::AtomicPtr;
+ pub(crate) use crate::loom::std::atomic_u16::AtomicU16;
+ pub(crate) use crate::loom::std::atomic_u32::AtomicU32;
+ pub(crate) use crate::loom::std::atomic_u64::AtomicU64;
+ pub(crate) use crate::loom::std::atomic_u8::AtomicU8;
+ pub(crate) use crate::loom::std::atomic_usize::AtomicUsize;
+
+ pub(crate) use std::sync::atomic::{spin_loop_hint, AtomicBool};
+ }
+}
+
+pub(crate) mod sys {
+ #[cfg(feature = "rt-threaded")]
+ pub(crate) fn num_cpus() -> usize {
+ usize::max(1, num_cpus::get())
+ }
+
+ #[cfg(not(feature = "rt-threaded"))]
+ pub(crate) fn num_cpus() -> usize {
+ 1
+ }
+}
+
+pub(crate) use std::thread;
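
The `seed()` function above leans on `RandomState` being randomly keyed per process; the counter only guarantees that two calls never hash identical input. A standalone version of the same trick:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};
use std::sync::atomic::{AtomicU32, Ordering::Relaxed};

static COUNTER: AtomicU32 = AtomicU32::new(1);

fn seed() -> u64 {
    // Each `RandomState` carries fresh, randomly chosen keys.
    let mut hasher = RandomState::new().build_hasher();
    // Distinct counter values guarantee distinct hasher input.
    COUNTER.fetch_add(1, Relaxed).hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // Two seeds from the same process are distinct (up to the
    // astronomically unlikely 64-bit hash collision).
    assert_ne!(seed(), seed());
}
```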
diff --git a/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs b/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs
new file mode 100644
index 0000000000..3be8ba1c10
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/sync/pl_wrappers.rs
@@ -0,0 +1,79 @@
+//! A minimal adaptation of the `parking_lot` synchronization primitives to the
+//! equivalent `std::sync` types.
+//!
+//! This can be extended to additional types/methods as required.
+
+use std::sync::{LockResult, TryLockError, TryLockResult};
+use std::time::Duration;
+
+use parking_lot as pl;
+
+/// Adapter for `parking_lot::Mutex` to the `std::sync::Mutex` interface.
+#[derive(Debug)]
+pub(crate) struct Mutex<T: ?Sized>(pl::Mutex<T>);
+
+impl<T> Mutex<T> {
+ #[inline]
+ pub(crate) fn new(t: T) -> Mutex<T> {
+ Mutex(pl::Mutex::new(t))
+ }
+
+ #[inline]
+ pub(crate) fn lock(&self) -> LockResult<pl::MutexGuard<'_, T>> {
+ Ok(self.0.lock())
+ }
+
+ #[inline]
+ pub(crate) fn try_lock(&self) -> TryLockResult<pl::MutexGuard<'_, T>> {
+ match self.0.try_lock() {
+ Some(guard) => Ok(guard),
+ None => Err(TryLockError::WouldBlock),
+ }
+ }
+
+ // Note: additional methods such as `is_poisoned` and `into_inner` can
+ // be provided here as needed.
+}
+
+/// Adapter for `parking_lot::Condvar` to the `std::sync::Condvar` interface.
+#[derive(Debug)]
+pub(crate) struct Condvar(pl::Condvar);
+
+impl Condvar {
+ #[inline]
+ pub(crate) fn new() -> Condvar {
+ Condvar(pl::Condvar::new())
+ }
+
+ #[inline]
+ pub(crate) fn notify_one(&self) {
+ self.0.notify_one();
+ }
+
+ #[inline]
+ pub(crate) fn notify_all(&self) {
+ self.0.notify_all();
+ }
+
+ #[inline]
+ pub(crate) fn wait<'a, T>(
+ &self,
+ mut guard: pl::MutexGuard<'a, T>,
+ ) -> LockResult<pl::MutexGuard<'a, T>> {
+ self.0.wait(&mut guard);
+ Ok(guard)
+ }
+
+ #[inline]
+ pub(crate) fn wait_timeout<'a, T>(
+ &self,
+ mut guard: pl::MutexGuard<'a, T>,
+ timeout: Duration,
+ ) -> LockResult<(pl::MutexGuard<'a, T>, pl::WaitTimeoutResult)> {
+ let wtr = self.0.wait_for(&mut guard, timeout);
+ Ok((guard, wtr))
+ }
+
+ // Note: Additional methods `wait_timeout_ms`, `wait_timeout_until`,
+ // `wait_until` can be provided here as needed.
+}
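
The point of these wrappers is API compatibility: code written against the poisoning `std::sync` signatures keeps compiling when the `Mutex` alias switches to `parking_lot`. A sketch, using `std::sync::Mutex` directly since the wrapper above is a drop-in replacement:

```rust
use std::sync::Mutex; // the parking_lot wrapper above swaps in here unchanged

fn increment(counter: &Mutex<u64>) -> u64 {
    // `lock()` returns a `LockResult` in both worlds; with the
    // parking_lot adapter it is simply always `Ok`, so this
    // `unwrap()` can never panic there.
    let mut guard = counter.lock().unwrap();
    *guard += 1;
    *guard
}

fn main() {
    let m = Mutex::new(0);
    assert_eq!(increment(&m), 1);
}
```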
diff --git a/third_party/rust/tokio/src/loom/std/unsafe_cell.rs b/third_party/rust/tokio/src/loom/std/unsafe_cell.rs
new file mode 100644
index 0000000000..f2b03d8dc2
--- /dev/null
+++ b/third_party/rust/tokio/src/loom/std/unsafe_cell.rs
@@ -0,0 +1,16 @@
+#[derive(Debug)]
+pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>);
+
+impl<T> UnsafeCell<T> {
+ pub(crate) fn new(data: T) -> UnsafeCell<T> {
+ UnsafeCell(std::cell::UnsafeCell::new(data))
+ }
+
+ pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
+ f(self.0.get())
+ }
+
+ pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
+ f(self.0.get())
+ }
+}
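
Routing every access through `with`/`with_mut` gives the loom build a checkpoint for its model checker; the `std` version here is a plain pointer hand-off. A self-contained sketch of the same closure API (the `Cell` name is illustrative):

```rust
#[derive(Debug)]
struct Cell<T>(std::cell::UnsafeCell<T>);

impl<T> Cell<T> {
    fn new(data: T) -> Cell<T> {
        Cell(std::cell::UnsafeCell::new(data))
    }

    // Every access goes through a closure taking a raw pointer; the
    // caller upholds the aliasing rules when dereferencing.
    fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
        f(self.0.get())
    }

    fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
        f(self.0.get())
    }
}

fn main() {
    let c = Cell::new(5u32);
    c.with_mut(|p| unsafe { *p += 1 });
    assert_eq!(c.with(|p| unsafe { *p }), 6);
}
```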
diff --git a/third_party/rust/tokio/src/macros/cfg.rs b/third_party/rust/tokio/src/macros/cfg.rs
new file mode 100644
index 0000000000..18beb1bdbb
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/cfg.rs
@@ -0,0 +1,322 @@
+#![allow(unused_macros)]
+
+macro_rules! cfg_resource_drivers {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(feature = "io-driver", feature = "time"))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_blocking {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "blocking")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))]
+ $item
+ )*
+ }
+}
+
+/// Enables blocking API internals
+macro_rules! cfg_blocking_impl {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(
+ feature = "blocking",
+ feature = "fs",
+ feature = "dns",
+ feature = "io-std",
+ feature = "rt-threaded",
+ ))]
+ $item
+ )*
+ }
+}
+
+/// Enables code when the blocking API internals are disabled
+macro_rules! cfg_not_blocking_impl {
+ ($($item:item)*) => {
+ $(
+ #[cfg(not(any(
+ feature = "blocking",
+ feature = "fs",
+ feature = "dns",
+ feature = "io-std",
+ feature = "rt-threaded",
+ )))]
+ $item
+ )*
+ }
+}
+
+/// Enables internal `AtomicWaker` impl
+macro_rules! cfg_atomic_waker_impl {
+ ($($item:item)*) => {
+ $(
+ #[cfg(any(
+ feature = "io-driver",
+ feature = "time",
+ all(feature = "rt-core", feature = "rt-util")
+ ))]
+ #[cfg(not(loom))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_dns {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "dns")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "dns")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_fs {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "fs")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "fs")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_io_blocking {
+ ($($item:item)*) => {
+ $( #[cfg(any(feature = "io-std", feature = "fs"))] $item )*
+ }
+}
+
+macro_rules! cfg_io_driver {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "io-driver")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-driver")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_io_driver {
+ ($($item:item)*) => {
+ $(
+ #[cfg(not(feature = "io-driver"))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_io_std {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "io-std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-std")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_io_util {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "io-util")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_io_util {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "io-util"))] $item )*
+ }
+}
+
+macro_rules! cfg_loom {
+ ($($item:item)*) => {
+ $( #[cfg(loom)] $item )*
+ }
+}
+
+macro_rules! cfg_not_loom {
+ ($($item:item)*) => {
+ $( #[cfg(not(loom))] $item )*
+ }
+}
+
+macro_rules! cfg_macros {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "macros")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+ #[doc(inline)]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_process {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "process")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "process")))]
+ #[cfg(not(loom))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_signal {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "signal")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "signal")))]
+ #[cfg(not(loom))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_stream {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "stream")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_sync {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "sync")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sync")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_sync {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "sync"))] $item )*
+ }
+}
+
+macro_rules! cfg_rt_core {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "rt-core")]
+ $item
+ )*
+ }
+}
+
+macro_rules! doc_rt_core {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "rt-core")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "rt-core")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_rt_core {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "rt-core"))] $item )*
+ }
+}
+
+macro_rules! cfg_rt_threaded {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "rt-threaded")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "rt-threaded")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_rt_util {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "rt-util")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "rt-util")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_rt_threaded {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "rt-threaded"))] $item )*
+ }
+}
+
+macro_rules! cfg_tcp {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "tcp")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_test_util {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "test-util")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "test-util")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_test_util {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "test-util"))] $item )*
+ }
+}
+
+macro_rules! cfg_time {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "time")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_not_time {
+ ($($item:item)*) => {
+ $( #[cfg(not(feature = "time"))] $item )*
+ }
+}
+
+macro_rules! cfg_udp {
+ ($($item:item)*) => {
+ $(
+ #[cfg(feature = "udp")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "udp")))]
+ $item
+ )*
+ }
+}
+
+macro_rules! cfg_uds {
+ ($($item:item)*) => {
+ $(
+ #[cfg(all(unix, feature = "uds"))]
+ #[cfg_attr(docsrs, doc(cfg(feature = "uds")))]
+ $item
+ )*
+ }
+}
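
All of these macros share one shape: wrap each item in a `#[cfg]` gate plus, for public items, a docs.rs-only `doc(cfg)` annotation. A compilable sketch of what the `cfg_fs!`-style wrapping expands to (helper names are hypothetical):

```rust
// Local copy of one of the gates above. `docsrs` is only set by the
// docs.rs build, so the unstable `doc(cfg(...))` attribute never
// reaches a stable compiler.
macro_rules! cfg_fs {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "fs")]
            #[cfg_attr(docsrs, doc(cfg(feature = "fs")))]
            $item
        )*
    }
}

cfg_fs! {
    fn read_helper() {}
    fn write_helper() {}
}
// ...which is the same as writing the two attributes on each item:
//
//   #[cfg(feature = "fs")]
//   #[cfg_attr(docsrs, doc(cfg(feature = "fs")))]
//   fn read_helper() {}

fn main() {}
```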
diff --git a/third_party/rust/tokio/src/macros/join.rs b/third_party/rust/tokio/src/macros/join.rs
new file mode 100644
index 0000000000..5f37af510d
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/join.rs
@@ -0,0 +1,119 @@
+/// Wait on multiple concurrent branches, returning when **all** branches
+/// complete.
+///
+/// The `join!` macro must be used inside async functions, closures, and
+/// blocks.
+///
+/// The `join!` macro takes a list of async expressions and evaluates them
+/// concurrently on the same task. Each async expression evaluates to a future
+/// and the futures from each expression are multiplexed on the current task.
+///
+/// When working with async expressions returning `Result`, `join!` will wait
+/// for **all** branches to complete, regardless of whether any complete with
+/// `Err`. Use [`try_join!`] to return early when `Err` is encountered.
+///
+/// [`try_join!`]: macro@try_join
+///
+/// # Notes
+///
+/// The supplied futures are stored inline and do not require allocating a
+/// `Vec`.
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `join!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// # Examples
+///
+/// Basic join with two branches
+///
+/// ```
+/// async fn do_stuff_async() {
+/// // async work
+/// }
+///
+/// async fn more_async_work() {
+/// // more here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (first, second) = tokio::join!(
+/// do_stuff_async(),
+/// more_async_work());
+///
+/// // do something with the values
+/// }
+/// ```
+#[macro_export]
+#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+macro_rules! join {
+ (@ {
+ // One `_` for each branch in the `join!` macro. This is not used once
+ // normalization is complete.
+ ( $($count:tt)* )
+
+ // Normalized join! branches
+ $( ( $($skip:tt)* ) $e:expr, )*
+
+ }) => {{
+ use $crate::macros::support::{maybe_done, poll_fn, Future, Pin};
+ use $crate::macros::support::Poll::{Ready, Pending};
+
+ // Safety: nothing must be moved out of `futures`. This is to satisfy
+ // the requirement of `Pin::new_unchecked` called below.
+ let mut futures = ( $( maybe_done($e), )* );
+
+ poll_fn(move |cx| {
+ let mut is_pending = false;
+
+ $(
+ // Extract the future for this branch from the tuple.
+ let ( $($skip,)* fut, .. ) = &mut futures;
+
+ // Safety: future is stored on the stack above
+ // and never moved.
+ let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+ // Try polling
+ if fut.poll(cx).is_pending() {
+ is_pending = true;
+ }
+ )*
+
+ if is_pending {
+ Pending
+ } else {
+ Ready(($({
+ // Extract the future for this branch from the tuple.
+ let ( $($skip,)* fut, .. ) = &mut futures;
+
+ // Safety: future is stored on the stack above
+ // and never moved.
+ let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+ fut.take_output().expect("expected completed future")
+ },)*))
+ }
+ }).await
+ }};
+
+ // ===== Normalize =====
+
+ (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+ $crate::join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+ };
+
+ // ===== Entry point =====
+
+ ( $($e:expr),* $(,)?) => {
+ $crate::join!(@{ () } $($e,)*)
+ };
+}
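
The normalization rule at the bottom is the subtle part: each recursion moves one expression behind the `@{ ... }` marker and grows a unary `_` counter. A standalone miniature (the `normalize!` macro is hypothetical) that runs the same tt-muncher but ends by counting branches instead of building a pinned tuple:

```rust
macro_rules! normalize {
    // Main rule: everything normalized; emit one array slot per branch.
    (@ {
        ( $($count:tt)* )
        $( ( $($skip:tt)* ) $e:expr, )*
    }) => {
        [$( $e ),*].len()
    };
    // Muncher: shift one expression behind `@{ ... }`, grow the counter.
    (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
        normalize!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
    };
    // Entry point: seed the counter with `()`.
    ( $($e:expr),* $(,)? ) => {
        normalize!(@{ () } $($e,)*)
    };
}

fn main() {
    // One recursion per branch, just like `join!`.
    assert_eq!(normalize!(10, 20, 30), 3);
}
```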
diff --git a/third_party/rust/tokio/src/macros/loom.rs b/third_party/rust/tokio/src/macros/loom.rs
new file mode 100644
index 0000000000..d57d9fb0f7
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/loom.rs
@@ -0,0 +1,12 @@
+macro_rules! if_loom {
+ ($($t:tt)*) => {{
+ #[cfg(loom)]
+ const LOOM: bool = true;
+ #[cfg(not(loom))]
+ const LOOM: bool = false;
+
+ if LOOM {
+ $($t)*
+ }
+ }}
+}
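
Branching on a `const bool` instead of `#[cfg]`-ing the block away keeps the loom-only code type-checked in every build, while the dead branch compiles out. The macro is self-contained enough to try directly:

```rust
macro_rules! if_loom {
    ($($t:tt)*) => {{
        #[cfg(loom)]
        const LOOM: bool = true;
        #[cfg(not(loom))]
        const LOOM: bool = false;

        // The body below is type-checked either way, but only runs
        // when built with `RUSTFLAGS="--cfg loom"`.
        if LOOM {
            $($t)*
        }
    }}
}

fn main() {
    if_loom! {
        println!("running under the loom model checker");
    }
}
```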
diff --git a/third_party/rust/tokio/src/macros/mod.rs b/third_party/rust/tokio/src/macros/mod.rs
new file mode 100644
index 0000000000..2643c36018
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/mod.rs
@@ -0,0 +1,35 @@
+#![cfg_attr(not(feature = "full"), allow(unused_macros))]
+
+#[macro_use]
+mod cfg;
+
+#[macro_use]
+mod loom;
+
+#[macro_use]
+mod pin;
+
+#[macro_use]
+mod ready;
+
+#[macro_use]
+mod thread_local;
+
+#[macro_use]
+#[cfg(feature = "rt-core")]
+pub(crate) mod scoped_tls;
+
+cfg_macros! {
+ #[macro_use]
+ mod select;
+
+ #[macro_use]
+ mod join;
+
+ #[macro_use]
+ mod try_join;
+}
+
+// Includes re-exports needed to implement macros
+#[doc(hidden)]
+pub mod support;
diff --git a/third_party/rust/tokio/src/macros/pin.rs b/third_party/rust/tokio/src/macros/pin.rs
new file mode 100644
index 0000000000..33d8499e10
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/pin.rs
@@ -0,0 +1,144 @@
+/// Pins a value on the stack.
+///
+/// Calls to `async fn` return anonymous [`Future`] values that are `!Unpin`.
+/// These values must be pinned before they can be polled. Calling `.await` will
+/// handle this, but consumes the future. If it is required to call `.await` on
+/// a `&mut _` reference, the caller is responsible for pinning the future.
+///
+/// Pinning may be done by allocating with [`Box::pin`] or by using the stack
+/// with the `pin!` macro.
+///
+/// The following will **fail to compile**:
+///
+/// ```compile_fail
+/// async fn my_async_fn() {
+/// // async logic here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut future = my_async_fn();
+/// (&mut future).await;
+/// }
+/// ```
+///
+/// To make this work requires pinning:
+///
+/// ```
+/// use tokio::pin;
+///
+/// async fn my_async_fn() {
+/// // async logic here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let future = my_async_fn();
+/// pin!(future);
+///
+/// (&mut future).await;
+/// }
+/// ```
+///
+/// Pinning is useful when using `select!` and stream operators that require `T:
+/// Stream + Unpin`.
+///
+/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html
+/// [`Box::pin`]: std::boxed::Box::pin
+///
+/// # Usage
+///
+/// The `pin!` macro takes **identifiers** as arguments. It does **not** work
+/// with expressions.
+///
+/// The following does not compile as an expression is passed to `pin!`.
+///
+/// ```compile_fail
+/// async fn my_async_fn() {
+/// // async logic here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut future = pin!(my_async_fn());
+/// (&mut future).await;
+/// }
+/// ```
+///
+/// # Examples
+///
+/// Using with select:
+///
+/// ```
+/// use tokio::{pin, select};
+/// use tokio::stream::{self, StreamExt};
+///
+/// async fn my_async_fn() {
+/// // async logic here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut stream = stream::iter(vec![1, 2, 3, 4]);
+///
+/// let future = my_async_fn();
+/// pin!(future);
+///
+/// loop {
+/// select! {
+/// _ = &mut future => {
+/// // Stop looping, or `future` would be polled after completion
+/// break;
+/// }
+/// Some(val) = stream.next() => {
+/// println!("got value = {}", val);
+/// }
+/// }
+/// }
+/// }
+/// ```
+///
+/// Because assigning to a variable followed by pinning is common, there is also
+/// a variant of the macro that supports doing both in one go.
+///
+/// ```
+/// use tokio::{pin, select};
+///
+/// async fn my_async_fn() {
+/// // async logic here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// pin! {
+/// let future1 = my_async_fn();
+/// let future2 = my_async_fn();
+/// }
+///
+/// select! {
+/// _ = &mut future1 => {}
+/// _ = &mut future2 => {}
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! pin {
+ ($($x:ident),*) => { $(
+ // Move the value to ensure that it is owned
+ let mut $x = $x;
+ // Shadow the original binding so that it can't be directly accessed
+ // ever again.
+ #[allow(unused_mut)]
+ let mut $x = unsafe {
+ $crate::macros::support::Pin::new_unchecked(&mut $x)
+ };
+ )* };
+ ($(
+ let $x:ident = $init:expr;
+ )*) => {
+ $(
+ let $x = $init;
+ $crate::pin!($x);
+ )*
+ };
+}
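
What `pin!(future)` expands to can be written by hand; the soundness of `Pin::new_unchecked` comes entirely from the shadowing. A sketch of that expansion (the `assert_future` helper is illustrative, not tokio API):

```rust
use std::future::Future;
use std::pin::Pin;

async fn my_async_fn() {}

fn assert_future<F: Future>(_: &F) {}

fn main() {
    let future = my_async_fn();
    // -- hand-expansion of `pin!(future)` --
    let mut future = future; // move: ensure the value is owned
    #[allow(unused_mut)]
    let mut future: Pin<&mut _> = unsafe {
        // Sound: the unpinned binding is shadowed and unreachable,
        // so the pinned value can never be moved again.
        Pin::new_unchecked(&mut future)
    };
    // ---------------------------------------
    assert_future(&future); // a `Pin<&mut _>` to a future is itself a Future
}
```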
diff --git a/third_party/rust/tokio/src/macros/ready.rs b/third_party/rust/tokio/src/macros/ready.rs
new file mode 100644
index 0000000000..1f48623b80
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/ready.rs
@@ -0,0 +1,8 @@
+macro_rules! ready {
+ ($e:expr $(,)?) => {
+ match $e {
+ std::task::Poll::Ready(t) => t,
+ std::task::Poll::Pending => return std::task::Poll::Pending,
+ }
+ };
+}
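
This `ready!` is the crate-internal ancestor of what later became `std::task::ready!`: unwrap `Poll::Ready`, early-return on `Poll::Pending`. A sketch of its typical use inside a hand-written poll method (the `Doubler` type is hypothetical):

```rust
use std::task::{Context, Poll};

macro_rules! ready {
    ($e:expr $(,)?) => {
        match $e {
            Poll::Ready(t) => t,
            Poll::Pending => return Poll::Pending,
        }
    };
}

struct Doubler;

impl Doubler {
    fn poll_inner(&mut self, _cx: &mut Context<'_>) -> Poll<u32> {
        Poll::Ready(21)
    }

    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<u32> {
        // `ready!` propagates `Pending`, leaving the plain value behind.
        let v = ready!(self.poll_inner(cx));
        Poll::Ready(v * 2)
    }
}

fn main() {}
```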
diff --git a/third_party/rust/tokio/src/macros/scoped_tls.rs b/third_party/rust/tokio/src/macros/scoped_tls.rs
new file mode 100644
index 0000000000..666f382b28
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/scoped_tls.rs
@@ -0,0 +1,80 @@
+use crate::loom::thread::LocalKey;
+
+use std::cell::Cell;
+use std::marker;
+
+/// Declares a scoped thread-local storage key for temporarily storing a reference.
+#[macro_export]
+macro_rules! scoped_thread_local {
+ ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => (
+ $(#[$attrs])*
+ $vis static $name: $crate::macros::scoped_tls::ScopedKey<$ty>
+ = $crate::macros::scoped_tls::ScopedKey {
+ inner: {
+ thread_local!(static FOO: ::std::cell::Cell<*const ()> = {
+ std::cell::Cell::new(::std::ptr::null())
+ });
+ &FOO
+ },
+ _marker: ::std::marker::PhantomData,
+ };
+ )
+}
+
+/// Type representing a thread local storage key corresponding to a reference
+/// to the type parameter `T`.
+pub(crate) struct ScopedKey<T> {
+ #[doc(hidden)]
+ pub(crate) inner: &'static LocalKey<Cell<*const ()>>,
+ #[doc(hidden)]
+ pub(crate) _marker: marker::PhantomData<T>,
+}
+
+unsafe impl<T> Sync for ScopedKey<T> {}
+
+impl<T> ScopedKey<T> {
+ /// Inserts a value into this scoped thread-local storage slot for the
+ /// duration of a closure.
+ pub(crate) fn set<F, R>(&'static self, t: &T, f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ struct Reset {
+ key: &'static LocalKey<Cell<*const ()>>,
+ val: *const (),
+ }
+
+ impl Drop for Reset {
+ fn drop(&mut self) {
+ self.key.with(|c| c.set(self.val));
+ }
+ }
+
+ let prev = self.inner.with(|c| {
+ let prev = c.get();
+ c.set(t as *const _ as *const ());
+ prev
+ });
+
+ let _reset = Reset {
+ key: self.inner,
+ val: prev,
+ };
+
+ f()
+ }
+
+ /// Gets a value out of this scoped variable.
+ pub(crate) fn with<F, R>(&'static self, f: F) -> R
+ where
+ F: FnOnce(Option<&T>) -> R,
+ {
+ let val = self.inner.with(|c| c.get());
+
+ if val.is_null() {
+ f(None)
+ } else {
+ unsafe { f(Some(&*(val as *const T))) }
+ }
+ }
+}
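
A simplified, non-generic rendering of the `set`/`with` protocol above: stash a type-erased pointer in a thread-local for the duration of a closure, restore it afterwards. The drop-guard restore of the real code is flattened into straight-line code here, so this sketch is not panic-safe:

```rust
use std::cell::Cell;

thread_local!(static CURRENT: Cell<*const ()> = Cell::new(std::ptr::null()));

// Publish `val` for the duration of `f`, then restore the old pointer.
fn set<R>(val: &u32, f: impl FnOnce() -> R) -> R {
    let prev = CURRENT.with(|c| {
        let prev = c.get();
        c.set(val as *const u32 as *const ());
        prev
    });
    let ret = f();
    CURRENT.with(|c| c.set(prev));
    ret
}

// Read the current value back, if any.
fn with<R>(f: impl FnOnce(Option<&u32>) -> R) -> R {
    let ptr = CURRENT.with(|c| c.get());
    if ptr.is_null() {
        f(None)
    } else {
        // Sound only because `set` scopes the pointer to the closure call.
        unsafe { f(Some(&*(ptr as *const u32))) }
    }
}

fn main() {
    assert_eq!(with(|v| v.copied()), None);
    set(&7, || assert_eq!(with(|v| v.copied()), Some(7)));
    assert_eq!(with(|v| v.copied()), None);
}
```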
diff --git a/third_party/rust/tokio/src/macros/select.rs b/third_party/rust/tokio/src/macros/select.rs
new file mode 100644
index 0000000000..51b6fcd608
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/select.rs
@@ -0,0 +1,876 @@
+/// Wait on multiple concurrent branches, returning when the **first** branch
+/// completes, cancelling the remaining branches.
+///
+/// The `select!` macro must be used inside async functions, closures, and
+/// blocks.
+///
+/// The `select!` macro accepts one or more branches with the following pattern:
+///
+/// ```text
+/// <pattern> = <async expression> (, if <precondition>)? => <handler>,
+/// ```
+///
+/// Additionally, the `select!` macro may include a single, optional `else`
+/// branch, which evaluates if none of the other branches match their patterns:
+///
+/// ```text
+/// else <expression>
+/// ```
+///
+/// The macro aggregates all `<async expression>` expressions and runs them
+/// concurrently on the **current** task. Once the **first** expression
+/// completes with a value that matches its `<pattern>`, the `select!` macro
+/// returns the result of evaluating the completed branch's `<handler>`
+/// expression.
+///
+/// Additionally, each branch may include an optional `if` precondition. This
+/// precondition is evaluated **before** the `<async expression>`. If the
+/// precondition returns `false`, the branch is entirely disabled. This
+/// capability is useful when using `select!` within a loop.
+///
+/// The complete lifecycle of a `select!` expression is as follows:
+///
+/// 1. Evaluate all provided `<precondition>` expressions. If the precondition
+/// returns `false`, disable the branch for the remainder of the current call
+/// to `select!`. Re-entering `select!` due to a loop clears the "disabled"
+/// state.
+/// 2. Aggregate the `<async expression>`s from each branch, including the
+/// disabled ones. If the branch is disabled, `<async expression>` is still
+/// evaluated, but the resulting future is not polled.
+/// 3. Concurrently await on the results for all remaining `<async expression>`s.
+/// 4. Once an `<async expression>` returns a value, attempt to apply the value
+/// to the provided `<pattern>`, if the pattern matches, evaluate `<handler>`
+/// and return. If the pattern **does not** match, disable the current branch
+/// for the remainder of the current call to `select!` and continue from step 3.
+/// 5. If **all** branches are disabled, evaluate the `else` expression. If none
+/// is provided, panic.
+///
+/// # Notes
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `select!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// ### Avoid racy `if` preconditions
+///
+/// Given that `if` preconditions are used to disable `select!` branches, some
+/// caution must be used to avoid missing values.
+///
+/// For example, here is **incorrect** usage of `delay` with `if`. The objective
+/// is to repeatedly run an asynchronous task for up to 50 milliseconds.
+/// However, there is a potential for the `delay` completion to be missed.
+///
+/// ```no_run
+/// use tokio::time::{self, Duration};
+///
+/// async fn some_async_work() {
+/// // do work
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut delay = time::delay_for(Duration::from_millis(50));
+///
+/// while !delay.is_elapsed() {
+/// tokio::select! {
+/// _ = &mut delay, if !delay.is_elapsed() => {
+/// println!("operation timed out");
+/// }
+/// _ = some_async_work() => {
+/// println!("operation completed");
+/// }
+/// }
+/// }
+/// }
+/// ```
+///
+/// In the above example, `delay.is_elapsed()` may return `true` even if
+/// `delay.poll()` never returned `Ready`. This opens up a potential race
+/// condition where `delay` expires between the `while !delay.is_elapsed()`
+/// check and the call to `select!`, resulting in `some_async_work()` running
+/// uninterrupted despite the delay having elapsed.
+///
+/// One way to write the above example without the race would be:
+///
+/// ```
+/// use tokio::time::{self, Duration};
+///
+/// async fn some_async_work() {
+/// # time::delay_for(Duration::from_millis(10)).await;
+/// // do work
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut delay = time::delay_for(Duration::from_millis(50));
+///
+/// loop {
+/// tokio::select! {
+/// _ = &mut delay => {
+/// println!("operation timed out");
+/// break;
+/// }
+/// _ = some_async_work() => {
+/// println!("operation completed");
+/// }
+/// }
+/// }
+/// }
+/// ```
+///
+/// ### Fairness
+///
+/// `select!` randomly picks a branch to check first. This provides some level
+/// of fairness when calling `select!` in a loop with branches that are always
+/// ready.
+///
+/// # Panics
+///
+/// `select!` panics if all branches are disabled **and** there is no provided
+/// `else` branch. A branch is disabled when the provided `if` precondition
+/// returns `false` **or** when the pattern does not match the result of `<async
+/// expression>`.
+///
+/// # Examples
+///
+/// Basic select with two branches.
+///
+/// ```
+/// async fn do_stuff_async() {
+/// // async work
+/// }
+///
+/// async fn more_async_work() {
+/// // more here
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// tokio::select! {
+/// _ = do_stuff_async() => {
+/// println!("do_stuff_async() completed first")
+/// }
+/// _ = more_async_work() => {
+/// println!("more_async_work() completed first")
+/// }
+/// };
+/// }
+/// ```
+///
+/// Basic stream selecting.
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut stream1 = stream::iter(vec![1, 2, 3]);
+/// let mut stream2 = stream::iter(vec![4, 5, 6]);
+///
+/// let next = tokio::select! {
+/// v = stream1.next() => v.unwrap(),
+/// v = stream2.next() => v.unwrap(),
+/// };
+///
+/// assert!(next == 1 || next == 4);
+/// }
+/// ```
+///
+/// Collect the contents of two streams. In this example, we rely on pattern
+/// matching and the fact that `stream::iter` is "fused", i.e. once the stream
+/// is complete, all calls to `next()` return `None`.
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut stream1 = stream::iter(vec![1, 2, 3]);
+/// let mut stream2 = stream::iter(vec![4, 5, 6]);
+///
+/// let mut values = vec![];
+///
+/// loop {
+/// tokio::select! {
+/// Some(v) = stream1.next() => values.push(v),
+/// Some(v) = stream2.next() => values.push(v),
+/// else => break,
+/// }
+/// }
+///
+/// values.sort();
+/// assert_eq!(&[1, 2, 3, 4, 5, 6], &values[..]);
+/// }
+/// ```
+///
+/// Using the same future in multiple `select!` expressions can be done by passing
+/// a reference to the future. Doing so requires the future to be [`Unpin`]. A
+/// future can be made [`Unpin`] by either using [`Box::pin`] or stack pinning.
+///
+/// [`Unpin`]: std::marker::Unpin
+/// [`Box::pin`]: std::boxed::Box::pin
+///
+/// Here, a stream is consumed for at most 1 second.
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+/// use tokio::time::{self, Duration};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut stream = stream::iter(vec![1, 2, 3]);
+/// let mut delay = time::delay_for(Duration::from_secs(1));
+///
+/// loop {
+/// tokio::select! {
+/// maybe_v = stream.next() => {
+/// if let Some(v) = maybe_v {
+/// println!("got = {}", v);
+/// } else {
+/// break;
+/// }
+/// }
+/// _ = &mut delay => {
+/// println!("timeout");
+/// break;
+/// }
+/// }
+/// }
+/// }
+/// ```
+///
+/// Joining two values using `select!`.
+///
+/// ```
+/// use tokio::sync::oneshot;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx1, mut rx1) = oneshot::channel();
+/// let (tx2, mut rx2) = oneshot::channel();
+///
+/// tokio::spawn(async move {
+/// tx1.send("first").unwrap();
+/// });
+///
+/// tokio::spawn(async move {
+/// tx2.send("second").unwrap();
+/// });
+///
+/// let mut a = None;
+/// let mut b = None;
+///
+/// while a.is_none() || b.is_none() {
+/// tokio::select! {
+/// v1 = (&mut rx1), if a.is_none() => a = Some(v1.unwrap()),
+/// v2 = (&mut rx2), if b.is_none() => b = Some(v2.unwrap()),
+/// }
+/// }
+///
+/// let res = (a.unwrap(), b.unwrap());
+///
+/// assert_eq!(res.0, "first");
+/// assert_eq!(res.1, "second");
+/// }
+/// ```
+#[macro_export]
+#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+macro_rules! select {
+ // Uses a declarative macro to do **most** of the work. While it is possible
+ // to implement fully with a declarative macro, a procedural macro is used
+ // to enable improved error messages.
+ //
+ // The macro is structured as a tt-muncher. All branches are processed and
+ // normalized. Once the input is normalized, it is passed to the top-most
+ // rule. When entering the macro, `@{ }` is inserted at the front. This is
+ // used to collect the normalized input.
+ //
+ // The macro only recurses once per branch. This allows using `select!`
+ // without requiring the user to increase the recursion limit.
+
+ // All input is normalized, now transform.
+ (@ {
+ // One `_` for each branch in the `select!` macro. Passing this to
+ // `count!` converts $skip to an integer.
+ ( $($count:tt)* )
+
+ // Normalized select branches. `( $skip )` is a set of `_` characters.
+ // There is one `_` for each select branch **before** this one. Given
+ // that all input futures are stored in a tuple, $skip is useful for
+ // generating a pattern to reference the future for the current branch.
+ // $skip is also used as an argument to `count!`, returning the index of
+ // the current select branch.
+ $( ( $($skip:tt)* ) $bind:pat = $fut:expr, if $c:expr => $handle:expr, )+
+
+ // Fallback expression used when all select branches have been disabled.
+ ; $else:expr
+
+ }) => {{
+ // Enter a context where stable "function-like" proc macros can be used.
+ //
+ // This module is defined within a scope and should not leak out of this
+ // macro.
+ mod util {
+ // Generate an enum with one variant per select branch
+ $crate::select_priv_declare_output_enum!( ( $($count)* ) );
+ }
+
+ // `tokio::macros::support` is a public, but doc(hidden) module
+ // including a re-export of all types needed by this macro.
+ use $crate::macros::support::Future;
+ use $crate::macros::support::Pin;
+ use $crate::macros::support::Poll::{Ready, Pending};
+
+ const BRANCHES: u32 = $crate::count!( $($count)* );
+
+ let mut disabled: util::Mask = Default::default();
+
+ // First, invoke all the preconditions. For any that return `false`,
+ // set the appropriate bit in `disabled`.
+ $(
+ if !$c {
+ let mask = 1 << $crate::count!( $($skip)* );
+ disabled |= mask;
+ }
+ )*
+
+ // Create a scope to separate polling from handling the output. This
+ // adds borrow checker flexibility when using the macro.
+ let mut output = {
+ // Safety: Nothing must be moved out of `futures`. This is to
+ // satisfy the requirement of `Pin::new_unchecked` called below.
+ let mut futures = ( $( $fut , )+ );
+
+ $crate::macros::support::poll_fn(|cx| {
+ // Track if any branch returns pending. If no branch completes
+ // **or** returns pending, this implies that all branches are
+ // disabled.
+ let mut is_pending = false;
+
+ // Randomly generate a starting point. This makes `select!` a
+ // bit more fair and avoids always polling the first future.
+ let start = $crate::macros::support::thread_rng_n(BRANCHES);
+
+ for i in 0..BRANCHES {
+ let branch = (start + i) % BRANCHES;
+
+ match branch {
+ $(
+ $crate::count!( $($skip)* ) => {
+ // First, if the future has previously been
+ // disabled, do not poll it again. This is done
+ // by checking the associated bit in the
+ // `disabled` bit field.
+ let mask = 1 << branch;
+
+ if disabled & mask == mask {
+ // The future has been disabled.
+ continue;
+ }
+
+ // Extract the future for this branch from the
+ // tuple
+ let ( $($skip,)* fut, .. ) = &mut futures;
+
+ // Safety: future is stored on the stack above
+ // and never moved.
+ let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+ // Try polling it
+ let out = match fut.poll(cx) {
+ Ready(out) => out,
+ Pending => {
+ // Track that at least one future is
+ // still pending and continue polling.
+ is_pending = true;
+ continue;
+ }
+ };
+
+ // Disable this branch from any further polling.
+ disabled |= mask;
+
+ // The future returned a value, check if it matches
+ // the specified pattern.
+ #[allow(unused_variables)]
+ match &out {
+ $bind => {}
+ _ => continue,
+ }
+
+ // The select is complete, return the value
+ return Ready($crate::select_variant!(util::Out, ($($skip)*))(out));
+ }
+ )*
+ _ => unreachable!("reaching this means there probably is an off by one bug"),
+ }
+ }
+
+ if is_pending {
+ Pending
+ } else {
+ // All branches have been disabled.
+ Ready(util::Out::Disabled)
+ }
+ }).await
+ };
+
+ match output {
+ $(
+ $crate::select_variant!(util::Out, ($($skip)*) ($bind)) => $handle,
+ )*
+ util::Out::Disabled => $else,
+ _ => unreachable!("failed to match bind"),
+ }
+ }};
+
+ // ==== Normalize =====
+
+ // These rules match a single `select!` branch and normalize it for
+ // processing by the first rule.
+
+ (@ { $($t:tt)* } ) => {
+ // No `else` branch
+ $crate::select!(@{ $($t)*; unreachable!() })
+ };
+ (@ { $($t:tt)* } else => $else:expr $(,)?) => {
+ $crate::select!(@{ $($t)*; $else })
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, })
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, })
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ };
+ (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => {
+ $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ };
+
+ // ===== Entry point =====
+
+ ( $p:pat = $($t:tt)* ) => {
+ $crate::select!(@{ () } $p = $($t)*)
+ };
+ () => {
+ compile_error!("select! requires at least one branch.")
+ };
+}
+
+// And here... we manually list out matches for up to 64 branches... I'm not
+// happy about it either, but this is how we manage to use a declarative macro!
+
+#[macro_export]
+#[doc(hidden)]
+macro_rules! count {
+ () => {
+ 0
+ };
+ (_) => {
+ 1
+ };
+ (_ _) => {
+ 2
+ };
+ (_ _ _) => {
+ 3
+ };
+ (_ _ _ _) => {
+ 4
+ };
+ (_ _ _ _ _) => {
+ 5
+ };
+ (_ _ _ _ _ _) => {
+ 6
+ };
+ (_ _ _ _ _ _ _) => {
+ 7
+ };
+ (_ _ _ _ _ _ _ _) => {
+ 8
+ };
+ (_ _ _ _ _ _ _ _ _) => {
+ 9
+ };
+ (_ _ _ _ _ _ _ _ _ _) => {
+ 10
+ };
+ (_ _ _ _ _ _ _ _ _ _ _) => {
+ 11
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _) => {
+ 12
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 13
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 14
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 15
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 16
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 17
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 18
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 19
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 20
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 21
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 22
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 23
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 24
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 25
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 26
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 27
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 28
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 29
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 30
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 31
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 32
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 33
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 34
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 35
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 36
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 37
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 38
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 39
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 40
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 41
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 42
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 43
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 44
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 45
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 46
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 47
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 48
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 49
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 50
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 51
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 52
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 53
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 54
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 55
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 56
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 57
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 58
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 59
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 60
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 61
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 62
+ };
+ (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) => {
+ 63
+ };
+}
+
+#[macro_export]
+#[doc(hidden)]
+macro_rules! select_variant {
+ ($($p:ident)::*, () $($t:tt)*) => {
+ $($p)::*::_0 $($t)*
+ };
+ ($($p:ident)::*, (_) $($t:tt)*) => {
+ $($p)::*::_1 $($t)*
+ };
+ ($($p:ident)::*, (_ _) $($t:tt)*) => {
+ $($p)::*::_2 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _) $($t:tt)*) => {
+ $($p)::*::_3 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _) $($t:tt)*) => {
+ $($p)::*::_4 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_5 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_6 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_7 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_8 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_9 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_10 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_11 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_12 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_13 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_14 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_15 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_16 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_17 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_18 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_19 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_20 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_21 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_22 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_23 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_24 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_25 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_26 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_27 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_28 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_29 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_30 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_31 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_32 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_33 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_34 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_35 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_36 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_37 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_38 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_39 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_40 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_41 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_42 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_43 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_44 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_45 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_46 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_47 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_48 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_49 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_50 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_51 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_52 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_53 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_54 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_55 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_56 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_57 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_58 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_59 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_60 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_61 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_62 $($t)*
+ };
+ ($($p:ident)::*, (_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) $($t:tt)*) => {
+ $($p)::*::_63 $($t)*
+ };
+}
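
Stripped of the macro machinery, the polling loop above is a bitmask scan: one `disabled` bit per branch, a random starting offset for fairness, modular iteration. The same logic with the futures replaced by a readiness closure (all names illustrative):

```rust
fn scan(
    disabled: u64,
    branches: u32,
    start: u32,
    mut ready: impl FnMut(u32) -> bool,
) -> Option<u32> {
    for i in 0..branches {
        let branch = (start + i) % branches;
        let mask = 1u64 << branch;
        if disabled & mask == mask {
            continue; // precondition was false, or the pattern already failed
        }
        if ready(branch) {
            return Some(branch); // first ready branch, counting from `start`
        }
    }
    None // every enabled branch is still pending
}

fn main() {
    // Branch 0 is disabled up front (its `if` precondition was false);
    // branch 2 is the only one ready.
    assert_eq!(scan(0b001, 3, 1, |b| b == 2), Some(2));
}
```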
diff --git a/third_party/rust/tokio/src/macros/support.rs b/third_party/rust/tokio/src/macros/support.rs
new file mode 100644
index 0000000000..fc1cdfcfa0
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/support.rs
@@ -0,0 +1,8 @@
+cfg_macros! {
+ pub use crate::future::{maybe_done, poll_fn};
+ pub use crate::util::thread_rng_n;
+}
+
+pub use std::future::Future;
+pub use std::pin::Pin;
+pub use std::task::Poll;
diff --git a/third_party/rust/tokio/src/macros/thread_local.rs b/third_party/rust/tokio/src/macros/thread_local.rs
new file mode 100644
index 0000000000..d848947350
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/thread_local.rs
@@ -0,0 +1,4 @@
+#[cfg(all(loom, test))]
+macro_rules! thread_local {
+ ($($tts:tt)+) => { loom::thread_local!{ $($tts)+ } }
+}
diff --git a/third_party/rust/tokio/src/macros/try_join.rs b/third_party/rust/tokio/src/macros/try_join.rs
new file mode 100644
index 0000000000..fa5850ef0e
--- /dev/null
+++ b/third_party/rust/tokio/src/macros/try_join.rs
@@ -0,0 +1,132 @@
+/// Wait on multiple concurrent branches, returning when **all** branches
+/// complete with `Ok(_)` or on the first `Err(_)`.
+///
+/// The `try_join!` macro must be used inside async functions, closures, and
+/// blocks.
+///
+/// Similar to [`join!`], the `try_join!` macro takes a list of async
+/// expressions and evaluates them concurrently on the same task. Each async
+/// expression evaluates to a future and the futures from each expression are
+/// multiplexed on the current task. The `try_join!` macro returns when **all**
+/// branches return with `Ok` or when the **first** branch returns with `Err`.
+///
+/// [`join!`]: macro@join
+///
+/// # Notes
+///
+/// The supplied futures are stored inline and do not require allocating a
+/// `Vec`.
+///
+/// ### Runtime characteristics
+///
+/// By running all async expressions on the current task, the expressions are
+/// able to run **concurrently** but not in **parallel**. This means all
+/// expressions are run on the same thread and if one branch blocks the thread,
+/// all other expressions will be unable to continue. If parallelism is
+/// required, spawn each async expression using [`tokio::spawn`] and pass the
+/// join handle to `try_join!`.
+///
+/// [`tokio::spawn`]: crate::spawn
+///
+/// # Examples
+///
+/// Basic try_join with two branches.
+///
+/// ```
+/// async fn do_stuff_async() -> Result<(), &'static str> {
+/// // async work
+/// # Ok(())
+/// }
+///
+/// async fn more_async_work() -> Result<(), &'static str> {
+/// // more here
+/// # Ok(())
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let res = tokio::try_join!(
+/// do_stuff_async(),
+/// more_async_work());
+///
+/// match res {
+/// Ok((first, second)) => {
+/// // do something with the values
+/// }
+/// Err(err) => {
+/// println!("processing failed; error = {}", err);
+/// }
+/// }
+/// }
+/// ```
+#[macro_export]
+#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
+macro_rules! try_join {
+ (@ {
+ // One `_` for each branch in the `try_join!` macro. This is not used once
+ // normalization is complete.
+ ( $($count:tt)* )
+
+ // Normalized try_join! branches
+ $( ( $($skip:tt)* ) $e:expr, )*
+
+ }) => {{
+ use $crate::macros::support::{maybe_done, poll_fn, Future, Pin};
+ use $crate::macros::support::Poll::{Ready, Pending};
+
+ // Safety: nothing must be moved out of `futures`. This is to satisfy
+ // the requirement of `Pin::new_unchecked` called below.
+ let mut futures = ( $( maybe_done($e), )* );
+
+ poll_fn(move |cx| {
+ let mut is_pending = false;
+
+ $(
+ // Extract the future for this branch from the tuple.
+ let ( $($skip,)* fut, .. ) = &mut futures;
+
+ // Safety: future is stored on the stack above
+ // and never moved.
+ let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+ // Try polling
+ if fut.as_mut().poll(cx).is_pending() {
+ is_pending = true;
+ } else if fut.as_mut().output_mut().expect("expected completed future").is_err() {
+ return Ready(Err(fut.take_output().expect("expected completed future").err().unwrap()))
+ }
+ )*
+
+ if is_pending {
+ Pending
+ } else {
+ Ready(Ok(($({
+ // Extract the future for this branch from the tuple.
+ let ( $($skip,)* fut, .. ) = &mut futures;
+
+ // Safety: future is stored on the stack above
+ // and never moved.
+ let mut fut = unsafe { Pin::new_unchecked(fut) };
+
+ fut
+ .take_output()
+ .expect("expected completed future")
+ .ok()
+ .expect("expected Ok(_)")
+ },)*)))
+ }
+ }).await
+ }};
+
+ // ===== Normalize =====
+
+ (@ { ( $($s:tt)* ) $($t:tt)* } $e:expr, $($r:tt)* ) => {
+ $crate::try_join!(@{ ($($s)* _) $($t)* ($($s)*) $e, } $($r)*)
+ };
+
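+ // For example (illustrative expansion only): `try_join!(a, b)` enters
+ // through the entry point below as `try_join!(@{ () } a, b,)`, is
+ // normalized to `try_join!(@{ (_) () a, } b,)` and then to
+ // `try_join!(@{ (_ _) () a, (_) b, })`, which the implementation rule
+ // above accepts.
+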
+ // ===== Entry point =====
+
+ ( $($e:expr),* $(,)?) => {
+ $crate::try_join!(@{ () } $($e,)*)
+ };
+}
diff --git a/third_party/rust/tokio/src/net/addr.rs b/third_party/rust/tokio/src/net/addr.rs
new file mode 100644
index 0000000000..343d4e21ff
--- /dev/null
+++ b/third_party/rust/tokio/src/net/addr.rs
@@ -0,0 +1,281 @@
+use crate::future;
+
+use std::io;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
+
+/// Converts or resolves without blocking to one or more `SocketAddr` values.
+///
+/// # DNS
+///
+/// Implementations of `ToSocketAddrs` for string types require a DNS lookup.
+/// These implementations are only provided when Tokio is used with the
+/// **`dns`** feature flag.
+///
+/// # Calling
+///
+/// Currently, this trait is only used as an argument to Tokio functions that
+/// need to reference a target socket address. To perform a `SocketAddr`
+/// conversion directly, use [`lookup_host()`](super::lookup_host()).
+///
+/// This trait is sealed and is intended to be opaque. The details of the trait
+/// will change. Stabilization is pending enhancements to the Rust language.
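+///
+/// # Examples
+///
+/// A minimal sketch of forwarding an address argument through your own
+/// async function (the helper name is illustrative):
+///
+/// ```no_run
+/// use tokio::net::{TcpListener, ToSocketAddrs};
+/// use std::io;
+///
+/// async fn bind_anywhere<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
+/// TcpListener::bind(addr).await
+/// }
+/// ```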
+pub trait ToSocketAddrs: sealed::ToSocketAddrsPriv {}
+
+type ReadyFuture<T> = future::Ready<io::Result<T>>;
+
+// ===== impl &impl ToSocketAddrs =====
+
+impl<T: ToSocketAddrs + ?Sized> ToSocketAddrs for &T {}
+
+impl<T> sealed::ToSocketAddrsPriv for &T
+where
+ T: sealed::ToSocketAddrsPriv + ?Sized,
+{
+ type Iter = T::Iter;
+ type Future = T::Future;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ (**self).to_socket_addrs()
+ }
+}
+
+// ===== impl SocketAddr =====
+
+impl ToSocketAddrs for SocketAddr {}
+
+impl sealed::ToSocketAddrsPriv for SocketAddr {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ let iter = Some(*self).into_iter();
+ future::ok(iter)
+ }
+}
+
+// ===== impl SocketAddrV4 =====
+
+impl ToSocketAddrs for SocketAddrV4 {}
+
+impl sealed::ToSocketAddrsPriv for SocketAddrV4 {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ SocketAddr::V4(*self).to_socket_addrs()
+ }
+}
+
+// ===== impl SocketAddrV6 =====
+
+impl ToSocketAddrs for SocketAddrV6 {}
+
+impl sealed::ToSocketAddrsPriv for SocketAddrV6 {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ SocketAddr::V6(*self).to_socket_addrs()
+ }
+}
+
+// ===== impl (IpAddr, u16) =====
+
+impl ToSocketAddrs for (IpAddr, u16) {}
+
+impl sealed::ToSocketAddrsPriv for (IpAddr, u16) {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ let iter = Some(SocketAddr::from(*self)).into_iter();
+ future::ok(iter)
+ }
+}
+
+// ===== impl (Ipv4Addr, u16) =====
+
+impl ToSocketAddrs for (Ipv4Addr, u16) {}
+
+impl sealed::ToSocketAddrsPriv for (Ipv4Addr, u16) {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ let (ip, port) = *self;
+ SocketAddrV4::new(ip, port).to_socket_addrs()
+ }
+}
+
+// ===== impl (Ipv6Addr, u16) =====
+
+impl ToSocketAddrs for (Ipv6Addr, u16) {}
+
+impl sealed::ToSocketAddrsPriv for (Ipv6Addr, u16) {
+ type Iter = std::option::IntoIter<SocketAddr>;
+ type Future = ReadyFuture<Self::Iter>;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ let (ip, port) = *self;
+ SocketAddrV6::new(ip, port, 0, 0).to_socket_addrs()
+ }
+}
+
+cfg_dns! {
+ // ===== impl str =====
+
+ impl ToSocketAddrs for str {}
+
+ impl sealed::ToSocketAddrsPriv for str {
+ type Iter = sealed::OneOrMore;
+ type Future = sealed::MaybeReady;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ use crate::runtime::spawn_blocking;
+ use sealed::MaybeReady;
+
+ // First check if the input parses as a socket address
+ let res: Result<SocketAddr, _> = self.parse();
+
+ if let Ok(addr) = res {
+ return MaybeReady::Ready(Some(addr));
+ }
+
+ // Run DNS lookup on the blocking pool
+ let s = self.to_owned();
+
+ MaybeReady::Blocking(spawn_blocking(move || {
+ std::net::ToSocketAddrs::to_socket_addrs(&s)
+ }))
+ }
+ }
+
+ // ===== impl (&str, u16) =====
+
+ impl ToSocketAddrs for (&str, u16) {}
+
+ impl sealed::ToSocketAddrsPriv for (&str, u16) {
+ type Iter = sealed::OneOrMore;
+ type Future = sealed::MaybeReady;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ use crate::runtime::spawn_blocking;
+ use sealed::MaybeReady;
+
+ let (host, port) = *self;
+
+ // try to parse the host as a regular IP address first
+ if let Ok(addr) = host.parse::<Ipv4Addr>() {
+ let addr = SocketAddrV4::new(addr, port);
+ let addr = SocketAddr::V4(addr);
+
+ return MaybeReady::Ready(Some(addr));
+ }
+
+ if let Ok(addr) = host.parse::<Ipv6Addr>() {
+ let addr = SocketAddrV6::new(addr, port, 0, 0);
+ let addr = SocketAddr::V6(addr);
+
+ return MaybeReady::Ready(Some(addr));
+ }
+
+ let host = host.to_owned();
+
+ MaybeReady::Blocking(spawn_blocking(move || {
+ std::net::ToSocketAddrs::to_socket_addrs(&(&host[..], port))
+ }))
+ }
+ }
+
+ // ===== impl String =====
+
+ impl ToSocketAddrs for String {}
+
+ impl sealed::ToSocketAddrsPriv for String {
+ type Iter = <str as sealed::ToSocketAddrsPriv>::Iter;
+ type Future = <str as sealed::ToSocketAddrsPriv>::Future;
+
+ fn to_socket_addrs(&self) -> Self::Future {
+ (&self[..]).to_socket_addrs()
+ }
+ }
+}
+
+pub(crate) mod sealed {
+ //! The contents of this trait are intended to remain private and __not__
+ //! part of the `ToSocketAddrs` public API. The details will change over
+ //! time.
+
+ use std::future::Future;
+ use std::io;
+ use std::net::SocketAddr;
+
+ cfg_dns! {
+ use crate::task::JoinHandle;
+
+ use std::option;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+ use std::vec;
+ }
+
+ #[doc(hidden)]
+ pub trait ToSocketAddrsPriv {
+ type Iter: Iterator<Item = SocketAddr> + Send + 'static;
+ type Future: Future<Output = io::Result<Self::Iter>> + Send + 'static;
+
+ fn to_socket_addrs(&self) -> Self::Future;
+ }
+
+ cfg_dns! {
+ #[doc(hidden)]
+ #[derive(Debug)]
+ pub enum MaybeReady {
+ Ready(Option<SocketAddr>),
+ Blocking(JoinHandle<io::Result<vec::IntoIter<SocketAddr>>>),
+ }
+
+ #[doc(hidden)]
+ #[derive(Debug)]
+ pub enum OneOrMore {
+ One(option::IntoIter<SocketAddr>),
+ More(vec::IntoIter<SocketAddr>),
+ }
+
+ impl Future for MaybeReady {
+ type Output = io::Result<OneOrMore>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match *self {
+ MaybeReady::Ready(ref mut i) => {
+ let iter = OneOrMore::One(i.take().into_iter());
+ Poll::Ready(Ok(iter))
+ }
+ MaybeReady::Blocking(ref mut rx) => {
+ let res = ready!(Pin::new(rx).poll(cx))?.map(OneOrMore::More);
+
+ Poll::Ready(res)
+ }
+ }
+ }
+ }
+
+ impl Iterator for OneOrMore {
+ type Item = SocketAddr;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self {
+ OneOrMore::One(i) => i.next(),
+ OneOrMore::More(i) => i.next(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ OneOrMore::One(i) => i.size_hint(),
+ OneOrMore::More(i) => i.size_hint(),
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/net/lookup_host.rs b/third_party/rust/tokio/src/net/lookup_host.rs
new file mode 100644
index 0000000000..3098b463e3
--- /dev/null
+++ b/third_party/rust/tokio/src/net/lookup_host.rs
@@ -0,0 +1,38 @@
+cfg_dns! {
+ use crate::net::addr::ToSocketAddrs;
+
+ use std::io;
+ use std::net::SocketAddr;
+
+ /// Performs a DNS resolution.
+ ///
+ /// The returned iterator may not actually yield any values depending on the
+ /// outcome of any resolution performed.
+ ///
+ /// This API is not intended to cover all DNS use cases. Anything beyond the
+ /// basic use case should be done with a specialized library.
+ ///
+ /// # Examples
+ ///
+ /// To resolve a DNS entry:
+ ///
+ /// ```no_run
+ /// use tokio::net;
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// for addr in net::lookup_host("localhost:3000").await? {
+ /// println!("socket address is {}", addr);
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub async fn lookup_host<T>(host: T) -> io::Result<impl Iterator<Item = SocketAddr>>
+ where
+ T: ToSocketAddrs
+ {
+ host.to_socket_addrs().await
+ }
+}
diff --git a/third_party/rust/tokio/src/net/mod.rs b/third_party/rust/tokio/src/net/mod.rs
new file mode 100644
index 0000000000..eb24ac0ba5
--- /dev/null
+++ b/third_party/rust/tokio/src/net/mod.rs
@@ -0,0 +1,49 @@
+#![cfg(not(loom))]
+
+//! TCP/UDP/Unix bindings for `tokio`.
+//!
+//! This module contains the TCP/UDP/Unix networking types, similar to the standard
+//! library, which can be used to implement networking protocols.
+//!
+//! # Organization
+//!
+//! * [`TcpListener`] and [`TcpStream`] provide functionality for communication over TCP
+//! * [`UdpSocket`] provides functionality for communication over UDP
+//! * [`UnixListener`] and [`UnixStream`] provide functionality for communication over a
+//! Unix Domain Stream Socket **(available on Unix only)**
+//! * [`UnixDatagram`] provides functionality for communication
+//! over Unix Domain Datagram Socket **(available on Unix only)**
+//!
+//! [`TcpListener`]: TcpListener
+//! [`TcpStream`]: TcpStream
+//! [`UdpSocket`]: UdpSocket
+//! [`UnixListener`]: UnixListener
+//! [`UnixStream`]: UnixStream
+//! [`UnixDatagram`]: UnixDatagram
+
+mod addr;
+pub use addr::ToSocketAddrs;
+
+cfg_dns! {
+ mod lookup_host;
+ pub use lookup_host::lookup_host;
+}
+
+cfg_tcp! {
+ pub mod tcp;
+ pub use tcp::listener::TcpListener;
+ pub use tcp::stream::TcpStream;
+}
+
+cfg_udp! {
+ pub mod udp;
+ pub use udp::socket::UdpSocket;
+}
+
+cfg_uds! {
+ pub mod unix;
+ pub use unix::datagram::UnixDatagram;
+ pub use unix::listener::UnixListener;
+ pub use unix::stream::UnixStream;
+}
diff --git a/third_party/rust/tokio/src/net/tcp/incoming.rs b/third_party/rust/tokio/src/net/tcp/incoming.rs
new file mode 100644
index 0000000000..062be1e9cf
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/incoming.rs
@@ -0,0 +1,42 @@
+use crate::net::tcp::{TcpListener, TcpStream};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Stream returned by the `TcpListener::incoming` function representing the
+/// stream of sockets received from a listener.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Incoming<'a> {
+ inner: &'a mut TcpListener,
+}
+
+impl Incoming<'_> {
+ pub(crate) fn new(listener: &mut TcpListener) -> Incoming<'_> {
+ Incoming { inner: listener }
+ }
+
+ /// Attempts to accept a connection by polling the inner `TcpListener`,
+ /// yielding a `TcpStream` on success.
+ ///
+ /// If the `TcpListener` isn't ready yet, `Poll::Pending` is returned and
+ /// the current task will be notified by a waker.
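+ ///
+ /// # Examples
+ ///
+ /// A sketch of driving `poll_accept` by hand with a `poll_fn` future
+ /// (assuming the `futures` crate, as in the other examples):
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use futures::future::poll_fn;
+ /// use std::pin::Pin;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> std::io::Result<()> {
+ /// let mut listener = TcpListener::bind("127.0.0.1:0").await?;
+ /// let mut incoming = listener.incoming();
+ ///
+ /// let socket = poll_fn(|cx| Pin::new(&mut incoming).poll_accept(cx)).await?;
+ /// # drop(socket);
+ /// Ok(())
+ /// }
+ /// ```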
+ pub fn poll_accept(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<TcpStream>> {
+ let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+ Poll::Ready(Ok(socket))
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for Incoming<'_> {
+ type Item = io::Result<TcpStream>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+ Poll::Ready(Some(Ok(socket)))
+ }
+}
diff --git a/third_party/rust/tokio/src/net/tcp/listener.rs b/third_party/rust/tokio/src/net/tcp/listener.rs
new file mode 100644
index 0000000000..cde22cb636
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/listener.rs
@@ -0,0 +1,441 @@
+use crate::future::poll_fn;
+use crate::io::PollEvented;
+use crate::net::tcp::{Incoming, TcpStream};
+use crate::net::ToSocketAddrs;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::net::{self, SocketAddr};
+use std::task::{Context, Poll};
+
+cfg_tcp! {
+ /// A TCP socket server, listening for connections.
+ ///
+ /// You can accept a new connection by using the [`accept`](`TcpListener::accept`) method. Alternatively `TcpListener`
+ /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows you to use the listener in places that want a
+ /// stream. The stream will never return `None` and will also not yield the peer's `SocketAddr` structure. Iterating over
+ /// it is equivalent to calling accept in a loop.
+ ///
+ /// # Errors
+ ///
+ /// Note that accepting a connection can lead to various errors and not all
+ /// of them are necessarily fatal ‒ for example having too many open file
+ /// descriptors or the other side closing the connection while it waits in
+ /// an accept queue. These would terminate the stream if not handled in any
+ /// way.
+ ///
+ /// # Examples
+ ///
+ /// Using `accept`:
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ ///
+ /// async fn process_socket<T>(socket: T) {
+ /// # drop(socket);
+ /// // do work with socket here
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+ ///
+ /// loop {
+ /// let (socket, _) = listener.accept().await?;
+ /// process_socket(socket).await;
+ /// }
+ /// }
+ /// ```
+ ///
+ /// Using `impl Stream`:
+ /// ```no_run
+ /// use tokio::{net::TcpListener, stream::StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap();
+ /// while let Some(stream) = listener.next().await {
+ /// match stream {
+ /// Ok(stream) => {
+ /// println!("new client!");
+ /// }
+ /// Err(e) => { /* connection failed */ }
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub struct TcpListener {
+ io: PollEvented<mio::net::TcpListener>,
+ }
+}
+
+impl TcpListener {
+ /// Creates a new TcpListener which will be bound to the specified address.
+ ///
+ /// The returned listener is ready for accepting connections.
+ ///
+ /// Binding with a port number of 0 will request that the OS assigns a port
+ /// to this listener. The port allocated can be queried via the `local_addr`
+ /// method.
+ ///
+ /// The address type can be any implementor of the `ToSocketAddrs` trait.
+ ///
+ /// If `addr` yields multiple addresses, bind will be attempted with each of
+ /// the addresses until one succeeds and returns the listener. If none of
+ /// the addresses succeed in creating a listener, the error returned from
+ /// the last attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let listener = TcpListener::bind("127.0.0.1:0").await?;
+ ///
+ /// // use the listener
+ ///
+ /// # let _ = listener;
+ /// Ok(())
+ /// }
+ /// ```
+ pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<TcpListener> {
+ let addrs = addr.to_socket_addrs().await?;
+
+ let mut last_err = None;
+
+ for addr in addrs {
+ match TcpListener::bind_addr(addr) {
+ Ok(listener) => return Ok(listener),
+ Err(e) => last_err = Some(e),
+ }
+ }
+
+ Err(last_err.unwrap_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "could not resolve to any address",
+ )
+ }))
+ }
+
+ fn bind_addr(addr: SocketAddr) -> io::Result<TcpListener> {
+ let listener = mio::net::TcpListener::bind(&addr)?;
+ TcpListener::new(listener)
+ }
+
+ /// Accepts a new incoming connection from this listener.
+ ///
+ /// This function will yield once a new TCP connection is established. When
+ /// established, the corresponding [`TcpStream`] and the remote peer's
+ /// address will be returned.
+ ///
+ /// [`TcpStream`]: ../struct.TcpStream.html
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+ ///
+ /// match listener.accept().await {
+ /// Ok((_socket, addr)) => println!("new client: {:?}", addr),
+ /// Err(e) => println!("couldn't get client: {:?}", e),
+ /// }
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub async fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
+ poll_fn(|cx| self.poll_accept(cx)).await
+ }
+
+ /// Attempts to accept a new incoming connection, returning the `TcpStream`
+ /// and the remote peer's `SocketAddr` on success.
+ ///
+ /// If the I/O resource isn't ready yet, `Poll::Pending` is returned and
+ /// the current task will be notified by a waker.
+ pub fn poll_accept(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<(TcpStream, SocketAddr)>> {
+ let (io, addr) = ready!(self.poll_accept_std(cx))?;
+
+ let io = mio::net::TcpStream::from_stream(io)?;
+ let io = TcpStream::new(io)?;
+
+ Poll::Ready(Ok((io, addr)))
+ }
+
+ fn poll_accept_std(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<(net::TcpStream, SocketAddr)>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().accept_std() {
+ Ok(pair) => Poll::Ready(Ok(pair)),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+
+ /// Creates a new TCP listener from the standard library's TCP listener.
+ ///
+ /// This method can be used when the `Handle::tcp_listen` method isn't
+ /// sufficient because perhaps some more configuration is needed before the
+ /// calls to `bind` and `listen`.
+ ///
+ /// This API is typically paired with the `net2` crate and the `TcpBuilder`
+ /// type to build up and customize a listener before it's shipped off to the
+ /// backing event loop. This allows configuration of options like
+ /// `SO_REUSEPORT`, binding to multiple addresses, etc.
+ ///
+ /// The `addr` argument here is one of the addresses that `listener` is
+ /// bound to and the listener will only be guaranteed to accept connections
+ /// of the same address type currently.
+ ///
+ /// The platform specific behavior of this function looks like:
+ ///
+ /// * On Unix, the socket is placed into nonblocking mode and connections
+ /// can be accepted as normal
+ ///
+ /// * On Windows, the address is stored internally and all future accepts
+ /// will only be for the same IP version as `addr` specified. That is, if
+ /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as
+ /// well (same for IPv6).
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ /// use tokio::net::TcpListener;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let std_listener = std::net::TcpListener::bind("127.0.0.1:0")?;
+ /// let listener = TcpListener::from_std(std_listener)?;
+ /// Ok(())
+ /// }
+ /// ```
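+ ///
+ /// A sketch of the `net2` pairing described above (assumes `net2` is
+ /// available as a dependency; not compiled here):
+ ///
+ /// ```ignore
+ /// use net2::TcpBuilder;
+ /// use tokio::net::TcpListener;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let std_listener = TcpBuilder::new_v4()?
+ /// .reuse_address(true)?
+ /// .bind("127.0.0.1:0")?
+ /// .listen(1024)?;
+ /// let listener = TcpListener::from_std(std_listener)?;
+ /// # let _ = listener;
+ /// # Ok(())
+ /// # }
+ /// ```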
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn from_std(listener: net::TcpListener) -> io::Result<TcpListener> {
+ let io = mio::net::TcpListener::from_std(listener)?;
+ let io = PollEvented::new(io)?;
+ Ok(TcpListener { io })
+ }
+
+ fn new(listener: mio::net::TcpListener) -> io::Result<TcpListener> {
+ let io = PollEvented::new(listener)?;
+ Ok(TcpListener { io })
+ }
+
+ /// Returns the local address that this listener is bound to.
+ ///
+ /// This can be useful, for example, when binding to port 0 to figure out
+ /// which port was actually bound.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ /// use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let listener = TcpListener::bind("127.0.0.1:8080").await?;
+ ///
+ /// assert_eq!(listener.local_addr()?,
+ /// SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080)));
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+ /// Returns a stream over the connections being received on this listener.
+ ///
+ /// Note that `TcpListener` also directly implements `Stream`.
+ ///
+ /// The returned stream will never return `None` and will also not yield the
+ /// peer's `SocketAddr` structure. Iterating over it is equivalent to
+ /// calling accept in a loop.
+ ///
+ /// # Errors
+ ///
+ /// Note that accepting a connection can lead to various errors and not all
+ /// of them are necessarily fatal ‒ for example having too many open file
+ /// descriptors or the other side closing the connection while it waits in
+ /// an accept queue. These would terminate the stream if not handled in any
+ /// way.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::{net::TcpListener, stream::StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut listener = TcpListener::bind("127.0.0.1:8080").await.unwrap();
+ /// let mut incoming = listener.incoming();
+ ///
+ /// while let Some(stream) = incoming.next().await {
+ /// match stream {
+ /// Ok(stream) => {
+ /// println!("new client!");
+ /// }
+ /// Err(e) => { /* connection failed */ }
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub fn incoming(&mut self) -> Incoming<'_> {
+ Incoming::new(self)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`].
+ ///
+ /// [`set_ttl`]: #method.set_ttl
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let listener = TcpListener::bind("127.0.0.1:0").await?;
+ ///
+ /// listener.set_ttl(100).expect("could not set TTL");
+ /// assert_eq!(listener.ttl()?, 100);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.get_ref().ttl()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpListener;
+ ///
+ /// use std::io;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let listener = TcpListener::bind("127.0.0.1:0").await?;
+ ///
+ /// listener.set_ttl(100).expect("could not set TTL");
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.get_ref().set_ttl(ttl)
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for TcpListener {
+ type Item = io::Result<TcpStream>;
+
+ fn poll_next(
+ mut self: std::pin::Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Self::Item>> {
+ let (socket, _) = ready!(self.poll_accept(cx))?;
+ Poll::Ready(Some(Ok(socket)))
+ }
+}
+
+impl TryFrom<TcpListener> for mio::net::TcpListener {
+ type Error = io::Error;
+
+ /// Consumes value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: TcpListener) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::TcpListener> for TcpListener {
+ type Error = io::Error;
+
+ /// Consumes stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`TcpListener::from_std(stream)`](TcpListener::from_std).
+ fn try_from(stream: net::TcpListener) -> Result<Self, Self::Error> {
+ Self::from_std(stream)
+ }
+}
+
+impl fmt::Debug for TcpListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+#[cfg(unix)]
+mod sys {
+ use super::TcpListener;
+ use std::os::unix::prelude::*;
+
+ impl AsRawFd for TcpListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+ }
+}
+
+#[cfg(windows)]
+mod sys {
+ // TODO: let's land these upstream with mio and then we can add them here.
+ //
+ // use std::os::windows::prelude::*;
+ // use super::TcpListener;
+ //
+ // impl AsRawHandle for TcpListener {
+ // fn as_raw_handle(&self) -> RawHandle {
+ // self.listener.io().as_raw_handle()
+ // }
+ // }
+}
diff --git a/third_party/rust/tokio/src/net/tcp/mod.rs b/third_party/rust/tokio/src/net/tcp/mod.rs
new file mode 100644
index 0000000000..d5354b38d2
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/mod.rs
@@ -0,0 +1,13 @@
+//! TCP utility types.
+
+pub(crate) mod listener;
+pub(crate) use listener::TcpListener;
+
+mod incoming;
+pub use incoming::Incoming;
+
+mod split;
+pub use split::{ReadHalf, WriteHalf};
+
+pub(crate) mod stream;
+pub(crate) use stream::TcpStream;
diff --git a/third_party/rust/tokio/src/net/tcp/split.rs b/third_party/rust/tokio/src/net/tcp/split.rs
new file mode 100644
index 0000000000..cce50f6ab3
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/split.rs
@@ -0,0 +1,163 @@
+//! `TcpStream` split support.
+//!
+//! A `TcpStream` can be split into a `ReadHalf` and a
+//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf`
+//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`.
+//!
+//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized
+//! split has no associated overhead and enforces all invariants at the type
+//! level.
+
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::net::TcpStream;
+
+use bytes::Buf;
+use std::io;
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Read half of a `TcpStream`.
+#[derive(Debug)]
+pub struct ReadHalf<'a>(&'a TcpStream);
+
+/// Write half of a `TcpStream`.
+///
+/// Note that in the `AsyncWrite` implementation of `WriteHalf`,
+/// `poll_shutdown` actually shuts down the TCP stream in the write direction.
+#[derive(Debug)]
+pub struct WriteHalf<'a>(&'a TcpStream);
+
+pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
+ (ReadHalf(&*stream), WriteHalf(&*stream))
+}
+
+impl ReadHalf<'_> {
+ /// Attempt to receive data on the socket, without removing that data from
+ /// the queue, registering the current task for wakeup if data is not yet
+ /// available.
+ ///
+ /// See the [`TcpStream::poll_peek`] documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io;
+ /// use tokio::net::TcpStream;
+ ///
+ /// use futures::future::poll_fn;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?;
+ /// let (mut read_half, _) = stream.split();
+ /// let mut buf = [0; 10];
+ ///
+ /// poll_fn(|cx| {
+ /// read_half.poll_peek(cx, &mut buf)
+ /// }).await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`TcpStream::poll_peek`]: TcpStream::poll_peek
+ pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
+ self.0.poll_peek2(cx, buf)
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// See the [`TcpStream::peek`] documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ /// use tokio::prelude::*;
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// // Connect to a peer
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ /// let (mut read_half, _) = stream.split();
+ ///
+ /// let mut b1 = [0; 10];
+ /// let mut b2 = [0; 10];
+ ///
+ /// // Peek at the data
+ /// let n = read_half.peek(&mut b1).await?;
+ ///
+ /// // Read the data
+ /// assert_eq!(n, read_half.read(&mut b2[..n]).await?);
+ /// assert_eq!(&b1[..n], &b2[..n]);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`TcpStream::peek`]: TcpStream::peek
+ pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_peek(cx, buf)).await
+ }
+}
+
+impl AsyncRead for ReadHalf<'_> {
+ unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_read_priv(cx, buf)
+ }
+}
+
+impl AsyncWrite for WriteHalf<'_> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_write_priv(cx, buf)
+ }
+
+ fn poll_write_buf<B: Buf>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_write_buf_priv(cx, buf)
+ }
+
+ #[inline]
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ // tcp flush is a no-op
+ Poll::Ready(Ok(()))
+ }
+
+ // `poll_shutdown` on a write half shutdowns the stream in the "write" direction.
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.0.shutdown(Shutdown::Write).into()
+ }
+}
+
+impl AsRef<TcpStream> for ReadHalf<'_> {
+ fn as_ref(&self) -> &TcpStream {
+ self.0
+ }
+}
+
+impl AsRef<TcpStream> for WriteHalf<'_> {
+ fn as_ref(&self) -> &TcpStream {
+ self.0
+ }
+}
diff --git a/third_party/rust/tokio/src/net/tcp/stream.rs b/third_party/rust/tokio/src/net/tcp/stream.rs
new file mode 100644
index 0000000000..732c0ca381
--- /dev/null
+++ b/third_party/rust/tokio/src/net/tcp/stream.rs
@@ -0,0 +1,869 @@
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite, PollEvented};
+use crate::net::tcp::split::{split, ReadHalf, WriteHalf};
+use crate::net::ToSocketAddrs;
+
+use bytes::Buf;
+use iovec::IoVec;
+use std::convert::TryFrom;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::mem::MaybeUninit;
+use std::net::{self, Shutdown, SocketAddr};
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+cfg_tcp! {
+ /// A TCP stream between a local and a remote socket.
+ ///
+ /// A TCP stream can either be created by connecting to an endpoint, via the
+ /// [`connect`] method, or by [accepting] a connection from a [listener].
+ ///
+ /// [`connect`]: method@TcpStream::connect
+ /// [accepting]: method@super::TcpListener::accept
+ /// [listener]: struct@super::TcpListener
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ /// use tokio::prelude::*;
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// // Connect to a peer
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// // Write some data.
+ /// stream.write_all(b"hello world!").await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub struct TcpStream {
+ io: PollEvented<mio::net::TcpStream>,
+ }
+}
+
+impl TcpStream {
+ /// Opens a TCP connection to a remote host.
+ ///
+ /// `addr` is an address of the remote host. Anything which implements
+ /// the `ToSocketAddrs` trait can be supplied for the address.
+ ///
+ /// If `addr` yields multiple addresses, connect will be attempted with each
+ /// of the addresses until a connection is successful. If none of the
+ /// addresses result in a successful connection, the error returned from the
+ /// last connection attempt (the last address) is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ /// use tokio::prelude::*;
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// // Connect to a peer
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// // Write some data.
+ /// stream.write_all(b"hello world!").await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub async fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> {
+ let addrs = addr.to_socket_addrs().await?;
+
+ let mut last_err = None;
+
+ for addr in addrs {
+ match TcpStream::connect_addr(addr).await {
+ Ok(stream) => return Ok(stream),
+ Err(e) => last_err = Some(e),
+ }
+ }
+
+ Err(last_err.unwrap_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "could not resolve to any address",
+ )
+ }))
+ }
+
+ /// Establishes a connection to the specified `addr`.
+ async fn connect_addr(addr: SocketAddr) -> io::Result<TcpStream> {
+ let sys = mio::net::TcpStream::connect(&addr)?;
+ let stream = TcpStream::new(sys)?;
+
+ // Once we've connected, wait for the stream to be writable as
+ // that's when the actual connection has been initiated. Once we're
+ // writable we check for `take_socket_error` to see if the connect
+ // actually hit an error or not.
+ //
+ // If all that succeeded then we ship everything on up.
+ poll_fn(|cx| stream.io.poll_write_ready(cx)).await?;
+
+ if let Some(e) = stream.io.get_ref().take_error()? {
+ return Err(e);
+ }
+
+ Ok(stream)
+ }
+
+ pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result<TcpStream> {
+ let io = PollEvented::new(connected)?;
+ Ok(TcpStream { io })
+ }
+
+ /// Creates new `TcpStream` from a `std::net::TcpStream`.
+ ///
+ /// This function will convert a TCP stream created by the standard library
+ /// to a TCP stream ready to be used with the provided event loop handle.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ /// use tokio::net::TcpStream;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?;
+ /// let stream = TcpStream::from_std(std_stream)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn from_std(stream: net::TcpStream) -> io::Result<TcpStream> {
+ let io = mio::net::TcpStream::from_stream(stream)?;
+ let io = PollEvented::new(io)?;
+ Ok(TcpStream { io })
+ }
+
+ // Asynchronously connects a `TcpStream` that may have been built with a net2 `TcpBuilder`.
+ //
+ // This should be removed in favor of some in-crate TcpSocket builder API.
+ #[doc(hidden)]
+ pub async fn connect_std(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
+ let io = mio::net::TcpStream::connect_stream(stream, addr)?;
+ let io = PollEvented::new(io)?;
+ let stream = TcpStream { io };
+
+ // Once we've connected, wait for the stream to be writable as
+ // that's when the actual connection has been initiated. Once we're
+ // writable we check for `take_socket_error` to see if the connect
+ // actually hit an error or not.
+ //
+ // If all that succeeded then we ship everything on up.
+ poll_fn(|cx| stream.io.poll_write_ready(cx)).await?;
+
+ if let Some(e) = stream.io.get_ref().take_error()? {
+ return Err(e);
+ }
+
+ Ok(stream)
+ }
+
+ /// Returns the local address that this stream is bound to.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.local_addr()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+ /// Returns the remote address that this stream is connected to.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.peer_addr()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().peer_addr()
+ }
+
+ /// Attempts to receive data on the socket, without removing that data from
+ /// the queue, registering the current task for wakeup if data is not yet
+ /// available.
+ ///
+ /// # Return value
+ ///
+ /// The function returns:
+ ///
+ /// * `Poll::Pending` if data is not yet available.
+ /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked.
+ /// * `Poll::Ready(Err(e))` if an error is encountered.
+ ///
+ /// # Errors
+ ///
+ /// This function may encounter any standard I/O error except `WouldBlock`.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io;
+ /// use tokio::net::TcpStream;
+ ///
+ /// use futures::future::poll_fn;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut stream = TcpStream::connect("127.0.0.1:8000").await?;
+ /// let mut buf = [0; 10];
+ ///
+ /// poll_fn(|cx| {
+ /// stream.poll_peek(cx, &mut buf)
+ /// }).await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn poll_peek(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
+ self.poll_peek2(cx, buf)
+ }
+
+ pub(super) fn poll_peek2(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().peek(buf) {
+ Ok(ret) => Poll::Ready(Ok(ret)),
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+
+ /// Receives data on the socket from the remote address to which it is
+ /// connected, without removing that data from the queue. On success,
+ /// returns the number of bytes peeked.
+ ///
+ /// Successive calls return the same data. This is accomplished by passing
+ /// `MSG_PEEK` as a flag to the underlying recv system call.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ /// use tokio::prelude::*;
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// // Connect to a peer
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// let mut b1 = [0; 10];
+ /// let mut b2 = [0; 10];
+ ///
+ /// // Peek at the data
+ /// let n = stream.peek(&mut b1).await?;
+ ///
+ /// // Read the data
+ /// assert_eq!(n, stream.read(&mut b2[..n]).await?);
+ /// assert_eq!(&b1[..n], &b2[..n]);
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_peek(cx, buf)).await
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O on the specified
+ /// portions to return immediately with an appropriate value (see the
+ /// documentation of `Shutdown`).
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ /// use std::error::Error;
+ /// use std::net::Shutdown;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// // Connect to a peer
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// // Shutdown the stream
+ /// stream.shutdown(Shutdown::Write)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.io.get_ref().shutdown(how)
+ }
+
+ /// Gets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// For more information about this option, see [`set_nodelay`].
+ ///
+ /// [`set_nodelay`]: TcpStream::set_nodelay
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.nodelay()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn nodelay(&self) -> io::Result<bool> {
+ self.io.get_ref().nodelay()
+ }
+
+ /// Sets the value of the `TCP_NODELAY` option on this socket.
+ ///
+ /// If set, this option disables the Nagle algorithm. This means that
+ /// segments are always sent as soon as possible, even if there is only a
+ /// small amount of data. When not set, data is buffered until there is a
+ /// sufficient amount to send out, thereby avoiding the frequent sending of
+ /// small packets.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_nodelay(true)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+ self.io.get_ref().set_nodelay(nodelay)
+ }
+
+ /// Gets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// For more information about this option, see [`set_recv_buffer_size`].
+ ///
+ /// [`set_recv_buffer_size`]: TcpStream::set_recv_buffer_size
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.recv_buffer_size()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn recv_buffer_size(&self) -> io::Result<usize> {
+ self.io.get_ref().recv_buffer_size()
+ }
+
+ /// Sets the value of the `SO_RCVBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's receive buffer associated
+ /// with the socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_recv_buffer_size(100)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.get_ref().set_recv_buffer_size(size)
+ }
+
+ /// Gets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// For more information about this option, see [`set_send_buffer_size`].
+ ///
+ /// [`set_send_buffer_size`]: TcpStream::set_send_buffer_size
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.send_buffer_size()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn send_buffer_size(&self) -> io::Result<usize> {
+ self.io.get_ref().send_buffer_size()
+ }
+
+ /// Sets the value of the `SO_SNDBUF` option on this socket.
+ ///
+ /// Changes the size of the operating system's send buffer associated with
+ /// the socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_send_buffer_size(100)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+ self.io.get_ref().set_send_buffer_size(size)
+ }
+
+ /// Returns whether keepalive messages are enabled on this socket, and if so
+ /// the duration of time between them.
+ ///
+ /// For more information about this option, see [`set_keepalive`].
+ ///
+ /// [`set_keepalive`]: TcpStream::set_keepalive
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.keepalive()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+ self.io.get_ref().keepalive()
+ }
+
+ /// Sets whether keepalive messages are enabled to be sent on this socket.
+ ///
+ /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
+ /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
+ /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
+ ///
+ /// If `None` is specified then keepalive messages are disabled, otherwise
+ /// the duration specified will be the time to remain idle before sending a
+ /// TCP keepalive probe.
+ ///
+ /// Some platforms specify this value in seconds, so sub-second
+ /// specifications may be omitted.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_keepalive(None)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+ self.io.get_ref().set_keepalive(keepalive)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`].
+ ///
+ /// [`set_ttl`]: TcpStream::set_ttl
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.ttl()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.get_ref().ttl()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_ttl(123)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.get_ref().set_ttl(ttl)
+ }
+
+ /// Reads the linger duration for this socket by getting the `SO_LINGER`
+ /// option.
+ ///
+ /// For more information about this option, see [`set_linger`].
+ ///
+ /// [`set_linger`]: TcpStream::set_linger
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// println!("{:?}", stream.linger()?);
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn linger(&self) -> io::Result<Option<Duration>> {
+ self.io.get_ref().linger()
+ }
+
+ /// Sets the linger duration of this socket by setting the `SO_LINGER`
+ /// option.
+ ///
+ /// This option controls the action taken when a stream has unsent messages
+ /// and the stream is closed. If `SO_LINGER` is set, the system
+ /// shall block the process until it can transmit the data or until the
+ /// time expires.
+ ///
+ /// If `SO_LINGER` is not specified, and the stream is closed, the system
+ /// handles the call in a way that allows the process to continue as quickly
+ /// as possible.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+ /// let stream = TcpStream::connect("127.0.0.1:8080").await?;
+ ///
+ /// stream.set_linger(None)?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+ self.io.get_ref().set_linger(dur)
+ }
+
+ /// Splits a `TcpStream` into a read half and a write half, which can be used
+ /// to read and write the stream concurrently.
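+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of using both halves at once (assumes the `io-util`
+ /// feature for `AsyncReadExt`/`AsyncWriteExt`, and an echo-like peer):
+ ///
+ /// ```no_run
+ /// use tokio::io::{AsyncReadExt, AsyncWriteExt};
+ /// use tokio::net::TcpStream;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
+ /// let (mut rd, mut wr) = stream.split();
+ ///
+ /// let write = async { wr.write_all(b"ping").await };
+ /// let read = async {
+ /// let mut buf = [0u8; 4];
+ /// rd.read_exact(&mut buf).await?;
+ /// Ok::<_, std::io::Error>(buf)
+ /// };
+ ///
+ /// let (_, _reply) = tokio::try_join!(write, read)?;
+ /// # Ok(())
+ /// # }
+ /// ```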
+ pub fn split(&mut self) -> (ReadHalf<'_>, WriteHalf<'_>) {
+ split(self)
+ }
+
+ // == Poll IO functions that takes `&self` ==
+ //
+ // They are not public because (taken from the doc of `PollEvented`):
+ //
+ // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the
+ // caller must ensure that there are at most two tasks that use a
+ // `PollEvented` instance concurrently. One for reading and one for writing.
+ // While violating this requirement is "safe" from a Rust memory model point
+ // of view, it will result in unexpected behavior in the form of lost
+ // notifications and tasks hanging.
+
+ pub(crate) fn poll_read_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
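+ // A `WouldBlock` from the non-blocking read below means the cached
+ // readiness was stale: clear it (re-arming interest with the driver)
+ // and return `Pending` so the task is woken on the next readable event.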
+ match self.io.get_ref().read(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ pub(super) fn poll_write_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().write(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ pub(super) fn poll_write_buf_priv<B: Buf>(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<io::Result<usize>> {
+ use std::io::IoSlice;
+
+ ready!(self.io.poll_write_ready(cx))?;
+
+ // The `IoVec` (v0.1.x) type can't be zero-length, so create a dummy
+ // version from a 1-length slice which we'll overwrite with the
+ // `bytes_vectored` method.
+ static S: &[u8] = &[0];
+ const MAX_BUFS: usize = 64;
+
+ // IoSlice isn't Copy, so we must expand this manually ;_;
+ let mut slices: [IoSlice<'_>; MAX_BUFS] = [
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ IoSlice::new(S),
+ ];
+ let cnt = buf.bytes_vectored(&mut slices);
+
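+ // Convert the first `cnt` filled `IoSlice`s into the `IoVec`s that
+ // mio 0.6's `write_bufs` expects; the remaining dummy entries are
+ // never passed to the OS.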
+ let iovec = <&IoVec>::from(S);
+ let mut vecs = [iovec; MAX_BUFS];
+ for i in 0..cnt {
+ vecs[i] = (*slices[i]).into();
+ }
+
+ match self.io.get_ref().write_bufs(&vecs[..cnt]) {
+ Ok(n) => {
+ buf.advance(n);
+ Poll::Ready(Ok(n))
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+}
+
+impl TryFrom<TcpStream> for mio::net::TcpStream {
+ type Error = io::Error;
+
+ /// Consumes value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: TcpStream) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::TcpStream> for TcpStream {
+ type Error = io::Error;
+
+ /// Consumes stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`TcpStream::from_std(stream)`](TcpStream::from_std).
+ fn try_from(stream: net::TcpStream) -> Result<Self, Self::Error> {
+ Self::from_std(stream)
+ }
+}
+
+// ===== impl Read / Write =====
+
+impl AsyncRead for TcpStream {
+ unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.poll_read_priv(cx, buf)
+ }
+}
+
+impl AsyncWrite for TcpStream {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.poll_write_priv(cx, buf)
+ }
+
+ fn poll_write_buf<B: Buf>(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut B,
+ ) -> Poll<io::Result<usize>> {
+ self.poll_write_buf_priv(cx, buf)
+ }
+
+ #[inline]
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ // tcp flush is a no-op
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.shutdown(std::net::Shutdown::Write)?;
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl fmt::Debug for TcpStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+#[cfg(unix)]
+mod sys {
+ use super::TcpStream;
+ use std::os::unix::prelude::*;
+
+ impl AsRawFd for TcpStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+ }
+}
+
+#[cfg(windows)]
+mod sys {
+ // TODO: let's land these upstream with mio and then we can add them here.
+ //
+ // use std::os::windows::prelude::*;
+ // use super::TcpStream;
+ //
+ // impl AsRawHandle for TcpStream {
+ // fn as_raw_handle(&self) -> RawHandle {
+ // self.io.get_ref().as_raw_handle()
+ // }
+ // }
+}
diff --git a/third_party/rust/tokio/src/net/udp/mod.rs b/third_party/rust/tokio/src/net/udp/mod.rs
new file mode 100644
index 0000000000..d43121a1ca
--- /dev/null
+++ b/third_party/rust/tokio/src/net/udp/mod.rs
@@ -0,0 +1,7 @@
+//! UDP utility types.
+
+pub(crate) mod socket;
+pub(crate) use socket::UdpSocket;
+
+mod split;
+pub use split::{RecvHalf, ReuniteError, SendHalf};
diff --git a/third_party/rust/tokio/src/net/udp/socket.rs b/third_party/rust/tokio/src/net/udp/socket.rs
new file mode 100644
index 0000000000..faf1dca615
--- /dev/null
+++ b/third_party/rust/tokio/src/net/udp/socket.rs
@@ -0,0 +1,425 @@
+use crate::future::poll_fn;
+use crate::io::PollEvented;
+use crate::net::udp::split::{split, RecvHalf, SendHalf};
+use crate::net::ToSocketAddrs;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::net::{self, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::task::{Context, Poll};
+
+cfg_udp! {
+ /// A UDP socket.
+ pub struct UdpSocket {
+ io: PollEvented<mio::net::UdpSocket>,
+ }
+}
+
+impl UdpSocket {
+ /// This function will create a new UDP socket and attempt to bind it to
+ /// the `addr` provided.
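+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (binding to port 0 asks the OS for an ephemeral
+ /// port):
+ ///
+ /// ```no_run
+ /// use tokio::net::UdpSocket;
+ ///
+ /// # async fn dox() -> std::io::Result<()> {
+ /// let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ /// # let _ = socket;
+ /// # Ok(())
+ /// # }
+ /// ```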
+ pub async fn bind<A: ToSocketAddrs>(addr: A) -> io::Result<UdpSocket> {
+ let addrs = addr.to_socket_addrs().await?;
+ let mut last_err = None;
+
+ for addr in addrs {
+ match UdpSocket::bind_addr(addr) {
+ Ok(socket) => return Ok(socket),
+ Err(e) => last_err = Some(e),
+ }
+ }
+
+ Err(last_err.unwrap_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "could not resolve to any address",
+ )
+ }))
+ }
+
+ fn bind_addr(addr: SocketAddr) -> io::Result<UdpSocket> {
+ let sys = mio::net::UdpSocket::bind(&addr)?;
+ UdpSocket::new(sys)
+ }
+
+ fn new(socket: mio::net::UdpSocket) -> io::Result<UdpSocket> {
+ let io = PollEvented::new(socket)?;
+ Ok(UdpSocket { io })
+ }
+
+ /// Creates a new `UdpSocket` from the previously bound socket provided.
+ ///
+ /// The socket given will be registered with the current runtime's event
+ /// loop. This function requires that `socket` has previously been bound to
+ /// an address to work correctly.
+ ///
+ /// This can be used in conjunction with net2's `UdpBuilder` interface to
+ /// configure a socket before it's handed off, such as setting options like
+ /// `reuse_address` or binding to multiple addresses.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
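+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of handing off a std socket; the address is
+    /// illustrative:
+    ///
+    /// ```no_run
+    /// use tokio::net::UdpSocket;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let std_socket = std::net::UdpSocket::bind("127.0.0.1:8080").unwrap();
+    ///     std_socket.set_nonblocking(true).unwrap();
+    ///
+    ///     let socket = UdpSocket::from_std(std_socket).unwrap();
+    ///     println!("bound to {}", socket.local_addr().unwrap());
+    /// }
+    /// ```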
+ pub fn from_std(socket: net::UdpSocket) -> io::Result<UdpSocket> {
+ let io = mio::net::UdpSocket::from_socket(socket)?;
+ let io = PollEvented::new(io)?;
+ Ok(UdpSocket { io })
+ }
+
+ /// Splits the `UdpSocket` into a receive half and a send half. The two parts
+ /// can be used to receive and send datagrams concurrently, even from two
+ /// different tasks.
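+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the halves are moved to separate tasks here, and the
+    /// addresses are illustrative:
+    ///
+    /// ```no_run
+    /// use tokio::net::UdpSocket;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let socket = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+    ///     let (mut recv_half, mut send_half) = socket.split();
+    ///
+    ///     tokio::spawn(async move {
+    ///         let mut buf = [0u8; 32];
+    ///         let (n, origin) = recv_half.recv_from(&mut buf).await.unwrap();
+    ///         println!("received {} bytes from {}", n, origin);
+    ///     });
+    ///
+    ///     let target = "127.0.0.1:8080".parse().unwrap();
+    ///     send_half.send_to(b"hello", &target).await.unwrap();
+    /// }
+    /// ```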
+ pub fn split(self) -> (RecvHalf, SendHalf) {
+ split(self)
+ }
+
+ /// Returns the local address that this socket is bound to.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+    /// Connects the UDP socket by setting the default destination for `send()`
+    /// and limiting packets that are read via `recv` to those coming from the
+    /// address specified in `addr`.
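+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the remote address is illustrative:
+    ///
+    /// ```no_run
+    /// use tokio::net::UdpSocket;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut socket = UdpSocket::bind("0.0.0.0:0").await.unwrap();
+    ///     socket.connect("127.0.0.1:8080").await.unwrap();
+    ///
+    ///     // After `connect`, `send` and `recv` use this peer by default.
+    ///     socket.send(b"hello").await.unwrap();
+    /// }
+    /// ```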
+ pub async fn connect<A: ToSocketAddrs>(&self, addr: A) -> io::Result<()> {
+ let addrs = addr.to_socket_addrs().await?;
+ let mut last_err = None;
+
+ for addr in addrs {
+ match self.io.get_ref().connect(addr) {
+ Ok(_) => return Ok(()),
+ Err(e) => last_err = Some(e),
+ }
+ }
+
+ Err(last_err.unwrap_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "could not resolve to any address",
+ )
+ }))
+ }
+
+ /// Returns a future that sends data on the socket to the remote address to which it is connected.
+ /// On success, the future will resolve to the number of bytes written.
+ ///
+ /// The [`connect`] method will connect this socket to a remote address. The future
+ /// will resolve to an error if the socket is not connected.
+ ///
+ /// [`connect`]: #method.connect
+ pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_send(cx, buf)).await
+ }
+
+    // Poll I/O functions that take `&self` are provided for the split API.
+ //
+ // They are not public because (taken from the doc of `PollEvented`):
+ //
+ // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the
+ // caller must ensure that there are at most two tasks that use a
+ // `PollEvented` instance concurrently. One for reading and one for writing.
+ // While violating this requirement is "safe" from a Rust memory model point
+ // of view, it will result in unexpected behavior in the form of lost
+ // notifications and tasks hanging.
+ #[doc(hidden)]
+ pub fn poll_send(&self, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().send(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Returns a future that receives a single datagram message on the socket from
+ /// the remote address to which it is connected. On success, the future will resolve
+ /// to the number of bytes read.
+ ///
+    /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// The [`connect`] method will connect this socket to a remote address. The future
+ /// will fail if the socket is not connected.
+ ///
+ /// [`connect`]: #method.connect
+ pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_recv(cx, buf)).await
+ }
+
+ #[doc(hidden)]
+ pub fn poll_recv(&self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().recv(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Returns a future that sends data on the socket to the given address.
+ /// On success, the future will resolve to the number of bytes written.
+ ///
+ /// The future will resolve to an error if the IP version of the socket does
+ /// not match that of `target`.
+ pub async fn send_to<A: ToSocketAddrs>(&mut self, buf: &[u8], target: A) -> io::Result<usize> {
+ let mut addrs = target.to_socket_addrs().await?;
+
+ match addrs.next() {
+ Some(target) => poll_fn(|cx| self.poll_send_to(cx, buf, &target)).await,
+ None => Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "no addresses to send data to",
+ )),
+ }
+ }
+
+ // TODO: Public or not?
+ #[doc(hidden)]
+ pub fn poll_send_to(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ target: &SocketAddr,
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().send_to(buf, target) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Returns a future that receives a single datagram on the socket. On success,
+ /// the future resolves to the number of bytes read and the origin.
+ ///
+    /// The function must be called with a valid byte array `buf` of sufficient size
+ /// to hold the message bytes. If a message is too long to fit in the supplied
+ /// buffer, excess bytes may be discarded.
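+    ///
+    /// # Examples
+    ///
+    /// A minimal loopback sketch that sends a datagram to the socket's own
+    /// address and receives it back:
+    ///
+    /// ```no_run
+    /// use tokio::net::UdpSocket;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut socket = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+    ///     let addr = socket.local_addr().unwrap();
+    ///
+    ///     socket.send_to(b"ping", addr).await.unwrap();
+    ///
+    ///     let mut buf = [0u8; 4];
+    ///     let (n, origin) = socket.recv_from(&mut buf).await.unwrap();
+    ///     assert_eq!(&buf[..n], b"ping");
+    ///     assert_eq!(origin, addr);
+    /// }
+    /// ```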
+ pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ poll_fn(|cx| self.poll_recv_from(cx, buf)).await
+ }
+
+ #[doc(hidden)]
+ pub fn poll_recv_from(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<(usize, SocketAddr), io::Error>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().recv_from(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Gets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// For more information about this option, see [`set_broadcast`].
+ ///
+ /// [`set_broadcast`]: #method.set_broadcast
+ pub fn broadcast(&self) -> io::Result<bool> {
+ self.io.get_ref().broadcast()
+ }
+
+ /// Sets the value of the `SO_BROADCAST` option for this socket.
+ ///
+ /// When enabled, this socket is allowed to send packets to a broadcast
+ /// address.
+ pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+ self.io.get_ref().set_broadcast(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see [`set_multicast_loop_v4`].
+ ///
+ /// [`set_multicast_loop_v4`]: #method.set_multicast_loop_v4
+ pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+ self.io.get_ref().multicast_loop_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+ ///
+ /// If enabled, multicast packets will be looped back to the local socket.
+ ///
+ /// # Note
+ ///
+    /// This may not have any effect on IPv6 sockets.
+ pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+ self.io.get_ref().set_multicast_loop_v4(on)
+ }
+
+ /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_multicast_ttl_v4`].
+ ///
+ /// [`set_multicast_ttl_v4`]: #method.set_multicast_ttl_v4
+ pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+ self.io.get_ref().multicast_ttl_v4()
+ }
+
+ /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+ ///
+ /// Indicates the time-to-live value of outgoing multicast packets for
+    /// this socket. The default value is 1, which means that multicast packets
+ /// don't leave the local network unless explicitly requested.
+ ///
+ /// # Note
+ ///
+    /// This may not have any effect on IPv6 sockets.
+ pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+ self.io.get_ref().set_multicast_ttl_v4(ttl)
+ }
+
+ /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// For more information about this option, see [`set_multicast_loop_v6`].
+ ///
+ /// [`set_multicast_loop_v6`]: #method.set_multicast_loop_v6
+ pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+ self.io.get_ref().multicast_loop_v6()
+ }
+
+ /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+ ///
+ /// Controls whether this socket sees the multicast packets it sends itself.
+ ///
+ /// # Note
+ ///
+    /// This may not have any effect on IPv4 sockets.
+ pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+ self.io.get_ref().set_multicast_loop_v6(on)
+ }
+
+ /// Gets the value of the `IP_TTL` option for this socket.
+ ///
+ /// For more information about this option, see [`set_ttl`].
+ ///
+ /// [`set_ttl`]: #method.set_ttl
+ pub fn ttl(&self) -> io::Result<u32> {
+ self.io.get_ref().ttl()
+ }
+
+ /// Sets the value for the `IP_TTL` option on this socket.
+ ///
+ /// This value sets the time-to-live field that is used in every packet sent
+ /// from this socket.
+ pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+ self.io.get_ref().set_ttl(ttl)
+ }
+
+ /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// address of the local interface with which the system should join the
+ /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+ /// interface is chosen by the system.
+ pub fn join_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> {
+ self.io.get_ref().join_multicast_v4(&multiaddr, &interface)
+ }
+
+ /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+ ///
+ /// This function specifies a new multicast group for this socket to join.
+ /// The address must be a valid multicast address, and `interface` is the
+ /// index of the interface to join/leave (or 0 to indicate any interface).
+ pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.io.get_ref().join_multicast_v6(multiaddr, interface)
+ }
+
+ /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see [`join_multicast_v4`].
+ ///
+ /// [`join_multicast_v4`]: #method.join_multicast_v4
+ pub fn leave_multicast_v4(&self, multiaddr: Ipv4Addr, interface: Ipv4Addr) -> io::Result<()> {
+ self.io.get_ref().leave_multicast_v4(&multiaddr, &interface)
+ }
+
+ /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+ ///
+ /// For more information about this option, see [`join_multicast_v6`].
+ ///
+ /// [`join_multicast_v6`]: #method.join_multicast_v6
+ pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
+ self.io.get_ref().leave_multicast_v6(multiaddr, interface)
+ }
+}
+
+impl TryFrom<UdpSocket> for mio::net::UdpSocket {
+ type Error = io::Error;
+
+    /// Consumes the value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: UdpSocket) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::UdpSocket> for UdpSocket {
+ type Error = io::Error;
+
+    /// Consumes the stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`UdpSocket::from_std(stream)`](UdpSocket::from_std).
+ fn try_from(stream: net::UdpSocket) -> Result<Self, Self::Error> {
+ Self::from_std(stream)
+ }
+}
+
+impl fmt::Debug for UdpSocket {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+#[cfg(unix)]
+mod sys {
+ use super::UdpSocket;
+ use std::os::unix::prelude::*;
+
+ impl AsRawFd for UdpSocket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+ }
+}
+
+#[cfg(windows)]
+mod sys {
+ // TODO: let's land these upstream with mio and then we can add them here.
+ //
+ // use std::os::windows::prelude::*;
+ // use super::UdpSocket;
+ //
+ // impl AsRawHandle for UdpSocket {
+ // fn as_raw_handle(&self) -> RawHandle {
+ // self.io.get_ref().as_raw_handle()
+ // }
+ // }
+}
diff --git a/third_party/rust/tokio/src/net/udp/split.rs b/third_party/rust/tokio/src/net/udp/split.rs
new file mode 100644
index 0000000000..55542cb631
--- /dev/null
+++ b/third_party/rust/tokio/src/net/udp/split.rs
@@ -0,0 +1,148 @@
+//! [`UdpSocket`](../struct.UdpSocket.html) split support.
+//!
+//! The [`split`](../struct.UdpSocket.html#method.split) method splits a
+//! `UdpSocket` into a receive half and a send half, which can be used to
+//! receive and send datagrams concurrently, even from two different tasks.
+//!
+//! The halves provide access to the underlying socket, implementing
+//! `AsRef<UdpSocket>`. This allows you to call `UdpSocket` methods that take
+//! `&self`, e.g., to get the local address, to get and set socket options, to
+//! join or leave multicast groups, etc.
+//!
+//! The halves can be reunited to the original socket with their `reunite`
+//! methods.
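+//!
+//! A minimal reunite sketch (the bind address is illustrative):
+//!
+//! ```no_run
+//! use tokio::net::UdpSocket;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let socket = UdpSocket::bind("127.0.0.1:0").await.unwrap();
+//!     let (recv_half, send_half) = socket.split();
+//!
+//!     // The halves came from the same `split` call, so this succeeds.
+//!     let socket = recv_half.reunite(send_half).unwrap();
+//!     println!("reunited socket bound to {}", socket.local_addr().unwrap());
+//! }
+//! ```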
+
+use crate::future::poll_fn;
+use crate::net::udp::UdpSocket;
+
+use std::error::Error;
+use std::fmt;
+use std::io;
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+/// The send half after [`split`](super::UdpSocket::split).
+///
+/// Use [`send_to`](#method.send_to) or [`send`](#method.send) to send
+/// datagrams.
+#[derive(Debug)]
+pub struct SendHalf(Arc<UdpSocket>);
+
+/// The recv half after [`split`](super::UdpSocket::split).
+///
+/// Use [`recv_from`](#method.recv_from) or [`recv`](#method.recv) to receive
+/// datagrams.
+#[derive(Debug)]
+pub struct RecvHalf(Arc<UdpSocket>);
+
+pub(crate) fn split(socket: UdpSocket) -> (RecvHalf, SendHalf) {
+ let shared = Arc::new(socket);
+ let send = shared.clone();
+ let recv = shared;
+ (RecvHalf(recv), SendHalf(send))
+}
+
+/// Error indicating two halves were not from the same socket, and thus could
+/// not be `reunite`d.
+#[derive(Debug)]
+pub struct ReuniteError(pub SendHalf, pub RecvHalf);
+
+impl fmt::Display for ReuniteError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "tried to reunite halves that are not from the same socket"
+ )
+ }
+}
+
+impl Error for ReuniteError {}
+
+fn reunite(s: SendHalf, r: RecvHalf) -> Result<UdpSocket, ReuniteError> {
+ if Arc::ptr_eq(&s.0, &r.0) {
+ drop(r);
+ // Only two instances of the `Arc` are ever created, one for the
+ // receiver and one for the sender, and those `Arc`s are never exposed
+ // externally. And so when we drop one here, the other one must be the
+ // only remaining one.
+ Ok(Arc::try_unwrap(s.0).expect("udp: try_unwrap failed in reunite"))
+ } else {
+ Err(ReuniteError(s, r))
+ }
+}
+
+impl RecvHalf {
+ /// Attempts to put the two "halves" of a `UdpSocket` back together and
+ /// recover the original socket. Succeeds only if the two "halves"
+ /// originated from the same call to `UdpSocket::split`.
+ pub fn reunite(self, other: SendHalf) -> Result<UdpSocket, ReuniteError> {
+ reunite(other, self)
+ }
+
+ /// Returns a future that receives a single datagram on the socket. On success,
+ /// the future resolves to the number of bytes read and the origin.
+ ///
+    /// The function must be called with a valid byte array `buf` of sufficient size
+ /// to hold the message bytes. If a message is too long to fit in the supplied
+ /// buffer, excess bytes may be discarded.
+ pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ poll_fn(|cx| self.0.poll_recv_from(cx, buf)).await
+ }
+
+ /// Returns a future that receives a single datagram message on the socket from
+ /// the remote address to which it is connected. On success, the future will resolve
+ /// to the number of bytes read.
+ ///
+    /// The function must be called with a valid byte array `buf` of sufficient size to
+ /// hold the message bytes. If a message is too long to fit in the supplied buffer,
+ /// excess bytes may be discarded.
+ ///
+ /// The [`connect`] method will connect this socket to a remote address. The future
+ /// will fail if the socket is not connected.
+ ///
+ /// [`connect`]: super::UdpSocket::connect
+ pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.0.poll_recv(cx, buf)).await
+ }
+}
+
+impl SendHalf {
+ /// Attempts to put the two "halves" of a `UdpSocket` back together and
+ /// recover the original socket. Succeeds only if the two "halves"
+ /// originated from the same call to `UdpSocket::split`.
+ pub fn reunite(self, other: RecvHalf) -> Result<UdpSocket, ReuniteError> {
+ reunite(self, other)
+ }
+
+ /// Returns a future that sends data on the socket to the given address.
+ /// On success, the future will resolve to the number of bytes written.
+ ///
+ /// The future will resolve to an error if the IP version of the socket does
+ /// not match that of `target`.
+ pub async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+ poll_fn(|cx| self.0.poll_send_to(cx, buf, target)).await
+ }
+
+ /// Returns a future that sends data on the socket to the remote address to which it is connected.
+ /// On success, the future will resolve to the number of bytes written.
+ ///
+ /// The [`connect`] method will connect this socket to a remote address. The future
+ /// will resolve to an error if the socket is not connected.
+ ///
+ /// [`connect`]: super::UdpSocket::connect
+ pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.0.poll_send(cx, buf)).await
+ }
+}
+
+impl AsRef<UdpSocket> for SendHalf {
+ fn as_ref(&self) -> &UdpSocket {
+ &self.0
+ }
+}
+
+impl AsRef<UdpSocket> for RecvHalf {
+ fn as_ref(&self) -> &UdpSocket {
+ &self.0
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/datagram.rs b/third_party/rust/tokio/src/net/unix/datagram.rs
new file mode 100644
index 0000000000..ff0f4241d5
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/datagram.rs
@@ -0,0 +1,242 @@
+use crate::future::poll_fn;
+use crate::io::PollEvented;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::{self, SocketAddr};
+use std::path::Path;
+use std::task::{Context, Poll};
+
+cfg_uds! {
+ /// An I/O object representing a Unix datagram socket.
+ pub struct UnixDatagram {
+ io: PollEvented<mio_uds::UnixDatagram>,
+ }
+}
+
+impl UnixDatagram {
+ /// Creates a new `UnixDatagram` bound to the specified path.
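+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the path below is illustrative:
+    ///
+    /// ```no_run
+    /// use tokio::net::UnixDatagram;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let socket = UnixDatagram::bind("/path/to/the/socket").unwrap();
+    ///     println!("bound to {:?}", socket.local_addr().unwrap());
+    /// }
+    /// ```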
+ pub fn bind<P>(path: P) -> io::Result<UnixDatagram>
+ where
+ P: AsRef<Path>,
+ {
+ let socket = mio_uds::UnixDatagram::bind(path)?;
+ UnixDatagram::new(socket)
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// This function will create a pair of interconnected Unix sockets for
+ /// communicating back and forth between one another. Each socket will
+    /// be associated with the current runtime's event loop.
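+    ///
+    /// # Examples
+    ///
+    /// A minimal round-trip sketch over an anonymous pair:
+    ///
+    /// ```no_run
+    /// use tokio::net::UnixDatagram;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (mut a, mut b) = UnixDatagram::pair().unwrap();
+    ///
+    ///     a.send(b"ping").await.unwrap();
+    ///
+    ///     let mut buf = [0u8; 4];
+    ///     let n = b.recv(&mut buf).await.unwrap();
+    ///     assert_eq!(&buf[..n], b"ping");
+    /// }
+    /// ```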
+ pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
+ let (a, b) = mio_uds::UnixDatagram::pair()?;
+ let a = UnixDatagram::new(a)?;
+ let b = UnixDatagram::new(b)?;
+
+ Ok((a, b))
+ }
+
+ /// Consumes a `UnixDatagram` in the standard library and returns a
+ /// nonblocking `UnixDatagram` from this crate.
+ ///
+    /// The returned datagram will be associated with the current runtime's
+    /// event loop and is ready to perform I/O.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn from_std(datagram: net::UnixDatagram) -> io::Result<UnixDatagram> {
+ let socket = mio_uds::UnixDatagram::from_datagram(datagram)?;
+ let io = PollEvented::new(socket)?;
+ Ok(UnixDatagram { io })
+ }
+
+ fn new(socket: mio_uds::UnixDatagram) -> io::Result<UnixDatagram> {
+ let io = PollEvented::new(socket)?;
+ Ok(UnixDatagram { io })
+ }
+
+ /// Creates a new `UnixDatagram` which is not bound to any address.
+ pub fn unbound() -> io::Result<UnixDatagram> {
+ let socket = mio_uds::UnixDatagram::unbound()?;
+ UnixDatagram::new(socket)
+ }
+
+ /// Connects the socket to the specified address.
+ ///
+ /// The `send` method may be used to send data to the specified address.
+ /// `recv` and `recv_from` will only receive data from that address.
+ pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
+ self.io.get_ref().connect(path)
+ }
+
+ /// Sends data on the socket to the socket's peer.
+ pub async fn send(&mut self, buf: &[u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_send_priv(cx, buf)).await
+ }
+
+    // Poll I/O functions that take `&self` are provided for the split API.
+ //
+ // They are not public because (taken from the doc of `PollEvented`):
+ //
+ // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the
+ // caller must ensure that there are at most two tasks that use a
+ // `PollEvented` instance concurrently. One for reading and one for writing.
+ // While violating this requirement is "safe" from a Rust memory model point
+ // of view, it will result in unexpected behavior in the form of lost
+ // notifications and tasks hanging.
+ pub(crate) fn poll_send_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().send(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Receives data from the socket.
+ pub async fn recv(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ poll_fn(|cx| self.poll_recv_priv(cx, buf)).await
+ }
+
+ pub(crate) fn poll_recv_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().recv(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Sends data on the socket to the specified address.
+ pub async fn send_to<P>(&mut self, buf: &[u8], target: P) -> io::Result<usize>
+ where
+ P: AsRef<Path> + Unpin,
+ {
+ poll_fn(|cx| self.poll_send_to_priv(cx, buf, target.as_ref())).await
+ }
+
+ pub(crate) fn poll_send_to_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ target: &Path,
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().send_to(buf, target) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+    /// Receives data from the socket, returning the number of bytes read and
+    /// the source address.
+ pub async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+ poll_fn(|cx| self.poll_recv_from_priv(cx, buf)).await
+ }
+
+ pub(crate) fn poll_recv_from_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<(usize, SocketAddr), io::Error>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().recv_from(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ /// Returns the local address that this socket is bound to.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+ /// Returns the address of this socket's peer.
+ ///
+ /// The `connect` method will connect the socket to a peer.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().peer_addr()
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.get_ref().take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.io.get_ref().shutdown(how)
+ }
+}
+
+impl TryFrom<UnixDatagram> for mio_uds::UnixDatagram {
+ type Error = io::Error;
+
+    /// Consumes the value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: UnixDatagram) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::UnixDatagram> for UnixDatagram {
+ type Error = io::Error;
+
+    /// Consumes the stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`UnixDatagram::from_std(stream)`](UnixDatagram::from_std).
+ fn try_from(stream: net::UnixDatagram) -> Result<Self, Self::Error> {
+ Self::from_std(stream)
+ }
+}
+
+impl fmt::Debug for UnixDatagram {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+impl AsRawFd for UnixDatagram {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/incoming.rs b/third_party/rust/tokio/src/net/unix/incoming.rs
new file mode 100644
index 0000000000..af49360435
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/incoming.rs
@@ -0,0 +1,42 @@
+use crate::net::unix::{UnixListener, UnixStream};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// A stream of connections accepted from a `UnixListener`.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Incoming<'a> {
+ inner: &'a mut UnixListener,
+}
+
+impl Incoming<'_> {
+ pub(crate) fn new(listener: &mut UnixListener) -> Incoming<'_> {
+ Incoming { inner: listener }
+ }
+
+    /// Attempts to accept a `UnixStream` by polling the inner `UnixListener`
+    /// for an incoming connection.
+    ///
+    /// If the `UnixListener` isn't ready yet, `Poll::Pending` is returned and
+    /// the current task will be notified by a waker. Otherwise, `Poll::Ready`
+    /// with a `Result` containing the `UnixStream` is returned.
+ pub fn poll_accept(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<UnixStream>> {
+ let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+ Poll::Ready(Ok(socket))
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for Incoming<'_> {
+ type Item = io::Result<UnixStream>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let (socket, _) = ready!(self.inner.poll_accept(cx))?;
+ Poll::Ready(Some(Ok(socket)))
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/listener.rs b/third_party/rust/tokio/src/net/unix/listener.rs
new file mode 100644
index 0000000000..5acc1b7e82
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/listener.rs
@@ -0,0 +1,229 @@
+use crate::future::poll_fn;
+use crate::io::PollEvented;
+use crate::net::unix::{Incoming, UnixStream};
+
+use mio::Ready;
+use mio_uds;
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::{self, SocketAddr};
+use std::path::Path;
+use std::task::{Context, Poll};
+
+cfg_uds! {
+ /// A Unix socket which can accept connections from other Unix sockets.
+ ///
+ /// You can accept a new connection by using the [`accept`](`UnixListener::accept`) method. Alternatively `UnixListener`
+ /// implements the [`Stream`](`crate::stream::Stream`) trait, which allows you to use the listener in places that want a
+ /// stream. The stream will never return `None` and will also not yield the peer's `SocketAddr` structure. Iterating over
+ /// it is equivalent to calling accept in a loop.
+ ///
+ /// # Errors
+ ///
+ /// Note that accepting a connection can lead to various errors and not all
+ /// of them are necessarily fatal ‒ for example having too many open file
+ /// descriptors or the other side closing the connection while it waits in
+ /// an accept queue. These would terminate the stream if not handled in any
+ /// way.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::UnixListener;
+ /// use tokio::stream::StreamExt;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap();
+ /// while let Some(stream) = listener.next().await {
+ /// match stream {
+ /// Ok(stream) => {
+ /// println!("new client!");
+ /// }
+ /// Err(e) => { /* connection failed */ }
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub struct UnixListener {
+ io: PollEvented<mio_uds::UnixListener>,
+ }
+}
+
+impl UnixListener {
+ /// Creates a new `UnixListener` bound to the specified path.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn bind<P>(path: P) -> io::Result<UnixListener>
+ where
+ P: AsRef<Path>,
+ {
+ let listener = mio_uds::UnixListener::bind(path)?;
+ let io = PollEvented::new(listener)?;
+ Ok(UnixListener { io })
+ }
+
+ /// Consumes a `UnixListener` in the standard library and returns a
+ /// nonblocking `UnixListener` from this crate.
+ ///
+    /// The returned listener will be associated with the current runtime's
+    /// event loop and is ready to perform I/O.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn from_std(listener: net::UnixListener) -> io::Result<UnixListener> {
+ let listener = mio_uds::UnixListener::from_listener(listener)?;
+ let io = PollEvented::new(listener)?;
+ Ok(UnixListener { io })
+ }
+
+ /// Returns the local socket address of this listener.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.get_ref().take_error()
+ }
+
+ /// Accepts a new incoming connection to this listener.
+ pub async fn accept(&mut self) -> io::Result<(UnixStream, SocketAddr)> {
+ poll_fn(|cx| self.poll_accept(cx)).await
+ }
+
+ pub(crate) fn poll_accept(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<(UnixStream, SocketAddr)>> {
+ let (io, addr) = ready!(self.poll_accept_std(cx))?;
+
+ let io = mio_uds::UnixStream::from_stream(io)?;
+ Ok((UnixStream::new(io)?, addr)).into()
+ }
+
+ fn poll_accept_std(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<io::Result<(net::UnixStream, SocketAddr)>> {
+ ready!(self.io.poll_read_ready(cx, Ready::readable()))?;
+
+ match self.io.get_ref().accept_std() {
+ Ok(None) => {
+ self.io.clear_read_ready(cx, Ready::readable())?;
+ Poll::Pending
+ }
+ Ok(Some((sock, addr))) => Ok((sock, addr)).into(),
+ Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, Ready::readable())?;
+ Poll::Pending
+ }
+ Err(err) => Err(err).into(),
+ }
+ }
+
+ /// Returns a stream over the connections being received on this listener.
+ ///
+ /// Note that `UnixListener` also directly implements `Stream`.
+ ///
+ /// The returned stream will never return `None` and will also not yield the
+ /// peer's `SocketAddr` structure. Iterating over it is equivalent to
+ /// calling accept in a loop.
+ ///
+ /// # Errors
+ ///
+ /// Note that accepting a connection can lead to various errors and not all
+ /// of them are necessarily fatal ‒ for example having too many open file
+ /// descriptors or the other side closing the connection while it waits in
+ /// an accept queue. These would terminate the stream if not handled in any
+ /// way.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::net::UnixListener;
+ /// use tokio::stream::StreamExt;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut listener = UnixListener::bind("/path/to/the/socket").unwrap();
+ /// let mut incoming = listener.incoming();
+ ///
+ /// while let Some(stream) = incoming.next().await {
+ /// match stream {
+ /// Ok(stream) => {
+ /// println!("new client!");
+ /// }
+ /// Err(e) => { /* connection failed */ }
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub fn incoming(&mut self) -> Incoming<'_> {
+ Incoming::new(self)
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for UnixListener {
+ type Item = io::Result<UnixStream>;
+
+ fn poll_next(
+ mut self: std::pin::Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Self::Item>> {
+ let (socket, _) = ready!(self.poll_accept(cx))?;
+ Poll::Ready(Some(Ok(socket)))
+ }
+}
+
+impl TryFrom<UnixListener> for mio_uds::UnixListener {
+ type Error = io::Error;
+
+    /// Consumes the value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: UnixListener) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::UnixListener> for UnixListener {
+ type Error = io::Error;
+
+    /// Consumes the stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`UnixListener::from_std(stream)`](UnixListener::from_std).
+ fn try_from(stream: net::UnixListener) -> io::Result<Self> {
+ Self::from_std(stream)
+ }
+}
+
+impl fmt::Debug for UnixListener {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+impl AsRawFd for UnixListener {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/mod.rs b/third_party/rust/tokio/src/net/unix/mod.rs
new file mode 100644
index 0000000000..ddba60d10a
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/mod.rs
@@ -0,0 +1,18 @@
+//! Unix domain socket utility types.
+
+pub(crate) mod datagram;
+
+mod incoming;
+pub use incoming::Incoming;
+
+pub(crate) mod listener;
+pub(crate) use listener::UnixListener;
+
+mod split;
+pub use split::{ReadHalf, WriteHalf};
+
+pub(crate) mod stream;
+pub(crate) use stream::UnixStream;
+
+mod ucred;
+pub use ucred::UCred;
diff --git a/third_party/rust/tokio/src/net/unix/split.rs b/third_party/rust/tokio/src/net/unix/split.rs
new file mode 100644
index 0000000000..9b9fa5ee1d
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/split.rs
@@ -0,0 +1,74 @@
+//! `UnixStream` split support.
+//!
+//! A `UnixStream` can be split into a read half and a write half with
+//! `UnixStream::split`. The read half implements `AsyncRead` while the write
+//! half implements `AsyncWrite`.
+//!
+//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized
+//! split has no associated overhead and enforces all invariants at the type
+//! level.
+
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::net::UnixStream;
+
+use std::io;
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Read half of a `UnixStream`.
+#[derive(Debug)]
+pub struct ReadHalf<'a>(&'a UnixStream);
+
+/// Write half of a `UnixStream`.
+#[derive(Debug)]
+pub struct WriteHalf<'a>(&'a UnixStream);
+
+pub(crate) fn split(stream: &mut UnixStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
+ (ReadHalf(stream), WriteHalf(stream))
+}
+
+impl AsyncRead for ReadHalf<'_> {
+ unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_read_priv(cx, buf)
+ }
+}
+
+impl AsyncWrite for WriteHalf<'_> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_write_priv(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.0.shutdown(Shutdown::Write).into()
+ }
+}
+
+impl AsRef<UnixStream> for ReadHalf<'_> {
+ fn as_ref(&self) -> &UnixStream {
+ self.0
+ }
+}
+
+impl AsRef<UnixStream> for WriteHalf<'_> {
+ fn as_ref(&self) -> &UnixStream {
+ self.0
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/stream.rs b/third_party/rust/tokio/src/net/unix/stream.rs
new file mode 100644
index 0000000000..beae699962
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/stream.rs
@@ -0,0 +1,233 @@
+use crate::future::poll_fn;
+use crate::io::{AsyncRead, AsyncWrite, PollEvented};
+use crate::net::unix::split::{split, ReadHalf, WriteHalf};
+use crate::net::unix::ucred::{self, UCred};
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::mem::MaybeUninit;
+use std::net::Shutdown;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::{self, SocketAddr};
+use std::path::Path;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+cfg_uds! {
+ /// A structure representing a connected Unix socket.
+ ///
+ /// This socket can be connected directly with `UnixStream::connect` or accepted
+ /// from a listener with `UnixListener::incoming`. Additionally, a pair of
+ /// anonymous Unix sockets can be created with `UnixStream::pair`.
+ pub struct UnixStream {
+ io: PollEvented<mio_uds::UnixStream>,
+ }
+}
+
+impl UnixStream {
+ /// Connects to the socket named by `path`.
+ ///
+ /// This function will create a new Unix socket and connect to the path
+    /// specified, associating the returned stream with the current runtime's
+    /// event loop.
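+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the path is illustrative, and `write_all` comes from
+    /// `tokio::io::AsyncWriteExt` (assumed to be enabled via the `io-util`
+    /// feature):
+    ///
+    /// ```no_run
+    /// use tokio::io::AsyncWriteExt;
+    /// use tokio::net::UnixStream;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut stream = UnixStream::connect("/path/to/the/socket").await.unwrap();
+    ///     stream.write_all(b"hello").await.unwrap();
+    /// }
+    /// ```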
+ pub async fn connect<P>(path: P) -> io::Result<UnixStream>
+ where
+ P: AsRef<Path>,
+ {
+ let stream = mio_uds::UnixStream::connect(path)?;
+ let stream = UnixStream::new(stream)?;
+
+ poll_fn(|cx| stream.io.poll_write_ready(cx)).await?;
+ Ok(stream)
+ }
+
+ /// Consumes a `UnixStream` in the standard library and returns a
+ /// nonblocking `UnixStream` from this crate.
+ ///
+    /// The returned stream will be associated with the current runtime's
+    /// event loop and is ready to perform I/O.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if thread-local runtime is not set.
+ ///
+ /// The runtime is usually set implicitly when this function is called
+ /// from a future driven by a tokio runtime, otherwise runtime can be set
+ /// explicitly with [`Handle::enter`](crate::runtime::Handle::enter) function.
+ pub fn from_std(stream: net::UnixStream) -> io::Result<UnixStream> {
+ let stream = mio_uds::UnixStream::from_stream(stream)?;
+ let io = PollEvented::new(stream)?;
+
+ Ok(UnixStream { io })
+ }
+
+ /// Creates an unnamed pair of connected sockets.
+ ///
+ /// This function will create a pair of interconnected Unix sockets for
+ /// communicating back and forth between one another. Each socket will
+    /// be associated with the current runtime's event loop.
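+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that writes on one end and reads on the other
+    /// (`AsyncReadExt`/`AsyncWriteExt` are assumed to be enabled via the
+    /// `io-util` feature):
+    ///
+    /// ```no_run
+    /// use tokio::io::{AsyncReadExt, AsyncWriteExt};
+    /// use tokio::net::UnixStream;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let (mut a, mut b) = UnixStream::pair().unwrap();
+    ///
+    ///     a.write_all(b"hello").await.unwrap();
+    ///
+    ///     let mut buf = [0u8; 5];
+    ///     b.read_exact(&mut buf).await.unwrap();
+    ///     assert_eq!(&buf, b"hello");
+    /// }
+    /// ```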
+ pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
+ let (a, b) = mio_uds::UnixStream::pair()?;
+ let a = UnixStream::new(a)?;
+ let b = UnixStream::new(b)?;
+
+ Ok((a, b))
+ }
+
+ pub(crate) fn new(stream: mio_uds::UnixStream) -> io::Result<UnixStream> {
+ let io = PollEvented::new(stream)?;
+ Ok(UnixStream { io })
+ }
+
+ /// Returns the socket address of the local half of this connection.
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().local_addr()
+ }
+
+ /// Returns the socket address of the remote half of this connection.
+ pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+ self.io.get_ref().peer_addr()
+ }
+
+    /// Returns the effective credentials of the process which called `connect` or `pair`.
+ pub fn peer_cred(&self) -> io::Result<UCred> {
+ ucred::get_peer_cred(self)
+ }
+
+ /// Returns the value of the `SO_ERROR` option.
+ pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+ self.io.get_ref().take_error()
+ }
+
+ /// Shuts down the read, write, or both halves of this connection.
+ ///
+ /// This function will cause all pending and future I/O calls on the
+ /// specified portions to immediately return with an appropriate value
+ /// (see the documentation of `Shutdown`).
+ pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+ self.io.get_ref().shutdown(how)
+ }
+
+    /// Splits a `UnixStream` into a read half and a write half, which can be used
+ /// to read and write the stream concurrently.
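+    ///
+    /// # Examples
+    ///
+    /// A minimal echo sketch; the path is illustrative, and `copy` is only one
+    /// possible use of the halves:
+    ///
+    /// ```no_run
+    /// use tokio::io;
+    /// use tokio::net::UnixStream;
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let mut stream = UnixStream::connect("/path/to/the/socket").await.unwrap();
+    ///     let (mut read_half, mut write_half) = stream.split();
+    ///
+    ///     // Echo everything received back to the peer.
+    ///     io::copy(&mut read_half, &mut write_half).await.unwrap();
+    /// }
+    /// ```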
+ pub fn split(&mut self) -> (ReadHalf<'_>, WriteHalf<'_>) {
+ split(self)
+ }
+}
+
+impl TryFrom<UnixStream> for mio_uds::UnixStream {
+ type Error = io::Error;
+
+    /// Consumes the value, returning the mio I/O object.
+ ///
+ /// See [`PollEvented::into_inner`] for more details about
+ /// resource deregistration that happens during the call.
+ ///
+ /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner
+ fn try_from(value: UnixStream) -> Result<Self, Self::Error> {
+ value.io.into_inner()
+ }
+}
+
+impl TryFrom<net::UnixStream> for UnixStream {
+ type Error = io::Error;
+
+    /// Consumes the stream, returning the tokio I/O object.
+ ///
+ /// This is equivalent to
+ /// [`UnixStream::from_std(stream)`](UnixStream::from_std).
+ fn try_from(stream: net::UnixStream) -> io::Result<Self> {
+ Self::from_std(stream)
+ }
+}
+
+impl AsyncRead for UnixStream {
+ unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.poll_read_priv(cx, buf)
+ }
+}
+
+impl AsyncWrite for UnixStream {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.poll_write_priv(cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.shutdown(std::net::Shutdown::Write)?;
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl UnixStream {
+    // == Poll I/O functions that take `&self` ==
+ //
+ // They are not public because (taken from the doc of `PollEvented`):
+ //
+ // While `PollEvented` is `Sync` (if the underlying I/O type is `Sync`), the
+ // caller must ensure that there are at most two tasks that use a
+ // `PollEvented` instance concurrently. One for reading and one for writing.
+ // While violating this requirement is "safe" from a Rust memory model point
+ // of view, it will result in unexpected behavior in the form of lost
+ // notifications and tasks hanging.
+
+ pub(crate) fn poll_read_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?;
+
+ match self.io.get_ref().read(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_read_ready(cx, mio::Ready::readable())?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+
+ pub(crate) fn poll_write_priv(
+ &self,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ ready!(self.io.poll_write_ready(cx))?;
+
+ match self.io.get_ref().write(buf) {
+ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+ self.io.clear_write_ready(cx)?;
+ Poll::Pending
+ }
+ x => Poll::Ready(x),
+ }
+ }
+}
+
+impl fmt::Debug for UnixStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.io.get_ref().fmt(f)
+ }
+}
+
+impl AsRawFd for UnixStream {
+ fn as_raw_fd(&self) -> RawFd {
+ self.io.get_ref().as_raw_fd()
+ }
+}
diff --git a/third_party/rust/tokio/src/net/unix/ucred.rs b/third_party/rust/tokio/src/net/unix/ucred.rs
new file mode 100644
index 0000000000..cdd77ea414
--- /dev/null
+++ b/third_party/rust/tokio/src/net/unix/ucred.rs
@@ -0,0 +1,151 @@
+use libc::{gid_t, uid_t};
+
+/// Credentials of a process
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+pub struct UCred {
+ /// UID (user ID) of the process
+ pub uid: uid_t,
+ /// GID (group ID) of the process
+ pub gid: gid_t,
+}
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub(crate) use self::impl_linux::get_peer_cred;
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub(crate) use self::impl_macos::get_peer_cred;
+
+#[cfg(target_os = "solaris")]
+pub(crate) use self::impl_solaris::get_peer_cred;
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub(crate) mod impl_linux {
+ use crate::net::unix::UnixStream;
+
+ use libc::{c_void, getsockopt, socklen_t, SOL_SOCKET, SO_PEERCRED};
+ use std::{io, mem};
+
+ use libc::ucred;
+
+ pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> {
+ use std::os::unix::io::AsRawFd;
+
+ unsafe {
+ let raw_fd = sock.as_raw_fd();
+
+ let mut ucred = ucred {
+ pid: 0,
+ uid: 0,
+ gid: 0,
+ };
+
+ let ucred_size = mem::size_of::<ucred>();
+
+            // These paranoid checks should be optimized out
+ assert!(mem::size_of::<u32>() <= mem::size_of::<usize>());
+ assert!(ucred_size <= u32::max_value() as usize);
+
+ let mut ucred_size = ucred_size as socklen_t;
+
+ let ret = getsockopt(
+ raw_fd,
+ SOL_SOCKET,
+ SO_PEERCRED,
+ &mut ucred as *mut ucred as *mut c_void,
+ &mut ucred_size,
+ );
+ if ret == 0 && ucred_size as usize == mem::size_of::<ucred>() {
+ Ok(super::UCred {
+ uid: ucred.uid,
+ gid: ucred.gid,
+ })
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+}
+
+#[cfg(any(
+ target_os = "dragonfly",
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "freebsd",
+ target_os = "netbsd",
+ target_os = "openbsd"
+))]
+pub(crate) mod impl_macos {
+ use crate::net::unix::UnixStream;
+
+ use libc::getpeereid;
+ use std::io;
+ use std::mem::MaybeUninit;
+ use std::os::unix::io::AsRawFd;
+
+ pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> {
+ unsafe {
+ let raw_fd = sock.as_raw_fd();
+
+ let mut uid = MaybeUninit::uninit();
+ let mut gid = MaybeUninit::uninit();
+
+ let ret = getpeereid(raw_fd, uid.as_mut_ptr(), gid.as_mut_ptr());
+
+ if ret == 0 {
+ Ok(super::UCred {
+ uid: uid.assume_init(),
+ gid: gid.assume_init(),
+ })
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+}
+
+#[cfg(target_os = "solaris")]
+pub(crate) mod impl_solaris {
+ use crate::net::unix::UnixStream;
+ use std::io;
+ use std::os::unix::io::AsRawFd;
+ use std::ptr;
+
+ #[allow(non_camel_case_types)]
+ enum ucred_t {}
+
+ extern "C" {
+ fn ucred_free(cred: *mut ucred_t);
+ fn ucred_geteuid(cred: *const ucred_t) -> super::uid_t;
+ fn ucred_getegid(cred: *const ucred_t) -> super::gid_t;
+
+ fn getpeerucred(fd: std::os::raw::c_int, cred: *mut *mut ucred_t) -> std::os::raw::c_int;
+ }
+
+ pub(crate) fn get_peer_cred(sock: &UnixStream) -> io::Result<super::UCred> {
+ unsafe {
+ let raw_fd = sock.as_raw_fd();
+
+            let mut cred: *mut ucred_t = ptr::null_mut();
+
+ let ret = getpeerucred(raw_fd, &mut cred);
+
+ if ret == 0 {
+ let uid = ucred_geteuid(cred);
+ let gid = ucred_getegid(cred);
+
+ ucred_free(cred);
+
+ Ok(super::UCred { uid, gid })
+ } else {
+ Err(io::Error::last_os_error())
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/park/either.rs b/third_party/rust/tokio/src/park/either.rs
new file mode 100644
index 0000000000..67f1e17274
--- /dev/null
+++ b/third_party/rust/tokio/src/park/either.rs
@@ -0,0 +1,65 @@
+use crate::park::{Park, Unpark};
+
+use std::fmt;
+use std::time::Duration;
+
+pub(crate) enum Either<A, B> {
+ A(A),
+ B(B),
+}
+
+impl<A, B> Park for Either<A, B>
+where
+ A: Park,
+ B: Park,
+{
+ type Unpark = Either<A::Unpark, B::Unpark>;
+ type Error = Either<A::Error, B::Error>;
+
+ fn unpark(&self) -> Self::Unpark {
+ match self {
+ Either::A(a) => Either::A(a.unpark()),
+ Either::B(b) => Either::B(b.unpark()),
+ }
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ match self {
+ Either::A(a) => a.park().map_err(Either::A),
+ Either::B(b) => b.park().map_err(Either::B),
+ }
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ match self {
+ Either::A(a) => a.park_timeout(duration).map_err(Either::A),
+ Either::B(b) => b.park_timeout(duration).map_err(Either::B),
+ }
+ }
+}
+
+impl<A, B> Unpark for Either<A, B>
+where
+ A: Unpark,
+ B: Unpark,
+{
+ fn unpark(&self) {
+ match self {
+ Either::A(a) => a.unpark(),
+ Either::B(b) => b.unpark(),
+ }
+ }
+}
+
+impl<A, B> fmt::Debug for Either<A, B>
+where
+ A: fmt::Debug,
+ B: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Either::A(a) => a.fmt(fmt),
+ Either::B(b) => b.fmt(fmt),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/park/mod.rs b/third_party/rust/tokio/src/park/mod.rs
new file mode 100644
index 0000000000..a3e49bbede
--- /dev/null
+++ b/third_party/rust/tokio/src/park/mod.rs
@@ -0,0 +1,118 @@
+//! Abstraction over blocking and unblocking the current thread.
+//!
+//! Provides an abstraction over blocking the current thread. This is similar to
+//! the park / unpark constructs provided by `std` but made generic. This allows
+//! embedding custom functionality to perform when the thread is blocked.
+//!
+//! A blocked `Park` instance is unblocked by calling `unpark` on its
+//! `Unpark` handle.
+//!
+//! The `ParkThread` struct implements `Park` using `thread::park` to put the
+//! thread to sleep. The Tokio reactor also implements park, but uses
+//! `mio::Poll` to block the thread instead.
+//!
+//! The `Park` trait is composable. A timer implementation might decorate a
+//! `Park` implementation by checking if any timeouts have elapsed after the
+//! inner `Park` implementation unblocks.
+//!
+//! # Model
+//!
+//! Conceptually, each `Park` instance has an associated token, which is
+//! initially not present:
+//!
+//! * The `park` method blocks the current thread unless or until the token is
+//! available, at which point it atomically consumes the token.
+//! * The `unpark` method atomically makes the token available if it wasn't
+//! already.
+//!
+//! Some things to note:
+//!
+//! * If `unpark` is called before `park`, the next call to `park` will
+//! **not** block the thread.
+//! * **Spurious** wakeups are permitted, i.e., the `park` method may unblock
+//! even if `unpark` was not called.
+//! * `park_timeout` does the same as `park` but allows specifying a maximum
+//! time to block the thread for.
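+//!
+//! A sketch of the token model using the crate-internal `ParkThread`
+//! (illustrative only; these types are not part of the public API):
+//!
+//! ```ignore
+//! let mut park = ParkThread::new();
+//! let unpark = park.unpark();
+//!
+//! // Make the token available first; the following `park` call consumes
+//! // it and returns immediately instead of blocking the thread.
+//! unpark.unpark();
+//! park.park().unwrap();
+//! ```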
+
+cfg_resource_drivers! {
+ mod either;
+ pub(crate) use self::either::Either;
+}
+
+mod thread;
+pub(crate) use self::thread::ParkThread;
+
+cfg_blocking_impl! {
+ pub(crate) use self::thread::{CachedParkThread, ParkError};
+}
+
+use std::sync::Arc;
+use std::time::Duration;
+
+/// Block the current thread.
+pub(crate) trait Park {
+ /// Unpark handle type for the `Park` implementation.
+ type Unpark: Unpark;
+
+    /// Error returned by `park`.
+ type Error;
+
+ /// Gets a new `Unpark` handle associated with this `Park` instance.
+ fn unpark(&self) -> Self::Unpark;
+
+ /// Blocks the current thread unless or until the token is available.
+ ///
+ /// A call to `park` does not guarantee that the thread will remain blocked
+ /// forever, and callers should be prepared for this possibility. This
+    /// function may wake up spuriously for any reason.
+ ///
+ /// # Panics
+ ///
+ /// This function **should** not panic, but ultimately, panics are left as
+ /// an implementation detail. Refer to the documentation for the specific
+    /// `Park` implementation.
+ fn park(&mut self) -> Result<(), Self::Error>;
+
+ /// Parks the current thread for at most `duration`.
+ ///
+ /// This function is the same as `park` but allows specifying a maximum time
+ /// to block the thread for.
+ ///
+    /// As with `park`, there is no guarantee that the thread will remain
+ /// blocked for any amount of time. Spurious wakeups are permitted for any
+ /// reason.
+ ///
+ /// # Panics
+ ///
+ /// This function **should** not panic, but ultimately, panics are left as
+ /// an implementation detail. Refer to the documentation for the specific
+    /// `Park` implementation.
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error>;
+}
+
+/// Unblock a thread blocked by the associated `Park` instance.
+pub(crate) trait Unpark: Sync + Send + 'static {
+ /// Unblocks a thread that is blocked by the associated `Park` handle.
+ ///
+ /// Calling `unpark` atomically makes available the unpark token, if it is
+ /// not already available.
+ ///
+ /// # Panics
+ ///
+ /// This function **should** not panic, but ultimately, panics are left as
+ /// an implementation detail. Refer to the documentation for the specific
+    /// `Unpark` implementation.
+ fn unpark(&self);
+}
+
+impl Unpark for Box<dyn Unpark> {
+ fn unpark(&self) {
+ (**self).unpark()
+ }
+}
+
+impl Unpark for Arc<dyn Unpark> {
+ fn unpark(&self) {
+ (**self).unpark()
+ }
+}
diff --git a/third_party/rust/tokio/src/park/thread.rs b/third_party/rust/tokio/src/park/thread.rs
new file mode 100644
index 0000000000..a8cdf1432b
--- /dev/null
+++ b/third_party/rust/tokio/src/park/thread.rs
@@ -0,0 +1,317 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::{Arc, Condvar, Mutex};
+use crate::park::{Park, Unpark};
+
+use std::sync::atomic::Ordering::SeqCst;
+use std::time::Duration;
+
+#[derive(Debug)]
+pub(crate) struct ParkThread {
+ inner: Arc<Inner>,
+}
+
+pub(crate) type ParkError = ();
+
+/// Unblocks a thread that was blocked by `ParkThread`.
+#[derive(Clone, Debug)]
+pub(crate) struct UnparkThread {
+ inner: Arc<Inner>,
+}
+
+#[derive(Debug)]
+struct Inner {
+ state: AtomicUsize,
+ mutex: Mutex<()>,
+ condvar: Condvar,
+}
+
+const EMPTY: usize = 0;
+const PARKED: usize = 1;
+const NOTIFIED: usize = 2;
+
+thread_local! {
+ static CURRENT_PARKER: ParkThread = ParkThread::new();
+}
+
+// ==== impl ParkThread ====
+
+impl ParkThread {
+ pub(crate) fn new() -> Self {
+ Self {
+ inner: Arc::new(Inner {
+ state: AtomicUsize::new(EMPTY),
+ mutex: Mutex::new(()),
+ condvar: Condvar::new(),
+ }),
+ }
+ }
+}
+
+impl Park for ParkThread {
+ type Unpark = UnparkThread;
+ type Error = ParkError;
+
+ fn unpark(&self) -> Self::Unpark {
+ let inner = self.inner.clone();
+ UnparkThread { inner }
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ self.inner.park();
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ self.inner.park_timeout(duration);
+ Ok(())
+ }
+}
+
+// ==== impl Inner ====
+
+impl Inner {
+    /// Parks the current thread.
+ fn park(&self) {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
+ .is_ok()
+ {
+ return;
+ }
+
+ // Otherwise we need to coordinate going to sleep
+ let mut m = self.mutex.lock().unwrap();
+
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+
+ return;
+ }
+ Err(actual) => panic!("inconsistent park state; actual = {}", actual),
+ }
+
+ loop {
+ m = self.condvar.wait(m).unwrap();
+
+ if self
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
+ .is_ok()
+ {
+ // got a notification
+ return;
+ }
+
+ // spurious wakeup, go back to sleep
+ }
+ }
+
+ fn park_timeout(&self, dur: Duration) {
+        // Like `park` above, we have a fast path for an already-notified
+        // thread: consume the notification and return quickly.
+ if self
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
+ .is_ok()
+ {
+ return;
+ }
+
+ if dur == Duration::from_millis(0) {
+ return;
+ }
+
+ let m = self.mutex.lock().unwrap();
+
+ match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read again here, see `park`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+
+ return;
+ }
+ Err(actual) => panic!("inconsistent park_timeout state; actual = {}", actual),
+ }
+
+ // Wait with a timeout, and if we spuriously wake up or otherwise wake up
+ // from a notification, we just want to unconditionally set the state back to
+ // empty, either consuming a notification or un-flagging ourselves as
+ // parked.
+ let (_m, _result) = self.condvar.wait_timeout(m, dur).unwrap();
+
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => {} // got a notification, hurray!
+ PARKED => {} // no notification, alas
+ n => panic!("inconsistent park_timeout state: {}", n),
+ }
+ }
+
+ fn unpark(&self) {
+ // To ensure the unparked thread will observe any writes we made before
+ // this call, we must perform a release operation that `park` can
+ // synchronize with. To do that we must write `NOTIFIED` even if `state`
+ // is already `NOTIFIED`. That is why this must be a swap rather than a
+ // compare-and-swap that returns if it reads `NOTIFIED` on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => return, // no one was waiting
+ NOTIFIED => return, // already unparked
+ PARKED => {} // gotta go wake someone up
+ _ => panic!("inconsistent state in unpark"),
+ }
+
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ drop(self.mutex.lock().unwrap());
+
+ self.condvar.notify_one()
+ }
+}
+
+impl Default for ParkThread {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// ===== impl UnparkThread =====
+
+impl Unpark for UnparkThread {
+ fn unpark(&self) {
+ self.inner.unpark();
+ }
+}
+
+cfg_blocking_impl! {
+ use std::marker::PhantomData;
+ use std::rc::Rc;
+
+ use std::mem;
+ use std::task::{RawWaker, RawWakerVTable, Waker};
+
+ /// Blocks the current thread using a condition variable.
+ #[derive(Debug)]
+ pub(crate) struct CachedParkThread {
+ _anchor: PhantomData<Rc<()>>,
+ }
+
+ impl CachedParkThread {
+ /// Creates a new `CachedParkThread` for the current thread.
+ ///
+ /// This type cannot be moved to other threads, so it should be created on
+ /// the thread that the caller intends to park.
+ pub(crate) fn new() -> CachedParkThread {
+ CachedParkThread {
+ _anchor: PhantomData,
+ }
+ }
+
+ pub(crate) fn get_unpark(&self) -> Result<UnparkThread, ParkError> {
+ self.with_current(|park_thread| park_thread.unpark())
+ }
+
+ /// Get a reference to the `ParkThread` handle for this thread.
+ fn with_current<F, R>(&self, f: F) -> Result<R, ParkError>
+ where
+ F: FnOnce(&ParkThread) -> R,
+ {
+ CURRENT_PARKER.try_with(|inner| f(inner))
+ .map_err(|_| ())
+ }
+ }
+
+ impl Park for CachedParkThread {
+ type Unpark = UnparkThread;
+ type Error = ParkError;
+
+ fn unpark(&self) -> Self::Unpark {
+ self.get_unpark().unwrap()
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ self.with_current(|park_thread| park_thread.inner.park())?;
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ self.with_current(|park_thread| park_thread.inner.park_timeout(duration))?;
+ Ok(())
+ }
+ }
+
+ impl UnparkThread {
+ pub(crate) fn into_waker(self) -> Waker {
+ unsafe {
+ let raw = unparker_to_raw_waker(self.inner);
+ Waker::from_raw(raw)
+ }
+ }
+ }
+
+ impl Inner {
+ #[allow(clippy::wrong_self_convention)]
+ fn into_raw(this: Arc<Inner>) -> *const () {
+ Arc::into_raw(this) as *const ()
+ }
+
+ unsafe fn from_raw(ptr: *const ()) -> Arc<Inner> {
+ Arc::from_raw(ptr as *const Inner)
+ }
+ }
+
+ unsafe fn unparker_to_raw_waker(unparker: Arc<Inner>) -> RawWaker {
+ RawWaker::new(
+ Inner::into_raw(unparker),
+ &RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker),
+ )
+ }
+
+ unsafe fn clone(raw: *const ()) -> RawWaker {
+ let unparker = Inner::from_raw(raw);
+
+ // Increment the ref count
+ mem::forget(unparker.clone());
+
+ unparker_to_raw_waker(unparker)
+ }
+
+ unsafe fn drop_waker(raw: *const ()) {
+ let _ = Inner::from_raw(raw);
+ }
+
+ unsafe fn wake(raw: *const ()) {
+ let unparker = Inner::from_raw(raw);
+ unparker.unpark();
+ }
+
+ unsafe fn wake_by_ref(raw: *const ()) {
+ let unparker = Inner::from_raw(raw);
+ unparker.unpark();
+
+ // We don't actually own a reference to the unparker
+ mem::forget(unparker);
+ }
+}
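
For context on the `RawWakerVTable` plumbing above: the same `Arc` refcount bookkeeping can be written against any notifier type. Below is a minimal, self-contained sketch of that pattern; the `Notify` type is a hypothetical stand-in for `Inner`, not part of tokio's API.

```rust
use std::mem;
use std::sync::Arc;
use std::task::{RawWaker, RawWakerVTable, Waker};

// Hypothetical stand-in for `Inner`: any Arc-wrapped type works.
struct Notify;

impl Notify {
    fn notify(&self) {
        println!("notified");
    }
}

const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop_raw);

fn raw_waker(notify: Arc<Notify>) -> RawWaker {
    RawWaker::new(Arc::into_raw(notify) as *const (), &VTABLE)
}

unsafe fn clone(ptr: *const ()) -> RawWaker {
    let arc = Arc::from_raw(ptr as *const Notify);
    mem::forget(arc.clone()); // bump the refcount for the new waker
    raw_waker(arc) // hand the original refcount back via into_raw
}

unsafe fn wake(ptr: *const ()) {
    // By-value wake owns a refcount; dropping `arc` releases it.
    let arc = Arc::from_raw(ptr as *const Notify);
    arc.notify();
}

unsafe fn wake_by_ref(ptr: *const ()) {
    let arc = Arc::from_raw(ptr as *const Notify);
    arc.notify();
    mem::forget(arc); // we do not own this refcount
}

unsafe fn drop_raw(ptr: *const ()) {
    drop(Arc::from_raw(ptr as *const Notify));
}

fn main() {
    let waker = unsafe { Waker::from_raw(raw_waker(Arc::new(Notify))) };
    waker.wake_by_ref(); // prints "notified", keeps the waker alive
    waker.wake(); // prints "notified", consumes the waker
}
```

Note that `std::task::Wake` (stable since Rust 1.51, newer than this vendored snapshot) now derives exactly this vtable from an `Arc<impl Wake>`; the manual version above is what predates it.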
diff --git a/third_party/rust/tokio/src/prelude.rs b/third_party/rust/tokio/src/prelude.rs
new file mode 100644
index 0000000000..1909f9da6a
--- /dev/null
+++ b/third_party/rust/tokio/src/prelude.rs
@@ -0,0 +1,21 @@
+#![cfg(not(loom))]
+
+//! A "prelude" for users of the `tokio` crate.
+//!
+//! This prelude is similar to the standard library's prelude in that you'll
+//! almost always want to import its entire contents, but unlike the standard
+//! library's prelude you'll have to do so manually:
+//!
+//! ```
+//! # #![allow(warnings)]
+//! use tokio::prelude::*;
+//! ```
+//!
+//! The prelude may grow over time as additional items see ubiquitous use.
+
+pub use crate::io::{self, AsyncBufRead, AsyncRead, AsyncWrite};
+
+cfg_io_util! {
+ #[doc(no_inline)]
+ pub use crate::io::{AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _};
+}
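
As a usage sketch (assuming this tokio 0.2-era API with the relevant features enabled), the anonymous `as _` re-exports above are what let extension-trait methods such as `write_all` resolve without naming the trait:

```rust
use tokio::prelude::*; // AsyncReadExt/AsyncWriteExt methods come into scope as `_`

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut file = tokio::fs::File::create("hello.txt").await?;
    // write_all is provided by AsyncWriteExt, imported anonymously above.
    file.write_all(b"hello world").await?;
    Ok(())
}
```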
diff --git a/third_party/rust/tokio/src/process/kill.rs b/third_party/rust/tokio/src/process/kill.rs
new file mode 100644
index 0000000000..a1f1652281
--- /dev/null
+++ b/third_party/rust/tokio/src/process/kill.rs
@@ -0,0 +1,13 @@
+use std::io;
+
+/// An interface for killing a running process.
+pub(crate) trait Kill {
+ /// Forcefully kills the process.
+ fn kill(&mut self) -> io::Result<()>;
+}
+
+impl<T: Kill> Kill for &mut T {
+ fn kill(&mut self) -> io::Result<()> {
+ (**self).kill()
+ }
+}
diff --git a/third_party/rust/tokio/src/process/mod.rs b/third_party/rust/tokio/src/process/mod.rs
new file mode 100644
index 0000000000..7231511235
--- /dev/null
+++ b/third_party/rust/tokio/src/process/mod.rs
@@ -0,0 +1,1078 @@
+//! An implementation of asynchronous process management for Tokio.
+//!
+//! This module provides a [`Command`] struct that imitates the interface of the
+//! [`std::process::Command`] type in the standard library, but provides asynchronous versions of
+//! functions that create processes. These functions (`spawn`, `status`, `output` and their
+//! variants) return "future aware" types that interoperate with Tokio. The asynchronous process
+//! support is provided through signal handling on Unix and system APIs on Windows.
+//!
+//! # Examples
+//!
+//! Here's an example program which will spawn `echo hello world` and then wait
+//! for it to complete.
+//!
+//! ```no_run
+//! use tokio::process::Command;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // The usage is the same as with the standard library's `Command` type, however the value
+//! // returned from `spawn` is a `Result` containing a `Future`.
+//! let child = Command::new("echo").arg("hello").arg("world")
+//! .spawn();
+//!
+//! // Make sure our child succeeded in spawning and process the result
+//! let future = child.expect("failed to spawn");
+//!
+//! // Await until the future (and the command) completes
+//! let status = future.await?;
+//! println!("the command exited with: {}", status);
+//! Ok(())
+//! }
+//! ```
+//!
+//! Next, let's take a look at an example where we not only spawn `echo hello
+//! world` but we also capture its output.
+//!
+//! ```no_run
+//! use tokio::process::Command;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // Like above, but use `output` which returns a future instead of
+//! // immediately returning the `Child`.
+//! let output = Command::new("echo").arg("hello").arg("world")
+//! .output();
+//!
+//! let output = output.await?;
+//!
+//! assert!(output.status.success());
+//! assert_eq!(output.stdout, b"hello world\n");
+//! Ok(())
+//! }
+//! ```
+//!
+//! We can also read input line by line.
+//!
+//! ```no_run
+//! use tokio::io::{BufReader, AsyncBufReadExt};
+//! use tokio::process::Command;
+//!
+//! use std::process::Stdio;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let mut cmd = Command::new("cat");
+//!
+//! // Specify that we want the command's standard output piped back to us.
+//! // By default, standard input/output/error will be inherited from the
+//! // current process (for example, this means that standard input will
+//! // come from the keyboard and standard output/error will go directly to
+//! // the terminal if this process is invoked from the command line).
+//! cmd.stdout(Stdio::piped());
+//!
+//! let mut child = cmd.spawn()
+//! .expect("failed to spawn command");
+//!
+//! let stdout = child.stdout.take()
+//! .expect("child did not have a handle to stdout");
+//!
+//! let mut reader = BufReader::new(stdout).lines();
+//!
+//! // Ensure the child process is spawned in the runtime so it can
+//! // make progress on its own while we await for any output.
+//! tokio::spawn(async {
+//! let status = child.await
+//! .expect("child process encountered an error");
+//!
+//! println!("child status was: {}", status);
+//! });
+//!
+//! while let Some(line) = reader.next_line().await? {
+//! println!("Line: {}", line);
+//! }
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! # Caveats
+//!
+//! Similar to the behavior of the standard library, and unlike the futures
+//! paradigm of dropping-implies-cancellation, a spawned process will, by
+//! default, continue to execute even after the `Child` handle has been dropped.
+//!
+//! The `Command::kill_on_drop` method can be used to modify this behavior
+//! and kill the child process if the `Child` wrapper is dropped before it
+//! has exited.
+//!
+//! [`Command`]: crate::process::Command
+
+#[path = "unix/mod.rs"]
+#[cfg(unix)]
+mod imp;
+
+#[path = "windows.rs"]
+#[cfg(windows)]
+mod imp;
+
+mod kill;
+
+use crate::io::{AsyncRead, AsyncWrite};
+use crate::process::kill::Kill;
+
+use std::ffi::OsStr;
+use std::future::Future;
+use std::io;
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+#[cfg(windows)]
+use std::os::windows::process::CommandExt;
+use std::path::Path;
+use std::pin::Pin;
+use std::process::{Command as StdCommand, ExitStatus, Output, Stdio};
+use std::task::Context;
+use std::task::Poll;
+
+/// This structure mimics the API of [`std::process::Command`] found in the standard library, but
+/// replaces functions that create a process with an asynchronous variant. The main provided
+/// asynchronous functions are [spawn](Command::spawn), [status](Command::status), and
+/// [output](Command::output).
+///
+/// `Command` uses asynchronous versions of some `std` types (for example [`Child`]).
+#[derive(Debug)]
+pub struct Command {
+ std: StdCommand,
+ kill_on_drop: bool,
+}
+
+pub(crate) struct SpawnedChild {
+ child: imp::Child,
+ stdin: Option<imp::ChildStdin>,
+ stdout: Option<imp::ChildStdout>,
+ stderr: Option<imp::ChildStderr>,
+}
+
+impl Command {
+ /// Constructs a new `Command` for launching the program at
+ /// path `program`, with the following default configuration:
+ ///
+ /// * No arguments to the program
+ /// * Inherit the current process's environment
+ /// * Inherit the current process's working directory
+ /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
+ ///
+ /// Builder methods are provided to change these defaults and
+ /// otherwise configure the process.
+ ///
+ /// If `program` is not an absolute path, the `PATH` will be searched in
+ /// an OS-defined way.
+ ///
+ /// The search path to be used may be controlled by setting the
+ /// `PATH` environment variable on the Command,
+ /// but this has some implementation limitations on Windows
+ /// (see issue rust-lang/rust#37519).
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ /// let command = Command::new("sh");
+ /// ```
+ pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
+ Self::from(StdCommand::new(program))
+ }
+
+ /// Adds an argument to pass to the program.
+ ///
+ /// Only one argument can be passed per use. So instead of:
+ ///
+ /// ```no_run
+ /// tokio::process::Command::new("sh")
+ /// .arg("-C /path/to/repo");
+ /// ```
+ ///
+ /// usage would be:
+ ///
+ /// ```no_run
+ /// tokio::process::Command::new("sh")
+ /// .arg("-C")
+ /// .arg("/path/to/repo");
+ /// ```
+ ///
+ /// To pass multiple arguments see [`args`].
+ ///
+ /// [`args`]: #method.args
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .arg("-l")
+ /// .arg("-a");
+ /// ```
+ pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
+ self.std.arg(arg);
+ self
+ }
+
+ /// Adds multiple arguments to pass to the program.
+ ///
+ /// To pass a single argument see [`arg`].
+ ///
+ /// [`arg`]: #method.arg
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .args(&["-l", "-a"]);
+ /// ```
+ pub fn args<I, S>(&mut self, args: I) -> &mut Command
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<OsStr>,
+ {
+ self.std.args(args);
+ self
+ }
+
+ /// Inserts or updates an environment variable mapping.
+ ///
+ /// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
+ /// and case-sensitive on all other platforms.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .env("PATH", "/bin");
+ /// ```
+ pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ self.std.env(key, val);
+ self
+ }
+
+ /// Adds or updates multiple environment variable mappings.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ /// use std::process::{Stdio};
+ /// use std::env;
+ /// use std::collections::HashMap;
+ ///
+ /// let filtered_env: HashMap<String, String> =
+ /// env::vars().filter(|&(ref k, _)|
+ /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH"
+ /// ).collect();
+ ///
+ /// let command = Command::new("printenv")
+ /// .stdin(Stdio::null())
+ /// .stdout(Stdio::inherit())
+ /// .env_clear()
+ /// .envs(&filtered_env);
+ /// ```
+ pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command
+ where
+ I: IntoIterator<Item = (K, V)>,
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ self.std.envs(vars);
+ self
+ }
+
+ /// Removes an environment variable mapping.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .env_remove("PATH");
+ /// ```
+ pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
+ self.std.env_remove(key);
+ self
+ }
+
+ /// Clears the entire environment map for the child process.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .env_clear();
+ /// ```
+ pub fn env_clear(&mut self) -> &mut Command {
+ self.std.env_clear();
+ self
+ }
+
+ /// Sets the working directory for the child process.
+ ///
+ /// # Platform-specific behavior
+ ///
+ /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous
+ /// whether it should be interpreted relative to the parent's working
+ /// directory or relative to `current_dir`. The behavior in this case is
+ /// platform specific and unstable, and it's recommended to use
+ /// [`canonicalize`] to get an absolute program path instead.
+ ///
+ /// [`canonicalize`]: crate::fs::canonicalize()
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .current_dir("/bin");
+ /// ```
+ pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command {
+ self.std.current_dir(dir);
+ self
+ }
+
+ /// Sets configuration for the child process's standard input (stdin) handle.
+ ///
+ /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+ /// defaults to [`piped`] when used with `output`.
+ ///
+ /// [`inherit`]: std::process::Stdio::inherit
+ /// [`piped`]: std::process::Stdio::piped
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use std::process::{Stdio};
+ /// use tokio::process::Command;
+ ///
+ /// let command = Command::new("ls")
+ /// .stdin(Stdio::null());
+ /// ```
+ pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.std.stdin(cfg);
+ self
+ }
+
+ /// Sets configuration for the child process's standard output (stdout) handle.
+ ///
+ /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+ /// defaults to [`piped`] when used with `output`.
+ ///
+ /// [`inherit`]: std::process::Stdio::inherit
+ /// [`piped`]: std::process::Stdio::piped
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ /// use std::process::Stdio;
+ ///
+ /// let command = Command::new("ls")
+ /// .stdout(Stdio::null());
+ /// ```
+ pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.std.stdout(cfg);
+ self
+ }
+
+ /// Sets configuration for the child process's standard error (stderr) handle.
+ ///
+ /// Defaults to [`inherit`] when used with `spawn` or `status`, and
+ /// defaults to [`piped`] when used with `output`.
+ ///
+ /// [`inherit`]: std::process::Stdio::inherit
+ /// [`piped`]: std::process::Stdio::piped
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ /// use std::process::{Stdio};
+ ///
+ /// let command = Command::new("ls")
+ /// .stderr(Stdio::null());
+ /// ```
+ pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
+ self.std.stderr(cfg);
+ self
+ }
+
+ /// Controls whether a `kill` operation should be invoked on a spawned child
+ /// process when its corresponding `Child` handle is dropped.
+ ///
+ /// By default, this value is assumed to be `false`, meaning the next spawned
+ /// process will not be killed on drop, similar to the behavior of the standard
+ /// library.
+ pub fn kill_on_drop(&mut self, kill_on_drop: bool) -> &mut Command {
+ self.kill_on_drop = kill_on_drop;
+ self
+ }
+
+ /// Sets the [process creation flags][1] to be passed to `CreateProcess`.
+ ///
+ /// These will always be ORed with `CREATE_UNICODE_ENVIRONMENT`.
+ ///
+ /// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
+ #[cfg(windows)]
+ pub fn creation_flags(&mut self, flags: u32) -> &mut Command {
+ self.std.creation_flags(flags);
+ self
+ }
+
+ /// Sets the child process's user ID. This translates to a
+ /// `setuid` call in the child process. Failure in the `setuid`
+ /// call will cause the spawn to fail.
+ #[cfg(unix)]
+ pub fn uid(&mut self, id: u32) -> &mut Command {
+ self.std.uid(id);
+ self
+ }
+
+ /// Similar to `uid` but sets the group ID of the child process. This has
+ /// the same semantics as the `uid` field.
+ #[cfg(unix)]
+ pub fn gid(&mut self, id: u32) -> &mut Command {
+ self.std.gid(id);
+ self
+ }
+
+ /// Schedules a closure to be run just before the `exec` function is
+ /// invoked.
+ ///
+ /// The closure is allowed to return an I/O error whose OS error code will
+ /// be communicated back to the parent and returned as an error from when
+ /// the spawn was requested.
+ ///
+ /// Multiple closures can be registered and they will be called in order of
+ /// their registration. If a closure returns `Err` then no further closures
+ /// will be called and the spawn operation will immediately return with a
+ /// failure.
+ ///
+ /// # Safety
+ ///
+ /// This closure will be run in the context of the child process after a
+ /// `fork`. This primarily means that any modifications made to memory on
+ /// behalf of this closure will **not** be visible to the parent process.
+ /// This is often a very constrained environment where normal operations
+ /// like `malloc` or acquiring a mutex are not guaranteed to work (due to
+ /// other threads perhaps still running when the `fork` was run).
+ ///
+ /// This also means that all resources such as file descriptors and
+ /// memory-mapped regions got duplicated. It is your responsibility to make
+ /// sure that the closure does not violate library invariants by making
+ /// invalid use of these duplicates.
+ ///
+ /// When this closure is run, aspects such as the stdio file descriptors and
+ /// working directory have successfully been changed, so output to these
+ /// locations may not appear where intended.
+ #[cfg(unix)]
+ pub unsafe fn pre_exec<F>(&mut self, f: F) -> &mut Command
+ where
+ F: FnMut() -> io::Result<()> + Send + Sync + 'static,
+ {
+ self.std.pre_exec(f);
+ self
+ }
+
+ /// Executes the command as a child process, returning a handle to it.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent.
+ ///
+ /// This method will spawn the child process synchronously and return a
+ /// handle to a future-aware child process. The `Child` returned implements
+ /// `Future` itself to acquire the `ExitStatus` of the child, and otherwise
+ /// the `Child` has methods to acquire handles to the stdin, stdout, and
+ /// stderr streams.
+ ///
+ /// All I/O this child does will be associated with the current default
+ /// event loop.
+ ///
+ /// # Caveats
+ ///
+ /// Similar to the behavior of the standard library, and unlike the futures
+ /// paradigm of dropping-implies-cancellation, the spawned process will, by
+ /// default, continue to execute even after the `Child` handle has been dropped.
+ ///
+ /// The `Command::kill_on_drop` method can be used to modify this behavior
+ /// and kill the child process if the `Child` wrapper is dropped before it
+ /// has exited.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// async fn run_ls() -> std::process::ExitStatus {
+ /// Command::new("ls")
+ /// .spawn()
+ /// .expect("ls command failed to start")
+ /// .await
+ /// .expect("ls command failed to run")
+ /// }
+ /// ```
+ pub fn spawn(&mut self) -> io::Result<Child> {
+ imp::spawn_child(&mut self.std).map(|spawned_child| Child {
+ child: ChildDropGuard {
+ inner: spawned_child.child,
+ kill_on_drop: self.kill_on_drop,
+ },
+ stdin: spawned_child.stdin.map(|inner| ChildStdin { inner }),
+ stdout: spawned_child.stdout.map(|inner| ChildStdout { inner }),
+ stderr: spawned_child.stderr.map(|inner| ChildStderr { inner }),
+ })
+ }
+
+ /// Executes the command as a child process, waiting for it to finish and
+ /// collecting its exit status.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent.
+ /// If any input/output handles are set to a pipe then they will be immediately
+ /// closed after the child is spawned.
+ ///
+ /// All I/O this child does will be associated with the current default
+ /// event loop.
+ ///
+ /// If this future is dropped before the future resolves, then
+ /// the child will be killed, if it was spawned.
+ ///
+ /// # Errors
+ ///
+ /// This future will return an error if the child process cannot be spawned
+ /// or if there is an error while awaiting its status.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// async fn run_ls() -> std::process::ExitStatus {
+ /// Command::new("ls")
+ /// .status()
+ /// .await
+ /// .expect("ls command failed to run")
+ /// }
+ /// ```
+ pub fn status(&mut self) -> impl Future<Output = io::Result<ExitStatus>> {
+ let child = self.spawn();
+
+ async {
+ let mut child = child?;
+
+ // Ensure we close any stdio handles so we can't deadlock
+ // waiting on the child which may be waiting to read/write
+ // to a pipe we're holding.
+ child.stdin.take();
+ child.stdout.take();
+ child.stderr.take();
+
+ child.await
+ }
+ }
+
+ /// Executes the command as a child process, waiting for it to finish and
+ /// collecting all of its output.
+ ///
+ /// > **Note**: this method, unlike the standard library, will
+ /// > unconditionally configure the stdout/stderr handles to be pipes, even
+ /// > if they have been previously configured. If this is not desired then
+ /// > the `spawn` method should be used in combination with the
+ /// > `wait_with_output` method on the child.
+ ///
+ /// This method will return a future representing the collection of the
+ /// child process's stdout/stderr. It will resolve to
+ /// the `Output` type in the standard library, containing `stdout` and
+ /// `stderr` as `Vec<u8>` along with an `ExitStatus` representing how the
+ /// process exited.
+ ///
+ /// All I/O this child does will be associated with the current default
+ /// event loop.
+ ///
+ /// If this future is dropped before the future resolves, then
+ /// the child will be killed, if it was spawned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```no_run
+ /// use tokio::process::Command;
+ ///
+ /// async fn run_ls() {
+ /// let output: std::process::Output = Command::new("ls")
+ /// .output()
+ /// .await
+ /// .expect("ls command failed to run");
+ /// println!("stderr of ls: {:?}", output.stderr);
+ /// }
+ /// ```
+ pub fn output(&mut self) -> impl Future<Output = io::Result<Output>> {
+ self.std.stdout(Stdio::piped());
+ self.std.stderr(Stdio::piped());
+
+ let child = self.spawn();
+
+ async { child?.wait_with_output().await }
+ }
+}
+
+impl From<StdCommand> for Command {
+ fn from(std: StdCommand) -> Command {
+ Command {
+ std,
+ kill_on_drop: false,
+ }
+ }
+}
+
+/// A drop guard which can ensure the child process is killed on drop if specified.
+#[derive(Debug)]
+struct ChildDropGuard<T: Kill> {
+ inner: T,
+ kill_on_drop: bool,
+}
+
+impl<T: Kill> Kill for ChildDropGuard<T> {
+ fn kill(&mut self) -> io::Result<()> {
+ let ret = self.inner.kill();
+
+ if ret.is_ok() {
+ self.kill_on_drop = false;
+ }
+
+ ret
+ }
+}
+
+impl<T: Kill> Drop for ChildDropGuard<T> {
+ fn drop(&mut self) {
+ if self.kill_on_drop {
+ drop(self.kill());
+ }
+ }
+}
+
+impl<T, E, F> Future for ChildDropGuard<F>
+where
+ F: Future<Output = Result<T, E>> + Kill + Unpin,
+{
+ type Output = Result<T, E>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ let ret = Pin::new(&mut self.inner).poll(cx);
+
+ if let Poll::Ready(Ok(_)) = ret {
+ // Avoid the overhead of trying to kill a reaped process
+ self.kill_on_drop = false;
+ }
+
+ ret
+ }
+}
+
+/// Representation of a child process spawned onto an event loop.
+///
+/// This type is also a future which will yield the `ExitStatus` of the
+/// underlying child process. A `Child` here also provides access to information
+/// like the OS-assigned identifier and the stdio streams.
+///
+/// # Caveats
+///
+/// Similar to the behavior of the standard library, and unlike the futures
+/// paradigm of dropping-implies-cancellation, a spawned process will, by
+/// default, continue to execute even after the `Child` handle has been dropped.
+///
+/// The `Command::kill_on_drop` method can be used to modify this behavior
+/// and kill the child process if the `Child` wrapper is dropped before it
+/// has exited.
+#[must_use = "futures do nothing unless polled"]
+#[derive(Debug)]
+pub struct Child {
+ child: ChildDropGuard<imp::Child>,
+
+ /// The handle for writing to the child's standard input (stdin), if it has
+ /// been captured.
+ pub stdin: Option<ChildStdin>,
+
+ /// The handle for reading from the child's standard output (stdout), if it
+ /// has been captured.
+ pub stdout: Option<ChildStdout>,
+
+ /// The handle for reading from the child's standard error (stderr), if it
+ /// has been captured.
+ pub stderr: Option<ChildStderr>,
+}
+
+impl Child {
+ /// Returns the OS-assigned process identifier associated with this child.
+ pub fn id(&self) -> u32 {
+ self.child.inner.id()
+ }
+
+ /// Forces the child to exit.
+ ///
+ /// This is equivalent to sending a SIGKILL on unix platforms.
+ pub fn kill(&mut self) -> io::Result<()> {
+ self.child.kill()
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "please use `child.stdin` instead")]
+ pub fn stdin(&mut self) -> &mut Option<ChildStdin> {
+ &mut self.stdin
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "please use `child.stdout` instead")]
+ pub fn stdout(&mut self) -> &mut Option<ChildStdout> {
+ &mut self.stdout
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "please use `child.stderr` instead")]
+ pub fn stderr(&mut self) -> &mut Option<ChildStderr> {
+ &mut self.stderr
+ }
+
+ /// Returns a future that will resolve to an `Output`, containing the exit
+ /// status, stdout, and stderr of the child process.
+ ///
+ /// The returned future will simultaneously wait for the child to exit and
+ /// collect all remaining output on the stdout/stderr handles, returning an
+ /// `Output` instance.
+ ///
+ /// The stdin handle to the child process, if any, will be closed before
+ /// waiting. This helps avoid deadlock: it ensures that the child does not
+ /// block waiting for input from the parent, while the parent waits for the
+ /// child to exit.
+ ///
+ /// By default, stdin, stdout and stderr are inherited from the parent. In
+ /// order to capture the output into this `Output` it is necessary to create
+ /// new pipes between parent and child. Use `stdout(Stdio::piped())` or
+ /// `stderr(Stdio::piped())`, respectively, when creating a `Command`.
+ pub async fn wait_with_output(mut self) -> io::Result<Output> {
+ use crate::future::try_join3;
+
+ async fn read_to_end<A: AsyncRead + Unpin>(io: Option<A>) -> io::Result<Vec<u8>> {
+ let mut vec = Vec::new();
+ if let Some(mut io) = io {
+ crate::io::util::read_to_end(&mut io, &mut vec).await?;
+ }
+ Ok(vec)
+ }
+
+ drop(self.stdin.take());
+ let stdout_fut = read_to_end(self.stdout.take());
+ let stderr_fut = read_to_end(self.stderr.take());
+
+ let (status, stdout, stderr) = try_join3(self, stdout_fut, stderr_fut).await?;
+
+ Ok(Output {
+ status,
+ stdout,
+ stderr,
+ })
+ }
+}
+
+impl Future for Child {
+ type Output = io::Result<ExitStatus>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.child).poll(cx)
+ }
+}
+
+/// The standard input stream for spawned children.
+///
+/// This type implements the `AsyncWrite` trait to pass data to the stdin
+/// handle of a child process asynchronously.
+#[derive(Debug)]
+pub struct ChildStdin {
+ inner: imp::ChildStdin,
+}
+
+/// The standard output stream for spawned children.
+///
+/// This type implements the `AsyncRead` trait to read data from the stdout
+/// handle of a child process asynchronously.
+#[derive(Debug)]
+pub struct ChildStdout {
+ inner: imp::ChildStdout,
+}
+
+/// The standard error stream for spawned children.
+///
+/// This type implements the `AsyncRead` trait to read data from the stderr
+/// handle of a child process asynchronously.
+#[derive(Debug)]
+pub struct ChildStderr {
+ inner: imp::ChildStderr,
+}
+
+impl AsyncWrite for ChildStdin {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_shutdown(cx)
+ }
+}
+
+impl AsyncRead for ChildStdout {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ }
+}
+
+impl AsyncRead for ChildStderr {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ }
+}
+
+#[cfg(unix)]
+mod sys {
+ use std::os::unix::io::{AsRawFd, RawFd};
+
+ use super::{ChildStderr, ChildStdin, ChildStdout};
+
+ impl AsRawFd for ChildStdin {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.get_ref().as_raw_fd()
+ }
+ }
+
+ impl AsRawFd for ChildStdout {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.get_ref().as_raw_fd()
+ }
+ }
+
+ impl AsRawFd for ChildStderr {
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.get_ref().as_raw_fd()
+ }
+ }
+}
+
+#[cfg(windows)]
+mod sys {
+ use std::os::windows::io::{AsRawHandle, RawHandle};
+
+ use super::{ChildStderr, ChildStdin, ChildStdout};
+
+ impl AsRawHandle for ChildStdin {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.get_ref().as_raw_handle()
+ }
+ }
+
+ impl AsRawHandle for ChildStdout {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.get_ref().as_raw_handle()
+ }
+ }
+
+ impl AsRawHandle for ChildStderr {
+ fn as_raw_handle(&self) -> RawHandle {
+ self.inner.get_ref().as_raw_handle()
+ }
+ }
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::kill::Kill;
+ use super::ChildDropGuard;
+
+ use futures::future::FutureExt;
+ use std::future::Future;
+ use std::io;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
+ struct Mock {
+ num_kills: usize,
+ num_polls: usize,
+ poll_result: Poll<Result<(), ()>>,
+ }
+
+ impl Mock {
+ fn new() -> Self {
+ Self::with_result(Poll::Pending)
+ }
+
+ fn with_result(result: Poll<Result<(), ()>>) -> Self {
+ Self {
+ num_kills: 0,
+ num_polls: 0,
+ poll_result: result,
+ }
+ }
+ }
+
+ impl Kill for Mock {
+ fn kill(&mut self) -> io::Result<()> {
+ self.num_kills += 1;
+ Ok(())
+ }
+ }
+
+ impl Future for Mock {
+ type Output = Result<(), ()>;
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = Pin::get_mut(self);
+ inner.num_polls += 1;
+ inner.poll_result
+ }
+ }
+
+ #[test]
+ fn kills_on_drop_if_specified() {
+ let mut mock = Mock::new();
+
+ {
+ let guard = ChildDropGuard {
+ inner: &mut mock,
+ kill_on_drop: true,
+ };
+ drop(guard);
+ }
+
+ assert_eq!(1, mock.num_kills);
+ assert_eq!(0, mock.num_polls);
+ }
+
+ #[test]
+ fn no_kill_on_drop_by_default() {
+ let mut mock = Mock::new();
+
+ {
+ let guard = ChildDropGuard {
+ inner: &mut mock,
+ kill_on_drop: false,
+ };
+ drop(guard);
+ }
+
+ assert_eq!(0, mock.num_kills);
+ assert_eq!(0, mock.num_polls);
+ }
+
+ #[test]
+ fn no_kill_if_already_killed() {
+ let mut mock = Mock::new();
+
+ {
+ let mut guard = ChildDropGuard {
+ inner: &mut mock,
+ kill_on_drop: true,
+ };
+ let _ = guard.kill();
+ drop(guard);
+ }
+
+ assert_eq!(1, mock.num_kills);
+ assert_eq!(0, mock.num_polls);
+ }
+
+ #[test]
+ fn no_kill_if_reaped() {
+ let mut mock_pending = Mock::with_result(Poll::Pending);
+ let mut mock_reaped = Mock::with_result(Poll::Ready(Ok(())));
+ let mut mock_err = Mock::with_result(Poll::Ready(Err(())));
+
+ let waker = futures::task::noop_waker();
+ let mut context = Context::from_waker(&waker);
+ {
+ let mut guard = ChildDropGuard {
+ inner: &mut mock_pending,
+ kill_on_drop: true,
+ };
+ let _ = guard.poll_unpin(&mut context);
+
+ let mut guard = ChildDropGuard {
+ inner: &mut mock_reaped,
+ kill_on_drop: true,
+ };
+ let _ = guard.poll_unpin(&mut context);
+
+ let mut guard = ChildDropGuard {
+ inner: &mut mock_err,
+ kill_on_drop: true,
+ };
+ let _ = guard.poll_unpin(&mut context);
+ }
+
+ assert_eq!(1, mock_pending.num_kills);
+ assert_eq!(1, mock_pending.num_polls);
+
+ assert_eq!(0, mock_reaped.num_kills);
+ assert_eq!(1, mock_reaped.num_polls);
+
+ assert_eq!(1, mock_err.num_kills);
+ assert_eq!(1, mock_err.num_polls);
+ }
+}
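
One behavior worth a concrete sketch is the `kill_on_drop` caveat from the module docs: by default a dropped `Child` keeps running, and opting in changes that. A minimal example against this tokio 0.2-era API (`sleep` here is the system binary):

```rust
use tokio::process::Command;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Without kill_on_drop(true), dropping the handle would leave the
    // child running, matching std::process::Command semantics.
    let child = Command::new("sleep")
        .arg("1000")
        .kill_on_drop(true)
        .spawn()?;

    // ChildDropGuard sees kill_on_drop == true and kills the child here
    // (SIGKILL on Unix).
    drop(child);
    Ok(())
}
```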
diff --git a/third_party/rust/tokio/src/process/unix/mod.rs b/third_party/rust/tokio/src/process/unix/mod.rs
new file mode 100644
index 0000000000..c25d98974a
--- /dev/null
+++ b/third_party/rust/tokio/src/process/unix/mod.rs
@@ -0,0 +1,227 @@
+//! Unix handling of child processes
+//!
+//! Right now the only "fancy" thing about this is how we implement the
+//! `Future` implementation on `Child` to get the exit status. Unix offers
+//! no way to register a child with epoll, and the only real way to get a
+//! notification when a process exits is the SIGCHLD signal.
+//!
+//! Signal handling in general is *super* hairy and complicated, and it's even
+//! more complicated here with the fact that signals are coalesced, so we may
+//! not get a SIGCHLD-per-child.
+//!
+//! Our best approximation here is to check *all spawned processes* for all
+//! SIGCHLD signals received. To do that we create a `Signal`, implemented in
+//! the `signal` module of this crate, which is a stream over signals being
+//! received.
+//!
+//! Later when we poll the process's exit status we simply check to see if a
+//! SIGCHLD has happened since we last checked, and while that returns "yes" we
+//! keep trying.
+//!
+//! Note that this means this isn't really scalable, but then again processes
+//! in general aren't scalable (e.g. to millions of them), so it shouldn't be
+//! that bad in theory...
+
+mod orphan;
+use orphan::{OrphanQueue, OrphanQueueImpl, Wait};
+
+mod reap;
+use reap::Reaper;
+
+use crate::io::PollEvented;
+use crate::process::kill::Kill;
+use crate::process::SpawnedChild;
+use crate::signal::unix::{signal, Signal, SignalKind};
+
+use mio::event::Evented;
+use mio::unix::{EventedFd, UnixReady};
+use mio::{Poll as MioPoll, PollOpt, Ready, Token};
+use std::fmt;
+use std::future::Future;
+use std::io;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::pin::Pin;
+use std::process::ExitStatus;
+use std::task::Context;
+use std::task::Poll;
+
+impl Wait for std::process::Child {
+ fn id(&self) -> u32 {
+ self.id()
+ }
+
+ fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ self.try_wait()
+ }
+}
+
+impl Kill for std::process::Child {
+ fn kill(&mut self) -> io::Result<()> {
+ self.kill()
+ }
+}
+
+lazy_static::lazy_static! {
+ static ref ORPHAN_QUEUE: OrphanQueueImpl<std::process::Child> = OrphanQueueImpl::new();
+}
+
+struct GlobalOrphanQueue;
+
+impl fmt::Debug for GlobalOrphanQueue {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ORPHAN_QUEUE.fmt(fmt)
+ }
+}
+
+impl OrphanQueue<std::process::Child> for GlobalOrphanQueue {
+ fn push_orphan(&self, orphan: std::process::Child) {
+ ORPHAN_QUEUE.push_orphan(orphan)
+ }
+
+ fn reap_orphans(&self) {
+ ORPHAN_QUEUE.reap_orphans()
+ }
+}
+
+#[must_use = "futures do nothing unless polled"]
+pub(crate) struct Child {
+ inner: Reaper<std::process::Child, GlobalOrphanQueue, Signal>,
+}
+
+impl fmt::Debug for Child {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Child")
+ .field("pid", &self.inner.id())
+ .finish()
+ }
+}
+
+pub(crate) fn spawn_child(cmd: &mut std::process::Command) -> io::Result<SpawnedChild> {
+ let mut child = cmd.spawn()?;
+ let stdin = stdio(child.stdin.take())?;
+ let stdout = stdio(child.stdout.take())?;
+ let stderr = stdio(child.stderr.take())?;
+
+ let signal = signal(SignalKind::child())?;
+
+ Ok(SpawnedChild {
+ child: Child {
+ inner: Reaper::new(child, GlobalOrphanQueue, signal),
+ },
+ stdin,
+ stdout,
+ stderr,
+ })
+}
+
+impl Child {
+ pub(crate) fn id(&self) -> u32 {
+ self.inner.id()
+ }
+}
+
+impl Kill for Child {
+ fn kill(&mut self) -> io::Result<()> {
+ self.inner.kill()
+ }
+}
+
+impl Future for Child {
+ type Output = io::Result<ExitStatus>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.inner).poll(cx)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct Fd<T> {
+ inner: T,
+}
+
+impl<T> io::Read for Fd<T>
+where
+ T: io::Read,
+{
+ fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
+ self.inner.read(bytes)
+ }
+}
+
+impl<T> io::Write for Fd<T>
+where
+ T: io::Write,
+{
+ fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
+ self.inner.write(bytes)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+}
+
+impl<T> AsRawFd for Fd<T>
+where
+ T: AsRawFd,
+{
+ fn as_raw_fd(&self) -> RawFd {
+ self.inner.as_raw_fd()
+ }
+}
+
+impl<T> Evented for Fd<T>
+where
+ T: AsRawFd,
+{
+ fn register(
+ &self,
+ poll: &MioPoll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).register(poll, token, interest | UnixReady::hup(), opts)
+ }
+
+ fn reregister(
+ &self,
+ poll: &MioPoll,
+ token: Token,
+ interest: Ready,
+ opts: PollOpt,
+ ) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).reregister(poll, token, interest | UnixReady::hup(), opts)
+ }
+
+ fn deregister(&self, poll: &MioPoll) -> io::Result<()> {
+ EventedFd(&self.as_raw_fd()).deregister(poll)
+ }
+}
+
+pub(crate) type ChildStdin = PollEvented<Fd<std::process::ChildStdin>>;
+pub(crate) type ChildStdout = PollEvented<Fd<std::process::ChildStdout>>;
+pub(crate) type ChildStderr = PollEvented<Fd<std::process::ChildStderr>>;
+
+fn stdio<T>(option: Option<T>) -> io::Result<Option<PollEvented<Fd<T>>>>
+where
+ T: AsRawFd,
+{
+ let io = match option {
+ Some(io) => io,
+ None => return Ok(None),
+ };
+
+ // Set the fd to nonblocking before we pass it to the event loop
+ unsafe {
+ let fd = io.as_raw_fd();
+ let r = libc::fcntl(fd, libc::F_GETFL);
+ if r == -1 {
+ return Err(io::Error::last_os_error());
+ }
+ let r = libc::fcntl(fd, libc::F_SETFL, r | libc::O_NONBLOCK);
+ if r == -1 {
+ return Err(io::Error::last_os_error());
+ }
+ }
+ Ok(Some(PollEvented::new(Fd { inner: io })?))
+}
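
The nonblocking setup at the end of `stdio` is the standard read-modify-write `fcntl` dance. Factored out as a standalone sketch over an arbitrary descriptor (using the `libc` crate):

```rust
use std::io;
use std::os::unix::io::RawFd;

/// Sets O_NONBLOCK on `fd` while preserving the other status flags.
fn set_nonblocking(fd: RawFd) -> io::Result<()> {
    unsafe {
        // Fetch the current status flags first...
        let flags = libc::fcntl(fd, libc::F_GETFL);
        if flags == -1 {
            return Err(io::Error::last_os_error());
        }
        // ...then OR in O_NONBLOCK so nothing else is clobbered.
        if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK) == -1 {
            return Err(io::Error::last_os_error());
        }
    }
    Ok(())
}
```

Reading the current flags before writing matters: calling `F_SETFL` with only `O_NONBLOCK` would silently drop flags such as `O_APPEND`.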
diff --git a/third_party/rust/tokio/src/process/unix/orphan.rs b/third_party/rust/tokio/src/process/unix/orphan.rs
new file mode 100644
index 0000000000..6c449a9093
--- /dev/null
+++ b/third_party/rust/tokio/src/process/unix/orphan.rs
@@ -0,0 +1,191 @@
+use std::io;
+use std::process::ExitStatus;
+use std::sync::Mutex;
+
+/// An interface for waiting on a process to exit.
+pub(crate) trait Wait {
+ /// Gets the identifier for this process, for use in diagnostics.
+ fn id(&self) -> u32;
+ /// Try waiting for a process to exit in a non-blocking manner.
+ fn try_wait(&mut self) -> io::Result<Option<ExitStatus>>;
+}
+
+impl<T: Wait> Wait for &mut T {
+ fn id(&self) -> u32 {
+ (**self).id()
+ }
+
+ fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ (**self).try_wait()
+ }
+}
+
+/// An interface for queueing up an orphaned process so that it can be reaped.
+pub(crate) trait OrphanQueue<T> {
+ /// Adds an orphan to the queue.
+ fn push_orphan(&self, orphan: T);
+ /// Attempts to reap every process in the queue, ignoring any errors and
+ /// enqueueing any orphans which have not yet exited.
+ fn reap_orphans(&self);
+}
+
+impl<T, O: OrphanQueue<T>> OrphanQueue<T> for &O {
+ fn push_orphan(&self, orphan: T) {
+ (**self).push_orphan(orphan);
+ }
+
+ fn reap_orphans(&self) {
+ (**self).reap_orphans()
+ }
+}
+
+/// An implementation of `OrphanQueue`.
+#[derive(Debug)]
+pub(crate) struct OrphanQueueImpl<T> {
+ queue: Mutex<Vec<T>>,
+}
+
+impl<T> OrphanQueueImpl<T> {
+ pub(crate) fn new() -> Self {
+ Self {
+ queue: Mutex::new(Vec::new()),
+ }
+ }
+
+ #[cfg(test)]
+ fn len(&self) -> usize {
+ self.queue.lock().unwrap().len()
+ }
+}
+
+impl<T: Wait> OrphanQueue<T> for OrphanQueueImpl<T> {
+ fn push_orphan(&self, orphan: T) {
+ self.queue.lock().unwrap().push(orphan)
+ }
+
+ fn reap_orphans(&self) {
+ let mut queue = self.queue.lock().unwrap();
+ let queue = &mut *queue;
+
+ let mut i = 0;
+ while i < queue.len() {
+ match queue[i].try_wait() {
+ Ok(Some(_)) => {}
+ Err(_) => {
+ // TODO: bubble up the error somehow. Is this an internal bug?
+ // Should we panic? Is it OK for this to be silently
+ // dropped?
+ }
+ // Still not done yet
+ Ok(None) => {
+ i += 1;
+ continue;
+ }
+ }
+
+ queue.remove(i);
+ }
+ }
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::Wait;
+ use super::{OrphanQueue, OrphanQueueImpl};
+ use std::cell::Cell;
+ use std::io;
+ use std::os::unix::process::ExitStatusExt;
+ use std::process::ExitStatus;
+ use std::rc::Rc;
+
+ struct MockWait {
+ total_waits: Rc<Cell<usize>>,
+ num_wait_until_status: usize,
+ return_err: bool,
+ }
+
+ impl MockWait {
+ fn new(num_wait_until_status: usize) -> Self {
+ Self {
+ total_waits: Rc::new(Cell::new(0)),
+ num_wait_until_status,
+ return_err: false,
+ }
+ }
+
+ fn with_err() -> Self {
+ Self {
+ total_waits: Rc::new(Cell::new(0)),
+ num_wait_until_status: 0,
+ return_err: true,
+ }
+ }
+ }
+
+ impl Wait for MockWait {
+ fn id(&self) -> u32 {
+ 42
+ }
+
+ fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ let waits = self.total_waits.get();
+
+ let ret = if self.num_wait_until_status == waits {
+ if self.return_err {
+ Err(io::Error::new(io::ErrorKind::Other, "mock err"))
+ } else {
+ Ok(Some(ExitStatus::from_raw(0)))
+ }
+ } else {
+ Ok(None)
+ };
+
+ self.total_waits.set(waits + 1);
+ ret
+ }
+ }
+
+ #[test]
+ fn drain_attempts_a_single_reap_of_all_queued_orphans() {
+ let first_orphan = MockWait::new(0);
+ let second_orphan = MockWait::new(1);
+ let third_orphan = MockWait::new(2);
+ let fourth_orphan = MockWait::with_err();
+
+ let first_waits = first_orphan.total_waits.clone();
+ let second_waits = second_orphan.total_waits.clone();
+ let third_waits = third_orphan.total_waits.clone();
+ let fourth_waits = fourth_orphan.total_waits.clone();
+
+ let orphanage = OrphanQueueImpl::new();
+ orphanage.push_orphan(first_orphan);
+ orphanage.push_orphan(third_orphan);
+ orphanage.push_orphan(second_orphan);
+ orphanage.push_orphan(fourth_orphan);
+
+ assert_eq!(orphanage.len(), 4);
+
+ orphanage.reap_orphans();
+ assert_eq!(orphanage.len(), 2);
+ assert_eq!(first_waits.get(), 1);
+ assert_eq!(second_waits.get(), 1);
+ assert_eq!(third_waits.get(), 1);
+ assert_eq!(fourth_waits.get(), 1);
+
+ orphanage.reap_orphans();
+ assert_eq!(orphanage.len(), 1);
+ assert_eq!(first_waits.get(), 1);
+ assert_eq!(second_waits.get(), 2);
+ assert_eq!(third_waits.get(), 2);
+ assert_eq!(fourth_waits.get(), 1);
+
+ orphanage.reap_orphans();
+ assert_eq!(orphanage.len(), 0);
+ assert_eq!(first_waits.get(), 1);
+ assert_eq!(second_waits.get(), 2);
+ assert_eq!(third_waits.get(), 3);
+ assert_eq!(fourth_waits.get(), 1);
+
+ orphanage.reap_orphans(); // Safe to reap when empty
+ }
+}
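
The index-juggling loop in `reap_orphans` keeps an entry only when `try_wait` returns `Ok(None)` and drops it on exit or error. On a newer compiler the same policy could be written with `Vec::retain_mut` (stable since Rust 1.61, later than this vendored snapshot); a sketch:

```rust
use std::io;
use std::process::ExitStatus;

trait Wait {
    fn try_wait(&mut self) -> io::Result<Option<ExitStatus>>;
}

/// Drops every orphan that has exited (Ok(Some)) or errored (Err),
/// keeping only those still running -- the same policy as reap_orphans.
fn reap<T: Wait>(queue: &mut Vec<T>) {
    queue.retain_mut(|orphan| matches!(orphan.try_wait(), Ok(None)));
}
```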
diff --git a/third_party/rust/tokio/src/process/unix/reap.rs b/third_party/rust/tokio/src/process/unix/reap.rs
new file mode 100644
index 0000000000..8963805afe
--- /dev/null
+++ b/third_party/rust/tokio/src/process/unix/reap.rs
@@ -0,0 +1,342 @@
+use crate::process::imp::orphan::{OrphanQueue, Wait};
+use crate::process::kill::Kill;
+use crate::signal::unix::Signal;
+
+use std::future::Future;
+use std::io;
+use std::ops::Deref;
+use std::pin::Pin;
+use std::process::ExitStatus;
+use std::task::Context;
+use std::task::Poll;
+
+/// Orchestrates between registering interest for receiving signals when a
+/// child process has exited, and attempting to poll for process completion.
+#[derive(Debug)]
+pub(crate) struct Reaper<W, Q, S>
+where
+ W: Wait + Unpin,
+ Q: OrphanQueue<W>,
+{
+ inner: Option<W>,
+ orphan_queue: Q,
+ signal: S,
+}
+
+// Work around removal of `futures_core` dependency
+pub(crate) trait Stream: Unpin {
+ fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>>;
+}
+
+impl Stream for Signal {
+ fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ Signal::poll_recv(self, cx)
+ }
+}
+
+impl<W, Q, S> Deref for Reaper<W, Q, S>
+where
+ W: Wait + Unpin,
+ Q: OrphanQueue<W>,
+{
+ type Target = W;
+
+ fn deref(&self) -> &Self::Target {
+ self.inner()
+ }
+}
+
+impl<W, Q, S> Reaper<W, Q, S>
+where
+ W: Wait + Unpin,
+ Q: OrphanQueue<W>,
+{
+ pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self {
+ Self {
+ inner: Some(inner),
+ orphan_queue,
+ signal,
+ }
+ }
+
+ fn inner(&self) -> &W {
+ self.inner.as_ref().expect("inner has gone away")
+ }
+
+ fn inner_mut(&mut self) -> &mut W {
+ self.inner.as_mut().expect("inner has gone away")
+ }
+}
+
+impl<W, Q, S> Future for Reaper<W, Q, S>
+where
+ W: Wait + Unpin,
+ Q: OrphanQueue<W> + Unpin,
+ S: Stream,
+{
+ type Output = io::Result<ExitStatus>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ loop {
+ // If the child hasn't exited yet, then it's our responsibility to
+ // ensure the current task gets notified when it might be able to
+ // make progress.
+ //
+ // As described in `spawn` above, we just indicate that we can
+ // next make progress once a SIGCHLD is received.
+ //
+ // However, we will register for a notification on the next signal
+ // BEFORE we poll the child. Otherwise it is possible that the child
+ // can exit and the signal can arrive after we last polled the child,
+ // but before we've registered for a notification on the next signal
+ // (this can cause a deadlock if there are no more spawned children
+ // which can generate a different signal for us). A side effect of
+ // pre-registering for signal notifications is that when the child
+ // exits, we will have already registered for an additional
+ // notification we don't need to consume. If another signal arrives,
+ // this future's task will be notified/woken up again. Since the
+ // futures model allows for spurious wake ups this extra wakeup
+ // should not cause significant issues with parent futures.
+ let registered_interest = self.signal.poll_recv(cx).is_pending();
+
+ self.orphan_queue.reap_orphans();
+ if let Some(status) = self.inner_mut().try_wait()? {
+ return Poll::Ready(Ok(status));
+ }
+
+ // If our attempt to poll for the next signal was not ready, then
+ // we've arranged for our task to get notified and we can bail out.
+ if registered_interest {
+ return Poll::Pending;
+ } else {
+ // Otherwise, if the signal stream delivered a signal to us, we
+ // won't get notified at the next signal, so we'll loop and try
+ // again.
+ continue;
+ }
+ }
+ }
+}
+
+impl<W, Q, S> Kill for Reaper<W, Q, S>
+where
+ W: Kill + Wait + Unpin,
+ Q: OrphanQueue<W>,
+{
+ fn kill(&mut self) -> io::Result<()> {
+ self.inner_mut().kill()
+ }
+}
+
+impl<W, Q, S> Drop for Reaper<W, Q, S>
+where
+ W: Wait + Unpin,
+ Q: OrphanQueue<W>,
+{
+ fn drop(&mut self) {
+ if let Ok(Some(_)) = self.inner_mut().try_wait() {
+ return;
+ }
+
+ let orphan = self.inner.take().unwrap();
+ self.orphan_queue.push_orphan(orphan);
+ }
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::*;
+
+ use futures::future::FutureExt;
+ use std::cell::{Cell, RefCell};
+ use std::os::unix::process::ExitStatusExt;
+ use std::process::ExitStatus;
+ use std::task::Context;
+ use std::task::Poll;
+
+ #[derive(Debug)]
+ struct MockWait {
+ total_kills: usize,
+ total_waits: usize,
+ num_wait_until_status: usize,
+ status: ExitStatus,
+ }
+
+ impl MockWait {
+ fn new(status: ExitStatus, num_wait_until_status: usize) -> Self {
+ Self {
+ total_kills: 0,
+ total_waits: 0,
+ num_wait_until_status,
+ status,
+ }
+ }
+ }
+
+ impl Wait for MockWait {
+ fn id(&self) -> u32 {
+ 0
+ }
+
+ fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
+ let ret = if self.num_wait_until_status == self.total_waits {
+ Some(self.status)
+ } else {
+ None
+ };
+
+ self.total_waits += 1;
+ Ok(ret)
+ }
+ }
+
+ impl Kill for MockWait {
+ fn kill(&mut self) -> io::Result<()> {
+ self.total_kills += 1;
+ Ok(())
+ }
+ }
+
+ struct MockStream {
+ total_polls: usize,
+ values: Vec<Option<()>>,
+ }
+
+ impl MockStream {
+ fn new(values: Vec<Option<()>>) -> Self {
+ Self {
+ total_polls: 0,
+ values,
+ }
+ }
+ }
+
+ impl Stream for MockStream {
+ fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> {
+ self.total_polls += 1;
+ match self.values.remove(0) {
+ Some(()) => Poll::Ready(Some(())),
+ None => Poll::Pending,
+ }
+ }
+ }
+
+ struct MockQueue<W> {
+ all_enqueued: RefCell<Vec<W>>,
+ total_reaps: Cell<usize>,
+ }
+
+ impl<W> MockQueue<W> {
+ fn new() -> Self {
+ Self {
+ all_enqueued: RefCell::new(Vec::new()),
+ total_reaps: Cell::new(0),
+ }
+ }
+ }
+
+ impl<W: Wait> OrphanQueue<W> for MockQueue<W> {
+ fn push_orphan(&self, orphan: W) {
+ self.all_enqueued.borrow_mut().push(orphan);
+ }
+
+ fn reap_orphans(&self) {
+ self.total_reaps.set(self.total_reaps.get() + 1);
+ }
+ }
+
+ #[test]
+ fn reaper() {
+ let exit = ExitStatus::from_raw(0);
+ let mock = MockWait::new(exit, 3);
+ let mut grim = Reaper::new(
+ mock,
+ MockQueue::new(),
+ MockStream::new(vec![None, Some(()), None, None, None]),
+ );
+
+ let waker = futures::task::noop_waker();
+ let mut context = Context::from_waker(&waker);
+
+ // Not yet exited, interest registered
+ assert!(grim.poll_unpin(&mut context).is_pending());
+ assert_eq!(1, grim.signal.total_polls);
+ assert_eq!(1, grim.total_waits);
+ assert_eq!(1, grim.orphan_queue.total_reaps.get());
+ assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
+
+ // Not yet exited, couldn't register interest the first time
+ // but managed to register interest the second time around
+ assert!(grim.poll_unpin(&mut context).is_pending());
+ assert_eq!(3, grim.signal.total_polls);
+ assert_eq!(3, grim.total_waits);
+ assert_eq!(3, grim.orphan_queue.total_reaps.get());
+ assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
+
+ // Exited
+ if let Poll::Ready(r) = grim.poll_unpin(&mut context) {
+ assert!(r.is_ok());
+ let exit_code = r.unwrap();
+ assert_eq!(exit_code, exit);
+ } else {
+ unreachable!();
+ }
+ assert_eq!(4, grim.signal.total_polls);
+ assert_eq!(4, grim.total_waits);
+ assert_eq!(4, grim.orphan_queue.total_reaps.get());
+ assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
+ }
+
+ #[test]
+ fn kill() {
+ let exit = ExitStatus::from_raw(0);
+ let mut grim = Reaper::new(
+ MockWait::new(exit, 0),
+ MockQueue::new(),
+ MockStream::new(vec![None]),
+ );
+
+ grim.kill().unwrap();
+ assert_eq!(1, grim.total_kills);
+ assert_eq!(0, grim.orphan_queue.total_reaps.get());
+ assert!(grim.orphan_queue.all_enqueued.borrow().is_empty());
+ }
+
+ #[test]
+ fn drop_reaps_if_possible() {
+ let exit = ExitStatus::from_raw(0);
+ let mut mock = MockWait::new(exit, 0);
+
+ {
+ let queue = MockQueue::new();
+
+ let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![]));
+
+ drop(grim);
+
+ assert_eq!(0, queue.total_reaps.get());
+ assert!(queue.all_enqueued.borrow().is_empty());
+ }
+
+ assert_eq!(1, mock.total_waits);
+ assert_eq!(0, mock.total_kills);
+ }
+
+ #[test]
+ fn drop_enqueues_orphan_if_wait_fails() {
+ let exit = ExitStatus::from_raw(0);
+ let mut mock = MockWait::new(exit, 2);
+
+ {
+ let queue = MockQueue::<&mut MockWait>::new();
+ let grim = Reaper::new(&mut mock, &queue, MockStream::new(vec![]));
+ drop(grim);
+
+ assert_eq!(0, queue.total_reaps.get());
+ assert_eq!(1, queue.all_enqueued.borrow().len());
+ }
+
+ assert_eq!(1, mock.total_waits);
+ assert_eq!(0, mock.total_kills);
+ }
+}
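
The ordering invariant in `Reaper::poll` above (arm the signal wakeup *before* checking the child) generalizes to any edge-triggered event source. A schematic sketch of that loop, where `poll_event` and `check_done` are hypothetical stand-ins for `Signal::poll_recv` and `try_wait`:

```rust
use std::task::{Context, Poll};

fn poll_until_done<T>(
    cx: &mut Context<'_>,
    mut poll_event: impl FnMut(&mut Context<'_>) -> Poll<()>,
    mut check_done: impl FnMut() -> Option<T>,
) -> Poll<T> {
    loop {
        // Arm the wakeup first: an event landing between check_done()
        // and a later poll_event() could otherwise be lost forever.
        let armed = poll_event(cx).is_pending();

        if let Some(value) = check_done() {
            return Poll::Ready(value);
        }

        if armed {
            // Wakeup registered and condition not met; safe to sleep.
            return Poll::Pending;
        }
        // An event was consumed without arming; re-check before sleeping.
    }
}
```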
diff --git a/third_party/rust/tokio/src/process/windows.rs b/third_party/rust/tokio/src/process/windows.rs
new file mode 100644
index 0000000000..cbe2fa7596
--- /dev/null
+++ b/third_party/rust/tokio/src/process/windows.rs
@@ -0,0 +1,191 @@
+//! Windows asynchronous process handling.
+//!
+//! Like with Unix we don't actually have a way of registering a process with an
+//! IOCP object. As a result we similarly need another mechanism for getting a
+//! signal when a process has exited. For now this is implemented with the
+//! `RegisterWaitForSingleObject` function in `kernel32.dll`.
+//!
+//! This strategy is the same one that libuv takes and essentially just queues
+//! up a wait for the process in a kernel32-specific thread pool. Once the
+//! object is notified (e.g. the process exits) then we have a callback that
+//! basically just completes a `oneshot`.
+//!
+//! The `poll_exit` implementation will attempt to wait for the process in a
+//! nonblocking fashion, but failing that it'll fire off a
+//! `RegisterWaitForSingleObject` and then wait on the other end of the oneshot
+//! from then on out.
+
+use crate::io::PollEvented;
+use crate::process::kill::Kill;
+use crate::process::SpawnedChild;
+use crate::sync::oneshot;
+
+use mio_named_pipes::NamedPipe;
+use std::fmt;
+use std::future::Future;
+use std::io;
+use std::os::windows::prelude::*;
+use std::os::windows::process::ExitStatusExt;
+use std::pin::Pin;
+use std::process::{Child as StdChild, Command as StdCommand, ExitStatus};
+use std::ptr;
+use std::task::Context;
+use std::task::Poll;
+use winapi::shared::minwindef::FALSE;
+use winapi::shared::winerror::WAIT_TIMEOUT;
+use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+use winapi::um::processthreadsapi::GetExitCodeProcess;
+use winapi::um::synchapi::WaitForSingleObject;
+use winapi::um::threadpoollegacyapiset::UnregisterWaitEx;
+use winapi::um::winbase::{RegisterWaitForSingleObject, INFINITE, WAIT_OBJECT_0};
+use winapi::um::winnt::{BOOLEAN, HANDLE, PVOID, WT_EXECUTEINWAITTHREAD, WT_EXECUTEONLYONCE};
+
+#[must_use = "futures do nothing unless polled"]
+pub(crate) struct Child {
+ child: StdChild,
+ waiting: Option<Waiting>,
+}
+
+impl fmt::Debug for Child {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Child")
+ .field("pid", &self.id())
+ .field("child", &self.child)
+ .field("waiting", &"..")
+ .finish()
+ }
+}
+
+struct Waiting {
+ rx: oneshot::Receiver<()>,
+ wait_object: HANDLE,
+ tx: *mut Option<oneshot::Sender<()>>,
+}
+
+unsafe impl Sync for Waiting {}
+unsafe impl Send for Waiting {}
+
+pub(crate) fn spawn_child(cmd: &mut StdCommand) -> io::Result<SpawnedChild> {
+ let mut child = cmd.spawn()?;
+ let stdin = stdio(child.stdin.take());
+ let stdout = stdio(child.stdout.take());
+ let stderr = stdio(child.stderr.take());
+
+ Ok(SpawnedChild {
+ child: Child {
+ child,
+ waiting: None,
+ },
+ stdin,
+ stdout,
+ stderr,
+ })
+}
+
+impl Child {
+ pub(crate) fn id(&self) -> u32 {
+ self.child.id()
+ }
+}
+
+impl Kill for Child {
+ fn kill(&mut self) -> io::Result<()> {
+ self.child.kill()
+ }
+}
+
+impl Future for Child {
+ type Output = io::Result<ExitStatus>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = Pin::get_mut(self);
+ loop {
+ if let Some(ref mut w) = inner.waiting {
+ match Pin::new(&mut w.rx).poll(cx) {
+ Poll::Ready(Ok(())) => {}
+ Poll::Ready(Err(_)) => panic!("should not be canceled"),
+ Poll::Pending => return Poll::Pending,
+ }
+ let status = try_wait(&inner.child)?.expect("not ready yet");
+ return Poll::Ready(Ok(status));
+ }
+
+ if let Some(e) = try_wait(&inner.child)? {
+ return Poll::Ready(Ok(e));
+ }
+ let (tx, rx) = oneshot::channel();
+ let ptr = Box::into_raw(Box::new(Some(tx)));
+ let mut wait_object = ptr::null_mut();
+ let rc = unsafe {
+ RegisterWaitForSingleObject(
+ &mut wait_object,
+ inner.child.as_raw_handle(),
+ Some(callback),
+ ptr as *mut _,
+ INFINITE,
+ WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE,
+ )
+ };
+ if rc == 0 {
+ let err = io::Error::last_os_error();
+ drop(unsafe { Box::from_raw(ptr) });
+ return Poll::Ready(Err(err));
+ }
+ inner.waiting = Some(Waiting {
+ rx,
+ wait_object,
+ tx: ptr,
+ });
+ }
+ }
+}
+
+impl Drop for Waiting {
+ fn drop(&mut self) {
+ unsafe {
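+            // Passing INVALID_HANDLE_VALUE makes UnregisterWaitEx block
+            // until any in-flight callback completes, so freeing `tx`
+            // below cannot race with `callback`.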
+ let rc = UnregisterWaitEx(self.wait_object, INVALID_HANDLE_VALUE);
+ if rc == 0 {
+ panic!("failed to unregister: {}", io::Error::last_os_error());
+ }
+ drop(Box::from_raw(self.tx));
+ }
+ }
+}
+
+unsafe extern "system" fn callback(ptr: PVOID, _timer_fired: BOOLEAN) {
+ let complete = &mut *(ptr as *mut Option<oneshot::Sender<()>>);
+ let _ = complete.take().unwrap().send(());
+}
+
+pub(crate) fn try_wait(child: &StdChild) -> io::Result<Option<ExitStatus>> {
+ unsafe {
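+        // Poll with a zero-millisecond timeout: WAIT_OBJECT_0 means the
+        // process has exited, WAIT_TIMEOUT means it is still running.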
+ match WaitForSingleObject(child.as_raw_handle(), 0) {
+ WAIT_OBJECT_0 => {}
+ WAIT_TIMEOUT => return Ok(None),
+ _ => return Err(io::Error::last_os_error()),
+ }
+ let mut status = 0;
+ let rc = GetExitCodeProcess(child.as_raw_handle(), &mut status);
+ if rc == FALSE {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(Some(ExitStatus::from_raw(status)))
+ }
+ }
+}
+
+pub(crate) type ChildStdin = PollEvented<NamedPipe>;
+pub(crate) type ChildStdout = PollEvented<NamedPipe>;
+pub(crate) type ChildStderr = PollEvented<NamedPipe>;
+
+fn stdio<T>(option: Option<T>) -> Option<PollEvented<NamedPipe>>
+where
+ T: IntoRawHandle,
+{
+ let io = match option {
+ Some(io) => io,
+ None => return None,
+ };
+ let pipe = unsafe { NamedPipe::from_raw_handle(io.into_raw_handle()) };
+ PollEvented::new(pipe).ok()
+}
diff --git a/third_party/rust/tokio/src/runtime/basic_scheduler.rs b/third_party/rust/tokio/src/runtime/basic_scheduler.rs
new file mode 100644
index 0000000000..301554280f
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/basic_scheduler.rs
@@ -0,0 +1,326 @@
+use crate::park::{Park, Unpark};
+use crate::runtime;
+use crate::runtime::task::{self, JoinHandle, Schedule, Task};
+use crate::util::linked_list::LinkedList;
+use crate::util::{waker_ref, Wake};
+
+use std::cell::RefCell;
+use std::collections::VecDeque;
+use std::fmt;
+use std::future::Future;
+use std::sync::{Arc, Mutex};
+use std::task::Poll::Ready;
+use std::time::Duration;
+
+/// Executes tasks on the current thread
+pub(crate) struct BasicScheduler<P>
+where
+ P: Park,
+{
+ /// Scheduler run queue
+ ///
+ /// When the scheduler is executed, the queue is removed from `self` and
+ /// moved into `Context`.
+ ///
+ /// This indirection is to allow `BasicScheduler` to be `Send`.
+ tasks: Option<Tasks>,
+
+ /// Sendable task spawner
+ spawner: Spawner,
+
+ /// Current tick
+ tick: u8,
+
+ /// Thread park handle
+ park: P,
+}
+
+#[derive(Clone)]
+pub(crate) struct Spawner {
+ shared: Arc<Shared>,
+}
+
+struct Tasks {
+ /// Collection of all active tasks spawned onto this executor.
+ owned: LinkedList<Task<Arc<Shared>>>,
+
+ /// Local run queue.
+ ///
+ /// Tasks notified from the current thread are pushed into this queue.
+ queue: VecDeque<task::Notified<Arc<Shared>>>,
+}
+
+/// Scheduler state shared between threads.
+struct Shared {
+ /// Remote run queue
+ queue: Mutex<VecDeque<task::Notified<Arc<Shared>>>>,
+
+ /// Unpark the blocked thread
+ unpark: Box<dyn Unpark>,
+}
+
+/// Thread-local context
+struct Context {
+ /// Shared scheduler state
+ shared: Arc<Shared>,
+
+ /// Local queue
+ tasks: RefCell<Tasks>,
+}
+
+/// Initial queue capacity
+const INITIAL_CAPACITY: usize = 64;
+
+/// Max number of tasks to poll per tick.
+const MAX_TASKS_PER_TICK: usize = 61;
+
+/// How often to check the remote queue first
+const REMOTE_FIRST_INTERVAL: u8 = 31;
+
+// Tracks the current BasicScheduler
+scoped_thread_local!(static CURRENT: Context);
+
+impl<P> BasicScheduler<P>
+where
+ P: Park,
+{
+ pub(crate) fn new(park: P) -> BasicScheduler<P> {
+ let unpark = Box::new(park.unpark());
+
+ BasicScheduler {
+ tasks: Some(Tasks {
+ owned: LinkedList::new(),
+ queue: VecDeque::with_capacity(INITIAL_CAPACITY),
+ }),
+ spawner: Spawner {
+ shared: Arc::new(Shared {
+ queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)),
+ unpark: unpark as Box<dyn Unpark>,
+ }),
+ },
+ tick: 0,
+ park,
+ }
+ }
+
+ pub(crate) fn spawner(&self) -> &Spawner {
+ &self.spawner
+ }
+
+    /// Spawns a future onto the basic scheduler
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ self.spawner.spawn(future)
+ }
+
+ pub(crate) fn block_on<F>(&mut self, future: F) -> F::Output
+ where
+ F: Future,
+ {
+ enter(self, |scheduler, context| {
+ let _enter = runtime::enter();
+ let waker = waker_ref(&scheduler.spawner.shared);
+ let mut cx = std::task::Context::from_waker(&waker);
+
+ pin!(future);
+
+ 'outer: loop {
+ if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) {
+ return v;
+ }
+
+ for _ in 0..MAX_TASKS_PER_TICK {
+ // Get and increment the current tick
+ let tick = scheduler.tick;
+ scheduler.tick = scheduler.tick.wrapping_add(1);
+
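+                // Fairness: every REMOTE_FIRST_INTERVAL ticks, check the
+                // remote queue first so remotely-woken tasks cannot be
+                // starved by a busy local queue.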
+ let next = if tick % REMOTE_FIRST_INTERVAL == 0 {
+ scheduler
+ .spawner
+ .pop()
+ .or_else(|| context.tasks.borrow_mut().queue.pop_front())
+ } else {
+ context
+ .tasks
+ .borrow_mut()
+ .queue
+ .pop_front()
+ .or_else(|| scheduler.spawner.pop())
+ };
+
+ match next {
+ Some(task) => crate::coop::budget(|| task.run()),
+ None => {
+ // Park until the thread is signaled
+ scheduler.park.park().ok().expect("failed to park");
+
+ // Try polling the `block_on` future next
+ continue 'outer;
+ }
+ }
+ }
+
+                // Yield to the park; this drives the timer and pulls any pending
+ // I/O events.
+ scheduler
+ .park
+ .park_timeout(Duration::from_millis(0))
+ .ok()
+ .expect("failed to park");
+ }
+ })
+ }
+}
+
+/// Enters the scheduler context. This sets the queue and other necessary
+/// scheduler state in the thread-local.
+fn enter<F, R, P>(scheduler: &mut BasicScheduler<P>, f: F) -> R
+where
+ F: FnOnce(&mut BasicScheduler<P>, &Context) -> R,
+ P: Park,
+{
+    // Ensures the run queue is placed back in the `BasicScheduler` instance
+    // once `block_on` returns.
+ struct Guard<'a, P: Park> {
+ context: Option<Context>,
+ scheduler: &'a mut BasicScheduler<P>,
+ }
+
+ impl<P: Park> Drop for Guard<'_, P> {
+ fn drop(&mut self) {
+ let Context { tasks, .. } = self.context.take().expect("context missing");
+ self.scheduler.tasks = Some(tasks.into_inner());
+ }
+ }
+
+ // Remove `tasks` from `self` and place it in a `Context`.
+ let tasks = scheduler.tasks.take().expect("invalid state");
+
+ let guard = Guard {
+ context: Some(Context {
+ shared: scheduler.spawner.shared.clone(),
+ tasks: RefCell::new(tasks),
+ }),
+ scheduler,
+ };
+
+ let context = guard.context.as_ref().unwrap();
+ let scheduler = &mut *guard.scheduler;
+
+ CURRENT.set(context, || f(scheduler, context))
+}
+
+impl<P> Drop for BasicScheduler<P>
+where
+ P: Park,
+{
+ fn drop(&mut self) {
+ enter(self, |scheduler, context| {
+ // Loop required here to ensure borrow is dropped between iterations
+ #[allow(clippy::while_let_loop)]
+ loop {
+ let task = match context.tasks.borrow_mut().owned.pop_back() {
+ Some(task) => task,
+ None => break,
+ };
+
+ task.shutdown();
+ }
+
+ // Drain local queue
+ for task in context.tasks.borrow_mut().queue.drain(..) {
+ task.shutdown();
+ }
+
+ // Drain remote queue
+ for task in scheduler.spawner.shared.queue.lock().unwrap().drain(..) {
+ task.shutdown();
+ }
+
+ assert!(context.tasks.borrow().owned.is_empty());
+ });
+ }
+}
+
+impl<P: Park> fmt::Debug for BasicScheduler<P> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("BasicScheduler").finish()
+ }
+}
+
+// ===== impl Spawner =====
+
+impl Spawner {
+    /// Spawns a future onto the basic scheduler
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ let (task, handle) = task::joinable(future);
+ self.shared.schedule(task);
+ handle
+ }
+
+ fn pop(&self) -> Option<task::Notified<Arc<Shared>>> {
+ self.shared.queue.lock().unwrap().pop_front()
+ }
+}
+
+impl fmt::Debug for Spawner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Spawner").finish()
+ }
+}
+
+// ===== impl Shared =====
+
+impl Schedule for Arc<Shared> {
+ fn bind(task: Task<Self>) -> Arc<Shared> {
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+ cx.tasks.borrow_mut().owned.push_front(task);
+ cx.shared.clone()
+ })
+ }
+
+ fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
+ use std::ptr::NonNull;
+
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ // safety: the task is inserted in the list in `bind`.
+ unsafe {
+ let ptr = NonNull::from(task.header());
+ cx.tasks.borrow_mut().owned.remove(ptr)
+ }
+ })
+ }
+
+ fn schedule(&self, task: task::Notified<Self>) {
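+        // If called from the scheduler thread of *this* scheduler, push to
+        // the cheap local queue; otherwise go through the mutex-protected
+        // remote queue and unpark the scheduler thread.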
+ CURRENT.with(|maybe_cx| match maybe_cx {
+ Some(cx) if Arc::ptr_eq(self, &cx.shared) => {
+ cx.tasks.borrow_mut().queue.push_back(task);
+ }
+ _ => {
+ self.queue.lock().unwrap().push_back(task);
+ self.unpark.unpark();
+ }
+ });
+ }
+}
+
+impl Wake for Shared {
+ fn wake(self: Arc<Self>) {
+ Wake::wake_by_ref(&self)
+ }
+
+ /// Wake by reference
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.unpark.unpark();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/mod.rs b/third_party/rust/tokio/src/runtime/blocking/mod.rs
new file mode 100644
index 0000000000..5c808335cc
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/mod.rs
@@ -0,0 +1,43 @@
+//! Abstracts out the APIs necessary to `Runtime` for integrating the blocking
+//! pool. When the `blocking` feature flag is **not** enabled, these APIs are
+//! shells. This isolates the complexity of dealing with conditional
+//! compilation.
+
+cfg_blocking_impl! {
+ mod pool;
+ pub(crate) use pool::{spawn_blocking, try_spawn_blocking, BlockingPool, Spawner};
+
+ mod schedule;
+ mod shutdown;
+ mod task;
+
+ use crate::runtime::Builder;
+
+ pub(crate) fn create_blocking_pool(builder: &Builder, thread_cap: usize) -> BlockingPool {
+        BlockingPool::new(builder, thread_cap)
+    }
+}
+
+cfg_not_blocking_impl! {
+ use crate::runtime::Builder;
+ use std::time::Duration;
+
+ #[derive(Debug, Clone)]
+ pub(crate) struct BlockingPool {}
+
+ pub(crate) use BlockingPool as Spawner;
+
+ pub(crate) fn create_blocking_pool(_builder: &Builder, _thread_cap: usize) -> BlockingPool {
+ BlockingPool {}
+ }
+
+ impl BlockingPool {
+ pub(crate) fn spawner(&self) -> &BlockingPool {
+ self
+ }
+
+ pub(crate) fn shutdown(&mut self, _duration: Option<Duration>) {
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/pool.rs b/third_party/rust/tokio/src/runtime/blocking/pool.rs
new file mode 100644
index 0000000000..a3b208d171
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/pool.rs
@@ -0,0 +1,307 @@
+//! Thread pool for blocking operations
+
+use crate::loom::sync::{Arc, Condvar, Mutex};
+use crate::loom::thread;
+use crate::runtime::blocking::schedule::NoopSchedule;
+use crate::runtime::blocking::shutdown;
+use crate::runtime::blocking::task::BlockingTask;
+use crate::runtime::task::{self, JoinHandle};
+use crate::runtime::{Builder, Callback, Handle};
+
+use std::collections::VecDeque;
+use std::fmt;
+use std::time::Duration;
+
+pub(crate) struct BlockingPool {
+ spawner: Spawner,
+ shutdown_rx: shutdown::Receiver,
+}
+
+#[derive(Clone)]
+pub(crate) struct Spawner {
+ inner: Arc<Inner>,
+}
+
+struct Inner {
+ /// State shared between worker threads
+ shared: Mutex<Shared>,
+
+ /// Pool threads wait on this.
+ condvar: Condvar,
+
+ /// Spawned threads use this name
+ thread_name: String,
+
+ /// Spawned thread stack size
+ stack_size: Option<usize>,
+
+ /// Call after a thread starts
+ after_start: Option<Callback>,
+
+ /// Call before a thread stops
+ before_stop: Option<Callback>,
+
+ thread_cap: usize,
+}
+
+struct Shared {
+ queue: VecDeque<Task>,
+ num_th: usize,
+ num_idle: u32,
+ num_notify: u32,
+ shutdown: bool,
+ shutdown_tx: Option<shutdown::Sender>,
+}
+
+type Task = task::Notified<NoopSchedule>;
+
+const KEEP_ALIVE: Duration = Duration::from_secs(10);
+
+/// Run the provided function on an executor dedicated to blocking operations.
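+///
+/// A minimal usage sketch (illustrative only; it assumes a runtime context
+/// is active, since `Handle::current()` panics otherwise, and
+/// `expensive_sync_work` is a hypothetical blocking function):
+///
+/// ```ignore
+/// let join = spawn_blocking(|| expensive_sync_work());
+/// // `join` is a `JoinHandle` that resolves with the closure's result.
+/// ```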
+pub(crate) fn spawn_blocking<F, R>(func: F) -> JoinHandle<R>
+where
+ F: FnOnce() -> R + Send + 'static,
+{
+ let rt = Handle::current();
+
+ let (task, handle) = task::joinable(BlockingTask::new(func));
+ let _ = rt.blocking_spawner.spawn(task, &rt);
+ handle
+}
+
+#[allow(dead_code)]
+pub(crate) fn try_spawn_blocking<F, R>(func: F) -> Result<(), ()>
+where
+ F: FnOnce() -> R + Send + 'static,
+{
+ let rt = Handle::current();
+
+ let (task, _handle) = task::joinable(BlockingTask::new(func));
+ rt.blocking_spawner.spawn(task, &rt)
+}
+
+// ===== impl BlockingPool =====
+
+impl BlockingPool {
+ pub(crate) fn new(builder: &Builder, thread_cap: usize) -> BlockingPool {
+ let (shutdown_tx, shutdown_rx) = shutdown::channel();
+
+ BlockingPool {
+ spawner: Spawner {
+ inner: Arc::new(Inner {
+ shared: Mutex::new(Shared {
+ queue: VecDeque::new(),
+ num_th: 0,
+ num_idle: 0,
+ num_notify: 0,
+ shutdown: false,
+ shutdown_tx: Some(shutdown_tx),
+ }),
+ condvar: Condvar::new(),
+ thread_name: builder.thread_name.clone(),
+ stack_size: builder.thread_stack_size,
+ after_start: builder.after_start.clone(),
+ before_stop: builder.before_stop.clone(),
+ thread_cap,
+ }),
+ },
+ shutdown_rx,
+ }
+ }
+
+ pub(crate) fn spawner(&self) -> &Spawner {
+ &self.spawner
+ }
+
+ pub(crate) fn shutdown(&mut self, timeout: Option<Duration>) {
+ let mut shared = self.spawner.inner.shared.lock().unwrap();
+
+ // The function can be called multiple times. First, by explicitly
+ // calling `shutdown` then by the drop handler calling `shutdown`. This
+ // prevents shutting down twice.
+ if shared.shutdown {
+ return;
+ }
+
+ shared.shutdown = true;
+ shared.shutdown_tx = None;
+ self.spawner.inner.condvar.notify_all();
+
+ drop(shared);
+
+ self.shutdown_rx.wait(timeout);
+ }
+}
+
+impl Drop for BlockingPool {
+ fn drop(&mut self) {
+ self.shutdown(None);
+ }
+}
+
+impl fmt::Debug for BlockingPool {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("BlockingPool").finish()
+ }
+}
+
+// ===== impl Spawner =====
+
+impl Spawner {
+ fn spawn(&self, task: Task, rt: &Handle) -> Result<(), ()> {
+ let shutdown_tx = {
+ let mut shared = self.inner.shared.lock().unwrap();
+
+ if shared.shutdown {
+ // Shutdown the task
+ task.shutdown();
+
+ // no need to even push this task; it would never get picked up
+ return Err(());
+ }
+
+ shared.queue.push_back(task);
+
+ if shared.num_idle == 0 {
+ // No threads are able to process the task.
+
+ if shared.num_th == self.inner.thread_cap {
+ // At max number of threads
+ None
+ } else {
+ shared.num_th += 1;
+ assert!(shared.shutdown_tx.is_some());
+ shared.shutdown_tx.clone()
+ }
+ } else {
+                // Notify an idle worker thread. The notification counter
+                // is used to count the needed number of notifications
+                // exactly. Thread libraries may generate spurious wakeups;
+                // this counter keeps us in a consistent state.
+ shared.num_idle -= 1;
+ shared.num_notify += 1;
+ self.inner.condvar.notify_one();
+ None
+ }
+ };
+
+ if let Some(shutdown_tx) = shutdown_tx {
+ self.spawn_thread(shutdown_tx, rt);
+ }
+
+ Ok(())
+ }
+
+ fn spawn_thread(&self, shutdown_tx: shutdown::Sender, rt: &Handle) {
+ let mut builder = thread::Builder::new().name(self.inner.thread_name.clone());
+
+ if let Some(stack_size) = self.inner.stack_size {
+ builder = builder.stack_size(stack_size);
+ }
+
+ let rt = rt.clone();
+
+ builder
+ .spawn(move || {
+ // Only the reference should be moved into the closure
+ let rt = &rt;
+ rt.enter(move || {
+ rt.blocking_spawner.inner.run();
+ drop(shutdown_tx);
+ })
+ })
+ .unwrap();
+ }
+}
+
+impl Inner {
+ fn run(&self) {
+ if let Some(f) = &self.after_start {
+ f()
+ }
+
+ let mut shared = self.shared.lock().unwrap();
+
+ 'main: loop {
+ // BUSY
+ while let Some(task) = shared.queue.pop_front() {
+ drop(shared);
+ task.run();
+
+ shared = self.shared.lock().unwrap();
+ }
+
+ // IDLE
+ shared.num_idle += 1;
+
+ while !shared.shutdown {
+ let lock_result = self.condvar.wait_timeout(shared, KEEP_ALIVE).unwrap();
+
+ shared = lock_result.0;
+ let timeout_result = lock_result.1;
+
+ if shared.num_notify != 0 {
+ // We have received a legitimate wakeup,
+ // acknowledge it by decrementing the counter
+ // and transition to the BUSY state.
+ shared.num_notify -= 1;
+ break;
+ }
+
+ // Even if the condvar "timed out", if the pool is entering the
+ // shutdown phase, we want to perform the cleanup logic.
+ if !shared.shutdown && timeout_result.timed_out() {
+ break 'main;
+ }
+
+ // Spurious wakeup detected, go back to sleep.
+ }
+
+ if shared.shutdown {
+ // Drain the queue
+ while let Some(task) = shared.queue.pop_front() {
+ drop(shared);
+ task.shutdown();
+
+ shared = self.shared.lock().unwrap();
+ }
+
+ // Work was produced, and we "took" it (by decrementing num_notify).
+ // This means that num_idle was decremented once for our wakeup.
+ // But, since we are exiting, we need to "undo" that, as we'll stay idle.
+ shared.num_idle += 1;
+ // NOTE: Technically we should also do num_notify++ and notify again,
+ // but since we're shutting down anyway, that won't be necessary.
+ break;
+ }
+ }
+
+ // Thread exit
+ shared.num_th -= 1;
+
+ // num_idle should now be tracked exactly, panic
+ // with a descriptive message if it is not the
+ // case.
+ shared.num_idle = shared
+ .num_idle
+ .checked_sub(1)
+ .expect("num_idle underflowed on thread exit");
+
+ if shared.shutdown && shared.num_th == 0 {
+ self.condvar.notify_one();
+ }
+
+ drop(shared);
+
+ if let Some(f) = &self.before_stop {
+ f()
+ }
+ }
+}
+
+impl fmt::Debug for Spawner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("blocking::Spawner").finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/schedule.rs b/third_party/rust/tokio/src/runtime/blocking/schedule.rs
new file mode 100644
index 0000000000..e10778d530
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/schedule.rs
@@ -0,0 +1,24 @@
+use crate::runtime::task::{self, Task};
+
+/// `task::Schedule` implementation that does nothing. This is unique to the
+/// blocking scheduler as tasks scheduled are not really futures but blocking
+/// operations.
+///
+/// We avoid storing the task by forgetting it in `bind` and re-materializing it
+/// in `release`.
+pub(super) struct NoopSchedule;
+
+impl task::Schedule for NoopSchedule {
+ fn bind(_task: Task<Self>) -> NoopSchedule {
+ // Do nothing w/ the task
+ NoopSchedule
+ }
+
+ fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
+ None
+ }
+
+ fn schedule(&self, _task: task::Notified<Self>) {
+ unreachable!();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/shutdown.rs b/third_party/rust/tokio/src/runtime/blocking/shutdown.rs
new file mode 100644
index 0000000000..5ee8af0fbc
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/shutdown.rs
@@ -0,0 +1,58 @@
+//! A shutdown channel.
+//!
+//! Each worker holds the `Sender` half. When all the `Sender` halves are
+//! dropped, the `Receiver` receives a notification.
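+//!
+//! A rough usage sketch (illustrative only):
+//!
+//! ```ignore
+//! let (tx, mut rx) = channel();
+//! let worker_tx = tx.clone(); // one clone per worker thread
+//! drop(tx);
+//! // ... workers eventually drop their `Sender` clones ...
+//! rx.wait(None); // returns once every `Sender` is gone
+//! ```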
+
+use crate::loom::sync::Arc;
+use crate::sync::oneshot;
+
+use std::time::Duration;
+
+#[derive(Debug, Clone)]
+pub(super) struct Sender {
+ tx: Arc<oneshot::Sender<()>>,
+}
+
+#[derive(Debug)]
+pub(super) struct Receiver {
+ rx: oneshot::Receiver<()>,
+}
+
+pub(super) fn channel() -> (Sender, Receiver) {
+ let (tx, rx) = oneshot::channel();
+ let tx = Sender { tx: Arc::new(tx) };
+ let rx = Receiver { rx };
+
+ (tx, rx)
+}
+
+impl Receiver {
+ /// Blocks the current thread until all `Sender` handles drop.
+ ///
+ /// If `timeout` is `Some`, the thread is blocked for **at most** `timeout`
+ /// duration. If `timeout` is `None`, then the thread is blocked until the
+ /// shutdown signal is received.
+ pub(crate) fn wait(&mut self, timeout: Option<Duration>) {
+ use crate::runtime::enter::{enter, try_enter};
+
+ let mut e = if std::thread::panicking() {
+ match try_enter() {
+ Some(enter) => enter,
+ _ => return,
+ }
+ } else {
+ enter()
+ };
+
+ // The oneshot completes with an Err
+ //
+ // If blocking fails to wait, this indicates a problem parking the
+ // current thread (usually, shutting down a runtime stored in a
+ // thread-local).
+ if let Some(timeout) = timeout {
+ let _ = e.block_on_timeout(&mut self.rx, timeout);
+ } else {
+ let _ = e.block_on(&mut self.rx);
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/blocking/task.rs b/third_party/rust/tokio/src/runtime/blocking/task.rs
new file mode 100644
index 0000000000..f98b85494c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/blocking/task.rs
@@ -0,0 +1,40 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Converts a function to a future that completes on poll
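+///
+/// A sketch of the intended behavior (illustrative only):
+///
+/// ```ignore
+/// let task = BlockingTask::new(|| 1 + 1);
+/// // The first poll runs the closure to completion and yields
+/// // `Poll::Ready(2)`; polling again would panic.
+/// ```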
+pub(super) struct BlockingTask<T> {
+ func: Option<T>,
+}
+
+impl<T> BlockingTask<T> {
+ /// Initializes a new blocking task from the given function
+ pub(super) fn new(func: T) -> BlockingTask<T> {
+ BlockingTask { func: Some(func) }
+ }
+}
+
+impl<T, R> Future for BlockingTask<T>
+where
+ T: FnOnce() -> R,
+{
+ type Output = R;
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<R> {
+ let me = unsafe { self.get_unchecked_mut() };
+ let func = me
+ .func
+ .take()
+ .expect("[internal exception] blocking task ran twice.");
+
+ // This is a little subtle:
+ // For convenience, we'd like _every_ call tokio ever makes to Task::poll() to be budgeted
+ // using coop. However, the way things are currently modeled, even running a blocking task
+ // currently goes through Task::poll(), and so is subject to budgeting. That isn't really
+ // what we want; a blocking task may itself want to run tasks (it might be a Worker!), so
+ // we want it to start without any budgeting.
+ crate::coop::stop();
+
+ Poll::Ready(func())
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/builder.rs b/third_party/rust/tokio/src/runtime/builder.rs
new file mode 100644
index 0000000000..cfde998251
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/builder.rs
@@ -0,0 +1,519 @@
+use crate::runtime::handle::Handle;
+use crate::runtime::shell::Shell;
+use crate::runtime::{blocking, io, time, Callback, Runtime, Spawner};
+
+use std::fmt;
+#[cfg(not(loom))]
+use std::sync::Arc;
+
+/// Builds Tokio Runtime with custom configuration values.
+///
+/// Methods can be chained in order to set the configuration values. The
+/// Runtime is constructed by calling [`build`].
+///
+/// New instances of `Builder` are obtained via [`Builder::new`].
+///
+/// See function level documentation for details on the various configuration
+/// settings.
+///
+/// [`build`]: #method.build
+/// [`Builder::new`]: #method.new
+///
+/// # Examples
+///
+/// ```
+/// use tokio::runtime::Builder;
+///
+/// fn main() {
+/// // build runtime
+/// let runtime = Builder::new()
+/// .threaded_scheduler()
+/// .core_threads(4)
+/// .thread_name("my-custom-name")
+/// .thread_stack_size(3 * 1024 * 1024)
+/// .build()
+/// .unwrap();
+///
+/// // use runtime ...
+/// }
+/// ```
+pub struct Builder {
+ /// The task execution model to use.
+ kind: Kind,
+
+ /// Whether or not to enable the I/O driver
+ enable_io: bool,
+
+ /// Whether or not to enable the time driver
+ enable_time: bool,
+
+ /// The number of worker threads, used by Runtime.
+ ///
+ /// Only used when not using the current-thread executor.
+ core_threads: Option<usize>,
+
+ /// Cap on thread usage.
+ max_threads: usize,
+
+ /// Name used for threads spawned by the runtime.
+ pub(super) thread_name: String,
+
+ /// Stack size used for threads spawned by the runtime.
+ pub(super) thread_stack_size: Option<usize>,
+
+ /// Callback to run after each thread starts.
+ pub(super) after_start: Option<Callback>,
+
+ /// To run before each worker thread stops
+ pub(super) before_stop: Option<Callback>,
+}
+
+#[derive(Debug, Clone, Copy)]
+enum Kind {
+ Shell,
+ #[cfg(feature = "rt-core")]
+ Basic,
+ #[cfg(feature = "rt-threaded")]
+ ThreadPool,
+}
+
+impl Builder {
+ /// Returns a new runtime builder initialized with default configuration
+ /// values.
+ ///
+ /// Configuration methods can be chained on the return value.
+ pub fn new() -> Builder {
+ Builder {
+ // No task execution by default
+ kind: Kind::Shell,
+
+ // I/O defaults to "off"
+ enable_io: false,
+
+ // Time defaults to "off"
+ enable_time: false,
+
+ // Default to lazy auto-detection (one thread per CPU core)
+ core_threads: None,
+
+ max_threads: 512,
+
+ // Default thread name
+ thread_name: "tokio-runtime-worker".into(),
+
+ // Do not set a stack size by default
+ thread_stack_size: None,
+
+ // No worker thread callbacks
+ after_start: None,
+ before_stop: None,
+ }
+ }
+
+ /// Enables both I/O and time drivers.
+ ///
+ /// Doing this is a shorthand for calling `enable_io` and `enable_time`
+ /// individually. If additional components are added to Tokio in the future,
+ /// `enable_all` will include these future components.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime;
+ ///
+ /// let rt = runtime::Builder::new()
+ /// .threaded_scheduler()
+ /// .enable_all()
+ /// .build()
+ /// .unwrap();
+ /// ```
+ pub fn enable_all(&mut self) -> &mut Self {
+ #[cfg(feature = "io-driver")]
+ self.enable_io();
+ #[cfg(feature = "time")]
+ self.enable_time();
+
+ self
+ }
+
+ #[deprecated(note = "In future will be replaced by core_threads method")]
+ /// Sets the maximum number of worker threads for the `Runtime`'s thread pool.
+ ///
+    /// This must be a number between 1 and 32,768, though it is advised to keep
+ /// this value on the smaller side.
+ ///
+ /// The default value is the number of cores available to the system.
+ pub fn num_threads(&mut self, val: usize) -> &mut Self {
+ self.core_threads = Some(val);
+ self
+ }
+
+ /// Sets the core number of worker threads for the `Runtime`'s thread pool.
+ ///
+    /// This should be a number between 1 and 32,768, though it is advised to keep
+ /// this value on the smaller side.
+ ///
+ /// The default value is the number of cores available to the system.
+ ///
+ /// These threads will be always active and running.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime;
+ ///
+ /// let rt = runtime::Builder::new()
+ /// .threaded_scheduler()
+ /// .core_threads(4)
+ /// .build()
+ /// .unwrap();
+ /// ```
+ pub fn core_threads(&mut self, val: usize) -> &mut Self {
+ assert_ne!(val, 0, "Core threads cannot be zero");
+ self.core_threads = Some(val);
+ self
+ }
+
+    /// Specifies the limit on the number of threads spawned by the Runtime.
+    ///
+    /// This is the total number of threads used by the Runtime, including
+    /// `core_threads`. Having `max_threads` less than `core_threads` results
+    /// in an invalid configuration when building a multi-threaded `Runtime`,
+    /// which will cause a panic.
+    ///
+    /// Similarly to `core_threads`, this number should be between 1 and 32,768.
+    ///
+    /// The default value is 512.
+    ///
+    /// When the multi-threaded runtime is not used, this acts as a limit on
+    /// additional threads.
+    ///
+    /// Otherwise, since `core_threads` are always active, the number of
+    /// additional threads (e.g. for blocking operations) is limited to
+    /// `max_threads - core_threads`.
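+    ///
+    /// # Examples
+    ///
+    /// A sketch combining `core_threads` and `max_threads` (the values are
+    /// illustrative):
+    ///
+    /// ```
+    /// use tokio::runtime;
+    ///
+    /// let rt = runtime::Builder::new()
+    ///     .threaded_scheduler()
+    ///     .core_threads(4)
+    ///     .max_threads(8)
+    ///     .build()
+    ///     .unwrap();
+    /// ```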
+ pub fn max_threads(&mut self, val: usize) -> &mut Self {
+ assert_ne!(val, 0, "Thread limit cannot be zero");
+ self.max_threads = val;
+ self
+ }
+
+ /// Sets name of threads spawned by the `Runtime`'s thread pool.
+ ///
+ /// The default name is "tokio-runtime-worker".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::runtime;
+ ///
+ /// # pub fn main() {
+ /// let rt = runtime::Builder::new()
+ /// .thread_name("my-pool")
+ /// .build();
+ /// # }
+ /// ```
+ pub fn thread_name(&mut self, val: impl Into<String>) -> &mut Self {
+ self.thread_name = val.into();
+ self
+ }
+
+ /// Sets the stack size (in bytes) for worker threads.
+ ///
+    /// The actual stack size may be greater than this value if the platform
+    /// specifies a minimal stack size.
+ ///
+ /// The default stack size for spawned threads is 2 MiB, though this
+ /// particular stack size is subject to change in the future.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::runtime;
+ ///
+ /// # pub fn main() {
+ /// let rt = runtime::Builder::new()
+ /// .threaded_scheduler()
+ /// .thread_stack_size(32 * 1024)
+ /// .build();
+ /// # }
+ /// ```
+ pub fn thread_stack_size(&mut self, val: usize) -> &mut Self {
+ self.thread_stack_size = Some(val);
+ self
+ }
+
+ /// Executes function `f` after each thread is started but before it starts
+ /// doing work.
+ ///
+ /// This is intended for bookkeeping and monitoring use cases.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::runtime;
+ ///
+ /// # pub fn main() {
+ /// let runtime = runtime::Builder::new()
+ /// .threaded_scheduler()
+ /// .on_thread_start(|| {
+ /// println!("thread started");
+ /// })
+ /// .build();
+ /// # }
+ /// ```
+ #[cfg(not(loom))]
+ pub fn on_thread_start<F>(&mut self, f: F) -> &mut Self
+ where
+ F: Fn() + Send + Sync + 'static,
+ {
+ self.after_start = Some(Arc::new(f));
+ self
+ }
+
+ /// Executes function `f` before each thread stops.
+ ///
+ /// This is intended for bookkeeping and monitoring use cases.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::runtime;
+ ///
+ /// # pub fn main() {
+ /// let runtime = runtime::Builder::new()
+ /// .threaded_scheduler()
+ /// .on_thread_stop(|| {
+ /// println!("thread stopping");
+ /// })
+ /// .build();
+ /// # }
+ /// ```
+ #[cfg(not(loom))]
+ pub fn on_thread_stop<F>(&mut self, f: F) -> &mut Self
+ where
+ F: Fn() + Send + Sync + 'static,
+ {
+ self.before_stop = Some(Arc::new(f));
+ self
+ }
+
+ /// Creates the configured `Runtime`.
+ ///
+    /// The returned `Runtime` instance is ready to spawn tasks.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Builder;
+ ///
+ /// let mut rt = Builder::new().build().unwrap();
+ ///
+ /// rt.block_on(async {
+ /// println!("Hello from the Tokio runtime");
+ /// });
+ /// ```
+ pub fn build(&mut self) -> io::Result<Runtime> {
+ match self.kind {
+ Kind::Shell => self.build_shell_runtime(),
+ #[cfg(feature = "rt-core")]
+ Kind::Basic => self.build_basic_runtime(),
+ #[cfg(feature = "rt-threaded")]
+ Kind::ThreadPool => self.build_threaded_runtime(),
+ }
+ }
+
+ fn build_shell_runtime(&mut self) -> io::Result<Runtime> {
+ use crate::runtime::Kind;
+
+ let clock = time::create_clock();
+
+ // Create I/O driver
+ let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+ let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+
+ let spawner = Spawner::Shell;
+
+ let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+ let blocking_spawner = blocking_pool.spawner().clone();
+
+ Ok(Runtime {
+ kind: Kind::Shell(Shell::new(driver)),
+ handle: Handle {
+ spawner,
+ io_handle,
+ time_handle,
+ clock,
+ blocking_spawner,
+ },
+ blocking_pool,
+ })
+ }
+}
+
+cfg_io_driver! {
+ impl Builder {
+ /// Enables the I/O driver.
+ ///
+ /// Doing this enables using net, process, signal, and some I/O types on
+ /// the runtime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime;
+ ///
+ /// let rt = runtime::Builder::new()
+ /// .enable_io()
+ /// .build()
+ /// .unwrap();
+ /// ```
+ pub fn enable_io(&mut self) -> &mut Self {
+ self.enable_io = true;
+ self
+ }
+ }
+}
+
+cfg_time! {
+ impl Builder {
+ /// Enables the time driver.
+ ///
+ /// Doing this enables using `tokio::time` on the runtime.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime;
+ ///
+ /// let rt = runtime::Builder::new()
+ /// .enable_time()
+ /// .build()
+ /// .unwrap();
+ /// ```
+ pub fn enable_time(&mut self) -> &mut Self {
+ self.enable_time = true;
+ self
+ }
+ }
+}
+
+cfg_rt_core! {
+ impl Builder {
+        /// Sets the runtime to use a simpler scheduler that runs all tasks on
+        /// the current thread.
+        ///
+        /// The executor and all necessary drivers will run on the current
+        /// thread during `block_on` calls.
+ ///
+ /// See also [the module level documentation][1], which has a section on scheduler
+ /// types.
+ ///
+ /// [1]: index.html#runtime-configurations
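+        ///
+        /// # Examples
+        ///
+        /// A sketch mirroring the module-level example:
+        ///
+        /// ```
+        /// use tokio::runtime;
+        ///
+        /// let basic_rt = runtime::Builder::new()
+        ///     .basic_scheduler()
+        ///     .build()
+        ///     .unwrap();
+        /// ```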
+ pub fn basic_scheduler(&mut self) -> &mut Self {
+ self.kind = Kind::Basic;
+ self
+ }
+
+ fn build_basic_runtime(&mut self) -> io::Result<Runtime> {
+ use crate::runtime::{BasicScheduler, Kind};
+
+ let clock = time::create_clock();
+
+ // Create I/O driver
+ let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+
+ let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+
+            // Now put a single-threaded scheduler on top of the timer. When
+            // no futures are ready to make progress, the scheduler lets the
+            // timer or the reactor generate new events for the futures to
+            // continue making progress.
+ let scheduler = BasicScheduler::new(driver);
+ let spawner = Spawner::Basic(scheduler.spawner().clone());
+
+ // Blocking pool
+ let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+ let blocking_spawner = blocking_pool.spawner().clone();
+
+ Ok(Runtime {
+ kind: Kind::Basic(scheduler),
+ handle: Handle {
+ spawner,
+ io_handle,
+ time_handle,
+ clock,
+ blocking_spawner,
+ },
+ blocking_pool,
+ })
+ }
+ }
+}
+
+cfg_rt_threaded! {
+ impl Builder {
+        /// Sets the runtime to use a multi-threaded scheduler for executing tasks.
+ ///
+ /// See also [the module level documentation][1], which has a section on scheduler
+ /// types.
+ ///
+ /// [1]: index.html#runtime-configurations
+ pub fn threaded_scheduler(&mut self) -> &mut Self {
+ self.kind = Kind::ThreadPool;
+ self
+ }
+
+ fn build_threaded_runtime(&mut self) -> io::Result<Runtime> {
+ use crate::runtime::{Kind, ThreadPool};
+ use crate::runtime::park::Parker;
+
+ let core_threads = self.core_threads.unwrap_or_else(crate::loom::sys::num_cpus);
+ assert!(core_threads <= self.max_threads, "Core threads number cannot be above max limit");
+
+ let clock = time::create_clock();
+
+ let (io_driver, io_handle) = io::create_driver(self.enable_io)?;
+ let (driver, time_handle) = time::create_driver(self.enable_time, io_driver, clock.clone());
+ let (scheduler, launch) = ThreadPool::new(core_threads, Parker::new(driver));
+ let spawner = Spawner::ThreadPool(scheduler.spawner().clone());
+
+ // Create the blocking pool
+ let blocking_pool = blocking::create_blocking_pool(self, self.max_threads);
+ let blocking_spawner = blocking_pool.spawner().clone();
+
+ // Create the runtime handle
+ let handle = Handle {
+ spawner,
+ io_handle,
+ time_handle,
+ clock,
+ blocking_spawner,
+ };
+
+ // Spawn the thread pool workers
+ handle.enter(|| launch.launch());
+
+ Ok(Runtime {
+ kind: Kind::ThreadPool(scheduler),
+ handle,
+ blocking_pool,
+ })
+ }
+ }
+}
+
+impl Default for Builder {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl fmt::Debug for Builder {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Builder")
+ .field("kind", &self.kind)
+ .field("core_threads", &self.core_threads)
+ .field("max_threads", &self.max_threads)
+ .field("thread_name", &self.thread_name)
+ .field("thread_stack_size", &self.thread_stack_size)
+ .field("after_start", &self.after_start.as_ref().map(|_| "..."))
+ .field("before_stop", &self.after_start.as_ref().map(|_| "..."))
+ .finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/context.rs b/third_party/rust/tokio/src/runtime/context.rs
new file mode 100644
index 0000000000..4af2df23eb
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/context.rs
@@ -0,0 +1,73 @@
+//! Thread local runtime context
+use crate::runtime::Handle;
+
+use std::cell::RefCell;
+
+thread_local! {
+ static CONTEXT: RefCell<Option<Handle>> = RefCell::new(None)
+}
+
+pub(crate) fn current() -> Option<Handle> {
+ CONTEXT.with(|ctx| ctx.borrow().clone())
+}
+
+cfg_io_driver! {
+ pub(crate) fn io_handle() -> crate::runtime::io::Handle {
+ CONTEXT.with(|ctx| match *ctx.borrow() {
+ Some(ref ctx) => ctx.io_handle.clone(),
+ None => Default::default(),
+ })
+ }
+}
+
+cfg_time! {
+ pub(crate) fn time_handle() -> crate::runtime::time::Handle {
+ CONTEXT.with(|ctx| match *ctx.borrow() {
+ Some(ref ctx) => ctx.time_handle.clone(),
+ None => Default::default(),
+ })
+ }
+
+ cfg_test_util! {
+ pub(crate) fn clock() -> Option<crate::runtime::time::Clock> {
+ CONTEXT.with(|ctx| match *ctx.borrow() {
+ Some(ref ctx) => Some(ctx.clock.clone()),
+ None => None,
+ })
+ }
+ }
+}
+
+cfg_rt_core! {
+ pub(crate) fn spawn_handle() -> Option<crate::runtime::Spawner> {
+ CONTEXT.with(|ctx| match *ctx.borrow() {
+ Some(ref ctx) => Some(ctx.spawner.clone()),
+ None => None,
+ })
+ }
+}
+
+/// Sets the given [`Handle`] as the current active runtime context for the
+/// duration of the closure `f`, restoring the previous handle afterwards.
+///
+/// [`Handle`]: crate::runtime::Handle
+pub(crate) fn enter<F, R>(new: Handle, f: F) -> R
+where
+ F: FnOnce() -> R,
+{
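+    // Restores the previously active handle when dropped, even if `f`
+    // panics, so the thread-local is never left pointing at `new`.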
+ struct DropGuard(Option<Handle>);
+
+ impl Drop for DropGuard {
+ fn drop(&mut self) {
+ CONTEXT.with(|ctx| {
+ *ctx.borrow_mut() = self.0.take();
+ });
+ }
+ }
+
+ let _guard = CONTEXT.with(|ctx| {
+ let old = ctx.borrow_mut().replace(new);
+ DropGuard(old)
+ });
+
+ f()
+}
diff --git a/third_party/rust/tokio/src/runtime/enter.rs b/third_party/rust/tokio/src/runtime/enter.rs
new file mode 100644
index 0000000000..afdb67a3b7
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/enter.rs
@@ -0,0 +1,162 @@
+use std::cell::{Cell, RefCell};
+use std::fmt;
+use std::marker::PhantomData;
+
+thread_local!(static ENTERED: Cell<bool> = Cell::new(false));
+
+/// Represents an executor context.
+pub(crate) struct Enter {
+ _p: PhantomData<RefCell<()>>,
+}
+
+/// Marks the current thread as being within the dynamic extent of an
+/// executor.
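+///
+/// A sketch of the intended pairing (illustrative only; the guard clears
+/// the "entered" flag when dropped):
+///
+/// ```ignore
+/// let guard = enter(); // panics if this thread is already in a runtime
+/// // ... block the thread here, driving futures to completion ...
+/// drop(guard);         // the thread may enter a runtime again
+/// ```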
+pub(crate) fn enter() -> Enter {
+ if let Some(enter) = try_enter() {
+ return enter;
+ }
+
+ panic!(
+ "Cannot start a runtime from within a runtime. This happens \
+ because a function (like `block_on`) attempted to block the \
+ current thread while the thread is being used to drive \
+ asynchronous tasks."
+ );
+}
+
+/// Tries to enter a runtime context, returns `None` if already in a runtime
+/// context.
+pub(crate) fn try_enter() -> Option<Enter> {
+ ENTERED.with(|c| {
+ if c.get() {
+ None
+ } else {
+ c.set(true);
+ Some(Enter { _p: PhantomData })
+ }
+ })
+}
+
+// Forces the current "entered" state to be cleared while the closure
+// is executed.
+//
+// # Warning
+//
+// This is hidden for a reason. Do not use without fully understanding
+// executors. Misusing it can easily cause your program to deadlock.
+#[cfg(all(feature = "rt-threaded", feature = "blocking"))]
+pub(crate) fn exit<F: FnOnce() -> R, R>(f: F) -> R {
+ // Reset in case the closure panics
+ struct Reset;
+ impl Drop for Reset {
+ fn drop(&mut self) {
+ ENTERED.with(|c| {
+ c.set(true);
+ });
+ }
+ }
+
+ ENTERED.with(|c| {
+ debug_assert!(c.get());
+ c.set(false);
+ });
+
+ let reset = Reset;
+ let ret = f();
+ std::mem::forget(reset);
+
+ ENTERED.with(|c| {
+ assert!(!c.get(), "closure claimed permanent executor");
+ c.set(true);
+ });
+
+ ret
+}
+
+cfg_blocking_impl! {
+ use crate::park::ParkError;
+ use std::time::Duration;
+
+ impl Enter {
+ /// Blocks the thread on the specified future, returning the value with
+ /// which that future completes.
+ pub(crate) fn block_on<F>(&mut self, mut f: F) -> Result<F::Output, ParkError>
+ where
+ F: std::future::Future,
+ {
+ use crate::park::{CachedParkThread, Park};
+ use std::pin::Pin;
+ use std::task::Context;
+ use std::task::Poll::Ready;
+
+ let mut park = CachedParkThread::new();
+ let waker = park.get_unpark()?.into_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ // `block_on` takes ownership of `f`. Once it is pinned here, the original `f` binding can
+ // no longer be accessed, making the pinning safe.
+ let mut f = unsafe { Pin::new_unchecked(&mut f) };
+
+ loop {
+ if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) {
+ return Ok(v);
+ }
+
+ park.park()?;
+ }
+ }
+
+        /// Blocks the thread on the specified future for **at most** `timeout`.
+ ///
+ /// If the future completes before `timeout`, the result is returned. If
+ /// `timeout` elapses, then `Err` is returned.
+ pub(crate) fn block_on_timeout<F>(&mut self, mut f: F, timeout: Duration) -> Result<F::Output, ParkError>
+ where
+ F: std::future::Future,
+ {
+ use crate::park::{CachedParkThread, Park};
+ use std::pin::Pin;
+ use std::task::Context;
+ use std::task::Poll::Ready;
+ use std::time::Instant;
+
+ let mut park = CachedParkThread::new();
+ let waker = park.get_unpark()?.into_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ // `block_on` takes ownership of `f`. Once it is pinned here, the original `f` binding can
+ // no longer be accessed, making the pinning safe.
+ let mut f = unsafe { Pin::new_unchecked(&mut f) };
+ let when = Instant::now() + timeout;
+
+ loop {
+ if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) {
+ return Ok(v);
+ }
+
+ let now = Instant::now();
+
+ if now >= when {
+ return Err(());
+ }
+
+ park.park_timeout(when - now)?;
+ }
+ }
+ }
+}
+
+impl fmt::Debug for Enter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Enter").finish()
+ }
+}
+
+impl Drop for Enter {
+ fn drop(&mut self) {
+ ENTERED.with(|c| {
+ assert!(c.get());
+ c.set(false);
+ });
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/handle.rs b/third_party/rust/tokio/src/runtime/handle.rs
new file mode 100644
index 0000000000..db53543e85
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/handle.rs
@@ -0,0 +1,140 @@
+use crate::runtime::{blocking, context, io, time, Spawner};
+use std::{error, fmt};
+
+cfg_rt_core! {
+ use crate::task::JoinHandle;
+
+ use std::future::Future;
+}
+
+/// Handle to the runtime.
+///
+/// The handle is internally reference-counted and can be freely cloned. A handle can be
+/// obtained using the [`Runtime::handle`] method.
+///
+/// [`Runtime::handle`]: crate::runtime::Runtime::handle()
+#[derive(Debug, Clone)]
+pub struct Handle {
+ pub(super) spawner: Spawner,
+
+ /// Handles to the I/O drivers
+ pub(super) io_handle: io::Handle,
+
+ /// Handles to the time drivers
+ pub(super) time_handle: time::Handle,
+
+ /// Source of `Instant::now()`
+ pub(super) clock: time::Clock,
+
+ /// Blocking pool spawner
+ pub(super) blocking_spawner: blocking::Spawner,
+}
+
+impl Handle {
+    /// Enters the runtime context.
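+    ///
+    /// A sketch (it requires a built runtime; the closure runs with this
+    /// handle set as the thread-local runtime context):
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    ///
+    /// let rt = Runtime::new().unwrap();
+    /// rt.handle().enter(|| {
+    ///     // Code here can reach the runtime's drivers through the
+    ///     // thread-local context.
+    /// });
+    /// ```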
+ pub fn enter<F, R>(&self, f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ context::enter(self.clone(), f)
+ }
+
+    /// Returns a Handle view over the currently running Runtime.
+ ///
+ /// # Panic
+ ///
+ /// This will panic if called outside the context of a Tokio runtime.
+ ///
+ /// # Examples
+ ///
+ /// This can be used to obtain the handle of the surrounding runtime from an async
+ /// block or function running on that runtime.
+ ///
+ /// ```
+ /// # use tokio::runtime::Runtime;
+ /// # fn dox() {
+ /// # let rt = Runtime::new().unwrap();
+ /// # rt.spawn(async {
+ /// use tokio::runtime::Handle;
+ ///
+ /// // Inside an async block or function.
+ /// let handle = Handle::current();
+ /// handle.spawn(async {
+ /// println!("now running in the existing Runtime");
+ /// })
+ /// # });
+ /// # }
+ /// ```
+ pub fn current() -> Self {
+ context::current().expect("not currently running on the Tokio runtime.")
+ }
+
+    /// Returns a Handle view over the currently running Runtime.
+    ///
+    /// Returns an error if no Runtime has been started.
+    ///
+    /// Contrary to `current`, this never panics.
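+    ///
+    /// # Examples
+    ///
+    /// A small sketch; outside of a runtime the `if let` does not match:
+    ///
+    /// ```
+    /// use tokio::runtime::Handle;
+    ///
+    /// if let Ok(_handle) = Handle::try_current() {
+    ///     // A runtime is active on this thread.
+    /// }
+    /// ```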
+ pub fn try_current() -> Result<Self, TryCurrentError> {
+ context::current().ok_or(TryCurrentError(()))
+ }
+}
+
+cfg_rt_core! {
+ impl Handle {
+ /// Spawns a future onto the Tokio runtime.
+ ///
+ /// This spawns the given future onto the runtime's executor, usually a
+ /// thread pool. The thread pool is then responsible for polling the future
+ /// until it completes.
+ ///
+ /// See [module level][mod] documentation for more details.
+ ///
+ /// [mod]: index.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ ///
+ /// # fn dox() {
+ /// // Create the runtime
+ /// let rt = Runtime::new().unwrap();
+ /// let handle = rt.handle();
+ ///
+ /// // Spawn a future onto the runtime
+ /// handle.spawn(async {
+ /// println!("now running on a worker thread");
+ /// });
+ /// # }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the spawn fails. Failure occurs if the executor
+ /// is currently at capacity and is unable to spawn a new future.
+ pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ self.spawner.spawn(future)
+ }
+ }
+}
+
+/// Error returned by `try_current` when no Runtime has been started
+pub struct TryCurrentError(());
+
+impl fmt::Debug for TryCurrentError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryCurrentError").finish()
+ }
+}
+
+impl fmt::Display for TryCurrentError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("no tokio Runtime has been initialized")
+ }
+}
+
+impl error::Error for TryCurrentError {}
diff --git a/third_party/rust/tokio/src/runtime/io.rs b/third_party/rust/tokio/src/runtime/io.rs
new file mode 100644
index 0000000000..6a0953af85
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/io.rs
@@ -0,0 +1,63 @@
+//! Abstracts out the APIs necessary to `Runtime` for integrating the I/O
+//! driver. When the `io-driver` feature flag is **not** enabled, these APIs
+//! are shells. This isolates the complexity of dealing with conditional
+//! compilation.
+
+/// Re-exported for convenience.
+pub(crate) use std::io::Result;
+
+pub(crate) use variant::*;
+
+#[cfg(feature = "io-driver")]
+mod variant {
+ use crate::io::driver;
+ use crate::park::{Either, ParkThread};
+
+ use std::io;
+
+ /// The driver value the runtime passes to the `timer` layer.
+ ///
+ /// When the `io-driver` feature is enabled, this is the "real" I/O driver
+ /// backed by Mio. Without the `io-driver` feature, this is a thread parker
+ /// backed by a condition variable.
+ pub(crate) type Driver = Either<driver::Driver, ParkThread>;
+
+ /// The handle the runtime stores for future use.
+ ///
+ /// When the `io-driver` feature is **not** enabled, this is `()`.
+ pub(crate) type Handle = Option<driver::Handle>;
+
+ pub(crate) fn create_driver(enable: bool) -> io::Result<(Driver, Handle)> {
+ #[cfg(loom)]
+ assert!(!enable);
+
+ if enable {
+ let driver = driver::Driver::new()?;
+ let handle = driver.handle();
+
+ Ok((Either::A(driver), Some(handle)))
+ } else {
+ let driver = ParkThread::new();
+ Ok((Either::B(driver), None))
+ }
+ }
+}
+
+#[cfg(not(feature = "io-driver"))]
+mod variant {
+ use crate::park::ParkThread;
+
+ use std::io;
+
+ /// I/O is not enabled, use a condition variable based parker
+ pub(crate) type Driver = ParkThread;
+
+ /// There is no handle
+ pub(crate) type Handle = ();
+
+ pub(crate) fn create_driver(_enable: bool) -> io::Result<(Driver, Handle)> {
+ let driver = ParkThread::new();
+
+ Ok((driver, ()))
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/mod.rs b/third_party/rust/tokio/src/runtime/mod.rs
new file mode 100644
index 0000000000..36b2b442ee
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/mod.rs
@@ -0,0 +1,494 @@
+//! The Tokio runtime.
+//!
+//! Unlike other Rust programs, asynchronous applications require
+//! runtime support. In particular, the following runtime services are
+//! necessary:
+//!
+//! * An **I/O event loop**, called the driver, which drives I/O resources and
+//! dispatches I/O events to tasks that depend on them.
+//! * A **scheduler** to execute [tasks] that use these I/O resources.
+//! * A **timer** for scheduling work to run after a set period of time.
+//!
+//! Tokio's [`Runtime`] bundles all of these services as a single type, allowing
+//! them to be started, shut down, and configured together. However, most
+//! applications won't need to use [`Runtime`] directly. Instead, they can
+//! use the [`tokio::main`] attribute macro, which creates a [`Runtime`] under
+//! the hood.
+//!
+//! # Usage
+//!
+//! Most applications will use the [`tokio::main`] attribute macro.
+//!
+//! ```no_run
+//! use tokio::net::TcpListener;
+//! use tokio::prelude::*;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+//!
+//! loop {
+//! let (mut socket, _) = listener.accept().await?;
+//!
+//! tokio::spawn(async move {
+//! let mut buf = [0; 1024];
+//!
+//! // In a loop, read data from the socket and write the data back.
+//! loop {
+//! let n = match socket.read(&mut buf).await {
+//! // socket closed
+//! Ok(n) if n == 0 => return,
+//! Ok(n) => n,
+//! Err(e) => {
+//! println!("failed to read from socket; err = {:?}", e);
+//! return;
+//! }
+//! };
+//!
+//! // Write the data back
+//! if let Err(e) = socket.write_all(&buf[0..n]).await {
+//! println!("failed to write to socket; err = {:?}", e);
+//! return;
+//! }
+//! }
+//! });
+//! }
+//! }
+//! ```
+//!
+//! From within the context of the runtime, additional tasks are spawned using
+//! the [`tokio::spawn`] function. Futures spawned using this function will be
+//! executed on the same thread pool used by the [`Runtime`].
+//!
+//! A [`Runtime`] instance can also be used directly.
+//!
+//! ```no_run
+//! use tokio::net::TcpListener;
+//! use tokio::prelude::*;
+//! use tokio::runtime::Runtime;
+//!
+//! fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // Create the runtime
+//! let mut rt = Runtime::new()?;
+//!
+//! // Spawn the root task
+//! rt.block_on(async {
+//! let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+//!
+//! loop {
+//! let (mut socket, _) = listener.accept().await?;
+//!
+//! tokio::spawn(async move {
+//! let mut buf = [0; 1024];
+//!
+//! // In a loop, read data from the socket and write the data back.
+//! loop {
+//! let n = match socket.read(&mut buf).await {
+//! // socket closed
+//! Ok(n) if n == 0 => return,
+//! Ok(n) => n,
+//! Err(e) => {
+//! println!("failed to read from socket; err = {:?}", e);
+//! return;
+//! }
+//! };
+//!
+//! // Write the data back
+//! if let Err(e) = socket.write_all(&buf[0..n]).await {
+//! println!("failed to write to socket; err = {:?}", e);
+//! return;
+//! }
+//! }
+//! });
+//! }
+//! })
+//! }
+//! ```
+//!
+//! ## Runtime Configurations
+//!
+//! Tokio provides multiple task scheduling strategies, suitable for different
+//! applications. The [runtime builder] or `#[tokio::main]` attribute may be
+//! used to select which scheduler to use.
+//!
+//! #### Basic Scheduler
+//!
+//! The basic scheduler provides a _single-threaded_ future executor. All tasks
+//! will be created and executed on the current thread. The basic scheduler
+//! requires the `rt-core` feature flag, and can be selected using the
+//! [`Builder::basic_scheduler`] method:
+//! ```
+//! use tokio::runtime;
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let basic_rt = runtime::Builder::new()
+//! .basic_scheduler()
+//! .build()?;
+//! # Ok(()) }
+//! ```
+//!
+//! If the `rt-core` feature is enabled and `rt-threaded` is not,
+//! [`Runtime::new`] will return a basic scheduler runtime by default.
+//!
+//! #### Threaded Scheduler
+//!
+//! The threaded scheduler executes futures on a _thread pool_, using a
+//! work-stealing strategy. By default, it will start a worker thread for each
+//! CPU core available on the system. This tends to be the ideal configuration
+//! for most applications. The threaded scheduler requires the `rt-threaded` feature
+//! flag, and can be selected using the [`Builder::threaded_scheduler`] method:
+//! ```
+//! use tokio::runtime;
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let threaded_rt = runtime::Builder::new()
+//! .threaded_scheduler()
+//! .build()?;
+//! # Ok(()) }
+//! ```
+//!
+//! If the `rt-threaded` feature flag is enabled, [`Runtime::new`] will return a
+//! threaded scheduler runtime by default.
+//!
+//! Most applications should use the threaded scheduler, except in some niche
+//! use-cases, such as when only a single thread is required.
+//!
+//! #### Resource drivers
+//!
+//! When configuring a runtime by hand, no resource drivers are enabled by
+//! default. In this case, attempting to use networking types or time types will
+//! fail. In order to enable these types, the resource drivers must be enabled.
+//! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a
+//! shorthand, [`Builder::enable_all`] enables both resource drivers.
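+//!
+//! For example, a sketch of a runtime with both drivers enabled:
+//!
+//! ```
+//! use tokio::runtime;
+//!
+//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let rt = runtime::Builder::new()
+//!     .threaded_scheduler()
+//!     .enable_all()
+//!     .build()?;
+//! # Ok(()) }
+//! ```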
+//!
+//! ## Lifetime of spawned threads
+//!
+//! The runtime may spawn threads depending on its configuration and usage. The
+//! threaded scheduler spawns threads to schedule tasks, and calls to
+//! `spawn_blocking` spawn threads to run blocking operations.
+//!
+//! While the `Runtime` is active, threads may shut down after periods of being
+//! idle. Once the `Runtime` is dropped, all runtime threads are forcibly shut down.
+//! Any tasks that have not yet completed will be dropped.
+//!
+//! [tasks]: crate::task
+//! [`Runtime`]: Runtime
+//! [`tokio::spawn`]: crate::spawn
+//! [`tokio::main`]: ../attr.main.html
+//! [runtime builder]: crate::runtime::Builder
+//! [`Runtime::new`]: crate::runtime::Runtime::new
+//! [`Builder::basic_scheduler`]: crate::runtime::Builder::basic_scheduler
+//! [`Builder::threaded_scheduler`]: crate::runtime::Builder::threaded_scheduler
+//! [`Builder::enable_io`]: crate::runtime::Builder::enable_io
+//! [`Builder::enable_time`]: crate::runtime::Builder::enable_time
+//! [`Builder::enable_all`]: crate::runtime::Builder::enable_all
+
+// At the top due to macros
+#[cfg(test)]
+#[macro_use]
+mod tests;
+
+pub(crate) mod context;
+
+cfg_rt_core! {
+ mod basic_scheduler;
+ use basic_scheduler::BasicScheduler;
+
+ pub(crate) mod task;
+}
+
+mod blocking;
+use blocking::BlockingPool;
+
+cfg_blocking_impl! {
+ #[allow(unused_imports)]
+ pub(crate) use blocking::{spawn_blocking, try_spawn_blocking};
+}
+
+mod builder;
+pub use self::builder::Builder;
+
+pub(crate) mod enter;
+use self::enter::enter;
+
+mod handle;
+pub use self::handle::{Handle, TryCurrentError};
+
+mod io;
+
+cfg_rt_threaded! {
+ mod park;
+ use park::Parker;
+}
+
+mod shell;
+use self::shell::Shell;
+
+mod spawner;
+use self::spawner::Spawner;
+
+mod time;
+
+cfg_rt_threaded! {
+ mod queue;
+
+ pub(crate) mod thread_pool;
+ use self::thread_pool::ThreadPool;
+}
+
+cfg_rt_core! {
+ use crate::task::JoinHandle;
+}
+
+use std::future::Future;
+use std::time::Duration;
+
+/// The Tokio runtime.
+///
+/// The runtime provides an I/O [driver], task scheduler, [timer], and blocking
+/// pool, necessary for running asynchronous tasks.
+///
+/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
+/// most users will use the `#[tokio::main]` annotation on their entry point instead.
+///
+/// See [module level][mod] documentation for more details.
+///
+/// # Shutdown
+///
+/// Shutting down the runtime is done by dropping the value. The current thread
+/// will block until the shutdown operation has completed.
+///
+/// * Drain any scheduled work queues.
+/// * Drop any futures that have not yet completed.
+/// * Drop the reactor.
+///
+/// Once the reactor has dropped, any outstanding I/O resources bound to
+/// that reactor will no longer function. Calling any method on them will
+/// result in an error.
+///
+/// [driver]: crate::io::driver
+/// [timer]: crate::time
+/// [mod]: index.html
+/// [`new`]: #method.new
+/// [`Builder`]: struct@Builder
+/// [`tokio::run`]: fn@run
+#[derive(Debug)]
+pub struct Runtime {
+ /// Task executor
+ kind: Kind,
+
+ /// Handle to runtime, also contains driver handles
+ handle: Handle,
+
+ /// Blocking pool handle, used to signal shutdown
+ blocking_pool: BlockingPool,
+}
+
+/// The runtime executor is either a thread-pool or a current-thread executor.
+#[derive(Debug)]
+enum Kind {
+ /// Not able to execute concurrent tasks. This variant is mostly used to get
+ /// access to the driver handles.
+ Shell(Shell),
+
+ /// Execute all tasks on the current-thread.
+ #[cfg(feature = "rt-core")]
+ Basic(BasicScheduler<time::Driver>),
+
+ /// Execute tasks across multiple threads.
+ #[cfg(feature = "rt-threaded")]
+ ThreadPool(ThreadPool),
+}
+
+/// After thread starts / before thread stops
+type Callback = std::sync::Arc<dyn Fn() + Send + Sync>;
+
+impl Runtime {
+ /// Create a new runtime instance with default configuration values.
+ ///
+ /// This results in a scheduler, I/O driver, and time driver being
+ /// initialized. The type of scheduler used depends on what feature flags
+ /// are enabled: if the `rt-threaded` feature is enabled, the [threaded
+ /// scheduler] is used, while if only the `rt-core` feature is enabled, the
+ /// [basic scheduler] is used instead.
+ ///
+    /// If the threaded scheduler is selected, it will not spawn any worker
+    /// threads until it needs to, i.e. until tasks are scheduled to run.
+ ///
+ /// Most applications will not need to call this function directly. Instead,
+ /// they will use the [`#[tokio::main]` attribute][main]. When more complex
+ /// configuration is necessary, the [runtime builder] may be used.
+ ///
+ /// See [module level][mod] documentation for more details.
+ ///
+ /// # Examples
+ ///
+ /// Creating a new `Runtime` with default configuration values.
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ ///
+ /// let rt = Runtime::new()
+ /// .unwrap();
+ ///
+ /// // Use the runtime...
+ /// ```
+ ///
+ /// [mod]: index.html
+ /// [main]: ../../tokio_macros/attr.main.html
+ /// [threaded scheduler]: index.html#threaded-scheduler
+ /// [basic scheduler]: index.html#basic-scheduler
+ /// [runtime builder]: crate::runtime::Builder
+ pub fn new() -> io::Result<Runtime> {
+ #[cfg(feature = "rt-threaded")]
+ let ret = Builder::new().threaded_scheduler().enable_all().build();
+
+ #[cfg(all(not(feature = "rt-threaded"), feature = "rt-core"))]
+ let ret = Builder::new().basic_scheduler().enable_all().build();
+
+ #[cfg(not(feature = "rt-core"))]
+ let ret = Builder::new().enable_all().build();
+
+ ret
+ }
+
+ /// Spawn a future onto the Tokio runtime.
+ ///
+ /// This spawns the given future onto the runtime's executor, usually a
+ /// thread pool. The thread pool is then responsible for polling the future
+ /// until it completes.
+ ///
+ /// See [module level][mod] documentation for more details.
+ ///
+ /// [mod]: index.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ ///
+ /// # fn dox() {
+ /// // Create the runtime
+ /// let rt = Runtime::new().unwrap();
+ ///
+ /// // Spawn a future onto the runtime
+ /// rt.spawn(async {
+ /// println!("now running on a worker thread");
+ /// });
+ /// # }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the spawn fails. Failure occurs if the executor
+ /// is currently at capacity and is unable to spawn a new future.
+ #[cfg(feature = "rt-core")]
+ pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ match &self.kind {
+ Kind::Shell(_) => panic!("task execution disabled"),
+ #[cfg(feature = "rt-threaded")]
+ Kind::ThreadPool(exec) => exec.spawn(future),
+ Kind::Basic(exec) => exec.spawn(future),
+ }
+ }
+
+ /// Run a future to completion on the Tokio runtime. This is the runtime's
+ /// entry point.
+ ///
+ /// This runs the given future on the runtime, blocking until it is
+ /// complete, and yielding its resolved result. Any tasks or timers which
+ /// the future spawns internally will be executed on the runtime.
+ ///
+ /// This method should not be called from an asynchronous context.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the executor is at capacity, if the provided
+ /// future panics, or if called within an asynchronous execution context.
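+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of driving a simple future to completion:
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    ///
+    /// // Create the runtime
+    /// let mut rt = Runtime::new().unwrap();
+    ///
+    /// // Execute the future, blocking the current thread until it completes
+    /// let out = rt.block_on(async { 1 + 2 });
+    /// assert_eq!(out, 3);
+    /// ```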
+ pub fn block_on<F: Future>(&mut self, future: F) -> F::Output {
+ let kind = &mut self.kind;
+
+ self.handle.enter(|| match kind {
+ Kind::Shell(exec) => exec.block_on(future),
+ #[cfg(feature = "rt-core")]
+ Kind::Basic(exec) => exec.block_on(future),
+ #[cfg(feature = "rt-threaded")]
+ Kind::ThreadPool(exec) => exec.block_on(future),
+ })
+ }
+
+ /// Enter the runtime context.
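+    ///
+    /// This allows functions that depend on an ambient runtime, such as
+    /// `tokio::spawn`, to be called from within the closure. A minimal sketch
+    /// (assuming the `rt-core` feature flag):
+    ///
+    /// ```
+    /// use tokio::runtime::Runtime;
+    ///
+    /// let rt = Runtime::new().unwrap();
+    ///
+    /// // Enter the runtime context so `tokio::spawn` can find the spawner.
+    /// rt.enter(|| {
+    ///     tokio::spawn(async {
+    ///         println!("spawned from within the context");
+    ///     });
+    /// });
+    /// ```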
+ pub fn enter<F, R>(&self, f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ self.handle.enter(f)
+ }
+
+ /// Return a handle to the runtime's spawner.
+ ///
+ /// The returned handle can be used to spawn tasks that run on this runtime, and can
+ /// be cloned to allow moving the `Handle` to other threads.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ ///
+ /// let rt = Runtime::new()
+ /// .unwrap();
+ ///
+ /// let handle = rt.handle();
+ ///
+ /// handle.spawn(async { println!("hello"); });
+ /// ```
+ pub fn handle(&self) -> &Handle {
+ &self.handle
+ }
+
+    /// Shut down the runtime, waiting for at most `duration` for all spawned
+    /// tasks to shut down.
+ ///
+    /// Usually, dropping a `Runtime` handle is sufficient, as tasks are able to
+    /// shut down in a timely fashion. However, dropping a `Runtime` will wait
+    /// indefinitely for all tasks to terminate, so a long-running blocking task
+    /// can block the drop of the `Runtime` indefinitely.
+ ///
+ /// In this case, calling `shutdown_timeout` with an explicit wait timeout
+    /// can work. `shutdown_timeout` will signal all tasks to shut down and
+    /// will wait for at most `duration` for all spawned tasks to terminate. If
+    /// `duration` elapses before all tasks are dropped, the function returns and
+ /// outstanding tasks are potentially leaked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ /// use tokio::task;
+ ///
+ /// use std::thread;
+ /// use std::time::Duration;
+ ///
+ /// fn main() {
+ /// let mut runtime = Runtime::new().unwrap();
+ ///
+ /// runtime.block_on(async move {
+ /// task::spawn_blocking(move || {
+ /// thread::sleep(Duration::from_secs(10_000));
+ /// });
+ /// });
+ ///
+ /// runtime.shutdown_timeout(Duration::from_millis(100));
+ /// }
+ /// ```
+ pub fn shutdown_timeout(self, duration: Duration) {
+ let Runtime {
+ mut blocking_pool, ..
+ } = self;
+ blocking_pool.shutdown(Some(duration));
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/park.rs b/third_party/rust/tokio/src/runtime/park.rs
new file mode 100644
index 0000000000..ee437d1d94
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/park.rs
@@ -0,0 +1,245 @@
+//! Parks the runtime.
+//!
+//! A combination of the various resource driver park handles.
+
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::{Arc, Condvar, Mutex};
+use crate::loom::thread;
+use crate::park::{Park, Unpark};
+use crate::runtime::time;
+use crate::util::TryLock;
+
+use std::sync::atomic::Ordering::SeqCst;
+use std::time::Duration;
+
+pub(crate) struct Parker {
+ inner: Arc<Inner>,
+}
+
+pub(crate) struct Unparker {
+ inner: Arc<Inner>,
+}
+
+struct Inner {
+ /// Avoids entering the park if possible
+ state: AtomicUsize,
+
+ /// Used to coordinate access to the driver / condvar
+ mutex: Mutex<()>,
+
+ /// Condvar to block on if the driver is unavailable.
+ condvar: Condvar,
+
+ /// Resource (I/O, time, ...) driver
+ shared: Arc<Shared>,
+}
+
+const EMPTY: usize = 0;
+const PARKED_CONDVAR: usize = 1;
+const PARKED_DRIVER: usize = 2;
+const NOTIFIED: usize = 3;
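+
+// Informal transition sketch: `park` moves the state from EMPTY to
+// PARKED_CONDVAR or PARKED_DRIVER and blocks; `unpark` swaps the state to
+// NOTIFIED and wakes the parked thread; a thread that observes NOTIFIED while
+// parking consumes the notification and resets the state to EMPTY.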
+
+/// Shared across multiple Parker handles
+struct Shared {
+ /// Shared driver. Only one thread at a time can use this
+ driver: TryLock<time::Driver>,
+
+ /// Unpark handle
+ handle: <time::Driver as Park>::Unpark,
+}
+
+impl Parker {
+ pub(crate) fn new(driver: time::Driver) -> Parker {
+ let handle = driver.unpark();
+
+ Parker {
+ inner: Arc::new(Inner {
+ state: AtomicUsize::new(EMPTY),
+ mutex: Mutex::new(()),
+ condvar: Condvar::new(),
+ shared: Arc::new(Shared {
+ driver: TryLock::new(driver),
+ handle,
+ }),
+ }),
+ }
+ }
+}
+
+impl Clone for Parker {
+ fn clone(&self) -> Parker {
+ Parker {
+ inner: Arc::new(Inner {
+ state: AtomicUsize::new(EMPTY),
+ mutex: Mutex::new(()),
+ condvar: Condvar::new(),
+ shared: self.inner.shared.clone(),
+ }),
+ }
+ }
+}
+
+impl Park for Parker {
+ type Unpark = Unparker;
+ type Error = ();
+
+ fn unpark(&self) -> Unparker {
+ Unparker {
+ inner: self.inner.clone(),
+ }
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ self.inner.park();
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ // Only parking with zero is supported...
+ assert_eq!(duration, Duration::from_millis(0));
+
+ if let Some(mut driver) = self.inner.shared.driver.try_lock() {
+ driver.park_timeout(duration).map_err(|_| ())
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl Unpark for Unparker {
+ fn unpark(&self) {
+ self.inner.unpark();
+ }
+}
+
+impl Inner {
+    /// Parks the current thread.
+ fn park(&self) {
+ for _ in 0..3 {
+ // If we were previously notified then we consume this notification and
+ // return quickly.
+ if self
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
+ .is_ok()
+ {
+ return;
+ }
+
+ thread::yield_now();
+ }
+
+ if let Some(mut driver) = self.shared.driver.try_lock() {
+ self.park_driver(&mut driver);
+ } else {
+ self.park_condvar();
+ }
+ }
+
+ fn park_condvar(&self) {
+ // Otherwise we need to coordinate going to sleep
+ let mut m = self.mutex.lock().unwrap();
+
+ match self
+ .state
+ .compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst)
+ {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+
+ return;
+ }
+ Err(actual) => panic!("inconsistent park state; actual = {}", actual),
+ }
+
+ loop {
+ m = self.condvar.wait(m).unwrap();
+
+ if self
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst)
+ .is_ok()
+ {
+ // got a notification
+ return;
+ }
+
+ // spurious wakeup, go back to sleep
+ }
+ }
+
+ fn park_driver(&self, driver: &mut time::Driver) {
+ match self
+ .state
+ .compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst)
+ {
+ Ok(_) => {}
+ Err(NOTIFIED) => {
+ // We must read here, even though we know it will be `NOTIFIED`.
+ // This is because `unpark` may have been called again since we read
+ // `NOTIFIED` in the `compare_exchange` above. We must perform an
+ // acquire operation that synchronizes with that `unpark` to observe
+ // any writes it made before the call to unpark. To do that we must
+ // read from the write it made to `state`.
+ let old = self.state.swap(EMPTY, SeqCst);
+ debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
+
+ return;
+ }
+ Err(actual) => panic!("inconsistent park state; actual = {}", actual),
+ }
+
+ // TODO: don't unwrap
+ driver.park().unwrap();
+
+ match self.state.swap(EMPTY, SeqCst) {
+ NOTIFIED => {} // got a notification, hurray!
+ PARKED_DRIVER => {} // no notification, alas
+ n => panic!("inconsistent park_timeout state: {}", n),
+ }
+ }
+
+ fn unpark(&self) {
+ // To ensure the unparked thread will observe any writes we made before
+ // this call, we must perform a release operation that `park` can
+ // synchronize with. To do that we must write `NOTIFIED` even if `state`
+ // is already `NOTIFIED`. That is why this must be a swap rather than a
+ // compare-and-swap that returns if it reads `NOTIFIED` on failure.
+ match self.state.swap(NOTIFIED, SeqCst) {
+ EMPTY => {} // no one was waiting
+ NOTIFIED => {} // already unparked
+ PARKED_CONDVAR => self.unpark_condvar(),
+ PARKED_DRIVER => self.unpark_driver(),
+ actual => panic!("inconsistent state in unpark; actual = {}", actual),
+ }
+ }
+
+ fn unpark_condvar(&self) {
+ // There is a period between when the parked thread sets `state` to
+ // `PARKED` (or last checked `state` in the case of a spurious wake
+ // up) and when it actually waits on `cvar`. If we were to notify
+ // during this period it would be ignored and then when the parked
+ // thread went to sleep it would never wake up. Fortunately, it has
+ // `lock` locked at this stage so we can acquire `lock` to wait until
+ // it is ready to receive the notification.
+ //
+ // Releasing `lock` before the call to `notify_one` means that when the
+ // parked thread wakes it doesn't get woken only to have to wait for us
+ // to release `lock`.
+ drop(self.mutex.lock().unwrap());
+
+ self.condvar.notify_one()
+ }
+
+ fn unpark_driver(&self) {
+ self.shared.handle.unpark();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/queue.rs b/third_party/rust/tokio/src/runtime/queue.rs
new file mode 100644
index 0000000000..c654514bbc
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/queue.rs
@@ -0,0 +1,630 @@
+//! Run-queue structures to support a work-stealing scheduler
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::sync::atomic::{AtomicU16, AtomicU32, AtomicUsize};
+use crate::loom::sync::{Arc, Mutex};
+use crate::runtime::task;
+
+use std::marker::PhantomData;
+use std::mem::MaybeUninit;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
+
+/// Producer handle. May only be used from a single thread.
+pub(super) struct Local<T: 'static> {
+ inner: Arc<Inner<T>>,
+}
+
+/// Consumer handle. May be used from many threads.
+pub(super) struct Steal<T: 'static>(Arc<Inner<T>>);
+
+/// Growable, MPMC queue used to inject new tasks into the scheduler and as an
+/// overflow queue when the local, fixed-size, array queue overflows.
+pub(super) struct Inject<T: 'static> {
+ /// Pointers to the head and tail of the queue
+ pointers: Mutex<Pointers>,
+
+ /// Number of pending tasks in the queue. This helps prevent unnecessary
+ /// locking in the hot path.
+ len: AtomicUsize,
+
+ _p: PhantomData<T>,
+}
+
+pub(super) struct Inner<T: 'static> {
+ /// Concurrently updated by many threads.
+ ///
+    /// Contains two `u16` values. The least-significant `u16` is the "real"
+    /// head of the queue. The most-significant `u16` is set by a stealer in
+    /// the process of stealing values. It represents the first value being
+    /// stolen in the batch. A `u16` index is used in order to distinguish
+    /// between `head == tail` and `head == tail - capacity`.
+ ///
+ /// When both `u16` values are the same, there is no active stealer.
+ ///
+ /// Tracking an in-progress stealer prevents a wrapping scenario.
+ head: AtomicU32,
+
+ /// Only updated by producer thread but read by many threads.
+ tail: AtomicU16,
+
+ /// Elements
+ buffer: Box<[UnsafeCell<MaybeUninit<task::Notified<T>>>]>,
+}
+
+struct Pointers {
+ /// True if the queue is closed
+ is_closed: bool,
+
+ /// Linked-list head
+ head: Option<NonNull<task::Header>>,
+
+ /// Linked-list tail
+ tail: Option<NonNull<task::Header>>,
+}
+
+unsafe impl<T> Send for Inner<T> {}
+unsafe impl<T> Sync for Inner<T> {}
+unsafe impl<T> Send for Inject<T> {}
+unsafe impl<T> Sync for Inject<T> {}
+
+#[cfg(not(loom))]
+const LOCAL_QUEUE_CAPACITY: usize = 256;
+
+// Shrink the size of the local queue when using loom. This shouldn't impact
+// logic, but allows loom to test more edge cases in a reasonable amount of
+// time.
+#[cfg(loom)]
+const LOCAL_QUEUE_CAPACITY: usize = 4;
+
+const MASK: usize = LOCAL_QUEUE_CAPACITY - 1;
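+
+// For example, with LOCAL_QUEUE_CAPACITY == 256, MASK == 0xFF; positions wrap
+// around the ring buffer by masking, e.g. `300 & MASK == 44`.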
+
+/// Create a new local run-queue
+pub(super) fn local<T: 'static>() -> (Steal<T>, Local<T>) {
+ let mut buffer = Vec::with_capacity(LOCAL_QUEUE_CAPACITY);
+
+ for _ in 0..LOCAL_QUEUE_CAPACITY {
+ buffer.push(UnsafeCell::new(MaybeUninit::uninit()));
+ }
+
+ let inner = Arc::new(Inner {
+ head: AtomicU32::new(0),
+ tail: AtomicU16::new(0),
+ buffer: buffer.into(),
+ });
+
+ let local = Local {
+ inner: inner.clone(),
+ };
+
+ let remote = Steal(inner);
+
+ (remote, local)
+}
+
+impl<T> Local<T> {
+    /// Returns true if the queue has entries that can be stolen.
+ pub(super) fn is_stealable(&self) -> bool {
+ !self.inner.is_empty()
+ }
+
+ /// Pushes a task to the back of the local queue, skipping the LIFO slot.
+ pub(super) fn push_back(&mut self, mut task: task::Notified<T>, inject: &Inject<T>) {
+ let tail = loop {
+ let head = self.inner.head.load(Acquire);
+ let (steal, real) = unpack(head);
+
+ // safety: this is the **only** thread that updates this cell.
+ let tail = unsafe { self.inner.tail.unsync_load() };
+
+ if tail.wrapping_sub(steal) < LOCAL_QUEUE_CAPACITY as u16 {
+ // There is capacity for the task
+ break tail;
+ } else if steal != real {
+ // Concurrently stealing, this will free up capacity, so
+ // only push the new task onto the inject queue
+ inject.push(task);
+ return;
+ } else {
+ // Push the current task and half of the queue into the
+ // inject queue.
+ match self.push_overflow(task, real, tail, inject) {
+ Ok(_) => return,
+ // Lost the race, try again
+ Err(v) => {
+ task = v;
+ }
+ }
+ }
+ };
+
+ // Map the position to a slot index.
+ let idx = tail as usize & MASK;
+
+ self.inner.buffer[idx].with_mut(|ptr| {
+ // Write the task to the slot
+ //
+ // Safety: There is only one producer and the above `if`
+ // condition ensures we don't touch a cell if there is a
+ // value, thus no consumer.
+ unsafe {
+ ptr::write((*ptr).as_mut_ptr(), task);
+ }
+ });
+
+ // Make the task available. Synchronizes with a load in
+ // `steal_into2`.
+ self.inner.tail.store(tail.wrapping_add(1), Release);
+ }
+
+ /// Moves a batch of tasks into the inject queue.
+ ///
+ /// This will temporarily make some of the tasks unavailable to stealers.
+ /// Once `push_overflow` is done, a notification is sent out, so if other
+ /// workers "missed" some of the tasks during a steal, they will get
+ /// another opportunity.
+ #[inline(never)]
+ fn push_overflow(
+ &mut self,
+ task: task::Notified<T>,
+ head: u16,
+ tail: u16,
+ inject: &Inject<T>,
+ ) -> Result<(), task::Notified<T>> {
+ const BATCH_LEN: usize = LOCAL_QUEUE_CAPACITY / 2 + 1;
+
+ let n = (LOCAL_QUEUE_CAPACITY / 2) as u16;
+ assert_eq!(
+ tail.wrapping_sub(head) as usize,
+ LOCAL_QUEUE_CAPACITY,
+ "queue is not full; tail = {}; head = {}",
+ tail,
+ head
+ );
+
+ let prev = pack(head, head);
+
+ // Claim a bunch of tasks
+ //
+ // We are claiming the tasks **before** reading them out of the buffer.
+ // This is safe because only the **current** thread is able to push new
+ // tasks.
+ //
+ // There isn't really any need for memory ordering... Relaxed would
+ // work. This is because all tasks are pushed into the queue from the
+ // current thread (or memory has been acquired if the local queue handle
+ // moved).
+ let actual = self.inner.head.compare_and_swap(
+ prev,
+ pack(head.wrapping_add(n), head.wrapping_add(n)),
+ Release,
+ );
+
+ if actual != prev {
+ // We failed to claim the tasks, losing the race. Return out of
+ // this function and try the full `push` routine again. The queue
+ // may not be full anymore.
+ return Err(task);
+ }
+
+ // link the tasks
+ for i in 0..n {
+ let j = i + 1;
+
+ let i_idx = i.wrapping_add(head) as usize & MASK;
+ let j_idx = j.wrapping_add(head) as usize & MASK;
+
+ // Get the next pointer
+ let next = if j == n {
+ // The last task in the local queue being moved
+ task.header().into()
+ } else {
+ // safety: The above CAS prevents a stealer from accessing these
+ // tasks and we are the only producer.
+ self.inner.buffer[j_idx].with(|ptr| unsafe {
+ let value = (*ptr).as_ptr();
+ (*value).header().into()
+ })
+ };
+
+ // safety: the above CAS prevents a stealer from accessing these
+ // tasks and we are the only producer.
+ self.inner.buffer[i_idx].with_mut(|ptr| unsafe {
+ let ptr = (*ptr).as_ptr();
+ (*ptr).header().queue_next.with_mut(|ptr| *ptr = Some(next));
+ });
+ }
+
+ // safety: the above CAS prevents a stealer from accessing these tasks
+ // and we are the only producer.
+ let head = self.inner.buffer[head as usize & MASK]
+ .with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
+
+ // Push the tasks onto the inject queue
+ inject.push_batch(head, task, BATCH_LEN);
+
+ Ok(())
+ }
+
+ /// Pops a task from the local queue.
+ pub(super) fn pop(&mut self) -> Option<task::Notified<T>> {
+ let mut head = self.inner.head.load(Acquire);
+
+ let idx = loop {
+ let (steal, real) = unpack(head);
+
+ // safety: this is the **only** thread that updates this cell.
+ let tail = unsafe { self.inner.tail.unsync_load() };
+
+ if real == tail {
+ // queue is empty
+ return None;
+ }
+
+ let next_real = real.wrapping_add(1);
+
+ // If `steal == real` there are no concurrent stealers. Both `steal`
+ // and `real` are updated.
+ let next = if steal == real {
+ pack(next_real, next_real)
+ } else {
+ assert_ne!(steal, next_real);
+ pack(steal, next_real)
+ };
+
+ // Attempt to claim a task.
+ let res = self
+ .inner
+ .head
+ .compare_exchange(head, next, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => break real as usize & MASK,
+ Err(actual) => head = actual,
+ }
+ };
+
+ Some(self.inner.buffer[idx].with(|ptr| unsafe { ptr::read(ptr).assume_init() }))
+ }
+}
+
+impl<T> Steal<T> {
+ pub(super) fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+    /// Steals half the tasks from `self` and places them into `dst`.
+ pub(super) fn steal_into(&self, dst: &mut Local<T>) -> Option<task::Notified<T>> {
+ // Safety: the caller is the only thread that mutates `dst.tail` and
+ // holds a mutable reference.
+ let dst_tail = unsafe { dst.inner.tail.unsync_load() };
+
+ // To the caller, `dst` may **look** empty but still have values
+ // contained in the buffer. If another thread is concurrently stealing
+ // from `dst` there may not be enough capacity to steal.
+ let (steal, _) = unpack(dst.inner.head.load(Acquire));
+
+ if dst_tail.wrapping_sub(steal) > LOCAL_QUEUE_CAPACITY as u16 / 2 {
+ // we *could* try to steal less here, but for simplicity, we're just
+ // going to abort.
+ return None;
+ }
+
+ // Steal the tasks into `dst`'s buffer. This does not yet expose the
+ // tasks in `dst`.
+ let mut n = self.steal_into2(dst, dst_tail);
+
+ if n == 0 {
+ // No tasks were stolen
+ return None;
+ }
+
+ // We are returning a task here
+ n -= 1;
+
+ let ret_pos = dst_tail.wrapping_add(n);
+ let ret_idx = ret_pos as usize & MASK;
+
+ // safety: the value was written as part of `steal_into2` and not
+ // exposed to stealers, so no other thread can access it.
+ let ret = dst.inner.buffer[ret_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
+
+ if n == 0 {
+ // The `dst` queue is empty, but a single task was stolen
+ return Some(ret);
+ }
+
+ // Make the stolen items available to consumers
+ dst.inner.tail.store(dst_tail.wrapping_add(n), Release);
+
+ Some(ret)
+ }
+
+ // Steal tasks from `self`, placing them into `dst`. Returns the number of
+ // tasks that were stolen.
+ fn steal_into2(&self, dst: &mut Local<T>, dst_tail: u16) -> u16 {
+ let mut prev_packed = self.0.head.load(Acquire);
+ let mut next_packed;
+
+ let n = loop {
+ let (src_head_steal, src_head_real) = unpack(prev_packed);
+ let src_tail = self.0.tail.load(Acquire);
+
+ // If these two do not match, another thread is concurrently
+ // stealing from the queue.
+ if src_head_steal != src_head_real {
+ return 0;
+ }
+
+ // Number of available tasks to steal
+ let n = src_tail.wrapping_sub(src_head_real);
+ let n = n - n / 2;
+
+ if n == 0 {
+ // No tasks available to steal
+ return 0;
+ }
+
+ // Update the real head index to acquire the tasks.
+ let steal_to = src_head_real.wrapping_add(n);
+ assert_ne!(src_head_steal, steal_to);
+ next_packed = pack(src_head_steal, steal_to);
+
+ // Claim all those tasks. This is done by incrementing the "real"
+ // head but not the steal. By doing this, no other thread is able to
+ // steal from this queue until the current thread completes.
+ let res = self
+ .0
+ .head
+ .compare_exchange(prev_packed, next_packed, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => break n,
+ Err(actual) => prev_packed = actual,
+ }
+ };
+
+ assert!(n <= LOCAL_QUEUE_CAPACITY as u16 / 2, "actual = {}", n);
+
+ let (first, _) = unpack(next_packed);
+
+ // Take all the tasks
+ for i in 0..n {
+ // Compute the positions
+ let src_pos = first.wrapping_add(i);
+ let dst_pos = dst_tail.wrapping_add(i);
+
+ // Map to slots
+ let src_idx = src_pos as usize & MASK;
+ let dst_idx = dst_pos as usize & MASK;
+
+ // Read the task
+ //
+ // safety: We acquired the task with the atomic exchange above.
+ let task = self.0.buffer[src_idx].with(|ptr| unsafe { ptr::read((*ptr).as_ptr()) });
+
+ // Write the task to the new slot
+ //
+ // safety: `dst` queue is empty and we are the only producer to
+ // this queue.
+ dst.inner.buffer[dst_idx]
+ .with_mut(|ptr| unsafe { ptr::write((*ptr).as_mut_ptr(), task) });
+ }
+
+ let mut prev_packed = next_packed;
+
+ // Update `src_head_steal` to match `src_head_real` signalling that the
+ // stealing routine is complete.
+ loop {
+ let head = unpack(prev_packed).1;
+ next_packed = pack(head, head);
+
+ let res = self
+ .0
+ .head
+ .compare_exchange(prev_packed, next_packed, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => return n,
+ Err(actual) => {
+ let (actual_steal, actual_real) = unpack(actual);
+
+ assert_ne!(actual_steal, actual_real);
+
+ prev_packed = actual;
+ }
+ }
+ }
+ }
+}
+
+impl<T> Clone for Steal<T> {
+ fn clone(&self) -> Steal<T> {
+ Steal(self.0.clone())
+ }
+}
+
+impl<T> Drop for Local<T> {
+ fn drop(&mut self) {
+ if !std::thread::panicking() {
+ assert!(self.pop().is_none(), "queue not empty");
+ }
+ }
+}
+
+impl<T> Inner<T> {
+ fn is_empty(&self) -> bool {
+ let (_, head) = unpack(self.head.load(Acquire));
+ let tail = self.tail.load(Acquire);
+
+ head == tail
+ }
+}
+
+impl<T: 'static> Inject<T> {
+ pub(super) fn new() -> Inject<T> {
+ Inject {
+ pointers: Mutex::new(Pointers {
+ is_closed: false,
+ head: None,
+ tail: None,
+ }),
+ len: AtomicUsize::new(0),
+ _p: PhantomData,
+ }
+ }
+
+ pub(super) fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+    /// Close the injection queue. Returns `true` if the queue was open when
+    /// the transition was made.
+ pub(super) fn close(&self) -> bool {
+ let mut p = self.pointers.lock().unwrap();
+
+ if p.is_closed {
+ return false;
+ }
+
+ p.is_closed = true;
+ true
+ }
+
+ pub(super) fn is_closed(&self) -> bool {
+ self.pointers.lock().unwrap().is_closed
+ }
+
+ pub(super) fn len(&self) -> usize {
+ self.len.load(Acquire)
+ }
+
+ /// Pushes a value into the queue.
+ pub(super) fn push(&self, task: task::Notified<T>) {
+ // Acquire queue lock
+ let mut p = self.pointers.lock().unwrap();
+
+ if p.is_closed {
+ // Drop the mutex to avoid a potential deadlock when
+ // re-entering.
+ drop(p);
+ drop(task);
+ return;
+ }
+
+ // safety: only mutated with the lock held
+ let len = unsafe { self.len.unsync_load() };
+ let task = task.into_raw();
+
+ // The next pointer should already be null
+ debug_assert!(get_next(task).is_none());
+
+ if let Some(tail) = p.tail {
+ set_next(tail, Some(task));
+ } else {
+ p.head = Some(task);
+ }
+
+ p.tail = Some(task);
+
+ self.len.store(len + 1, Release);
+ }
+
+ pub(super) fn push_batch(
+ &self,
+ batch_head: task::Notified<T>,
+ batch_tail: task::Notified<T>,
+ num: usize,
+ ) {
+ let batch_head = batch_head.into_raw();
+ let batch_tail = batch_tail.into_raw();
+
+ debug_assert!(get_next(batch_tail).is_none());
+
+ let mut p = self.pointers.lock().unwrap();
+
+ if let Some(tail) = p.tail {
+ set_next(tail, Some(batch_head));
+ } else {
+ p.head = Some(batch_head);
+ }
+
+ p.tail = Some(batch_tail);
+
+ // Increment the count.
+ //
+ // safety: All updates to the len atomic are guarded by the mutex. As
+ // such, a non-atomic load followed by a store is safe.
+ let len = unsafe { self.len.unsync_load() };
+
+ self.len.store(len + num, Release);
+ }
+
+ pub(super) fn pop(&self) -> Option<task::Notified<T>> {
+ // Fast path, if len == 0, then there are no values
+ if self.is_empty() {
+ return None;
+ }
+
+ let mut p = self.pointers.lock().unwrap();
+
+        // It is possible to hit null here if another thread popped the last
+ // task between us checking `len` and acquiring the lock.
+ let task = p.head?;
+
+ p.head = get_next(task);
+
+ if p.head.is_none() {
+ p.tail = None;
+ }
+
+ set_next(task, None);
+
+ // Decrement the count.
+ //
+ // safety: All updates to the len atomic are guarded by the mutex. As
+ // such, a non-atomic load followed by a store is safe.
+ self.len
+ .store(unsafe { self.len.unsync_load() } - 1, Release);
+
+ // safety: a `Notified` is pushed into the queue and now it is popped!
+ Some(unsafe { task::Notified::from_raw(task) })
+ }
+}
+
+impl<T: 'static> Drop for Inject<T> {
+ fn drop(&mut self) {
+ if !std::thread::panicking() {
+ assert!(self.pop().is_none(), "queue not empty");
+ }
+ }
+}
+
+fn get_next(header: NonNull<task::Header>) -> Option<NonNull<task::Header>> {
+ unsafe { header.as_ref().queue_next.with(|ptr| *ptr) }
+}
+
+fn set_next(header: NonNull<task::Header>, val: Option<NonNull<task::Header>>) {
+ unsafe {
+ header.as_ref().queue_next.with_mut(|ptr| *ptr = val);
+ }
+}
+
+/// Split the head value into the real head and the index a stealer is working
+/// on.
+fn unpack(n: u32) -> (u16, u16) {
+ let real = n & u16::max_value() as u32;
+ let steal = n >> 16;
+
+ (steal as u16, real as u16)
+}
+
+/// Join the two head values
+fn pack(steal: u16, real: u16) -> u32 {
+ (real as u32) | ((steal as u32) << 16)
+}
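+
+// A sketch verifying the packing layout described above: the stealer index
+// lives in the high 16 bits and the real head in the low 16 bits.
+#[test]
+fn test_pack_unpack_roundtrip() {
+    let packed = pack(3, 7);
+    assert_eq!(packed, (3 << 16) | 7);
+    assert_eq!(unpack(packed), (3, 7));
+}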
+
+#[test]
+fn test_local_queue_capacity() {
+ assert!(LOCAL_QUEUE_CAPACITY - 1 <= u8::max_value() as usize);
+}
diff --git a/third_party/rust/tokio/src/runtime/shell.rs b/third_party/rust/tokio/src/runtime/shell.rs
new file mode 100644
index 0000000000..294f2a16d8
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/shell.rs
@@ -0,0 +1,62 @@
+#![allow(clippy::redundant_clone)]
+
+use crate::park::{Park, Unpark};
+use crate::runtime::enter;
+use crate::runtime::time;
+use crate::util::{waker_ref, Wake};
+
+use std::future::Future;
+use std::sync::Arc;
+use std::task::Context;
+use std::task::Poll::Ready;
+
+#[derive(Debug)]
+pub(super) struct Shell {
+ driver: time::Driver,
+
+ /// TODO: don't store this
+ unpark: Arc<Handle>,
+}
+
+#[derive(Debug)]
+struct Handle(<time::Driver as Park>::Unpark);
+
+impl Shell {
+ pub(super) fn new(driver: time::Driver) -> Shell {
+ let unpark = Arc::new(Handle(driver.unpark()));
+
+ Shell { driver, unpark }
+ }
+
+ pub(super) fn block_on<F>(&mut self, f: F) -> F::Output
+ where
+ F: Future,
+ {
+ let _e = enter();
+
+ pin!(f);
+
+ let waker = waker_ref(&self.unpark);
+ let mut cx = Context::from_waker(&waker);
+
+ loop {
+ if let Ready(v) = crate::coop::budget(|| f.as_mut().poll(&mut cx)) {
+ return v;
+ }
+
+ self.driver.park().unwrap();
+ }
+ }
+}
+
+impl Wake for Handle {
+ /// Wake by value
+ fn wake(self: Arc<Self>) {
+ Wake::wake_by_ref(&self);
+ }
+
+ /// Wake by reference
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.0.unpark();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/spawner.rs b/third_party/rust/tokio/src/runtime/spawner.rs
new file mode 100644
index 0000000000..d136945cdc
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/spawner.rs
@@ -0,0 +1,37 @@
+cfg_rt_core! {
+ use crate::runtime::basic_scheduler;
+ use crate::task::JoinHandle;
+
+ use std::future::Future;
+}
+
+cfg_rt_threaded! {
+ use crate::runtime::thread_pool;
+}
+
+#[derive(Debug, Clone)]
+pub(crate) enum Spawner {
+ Shell,
+ #[cfg(feature = "rt-core")]
+ Basic(basic_scheduler::Spawner),
+ #[cfg(feature = "rt-threaded")]
+ ThreadPool(thread_pool::Spawner),
+}
+
+cfg_rt_core! {
+ impl Spawner {
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ match self {
+ Spawner::Shell => panic!("spawning not enabled for runtime"),
+ #[cfg(feature = "rt-core")]
+ Spawner::Basic(spawner) => spawner.spawn(future),
+ #[cfg(feature = "rt-threaded")]
+ Spawner::ThreadPool(spawner) => spawner.spawn(future),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/core.rs b/third_party/rust/tokio/src/runtime/task/core.rs
new file mode 100644
index 0000000000..573b9f3c9c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/core.rs
@@ -0,0 +1,279 @@
+use crate::loom::cell::UnsafeCell;
+use crate::runtime::task::raw::{self, Vtable};
+use crate::runtime::task::state::State;
+use crate::runtime::task::waker::waker_ref;
+use crate::runtime::task::{Notified, Schedule, Task};
+use crate::util::linked_list;
+
+use std::future::Future;
+use std::pin::Pin;
+use std::ptr::NonNull;
+use std::task::{Context, Poll, Waker};
+
+/// The task cell. Contains the components of the task.
+///
+/// It is critical for `Header` to be the first field as the task structure will
+/// be referenced by both `*mut Cell` and `*mut Header`.
+#[repr(C)]
+pub(super) struct Cell<T: Future, S> {
+ /// Hot task state data
+ pub(super) header: Header,
+
+ /// Either the future or output, depending on the execution stage.
+ pub(super) core: Core<T, S>,
+
+ /// Cold data
+ pub(super) trailer: Trailer,
+}
+
+/// The core of the task.
+///
+/// Holds the future or output, depending on the stage of execution.
+pub(super) struct Core<T: Future, S> {
+ /// Scheduler used to drive this future
+ pub(super) scheduler: UnsafeCell<Option<S>>,
+
+ /// Either the future or the output
+ pub(super) stage: UnsafeCell<Stage<T>>,
+}
+
+/// Crate public as this is also needed by the pool.
+#[repr(C)]
+pub(crate) struct Header {
+ /// Task state
+ pub(super) state: State,
+
+ pub(crate) owned: UnsafeCell<linked_list::Pointers<Header>>,
+
+ /// Pointer to next task, used with the injection queue
+ pub(crate) queue_next: UnsafeCell<Option<NonNull<Header>>>,
+
+ /// Pointer to the next task in the transfer stack
+ pub(super) stack_next: UnsafeCell<Option<NonNull<Header>>>,
+
+ /// Table of function pointers for executing actions on the task.
+ pub(super) vtable: &'static Vtable,
+}
+
+unsafe impl Send for Header {}
+unsafe impl Sync for Header {}
+
+/// Cold data is stored after the future.
+pub(super) struct Trailer {
+ /// Consumer task waiting on completion of this task.
+ pub(super) waker: UnsafeCell<Option<Waker>>,
+}
+
+/// Either the future or the output.
+pub(super) enum Stage<T: Future> {
+ Running(T),
+ Finished(super::Result<T::Output>),
+ Consumed,
+}
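+
+// Informal lifecycle sketch: the stage advances monotonically from
+// Running(future) to Finished(output) once the future completes, and finally
+// to Consumed when the output is taken by the `JoinHandle` or dropped.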
+
+impl<T: Future, S: Schedule> Cell<T, S> {
+ /// Allocates a new task cell, containing the header, trailer, and core
+ /// structures.
+ pub(super) fn new(future: T, state: State) -> Box<Cell<T, S>> {
+ Box::new(Cell {
+ header: Header {
+ state,
+ owned: UnsafeCell::new(linked_list::Pointers::new()),
+ queue_next: UnsafeCell::new(None),
+ stack_next: UnsafeCell::new(None),
+ vtable: raw::vtable::<T, S>(),
+ },
+ core: Core {
+ scheduler: UnsafeCell::new(None),
+ stage: UnsafeCell::new(Stage::Running(future)),
+ },
+ trailer: Trailer {
+ waker: UnsafeCell::new(None),
+ },
+ })
+ }
+}
+
+impl<T: Future, S: Schedule> Core<T, S> {
+ /// If needed, bind a scheduler to the task.
+ ///
+ /// This only happens on the first poll.
+ pub(super) fn bind_scheduler(&self, task: Task<S>) {
+ use std::mem::ManuallyDrop;
+
+ // TODO: it would be nice to not have to wrap with a ManuallyDrop
+ let task = ManuallyDrop::new(task);
+
+ // This function may be called concurrently, but the __first__ time it
+ // is called, the caller has unique access to this field. All subsequent
+        // concurrent calls will be via the `Waker`, which will "happen after"
+ // the first poll.
+ //
+ // In other words, it is always safe to read the field and it is safe to
+ // write to the field when it is `None`.
+ if self.is_bound() {
+ return;
+ }
+
+ // Bind the task to the scheduler
+ let scheduler = S::bind(ManuallyDrop::into_inner(task));
+
+ // Safety: As `scheduler` is not set, this is the first poll
+ self.scheduler.with_mut(|ptr| unsafe {
+ *ptr = Some(scheduler);
+ });
+ }
+
+ /// Returns true if the task is bound to a scheduler.
+ pub(super) fn is_bound(&self) -> bool {
+ // Safety: never called concurrently w/ a mutation.
+ self.scheduler.with(|ptr| unsafe { (*ptr).is_some() })
+ }
+
+ /// Poll the future
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure it is safe to mutate the `state` field. This
+    /// requires ensuring mutual exclusion between any concurrent thread that
+ /// might modify the future or output field.
+ ///
+ /// The mutual exclusion is implemented by `Harness` and the `Lifecycle`
+ /// component of the task state.
+ ///
+ /// `self` must also be pinned. This is handled by storing the task on the
+ /// heap.
+ pub(super) fn poll(&self, header: &Header) -> Poll<T::Output> {
+ let res = {
+ self.stage.with_mut(|ptr| {
+ // Safety: The caller ensures mutual exclusion to the field.
+ let future = match unsafe { &mut *ptr } {
+ Stage::Running(future) => future,
+ _ => unreachable!("unexpected stage"),
+ };
+
+ // Safety: The caller ensures the future is pinned.
+ let future = unsafe { Pin::new_unchecked(future) };
+
+ // The waker passed into the `poll` function does not require a ref
+ // count increment.
+ let waker_ref = waker_ref::<T, S>(header);
+ let mut cx = Context::from_waker(&*waker_ref);
+
+ future.poll(&mut cx)
+ })
+ };
+
+ if res.is_ready() {
+ self.drop_future_or_output();
+ }
+
+ res
+ }
+
+ /// Drop the future
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure it is safe to mutate the `stage` field.
+ pub(super) fn drop_future_or_output(&self) {
+ self.stage.with_mut(|ptr| {
+            // Safety: The caller ensures mutual exclusion to the field.
+ unsafe { *ptr = Stage::Consumed };
+ });
+ }
+
+ /// Store the task output
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure it is safe to mutate the `stage` field.
+ pub(super) fn store_output(&self, output: super::Result<T::Output>) {
+ self.stage.with_mut(|ptr| {
+ // Safety: the caller ensures mutual exclusion to the field.
+ unsafe { *ptr = Stage::Finished(output) };
+ });
+ }
+
+ /// Take the task output
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure it is safe to mutate the `stage` field.
+ pub(super) fn take_output(&self) -> super::Result<T::Output> {
+ use std::mem;
+
+ self.stage.with_mut(|ptr| {
+            // Safety: the caller ensures mutual exclusion to the field.
+ match mem::replace(unsafe { &mut *ptr }, Stage::Consumed) {
+ Stage::Finished(output) => output,
+ _ => panic!("unexpected task state"),
+ }
+ })
+ }
+
+ /// Schedule the future for execution
+ pub(super) fn schedule(&self, task: Notified<S>) {
+ self.scheduler.with(|ptr| {
+ // Safety: Can only be called after initial `poll`, which is the
+ // only time the field is mutated.
+ match unsafe { &*ptr } {
+ Some(scheduler) => scheduler.schedule(task),
+ None => panic!("no scheduler set"),
+ }
+ });
+ }
+
+ /// Schedule the future for execution in the near future, yielding the
+ /// thread to other tasks.
+ pub(super) fn yield_now(&self, task: Notified<S>) {
+ self.scheduler.with(|ptr| {
+ // Safety: Can only be called after initial `poll`, which is the
+ // only time the field is mutated.
+ match unsafe { &*ptr } {
+ Some(scheduler) => scheduler.yield_now(task),
+ None => panic!("no scheduler set"),
+ }
+ });
+ }
+
+ /// Release the task
+ ///
+ /// If the `Scheduler` implementation is able to, it returns the `Task`
+ /// handle immediately. The caller of this function will batch a ref-dec
+ /// with a state change.
+ pub(super) fn release(&self, task: Task<S>) -> Option<Task<S>> {
+ use std::mem::ManuallyDrop;
+
+ let task = ManuallyDrop::new(task);
+
+ self.scheduler.with(|ptr| {
+ // Safety: Can only be called after initial `poll`, which is the
+ // only time the field is mutated.
+ match unsafe { &*ptr } {
+ Some(scheduler) => scheduler.release(&*task),
+ // Task was never polled
+ None => None,
+ }
+ })
+ }
+}
+
+cfg_rt_threaded! {
+ impl Header {
+ pub(crate) fn shutdown(&self) {
+ use crate::runtime::task::RawTask;
+
+ let task = unsafe { RawTask::from_raw(self.into()) };
+ task.shutdown();
+ }
+ }
+}
+
+#[test]
+#[cfg(not(loom))]
+fn header_lte_cache_line() {
+ use std::mem::size_of;
+
+ assert!(size_of::<Header>() <= 8 * size_of::<*const ()>());
+}
diff --git a/third_party/rust/tokio/src/runtime/task/error.rs b/third_party/rust/tokio/src/runtime/task/error.rs
new file mode 100644
index 0000000000..d5f65a4981
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/error.rs
@@ -0,0 +1,163 @@
+use std::any::Any;
+use std::fmt;
+use std::io;
+use std::sync::Mutex;
+
+doc_rt_core! {
+ /// Task failed to execute to completion.
+ pub struct JoinError {
+ repr: Repr,
+ }
+}
+
+enum Repr {
+ Cancelled,
+ Panic(Mutex<Box<dyn Any + Send + 'static>>),
+}
+
+impl JoinError {
+ #[doc(hidden)]
+ #[deprecated]
+ pub fn cancelled() -> JoinError {
+ Self::cancelled2()
+ }
+
+ pub(crate) fn cancelled2() -> JoinError {
+ JoinError {
+ repr: Repr::Cancelled,
+ }
+ }
+
+ #[doc(hidden)]
+ #[deprecated]
+ pub fn panic(err: Box<dyn Any + Send + 'static>) -> JoinError {
+ Self::panic2(err)
+ }
+
+ pub(crate) fn panic2(err: Box<dyn Any + Send + 'static>) -> JoinError {
+ JoinError {
+ repr: Repr::Panic(Mutex::new(err)),
+ }
+ }
+
+ /// Returns true if the error was caused by the task being cancelled
+ pub fn is_cancelled(&self) -> bool {
+ match &self.repr {
+ Repr::Cancelled => true,
+ _ => false,
+ }
+ }
+
+ /// Returns true if the error was caused by the task panicking
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::panic;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let err = tokio::spawn(async {
+ /// panic!("boom");
+ /// }).await.unwrap_err();
+ ///
+ /// assert!(err.is_panic());
+ /// }
+ /// ```
+ pub fn is_panic(&self) -> bool {
+ match &self.repr {
+ Repr::Panic(_) => true,
+ _ => false,
+ }
+ }
+
+ /// Consumes the join error, returning the object with which the task panicked.
+ ///
+ /// # Panics
+ ///
+ /// `into_panic()` panics if the `Error` does not represent the underlying
+ /// task terminating with a panic. Use `is_panic` to check the error reason
+ /// or `try_into_panic` for a variant that does not panic.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let err = tokio::spawn(async {
+ /// panic!("boom");
+ /// }).await.unwrap_err();
+ ///
+ /// if err.is_panic() {
+ /// // Resume the panic on the main task
+ /// panic::resume_unwind(err.into_panic());
+ /// }
+ /// }
+ /// ```
+ pub fn into_panic(self) -> Box<dyn Any + Send + 'static> {
+ self.try_into_panic()
+ .expect("`JoinError` reason is not a panic.")
+ }
+
+ /// Consumes the join error, returning the object with which the task
+ /// panicked if the task terminated due to a panic. Otherwise, `self` is
+ /// returned.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let err = tokio::spawn(async {
+ /// panic!("boom");
+ /// }).await.unwrap_err();
+ ///
+ /// if let Ok(reason) = err.try_into_panic() {
+ /// // Resume the panic on the main task
+ /// panic::resume_unwind(reason);
+ /// }
+ /// }
+ /// ```
+ pub fn try_into_panic(self) -> Result<Box<dyn Any + Send + 'static>, JoinError> {
+ match self.repr {
+ Repr::Panic(p) => Ok(p.into_inner().expect("Extracting panic from mutex")),
+ _ => Err(self),
+ }
+ }
+}
+
+impl fmt::Display for JoinError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self.repr {
+ Repr::Cancelled => write!(fmt, "cancelled"),
+ Repr::Panic(_) => write!(fmt, "panic"),
+ }
+ }
+}
+
+impl fmt::Debug for JoinError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self.repr {
+ Repr::Cancelled => write!(fmt, "JoinError::Cancelled"),
+ Repr::Panic(_) => write!(fmt, "JoinError::Panic(...)"),
+ }
+ }
+}
+
+impl std::error::Error for JoinError {}
+
+impl From<JoinError> for io::Error {
+ fn from(src: JoinError) -> io::Error {
+ io::Error::new(
+ io::ErrorKind::Other,
+ match src.repr {
+ Repr::Cancelled => "task was cancelled",
+ Repr::Panic(_) => "task panicked",
+ },
+ )
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/harness.rs b/third_party/rust/tokio/src/runtime/task/harness.rs
new file mode 100644
index 0000000000..29b231ea88
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/harness.rs
@@ -0,0 +1,372 @@
+use crate::runtime::task::core::{Cell, Core, Header, Trailer};
+use crate::runtime::task::state::Snapshot;
+use crate::runtime::task::{JoinError, Notified, Schedule, Task};
+
+use std::future::Future;
+use std::mem;
+use std::panic;
+use std::ptr::NonNull;
+use std::task::{Poll, Waker};
+
+/// Typed raw task handle
+pub(super) struct Harness<T: Future, S: 'static> {
+ cell: NonNull<Cell<T, S>>,
+}
+
+impl<T, S> Harness<T, S>
+where
+ T: Future,
+ S: 'static,
+{
+ pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> Harness<T, S> {
+ Harness {
+ cell: ptr.cast::<Cell<T, S>>(),
+ }
+ }
+
+ fn header(&self) -> &Header {
+ unsafe { &self.cell.as_ref().header }
+ }
+
+ fn trailer(&self) -> &Trailer {
+ unsafe { &self.cell.as_ref().trailer }
+ }
+
+ fn core(&self) -> &Core<T, S> {
+ unsafe { &self.cell.as_ref().core }
+ }
+}
+
+impl<T, S> Harness<T, S>
+where
+ T: Future,
+ S: Schedule,
+{
+ /// Polls the inner future.
+ ///
+ /// All necessary state checks and transitions are performed.
+ ///
+ /// Panics raised while polling the future are handled.
+ pub(super) fn poll(self) {
+ // If this is the first time the task is polled, the task will be bound
+ // to the scheduler, in which case the task ref count must be
+ // incremented.
+ let ref_inc = !self.core().is_bound();
+
+ // Transition the task to the running state.
+ //
+ // A failure to transition here indicates the task has been cancelled
+ // while in the run queue pending execution.
+ let snapshot = match self.header().state.transition_to_running(ref_inc) {
+ Ok(snapshot) => snapshot,
+ Err(_) => {
+ // The task was shutdown while in the run queue. At this point,
+ // we just hold a ref counted reference. Drop it here.
+ self.drop_reference();
+ return;
+ }
+ };
+
+ // Ensure the task is bound to a scheduler instance. If this is the
+ // first time polling the task, a scheduler instance is pulled from the
+ // local context and assigned to the task.
+ //
+ // The scheduler maintains ownership of the task and responds to `wake`
+ // calls.
+ //
+ // The task reference count has been incremented.
+ self.core().bind_scheduler(self.to_task());
+
+ // The transition to `Running` done above ensures that a lock on the
+ // future has been obtained. This also ensures the `*mut T` pointer
+ // contains the future (as opposed to the output) and is initialized.
+
+ let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ struct Guard<'a, T: Future, S: Schedule> {
+ core: &'a Core<T, S>,
+ polled: bool,
+ }
+
+ impl<T: Future, S: Schedule> Drop for Guard<'_, T, S> {
+ fn drop(&mut self) {
+ if !self.polled {
+ self.core.drop_future_or_output();
+ }
+ }
+ }
+
+ let mut guard = Guard {
+ core: self.core(),
+ polled: false,
+ };
+
+ // If the task is cancelled, avoid polling it, instead signalling it
+ // is complete.
+ if snapshot.is_cancelled() {
+ Poll::Ready(Err(JoinError::cancelled2()))
+ } else {
+ let res = guard.core.poll(self.header());
+
+ // prevent the guard from dropping the future
+ guard.polled = true;
+
+ res.map(Ok)
+ }
+ }));
+
+ match res {
+ Ok(Poll::Ready(out)) => {
+ self.complete(out, snapshot.is_join_interested());
+ }
+ Ok(Poll::Pending) => {
+ match self.header().state.transition_to_idle() {
+ Ok(snapshot) => {
+ if snapshot.is_notified() {
+ // Signal yield
+ self.core().yield_now(Notified(self.to_task()));
+ // The ref-count was incremented as part of
+ // `transition_to_idle`.
+ self.drop_reference();
+ }
+ }
+ Err(_) => self.cancel_task(),
+ }
+ }
+ Err(err) => {
+ self.complete(Err(JoinError::panic2(err)), snapshot.is_join_interested());
+ }
+ }
+ }
+
+ pub(super) fn dealloc(self) {
+ // Release the join waker, if there is one.
+ self.trailer().waker.with_mut(|_| ());
+
+ // Check causality
+ self.core().stage.with_mut(|_| {});
+ self.core().scheduler.with_mut(|_| {});
+
+ unsafe {
+ drop(Box::from_raw(self.cell.as_ptr()));
+ }
+ }
+
+ // ===== join handle =====
+
+ /// Read the task output into `dst`.
+ pub(super) fn try_read_output(self, dst: &mut Poll<super::Result<T::Output>>, waker: &Waker) {
+ // Load a snapshot of the current task state
+ let snapshot = self.header().state.load();
+
+ debug_assert!(snapshot.is_join_interested());
+
+ if !snapshot.is_complete() {
+ // The waker must be stored in the task struct.
+ let res = if snapshot.has_join_waker() {
+ // There already is a waker stored in the struct. If it matches
+ // the provided waker, then there is no further work to do.
+ // Otherwise, the waker must be swapped.
+ let will_wake = unsafe {
+ // Safety: when `JOIN_INTEREST` is set, only `JOIN_HANDLE`
+ // may mutate the `waker` field.
+ self.trailer()
+ .waker
+ .with(|ptr| (*ptr).as_ref().unwrap().will_wake(waker))
+ };
+
+ if will_wake {
+ // The task is not complete **and** the waker is up to date,
+ // there is nothing further that needs to be done.
+ return;
+ }
+
+ // Unset the `JOIN_WAKER` to gain mutable access to the `waker`
+                // field, then update the field with the new join waker.
+ //
+ // This requires two atomic operations, unsetting the bit and
+ // then resetting it. If the task transitions to complete
+ // concurrently to either one of those operations, then setting
+ // the join waker fails and we proceed to reading the task
+ // output.
+ self.header()
+ .state
+ .unset_waker()
+ .and_then(|snapshot| self.set_join_waker(waker.clone(), snapshot))
+ } else {
+ self.set_join_waker(waker.clone(), snapshot)
+ };
+
+ match res {
+ Ok(_) => return,
+ Err(snapshot) => {
+ assert!(snapshot.is_complete());
+ }
+ }
+ }
+
+ *dst = Poll::Ready(self.core().take_output());
+ }
+
+ fn set_join_waker(&self, waker: Waker, snapshot: Snapshot) -> Result<Snapshot, Snapshot> {
+ assert!(snapshot.is_join_interested());
+ assert!(!snapshot.has_join_waker());
+
+ // Safety: Only the `JoinHandle` may set the `waker` field. When
+ // `JOIN_INTEREST` is **not** set, nothing else will touch the field.
+ unsafe {
+ self.trailer().waker.with_mut(|ptr| {
+ *ptr = Some(waker);
+ });
+ }
+
+ // Update the `JoinWaker` state accordingly
+ let res = self.header().state.set_join_waker();
+
+ // If the state could not be updated, then clear the join waker
+ if res.is_err() {
+ unsafe {
+ self.trailer().waker.with_mut(|ptr| {
+ *ptr = None;
+ });
+ }
+ }
+
+ res
+ }
+
+ pub(super) fn drop_join_handle_slow(self) {
+ // Try to unset `JOIN_INTEREST`. This must be done as a first step in
+ // case the task concurrently completed.
+ if self.header().state.unset_join_interested().is_err() {
+ // It is our responsibility to drop the output. This is critical as
+ // the task output may not be `Send` and as such must remain with
+ // the scheduler or `JoinHandle`. i.e. if the output remains in the
+ // task structure until the task is deallocated, it may be dropped
+ // by a Waker on any arbitrary thread.
+ self.core().drop_future_or_output();
+ }
+
+ // Drop the `JoinHandle` reference, possibly deallocating the task
+ self.drop_reference();
+ }
+
+ // ===== waker behavior =====
+
+ pub(super) fn wake_by_val(self) {
+ self.wake_by_ref();
+ self.drop_reference();
+ }
+
+ pub(super) fn wake_by_ref(&self) {
+ if self.header().state.transition_to_notified() {
+ self.core().schedule(Notified(self.to_task()));
+ }
+ }
+
+ pub(super) fn drop_reference(self) {
+ if self.header().state.ref_dec() {
+ self.dealloc();
+ }
+ }
+
+ /// Forcibly shutdown the task
+ ///
+ /// Attempt to transition to `Running` in order to forcibly shutdown the
+ /// task. If the task is currently running or in a state of completion, then
+ /// there is nothing further to do. When the task completes running, it will
+ /// notice the `CANCELLED` bit and finalize the task.
+ pub(super) fn shutdown(self) {
+ if !self.header().state.transition_to_shutdown() {
+ // The task is concurrently running. No further work needed.
+ return;
+ }
+
+        // By transitioning the lifecycle to `Running`, we have permission to
+ // drop the future.
+ self.cancel_task();
+ }
+
+ // ====== internal ======
+
+ fn cancel_task(self) {
+ // Drop the future from a panic guard.
+ let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ self.core().drop_future_or_output();
+ }));
+
+ if let Err(err) = res {
+ // Dropping the future panicked, complete the join
+ // handle with the panic to avoid dropping the panic
+ // on the ground.
+ self.complete(Err(JoinError::panic2(err)), true);
+ } else {
+ self.complete(Err(JoinError::cancelled2()), true);
+ }
+ }
+
+ fn complete(mut self, output: super::Result<T::Output>, is_join_interested: bool) {
+ if is_join_interested {
+ // Store the output. The future has already been dropped
+ //
+ // Safety: Mutual exclusion is obtained by having transitioned the task
+ // state -> Running
+ self.core().store_output(output);
+
+ // Transition to `Complete`, notifying the `JoinHandle` if necessary.
+ self.transition_to_complete();
+ }
+
+ // The task has completed execution and will no longer be scheduled.
+ //
+ // Attempts to batch a ref-dec with the state transition below.
+ let ref_dec = if self.core().is_bound() {
+ if let Some(task) = self.core().release(self.to_task()) {
+ mem::forget(task);
+ true
+ } else {
+ false
+ }
+ } else {
+ false
+ };
+
+ // This might deallocate
+ let snapshot = self
+ .header()
+ .state
+ .transition_to_terminal(!is_join_interested, ref_dec);
+
+ if snapshot.ref_count() == 0 {
+ self.dealloc()
+ }
+ }
+
+ /// Transitions the task's lifecycle to `Complete`. Notifies the
+ /// `JoinHandle` if it still has interest in the completion.
+ fn transition_to_complete(&mut self) {
+ // Transition the task's lifecycle to `Complete` and get a snapshot of
+        // the task's state.
+ let snapshot = self.header().state.transition_to_complete();
+
+ if !snapshot.is_join_interested() {
+ // The `JoinHandle` is not interested in the output of this task. It
+ // is our responsibility to drop the output.
+ self.core().drop_future_or_output();
+ } else if snapshot.has_join_waker() {
+ // Notify the join handle. The previous transition obtains the
+ // lock on the waker cell.
+ self.wake_join();
+ }
+ }
+
+ fn wake_join(&self) {
+ self.trailer().waker.with(|ptr| match unsafe { &*ptr } {
+ Some(waker) => waker.wake_by_ref(),
+ None => panic!("waker missing"),
+ });
+ }
+
+ fn to_task(&self) -> Task<S> {
+ unsafe { Task::from_raw(self.header().into()) }
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/task/join.rs b/third_party/rust/tokio/src/runtime/task/join.rs
new file mode 100644
index 0000000000..fdcc346e5c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/join.rs
@@ -0,0 +1,152 @@
+use crate::runtime::task::RawTask;
+
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+doc_rt_core! {
+ /// An owned permission to join on a task (await its termination).
+ ///
+ /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] for
+ /// a task rather than a thread.
+ ///
+ /// A `JoinHandle` *detaches* the associated task when it is dropped, which
+ /// means that there is no longer any handle to the task, and no way to `join`
+ /// on it.
+ ///
+ /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`]
+ /// functions.
+ ///
+ /// # Examples
+ ///
+ /// Creation from [`task::spawn`]:
+ ///
+ /// ```
+ /// use tokio::task;
+ ///
+ /// # async fn doc() {
+ /// let join_handle: task::JoinHandle<_> = task::spawn(async {
+ /// // some work here
+ /// });
+ /// # }
+ /// ```
+ ///
+ /// Creation from [`task::spawn_blocking`]:
+ ///
+ /// ```
+ /// use tokio::task;
+ ///
+ /// # async fn doc() {
+ /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| {
+ /// // some blocking work here
+ /// });
+ /// # }
+ /// ```
+ ///
+ /// Child being detached and outliving its parent:
+ ///
+ /// ```no_run
+ /// use tokio::task;
+ /// use tokio::time;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main] async fn main() {
+ /// let original_task = task::spawn(async {
+ /// let _detached_task = task::spawn(async {
+ /// // Sleep so that the original task returns before this one completes.
+ /// time::delay_for(Duration::from_millis(10)).await;
+ /// // This will be called, even though the JoinHandle is dropped.
+ /// println!("♫ Still alive ♫");
+ /// });
+ /// });
+ ///
+ /// original_task.await.expect("The task being joined has panicked");
+ /// println!("Original task is joined.");
+ ///
+ /// // We make sure that the new task has time to run, before the main
+ /// // task returns.
+ ///
+ /// time::delay_for(Duration::from_millis(1000)).await;
+ /// # }
+ /// ```
+ ///
+ /// [`task::spawn`]: crate::task::spawn()
+ /// [`task::spawn_blocking`]: crate::task::spawn_blocking
+ /// [`std::thread::JoinHandle`]: std::thread::JoinHandle
+ pub struct JoinHandle<T> {
+ raw: Option<RawTask>,
+ _p: PhantomData<T>,
+ }
+}
+
+unsafe impl<T: Send> Send for JoinHandle<T> {}
+unsafe impl<T: Send> Sync for JoinHandle<T> {}
+
+impl<T> JoinHandle<T> {
+ pub(super) fn new(raw: RawTask) -> JoinHandle<T> {
+ JoinHandle {
+ raw: Some(raw),
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T> Unpin for JoinHandle<T> {}
+
+impl<T> Future for JoinHandle<T> {
+ type Output = super::Result<T>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut ret = Poll::Pending;
+
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ // Raw should always be set. If it is not, this is due to polling after
+ // completion
+ let raw = self
+ .raw
+ .as_ref()
+ .expect("polling after `JoinHandle` already completed");
+
+ // Try to read the task output. If the task is not yet complete, the
+ // waker is stored and is notified once the task does complete.
+ //
+ // The function must go via the vtable, which requires erasing generic
+ // types. To do this, the function "return" is placed on the stack
+ // **before** calling the function and is passed into the function using
+ // `*mut ()`.
+ //
+ // Safety:
+ //
+ // The type of `T` must match the task's output type.
+ unsafe {
+ raw.try_read_output(&mut ret as *mut _ as *mut (), cx.waker());
+ }
+
+ ret
+ }
+}
+
+impl<T> Drop for JoinHandle<T> {
+ fn drop(&mut self) {
+ if let Some(raw) = self.raw.take() {
+ if raw.header().state.drop_join_handle_fast().is_ok() {
+ return;
+ }
+
+ raw.drop_join_handle_slow();
+ }
+ }
+}
+
+impl<T> fmt::Debug for JoinHandle<T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("JoinHandle").finish()
+ }
+}
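`JoinHandle::poll` above passes its result slot through the vtable as `*mut ()`: the `Poll` value lives on the caller's stack and the callee writes through the erased pointer. A hedged, self-contained sketch of the same pattern:

```rust
use std::task::Poll;

// The callee only sees an erased pointer; the caller guarantees the type.
unsafe fn write_output<T>(dst: *mut (), value: T) {
    let slot = &mut *(dst as *mut Poll<T>);
    *slot = Poll::Ready(value);
}

fn main() {
    // The "return value" lives on the caller's stack, exactly as in
    // `JoinHandle::poll`, and is passed through the erased interface.
    let mut ret: Poll<u64> = Poll::Pending;

    // Safety: the pointee really is a `Poll<u64>`.
    unsafe { write_output(&mut ret as *mut _ as *mut (), 42u64) };

    assert_eq!(ret, Poll::Ready(42));
}
```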
diff --git a/third_party/rust/tokio/src/runtime/task/mod.rs b/third_party/rust/tokio/src/runtime/task/mod.rs
new file mode 100644
index 0000000000..17b5157e84
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/mod.rs
@@ -0,0 +1,220 @@
+mod core;
+use self::core::Cell;
+pub(crate) use self::core::Header;
+
+mod error;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::error::JoinError;
+
+mod harness;
+use self::harness::Harness;
+
+mod join;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::join::JoinHandle;
+
+mod raw;
+use self::raw::RawTask;
+
+mod state;
+use self::state::State;
+
+mod waker;
+
+cfg_rt_threaded! {
+ mod stack;
+ pub(crate) use self::stack::TransferStack;
+}
+
+use crate::util::linked_list;
+
+use std::future::Future;
+use std::marker::PhantomData;
+use std::ptr::NonNull;
+use std::{fmt, mem};
+
+/// An owned handle to the task, tracked by ref count
+#[repr(transparent)]
+pub(crate) struct Task<S: 'static> {
+ raw: RawTask,
+ _p: PhantomData<S>,
+}
+
+unsafe impl<S> Send for Task<S> {}
+unsafe impl<S> Sync for Task<S> {}
+
+/// A task was notified
+#[repr(transparent)]
+pub(crate) struct Notified<S: 'static>(Task<S>);
+
+unsafe impl<S: Schedule> Send for Notified<S> {}
+unsafe impl<S: Schedule> Sync for Notified<S> {}
+
+/// Task result sent back
+pub(crate) type Result<T> = std::result::Result<T, JoinError>;
+
+pub(crate) trait Schedule: Sync + Sized + 'static {
+ /// Bind a task to the executor.
+ ///
+ /// Guaranteed to be called from the thread that called `poll` on the task.
+ /// The returned `Schedule` instance is associated with the task and is used
+ /// as `&self` in the other methods on this trait.
+ fn bind(task: Task<Self>) -> Self;
+
+ /// The task has completed work and is ready to be released. The scheduler
+ /// is free to drop it whenever.
+ ///
+ /// If the scheduler can immediately release the task, it should return
+ /// it as part of the function. This enables the task module to batch
+ /// the ref-dec with other operations.
+ fn release(&self, task: &Task<Self>) -> Option<Task<Self>>;
+
+ /// Schedule the task
+ fn schedule(&self, task: Notified<Self>);
+
+ /// Schedule the task to run in the near future, yielding the thread to
+ /// other tasks.
+ fn yield_now(&self, task: Notified<Self>) {
+ self.schedule(task);
+ }
+}
+
+/// Create a new task with an associated join handle
+pub(crate) fn joinable<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
+where
+ T: Future + Send + 'static,
+ S: Schedule,
+{
+ let raw = RawTask::new::<_, S>(task);
+
+ let task = Task {
+ raw,
+ _p: PhantomData,
+ };
+
+ let join = JoinHandle::new(raw);
+
+ (Notified(task), join)
+}
+
+cfg_rt_util! {
+ /// Create a new `!Send` task with an associated join handle
+ pub(crate) unsafe fn joinable_local<T, S>(task: T) -> (Notified<S>, JoinHandle<T::Output>)
+ where
+ T: Future + 'static,
+ S: Schedule,
+ {
+ let raw = RawTask::new::<_, S>(task);
+
+ let task = Task {
+ raw,
+ _p: PhantomData,
+ };
+
+ let join = JoinHandle::new(raw);
+
+ (Notified(task), join)
+ }
+}
+
+impl<S: 'static> Task<S> {
+ pub(crate) unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> {
+ Task {
+ raw: RawTask::from_raw(ptr),
+ _p: PhantomData,
+ }
+ }
+
+ pub(crate) fn header(&self) -> &Header {
+ self.raw.header()
+ }
+}
+
+cfg_rt_threaded! {
+ impl<S: 'static> Notified<S> {
+ pub(crate) unsafe fn from_raw(ptr: NonNull<Header>) -> Notified<S> {
+ Notified(Task::from_raw(ptr))
+ }
+
+ pub(crate) fn header(&self) -> &Header {
+ self.0.header()
+ }
+ }
+
+ impl<S: 'static> Task<S> {
+ pub(crate) fn into_raw(self) -> NonNull<Header> {
+ let ret = self.header().into();
+ mem::forget(self);
+ ret
+ }
+ }
+
+ impl<S: 'static> Notified<S> {
+ pub(crate) fn into_raw(self) -> NonNull<Header> {
+ self.0.into_raw()
+ }
+ }
+}
+
+impl<S: Schedule> Task<S> {
+ /// Pre-emptively cancel the task as part of the shutdown process.
+ pub(crate) fn shutdown(&self) {
+ self.raw.shutdown();
+ }
+}
+
+impl<S: Schedule> Notified<S> {
+ /// Run the task
+ pub(crate) fn run(self) {
+ self.0.raw.poll();
+ mem::forget(self);
+ }
+
+ /// Pre-emptively cancel the task as part of the shutdown process.
+ pub(crate) fn shutdown(self) {
+ self.0.shutdown();
+ }
+}
+
+impl<S: 'static> Drop for Task<S> {
+ fn drop(&mut self) {
+ // Decrement the ref count
+ if self.header().state.ref_dec() {
+ // Deallocate if this is the final ref count
+ self.raw.dealloc();
+ }
+ }
+}
+
+impl<S> fmt::Debug for Task<S> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "Task({:p})", self.header())
+ }
+}
+
+impl<S> fmt::Debug for Notified<S> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "task::Notified({:p})", self.0.header())
+ }
+}
+
+/// # Safety
+///
+/// Tasks are pinned
+unsafe impl<S> linked_list::Link for Task<S> {
+ type Handle = Task<S>;
+ type Target = Header;
+
+ fn as_raw(handle: &Task<S>) -> NonNull<Header> {
+ handle.header().into()
+ }
+
+ unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> {
+ Task::from_raw(ptr)
+ }
+
+ unsafe fn pointers(target: NonNull<Header>) -> NonNull<linked_list::Pointers<Header>> {
+ // Not super great as it avoids some of loom's checking...
+ NonNull::from(target.as_ref().owned.with_mut(|ptr| &mut *ptr))
+ }
+}
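`Schedule` is the hook a scheduler implements to get tasks back from this module: `bind` on first poll, `release` on completion, and `schedule` whenever a task is woken. The sketch below restates a simplified version of the trait over a placeholder task type, since the real one is crate-private; every name here is illustrative:

```rust
use std::collections::VecDeque;
use std::sync::Mutex;

// Placeholder standing in for `task::Notified<S>`.
struct Notified(u64);

trait Schedule: Sync + Sized + 'static {
    fn bind(task_id: u64) -> Self;
    fn release(&self, task_id: u64);
    fn schedule(&self, task: Notified);
    // Default method, as in the real trait: yielding is just rescheduling.
    fn yield_now(&self, task: Notified) {
        self.schedule(task);
    }
}

struct Fifo {
    queue: Mutex<VecDeque<Notified>>,
}

impl Schedule for Fifo {
    fn bind(_task_id: u64) -> Self {
        Fifo { queue: Mutex::new(VecDeque::new()) }
    }

    fn release(&self, _task_id: u64) {
        // Nothing to defer; the caller may drop the task immediately.
    }

    fn schedule(&self, task: Notified) {
        self.queue.lock().unwrap().push_back(task);
    }
}

fn main() {
    let sched = Fifo::bind(0);
    sched.schedule(Notified(1));
    sched.yield_now(Notified(2));
    assert_eq!(sched.queue.lock().unwrap().len(), 2);
}
```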
diff --git a/third_party/rust/tokio/src/runtime/task/raw.rs b/third_party/rust/tokio/src/runtime/task/raw.rs
new file mode 100644
index 0000000000..cae56d037d
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/raw.rs
@@ -0,0 +1,131 @@
+use crate::runtime::task::{Cell, Harness, Header, Schedule, State};
+
+use std::future::Future;
+use std::ptr::NonNull;
+use std::task::{Poll, Waker};
+
+/// Raw task handle
+pub(super) struct RawTask {
+ ptr: NonNull<Header>,
+}
+
+pub(super) struct Vtable {
+ /// Poll the future
+ pub(super) poll: unsafe fn(NonNull<Header>),
+
+ /// Deallocate the memory
+ pub(super) dealloc: unsafe fn(NonNull<Header>),
+
+ /// Read the task output, if complete
+ pub(super) try_read_output: unsafe fn(NonNull<Header>, *mut (), &Waker),
+
+ /// The join handle has been dropped
+ pub(super) drop_join_handle_slow: unsafe fn(NonNull<Header>),
+
+ /// The scheduler is being shut down
+ pub(super) shutdown: unsafe fn(NonNull<Header>),
+}
+
+/// Get the vtable for the requested `T` and `S` generics.
+pub(super) fn vtable<T: Future, S: Schedule>() -> &'static Vtable {
+ &Vtable {
+ poll: poll::<T, S>,
+ dealloc: dealloc::<T, S>,
+ try_read_output: try_read_output::<T, S>,
+ drop_join_handle_slow: drop_join_handle_slow::<T, S>,
+ shutdown: shutdown::<T, S>,
+ }
+}
+
+impl RawTask {
+ pub(super) fn new<T, S>(task: T) -> RawTask
+ where
+ T: Future,
+ S: Schedule,
+ {
+ let ptr = Box::into_raw(Cell::<_, S>::new(task, State::new()));
+ let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) };
+
+ RawTask { ptr }
+ }
+
+ pub(super) unsafe fn from_raw(ptr: NonNull<Header>) -> RawTask {
+ RawTask { ptr }
+ }
+
+ /// Returns a reference to the task's meta structure.
+ ///
+ /// Safe as `Header` is `Sync`.
+ pub(super) fn header(&self) -> &Header {
+ unsafe { self.ptr.as_ref() }
+ }
+
+ /// Safety: mutual exclusion is required to call this function.
+ pub(super) fn poll(self) {
+ let vtable = self.header().vtable;
+ unsafe { (vtable.poll)(self.ptr) }
+ }
+
+ pub(super) fn dealloc(self) {
+ let vtable = self.header().vtable;
+ unsafe {
+ (vtable.dealloc)(self.ptr);
+ }
+ }
+
+ /// Safety: `dst` must be a `*mut Poll<super::Result<T::Output>>` where `T`
+ /// is the future stored by the task.
+ pub(super) unsafe fn try_read_output(self, dst: *mut (), waker: &Waker) {
+ let vtable = self.header().vtable;
+ (vtable.try_read_output)(self.ptr, dst, waker);
+ }
+
+ pub(super) fn drop_join_handle_slow(self) {
+ let vtable = self.header().vtable;
+ unsafe { (vtable.drop_join_handle_slow)(self.ptr) }
+ }
+
+ pub(super) fn shutdown(self) {
+ let vtable = self.header().vtable;
+ unsafe { (vtable.shutdown)(self.ptr) }
+ }
+}
+
+impl Clone for RawTask {
+ fn clone(&self) -> Self {
+ RawTask { ptr: self.ptr }
+ }
+}
+
+impl Copy for RawTask {}
+
+unsafe fn poll<T: Future, S: Schedule>(ptr: NonNull<Header>) {
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.poll();
+}
+
+unsafe fn dealloc<T: Future, S: Schedule>(ptr: NonNull<Header>) {
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.dealloc();
+}
+
+unsafe fn try_read_output<T: Future, S: Schedule>(
+ ptr: NonNull<Header>,
+ dst: *mut (),
+ waker: &Waker,
+) {
+ let out = &mut *(dst as *mut Poll<super::Result<T::Output>>);
+
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.try_read_output(out, waker);
+}
+
+unsafe fn drop_join_handle_slow<T: Future, S: Schedule>(ptr: NonNull<Header>) {
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.drop_join_handle_slow()
+}
+
+unsafe fn shutdown<T: Future, S: Schedule>(ptr: NonNull<Header>) {
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.shutdown()
+}
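`RawTask` erases the `T: Future` and `S: Schedule` generics by recording one monomorphized function pointer per operation; `vtable()` relies on const promotion to hand out a `&'static` table per concrete instantiation. The same hand-rolled technique over a toy operation (assumed names, not tokio's):

```rust
struct Vtable {
    describe: unsafe fn(*const ()) -> String,
}

fn vtable<T: std::fmt::Debug>() -> &'static Vtable {
    // A const-promoted vtable per concrete `T`, as in `raw::vtable`.
    &Vtable { describe: describe::<T> }
}

unsafe fn describe<T: std::fmt::Debug>(ptr: *const ()) -> String {
    let value = &*(ptr as *const T);
    format!("{:?}", value)
}

fn main() {
    let value = vec![1, 2, 3];
    let vt = vtable::<Vec<i32>>();

    // Safety: the pointer really is a `Vec<i32>`, matching the vtable.
    let s = unsafe { (vt.describe)(&value as *const _ as *const ()) };
    assert_eq!(s, "[1, 2, 3]");
}
```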
diff --git a/third_party/rust/tokio/src/runtime/task/stack.rs b/third_party/rust/tokio/src/runtime/task/stack.rs
new file mode 100644
index 0000000000..9dd8d3f43f
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/stack.rs
@@ -0,0 +1,83 @@
+use crate::loom::sync::atomic::AtomicPtr;
+use crate::runtime::task::{Header, Task};
+
+use std::marker::PhantomData;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+/// Concurrent stack of tasks, used to pass ownership of a task from one worker
+/// to another.
+pub(crate) struct TransferStack<T: 'static> {
+ head: AtomicPtr<Header>,
+ _p: PhantomData<T>,
+}
+
+impl<T: 'static> TransferStack<T> {
+ pub(crate) fn new() -> TransferStack<T> {
+ TransferStack {
+ head: AtomicPtr::new(ptr::null_mut()),
+ _p: PhantomData,
+ }
+ }
+
+ pub(crate) fn push(&self, task: Task<T>) {
+ let task = task.into_raw();
+
+ // We don't care about any memory associated w/ setting the `head`
+ // field, just the current value.
+ //
+ // The compare-exchange creates a release sequence.
+ let mut curr = self.head.load(Relaxed);
+
+ loop {
+ unsafe {
+ task.as_ref()
+ .stack_next
+ .with_mut(|ptr| *ptr = NonNull::new(curr))
+ };
+
+ let res = self
+ .head
+ .compare_exchange(curr, task.as_ptr() as *mut _, Release, Relaxed);
+
+ match res {
+ Ok(_) => return,
+ Err(actual) => {
+ curr = actual;
+ }
+ }
+ }
+ }
+
+ pub(crate) fn drain(&self) -> impl Iterator<Item = Task<T>> {
+ struct Iter<T: 'static>(Option<NonNull<Header>>, PhantomData<T>);
+
+ impl<T: 'static> Iterator for Iter<T> {
+ type Item = Task<T>;
+
+ fn next(&mut self) -> Option<Task<T>> {
+ let task = self.0?;
+
+ // Move the cursor forward
+ self.0 = unsafe { task.as_ref().stack_next.with(|ptr| *ptr) };
+
+ // Return the task
+ unsafe { Some(Task::from_raw(task)) }
+ }
+ }
+
+ impl<T: 'static> Drop for Iter<T> {
+ fn drop(&mut self) {
+ use std::process;
+
+ if self.0.is_some() {
+ // we have bugs
+ process::abort();
+ }
+ }
+ }
+
+ let ptr = self.head.swap(ptr::null_mut(), Acquire);
+ Iter(NonNull::new(ptr), PhantomData)
+ }
+}
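`TransferStack` is a Treiber stack: `push` is a compare-exchange loop on `head` with `Release` on success, and `drain` detaches the entire list with one `Acquire` swap. A std-only sketch of the same shape over boxed nodes, for illustration:

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Relaxed, Release}};

struct Node {
    value: u32,
    next: *mut Node,
}

struct Stack {
    head: AtomicPtr<Node>,
}

impl Stack {
    fn new() -> Stack {
        Stack { head: AtomicPtr::new(ptr::null_mut()) }
    }

    fn push(&self, value: u32) {
        let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
        let mut curr = self.head.load(Relaxed);
        loop {
            unsafe { (*node).next = curr };
            // `Release` publishes the node; the failure case stays `Relaxed`.
            match self.head.compare_exchange(curr, node, Release, Relaxed) {
                Ok(_) => return,
                Err(actual) => curr = actual,
            }
        }
    }

    fn drain(&self) -> Vec<u32> {
        // One atomic swap detaches the whole list, as in `TransferStack::drain`.
        let mut curr = self.head.swap(ptr::null_mut(), Acquire);
        let mut out = Vec::new();
        while !curr.is_null() {
            let node = unsafe { Box::from_raw(curr) };
            out.push(node.value);
            curr = node.next;
        }
        out
    }
}

fn main() {
    let stack = Stack::new();
    stack.push(1);
    stack.push(2);
    assert_eq!(stack.drain(), vec![2, 1]); // LIFO order
}
```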
diff --git a/third_party/rust/tokio/src/runtime/task/state.rs b/third_party/rust/tokio/src/runtime/task/state.rs
new file mode 100644
index 0000000000..21e90430db
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/state.rs
@@ -0,0 +1,446 @@
+use crate::loom::sync::atomic::AtomicUsize;
+
+use std::fmt;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
+use std::usize;
+
+pub(super) struct State {
+ val: AtomicUsize,
+}
+
+/// Current state value
+#[derive(Copy, Clone)]
+pub(super) struct Snapshot(usize);
+
+type UpdateResult = Result<Snapshot, Snapshot>;
+
+/// The task is currently being run.
+const RUNNING: usize = 0b0001;
+
+/// The task is complete.
+///
+/// Once this bit is set, it is never unset
+const COMPLETE: usize = 0b0010;
+
+/// Extracts the task's lifecycle value from the state
+const LIFECYCLE_MASK: usize = 0b11;
+
+/// Flag tracking if the task has been pushed into a run queue.
+const NOTIFIED: usize = 0b100;
+
+/// The join handle is still around
+const JOIN_INTEREST: usize = 0b1_000;
+
+/// A join handle waker has been set
+const JOIN_WAKER: usize = 0b10_000;
+
+/// The task has been forcibly cancelled.
+const CANCELLED: usize = 0b100_000;
+
+/// All bits
+const STATE_MASK: usize = LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED;
+
+/// Bits used by the ref count portion of the state.
+const REF_COUNT_MASK: usize = !STATE_MASK;
+
+/// Number of positions to shift the ref count
+const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize;
+
+/// One ref count
+const REF_ONE: usize = 1 << REF_COUNT_SHIFT;
+
+/// State a task is initialized with
+///
+/// A task is initialized with two references: one for the scheduler and one for
+/// the `JoinHandle`. As the task starts with a `JoinHandle`, `JOIN_INTEREST` is
+/// set. A new task is immediately pushed into the run queue for execution and
+/// starts with the `NOTIFIED` flag set.
+const INITIAL_STATE: usize = (REF_ONE * 2) | JOIN_INTEREST | NOTIFIED;
+
+/// All transitions are performed via RMW operations. This establishes an
+/// unambiguous modification order.
+impl State {
+ /// Return a task's initial state
+ pub(super) fn new() -> State {
+ // A task is initialized with two references: one for the scheduler and
+ // one for the `JoinHandle` (see `INITIAL_STATE`). As the task starts
+ // with a `JoinHandle`, `JOIN_INTEREST` is set. A new task is
+ // immediately pushed into the run queue for execution and starts with
+ // the `NOTIFIED` flag set.
+ State {
+ val: AtomicUsize::new(INITIAL_STATE),
+ }
+ }
+
+ /// Loads the current state, establishes `Acquire` ordering.
+ pub(super) fn load(&self) -> Snapshot {
+ Snapshot(self.val.load(Acquire))
+ }
+
+ /// Attempt to transition the lifecycle to `Running`.
+ ///
+ /// If `ref_inc` is set, the reference count is also incremented.
+ ///
+ /// The `NOTIFIED` bit is always unset.
+ pub(super) fn transition_to_running(&self, ref_inc: bool) -> UpdateResult {
+ self.fetch_update(|curr| {
+ assert!(curr.is_notified());
+
+ let mut next = curr;
+
+ if !next.is_idle() {
+ return None;
+ }
+
+ if ref_inc {
+ next.ref_inc();
+ }
+
+ next.set_running();
+ next.unset_notified();
+ Some(next)
+ })
+ }
+
+ /// Transitions the task from `Running` -> `Idle`.
+ ///
+ /// Returns `Ok` if the transition to `Idle` is successful, `Err` otherwise.
+ /// In both cases, a snapshot of the state from **after** the transition is
+ /// returned.
+ ///
+ /// The transition to `Idle` fails if the task has been flagged to be
+ /// cancelled.
+ pub(super) fn transition_to_idle(&self) -> UpdateResult {
+ self.fetch_update(|curr| {
+ assert!(curr.is_running());
+
+ if curr.is_cancelled() {
+ return None;
+ }
+
+ let mut next = curr;
+ next.unset_running();
+
+ if next.is_notified() {
+ // The caller needs to schedule the task. To do this, it needs a
+ // waker. The waker requires a ref count.
+ next.ref_inc();
+ }
+
+ Some(next)
+ })
+ }
+
+ /// Transitions the task from `Running` -> `Complete`.
+ pub(super) fn transition_to_complete(&self) -> Snapshot {
+ const DELTA: usize = RUNNING | COMPLETE;
+
+ let prev = Snapshot(self.val.fetch_xor(DELTA, AcqRel));
+ assert!(prev.is_running());
+ assert!(!prev.is_complete());
+
+ Snapshot(prev.0 ^ DELTA)
+ }
+
+ /// Transition from `Complete` -> `Terminal`, decrementing the reference
+ /// count by 1.
+ ///
+ /// When `ref_dec` is set, an additional ref count decrement is performed.
+ /// This is used to batch atomic ops when possible.
+ pub(super) fn transition_to_terminal(&self, complete: bool, ref_dec: bool) -> Snapshot {
+ self.fetch_update(|mut snapshot| {
+ if complete {
+ snapshot.set_complete();
+ } else {
+ assert!(snapshot.is_complete());
+ }
+
+ // Decrement the primary handle
+ snapshot.ref_dec();
+
+ if ref_dec {
+ // Decrement a second time
+ snapshot.ref_dec();
+ }
+
+ Some(snapshot)
+ })
+ .unwrap()
+ }
+
+ /// Transitions the state to `NOTIFIED`.
+ ///
+ /// Returns `true` if the task needs to be submitted to the pool for
+ /// execution
+ pub(super) fn transition_to_notified(&self) -> bool {
+ let prev = Snapshot(self.val.fetch_or(NOTIFIED, AcqRel));
+ prev.will_need_queueing()
+ }
+
+ /// Set the `CANCELLED` bit and attempt to transition to `Running`.
+ ///
+ /// Returns `true` if the transition to `Running` succeeded.
+ pub(super) fn transition_to_shutdown(&self) -> bool {
+ let mut prev = Snapshot(0);
+
+ let _ = self.fetch_update(|mut snapshot| {
+ prev = snapshot;
+
+ if snapshot.is_idle() {
+ snapshot.set_running();
+
+ if snapshot.is_notified() {
+ // If the task is idle and notified, this indicates the task is
+ // in the run queue and is considered owned by the scheduler.
+ // The shutdown operation claims ownership of the task, which
+ // means we need to assign an additional ref-count to the task
+ // in the queue.
+ snapshot.ref_inc();
+ }
+ }
+
+ snapshot.set_cancelled();
+ Some(snapshot)
+ });
+
+ prev.is_idle()
+ }
+
+ /// Optimistically tries to swap the state assuming the join handle is
+ /// __immediately__ dropped on spawn
+ pub(super) fn drop_join_handle_fast(&self) -> Result<(), ()> {
+ use std::sync::atomic::Ordering::Relaxed;
+
+ // Relaxed is acceptable as if this function is called and succeeds,
+ // then nothing has been done w/ the join handle.
+ //
+ // The moment the join handle is used (polled), the `JOIN_WAKER` flag is
+ // set, at which point the CAS will fail.
+ //
+ // Given this, there is no risk if this operation is reordered.
+ self.val
+ .compare_exchange_weak(
+ INITIAL_STATE,
+ (INITIAL_STATE - REF_ONE) & !JOIN_INTEREST,
+ Release,
+ Relaxed,
+ )
+ .map(|_| ())
+ .map_err(|_| ())
+ }
+
+ /// Try to unset the JOIN_INTEREST flag.
+ ///
+ /// Returns `Ok` if the operation happens before the task transitions to a
+ /// completed state, `Err` otherwise.
+ pub(super) fn unset_join_interested(&self) -> UpdateResult {
+ self.fetch_update(|curr| {
+ assert!(curr.is_join_interested());
+
+ if curr.is_complete() {
+ return None;
+ }
+
+ let mut next = curr;
+ next.unset_join_interested();
+
+ Some(next)
+ })
+ }
+
+ /// Set the `JOIN_WAKER` bit.
+ ///
+ /// Returns `Ok` if the bit is set, `Err` otherwise. This operation fails if
+ /// the task has completed.
+ pub(super) fn set_join_waker(&self) -> UpdateResult {
+ self.fetch_update(|curr| {
+ assert!(curr.is_join_interested());
+ assert!(!curr.has_join_waker());
+
+ if curr.is_complete() {
+ return None;
+ }
+
+ let mut next = curr;
+ next.set_join_waker();
+
+ Some(next)
+ })
+ }
+
+ /// Unsets the `JOIN_WAKER` bit.
+ ///
+ /// Returns `Ok` if the bit has been unset, `Err` otherwise. This operation fails if
+ /// the task has completed.
+ pub(super) fn unset_waker(&self) -> UpdateResult {
+ self.fetch_update(|curr| {
+ assert!(curr.is_join_interested());
+ assert!(curr.has_join_waker());
+
+ if curr.is_complete() {
+ return None;
+ }
+
+ let mut next = curr;
+ next.unset_join_waker();
+
+ Some(next)
+ })
+ }
+
+ pub(super) fn ref_inc(&self) {
+ use std::process;
+ use std::sync::atomic::Ordering::Relaxed;
+
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], increasing the
+ // reference counter can always be done with memory_order_relaxed: new
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ let prev = self.val.fetch_add(REF_ONE, Relaxed);
+
+ // If the reference count overflowed, abort.
+ if prev > isize::max_value() as usize {
+ process::abort();
+ }
+ }
+
+ /// Returns `true` if the task should be released.
+ pub(super) fn ref_dec(&self) -> bool {
+ let prev = Snapshot(self.val.fetch_sub(REF_ONE, AcqRel));
+ prev.ref_count() == 1
+ }
+
+ fn fetch_update<F>(&self, mut f: F) -> Result<Snapshot, Snapshot>
+ where
+ F: FnMut(Snapshot) -> Option<Snapshot>,
+ {
+ let mut curr = self.load();
+
+ loop {
+ let next = match f(curr) {
+ Some(next) => next,
+ None => return Err(curr),
+ };
+
+ let res = self.val.compare_exchange(curr.0, next.0, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => return Ok(next),
+ Err(actual) => curr = Snapshot(actual),
+ }
+ }
+ }
+}
+
+// ===== impl Snapshot =====
+
+impl Snapshot {
+ /// Returns `true` if the task is in an idle state.
+ pub(super) fn is_idle(self) -> bool {
+ self.0 & (RUNNING | COMPLETE) == 0
+ }
+
+ /// Returns `true` if the task has been flagged as notified.
+ pub(super) fn is_notified(self) -> bool {
+ self.0 & NOTIFIED == NOTIFIED
+ }
+
+ fn unset_notified(&mut self) {
+ self.0 &= !NOTIFIED
+ }
+
+ pub(super) fn is_running(self) -> bool {
+ self.0 & RUNNING == RUNNING
+ }
+
+ fn set_running(&mut self) {
+ self.0 |= RUNNING;
+ }
+
+ fn unset_running(&mut self) {
+ self.0 &= !RUNNING;
+ }
+
+ pub(super) fn is_cancelled(self) -> bool {
+ self.0 & CANCELLED == CANCELLED
+ }
+
+ fn set_cancelled(&mut self) {
+ self.0 |= CANCELLED;
+ }
+
+ fn set_complete(&mut self) {
+ self.0 |= COMPLETE;
+ }
+
+ /// Returns `true` if the task's future has completed execution.
+ pub(super) fn is_complete(self) -> bool {
+ self.0 & COMPLETE == COMPLETE
+ }
+
+ pub(super) fn is_join_interested(self) -> bool {
+ self.0 & JOIN_INTEREST == JOIN_INTEREST
+ }
+
+ fn unset_join_interested(&mut self) {
+ self.0 &= !JOIN_INTEREST
+ }
+
+ pub(super) fn has_join_waker(self) -> bool {
+ self.0 & JOIN_WAKER == JOIN_WAKER
+ }
+
+ fn set_join_waker(&mut self) {
+ self.0 |= JOIN_WAKER;
+ }
+
+ fn unset_join_waker(&mut self) {
+ self.0 &= !JOIN_WAKER
+ }
+
+ pub(super) fn ref_count(self) -> usize {
+ (self.0 & REF_COUNT_MASK) >> REF_COUNT_SHIFT
+ }
+
+ fn ref_inc(&mut self) {
+ assert!(self.0 <= isize::max_value() as usize);
+ self.0 += REF_ONE;
+ }
+
+ pub(super) fn ref_dec(&mut self) {
+ assert!(self.ref_count() > 0);
+ self.0 -= REF_ONE
+ }
+
+ fn will_need_queueing(self) -> bool {
+ !self.is_notified() && self.is_idle()
+ }
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let snapshot = self.load();
+ snapshot.fmt(fmt)
+ }
+}
+
+impl fmt::Debug for Snapshot {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Snapshot")
+ .field("is_running", &self.is_running())
+ .field("is_complete", &self.is_complete())
+ .field("is_notified", &self.is_notified())
+ .field("is_cancelled", &self.is_cancelled())
+ .field("is_join_interested", &self.is_join_interested())
+ .field("has_join_waker", &self.has_join_waker())
+ .field("ref_count", &self.ref_count())
+ .finish()
+ }
+}
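The state word above packs six flag bits into the low end and derives the ref-count field from whatever bits remain, so `REF_COUNT_SHIFT` follows from `count_zeros` rather than a hard-coded constant. The arithmetic can be checked in isolation:

```rust
const RUNNING: usize = 0b0001;
const COMPLETE: usize = 0b0010;
const LIFECYCLE_MASK: usize = 0b11;
const NOTIFIED: usize = 0b100;
const JOIN_INTEREST: usize = 0b1_000;
const JOIN_WAKER: usize = 0b10_000;
const CANCELLED: usize = 0b100_000;

const STATE_MASK: usize =
    LIFECYCLE_MASK | NOTIFIED | JOIN_INTEREST | JOIN_WAKER | CANCELLED;
const REF_COUNT_MASK: usize = !STATE_MASK;
const REF_COUNT_SHIFT: usize = REF_COUNT_MASK.count_zeros() as usize;
const REF_ONE: usize = 1 << REF_COUNT_SHIFT;

fn main() {
    // Six flag bits means the ref count starts at bit 6.
    assert_eq!(REF_COUNT_SHIFT, 6);

    // The initial state: two refs, join interest, notified.
    let state = (REF_ONE * 2) | JOIN_INTEREST | NOTIFIED;
    assert_eq!((state & REF_COUNT_MASK) >> REF_COUNT_SHIFT, 2);
    assert!(state & NOTIFIED != 0);
    assert!(state & (RUNNING | COMPLETE) == 0); // still idle
}
```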
diff --git a/third_party/rust/tokio/src/runtime/task/waker.rs b/third_party/rust/tokio/src/runtime/task/waker.rs
new file mode 100644
index 0000000000..5c2d478fbb
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/task/waker.rs
@@ -0,0 +1,101 @@
+use crate::runtime::task::harness::Harness;
+use crate::runtime::task::{Header, Schedule};
+
+use std::future::Future;
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ops;
+use std::ptr::NonNull;
+use std::task::{RawWaker, RawWakerVTable, Waker};
+
+pub(super) struct WakerRef<'a, S: 'static> {
+ waker: ManuallyDrop<Waker>,
+ _p: PhantomData<(&'a Header, S)>,
+}
+
+/// Returns a `WakerRef` which avoids having to pre-emptively increase the
+/// refcount if there is no need to do so.
+pub(super) fn waker_ref<T, S>(header: &Header) -> WakerRef<'_, S>
+where
+ T: Future,
+ S: Schedule,
+{
+ // `Waker::will_wake` uses the VTABLE pointer as part of the check. This
+ // means that `will_wake` will always return false when using the current
+ // task's waker. (discussion at rust-lang/rust#66281).
+ //
+ // To fix this, we use a single vtable. Since we pass in a reference at this
+ // point and not an *owned* waker, we must ensure that `drop` is never
+ // called on this waker instance. This is done by wrapping it with
+ // `ManuallyDrop` and then never calling drop.
+ let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker::<T, S>(header))) };
+
+ WakerRef {
+ waker,
+ _p: PhantomData,
+ }
+}
+
+impl<S> ops::Deref for WakerRef<'_, S> {
+ type Target = Waker;
+
+ fn deref(&self) -> &Waker {
+ &self.waker
+ }
+}
+
+unsafe fn clone_waker<T, S>(ptr: *const ()) -> RawWaker
+where
+ T: Future,
+ S: Schedule,
+{
+ let header = ptr as *const Header;
+ (*header).state.ref_inc();
+ raw_waker::<T, S>(header)
+}
+
+unsafe fn drop_waker<T, S>(ptr: *const ())
+where
+ T: Future,
+ S: Schedule,
+{
+ let ptr = NonNull::new_unchecked(ptr as *mut Header);
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.drop_reference();
+}
+
+unsafe fn wake_by_val<T, S>(ptr: *const ())
+where
+ T: Future,
+ S: Schedule,
+{
+ let ptr = NonNull::new_unchecked(ptr as *mut Header);
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.wake_by_val();
+}
+
+// Wake without consuming the waker
+unsafe fn wake_by_ref<T, S>(ptr: *const ())
+where
+ T: Future,
+ S: Schedule,
+{
+ let ptr = NonNull::new_unchecked(ptr as *mut Header);
+ let harness = Harness::<T, S>::from_raw(ptr);
+ harness.wake_by_ref();
+}
+
+fn raw_waker<T, S>(header: *const Header) -> RawWaker
+where
+ T: Future,
+ S: Schedule,
+{
+ let ptr = header as *const ();
+ let vtable = &RawWakerVTable::new(
+ clone_waker::<T, S>,
+ wake_by_val::<T, S>,
+ wake_by_ref::<T, S>,
+ drop_waker::<T, S>,
+ );
+ RawWaker::new(ptr, vtable)
+}
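`raw_waker` assembles a `Waker` by hand from a data pointer and a `RawWakerVTable`; keeping a single vtable per instantiation is what makes `Waker::will_wake` comparisons meaningful. The same mechanics in a self-contained no-op waker (a common pattern, not tokio's code):

```rust
use std::task::{RawWaker, RawWakerVTable, Waker};

static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop_raw);

unsafe fn clone(ptr: *const ()) -> RawWaker {
    // No state to ref-count here; tokio's version does `state.ref_inc()`.
    RawWaker::new(ptr, &VTABLE)
}

unsafe fn wake(_ptr: *const ()) {}
unsafe fn wake_by_ref(_ptr: *const ()) {}
unsafe fn drop_raw(_ptr: *const ()) {}

fn noop_waker() -> Waker {
    // Safety: every vtable function is a no-op, so any pointer is fine.
    unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) }
}

fn main() {
    let waker = noop_waker();
    let other = waker.clone();
    // One shared vtable keeps `will_wake` meaningful, per the comment above.
    assert!(waker.will_wake(&other));
    waker.wake();
}
```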
diff --git a/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs b/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs
new file mode 100644
index 0000000000..db7048e3f9
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/loom_blocking.rs
@@ -0,0 +1,31 @@
+use crate::runtime::{self, Runtime};
+
+use std::sync::Arc;
+
+#[test]
+fn blocking_shutdown() {
+ loom::model(|| {
+ let v = Arc::new(());
+
+ let rt = mk_runtime(1);
+ rt.enter(|| {
+ for _ in 0..2 {
+ let v = v.clone();
+ crate::task::spawn_blocking(move || {
+ assert!(1 < Arc::strong_count(&v));
+ });
+ }
+ });
+
+ drop(rt);
+ assert_eq!(1, Arc::strong_count(&v));
+ });
+}
+
+fn mk_runtime(num_threads: usize) -> Runtime {
+ runtime::Builder::new()
+ .threaded_scheduler()
+ .core_threads(num_threads)
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs b/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs
new file mode 100644
index 0000000000..c126fe479a
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/loom_oneshot.rs
@@ -0,0 +1,49 @@
+use loom::sync::Notify;
+
+use std::sync::{Arc, Mutex};
+
+pub(crate) fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let inner = Arc::new(Inner {
+ notify: Notify::new(),
+ value: Mutex::new(None),
+ });
+
+ let tx = Sender {
+ inner: inner.clone(),
+ };
+ let rx = Receiver { inner };
+
+ (tx, rx)
+}
+
+pub(crate) struct Sender<T> {
+ inner: Arc<Inner<T>>,
+}
+
+pub(crate) struct Receiver<T> {
+ inner: Arc<Inner<T>>,
+}
+
+struct Inner<T> {
+ notify: Notify,
+ value: Mutex<Option<T>>,
+}
+
+impl<T> Sender<T> {
+ pub(crate) fn send(self, value: T) {
+ *self.inner.value.lock().unwrap() = Some(value);
+ self.inner.notify.notify();
+ }
+}
+
+impl<T> Receiver<T> {
+ pub(crate) fn recv(self) -> T {
+ loop {
+ if let Some(v) = self.inner.value.lock().unwrap().take() {
+ return v;
+ }
+
+ self.inner.notify.wait();
+ }
+ }
+}
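Outside of loom, the same blocking one-shot can be written with a `std::sync::Condvar` standing in for `loom::sync::Notify`. A hedged sketch, checking the value in a loop to avoid lost wakeups:

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct Inner<T> {
    value: Mutex<Option<T>>,
    cond: Condvar,
}

fn channel<T>() -> (Arc<Inner<T>>, Arc<Inner<T>>) {
    let inner = Arc::new(Inner { value: Mutex::new(None), cond: Condvar::new() });
    (inner.clone(), inner)
}

fn main() {
    let (tx, rx) = channel::<&'static str>();

    thread::spawn(move || {
        *tx.value.lock().unwrap() = Some("hello");
        tx.cond.notify_one();
    });

    let mut guard = rx.value.lock().unwrap();
    while guard.is_none() {
        // `wait` releases the lock while parked, mirroring `notify.wait()`.
        guard = rx.cond.wait(guard).unwrap();
    }
    assert_eq!(guard.take(), Some("hello"));
}
```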
diff --git a/third_party/rust/tokio/src/runtime/tests/loom_pool.rs b/third_party/rust/tokio/src/runtime/tests/loom_pool.rs
new file mode 100644
index 0000000000..c08658cde8
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/loom_pool.rs
@@ -0,0 +1,380 @@
+/// Full runtime loom tests. These are heavy tests and take significant time to
+/// run on CI.
+///
+/// Use `LOOM_MAX_PREEMPTIONS=1` to do a "quick" run as a smoke test.
+///
+/// In order to speed up the CI, tests are split into groups.
+use crate::future::poll_fn;
+use crate::runtime::tests::loom_oneshot as oneshot;
+use crate::runtime::{self, Runtime};
+use crate::{spawn, task};
+use tokio_test::assert_ok;
+
+use loom::sync::atomic::{AtomicBool, AtomicUsize};
+use loom::sync::{Arc, Mutex};
+
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::task::{Context, Poll};
+
+/// Tests are divided into groups to make the runs faster on CI.
+mod group_a {
+ use super::*;
+
+ #[test]
+ fn racy_shutdown() {
+ loom::model(|| {
+ let pool = mk_pool(1);
+
+ // here's the case we want to exercise:
+ //
+ // a worker that still has tasks in its local queue gets sent to the blocking pool (due to
+ // block_in_place). the blocking pool is shut down, so drops the worker. the worker's
+ // shutdown method never gets run.
+ //
+ // we do this by spawning two tasks on one worker, the first of which does block_in_place,
+ // and then immediately drop the pool.
+
+ pool.spawn(track(async {
+ crate::task::block_in_place(|| {});
+ }));
+ pool.spawn(track(async {}));
+ drop(pool);
+ });
+ }
+
+ #[test]
+ fn pool_multi_spawn() {
+ loom::model(|| {
+ let pool = mk_pool(2);
+ let c1 = Arc::new(AtomicUsize::new(0));
+
+ let (tx, rx) = oneshot::channel();
+ let tx1 = Arc::new(Mutex::new(Some(tx)));
+
+ // Spawn a task
+ let c2 = c1.clone();
+ let tx2 = tx1.clone();
+ pool.spawn(track(async move {
+ spawn(track(async move {
+ if 1 == c1.fetch_add(1, Relaxed) {
+ tx1.lock().unwrap().take().unwrap().send(());
+ }
+ }));
+ }));
+
+ // Spawn a second task
+ pool.spawn(track(async move {
+ spawn(track(async move {
+ if 1 == c2.fetch_add(1, Relaxed) {
+ tx2.lock().unwrap().take().unwrap().send(());
+ }
+ }));
+ }));
+
+ rx.recv();
+ });
+ }
+
+ fn only_blocking_inner(first_pending: bool) {
+ loom::model(move || {
+ let pool = mk_pool(1);
+ let (block_tx, block_rx) = oneshot::channel();
+
+ pool.spawn(track(async move {
+ crate::task::block_in_place(move || {
+ block_tx.send(());
+ });
+ if first_pending {
+ task::yield_now().await
+ }
+ }));
+
+ block_rx.recv();
+ drop(pool);
+ });
+ }
+
+ #[test]
+ fn only_blocking_without_pending() {
+ only_blocking_inner(false)
+ }
+
+ #[test]
+ fn only_blocking_with_pending() {
+ only_blocking_inner(true)
+ }
+}
+
+mod group_b {
+ use super::*;
+
+ fn blocking_and_regular_inner(first_pending: bool) {
+ const NUM: usize = 3;
+ loom::model(move || {
+ let pool = mk_pool(1);
+ let cnt = Arc::new(AtomicUsize::new(0));
+
+ let (block_tx, block_rx) = oneshot::channel();
+ let (done_tx, done_rx) = oneshot::channel();
+ let done_tx = Arc::new(Mutex::new(Some(done_tx)));
+
+ pool.spawn(track(async move {
+ crate::task::block_in_place(move || {
+ block_tx.send(());
+ });
+ if first_pending {
+ task::yield_now().await
+ }
+ }));
+
+ for _ in 0..NUM {
+ let cnt = cnt.clone();
+ let done_tx = done_tx.clone();
+
+ pool.spawn(track(async move {
+ if NUM == cnt.fetch_add(1, Relaxed) + 1 {
+ done_tx.lock().unwrap().take().unwrap().send(());
+ }
+ }));
+ }
+
+ done_rx.recv();
+ block_rx.recv();
+
+ drop(pool);
+ });
+ }
+
+ #[test]
+ fn blocking_and_regular() {
+ blocking_and_regular_inner(false);
+ }
+
+ #[test]
+ fn blocking_and_regular_with_pending() {
+ blocking_and_regular_inner(true);
+ }
+
+ #[test]
+ fn pool_shutdown() {
+ loom::model(|| {
+ let pool = mk_pool(2);
+
+ pool.spawn(track(async move {
+ gated2(true).await;
+ }));
+
+ pool.spawn(track(async move {
+ gated2(false).await;
+ }));
+
+ drop(pool);
+ });
+ }
+
+ #[test]
+ fn join_output() {
+ loom::model(|| {
+ let mut rt = mk_pool(1);
+
+ rt.block_on(async {
+ let t = crate::spawn(track(async { "hello" }));
+
+ let out = assert_ok!(t.await);
+ assert_eq!("hello", out.into_inner());
+ });
+ });
+ }
+
+ #[test]
+ fn poll_drop_handle_then_drop() {
+ loom::model(|| {
+ let mut rt = mk_pool(1);
+
+ rt.block_on(async move {
+ let mut t = crate::spawn(track(async { "hello" }));
+
+ poll_fn(|cx| {
+ let _ = Pin::new(&mut t).poll(cx);
+ Poll::Ready(())
+ })
+ .await;
+ });
+ })
+ }
+
+ #[test]
+ fn complete_block_on_under_load() {
+ loom::model(|| {
+ let mut pool = mk_pool(1);
+
+ pool.block_on(async {
+ // Trigger a re-schedule
+ crate::spawn(track(async {
+ for _ in 0..2 {
+ task::yield_now().await;
+ }
+ }));
+
+ gated2(true).await
+ });
+ });
+ }
+}
+
+mod group_c {
+ use super::*;
+
+ #[test]
+ fn shutdown_with_notification() {
+ use crate::sync::oneshot;
+
+ loom::model(|| {
+ let rt = mk_pool(2);
+ let (done_tx, done_rx) = oneshot::channel::<()>();
+
+ rt.spawn(track(async move {
+ let (tx, rx) = oneshot::channel::<()>();
+
+ crate::spawn(async move {
+ crate::task::spawn_blocking(move || {
+ let _ = tx.send(());
+ });
+
+ let _ = done_rx.await;
+ });
+
+ let _ = rx.await;
+
+ let _ = done_tx.send(());
+ }));
+ });
+ }
+}
+
+mod group_d {
+ use super::*;
+
+ #[test]
+ fn pool_multi_notify() {
+ loom::model(|| {
+ let pool = mk_pool(2);
+
+ let c1 = Arc::new(AtomicUsize::new(0));
+
+ let (done_tx, done_rx) = oneshot::channel();
+ let done_tx1 = Arc::new(Mutex::new(Some(done_tx)));
+
+ // Spawn a task
+ let c2 = c1.clone();
+ let done_tx2 = done_tx1.clone();
+ pool.spawn(track(async move {
+ gated().await;
+ gated().await;
+
+ if 1 == c1.fetch_add(1, Relaxed) {
+ done_tx1.lock().unwrap().take().unwrap().send(());
+ }
+ }));
+
+ // Spawn a second task
+ pool.spawn(track(async move {
+ gated().await;
+ gated().await;
+
+ if 1 == c2.fetch_add(1, Relaxed) {
+ done_tx2.lock().unwrap().take().unwrap().send(());
+ }
+ }));
+
+ done_rx.recv();
+ });
+ }
+}
+
+fn mk_pool(num_threads: usize) -> Runtime {
+ runtime::Builder::new()
+ .threaded_scheduler()
+ .core_threads(num_threads)
+ .build()
+ .unwrap()
+}
+
+fn gated() -> impl Future<Output = &'static str> {
+ gated2(false)
+}
+
+fn gated2(thread: bool) -> impl Future<Output = &'static str> {
+ use loom::thread;
+ use std::sync::Arc;
+
+ let gate = Arc::new(AtomicBool::new(false));
+ let mut fired = false;
+
+ poll_fn(move |cx| {
+ if !fired {
+ let gate = gate.clone();
+ let waker = cx.waker().clone();
+
+ if thread {
+ thread::spawn(move || {
+ gate.store(true, SeqCst);
+ waker.wake_by_ref();
+ });
+ } else {
+ spawn(track(async move {
+ gate.store(true, SeqCst);
+ waker.wake_by_ref();
+ }));
+ }
+
+ fired = true;
+
+ return Poll::Pending;
+ }
+
+ if gate.load(SeqCst) {
+ Poll::Ready("hello world")
+ } else {
+ Poll::Pending
+ }
+ })
+}
+
+fn track<T: Future>(f: T) -> Track<T> {
+ Track {
+ inner: f,
+ arc: Arc::new(()),
+ }
+}
+
+pin_project! {
+ struct Track<T> {
+ #[pin]
+ inner: T,
+ // Arc is used to hook into loom's leak tracking.
+ arc: Arc<()>,
+ }
+}
+
+impl<T> Track<T> {
+ fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+impl<T: Future> Future for Track<T> {
+ type Output = Track<T::Output>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let me = self.project();
+
+ Poll::Ready(Track {
+ inner: ready!(me.inner.poll(cx)),
+ arc: me.arc.clone(),
+ })
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/loom_queue.rs b/third_party/rust/tokio/src/runtime/tests/loom_queue.rs
new file mode 100644
index 0000000000..de02610db0
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/loom_queue.rs
@@ -0,0 +1,216 @@
+use crate::runtime::queue;
+use crate::runtime::task::{self, Schedule, Task};
+
+use loom::thread;
+
+#[test]
+fn basic() {
+ loom::model(|| {
+ let (steal, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ let th = thread::spawn(move || {
+ let (_, mut local) = queue::local();
+ let mut n = 0;
+
+ for _ in 0..3 {
+ if steal.steal_into(&mut local).is_some() {
+ n += 1;
+ }
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+ }
+
+ n
+ });
+
+ let mut n = 0;
+
+ for _ in 0..2 {
+ for _ in 0..2 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ if local.pop().is_some() {
+ n += 1;
+ }
+
+ // Push another task
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+ }
+
+ while inject.pop().is_some() {
+ n += 1;
+ }
+
+ n += th.join().unwrap();
+
+ assert_eq!(6, n);
+ });
+}
+
+#[test]
+fn steal_overflow() {
+ loom::model(|| {
+ let (steal, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ let th = thread::spawn(move || {
+ let (_, mut local) = queue::local();
+ let mut n = 0;
+
+ if steal.steal_into(&mut local).is_some() {
+ n += 1;
+ }
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ n
+ });
+
+ let mut n = 0;
+
+ // push a task, pop a task
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+
+ if local.pop().is_some() {
+ n += 1;
+ }
+
+ for _ in 0..6 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ n += th.join().unwrap();
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ while inject.pop().is_some() {
+ n += 1;
+ }
+
+ assert_eq!(7, n);
+ });
+}
+
+#[test]
+fn multi_stealer() {
+ const NUM_TASKS: usize = 5;
+
+ fn steal_tasks(steal: queue::Steal<Runtime>) -> usize {
+ let (_, mut local) = queue::local();
+
+ if steal.steal_into(&mut local).is_none() {
+ return 0;
+ }
+
+ let mut n = 1;
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ n
+ }
+
+ loom::model(|| {
+ let (steal, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ // Push work
+ for _ in 0..NUM_TASKS {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ let th1 = {
+ let steal = steal.clone();
+ thread::spawn(move || steal_tasks(steal))
+ };
+
+ let th2 = thread::spawn(move || steal_tasks(steal));
+
+ let mut n = 0;
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ while inject.pop().is_some() {
+ n += 1;
+ }
+
+ n += th1.join().unwrap();
+ n += th2.join().unwrap();
+
+ assert_eq!(n, NUM_TASKS);
+ });
+}
+
+#[test]
+fn chained_steal() {
+ loom::model(|| {
+ let (s1, mut l1) = queue::local();
+ let (s2, mut l2) = queue::local();
+ let inject = queue::Inject::new();
+
+ // Load up some tasks
+ for _ in 0..4 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ l1.push_back(task, &inject);
+
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ l2.push_back(task, &inject);
+ }
+
+ // Spawn a task to steal from **our** queue
+ let th = thread::spawn(move || {
+ let (_, mut local) = queue::local();
+ s1.steal_into(&mut local);
+
+ while local.pop().is_some() {}
+ });
+
+ // Drain our tasks, then attempt to steal
+ while l1.pop().is_some() {}
+
+ s2.steal_into(&mut l1);
+
+ th.join().unwrap();
+
+ while l1.pop().is_some() {}
+ while l2.pop().is_some() {}
+ while inject.pop().is_some() {}
+ });
+}
+
+struct Runtime;
+
+impl Schedule for Runtime {
+ fn bind(task: Task<Self>) -> Runtime {
+ std::mem::forget(task);
+ Runtime
+ }
+
+ fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
+ None
+ }
+
+ fn schedule(&self, _task: task::Notified<Self>) {
+ unreachable!();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/mod.rs b/third_party/rust/tokio/src/runtime/tests/mod.rs
new file mode 100644
index 0000000000..123a7e35a3
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/mod.rs
@@ -0,0 +1,13 @@
+cfg_loom! {
+ mod loom_blocking;
+ mod loom_oneshot;
+ mod loom_pool;
+ mod loom_queue;
+}
+
+cfg_not_loom! {
+ mod queue;
+
+ #[cfg(miri)]
+ mod task;
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/queue.rs b/third_party/rust/tokio/src/runtime/tests/queue.rs
new file mode 100644
index 0000000000..d228d5dcc7
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/queue.rs
@@ -0,0 +1,202 @@
+use crate::runtime::queue;
+use crate::runtime::task::{self, Schedule, Task};
+
+use std::thread;
+use std::time::Duration;
+
+#[test]
+fn fits_256() {
+ let (_, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ for _ in 0..256 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ assert!(inject.pop().is_none());
+
+ while local.pop().is_some() {}
+}
+
+#[test]
+fn overflow() {
+ let (_, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ for _ in 0..257 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ let mut n = 0;
+
+ while inject.pop().is_some() {
+ n += 1;
+ }
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ assert_eq!(n, 257);
+}
+
+#[test]
+fn steal_batch() {
+ let (steal1, mut local1) = queue::local();
+ let (_, mut local2) = queue::local();
+ let inject = queue::Inject::new();
+
+ for _ in 0..4 {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local1.push_back(task, &inject);
+ }
+
+ assert!(steal1.steal_into(&mut local2).is_some());
+
+ for _ in 0..1 {
+ assert!(local2.pop().is_some());
+ }
+
+ assert!(local2.pop().is_none());
+
+ for _ in 0..2 {
+ assert!(local1.pop().is_some());
+ }
+
+ assert!(local1.pop().is_none());
+}
+
+#[test]
+fn stress1() {
+ const NUM_ITER: usize = 1;
+ const NUM_STEAL: usize = 1_000;
+ const NUM_LOCAL: usize = 1_000;
+ const NUM_PUSH: usize = 500;
+ const NUM_POP: usize = 250;
+
+ for _ in 0..NUM_ITER {
+ let (steal, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ let th = thread::spawn(move || {
+ let (_, mut local) = queue::local();
+ let mut n = 0;
+
+ for _ in 0..NUM_STEAL {
+ if steal.steal_into(&mut local).is_some() {
+ n += 1;
+ }
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ thread::yield_now();
+ }
+
+ n
+ });
+
+ let mut n = 0;
+
+ for _ in 0..NUM_LOCAL {
+ for _ in 0..NUM_PUSH {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+ }
+
+ for _ in 0..NUM_POP {
+ if local.pop().is_some() {
+ n += 1;
+ } else {
+ break;
+ }
+ }
+ }
+
+ while inject.pop().is_some() {
+ n += 1;
+ }
+
+ n += th.join().unwrap();
+
+ assert_eq!(n, NUM_LOCAL * NUM_PUSH);
+ }
+}
+
+#[test]
+fn stress2() {
+ const NUM_ITER: usize = 1;
+ const NUM_TASKS: usize = 1_000_000;
+ const NUM_STEAL: usize = 1_000;
+
+ for _ in 0..NUM_ITER {
+ let (steal, mut local) = queue::local();
+ let inject = queue::Inject::new();
+
+ let th = thread::spawn(move || {
+ let (_, mut local) = queue::local();
+ let mut n = 0;
+
+ for _ in 0..NUM_STEAL {
+ if steal.steal_into(&mut local).is_some() {
+ n += 1;
+ }
+
+ while local.pop().is_some() {
+ n += 1;
+ }
+
+ thread::sleep(Duration::from_micros(10));
+ }
+
+ n
+ });
+
+ let mut num_pop = 0;
+
+ for i in 0..NUM_TASKS {
+ let (task, _) = task::joinable::<_, Runtime>(async {});
+ local.push_back(task, &inject);
+
+ if i % 128 == 0 && local.pop().is_some() {
+ num_pop += 1;
+ }
+
+ while inject.pop().is_some() {
+ num_pop += 1;
+ }
+ }
+
+ num_pop += th.join().unwrap();
+
+ while local.pop().is_some() {
+ num_pop += 1;
+ }
+
+ while inject.pop().is_some() {
+ num_pop += 1;
+ }
+
+ assert_eq!(num_pop, NUM_TASKS);
+ }
+}
+
+struct Runtime;
+
+impl Schedule for Runtime {
+ fn bind(task: Task<Self>) -> Runtime {
+ std::mem::forget(task);
+ Runtime
+ }
+
+ fn release(&self, _task: &Task<Self>) -> Option<Task<Self>> {
+ None
+ }
+
+ fn schedule(&self, _task: task::Notified<Self>) {
+ unreachable!();
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/tests/task.rs b/third_party/rust/tokio/src/runtime/tests/task.rs
new file mode 100644
index 0000000000..82315a04ff
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/tests/task.rs
@@ -0,0 +1,159 @@
+use crate::runtime::task::{self, Schedule, Task};
+use crate::util::linked_list::LinkedList;
+use crate::util::TryLock;
+
+use std::collections::VecDeque;
+use std::sync::Arc;
+
+#[test]
+fn create_drop() {
+ let _ = task::joinable::<_, Runtime>(async { unreachable!() });
+}
+
+#[test]
+fn schedule() {
+ with(|rt| {
+ let (task, _) = task::joinable(async {
+ crate::task::yield_now().await;
+ });
+
+ rt.schedule(task);
+
+ assert_eq!(2, rt.tick());
+ })
+}
+
+#[test]
+fn shutdown() {
+ with(|rt| {
+ let (task, _) = task::joinable(async {
+ loop {
+ crate::task::yield_now().await;
+ }
+ });
+
+ rt.schedule(task);
+ rt.tick_max(1);
+
+ rt.shutdown();
+ })
+}
+
+fn with(f: impl FnOnce(Runtime)) {
+ struct Reset;
+
+ impl Drop for Reset {
+ fn drop(&mut self) {
+ let _rt = CURRENT.try_lock().unwrap().take();
+ }
+ }
+
+ let _reset = Reset;
+
+ let rt = Runtime(Arc::new(Inner {
+ released: task::TransferStack::new(),
+ core: TryLock::new(Core {
+ queue: VecDeque::new(),
+ tasks: LinkedList::new(),
+ }),
+ }));
+
+ *CURRENT.try_lock().unwrap() = Some(rt.clone());
+ f(rt)
+}
+
+#[derive(Clone)]
+struct Runtime(Arc<Inner>);
+
+struct Inner {
+ released: task::TransferStack<Runtime>,
+ core: TryLock<Core>,
+}
+
+struct Core {
+ queue: VecDeque<task::Notified<Runtime>>,
+ tasks: LinkedList<Task<Runtime>>,
+}
+
+static CURRENT: TryLock<Option<Runtime>> = TryLock::new(None);
+
+impl Runtime {
+ fn tick(&self) -> usize {
+ self.tick_max(usize::max_value())
+ }
+
+ fn tick_max(&self, max: usize) -> usize {
+ let mut n = 0;
+
+ while !self.is_empty() && n < max {
+ let task = self.next_task();
+ n += 1;
+ task.run();
+ }
+
+ self.0.maintenance();
+
+ n
+ }
+
+ fn is_empty(&self) -> bool {
+ self.0.core.try_lock().unwrap().queue.is_empty()
+ }
+
+ fn next_task(&self) -> task::Notified<Runtime> {
+ self.0.core.try_lock().unwrap().queue.pop_front().unwrap()
+ }
+
+ fn shutdown(&self) {
+ let mut core = self.0.core.try_lock().unwrap();
+
+ for task in core.tasks.iter() {
+ task.shutdown();
+ }
+
+ while let Some(task) = core.queue.pop_back() {
+ task.shutdown();
+ }
+
+ drop(core);
+
+ while !self.0.core.try_lock().unwrap().tasks.is_empty() {
+ self.0.maintenance();
+ }
+ }
+}
+
+impl Inner {
+ fn maintenance(&self) {
+ use std::mem::ManuallyDrop;
+
+ for task in self.released.drain() {
+ let task = ManuallyDrop::new(task);
+
+ // safety: see worker.rs
+ unsafe {
+ let ptr = task.header().into();
+ self.core.try_lock().unwrap().tasks.remove(ptr);
+ }
+ }
+ }
+}
+
+impl Schedule for Runtime {
+ fn bind(task: Task<Self>) -> Runtime {
+ let rt = CURRENT.try_lock().unwrap().as_ref().unwrap().clone();
+ rt.0.core.try_lock().unwrap().tasks.push_front(task);
+ rt
+ }
+
+ fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
+ // safety: copying worker.rs
+ let task = unsafe { Task::from_raw(task.header().into()) };
+ self.0.released.push(task);
+ None
+ }
+
+ fn schedule(&self, task: task::Notified<Self>) {
+ self.0.core.try_lock().unwrap().queue.push_back(task);
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs b/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs
new file mode 100644
index 0000000000..2bda0fc738
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/thread_pool/atomic_cell.rs
@@ -0,0 +1,52 @@
+use crate::loom::sync::atomic::AtomicPtr;
+
+use std::ptr;
+use std::sync::atomic::Ordering::AcqRel;
+
+pub(super) struct AtomicCell<T> {
+ data: AtomicPtr<T>,
+}
+
+unsafe impl<T: Send> Send for AtomicCell<T> {}
+unsafe impl<T: Send> Sync for AtomicCell<T> {}
+
+impl<T> AtomicCell<T> {
+ pub(super) fn new(data: Option<Box<T>>) -> AtomicCell<T> {
+ AtomicCell {
+ data: AtomicPtr::new(to_raw(data)),
+ }
+ }
+
+ pub(super) fn swap(&self, val: Option<Box<T>>) -> Option<Box<T>> {
+ let old = self.data.swap(to_raw(val), AcqRel);
+ from_raw(old)
+ }
+
+ #[cfg(feature = "blocking")]
+ pub(super) fn set(&self, val: Box<T>) {
+ let _ = self.swap(Some(val));
+ }
+
+ pub(super) fn take(&self) -> Option<Box<T>> {
+ self.swap(None)
+ }
+}
+
+fn to_raw<T>(data: Option<Box<T>>) -> *mut T {
+ data.map(Box::into_raw).unwrap_or(ptr::null_mut())
+}
+
+fn from_raw<T>(val: *mut T) -> Option<Box<T>> {
+ if val.is_null() {
+ None
+ } else {
+ Some(unsafe { Box::from_raw(val) })
+ }
+}
+
+impl<T> Drop for AtomicCell<T> {
+ fn drop(&mut self) {
+ // Free any data still held by the cell
+ let _ = self.take();
+ }
+}
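`AtomicCell` models an `Option<Box<T>>` slot on top of `AtomicPtr`, with the null pointer encoding `None`, so `swap` is a single atomic instruction. A compact demonstration mirroring the type above (not importing it):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::AcqRel};

struct Cell {
    data: AtomicPtr<String>,
}

impl Cell {
    fn new() -> Cell {
        Cell { data: AtomicPtr::new(ptr::null_mut()) }
    }

    fn swap(&self, val: Option<Box<String>>) -> Option<Box<String>> {
        // `None` becomes the null pointer; `Some` leaks the box into the slot.
        let new = val.map(Box::into_raw).unwrap_or(ptr::null_mut());
        let old = self.data.swap(new, AcqRel);
        if old.is_null() { None } else { Some(unsafe { Box::from_raw(old) }) }
    }
}

fn main() {
    let cell = Cell::new();
    assert!(cell.swap(Some(Box::new("core".to_string()))).is_none());
    // `take` in the real type is just `swap(None)`.
    let taken = cell.swap(None);
    assert_eq!(taken.map(|b| *b), Some("core".to_string()));
}
```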
diff --git a/third_party/rust/tokio/src/runtime/thread_pool/idle.rs b/third_party/rust/tokio/src/runtime/thread_pool/idle.rs
new file mode 100644
index 0000000000..ae87ca4ba1
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/thread_pool/idle.rs
@@ -0,0 +1,222 @@
+//! Coordinates idling workers
+
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::Mutex;
+
+use std::fmt;
+use std::sync::atomic::Ordering::{self, SeqCst};
+
+pub(super) struct Idle {
+ /// Tracks both the number of searching workers and the number of unparked
+ /// workers.
+ ///
+ /// Used as a fast path to avoid acquiring the lock when it is not needed.
+ state: AtomicUsize,
+
+ /// Sleeping workers
+ sleepers: Mutex<Vec<usize>>,
+
+ /// Total number of workers.
+ num_workers: usize,
+}
+
+const UNPARK_SHIFT: usize = 16;
+const UNPARK_MASK: usize = !SEARCH_MASK;
+const SEARCH_MASK: usize = (1 << UNPARK_SHIFT) - 1;
+
+#[derive(Copy, Clone)]
+struct State(usize);
+
+impl Idle {
+ pub(super) fn new(num_workers: usize) -> Idle {
+ let init = State::new(num_workers);
+
+ Idle {
+ state: AtomicUsize::new(init.into()),
+ sleepers: Mutex::new(Vec::with_capacity(num_workers)),
+ num_workers,
+ }
+ }
+
+ /// If there are no workers actively searching, returns the index of a
+ /// worker currently sleeping.
+ pub(super) fn worker_to_notify(&self) -> Option<usize> {
+ // If at least one worker is spinning, work being notified will
+ // eventually be found. A searching thread will find **some** work and
+ // notify another worker, eventually leading to our work being found.
+ //
+ // For this to happen, this load must happen before the thread
+ // transitioning `num_searching` to zero. Acquire / Release does not
+ // provide sufficient guarantees, so this load is done with `SeqCst` and
+ // will pair with the `fetch_sub(1)` when transitioning out of
+ // searching.
+ if !self.notify_should_wakeup() {
+ return None;
+ }
+
+ // Acquire the lock
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ // Check again, now that the lock is acquired
+ if !self.notify_should_wakeup() {
+ return None;
+ }
+
+ // A worker should be woken up, atomically increment the number of
+ // searching workers as well as the number of unparked workers.
+ State::unpark_one(&self.state);
+
+ // Get the worker to unpark
+ let ret = sleepers.pop();
+ debug_assert!(ret.is_some());
+
+ ret
+ }
+
+ /// Returns `true` if the worker needs to do a final check for submitted
+ /// work.
+ pub(super) fn transition_worker_to_parked(&self, worker: usize, is_searching: bool) -> bool {
+ // Acquire the lock
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ // Decrement the number of unparked threads
+ let ret = State::dec_num_unparked(&self.state, is_searching);
+
+ // Track the sleeping worker
+ sleepers.push(worker);
+
+ ret
+ }
+
+ pub(super) fn transition_worker_to_searching(&self) -> bool {
+ let state = State::load(&self.state, SeqCst);
+ if 2 * state.num_searching() >= self.num_workers {
+ return false;
+ }
+
+ // It is possible for this routine to allow more than 50% of the workers
+ // to search. That is OK. Limiting searchers is only an optimization to
+ // prevent too much contention.
+ State::inc_num_searching(&self.state, SeqCst);
+ true
+ }
+
+ /// A lightweight transition from searching -> running.
+ ///
+ /// Returns `true` if this is the final searching worker. The caller
+ /// **must** notify a new worker.
+ pub(super) fn transition_worker_from_searching(&self) -> bool {
+ State::dec_num_searching(&self.state)
+ }
+
+ /// Unpark a specific worker. This happens if tasks are submitted from
+ /// within the worker's park routine.
+ pub(super) fn unpark_worker_by_id(&self, worker_id: usize) {
+ let mut sleepers = self.sleepers.lock().unwrap();
+
+ for index in 0..sleepers.len() {
+ if sleepers[index] == worker_id {
+ sleepers.swap_remove(index);
+
+ // Update the state accordingly while the lock is held.
+ State::unpark_one(&self.state);
+
+ return;
+ }
+ }
+ }
+
+ /// Returns `true` if `worker_id` is contained in the sleep set
+ pub(super) fn is_parked(&self, worker_id: usize) -> bool {
+ let sleepers = self.sleepers.lock().unwrap();
+ sleepers.contains(&worker_id)
+ }
+
+ fn notify_should_wakeup(&self) -> bool {
+ let state = State(self.state.fetch_add(0, SeqCst));
+ state.num_searching() == 0 && state.num_unparked() < self.num_workers
+ }
+}
+
+impl State {
+ fn new(num_workers: usize) -> State {
+ // All workers start in the unparked state
+ let ret = State(num_workers << UNPARK_SHIFT);
+ debug_assert_eq!(num_workers, ret.num_unparked());
+ debug_assert_eq!(0, ret.num_searching());
+ ret
+ }
+
+ fn load(cell: &AtomicUsize, ordering: Ordering) -> State {
+ State(cell.load(ordering))
+ }
+
+ fn unpark_one(cell: &AtomicUsize) {
+ cell.fetch_add(1 | (1 << UNPARK_SHIFT), SeqCst);
+ }
+
+ fn inc_num_searching(cell: &AtomicUsize, ordering: Ordering) {
+ cell.fetch_add(1, ordering);
+ }
+
+ /// Returns `true` if this is the final searching worker
+ fn dec_num_searching(cell: &AtomicUsize) -> bool {
+ let state = State(cell.fetch_sub(1, SeqCst));
+ state.num_searching() == 1
+ }
+
+    /// Decrements the number of unparked workers, tracking a worker that is
+    /// going to sleep.
+    ///
+    /// Returns `true` if this was the final searching worker.
+ fn dec_num_unparked(cell: &AtomicUsize, is_searching: bool) -> bool {
+ let mut dec = 1 << UNPARK_SHIFT;
+
+ if is_searching {
+ dec += 1;
+ }
+
+ let prev = State(cell.fetch_sub(dec, SeqCst));
+ is_searching && prev.num_searching() == 1
+ }
+
+ /// Number of workers currently searching
+ fn num_searching(self) -> usize {
+ self.0 & SEARCH_MASK
+ }
+
+ /// Number of workers currently unparked
+ fn num_unparked(self) -> usize {
+ (self.0 & UNPARK_MASK) >> UNPARK_SHIFT
+ }
+}
+
+impl From<usize> for State {
+ fn from(src: usize) -> State {
+ State(src)
+ }
+}
+
+impl From<State> for usize {
+ fn from(src: State) -> usize {
+ src.0
+ }
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("worker::State")
+ .field("num_unparked", &self.num_unparked())
+ .field("num_searching", &self.num_searching())
+ .finish()
+ }
+}
+
+#[test]
+fn test_state() {
+ assert_eq!(0, UNPARK_MASK & SEARCH_MASK);
+ assert_eq!(0, !(UNPARK_MASK | SEARCH_MASK));
+
+ let state = State::new(10);
+ assert_eq!(10, state.num_unparked());
+ assert_eq!(0, state.num_searching());
+}
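+
+// A companion sketch (hedged): `unpark_one` adds `1 | (1 << UNPARK_SHIFT)` in
+// a single atomic step, bumping the searching and unparked counters together.
+// This assumes the `AtomicUsize` / `SeqCst` imports this module already uses.
+#[test]
+fn test_unpark_one_updates_both_counters() {
+    let cell = AtomicUsize::new(State::new(4).into());
+    State::unpark_one(&cell);
+
+    let state = State::load(&cell, SeqCst);
+    assert_eq!(5, state.num_unparked());
+    assert_eq!(1, state.num_searching());
+}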
diff --git a/third_party/rust/tokio/src/runtime/thread_pool/mod.rs b/third_party/rust/tokio/src/runtime/thread_pool/mod.rs
new file mode 100644
index 0000000000..82e82d5b30
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/thread_pool/mod.rs
@@ -0,0 +1,117 @@
+//! Threadpool
+
+mod atomic_cell;
+use atomic_cell::AtomicCell;
+
+mod idle;
+use self::idle::Idle;
+
+mod worker;
+pub(crate) use worker::Launch;
+
+cfg_blocking! {
+ pub(crate) use worker::block_in_place;
+}
+
+use crate::loom::sync::Arc;
+use crate::runtime::task::{self, JoinHandle};
+use crate::runtime::Parker;
+
+use std::fmt;
+use std::future::Future;
+
+/// Work-stealing based thread pool for executing futures.
+pub(crate) struct ThreadPool {
+ spawner: Spawner,
+}
+
+/// Submit futures to the associated thread pool for execution.
+///
+/// A `Spawner` instance is a handle to a single thread pool that allows the owner
+/// of the handle to spawn futures onto the thread pool.
+///
+/// The `Spawner` handle is *only* used for spawning new futures. It does not
+/// impact the lifecycle of the thread pool in any way. The thread pool may
+/// shut down while there are outstanding `Spawner` instances.
+///
+/// `Spawner` instances are obtained by calling [`ThreadPool::spawner`].
+///
+/// [`ThreadPool::spawner`]: method@ThreadPool::spawner
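+///
+/// A minimal usage sketch (internal API; `parker` here is a stand-in for
+/// however a `Parker` is actually obtained, so this is illustrative only):
+///
+/// ```ignore
+/// let (pool, launch) = ThreadPool::new(4, parker);
+/// launch.launch();
+///
+/// let handle = pool.spawner().spawn(async { 1 + 1 });
+/// ```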
+#[derive(Clone)]
+pub(crate) struct Spawner {
+ shared: Arc<worker::Shared>,
+}
+
+// ===== impl ThreadPool =====
+
+impl ThreadPool {
+ pub(crate) fn new(size: usize, parker: Parker) -> (ThreadPool, Launch) {
+ let (shared, launch) = worker::create(size, parker);
+ let spawner = Spawner { shared };
+ let thread_pool = ThreadPool { spawner };
+
+ (thread_pool, launch)
+ }
+
+    /// Returns a reference to the `Spawner`.
+ ///
+ /// The `Spawner` handle can be cloned and enables spawning tasks from other
+ /// threads.
+ pub(crate) fn spawner(&self) -> &Spawner {
+ &self.spawner
+ }
+
+ /// Spawns a task
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ self.spawner.spawn(future)
+ }
+
+ /// Blocks the current thread waiting for the future to complete.
+ ///
+ /// The future will execute on the current thread, but all spawned tasks
+ /// will be executed on the thread pool.
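+    ///
+    /// Illustrative only (internal API, assuming a built pool):
+    ///
+    /// ```ignore
+    /// let out = pool.block_on(async { 21 * 2 });
+    /// assert_eq!(42, out);
+    /// ```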
+ pub(crate) fn block_on<F>(&self, future: F) -> F::Output
+ where
+ F: Future,
+ {
+ let mut enter = crate::runtime::enter();
+ enter.block_on(future).expect("failed to park thread")
+ }
+}
+
+impl fmt::Debug for ThreadPool {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("ThreadPool").finish()
+ }
+}
+
+impl Drop for ThreadPool {
+ fn drop(&mut self) {
+ self.spawner.shared.close();
+ }
+}
+
+// ==== impl Spawner =====
+
+impl Spawner {
+ /// Spawns a future onto the thread pool
+ pub(crate) fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ let (task, handle) = task::joinable(future);
+ self.shared.schedule(task, false);
+ handle
+ }
+}
+
+impl fmt::Debug for Spawner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Spawner").finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/thread_pool/worker.rs b/third_party/rust/tokio/src/runtime/thread_pool/worker.rs
new file mode 100644
index 0000000000..400e2a938c
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/thread_pool/worker.rs
@@ -0,0 +1,761 @@
+//! A scheduler is initialized with a fixed number of workers. Each worker is
+//! driven by a thread. Each worker has a "core" which contains data such as the
+//! run queue and other state. When `block_in_place` is called, the worker's
+//! "core" is handed off to a new thread allowing the scheduler to continue to
+//! make progress while the originating thread blocks.
+
+use crate::loom::rand::seed;
+use crate::loom::sync::{Arc, Mutex};
+use crate::park::{Park, Unpark};
+use crate::runtime;
+use crate::runtime::park::{Parker, Unparker};
+use crate::runtime::thread_pool::{AtomicCell, Idle};
+use crate::runtime::{queue, task};
+use crate::util::linked_list::LinkedList;
+use crate::util::FastRand;
+
+use std::cell::RefCell;
+use std::time::Duration;
+
+/// A scheduler worker
+pub(super) struct Worker {
+ /// Reference to shared state
+ shared: Arc<Shared>,
+
+ /// Index holding this worker's remote state
+ index: usize,
+
+ /// Used to hand-off a worker's core to another thread.
+ core: AtomicCell<Core>,
+}
+
+/// Core data
+struct Core {
+ /// Used to schedule bookkeeping tasks every so often.
+ tick: u8,
+
+ /// When a task is scheduled from a worker, it is stored in this slot. The
+ /// worker will check this slot for a task **before** checking the run
+ /// queue. This effectively results in the **last** scheduled task to be run
+ /// next (LIFO). This is an optimization for message passing patterns and
+ /// helps to reduce latency.
+ lifo_slot: Option<Notified>,
+
+ /// The worker-local run queue.
+ run_queue: queue::Local<Arc<Worker>>,
+
+ /// True if the worker is currently searching for more work. Searching
+ /// involves attempting to steal from other workers.
+ is_searching: bool,
+
+ /// True if the scheduler is being shutdown
+ is_shutdown: bool,
+
+ /// Tasks owned by the core
+ tasks: LinkedList<Task>,
+
+ /// Parker
+ ///
+ /// Stored in an `Option` as the parker is added / removed to make the
+ /// borrow checker happy.
+ park: Option<Parker>,
+
+ /// Fast random number generator.
+ rand: FastRand,
+}
+
+/// State shared across all workers
+pub(super) struct Shared {
+    /// Per-worker remote state. All workers have access to this, and it is
+    /// how they communicate with each other.
+ remotes: Box<[Remote]>,
+
+ /// Submit work to the scheduler while **not** currently on a worker thread.
+ inject: queue::Inject<Arc<Worker>>,
+
+ /// Coordinates idle workers
+ idle: Idle,
+
+    /// Workers that have observed the shutdown signal
+    ///
+    /// The core is **not** placed back in the worker to prevent it from being
+    /// stolen by a thread that was spawned as part of `block_in_place`.
+ shutdown_workers: Mutex<Vec<(Box<Core>, Arc<Worker>)>>,
+}
+
+/// Used to communicate with a worker from other threads.
+struct Remote {
+ /// Steal tasks from this worker.
+ steal: queue::Steal<Arc<Worker>>,
+
+ /// Transfers tasks to be released. Any worker pushes tasks, only the owning
+ /// worker pops.
+ pending_drop: task::TransferStack<Arc<Worker>>,
+
+ /// Unparks the associated worker thread
+ unpark: Unparker,
+}
+
+/// Thread-local context
+struct Context {
+ /// Worker
+ worker: Arc<Worker>,
+
+ /// Core data
+ core: RefCell<Option<Box<Core>>>,
+}
+
+/// Starts the workers
+pub(crate) struct Launch(Vec<Arc<Worker>>);
+
+/// Running a task may consume the core. If the core is still available when
+/// running the task completes, it is returned. Otherwise, the worker will need
+/// to stop processing.
+type RunResult = Result<Box<Core>, ()>;
+
+/// A task handle
+type Task = task::Task<Arc<Worker>>;
+
+/// A notified task handle
+type Notified = task::Notified<Arc<Worker>>;
+
+// Tracks thread-local state
+scoped_thread_local!(static CURRENT: Context);
+
+pub(super) fn create(size: usize, park: Parker) -> (Arc<Shared>, Launch) {
+ let mut cores = vec![];
+ let mut remotes = vec![];
+
+ // Create the local queues
+ for _ in 0..size {
+ let (steal, run_queue) = queue::local();
+
+ let park = park.clone();
+ let unpark = park.unpark();
+
+ cores.push(Box::new(Core {
+ tick: 0,
+ lifo_slot: None,
+ run_queue,
+ is_searching: false,
+ is_shutdown: false,
+ tasks: LinkedList::new(),
+ park: Some(park),
+ rand: FastRand::new(seed()),
+ }));
+
+ remotes.push(Remote {
+ steal,
+ pending_drop: task::TransferStack::new(),
+ unpark,
+ });
+ }
+
+ let shared = Arc::new(Shared {
+ remotes: remotes.into_boxed_slice(),
+ inject: queue::Inject::new(),
+ idle: Idle::new(size),
+ shutdown_workers: Mutex::new(vec![]),
+ });
+
+ let mut launch = Launch(vec![]);
+
+ for (index, core) in cores.drain(..).enumerate() {
+ launch.0.push(Arc::new(Worker {
+ shared: shared.clone(),
+ index,
+ core: AtomicCell::new(Some(core)),
+ }));
+ }
+
+ (shared, launch)
+}
+
+cfg_blocking! {
+ pub(crate) fn block_in_place<F, R>(f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ // Try to steal the worker core back
+ struct Reset;
+
+ impl Drop for Reset {
+ fn drop(&mut self) {
+ CURRENT.with(|maybe_cx| {
+ if let Some(cx) = maybe_cx {
+ let core = cx.worker.core.take();
+ *cx.core.borrow_mut() = core;
+ }
+ });
+ }
+ }
+
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("can call blocking only when running in a spawned task");
+
+ // Get the worker core. If none is set, then blocking is fine!
+ let core = match cx.core.borrow_mut().take() {
+ Some(core) => {
+ // We are effectively leaving the executor, so we need to
+ // forcibly end budgeting.
+ crate::coop::stop();
+ core
+ },
+ None => return,
+ };
+
+ // The parker should be set here
+ assert!(core.park.is_some());
+
+ // In order to block, the core must be sent to another thread for
+ // execution.
+ //
+ // First, move the core back into the worker's shared core slot.
+ cx.worker.core.set(core);
+
+ // Next, clone the worker handle and send it to a new thread for
+ // processing.
+ //
+ // Once the blocking task is done executing, we will attempt to
+ // steal the core back.
+ let worker = cx.worker.clone();
+ runtime::spawn_blocking(move || run(worker));
+ });
+
+ let _reset = Reset;
+
+ f()
+ }
+}
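+
+// A hedged sketch of the hand-off from the caller's point of view: inside a
+// spawned task, the public wrapper around this function lets synchronous work
+// run on the current thread while the worker's core migrates to a fresh
+// thread:
+//
+//     tokio::task::block_in_place(|| {
+//         // blocking, synchronous work
+//         std::thread::sleep(std::time::Duration::from_secs(1));
+//     });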
+
+/// How often (in ticks) the global queue is polled. This helps to ensure
+/// fairness.
+///
+/// The number is fairly arbitrary. I believe this value was copied from golang.
+const GLOBAL_POLL_INTERVAL: u8 = 61;
+
+impl Launch {
+ pub(crate) fn launch(mut self) {
+ for worker in self.0.drain(..) {
+ runtime::spawn_blocking(move || run(worker));
+ }
+ }
+}
+
+fn run(worker: Arc<Worker>) {
+ // Acquire a core. If this fails, then another thread is running this
+ // worker and there is nothing further to do.
+ let core = match worker.core.take() {
+ Some(core) => core,
+ None => return,
+ };
+
+ // Set the worker context.
+ let cx = Context {
+ worker,
+ core: RefCell::new(None),
+ };
+
+ let _enter = crate::runtime::enter();
+
+ CURRENT.set(&cx, || {
+ // This should always be an error. It only returns a `Result` to support
+ // using `?` to short circuit.
+ assert!(cx.run(core).is_err());
+ });
+}
+
+impl Context {
+ fn run(&self, mut core: Box<Core>) -> RunResult {
+ while !core.is_shutdown {
+ // Increment the tick
+ core.tick();
+
+ // Run maintenance, if needed
+ core = self.maintenance(core);
+
+ // First, check work available to the current worker.
+ if let Some(task) = core.next_task(&self.worker) {
+ core = self.run_task(task, core)?;
+ continue;
+ }
+
+ // There is no more **local** work to process, try to steal work
+ // from other workers.
+ if let Some(task) = core.steal_work(&self.worker) {
+ core = self.run_task(task, core)?;
+ } else {
+ // Wait for work
+ core = self.park(core);
+ }
+ }
+
+ // Signal shutdown
+ self.worker.shared.shutdown(core, self.worker.clone());
+ Err(())
+ }
+
+ fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult {
+        // Make sure the worker is not in the **searching** state. This enables
+ // another idle worker to try to steal work.
+ core.transition_from_searching(&self.worker);
+
+ // Make the core available to the runtime context
+ *self.core.borrow_mut() = Some(core);
+
+ // Run the task
+ crate::coop::budget(|| {
+ task.run();
+
+ // As long as there is budget remaining and a task exists in the
+ // `lifo_slot`, then keep running.
+ loop {
+ // Check if we still have the core. If not, the core was stolen
+ // by another worker.
+ let mut core = match self.core.borrow_mut().take() {
+ Some(core) => core,
+ None => return Err(()),
+ };
+
+ // Check for a task in the LIFO slot
+ let task = match core.lifo_slot.take() {
+ Some(task) => task,
+ None => return Ok(core),
+ };
+
+ if crate::coop::has_budget_remaining() {
+ // Run the LIFO task, then loop
+ *self.core.borrow_mut() = Some(core);
+ task.run();
+ } else {
+ // Not enough budget left to run the LIFO task, push it to
+ // the back of the queue and return.
+ core.run_queue.push_back(task, self.worker.inject());
+ return Ok(core);
+ }
+ }
+ })
+ }
+
+ fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
+ if core.tick % GLOBAL_POLL_INTERVAL == 0 {
+ // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
+ // to run without actually putting the thread to sleep.
+ core = self.park_timeout(core, Some(Duration::from_millis(0)));
+
+ // Run regularly scheduled maintenance
+ core.maintenance(&self.worker);
+ }
+
+ core
+ }
+
+ fn park(&self, mut core: Box<Core>) -> Box<Core> {
+ core.transition_to_parked(&self.worker);
+
+ while !core.is_shutdown {
+ core = self.park_timeout(core, None);
+
+ // Run regularly scheduled maintenance
+ core.maintenance(&self.worker);
+
+ if core.transition_from_parked(&self.worker) {
+ return core;
+ }
+ }
+
+ core
+ }
+
+ fn park_timeout(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> {
+ // Take the parker out of core
+ let mut park = core.park.take().expect("park missing");
+
+ // Store `core` in context
+ *self.core.borrow_mut() = Some(core);
+
+ // Park thread
+ if let Some(timeout) = duration {
+ park.park_timeout(timeout).expect("park failed");
+ } else {
+ park.park().expect("park failed");
+ }
+
+ // Remove `core` from context
+ core = self.core.borrow_mut().take().expect("core missing");
+
+ // Place `park` back in `core`
+ core.park = Some(park);
+
+ // If there are tasks available to steal, notify a worker
+ if core.run_queue.is_stealable() {
+ self.worker.shared.notify_parked();
+ }
+
+ core
+ }
+}
+
+impl Core {
+ /// Increment the tick
+ fn tick(&mut self) {
+ self.tick = self.tick.wrapping_add(1);
+ }
+
+ /// Return the next notified task available to this worker.
+ fn next_task(&mut self, worker: &Worker) -> Option<Notified> {
+ if self.tick % GLOBAL_POLL_INTERVAL == 0 {
+ worker.inject().pop().or_else(|| self.next_local_task())
+ } else {
+ self.next_local_task().or_else(|| worker.inject().pop())
+ }
+ }
+
+ fn next_local_task(&mut self) -> Option<Notified> {
+ self.lifo_slot.take().or_else(|| self.run_queue.pop())
+ }
+
+ fn steal_work(&mut self, worker: &Worker) -> Option<Notified> {
+ if !self.transition_to_searching(worker) {
+ return None;
+ }
+
+ let num = worker.shared.remotes.len();
+ // Start from a random worker
+ let start = self.rand.fastrand_n(num as u32) as usize;
+
+ for i in 0..num {
+ let i = (start + i) % num;
+
+ // Don't steal from ourself! We know we don't have work.
+ if i == worker.index {
+ continue;
+ }
+
+ let target = &worker.shared.remotes[i];
+ if let Some(task) = target.steal.steal_into(&mut self.run_queue) {
+ return Some(task);
+ }
+ }
+
+ // Fallback on checking the global queue
+ worker.shared.inject.pop()
+ }
+
+ fn transition_to_searching(&mut self, worker: &Worker) -> bool {
+ if !self.is_searching {
+ self.is_searching = worker.shared.idle.transition_worker_to_searching();
+ }
+
+ self.is_searching
+ }
+
+ fn transition_from_searching(&mut self, worker: &Worker) {
+ if !self.is_searching {
+ return;
+ }
+
+ self.is_searching = false;
+ worker.shared.transition_worker_from_searching();
+ }
+
+ /// Prepare the worker state for parking
+ fn transition_to_parked(&mut self, worker: &Worker) {
+ // When the final worker transitions **out** of searching to parked, it
+ // must check all the queues one last time in case work materialized
+ // between the last work scan and transitioning out of searching.
+ let is_last_searcher = worker
+ .shared
+ .idle
+ .transition_worker_to_parked(worker.index, self.is_searching);
+
+        // The worker is no longer searching. This only updates the local
+        // cache; the global count was already updated above.
+ self.is_searching = false;
+
+ if is_last_searcher {
+ worker.shared.notify_if_work_pending();
+ }
+ }
+
+ /// Returns `true` if the transition happened.
+ fn transition_from_parked(&mut self, worker: &Worker) -> bool {
+ // If a task is in the lifo slot, then we must unpark regardless of
+ // being notified
+ if self.lifo_slot.is_some() {
+ worker.shared.idle.unpark_worker_by_id(worker.index);
+ self.is_searching = true;
+ return true;
+ }
+
+ if worker.shared.idle.is_parked(worker.index) {
+ return false;
+ }
+
+ // When unparked, the worker is in the searching state.
+ self.is_searching = true;
+ true
+ }
+
+    /// Runs maintenance work such as freeing pending tasks and checking the
+    /// pool's state.
+ fn maintenance(&mut self, worker: &Worker) {
+ self.drain_pending_drop(worker);
+
+ if !self.is_shutdown {
+ // Check if the scheduler has been shutdown
+ self.is_shutdown = worker.inject().is_closed();
+ }
+ }
+
+    // Shut down the core
+ fn shutdown(&mut self, worker: &Worker) {
+        // Take the parker
+ let mut park = self.park.take().expect("park missing");
+
+ // Signal to all tasks to shut down.
+ for header in self.tasks.iter() {
+ header.shutdown();
+ }
+
+ loop {
+ self.drain_pending_drop(worker);
+
+ if self.tasks.is_empty() {
+ break;
+ }
+
+ // Wait until signalled
+ park.park().expect("park failed");
+ }
+
+ // Drain the queue
+ while let Some(_) = self.next_local_task() {}
+ }
+
+ fn drain_pending_drop(&mut self, worker: &Worker) {
+ use std::mem::ManuallyDrop;
+
+ for task in worker.remote().pending_drop.drain() {
+ let task = ManuallyDrop::new(task);
+
+ // safety: tasks are only pushed into the `pending_drop` stacks that
+ // are associated with the list they are inserted into. When a task
+ // is pushed into `pending_drop`, the ref-inc is skipped, so we must
+ // not ref-dec here.
+ //
+ // See `bind` and `release` implementations.
+ unsafe {
+ self.tasks.remove(task.header().into());
+ }
+ }
+ }
+}
+
+impl Worker {
+ /// Returns a reference to the scheduler's injection queue
+ fn inject(&self) -> &queue::Inject<Arc<Worker>> {
+ &self.shared.inject
+ }
+
+ /// Return a reference to this worker's remote data
+ fn remote(&self) -> &Remote {
+ &self.shared.remotes[self.index]
+ }
+
+ fn eq(&self, other: &Worker) -> bool {
+ self.shared.ptr_eq(&other.shared) && self.index == other.index
+ }
+}
+
+impl task::Schedule for Arc<Worker> {
+ fn bind(task: Task) -> Arc<Worker> {
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ // Track the task
+ cx.core
+ .borrow_mut()
+ .as_mut()
+ .expect("scheduler core missing")
+ .tasks
+ .push_front(task);
+
+ // Return a clone of the worker
+ cx.worker.clone()
+ })
+ }
+
+ fn release(&self, task: &Task) -> Option<Task> {
+ use std::ptr::NonNull;
+
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ if self.eq(&cx.worker) {
+ let mut maybe_core = cx.core.borrow_mut();
+
+ if let Some(core) = &mut *maybe_core {
+ // Directly remove the task
+ //
+ // safety: the task is inserted in the list in `bind`.
+ unsafe {
+ let ptr = NonNull::from(task.header());
+ return core.tasks.remove(ptr);
+ }
+ }
+ }
+
+ // Track the task to be released by the worker that owns it
+ //
+ // Safety: We get a new handle without incrementing the ref-count.
+ // A ref-count is held by the "owned" linked list and it is only
+ // ever removed from that list as part of the release process: this
+ // method or popping the task from `pending_drop`. Thus, we can rely
+ // on the ref-count held by the linked-list to keep the memory
+ // alive.
+ //
+ // When the task is removed from the stack, it is forgotten instead
+ // of dropped.
+ let task = unsafe { Task::from_raw(task.header().into()) };
+
+ self.remote().pending_drop.push(task);
+
+ if cx.core.borrow().is_some() {
+ return None;
+ }
+
+ // The worker core has been handed off to another thread. In the
+ // event that the scheduler is currently shutting down, the thread
+ // that owns the task may be waiting on the release to complete
+ // shutdown.
+ if self.inject().is_closed() {
+ self.remote().unpark.unpark();
+ }
+
+ None
+ })
+ }
+
+ fn schedule(&self, task: Notified) {
+ self.shared.schedule(task, false);
+ }
+
+ fn yield_now(&self, task: Notified) {
+ self.shared.schedule(task, true);
+ }
+}
+
+impl Shared {
+ pub(super) fn schedule(&self, task: Notified, is_yield: bool) {
+ CURRENT.with(|maybe_cx| {
+ if let Some(cx) = maybe_cx {
+ // Make sure the task is part of the **current** scheduler.
+ if self.ptr_eq(&cx.worker.shared) {
+ // And the current thread still holds a core
+ if let Some(core) = cx.core.borrow_mut().as_mut() {
+ self.schedule_local(core, task, is_yield);
+ return;
+ }
+ }
+ }
+
+ // Otherwise, use the inject queue
+ self.inject.push(task);
+ self.notify_parked();
+ });
+ }
+
+ fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) {
+ // Spawning from the worker thread. If scheduling a "yield" then the
+ // task must always be pushed to the back of the queue, enabling other
+ // tasks to be executed. If **not** a yield, then there is more
+ // flexibility and the task may go to the front of the queue.
+ let should_notify = if is_yield {
+ core.run_queue.push_back(task, &self.inject);
+ true
+ } else {
+ // Push to the LIFO slot
+ let prev = core.lifo_slot.take();
+ let ret = prev.is_some();
+
+ if let Some(prev) = prev {
+ core.run_queue.push_back(prev, &self.inject);
+ }
+
+ core.lifo_slot = Some(task);
+
+ ret
+ };
+
+ // Only notify if not currently parked. If `park` is `None`, then the
+ // scheduling is from a resource driver. As notifications often come in
+ // batches, the notification is delayed until the park is complete.
+ if should_notify && core.park.is_some() {
+ self.notify_parked();
+ }
+ }
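+
+    // A hedged illustration of the pattern the LIFO slot above optimizes for:
+    // a task sends a message and the woken receiver lands in the slot, so it
+    // runs next on this worker while its data is still cache-warm, e.g.
+    //
+    //     let (tx, rx) = tokio::sync::oneshot::channel();
+    //     tokio::spawn(async move { let _ = tx.send(42); });
+    //     let reply = rx.await;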
+
+ pub(super) fn close(&self) {
+ if self.inject.close() {
+ self.notify_all();
+ }
+ }
+
+ fn notify_parked(&self) {
+ if let Some(index) = self.idle.worker_to_notify() {
+ self.remotes[index].unpark.unpark();
+ }
+ }
+
+ fn notify_all(&self) {
+ for remote in &self.remotes[..] {
+ remote.unpark.unpark();
+ }
+ }
+
+ fn notify_if_work_pending(&self) {
+ for remote in &self.remotes[..] {
+ if !remote.steal.is_empty() {
+ self.notify_parked();
+ return;
+ }
+ }
+
+ if !self.inject.is_empty() {
+ self.notify_parked();
+ }
+ }
+
+ fn transition_worker_from_searching(&self) {
+ if self.idle.transition_worker_from_searching() {
+ // We are the final searching worker. Because work was found, we
+ // need to notify another worker.
+ self.notify_parked();
+ }
+ }
+
+    /// Signals that a worker has observed the shutdown signal and has handed
+    /// its core back for final cleanup.
+ ///
+ /// If all workers have reached this point, the final cleanup is performed.
+ fn shutdown(&self, core: Box<Core>, worker: Arc<Worker>) {
+ let mut workers = self.shutdown_workers.lock().unwrap();
+ workers.push((core, worker));
+
+ if workers.len() != self.remotes.len() {
+ return;
+ }
+
+ for (mut core, worker) in workers.drain(..) {
+ core.shutdown(&worker);
+ }
+
+ // Drain the injection queue
+ while let Some(_) = self.inject.pop() {}
+ }
+
+ fn ptr_eq(&self, other: &Shared) -> bool {
+ self as *const _ == other as *const _
+ }
+}
diff --git a/third_party/rust/tokio/src/runtime/time.rs b/third_party/rust/tokio/src/runtime/time.rs
new file mode 100644
index 0000000000..c623d9641a
--- /dev/null
+++ b/third_party/rust/tokio/src/runtime/time.rs
@@ -0,0 +1,59 @@
+//! Abstracts out the APIs necessary to `Runtime` for integrating the time
+//! driver. When the `time` feature flag is **not** enabled, these APIs are
+//! shells. This isolates the complexity of dealing with conditional
+//! compilation.
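+//!
+//! Both variants expose the same shape, so the runtime builder can be written
+//! once. An illustrative (hedged) call site, with `enable_time` standing in
+//! for whatever flag the builder actually passes:
+//!
+//! ```ignore
+//! let clock = create_clock();
+//! let (driver, handle) = create_driver(enable_time, io_driver, clock);
+//! ```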
+
+pub(crate) use variant::*;
+
+#[cfg(feature = "time")]
+mod variant {
+ use crate::park::Either;
+ use crate::runtime::io;
+ use crate::time::{self, driver};
+
+ pub(crate) type Clock = time::Clock;
+ pub(crate) type Driver = Either<driver::Driver<io::Driver>, io::Driver>;
+ pub(crate) type Handle = Option<driver::Handle>;
+
+ pub(crate) fn create_clock() -> Clock {
+ Clock::new()
+ }
+
+ /// Create a new timer driver / handle pair
+ pub(crate) fn create_driver(
+ enable: bool,
+ io_driver: io::Driver,
+ clock: Clock,
+ ) -> (Driver, Handle) {
+ if enable {
+ let driver = driver::Driver::new(io_driver, clock);
+ let handle = driver.handle();
+
+ (Either::A(driver), Some(handle))
+ } else {
+ (Either::B(io_driver), None)
+ }
+ }
+}
+
+#[cfg(not(feature = "time"))]
+mod variant {
+ use crate::runtime::io;
+
+ pub(crate) type Clock = ();
+ pub(crate) type Driver = io::Driver;
+ pub(crate) type Handle = ();
+
+ pub(crate) fn create_clock() -> Clock {
+ ()
+ }
+
+ /// Create a new timer driver / handle pair
+ pub(crate) fn create_driver(
+ _enable: bool,
+ io_driver: io::Driver,
+ _clock: Clock,
+ ) -> (Driver, Handle) {
+ (io_driver, ())
+ }
+}
diff --git a/third_party/rust/tokio/src/signal/ctrl_c.rs b/third_party/rust/tokio/src/signal/ctrl_c.rs
new file mode 100644
index 0000000000..1eeeb85aa1
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/ctrl_c.rs
@@ -0,0 +1,53 @@
+#[cfg(unix)]
+use super::unix::{self as os_impl};
+#[cfg(windows)]
+use super::windows::{self as os_impl};
+
+use std::io;
+
+/// Completes when a "ctrl-c" notification is sent to the process.
+///
+/// While signals are handled very differently between Unix and Windows, both
+/// platforms support receiving a signal on "ctrl-c". This function provides a
+/// portable API for receiving this notification.
+///
+/// Once the returned future is polled, a listener is registered. The future
+/// will complete on the first received `ctrl-c` **after** the initial call to
+/// either `Future::poll` or `.await`.
+///
+/// # Caveats
+///
+/// On Unix platforms, the first time that a `Signal` instance is registered for a
+/// particular signal kind, an OS signal-handler is installed which replaces the
+/// default platform behavior when that signal is received, **for the duration of
+/// the entire process**.
+///
+/// For example, Unix systems will terminate a process by default when it
+/// receives a signal generated by "CTRL+C" on the terminal. But, when a
+/// `ctrl_c` stream is created to listen for this signal, the next time it arrives,
+/// it will be translated to a stream event, and the process will continue to
+/// execute. **Even if this `Signal` instance is dropped, subsequent SIGINT
+/// deliveries will end up captured by Tokio, and the default platform behavior
+/// will NOT be reset**.
+///
+/// Thus, applications should take care to ensure that the expected signal
+/// behavior occurs after listening for specific signals.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use tokio::signal;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// println!("waiting for ctrl-c");
+///
+/// signal::ctrl_c().await.expect("failed to listen for event");
+///
+/// println!("received ctrl-c event");
+/// }
+/// ```
+pub async fn ctrl_c() -> io::Result<()> {
+ os_impl::ctrl_c()?.recv().await;
+ Ok(())
+}
diff --git a/third_party/rust/tokio/src/signal/mod.rs b/third_party/rust/tokio/src/signal/mod.rs
new file mode 100644
index 0000000000..6e5e350df5
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/mod.rs
@@ -0,0 +1,60 @@
+//! Asynchronous signal handling for Tokio
+//!
+//! Note that signal handling is in general a very tricky topic and should be
+//! used with great care. This crate attempts to implement 'best practice' for
+//! signal handling, but it should be evaluated for your own applications' needs
+//! to see if it's suitable.
+//!
+//! There are some fundamental limitations of this crate documented on the
+//! OS-specific structures, as well.
+//!
+//! # Examples
+//!
+//! Print on "ctrl-c" notification.
+//!
+//! ```rust,no_run
+//! use tokio::signal;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! signal::ctrl_c().await?;
+//! println!("ctrl-c received!");
+//! Ok(())
+//! }
+//! ```
+//!
+//! Wait for SIGHUP on Unix
+//!
+//! ```rust,no_run
+//! # #[cfg(unix)] {
+//! use tokio::signal::unix::{signal, SignalKind};
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! // An infinite stream of hangup signals.
+//! let mut stream = signal(SignalKind::hangup())?;
+//!
+//! // Print whenever a HUP signal is received
+//! loop {
+//! stream.recv().await;
+//! println!("got signal HUP");
+//! }
+//! }
+//! # }
+//! ```
+
+mod ctrl_c;
+pub use ctrl_c::ctrl_c;
+
+mod registry;
+
+mod os {
+ #[cfg(unix)]
+ pub(crate) use super::unix::{OsExtraData, OsStorage};
+
+ #[cfg(windows)]
+ pub(crate) use super::windows::{OsExtraData, OsStorage};
+}
+
+pub mod unix;
+pub mod windows;
diff --git a/third_party/rust/tokio/src/signal/registry.rs b/third_party/rust/tokio/src/signal/registry.rs
new file mode 100644
index 0000000000..50edd2b6c4
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/registry.rs
@@ -0,0 +1,321 @@
+#![allow(clippy::unit_arg)]
+
+use crate::signal::os::{OsExtraData, OsStorage};
+
+use crate::sync::mpsc::Sender;
+
+use lazy_static::lazy_static;
+use std::ops;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Mutex;
+
+pub(crate) type EventId = usize;
+
+/// State for a specific event, whether a notification is pending delivery,
+/// and what listeners are registered.
+#[derive(Default, Debug)]
+pub(crate) struct EventInfo {
+ pending: AtomicBool,
+ recipients: Mutex<Vec<Sender<()>>>,
+}
+
+/// An interface for retrieving the `EventInfo` for a particular `EventId`.
+pub(crate) trait Storage {
+ /// Gets the `EventInfo` for `id` if it exists.
+ fn event_info(&self, id: EventId) -> Option<&EventInfo>;
+
+ /// Invokes `f` once for each defined `EventInfo` in this storage.
+ fn for_each<'a, F>(&'a self, f: F)
+ where
+ F: FnMut(&'a EventInfo);
+}
+
+impl Storage for Vec<EventInfo> {
+ fn event_info(&self, id: EventId) -> Option<&EventInfo> {
+ self.get(id)
+ }
+
+ fn for_each<'a, F>(&'a self, f: F)
+ where
+ F: FnMut(&'a EventInfo),
+ {
+ self.iter().for_each(f)
+ }
+}
+
+/// An interface for initializing a type. Useful for situations where we cannot
+/// inject a configured instance in the constructor of another type.
+pub(crate) trait Init {
+ fn init() -> Self;
+}
+
+/// Manages and distributes event notifications to any registered listeners.
+///
+/// Generic over the underlying storage to allow for domain specific
+/// optimizations (e.g. `EventId`s may or may not be contiguous).
+#[derive(Debug)]
+pub(crate) struct Registry<S> {
+ storage: S,
+}
+
+impl<S> Registry<S> {
+ fn new(storage: S) -> Self {
+ Self { storage }
+ }
+}
+
+impl<S: Storage> Registry<S> {
+ /// Registers a new listener for `event_id`.
+ fn register_listener(&self, event_id: EventId, listener: Sender<()>) {
+ self.storage
+ .event_info(event_id)
+ .unwrap_or_else(|| panic!("invalid event_id: {}", event_id))
+ .recipients
+ .lock()
+ .unwrap()
+ .push(listener);
+ }
+
+ /// Marks `event_id` as having been delivered, without broadcasting it to
+ /// any listeners.
+ fn record_event(&self, event_id: EventId) {
+ if let Some(event_info) = self.storage.event_info(event_id) {
+ event_info.pending.store(true, Ordering::SeqCst)
+ }
+ }
+
+ /// Broadcasts all previously recorded events to their respective listeners.
+ ///
+ /// Returns `true` if an event was delivered to at least one listener.
+ fn broadcast(&self) -> bool {
+ use crate::sync::mpsc::error::TrySendError;
+
+ let mut did_notify = false;
+ self.storage.for_each(|event_info| {
+ // Any signal of this kind arrived since we checked last?
+ if !event_info.pending.swap(false, Ordering::SeqCst) {
+ return;
+ }
+
+ let mut recipients = event_info.recipients.lock().unwrap();
+
+ // Notify all waiters on this signal that the signal has been
+ // received. If we can't push a message into the queue then we don't
+ // worry about it as everything is coalesced anyway. If the channel
+ // has gone away then we can remove that slot.
+ for i in (0..recipients.len()).rev() {
+ match recipients[i].try_send(()) {
+ Ok(()) => did_notify = true,
+ Err(TrySendError::Closed(..)) => {
+ recipients.swap_remove(i);
+ }
+
+ // Channel is full, ignore the error since the
+ // receiver has already been woken up
+ Err(_) => {}
+ }
+ }
+ });
+
+ did_notify
+ }
+}
+
+pub(crate) struct Globals {
+ extra: OsExtraData,
+ registry: Registry<OsStorage>,
+}
+
+impl ops::Deref for Globals {
+ type Target = OsExtraData;
+
+ fn deref(&self) -> &Self::Target {
+ &self.extra
+ }
+}
+
+impl Globals {
+ /// Registers a new listener for `event_id`.
+ pub(crate) fn register_listener(&self, event_id: EventId, listener: Sender<()>) {
+ self.registry.register_listener(event_id, listener);
+ }
+
+ /// Marks `event_id` as having been delivered, without broadcasting it to
+ /// any listeners.
+ pub(crate) fn record_event(&self, event_id: EventId) {
+ self.registry.record_event(event_id);
+ }
+
+ /// Broadcasts all previously recorded events to their respective listeners.
+ ///
+ /// Returns `true` if an event was delivered to at least one listener.
+ pub(crate) fn broadcast(&self) -> bool {
+ self.registry.broadcast()
+ }
+
+ #[cfg(unix)]
+ pub(crate) fn storage(&self) -> &OsStorage {
+ &self.registry.storage
+ }
+}
+
+pub(crate) fn globals() -> Pin<&'static Globals>
+where
+ OsExtraData: 'static + Send + Sync + Init,
+ OsStorage: 'static + Send + Sync + Init,
+{
+ lazy_static! {
+ static ref GLOBALS: Pin<Box<Globals>> = Box::pin(Globals {
+ extra: OsExtraData::init(),
+ registry: Registry::new(OsStorage::init()),
+ });
+ }
+
+ GLOBALS.as_ref()
+}
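+
+// Illustrative (hedged) call sites: signal handlers and drivers reach the
+// shared state only through this accessor, e.g.
+//
+//     globals().record_event(0); // from the OS signal handler
+//     globals().broadcast();     // from the driver, on an ordinary thread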
+
+#[cfg(all(test, not(loom)))]
+mod tests {
+ use super::*;
+ use crate::runtime::{self, Runtime};
+ use crate::sync::{mpsc, oneshot};
+
+ use futures::future;
+
+ #[test]
+ fn smoke() {
+ let mut rt = rt();
+ rt.block_on(async move {
+ let registry = Registry::new(vec![
+ EventInfo::default(),
+ EventInfo::default(),
+ EventInfo::default(),
+ ]);
+
+ let (first_tx, first_rx) = mpsc::channel(3);
+ let (second_tx, second_rx) = mpsc::channel(3);
+ let (third_tx, third_rx) = mpsc::channel(3);
+
+ registry.register_listener(0, first_tx);
+ registry.register_listener(1, second_tx);
+ registry.register_listener(2, third_tx);
+
+ let (fire, wait) = oneshot::channel();
+
+ crate::spawn(async {
+ wait.await.expect("wait failed");
+
+ // Record some events which should get coalesced
+ registry.record_event(0);
+ registry.record_event(0);
+ registry.record_event(1);
+ registry.record_event(1);
+ registry.broadcast();
+
+ // Send subsequent signal
+ registry.record_event(0);
+ registry.broadcast();
+
+ drop(registry);
+ });
+
+ let _ = fire.send(());
+ let all = future::join3(collect(first_rx), collect(second_rx), collect(third_rx));
+
+ let (first_results, second_results, third_results) = all.await;
+ assert_eq!(2, first_results.len());
+ assert_eq!(1, second_results.len());
+ assert_eq!(0, third_results.len());
+ });
+ }
+
+ #[test]
+    #[should_panic(expected = "invalid event_id: 1")]
+ fn register_panics_on_invalid_input() {
+ let registry = Registry::new(vec![EventInfo::default()]);
+
+ let (tx, _) = mpsc::channel(1);
+ registry.register_listener(1, tx);
+ }
+
+ #[test]
+ fn record_invalid_event_does_nothing() {
+ let registry = Registry::new(vec![EventInfo::default()]);
+ registry.record_event(42);
+ }
+
+ #[test]
+ fn broadcast_cleans_up_disconnected_listeners() {
+ let mut rt = Runtime::new().unwrap();
+
+ rt.block_on(async {
+ let registry = Registry::new(vec![EventInfo::default()]);
+
+ let (first_tx, first_rx) = mpsc::channel(1);
+ let (second_tx, second_rx) = mpsc::channel(1);
+ let (third_tx, third_rx) = mpsc::channel(1);
+
+ registry.register_listener(0, first_tx);
+ registry.register_listener(0, second_tx);
+ registry.register_listener(0, third_tx);
+
+ drop(first_rx);
+ drop(second_rx);
+
+ let (fire, wait) = oneshot::channel();
+
+ crate::spawn(async {
+ wait.await.expect("wait failed");
+
+ registry.record_event(0);
+ registry.broadcast();
+
+ assert_eq!(1, registry.storage[0].recipients.lock().unwrap().len());
+ drop(registry);
+ });
+
+ let _ = fire.send(());
+ let results = collect(third_rx).await;
+
+ assert_eq!(1, results.len());
+ });
+ }
+
+ #[test]
+ fn broadcast_returns_if_at_least_one_event_fired() {
+ let registry = Registry::new(vec![EventInfo::default()]);
+
+ registry.record_event(0);
+ assert_eq!(false, registry.broadcast());
+
+ let (first_tx, first_rx) = mpsc::channel(1);
+ let (second_tx, second_rx) = mpsc::channel(1);
+
+ registry.register_listener(0, first_tx);
+ registry.register_listener(0, second_tx);
+
+ registry.record_event(0);
+ assert_eq!(true, registry.broadcast());
+
+ drop(first_rx);
+ registry.record_event(0);
+ assert_eq!(false, registry.broadcast());
+
+ drop(second_rx);
+ }
+
+ fn rt() -> Runtime {
+ runtime::Builder::new().basic_scheduler().build().unwrap()
+ }
+
+ async fn collect(mut rx: crate::sync::mpsc::Receiver<()>) -> Vec<()> {
+ let mut ret = vec![];
+
+ while let Some(v) = rx.recv().await {
+ ret.push(v);
+ }
+
+ ret
+ }
+}
diff --git a/third_party/rust/tokio/src/signal/unix.rs b/third_party/rust/tokio/src/signal/unix.rs
new file mode 100644
index 0000000000..06f5cf4eba
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/unix.rs
@@ -0,0 +1,513 @@
+//! Unix-specific types for signal handling.
+//!
+//! This module is only defined on Unix platforms and contains the primary
+//! `Signal` type for receiving notifications of signals.
+
+#![cfg(unix)]
+
+use crate::io::{AsyncRead, PollEvented};
+use crate::signal::registry::{globals, EventId, EventInfo, Globals, Init, Storage};
+use crate::sync::mpsc::{channel, Receiver};
+
+use libc::c_int;
+use mio_uds::UnixStream;
+use std::io::{self, Error, ErrorKind, Write};
+use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Once;
+use std::task::{Context, Poll};
+
+pub(crate) type OsStorage = Vec<SignalInfo>;
+
+// Number of different unix signals
+// (FreeBSD has 33)
+const SIGNUM: usize = 33;
+
+impl Init for OsStorage {
+ fn init() -> Self {
+ (0..SIGNUM).map(|_| SignalInfo::default()).collect()
+ }
+}
+
+impl Storage for OsStorage {
+ fn event_info(&self, id: EventId) -> Option<&EventInfo> {
+ self.get(id).map(|si| &si.event_info)
+ }
+
+ fn for_each<'a, F>(&'a self, f: F)
+ where
+ F: FnMut(&'a EventInfo),
+ {
+ self.iter().map(|si| &si.event_info).for_each(f)
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct OsExtraData {
+ sender: UnixStream,
+ receiver: UnixStream,
+}
+
+impl Init for OsExtraData {
+ fn init() -> Self {
+ let (receiver, sender) = UnixStream::pair().expect("failed to create UnixStream");
+
+ Self { sender, receiver }
+ }
+}
+
+/// Represents the specific kind of signal to listen for.
+#[derive(Debug, Clone, Copy)]
+pub struct SignalKind(c_int);
+
+impl SignalKind {
+ /// Allows for listening to any valid OS signal.
+ ///
+ /// For example, this can be used for listening for platform-specific
+ /// signals.
+ /// ```rust,no_run
+ /// # use tokio::signal::unix::SignalKind;
+ /// # let signum = -1;
+ /// // let signum = libc::OS_SPECIFIC_SIGNAL;
+ /// let kind = SignalKind::from_raw(signum);
+ /// ```
+ pub fn from_raw(signum: c_int) -> Self {
+ Self(signum)
+ }
+
+ /// Represents the SIGALRM signal.
+ ///
+ /// On Unix systems this signal is sent when a real-time timer has expired.
+ /// By default, the process is terminated by this signal.
+ pub fn alarm() -> Self {
+ Self(libc::SIGALRM)
+ }
+
+ /// Represents the SIGCHLD signal.
+ ///
+ /// On Unix systems this signal is sent when the status of a child process
+ /// has changed. By default, this signal is ignored.
+ pub fn child() -> Self {
+ Self(libc::SIGCHLD)
+ }
+
+ /// Represents the SIGHUP signal.
+ ///
+ /// On Unix systems this signal is sent when the terminal is disconnected.
+ /// By default, the process is terminated by this signal.
+ pub fn hangup() -> Self {
+ Self(libc::SIGHUP)
+ }
+
+ /// Represents the SIGINFO signal.
+ ///
+ /// On Unix systems this signal is sent to request a status update from the
+ /// process. By default, this signal is ignored.
+ #[cfg(any(
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "macos",
+ target_os = "netbsd",
+ target_os = "openbsd"
+ ))]
+ pub fn info() -> Self {
+ Self(libc::SIGINFO)
+ }
+
+ /// Represents the SIGINT signal.
+ ///
+ /// On Unix systems this signal is sent to interrupt a program.
+ /// By default, the process is terminated by this signal.
+ pub fn interrupt() -> Self {
+ Self(libc::SIGINT)
+ }
+
+ /// Represents the SIGIO signal.
+ ///
+ /// On Unix systems this signal is sent when I/O operations are possible
+ /// on some file descriptor. By default, this signal is ignored.
+ pub fn io() -> Self {
+ Self(libc::SIGIO)
+ }
+
+ /// Represents the SIGPIPE signal.
+ ///
+ /// On Unix systems this signal is sent when the process attempts to write
+ /// to a pipe which has no reader. By default, the process is terminated by
+ /// this signal.
+ pub fn pipe() -> Self {
+ Self(libc::SIGPIPE)
+ }
+
+ /// Represents the SIGQUIT signal.
+ ///
+ /// On Unix systems this signal is sent to issue a shutdown of the
+ /// process, after which the OS will dump the process core.
+ /// By default, the process is terminated by this signal.
+ pub fn quit() -> Self {
+ Self(libc::SIGQUIT)
+ }
+
+ /// Represents the SIGTERM signal.
+ ///
+ /// On Unix systems this signal is sent to issue a shutdown of the
+ /// process. By default, the process is terminated by this signal.
+ pub fn terminate() -> Self {
+ Self(libc::SIGTERM)
+ }
+
+ /// Represents the SIGUSR1 signal.
+ ///
+ /// On Unix systems this is a user defined signal.
+ /// By default, the process is terminated by this signal.
+ pub fn user_defined1() -> Self {
+ Self(libc::SIGUSR1)
+ }
+
+ /// Represents the SIGUSR2 signal.
+ ///
+ /// On Unix systems this is a user defined signal.
+ /// By default, the process is terminated by this signal.
+ pub fn user_defined2() -> Self {
+ Self(libc::SIGUSR2)
+ }
+
+ /// Represents the SIGWINCH signal.
+ ///
+ /// On Unix systems this signal is sent when the terminal window is resized.
+ /// By default, this signal is ignored.
+ pub fn window_change() -> Self {
+ Self(libc::SIGWINCH)
+ }
+}
+
+pub(crate) struct SignalInfo {
+ event_info: EventInfo,
+ init: Once,
+ initialized: AtomicBool,
+}
+
+impl Default for SignalInfo {
+ fn default() -> SignalInfo {
+ SignalInfo {
+ event_info: Default::default(),
+ init: Once::new(),
+ initialized: AtomicBool::new(false),
+ }
+ }
+}
+
+/// Our global signal handler for all signals registered by this module.
+///
+/// The purpose of this signal handler is primarily to:
+///
+/// 1. Flag that our specific signal was received (e.g. store an atomic flag)
+/// 2. Wake up driver tasks by writing a byte to a pipe
+///
+/// Those two operations should both be async-signal-safe.
+fn action(globals: Pin<&'static Globals>, signal: c_int) {
+ globals.record_event(signal as EventId);
+
+ // Send a wakeup, ignore any errors (anything reasonably possible is
+ // full pipe and then it will wake up anyway).
+ let mut sender = &globals.sender;
+ drop(sender.write(&[1]));
+}
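+
+// The above is the classic "self-pipe trick": the only async-signal-safe work
+// done in the handler is an atomic store plus a one-byte write; draining the
+// pipe and broadcasting happen later in `Driver::poll`, on an ordinary thread.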
+
+/// Enables this module to receive signal notifications for the `signal`
+/// provided.
+///
+/// This will register the signal handler if it hasn't already been registered,
+/// returning any error along the way if that fails.
+fn signal_enable(signal: c_int) -> io::Result<()> {
+ if signal < 0 || signal_hook_registry::FORBIDDEN.contains(&signal) {
+ return Err(Error::new(
+ ErrorKind::Other,
+ format!("Refusing to register signal {}", signal),
+ ));
+ }
+
+ let globals = globals();
+ let siginfo = match globals.storage().get(signal as EventId) {
+ Some(slot) => slot,
+ None => return Err(io::Error::new(io::ErrorKind::Other, "signal too large")),
+ };
+ let mut registered = Ok(());
+ siginfo.init.call_once(|| {
+ registered = unsafe {
+ signal_hook_registry::register(signal, move || action(globals, signal)).map(|_| ())
+ };
+ if registered.is_ok() {
+ siginfo.initialized.store(true, Ordering::Relaxed);
+ }
+ });
+ registered?;
+    // If the `call_once` above failed, it won't be retried on the next attempt
+    // to register the signal. In that case the closure is not run, `registered`
+    // is still `Ok(())`, and `initialized` is still `false`.
+ if siginfo.initialized.load(Ordering::Relaxed) {
+ Ok(())
+ } else {
+ Err(Error::new(
+ ErrorKind::Other,
+ "Failed to register signal handler",
+ ))
+ }
+}
+
+#[derive(Debug)]
+struct Driver {
+ wakeup: PollEvented<UnixStream>,
+}
+
+impl Driver {
+ fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ // Drain the data from the pipe and maintain interest in getting more
+ self.drain(cx);
+ // Broadcast any signals which were received
+ globals().broadcast();
+
+ Poll::Pending
+ }
+}
+
+impl Driver {
+ fn new() -> io::Result<Driver> {
+        // NB: We give each driver a "fresh" receiver file descriptor to avoid
+ // the issues described in alexcrichton/tokio-process#42.
+ //
+ // In the past we would reuse the actual receiver file descriptor and
+ // swallow any errors around double registration of the same descriptor.
+ // I'm not sure if the second (failed) registration simply doesn't end up
+        // receiving wake-up notifications, or if there is some race condition
+ // when consuming readiness events, but having distinct descriptors for
+ // distinct PollEvented instances appears to mitigate this.
+ //
+ // Unfortunately we cannot just use a single global PollEvented instance
+ // either, since we can't compare Handles or assume they will always
+ // point to the exact same reactor.
+ let stream = globals().receiver.try_clone()?;
+ let wakeup = PollEvented::new(stream)?;
+
+ Ok(Driver { wakeup })
+ }
+
+ /// Drain all data in the global receiver, ensuring we'll get woken up when
+ /// there is a write on the other end.
+ ///
+ /// We do *NOT* use the existence of any read bytes as evidence a signal was
+ /// received since the `pending` flags would have already been set if that
+ /// was the case. See
+ /// [#38](https://github.com/alexcrichton/tokio-signal/issues/38) for more
+ /// info.
+ fn drain(&mut self, cx: &mut Context<'_>) {
+ loop {
+ match Pin::new(&mut self.wakeup).poll_read(cx, &mut [0; 128]) {
+ Poll::Ready(Ok(0)) => panic!("EOF on self-pipe"),
+ Poll::Ready(Ok(_)) => {}
+ Poll::Ready(Err(e)) => panic!("Bad read on self-pipe: {}", e),
+ Poll::Pending => break,
+ }
+ }
+ }
+}
+
+/// A stream of events for receiving a particular type of OS signal.
+///
+/// In general signal handling on Unix is a pretty tricky topic, and this
+/// structure is no exception! There are some important limitations to keep in
+/// mind when using `Signal` streams:
+///
+/// * Signal handling in Unix already necessitates coalescing signals
+/// together sometimes. This `Signal` stream is also no exception here in
+/// that it will also coalesce signals. That is, even if the signal handler
+/// for this process runs multiple times, the `Signal` stream may only return
+/// one signal notification. Specifically, before `poll` is called, all
+/// signal notifications are coalesced into one item returned from `poll`.
+/// Once `poll` has been called, however, a further signal is guaranteed to
+/// be yielded as an item.
+///
+/// Put another way, any element pulled off the returned stream corresponds to
+/// *at least one* signal, but possibly more.
+///
+/// * Signal handling in general is relatively inefficient. Although some
+/// improvements are possible in this crate, it's recommended to not plan on
+/// having millions of signal channels open.
+///
+/// If you've got any questions about this feel free to open an issue on the
+/// repo! New approaches to alleviate some of these limitations are always
+/// appreciated!
+///
+/// # Caveats
+///
+/// The first time that a `Signal` instance is registered for a particular
+/// signal kind, an OS signal-handler is installed which replaces the default
+/// platform behavior when that signal is received, **for the duration of the
+/// entire process**.
+///
+/// For example, Unix systems will terminate a process by default when it
+/// receives SIGINT. But, when a `Signal` instance is created to listen for
+/// this signal, the next SIGINT that arrives will be translated to a stream
+/// event, and the process will continue to execute. **Even if this `Signal`
+/// instance is dropped, subsequent SIGINT deliveries will end up captured by
+/// Tokio, and the default platform behavior will NOT be reset**.
+///
+/// Thus, applications should take care to ensure that the expected signal
+/// behavior occurs after listening for specific signals.
+///
+/// # Examples
+///
+/// Wait for SIGHUP
+///
+/// ```rust,no_run
+/// use tokio::signal::unix::{signal, SignalKind};
+///
+/// #[tokio::main]
+/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// // An infinite stream of hangup signals.
+/// let mut stream = signal(SignalKind::hangup())?;
+///
+/// // Print whenever a HUP signal is received
+/// loop {
+/// stream.recv().await;
+/// println!("got signal HUP");
+/// }
+/// }
+/// ```
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Signal {
+ driver: Driver,
+ rx: Receiver<()>,
+}
+
+/// Creates a new stream which will receive notifications when the current
+/// process receives the specified signal `kind`.
+///
+/// This function will create a new stream which binds to the default reactor.
+/// The `Signal` stream is an infinite stream which will receive
+/// notifications whenever a signal is received. More documentation can be
+/// found on `Signal` itself, but to reiterate:
+///
+/// * Signals may be coalesced beyond what the kernel already does.
+/// * Once a signal handler is registered with the process, the underlying
+/// libc signal handler is never unregistered.
+///
+/// A `Signal` stream can be created for a particular signal number
+/// multiple times. When a signal is received then all the associated
+/// channels will receive the signal notification.
+///
+/// # Errors
+///
+/// * If the lower-level C functions fail for some reason.
+/// * If the previous initialization of this specific signal failed.
+/// * If the signal is one of
+/// [`signal_hook::FORBIDDEN`](https://docs.rs/signal-hook/*/signal_hook/fn.register.html#panics)
+pub fn signal(kind: SignalKind) -> io::Result<Signal> {
+ let signal = kind.0;
+
+ // Turn the signal delivery on once we are ready for it
+ signal_enable(signal)?;
+
+ // Ensure there's a driver for our associated event loop processing
+ // signals.
+ let driver = Driver::new()?;
+
+ // One wakeup in a queue is enough, no need for us to buffer up any
+ // more.
+ let (tx, rx) = channel(1);
+ globals().register_listener(signal as EventId, tx);
+
+ Ok(Signal { driver, rx })
+}
+
+impl Signal {
+ /// Receives the next signal notification event.
+ ///
+ /// `None` is returned if no more events can be received by this stream.
+ ///
+ /// # Examples
+ ///
+ /// Wait for SIGHUP
+ ///
+ /// ```rust,no_run
+ /// use tokio::signal::unix::{signal, SignalKind};
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
+ /// // An infinite stream of hangup signals.
+ /// let mut stream = signal(SignalKind::hangup())?;
+ ///
+ /// // Print whenever a HUP signal is received
+ /// loop {
+ /// stream.recv().await;
+ /// println!("got signal HUP");
+ /// }
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Option<()> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ /// Polls to receive the next signal notification event, outside of an
+ /// `async` context.
+ ///
+ /// `None` is returned if no more events can be received by this stream.
+ ///
+ /// # Examples
+ ///
+ /// Polling from a manually implemented future
+ ///
+ /// ```rust,no_run
+ /// use std::pin::Pin;
+ /// use std::future::Future;
+ /// use std::task::{Context, Poll};
+ /// use tokio::signal::unix::Signal;
+ ///
+ /// struct MyFuture {
+ /// signal: Signal,
+ /// }
+ ///
+ /// impl Future for MyFuture {
+ /// type Output = Option<()>;
+ ///
+ /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ /// println!("polling MyFuture");
+ /// self.signal.poll_recv(cx)
+ /// }
+ /// }
+ /// ```
+ pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ let _ = self.driver.poll(cx);
+ self.rx.poll_recv(cx)
+ }
+}
+
+cfg_stream! {
+ impl crate::stream::Stream for Signal {
+ type Item = ();
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ self.poll_recv(cx)
+ }
+ }
+}
+
+pub(crate) fn ctrl_c() -> io::Result<Signal> {
+ signal(SignalKind::interrupt())
+}
+
+#[cfg(all(test, not(loom)))]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn signal_enable_error_on_invalid_input() {
+ signal_enable(-1).unwrap_err();
+ }
+
+ #[test]
+ fn signal_enable_error_on_forbidden_input() {
+ signal_enable(signal_hook_registry::FORBIDDEN[0]).unwrap_err();
+ }
+}
diff --git a/third_party/rust/tokio/src/signal/windows.rs b/third_party/rust/tokio/src/signal/windows.rs
new file mode 100644
index 0000000000..f55e504b00
--- /dev/null
+++ b/third_party/rust/tokio/src/signal/windows.rs
@@ -0,0 +1,297 @@
+//! Windows-specific types for signal handling.
+//!
+//! This module is only defined on Windows and contains the primary `Event` type
+//! for receiving notifications of events. These events are listened for via the
+//! `SetConsoleCtrlHandler` function which receives events of the type
+//! `CTRL_C_EVENT` and `CTRL_BREAK_EVENT`
+
+#![cfg(windows)]
+
+use crate::signal::registry::{globals, EventId, EventInfo, Init, Storage};
+use crate::sync::mpsc::{channel, Receiver};
+
+use std::convert::TryFrom;
+use std::io;
+use std::sync::Once;
+use std::task::{Context, Poll};
+use winapi::shared::minwindef::*;
+use winapi::um::consoleapi::SetConsoleCtrlHandler;
+use winapi::um::wincon::*;
+
+#[derive(Debug)]
+pub(crate) struct OsStorage {
+ ctrl_c: EventInfo,
+ ctrl_break: EventInfo,
+}
+
+impl Init for OsStorage {
+ fn init() -> Self {
+ Self {
+ ctrl_c: EventInfo::default(),
+ ctrl_break: EventInfo::default(),
+ }
+ }
+}
+
+impl Storage for OsStorage {
+ fn event_info(&self, id: EventId) -> Option<&EventInfo> {
+ match DWORD::try_from(id) {
+ Ok(CTRL_C_EVENT) => Some(&self.ctrl_c),
+ Ok(CTRL_BREAK_EVENT) => Some(&self.ctrl_break),
+ _ => None,
+ }
+ }
+
+ fn for_each<'a, F>(&'a self, mut f: F)
+ where
+ F: FnMut(&'a EventInfo),
+ {
+ f(&self.ctrl_c);
+ f(&self.ctrl_break);
+ }
+}
+
+#[derive(Debug)]
+pub(crate) struct OsExtraData {}
+
+impl Init for OsExtraData {
+ fn init() -> Self {
+ Self {}
+ }
+}
+
+/// Stream of events discovered via `SetConsoleCtrlHandler`.
+///
+/// This structure can be used to listen for events of the type `CTRL_C_EVENT`
+/// and `CTRL_BREAK_EVENT`. The `Stream` trait is implemented for this struct
+/// and will resolve for each notification received by the process. Note that
+/// there are few limitations with this as well:
+///
+/// * A notification to this process notifies *all* `Event` streams for that
+/// event type.
+/// * Notifications to an `Event` stream **are coalesced** if they aren't
+/// processed quickly enough. This means that if two notifications are
+/// received back-to-back, then the stream may only receive one item about the
+/// two notifications.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub(crate) struct Event {
+ rx: Receiver<()>,
+}
+
+pub(crate) fn ctrl_c() -> io::Result<Event> {
+ Event::new(CTRL_C_EVENT)
+}
+
+impl Event {
+ fn new(signum: DWORD) -> io::Result<Self> {
+ global_init()?;
+
+ let (tx, rx) = channel(1);
+ globals().register_listener(signum as EventId, tx);
+
+ Ok(Event { rx })
+ }
+
+ pub(crate) async fn recv(&mut self) -> Option<()> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.rx.poll_recv(cx)).await
+ }
+}
+
+fn global_init() -> io::Result<()> {
+ static INIT: Once = Once::new();
+
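+    // `call_once` runs the closure at most once per process. Only the first
+    // caller observes the registration result; later callers leave `init` as
+    // `None` and fall through to `Ok(())` below.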
+ let mut init = None;
+
+ INIT.call_once(|| unsafe {
+ let rc = SetConsoleCtrlHandler(Some(handler), TRUE);
+ let ret = if rc == 0 {
+ Err(io::Error::last_os_error())
+ } else {
+ Ok(())
+ };
+
+ init = Some(ret);
+ });
+
+ init.unwrap_or_else(|| Ok(()))
+}
+
+unsafe extern "system" fn handler(ty: DWORD) -> BOOL {
+ let globals = globals();
+ globals.record_event(ty as EventId);
+
+ // According to https://docs.microsoft.com/en-us/windows/console/handlerroutine
+ // the handler routine is always invoked in a new thread, thus we don't
+ // have the same restrictions as in Unix signal handlers, meaning we can
+ // go ahead and perform the broadcast here.
+ if globals.broadcast() {
+ TRUE
+ } else {
+        // No one is listening for this notification any more, so let the
+        // OS fire the next (possibly the default) handler.
+ FALSE
+ }
+}
+
+/// Represents a stream which receives "ctrl-break" notifications sent to the process
+/// via `SetConsoleCtrlHandler`.
+///
+/// A notification to this process notifies *all* streams listening for
+/// this event. Moreover, the notifications **are coalesced** if they aren't processed
+/// quickly enough. This means that if two notifications are received back-to-back,
+/// then the stream may only receive one item about the two notifications.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct CtrlBreak {
+ inner: Event,
+}
+
+impl CtrlBreak {
+ /// Receives the next signal notification event.
+ ///
+ /// `None` is returned if no more events can be received by this stream.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use tokio::signal::windows::ctrl_break;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
+ /// // An infinite stream of CTRL-BREAK events.
+ /// let mut stream = ctrl_break()?;
+ ///
+ /// // Print whenever a CTRL-BREAK event is received
+ /// loop {
+ /// stream.recv().await;
+ /// println!("got signal CTRL-BREAK");
+ /// }
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Option<()> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ /// Polls to receive the next signal notification event, outside of an
+ /// `async` context.
+ ///
+ /// `None` is returned if no more events can be received by this stream.
+ ///
+ /// # Examples
+ ///
+ /// Polling from a manually implemented future
+ ///
+ /// ```rust,no_run
+ /// use std::pin::Pin;
+ /// use std::future::Future;
+ /// use std::task::{Context, Poll};
+ /// use tokio::signal::windows::CtrlBreak;
+ ///
+ /// struct MyFuture {
+ /// ctrl_break: CtrlBreak,
+ /// }
+ ///
+ /// impl Future for MyFuture {
+ /// type Output = Option<()>;
+ ///
+ /// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ /// println!("polling MyFuture");
+ /// self.ctrl_break.poll_recv(cx)
+ /// }
+ /// }
+ /// ```
+ pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ self.inner.rx.poll_recv(cx)
+ }
+}
+
+cfg_stream! {
+ impl crate::stream::Stream for CtrlBreak {
+ type Item = ();
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ self.poll_recv(cx)
+ }
+ }
+}
+
+/// Creates a new stream which receives "ctrl-break" notifications sent to the
+/// process.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// use tokio::signal::windows::ctrl_break;
+///
+/// #[tokio::main]
+/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// // An infinite stream of CTRL-BREAK events.
+/// let mut stream = ctrl_break()?;
+///
+/// // Print whenever a CTRL-BREAK event is received
+/// loop {
+/// stream.recv().await;
+/// println!("got signal CTRL-BREAK");
+/// }
+/// }
+/// ```
+pub fn ctrl_break() -> io::Result<CtrlBreak> {
+ Event::new(CTRL_BREAK_EVENT).map(|inner| CtrlBreak { inner })
+}
+
+#[cfg(all(test, not(loom)))]
+mod tests {
+ use super::*;
+ use crate::runtime::Runtime;
+ use crate::stream::StreamExt;
+
+ use tokio_test::{assert_ok, assert_pending, assert_ready_ok, task};
+
+ #[test]
+ fn ctrl_c() {
+ let rt = rt();
+
+ rt.enter(|| {
+ let mut ctrl_c = task::spawn(crate::signal::ctrl_c());
+
+ assert_pending!(ctrl_c.poll());
+
+ // Windows doesn't have a good programmatic way of sending events
+ // like sending signals on Unix, so we'll stub out the actual OS
+ // integration and test that our handling works.
+ unsafe {
+ super::handler(CTRL_C_EVENT);
+ }
+
+ assert_ready_ok!(ctrl_c.poll());
+ });
+ }
+
+ #[test]
+ fn ctrl_break() {
+ let mut rt = rt();
+
+ rt.block_on(async {
+ let mut ctrl_break = assert_ok!(super::ctrl_break());
+
+ // Windows doesn't have a good programmatic way of sending events
+ // like sending signals on Unix, so we'll stub out the actual OS
+ // integration and test that our handling works.
+ unsafe {
+ super::handler(CTRL_BREAK_EVENT);
+ }
+
+ ctrl_break.next().await.unwrap();
+ });
+ }
+
+ fn rt() -> Runtime {
+ crate::runtime::Builder::new()
+ .basic_scheduler()
+ .build()
+ .unwrap()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/all.rs b/third_party/rust/tokio/src/stream/all.rs
new file mode 100644
index 0000000000..615665d270
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/all.rs
@@ -0,0 +1,45 @@
+use crate::stream::Stream;
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Future for the [`all`](super::StreamExt::all) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct AllFuture<'a, St: ?Sized, F> {
+ stream: &'a mut St,
+ f: F,
+}
+
+impl<'a, St: ?Sized, F> AllFuture<'a, St, F> {
+ pub(super) fn new(stream: &'a mut St, f: F) -> Self {
+ Self { stream, f }
+ }
+}
+
+impl<St: ?Sized + Unpin, F> Unpin for AllFuture<'_, St, F> {}
+
+impl<St, F> Future for AllFuture<'_, St, F>
+where
+ St: ?Sized + Stream + Unpin,
+ F: FnMut(St::Item) -> bool,
+{
+ type Output = bool;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx));
+
+ match next {
+ Some(v) => {
+ if !(&mut self.f)(v) {
+ Poll::Ready(false)
+ } else {
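+                    // The item matched; wake ourselves and return `Pending`
+                    // so the task yields to the scheduler between items
+                    // instead of looping inline over the whole stream.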
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ None => Poll::Ready(true),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/any.rs b/third_party/rust/tokio/src/stream/any.rs
new file mode 100644
index 0000000000..f2ecad5edb
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/any.rs
@@ -0,0 +1,45 @@
+use crate::stream::Stream;
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Future for the [`any`](super::StreamExt::any) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct AnyFuture<'a, St: ?Sized, F> {
+ stream: &'a mut St,
+ f: F,
+}
+
+impl<'a, St: ?Sized, F> AnyFuture<'a, St, F> {
+ pub(super) fn new(stream: &'a mut St, f: F) -> Self {
+ Self { stream, f }
+ }
+}
+
+impl<St: ?Sized + Unpin, F> Unpin for AnyFuture<'_, St, F> {}
+
+impl<St, F> Future for AnyFuture<'_, St, F>
+where
+ St: ?Sized + Stream + Unpin,
+ F: FnMut(St::Item) -> bool,
+{
+ type Output = bool;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let next = futures_core::ready!(Pin::new(&mut self.stream).poll_next(cx));
+
+ match next {
+ Some(v) => {
+ if (&mut self.f)(v) {
+ Poll::Ready(true)
+ } else {
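+                    // The item did not match; wake ourselves and return
+                    // `Pending` so the task yields to the scheduler between
+                    // items instead of looping inline over the whole stream.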
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ None => Poll::Ready(false),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/chain.rs b/third_party/rust/tokio/src/stream/chain.rs
new file mode 100644
index 0000000000..5f0324a4b5
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/chain.rs
@@ -0,0 +1,57 @@
+use crate::stream::{Fuse, Stream};
+
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`chain`](super::StreamExt::chain) method.
+ pub struct Chain<T, U> {
+ #[pin]
+ a: Fuse<T>,
+ #[pin]
+ b: U,
+ }
+}
+
+impl<T, U> Chain<T, U> {
+ pub(super) fn new(a: T, b: U) -> Chain<T, U>
+ where
+ T: Stream,
+ U: Stream,
+ {
+ Chain { a: Fuse::new(a), b }
+ }
+}
+
+impl<T, U> Stream for Chain<T, U>
+where
+ T: Stream,
+ U: Stream<Item = T::Item>,
+{
+ type Item = T::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
+ use Poll::Ready;
+
+ let me = self.project();
+
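+        // `a` is fused, so it is safe to keep polling the chain after `a`
+        // has returned `None`.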
+ if let Some(v) = ready!(me.a.poll_next(cx)) {
+ return Ready(Some(v));
+ }
+
+ me.b.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_lower, a_upper) = self.a.size_hint();
+ let (b_lower, b_upper) = self.b.size_hint();
+
+ let upper = match (a_upper, b_upper) {
+ (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper),
+ _ => None,
+ };
+
+ (a_lower + b_lower, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/collect.rs b/third_party/rust/tokio/src/stream/collect.rs
new file mode 100644
index 0000000000..f44c72b7b3
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/collect.rs
@@ -0,0 +1,246 @@
+use crate::stream::Stream;
+
+use bytes::{Buf, BufMut, Bytes, BytesMut};
+use core::future::Future;
+use core::mem;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+// Do not export this struct until `FromStream` can be unsealed.
+pin_project! {
+ /// Future returned by the [`collect`](super::StreamExt::collect) method.
+ #[must_use = "streams do nothing unless polled"]
+ #[derive(Debug)]
+ pub struct Collect<T, U>
+ where
+ T: Stream,
+ U: FromStream<T::Item>,
+ {
+ #[pin]
+ stream: T,
+ collection: U::Collection,
+ }
+}
+
+/// Convert from a [`Stream`](crate::stream::Stream).
+///
+/// This trait is not intended to be used directly. Instead, call
+/// [`StreamExt::collect()`](super::StreamExt::collect).
+///
+/// # Implementing
+///
+/// Currently, this trait may not be implemented by third parties. The trait is
+/// sealed in order to make changes in the future. Stabilization is pending
+/// enhancements to the Rust language.
+pub trait FromStream<T>: sealed::FromStreamPriv<T> {}
+
+impl<T, U> Collect<T, U>
+where
+ T: Stream,
+ U: FromStream<T::Item>,
+{
+ pub(super) fn new(stream: T) -> Collect<T, U> {
+ let (lower, upper) = stream.size_hint();
+ let collection = U::initialize(lower, upper);
+
+ Collect { stream, collection }
+ }
+}
+
+impl<T, U> Future for Collect<T, U>
+where
+ T: Stream,
+ U: FromStream<T::Item>,
+{
+ type Output = U;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<U> {
+ use Poll::Ready;
+
+ loop {
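+            // Re-project on every iteration: `as_mut()` reborrows the pinned
+            // future so the inner stream can be polled repeatedly.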
+ let mut me = self.as_mut().project();
+
+ let item = match ready!(me.stream.poll_next(cx)) {
+ Some(item) => item,
+ None => {
+ return Ready(U::finalize(&mut me.collection));
+ }
+ };
+
+ if !U::extend(&mut me.collection, item) {
+ return Ready(U::finalize(&mut me.collection));
+ }
+ }
+ }
+}
+
+// ===== FromStream implementations
+
+impl FromStream<()> for () {}
+
+impl sealed::FromStreamPriv<()> for () {
+ type Collection = ();
+
+ fn initialize(_lower: usize, _upper: Option<usize>) {}
+
+ fn extend(_collection: &mut (), _item: ()) -> bool {
+ true
+ }
+
+ fn finalize(_collection: &mut ()) {}
+}
+
+impl<T: AsRef<str>> FromStream<T> for String {}
+
+impl<T: AsRef<str>> sealed::FromStreamPriv<T> for String {
+ type Collection = String;
+
+ fn initialize(_lower: usize, _upper: Option<usize>) -> String {
+ String::new()
+ }
+
+ fn extend(collection: &mut String, item: T) -> bool {
+ collection.push_str(item.as_ref());
+ true
+ }
+
+ fn finalize(collection: &mut String) -> String {
+ mem::replace(collection, String::new())
+ }
+}
+
+impl<T> FromStream<T> for Vec<T> {}
+
+impl<T> sealed::FromStreamPriv<T> for Vec<T> {
+ type Collection = Vec<T>;
+
+ fn initialize(lower: usize, _upper: Option<usize>) -> Vec<T> {
+ Vec::with_capacity(lower)
+ }
+
+ fn extend(collection: &mut Vec<T>, item: T) -> bool {
+ collection.push(item);
+ true
+ }
+
+ fn finalize(collection: &mut Vec<T>) -> Vec<T> {
+ mem::replace(collection, vec![])
+ }
+}
+
+impl<T> FromStream<T> for Box<[T]> {}
+
+impl<T> sealed::FromStreamPriv<T> for Box<[T]> {
+ type Collection = Vec<T>;
+
+ fn initialize(lower: usize, upper: Option<usize>) -> Vec<T> {
+ <Vec<T> as sealed::FromStreamPriv<T>>::initialize(lower, upper)
+ }
+
+ fn extend(collection: &mut Vec<T>, item: T) -> bool {
+ <Vec<T> as sealed::FromStreamPriv<T>>::extend(collection, item)
+ }
+
+ fn finalize(collection: &mut Vec<T>) -> Box<[T]> {
+ <Vec<T> as sealed::FromStreamPriv<T>>::finalize(collection).into_boxed_slice()
+ }
+}
+
+impl<T, U, E> FromStream<Result<T, E>> for Result<U, E> where U: FromStream<T> {}
+
+impl<T, U, E> sealed::FromStreamPriv<Result<T, E>> for Result<U, E>
+where
+ U: FromStream<T>,
+{
+ type Collection = Result<U::Collection, E>;
+
+ fn initialize(lower: usize, upper: Option<usize>) -> Result<U::Collection, E> {
+ Ok(U::initialize(lower, upper))
+ }
+
+ fn extend(collection: &mut Self::Collection, item: Result<T, E>) -> bool {
+ assert!(collection.is_ok());
+ match item {
+ Ok(item) => {
+ let collection = collection.as_mut().ok().expect("invalid state");
+ U::extend(collection, item)
+ }
+ Err(err) => {
+ *collection = Err(err);
+ false
+ }
+ }
+ }
+
+ fn finalize(collection: &mut Self::Collection) -> Result<U, E> {
+ if let Ok(collection) = collection.as_mut() {
+ Ok(U::finalize(collection))
+ } else {
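+            // Swap a cheap, empty placeholder into place so the error can be
+            // moved out of the mutable reference.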
+ let res = mem::replace(collection, Ok(U::initialize(0, Some(0))));
+
+ if let Err(err) = res {
+ Err(err)
+ } else {
+ unreachable!();
+ }
+ }
+ }
+}
+
+impl<T: Buf> FromStream<T> for Bytes {}
+
+impl<T: Buf> sealed::FromStreamPriv<T> for Bytes {
+ type Collection = BytesMut;
+
+ fn initialize(_lower: usize, _upper: Option<usize>) -> BytesMut {
+ BytesMut::new()
+ }
+
+ fn extend(collection: &mut BytesMut, item: T) -> bool {
+ collection.put(item);
+ true
+ }
+
+ fn finalize(collection: &mut BytesMut) -> Bytes {
+ mem::replace(collection, BytesMut::new()).freeze()
+ }
+}
+
+impl<T: Buf> FromStream<T> for BytesMut {}
+
+impl<T: Buf> sealed::FromStreamPriv<T> for BytesMut {
+ type Collection = BytesMut;
+
+ fn initialize(_lower: usize, _upper: Option<usize>) -> BytesMut {
+ BytesMut::new()
+ }
+
+ fn extend(collection: &mut BytesMut, item: T) -> bool {
+ collection.put(item);
+ true
+ }
+
+ fn finalize(collection: &mut BytesMut) -> BytesMut {
+ mem::replace(collection, BytesMut::new())
+ }
+}
+
+pub(crate) mod sealed {
+ #[doc(hidden)]
+ pub trait FromStreamPriv<T> {
+        /// Intermediate type used during the collection process.
+ type Collection;
+
+ /// Initialize the collection
+ fn initialize(lower: usize, upper: Option<usize>) -> Self::Collection;
+
+ /// Extend the collection with the received item
+ ///
+        /// Return `true` to continue streaming, `false` to complete the collection.
+ fn extend(collection: &mut Self::Collection, item: T) -> bool;
+
+ /// Finalize collection into target type.
+ fn finalize(collection: &mut Self::Collection) -> Self;
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/empty.rs b/third_party/rust/tokio/src/stream/empty.rs
new file mode 100644
index 0000000000..6118673e50
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/empty.rs
@@ -0,0 +1,50 @@
+use crate::stream::Stream;
+
+use core::marker::PhantomData;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`empty`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Empty<T>(PhantomData<T>);
+
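+// `Empty<T>` never stores or produces a `T`, so these impls are sound for
+// any `T`, even ones that are not `Send` or `Sync` themselves.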
+impl<T> Unpin for Empty<T> {}
+unsafe impl<T> Send for Empty<T> {}
+unsafe impl<T> Sync for Empty<T> {}
+
+/// Creates a stream that yields nothing.
+///
+/// The returned stream is immediately ready and returns `None`. Use
+/// [`stream::pending()`](super::pending()) to obtain a stream that is never
+/// ready.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut none = stream::empty::<i32>();
+///
+/// assert_eq!(None, none.next().await);
+/// }
+/// ```
+pub const fn empty<T>() -> Empty<T> {
+ Empty(PhantomData)
+}
+
+impl<T> Stream for Empty<T> {
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
+ Poll::Ready(None)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/filter.rs b/third_party/rust/tokio/src/stream/filter.rs
new file mode 100644
index 0000000000..799630b234
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/filter.rs
@@ -0,0 +1,58 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`filter`](super::StreamExt::filter) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Filter<St, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ }
+}
+
+impl<St, F> fmt::Debug for Filter<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Filter")
+ .field("stream", &self.stream)
+ .finish()
+ }
+}
+
+impl<St, F> Filter<St, F> {
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f }
+ }
+}
+
+impl<St, F> Stream for Filter<St, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> bool,
+{
+ type Item = St::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ loop {
+ match ready!(self.as_mut().project().stream.poll_next(cx)) {
+ Some(e) => {
+ if (self.as_mut().project().f)(&e) {
+ return Poll::Ready(Some(e));
+ }
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.stream.size_hint().1) // can't know a lower bound, due to the predicate
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/filter_map.rs b/third_party/rust/tokio/src/stream/filter_map.rs
new file mode 100644
index 0000000000..8dc05a5460
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/filter_map.rs
@@ -0,0 +1,58 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`filter_map`](super::StreamExt::filter_map) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct FilterMap<St, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ }
+}
+
+impl<St, F> fmt::Debug for FilterMap<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FilterMap")
+ .field("stream", &self.stream)
+ .finish()
+ }
+}
+
+impl<St, F> FilterMap<St, F> {
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f }
+ }
+}
+
+impl<St, F, T> Stream for FilterMap<St, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Option<T>,
+{
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ loop {
+ match ready!(self.as_mut().project().stream.poll_next(cx)) {
+ Some(e) => {
+ if let Some(e) = (self.as_mut().project().f)(e) {
+ return Poll::Ready(Some(e));
+ }
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.stream.size_hint().1) // can't know a lower bound, due to the predicate
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/fold.rs b/third_party/rust/tokio/src/stream/fold.rs
new file mode 100644
index 0000000000..7b9fead3db
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/fold.rs
@@ -0,0 +1,51 @@
+use crate::stream::Stream;
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future returned by the [`fold`](super::StreamExt::fold) method.
+ #[derive(Debug)]
+ pub struct FoldFuture<St, B, F> {
+ #[pin]
+ stream: St,
+ acc: Option<B>,
+ f: F,
+ }
+}
+
+impl<St, B, F> FoldFuture<St, B, F> {
+ pub(super) fn new(stream: St, init: B, f: F) -> Self {
+ Self {
+ stream,
+ acc: Some(init),
+ f,
+ }
+ }
+}
+
+impl<St, B, F> Future for FoldFuture<St, B, F>
+where
+ St: Stream,
+ F: FnMut(B, St::Item) -> B,
+{
+ type Output = B;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut me = self.project();
+ loop {
+ let next = ready!(me.stream.as_mut().poll_next(cx));
+
+ match next {
+ Some(v) => {
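+                    // The accumulator lives in an `Option` so it can be moved
+                    // out, passed to the closure by value, and stored back.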
+ let old = me.acc.take().unwrap();
+ let new = (me.f)(old, v);
+ *me.acc = Some(new);
+ }
+ None => return Poll::Ready(me.acc.take().unwrap()),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/fuse.rs b/third_party/rust/tokio/src/stream/fuse.rs
new file mode 100644
index 0000000000..6c9e02d664
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/fuse.rs
@@ -0,0 +1,53 @@
+use crate::stream::Stream;
+
+use pin_project_lite::pin_project;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Stream returned by [`fuse()`][super::StreamExt::fuse].
+ #[derive(Debug)]
+ pub struct Fuse<T> {
+ #[pin]
+ stream: Option<T>,
+ }
+}
+
+impl<T> Fuse<T>
+where
+ T: Stream,
+{
+ pub(crate) fn new(stream: T) -> Fuse<T> {
+ Fuse {
+ stream: Some(stream),
+ }
+ }
+}
+
+impl<T> Stream for Fuse<T>
+where
+ T: Stream,
+{
+ type Item = T::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
+ let res = match Option::as_pin_mut(self.as_mut().project().stream) {
+ Some(stream) => ready!(stream.poll_next(cx)),
+ None => return Poll::Ready(None),
+ };
+
+ if res.is_none() {
+ // Do not poll the stream anymore
+ self.as_mut().project().stream.set(None);
+ }
+
+ Poll::Ready(res)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self.stream {
+ Some(ref stream) => stream.size_hint(),
+ None => (0, Some(0)),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/iter.rs b/third_party/rust/tokio/src/stream/iter.rs
new file mode 100644
index 0000000000..36eeb5612f
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/iter.rs
@@ -0,0 +1,55 @@
+use crate::stream::Stream;
+
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`iter`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Iter<I> {
+ iter: I,
+}
+
+impl<I> Unpin for Iter<I> {}
+
+/// Converts an `Iterator` into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that.
+///
+/// ```
+/// # async fn dox() {
+/// use tokio::stream::{self, StreamExt};
+///
+/// let mut stream = stream::iter(vec![17, 19]);
+///
+/// assert_eq!(stream.next().await, Some(17));
+/// assert_eq!(stream.next().await, Some(19));
+/// assert_eq!(stream.next().await, None);
+/// # }
+/// ```
+pub fn iter<I>(i: I) -> Iter<I::IntoIter>
+where
+ I: IntoIterator,
+{
+ Iter {
+ iter: i.into_iter(),
+ }
+}
+
+impl<I> Stream for Iter<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<I::Item>> {
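+        // Consult the cooperative scheduling budget first so that a long,
+        // always-ready iterator cannot starve other tasks on this worker.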
+ ready!(crate::coop::poll_proceed(cx));
+ Poll::Ready(self.iter.next())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/map.rs b/third_party/rust/tokio/src/stream/map.rs
new file mode 100644
index 0000000000..dfac5a2c94
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/map.rs
@@ -0,0 +1,51 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`map`](super::StreamExt::map) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Map<St, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ }
+}
+
+impl<St, F> fmt::Debug for Map<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Map").field("stream", &self.stream).finish()
+ }
+}
+
+impl<St, F> Map<St, F> {
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Map { stream, f }
+ }
+}
+
+impl<St, F, T> Stream for Map<St, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> T,
+{
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
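+        // Project twice: once to poll the pinned inner stream, and again to
+        // reborrow `self` so the closure can be applied to the yielded item.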
+ self.as_mut()
+ .project()
+ .stream
+ .poll_next(cx)
+ .map(|opt| opt.map(|x| (self.as_mut().project().f)(x)))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/merge.rs b/third_party/rust/tokio/src/stream/merge.rs
new file mode 100644
index 0000000000..4850cd40c7
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/merge.rs
@@ -0,0 +1,97 @@
+use crate::stream::{Fuse, Stream};
+
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream returned by the [`merge`](super::StreamExt::merge) method.
+ pub struct Merge<T, U> {
+ #[pin]
+ a: Fuse<T>,
+ #[pin]
+ b: Fuse<U>,
+        // When `true`, poll `a` first; otherwise, poll `b`.
+ a_first: bool,
+ }
+}
+
+impl<T, U> Merge<T, U> {
+ pub(super) fn new(a: T, b: U) -> Merge<T, U>
+ where
+ T: Stream,
+ U: Stream,
+ {
+ Merge {
+ a: Fuse::new(a),
+ b: Fuse::new(b),
+ a_first: true,
+ }
+ }
+}
+
+impl<T, U> Stream for Merge<T, U>
+where
+ T: Stream,
+ U: Stream<Item = T::Item>,
+{
+ type Item = T::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
+ let me = self.project();
+ let a_first = *me.a_first;
+
+ // Toggle the flag
+ *me.a_first = !a_first;
+
+ if a_first {
+ poll_next(me.a, me.b, cx)
+ } else {
+ poll_next(me.b, me.a, cx)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_lower, a_upper) = self.a.size_hint();
+ let (b_lower, b_upper) = self.b.size_hint();
+
+ let upper = match (a_upper, b_upper) {
+ (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper),
+ _ => None,
+ };
+
+ (a_lower + b_lower, upper)
+ }
+}
+
+fn poll_next<T, U>(
+ first: Pin<&mut T>,
+ second: Pin<&mut U>,
+ cx: &mut Context<'_>,
+) -> Poll<Option<T::Item>>
+where
+ T: Stream,
+ U: Stream<Item = T::Item>,
+{
+ use Poll::*;
+
+ let mut done = true;
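+    // `done` stays `true` only if both streams returned `Ready(None)`; a
+    // `Pending` from either stream keeps the merged stream alive.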
+
+ match first.poll_next(cx) {
+ Ready(Some(val)) => return Ready(Some(val)),
+ Ready(None) => {}
+ Pending => done = false,
+ }
+
+ match second.poll_next(cx) {
+ Ready(Some(val)) => return Ready(Some(val)),
+ Ready(None) => {}
+ Pending => done = false,
+ }
+
+ if done {
+ Ready(None)
+ } else {
+ Pending
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/mod.rs b/third_party/rust/tokio/src/stream/mod.rs
new file mode 100644
index 0000000000..307ead5fba
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/mod.rs
@@ -0,0 +1,819 @@
+//! Stream utilities for Tokio.
+//!
+//! A `Stream` is an asynchronous sequence of values. It can be thought of as
+//! an asynchronous version of the standard library's `Iterator` trait.
+//!
+//! This module provides helpers to work with them.
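+//!
+//! As a minimal sketch of the idea (using only the `iter` helper and the
+//! `StreamExt::next` combinator defined in this module), a stream is
+//! consumed by awaiting one item at a time:
+//!
+//! ```
+//! use tokio::stream::{self, StreamExt};
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let mut stream = stream::iter(vec![1, 2, 3]);
+//!
+//!     // `next()` resolves to the next item, or `None` once the stream
+//!     // is exhausted.
+//!     while let Some(v) = stream.next().await {
+//!         println!("got {}", v);
+//!     }
+//! }
+//! ```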
+
+mod all;
+use all::AllFuture;
+
+mod any;
+use any::AnyFuture;
+
+mod chain;
+use chain::Chain;
+
+mod collect;
+use collect::Collect;
+pub use collect::FromStream;
+
+mod empty;
+pub use empty::{empty, Empty};
+
+mod filter;
+use filter::Filter;
+
+mod filter_map;
+use filter_map::FilterMap;
+
+mod fold;
+use fold::FoldFuture;
+
+mod fuse;
+use fuse::Fuse;
+
+mod iter;
+pub use iter::{iter, Iter};
+
+mod map;
+use map::Map;
+
+mod merge;
+use merge::Merge;
+
+mod next;
+use next::Next;
+
+mod once;
+pub use once::{once, Once};
+
+mod pending;
+pub use pending::{pending, Pending};
+
+mod stream_map;
+pub use stream_map::StreamMap;
+
+mod skip;
+use skip::Skip;
+
+mod skip_while;
+use skip_while::SkipWhile;
+
+mod try_next;
+use try_next::TryNext;
+
+mod take;
+use take::Take;
+
+mod take_while;
+use take_while::TakeWhile;
+
+cfg_time! {
+ mod timeout;
+ use timeout::Timeout;
+ use std::time::Duration;
+}
+
+pub use futures_core::Stream;
+
+/// An extension trait for `Stream`s that provides a variety of convenient
+/// combinator functions.
+pub trait StreamExt: Stream {
+ /// Consumes and returns the next value in the stream or `None` if the
+ /// stream is finished.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn next(&mut self) -> Option<Self::Item>;
+ /// ```
+ ///
+ /// Note that because `next` doesn't take ownership over the stream,
+ /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a
+ /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can
+ /// be done by boxing the stream using [`Box::pin`] or
+ /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils`
+ /// crate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=3);
+ ///
+ /// assert_eq!(stream.next().await, Some(1));
+ /// assert_eq!(stream.next().await, Some(2));
+ /// assert_eq!(stream.next().await, Some(3));
+ /// assert_eq!(stream.next().await, None);
+ /// # }
+ /// ```
+ fn next(&mut self) -> Next<'_, Self>
+ where
+ Self: Unpin,
+ {
+ Next::new(self)
+ }
+
+ /// Consumes and returns the next item in the stream. If an error is
+ /// encountered before the next item, the error is returned instead.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn try_next(&mut self) -> Result<Option<T>, E>;
+ /// ```
+ ///
+ /// This is similar to the [`next`](StreamExt::next) combinator,
+ /// but returns a [`Result<Option<T>, E>`](Result) rather than
+ /// an [`Option<Result<T, E>>`](Option), making for easy use
+ /// with the [`?`](std::ops::Try) operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(vec![Ok(1), Ok(2), Err("nope")]);
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(1)));
+ /// assert_eq!(stream.try_next().await, Ok(Some(2)));
+ /// assert_eq!(stream.try_next().await, Err("nope"));
+ /// # }
+ /// ```
+ fn try_next<T, E>(&mut self) -> TryNext<'_, Self>
+ where
+ Self: Stream<Item = Result<T, E>> + Unpin,
+ {
+ TryNext::new(self)
+ }
+
+ /// Maps this stream's items to a different type, returning a new stream of
+ /// the resulting type.
+ ///
+ /// The provided closure is executed over all elements of this stream as
+ /// they are made available. It is executed inline with calls to
+ /// [`poll_next`](Stream::poll_next).
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3);
+ /// let mut stream = stream.map(|x| x + 3);
+ ///
+ /// assert_eq!(stream.next().await, Some(4));
+ /// assert_eq!(stream.next().await, Some(5));
+ /// assert_eq!(stream.next().await, Some(6));
+ /// # }
+ /// ```
+ fn map<T, F>(self, f: F) -> Map<Self, F>
+ where
+ F: FnMut(Self::Item) -> T,
+ Self: Sized,
+ {
+ Map::new(self, f)
+ }
+
+ /// Combine two streams into one by interleaving the output of both as it
+ /// is produced.
+ ///
+ /// Values are produced from the merged stream in the order they arrive from
+ /// the two source streams. If both source streams provide values
+ /// simultaneously, the merge stream alternates between them. This provides
+ /// some level of fairness.
+ ///
+ /// The merged stream completes once **both** source streams complete. When
+ /// one source stream completes before the other, the merge stream
+ /// exclusively polls the remaining stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::StreamExt;
+ /// use tokio::sync::mpsc;
+ /// use tokio::time;
+ ///
+ /// use std::time::Duration;
+ ///
+ /// # /*
+ /// #[tokio::main]
+ /// # */
+ /// # #[tokio::main(basic_scheduler)]
+ /// async fn main() {
+ /// # time::pause();
+ /// let (mut tx1, rx1) = mpsc::channel(10);
+ /// let (mut tx2, rx2) = mpsc::channel(10);
+ ///
+ /// let mut rx = rx1.merge(rx2);
+ ///
+ /// tokio::spawn(async move {
+ /// // Send some values immediately
+ /// tx1.send(1).await.unwrap();
+ /// tx1.send(2).await.unwrap();
+ ///
+ /// // Let the other task send values
+ /// time::delay_for(Duration::from_millis(20)).await;
+ ///
+ /// tx1.send(4).await.unwrap();
+ /// });
+ ///
+ /// tokio::spawn(async move {
+ /// // Wait for the first task to send values
+ /// time::delay_for(Duration::from_millis(5)).await;
+ ///
+ /// tx2.send(3).await.unwrap();
+ ///
+ /// time::delay_for(Duration::from_millis(25)).await;
+ ///
+ /// // Send the final value
+ /// tx2.send(5).await.unwrap();
+ /// });
+ ///
+ /// assert_eq!(1, rx.next().await.unwrap());
+ /// assert_eq!(2, rx.next().await.unwrap());
+ /// assert_eq!(3, rx.next().await.unwrap());
+ /// assert_eq!(4, rx.next().await.unwrap());
+ /// assert_eq!(5, rx.next().await.unwrap());
+ ///
+ /// // The merged stream is consumed
+ /// assert!(rx.next().await.is_none());
+ /// }
+ /// ```
+ fn merge<U>(self, other: U) -> Merge<Self, U>
+ where
+ U: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ Merge::new(self, other)
+ }
+
+ /// Filters the values produced by this stream according to the provided
+ /// predicate.
+ ///
+ /// As values of this stream are made available, the provided predicate `f`
+    /// will be run against them. If the predicate resolves to `true`, then the
+    /// stream will yield the value, but if the predicate resolves to `false`,
+    /// then the value will be discarded and the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to [`Iterator::filter`] method in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=8);
+ /// let mut evens = stream.filter(|x| x % 2 == 0);
+ ///
+ /// assert_eq!(Some(2), evens.next().await);
+ /// assert_eq!(Some(4), evens.next().await);
+ /// assert_eq!(Some(6), evens.next().await);
+ /// assert_eq!(Some(8), evens.next().await);
+ /// assert_eq!(None, evens.next().await);
+ /// # }
+ /// ```
+ fn filter<F>(self, f: F) -> Filter<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ Filter::new(self, f)
+ }
+
+ /// Filters the values produced by this stream while simultaneously mapping
+ /// them to a different type according to the provided closure.
+ ///
+ /// As values of this stream are made available, the provided function will
+    /// be run on them. If the closure `f` returns
+    /// [`Some(item)`](Some), then the stream will yield the value `item`, but
+    /// if it returns [`None`], then the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to [`Iterator::filter_map`] method in the
+ /// standard library.
+ ///
+ /// # Examples
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=8);
+ /// let mut evens = stream.filter_map(|x| {
+ /// if x % 2 == 0 { Some(x + 1) } else { None }
+ /// });
+ ///
+ /// assert_eq!(Some(3), evens.next().await);
+ /// assert_eq!(Some(5), evens.next().await);
+ /// assert_eq!(Some(7), evens.next().await);
+ /// assert_eq!(Some(9), evens.next().await);
+ /// assert_eq!(None, evens.next().await);
+ /// # }
+ /// ```
+ fn filter_map<T, F>(self, f: F) -> FilterMap<Self, F>
+ where
+ F: FnMut(Self::Item) -> Option<T>,
+ Self: Sized,
+ {
+ FilterMap::new(self, f)
+ }
+
+ /// Creates a stream which ends after the first `None`.
+ ///
+ /// After a stream returns `None`, behavior is undefined. Future calls to
+    /// `poll_next` may return `Some(T)` again, return `None`, or panic.
+ /// `fuse()` adapts a stream, ensuring that after `None` is given, it will
+ /// return `None` forever.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{Stream, StreamExt};
+ ///
+ /// use std::pin::Pin;
+ /// use std::task::{Context, Poll};
+ ///
+ /// // a stream which alternates between Some and None
+ /// struct Alternate {
+ /// state: i32,
+ /// }
+ ///
+ /// impl Stream for Alternate {
+ /// type Item = i32;
+ ///
+ /// fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<i32>> {
+ /// let val = self.state;
+ /// self.state = self.state + 1;
+ ///
+ /// // if it's even, Some(i32), else None
+ /// if val % 2 == 0 {
+ /// Poll::Ready(Some(val))
+ /// } else {
+ /// Poll::Ready(None)
+ /// }
+ /// }
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut stream = Alternate { state: 0 };
+ ///
+ /// // the stream goes back and forth
+ /// assert_eq!(stream.next().await, Some(0));
+ /// assert_eq!(stream.next().await, None);
+ /// assert_eq!(stream.next().await, Some(2));
+ /// assert_eq!(stream.next().await, None);
+ ///
+ /// // however, once it is fused
+ /// let mut stream = stream.fuse();
+ ///
+ /// assert_eq!(stream.next().await, Some(4));
+ /// assert_eq!(stream.next().await, None);
+ ///
+ /// // it will always return `None` after the first time.
+ /// assert_eq!(stream.next().await, None);
+ /// assert_eq!(stream.next().await, None);
+ /// assert_eq!(stream.next().await, None);
+ /// }
+ /// ```
+ fn fuse(self) -> Fuse<Self>
+ where
+ Self: Sized,
+ {
+ Fuse::new(self)
+ }
+
+ /// Creates a new stream of at most `n` items of the underlying stream.
+ ///
+ /// Once `n` items have been yielded from this stream then it will always
+ /// return that the stream is done.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).take(3);
+ ///
+ /// assert_eq!(Some(1), stream.next().await);
+ /// assert_eq!(Some(2), stream.next().await);
+ /// assert_eq!(Some(3), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn take(self, n: usize) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ Take::new(self, n)
+ }
+
+ /// Take elements from this stream while the provided predicate
+ /// resolves to `true`.
+ ///
+ /// This function, like `Iterator::take_while`, will take elements from the
+    /// stream until the predicate `f` resolves to `false`. Once the predicate
+    /// returns `false` for an element, the stream will always report that it
+    /// is done.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).take_while(|x| *x <= 3);
+ ///
+ /// assert_eq!(Some(1), stream.next().await);
+ /// assert_eq!(Some(2), stream.next().await);
+ /// assert_eq!(Some(3), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn take_while<F>(self, f: F) -> TakeWhile<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ TakeWhile::new(self, f)
+ }
+
+    /// Creates a new stream that will skip the first `n` items of the
+ /// underlying stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).skip(7);
+ ///
+ /// assert_eq!(Some(8), stream.next().await);
+ /// assert_eq!(Some(9), stream.next().await);
+ /// assert_eq!(Some(10), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn skip(self, n: usize) -> Skip<Self>
+ where
+ Self: Sized,
+ {
+ Skip::new(self, n)
+ }
+
+ /// Skip elements from the underlying stream while the provided predicate
+ /// resolves to `true`.
+ ///
+    /// This function, like [`Iterator::skip_while`], will ignore elements from
+    /// the stream until the predicate `f` resolves to `false`. Once the
+    /// predicate returns `false` for an element, that element and all
+    /// remaining elements are yielded.
+ ///
+ /// [`Iterator::skip_while`]: std::iter::Iterator::skip_while()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ /// let mut stream = stream::iter(vec![1,2,3,4,1]).skip_while(|x| *x < 3);
+ ///
+ /// assert_eq!(Some(3), stream.next().await);
+ /// assert_eq!(Some(4), stream.next().await);
+ /// assert_eq!(Some(1), stream.next().await);
+ /// assert_eq!(None, stream.next().await);
+ /// # }
+ /// ```
+ fn skip_while<F>(self, f: F) -> SkipWhile<Self, F>
+ where
+ F: FnMut(&Self::Item) -> bool,
+ Self: Sized,
+ {
+ SkipWhile::new(self, f)
+ }
+
+ /// Tests if every element of the stream matches a predicate.
+ ///
+ /// `all()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the stream, and if they all return
+ /// `true`, then so does `all`. If any of them return `false`, it
+ /// returns `false`. An empty stream returns `true`.
+ ///
+ /// `all()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `false`, given that no matter what else happens,
+ /// the result will also be `false`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(stream::iter(&a).all(|&x| x > 0).await);
+ ///
+ /// assert!(!stream::iter(&a).all(|&x| x > 2).await);
+ /// # }
+ /// ```
+ ///
+ /// Stopping at the first `false`:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = stream::iter(&a);
+ ///
+ /// assert!(!iter.all(|&x| x != 2).await);
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next().await, Some(&3));
+ /// # }
+ /// ```
+ fn all<F>(&mut self, f: F) -> AllFuture<'_, Self, F>
+ where
+ Self: Unpin,
+ F: FnMut(Self::Item) -> bool,
+ {
+ AllFuture::new(self, f)
+ }
+
+ /// Tests if any element of the stream matches a predicate.
+ ///
+ /// `any()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the stream, and if any of them return
+ /// `true`, then so does `any()`. If they all return `false`, it
+ /// returns `false`.
+ ///
+ /// `any()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `true`, given that no matter what else happens,
+ /// the result will also be `true`.
+ ///
+ /// An empty stream returns `false`.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(stream::iter(&a).any(|&x| x > 0).await);
+ ///
+ /// assert!(!stream::iter(&a).any(|&x| x > 5).await);
+ /// # }
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = stream::iter(&a);
+ ///
+ /// assert!(iter.any(|&x| x != 2).await);
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next().await, Some(&2));
+ /// # }
+ /// ```
+ fn any<F>(&mut self, f: F) -> AnyFuture<'_, Self, F>
+ where
+ Self: Unpin,
+ F: FnMut(Self::Item) -> bool,
+ {
+ AnyFuture::new(self, f)
+ }
+
+ /// Combine two streams into one by first returning all values from the
+ /// first stream then all values from the second stream.
+ ///
+ /// As long as `self` still has values to emit, no values from `other` are
+ /// emitted, even if some are ready.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let one = stream::iter(vec![1, 2, 3]);
+ /// let two = stream::iter(vec![4, 5, 6]);
+ ///
+ /// let mut stream = one.chain(two);
+ ///
+ /// assert_eq!(stream.next().await, Some(1));
+ /// assert_eq!(stream.next().await, Some(2));
+ /// assert_eq!(stream.next().await, Some(3));
+ /// assert_eq!(stream.next().await, Some(4));
+ /// assert_eq!(stream.next().await, Some(5));
+ /// assert_eq!(stream.next().await, Some(6));
+ /// assert_eq!(stream.next().await, None);
+ /// }
+ /// ```
+ fn chain<U>(self, other: U) -> Chain<Self, U>
+ where
+ U: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ Chain::new(self, other)
+ }
+
+ /// A combinator that applies a function to every element in a stream
+ /// producing a single, final value.
+ ///
+ /// # Examples
+ /// Basic usage:
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, *};
+ ///
+ /// let s = stream::iter(vec![1u8, 2, 3]);
+ /// let sum = s.fold(0, |acc, x| acc + x).await;
+ ///
+ /// assert_eq!(sum, 6);
+ /// # }
+ /// ```
+ fn fold<B, F>(self, init: B, f: F) -> FoldFuture<Self, B, F>
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ FoldFuture::new(self, init, f)
+ }
+
+    /// Drains the stream, pushing all emitted values into a collection.
+ ///
+ /// `collect` streams all values, awaiting as needed. Values are pushed into
+ /// a collection. A number of different target collection types are
+ /// supported, including [`Vec`](std::vec::Vec),
+ /// [`String`](std::string::String), and [`Bytes`](bytes::Bytes).
+ ///
+ /// # `Result`
+ ///
+ /// `collect()` can also be used with streams of type `Result<T, E>` where
+ /// `T: FromStream<_>`. In this case, `collect()` will stream as long as
+ /// values yielded from the stream are `Ok(_)`. If `Err(_)` is encountered,
+ /// streaming is terminated and `collect()` returns the `Err`.
+ ///
+ /// # Notes
+ ///
+ /// `FromStream` is currently a sealed trait. Stabilization is pending
+    /// enhancements to the Rust language.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let doubled: Vec<i32> =
+ /// stream::iter(vec![1, 2, 3])
+ /// .map(|x| x * 2)
+ /// .collect()
+ /// .await;
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// }
+ /// ```
+ ///
+ /// Collecting a stream of `Result` values
+ ///
+ /// ```
+ /// use tokio::stream::{self, StreamExt};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// // A stream containing only `Ok` values will be collected
+ /// let values: Result<Vec<i32>, &str> =
+ /// stream::iter(vec![Ok(1), Ok(2), Ok(3)])
+ /// .collect()
+ /// .await;
+ ///
+ /// assert_eq!(Ok(vec![1, 2, 3]), values);
+ ///
+ /// // A stream containing `Err` values will return the first error.
+ /// let results = vec![Ok(1), Err("no"), Ok(2), Ok(3), Err("nein")];
+ ///
+ /// let values: Result<Vec<i32>, &str> =
+ /// stream::iter(results)
+ /// .collect()
+ /// .await;
+ ///
+ /// assert_eq!(Err("no"), values);
+ /// }
+ /// ```
+ fn collect<T>(self) -> Collect<Self, T>
+ where
+ T: FromStream<Self::Item>,
+ Self: Sized,
+ {
+ Collect::new(self)
+ }
+
+ /// Applies a per-item timeout to the passed stream.
+ ///
+ /// `timeout()` takes a `Duration` that represents the maximum amount of
+ /// time each element of the stream has to complete before timing out.
+ ///
+ /// If the wrapped stream yields a value before the deadline is reached, the
+ /// value is returned. Otherwise, an error is returned. The caller may decide
+ /// to continue consuming the stream and will eventually get the next source
+ /// stream value once it becomes available.
+ ///
+ /// # Notes
+ ///
+ /// This function consumes the stream passed into it and returns a
+ /// wrapped version of it.
+ ///
+ /// Polling the returned stream will continue to poll the inner stream even
+ /// if one or more items time out.
+ ///
+ /// # Examples
+ ///
+ /// Suppose we have a stream `int_stream` that yields 3 numbers (1, 2, 3):
+ ///
+ /// ```
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// use tokio::stream::{self, StreamExt};
+ /// use std::time::Duration;
+ /// # let int_stream = stream::iter(1..=3);
+ ///
+ /// let mut int_stream = int_stream.timeout(Duration::from_secs(1));
+ ///
+ /// // When no items time out, we get the 3 elements in succession:
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(1)));
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(2)));
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(3)));
+ /// assert_eq!(int_stream.try_next().await, Ok(None));
+ ///
+ /// // If the second item times out, we get an error and continue polling the stream:
+ /// # let mut int_stream = stream::iter(vec![Ok(1), Err(()), Ok(2), Ok(3)]);
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(1)));
+ /// assert!(int_stream.try_next().await.is_err());
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(2)));
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(3)));
+ /// assert_eq!(int_stream.try_next().await, Ok(None));
+ ///
+ /// // If we want to stop consuming the source stream the first time an
+ /// // element times out, we can use the `take_while` operator:
+ /// # let int_stream = stream::iter(vec![Ok(1), Err(()), Ok(2), Ok(3)]);
+ /// let mut int_stream = int_stream.take_while(Result::is_ok);
+ ///
+ /// assert_eq!(int_stream.try_next().await, Ok(Some(1)));
+ /// assert_eq!(int_stream.try_next().await, Ok(None));
+ /// # }
+ /// ```
+    #[cfg(feature = "time")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
+ fn timeout(self, duration: Duration) -> Timeout<Self>
+ where
+ Self: Sized,
+ {
+ Timeout::new(self, duration)
+ }
+}
+
+impl<St: ?Sized> StreamExt for St where St: Stream {}
diff --git a/third_party/rust/tokio/src/stream/next.rs b/third_party/rust/tokio/src/stream/next.rs
new file mode 100644
index 0000000000..3909c0c233
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/next.rs
@@ -0,0 +1,28 @@
+use crate::stream::Stream;
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Future for the [`next`](super::StreamExt::next) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Next<'a, St: ?Sized> {
+ stream: &'a mut St,
+}
+
+impl<St: ?Sized + Unpin> Unpin for Next<'_, St> {}
+
+impl<'a, St: ?Sized> Next<'a, St> {
+ pub(super) fn new(stream: &'a mut St) -> Self {
+ Next { stream }
+ }
+}
+
+impl<St: ?Sized + Stream + Unpin> Future for Next<'_, St> {
+ type Output = Option<St::Item>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.stream).poll_next(cx)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/once.rs b/third_party/rust/tokio/src/stream/once.rs
new file mode 100644
index 0000000000..04a642f309
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/once.rs
@@ -0,0 +1,52 @@
+use crate::stream::{self, Iter, Stream};
+
+use core::option;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`once`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Once<T> {
+ iter: Iter<option::IntoIter<T>>,
+}
+
+impl<I> Unpin for Once<I> {}
+
+/// Creates a stream that emits an element exactly once.
+///
+/// The returned stream is immediately ready and emits the provided value once.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// // one is the loneliest number
+/// let mut one = stream::once(1);
+///
+/// assert_eq!(Some(1), one.next().await);
+///
+/// // just one, that's all we get
+/// assert_eq!(None, one.next().await);
+/// }
+/// ```
+pub fn once<T>(value: T) -> Once<T> {
+ Once {
+ iter: stream::iter(Some(value).into_iter()),
+ }
+}
+
+impl<T> Stream for Once<T> {
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ Pin::new(&mut self.iter).poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/pending.rs b/third_party/rust/tokio/src/stream/pending.rs
new file mode 100644
index 0000000000..2e06a1c261
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/pending.rs
@@ -0,0 +1,54 @@
+use crate::stream::Stream;
+
+use core::marker::PhantomData;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Stream for the [`pending`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Pending<T>(PhantomData<T>);
+
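+// Like `Empty<T>`, `Pending<T>` never stores or produces a `T`, so these
+// impls are sound for any `T`.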
+impl<T> Unpin for Pending<T> {}
+unsafe impl<T> Send for Pending<T> {}
+unsafe impl<T> Sync for Pending<T> {}
+
+/// Creates a stream that is never ready.
+///
+/// The returned stream is never ready. Attempting to call
+/// [`next()`](crate::stream::StreamExt::next) will never complete. Use
+/// [`stream::empty()`](super::empty()) to obtain a stream that is
+/// immediately ready but returns no values.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```no_run
+/// use tokio::stream::{self, StreamExt};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut never = stream::pending::<i32>();
+///
+/// // This will never complete
+/// never.next().await;
+///
+/// unreachable!();
+/// }
+/// ```
+pub const fn pending<T>() -> Pending<T> {
+ Pending(PhantomData)
+}
+
+impl<T> Stream for Pending<T> {
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
+ Poll::Pending
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, None)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/skip.rs b/third_party/rust/tokio/src/stream/skip.rs
new file mode 100644
index 0000000000..39540cc984
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/skip.rs
@@ -0,0 +1,63 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`skip`](super::StreamExt::skip) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Skip<St> {
+ #[pin]
+ stream: St,
+ remaining: usize,
+ }
+}
+
+impl<St> fmt::Debug for Skip<St>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Skip")
+ .field("stream", &self.stream)
+ .finish()
+ }
+}
+
+impl<St> Skip<St> {
+ pub(super) fn new(stream: St, remaining: usize) -> Self {
+ Self { stream, remaining }
+ }
+}
+
+impl<St> Stream for Skip<St>
+where
+ St: Stream,
+{
+ type Item = St::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ loop {
+ match ready!(self.as_mut().project().stream.poll_next(cx)) {
+ Some(e) => {
+ if self.remaining == 0 {
+ return Poll::Ready(Some(e));
+ }
+ *self.as_mut().project().remaining -= 1;
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper) = self.stream.size_hint();
+
+ let lower = lower.saturating_sub(self.remaining);
+ let upper = upper.map(|x| x.saturating_sub(self.remaining));
+
+ (lower, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/skip_while.rs b/third_party/rust/tokio/src/stream/skip_while.rs
new file mode 100644
index 0000000000..4e0500701a
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/skip_while.rs
@@ -0,0 +1,73 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`skip_while`](super::StreamExt::skip_while) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct SkipWhile<St, F> {
+ #[pin]
+ stream: St,
+ predicate: Option<F>,
+ }
+}
+
+impl<St, F> fmt::Debug for SkipWhile<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SkipWhile")
+ .field("stream", &self.stream)
+ .finish()
+ }
+}
+
+impl<St, F> SkipWhile<St, F> {
+ pub(super) fn new(stream: St, predicate: F) -> Self {
+ Self {
+ stream,
+ predicate: Some(predicate),
+ }
+ }
+}
+
+impl<St, F> Stream for SkipWhile<St, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> bool,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
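+        // `predicate` is `Some` only while items are still being skipped;
+        // once an item fails the predicate, the predicate is dropped and
+        // that item plus everything after it is passed through untouched.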
+ if let Some(predicate) = this.predicate {
+ loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(item) => {
+ if !(predicate)(&item) {
+ *this.predicate = None;
+ return Poll::Ready(Some(item));
+ }
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ } else {
+ this.stream.poll_next(cx)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper) = self.stream.size_hint();
+
+ if self.predicate.is_some() {
+ return (0, upper);
+ }
+
+ (lower, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/stream_map.rs b/third_party/rust/tokio/src/stream/stream_map.rs
new file mode 100644
index 0000000000..2f60ea4dda
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/stream_map.rs
@@ -0,0 +1,503 @@
+use crate::stream::Stream;
+
+use std::borrow::Borrow;
+use std::hash::Hash;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Combine many streams into one, indexing each source stream with a unique
+/// key.
+///
+/// `StreamMap` is similar to [`StreamExt::merge`] in that it combines source
+/// streams into a single merged stream that yields values in the order that
+/// they arrive from the source streams. However, `StreamMap` has a lot more
+/// flexibility in usage patterns.
+///
+/// `StreamMap` can:
+///
+/// * Merge an arbitrary number of streams.
+/// * Track which source stream the value was received from.
+/// * Handle inserting and removing streams from the set of managed streams at
+/// any point during iteration.
+///
+/// All source streams held by `StreamMap` are indexed using a key. This key is
+/// included with the value when a source stream yields a value. The key can
+/// also be used to remove a stream from the `StreamMap` before it has
+/// completed.
+///
+/// # `Unpin`
+///
+/// Because the `StreamMap` API moves streams during runtime, both streams and
+/// keys must be `Unpin`. In order to insert a `!Unpin` stream into a
+/// `StreamMap`, use [`pin!`] to pin the stream to the stack or [`Box::pin`] to
+/// pin the stream in the heap.
+///
+/// # Implementation
+///
+/// `StreamMap` is backed by a `Vec<(K, V)>`. There is no guarantee that this
+/// internal implementation detail will persist in future versions, but it is
+/// important to know the runtime implications. In general, `StreamMap` works
+/// best with a "smallish" number of streams as all entries are scanned on
+/// insert, remove, and polling. In cases where a large number of streams need
+/// to be merged, it may be advisable to use tasks sending values on a shared
+/// [`mpsc`] channel.
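+///
+/// As a sketch of that fan-in pattern (the number of sources and the channel
+/// capacity below are illustrative only):
+///
+/// ```
+/// use tokio::stream::{self, StreamExt};
+/// use tokio::sync::mpsc;
+///
+/// #[tokio::main]
+/// async fn main() {
+///     let (tx, mut rx) = mpsc::channel(32);
+///
+///     // One task per source stream; each forwards its values into the
+///     // shared channel.
+///     for i in 0..100u32 {
+///         let mut tx = tx.clone();
+///         tokio::spawn(async move {
+///             let mut source = stream::iter(vec![i]);
+///             while let Some(v) = source.next().await {
+///                 let _ = tx.send(v).await;
+///             }
+///         });
+///     }
+///     drop(tx);
+///
+///     // The receiving half yields the merged values.
+///     while let Some(v) = rx.recv().await {
+///         println!("{}", v);
+///     }
+/// }
+/// ```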
+///
+/// [`StreamExt::merge`]: crate::stream::StreamExt::merge
+/// [`mpsc`]: crate::sync::mpsc
+/// [`pin!`]: macro@pin
+/// [`Box::pin`]: std::boxed::Box::pin
+///
+/// # Examples
+///
+/// Merging two streams, then removing them after receiving the first value
+///
+/// ```
+/// use tokio::stream::{StreamExt, StreamMap};
+/// use tokio::sync::mpsc;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (mut tx1, rx1) = mpsc::channel(10);
+/// let (mut tx2, rx2) = mpsc::channel(10);
+///
+/// tokio::spawn(async move {
+/// tx1.send(1).await.unwrap();
+///
+/// // This value will never be received. The send may or may not return
+/// // `Err` depending on if the remote end closed first or not.
+/// let _ = tx1.send(2).await;
+/// });
+///
+/// tokio::spawn(async move {
+/// tx2.send(3).await.unwrap();
+/// let _ = tx2.send(4).await;
+/// });
+///
+/// let mut map = StreamMap::new();
+///
+/// // Insert both streams
+/// map.insert("one", rx1);
+/// map.insert("two", rx2);
+///
+/// // Read twice
+/// for _ in 0..2 {
+/// let (key, val) = map.next().await.unwrap();
+///
+/// if key == "one" {
+/// assert_eq!(val, 1);
+/// } else {
+/// assert_eq!(val, 3);
+/// }
+///
+/// // Remove the stream to prevent reading the next value
+/// map.remove(key);
+/// }
+/// }
+/// ```
+///
+/// This example models a read-only client to a chat system with channels. The
+/// client sends commands to join and leave channels. `StreamMap` is used to
+/// manage active channel subscriptions.
+///
+/// For simplicity, messages are displayed with `println!`, but they could be
+/// sent to the client over a socket.
+///
+/// ```no_run
+/// use tokio::stream::{Stream, StreamExt, StreamMap};
+///
+/// enum Command {
+/// Join(String),
+/// Leave(String),
+/// }
+///
+/// fn commands() -> impl Stream<Item = Command> {
+/// // Streams in user commands by parsing `stdin`.
+/// # tokio::stream::pending()
+/// }
+///
+/// // Join a channel, returns a stream of messages received on the channel.
+/// fn join(channel: &str) -> impl Stream<Item = String> + Unpin {
+/// // left as an exercise to the reader
+/// # tokio::stream::pending()
+/// }
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut channels = StreamMap::new();
+///
+/// // Input commands (join / leave channels).
+/// let cmds = commands();
+/// tokio::pin!(cmds);
+///
+/// loop {
+/// tokio::select! {
+/// Some(cmd) = cmds.next() => {
+/// match cmd {
+/// Command::Join(chan) => {
+/// // Join the channel and add it to the `channels`
+/// // stream map
+/// let msgs = join(&chan);
+/// channels.insert(chan, msgs);
+/// }
+/// Command::Leave(chan) => {
+/// channels.remove(&chan);
+/// }
+/// }
+/// }
+/// Some((chan, msg)) = channels.next() => {
+/// // Received a message, display it on stdout with the channel
+/// // it originated from.
+/// println!("{}: {}", chan, msg);
+/// }
+/// // Both the `commands` stream and the `channels` stream are
+/// // complete. There is no more work to do, so leave the loop.
+/// else => break,
+/// }
+/// }
+/// }
+/// ```
+#[derive(Debug, Default)]
+pub struct StreamMap<K, V> {
+ /// Streams stored in the map
+ entries: Vec<(K, V)>,
+}
+
+impl<K, V> StreamMap<K, V> {
+ /// Creates an empty `StreamMap`.
+ ///
+ /// The stream map is initially created with a capacity of `0`, so it will
+ /// not allocate until it is first inserted into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, Pending};
+ ///
+ /// let map: StreamMap<&str, Pending<()>> = StreamMap::new();
+ /// ```
+ pub fn new() -> StreamMap<K, V> {
+ StreamMap { entries: vec![] }
+ }
+
+ /// Creates an empty `StreamMap` with the specified capacity.
+ ///
+ /// The stream map will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the stream map will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, Pending};
+ ///
+ /// let map: StreamMap<&str, Pending<()>> = StreamMap::with_capacity(10);
+ /// ```
+ pub fn with_capacity(capacity: usize) -> StreamMap<K, V> {
+ StreamMap {
+ entries: Vec::with_capacity(capacity),
+ }
+ }
+
+ /// Returns an iterator visiting all keys in arbitrary order.
+ ///
+    /// The iterator element type is `&'a K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ ///
+ /// map.insert("a", pending::<i32>());
+ /// map.insert("b", pending());
+ /// map.insert("c", pending());
+ ///
+ /// for key in map.keys() {
+ /// println!("{}", key);
+ /// }
+ /// ```
+ pub fn keys(&self) -> impl Iterator<Item = &K> {
+ self.entries.iter().map(|(k, _)| k)
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ ///
+    /// The iterator element type is `&'a V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ ///
+ /// map.insert("a", pending::<i32>());
+ /// map.insert("b", pending());
+ /// map.insert("c", pending());
+ ///
+ /// for stream in map.values() {
+ /// println!("{:?}", stream);
+ /// }
+ /// ```
+ pub fn values(&self) -> impl Iterator<Item = &V> {
+ self.entries.iter().map(|(_, v)| v)
+ }
+
+ /// An iterator visiting all values mutably in arbitrary order.
+ ///
+    /// The iterator element type is `&'a mut V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ ///
+ /// map.insert("a", pending::<i32>());
+ /// map.insert("b", pending());
+ /// map.insert("c", pending());
+ ///
+ /// for stream in map.values_mut() {
+ /// println!("{:?}", stream);
+ /// }
+ /// ```
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
+ self.entries.iter_mut().map(|(_, v)| v)
+ }
+
+ /// Returns the number of streams the map can hold without reallocating.
+ ///
+ /// This number is a lower bound; the `StreamMap` might be able to hold
+ /// more, but is guaranteed to be able to hold at least this many.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, Pending};
+ ///
+ /// let map: StreamMap<i32, Pending<()>> = StreamMap::with_capacity(100);
+ /// assert!(map.capacity() >= 100);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.entries.capacity()
+ }
+
+ /// Returns the number of streams in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut a = StreamMap::new();
+ /// assert_eq!(a.len(), 0);
+ /// a.insert(1, pending::<i32>());
+ /// assert_eq!(a.len(), 1);
+ /// ```
+ pub fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ /// Returns `true` if the map contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+    /// use tokio::stream::{StreamMap, pending};
+    ///
+    /// let mut a = StreamMap::new();
+    /// assert!(a.is_empty());
+    /// a.insert(1, pending::<i32>());
+ /// assert!(!a.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.entries.is_empty()
+ }
+
+ /// Clears the map, removing all key-stream pairs. Keeps the allocated
+ /// memory for reuse.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut a = StreamMap::new();
+ /// a.insert(1, pending::<i32>());
+ /// a.clear();
+ /// assert!(a.is_empty());
+ /// ```
+ pub fn clear(&mut self) {
+ self.entries.clear();
+ }
+
+ /// Insert a key-stream pair into the map.
+ ///
+ /// If the map did not have this key present, `None` is returned.
+ ///
+ /// If the map did have this key present, the new `stream` replaces the old
+ /// one and the old stream is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ ///
+ /// assert!(map.insert(37, pending::<i32>()).is_none());
+ /// assert!(!map.is_empty());
+ ///
+ /// map.insert(37, pending());
+ /// assert!(map.insert(37, pending()).is_some());
+ /// ```
+ pub fn insert(&mut self, k: K, stream: V) -> Option<V>
+ where
+ K: Hash + Eq,
+ {
+ let ret = self.remove(&k);
+ self.entries.push((k, stream));
+
+ ret
+ }
+
+ /// Removes a key from the map, returning the stream at the key if the key was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but `Hash` and
+ /// `Eq` on the borrowed form must match those for the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ /// map.insert(1, pending::<i32>());
+ /// assert!(map.remove(&1).is_some());
+ /// assert!(map.remove(&1).is_none());
+ /// ```
+ pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ for i in 0..self.entries.len() {
+ if self.entries[i].0.borrow() == k {
+ return Some(self.entries.swap_remove(i).1);
+ }
+ }
+
+ None
+ }
+
+ /// Returns `true` if the map contains a stream for the specified key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but `Hash` and
+ /// `Eq` on the borrowed form must match those for the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::stream::{StreamMap, pending};
+ ///
+ /// let mut map = StreamMap::new();
+ /// map.insert(1, pending::<i32>());
+ /// assert_eq!(map.contains_key(&1), true);
+ /// assert_eq!(map.contains_key(&2), false);
+ /// ```
+ pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ for i in 0..self.entries.len() {
+ if self.entries[i].0.borrow() == k {
+ return true;
+ }
+ }
+
+ false
+ }
+}
+
+impl<K, V> StreamMap<K, V>
+where
+ K: Unpin,
+ V: Stream + Unpin,
+{
+    /// Polls the next value, including the index of the vec entry it came from.
+ fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<Option<(usize, V::Item)>> {
+ use Poll::*;
+
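+        // Start polling at a random index so that repeated calls do not
+        // systematically favor the entries at the front of the vec.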
+ let start = crate::util::thread_rng_n(self.entries.len() as u32) as usize;
+ let mut idx = start;
+
+ for _ in 0..self.entries.len() {
+ let (_, stream) = &mut self.entries[idx];
+
+ match Pin::new(stream).poll_next(cx) {
+ Ready(Some(val)) => return Ready(Some((idx, val))),
+ Ready(None) => {
+ // Remove the entry
+ self.entries.swap_remove(idx);
+
+ // Check if this was the last entry, if so the cursor needs
+ // to wrap
+ if idx == self.entries.len() {
+ idx = 0;
+ } else if idx < start && start <= self.entries.len() {
+ // The stream being swapped into the current index has
+ // already been polled, so skip it.
+ idx = idx.wrapping_add(1) % self.entries.len();
+ }
+ }
+ Pending => {
+ idx = idx.wrapping_add(1) % self.entries.len();
+ }
+ }
+ }
+
+ // If the map is empty, then the stream is complete.
+ if self.entries.is_empty() {
+ Ready(None)
+ } else {
+ Pending
+ }
+ }
+}
+
+impl<K, V> Stream for StreamMap<K, V>
+where
+ K: Clone + Unpin,
+ V: Stream + Unpin,
+{
+ type Item = (K, V::Item);
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ if let Some((idx, val)) = ready!(self.poll_next_entry(cx)) {
+ let key = self.entries[idx].0.clone();
+ Poll::Ready(Some((key, val)))
+ } else {
+ Poll::Ready(None)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let mut ret = (0, Some(0));
+
+ for (_, stream) in &self.entries {
+ let hint = stream.size_hint();
+
+ ret.0 += hint.0;
+
+ match (ret.1, hint.1) {
+ (Some(a), Some(b)) => ret.1 = Some(a + b),
+ (Some(_), None) => ret.1 = None,
+ _ => {}
+ }
+ }
+
+ ret
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/take.rs b/third_party/rust/tokio/src/stream/take.rs
new file mode 100644
index 0000000000..a92430b77c
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/take.rs
@@ -0,0 +1,76 @@
+use crate::stream::Stream;
+
+use core::cmp;
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`take`](super::StreamExt::take) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Take<St> {
+ #[pin]
+ stream: St,
+ remaining: usize,
+ }
+}
+
+impl<St> fmt::Debug for Take<St>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Take")
+ .field("stream", &self.stream)
+ .finish()
+ }
+}
+
+impl<St> Take<St> {
+ pub(super) fn new(stream: St, remaining: usize) -> Self {
+ Self { stream, remaining }
+ }
+}
+
+impl<St> Stream for Take<St>
+where
+ St: Stream,
+{
+ type Item = St::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
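+        // Forward items while the budget lasts; once `remaining` hits zero,
+        // the stream is permanently done.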
+ if *self.as_mut().project().remaining > 0 {
+ self.as_mut().project().stream.poll_next(cx).map(|ready| {
+ match &ready {
+ Some(_) => {
+ *self.as_mut().project().remaining -= 1;
+ }
+ None => {
+ *self.as_mut().project().remaining = 0;
+ }
+ }
+ ready
+ })
+ } else {
+ Poll::Ready(None)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.remaining == 0 {
+ return (0, Some(0));
+ }
+
+ let (lower, upper) = self.stream.size_hint();
+
+        let lower = cmp::min(lower, self.remaining);
+
+        let upper = match upper {
+            Some(x) if x < self.remaining => Some(x),
+            _ => Some(self.remaining),
+ };
+
+ (lower, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/take_while.rs b/third_party/rust/tokio/src/stream/take_while.rs
new file mode 100644
index 0000000000..cf1e160613
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/take_while.rs
@@ -0,0 +1,79 @@
+use crate::stream::Stream;
+
+use core::fmt;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`take_while`](super::StreamExt::take_while) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TakeWhile<St, F> {
+ #[pin]
+ stream: St,
+ predicate: F,
+ done: bool,
+ }
+}
+
+impl<St, F> fmt::Debug for TakeWhile<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeWhile")
+ .field("stream", &self.stream)
+ .field("done", &self.done)
+ .finish()
+ }
+}
+
+impl<St, F> TakeWhile<St, F> {
+ pub(super) fn new(stream: St, predicate: F) -> Self {
+ Self {
+ stream,
+ predicate,
+ done: false,
+ }
+ }
+}
+
+impl<St, F> Stream for TakeWhile<St, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> bool,
+{
+ type Item = St::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
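+        // Yield items while the predicate holds; the first failure (or the
+        // end of the inner stream) latches `done`.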
+ if !*self.as_mut().project().done {
+ self.as_mut().project().stream.poll_next(cx).map(|ready| {
+ let ready = ready.and_then(|item| {
+ if !(self.as_mut().project().predicate)(&item) {
+ None
+ } else {
+ Some(item)
+ }
+ });
+
+ if ready.is_none() {
+ *self.as_mut().project().done = true;
+ }
+
+ ready
+ })
+ } else {
+ Poll::Ready(None)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done {
+ return (0, Some(0));
+ }
+
+ let (_, upper) = self.stream.size_hint();
+
+ (0, upper)
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/timeout.rs b/third_party/rust/tokio/src/stream/timeout.rs
new file mode 100644
index 0000000000..b8a2024f6a
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/timeout.rs
@@ -0,0 +1,65 @@
+use crate::stream::{Fuse, Stream};
+use crate::time::{Delay, Elapsed, Instant};
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+use std::time::Duration;
+
+pin_project! {
+ /// Stream returned by the [`timeout`](super::StreamExt::timeout) method.
+ #[must_use = "streams do nothing unless polled"]
+ #[derive(Debug)]
+ pub struct Timeout<S> {
+ #[pin]
+ stream: Fuse<S>,
+ deadline: Delay,
+ duration: Duration,
+ poll_deadline: bool,
+ }
+}
+
+impl<S: Stream> Timeout<S> {
+ pub(super) fn new(stream: S, duration: Duration) -> Self {
+ let next = Instant::now() + duration;
+ let deadline = Delay::new_timeout(next, duration);
+
+ Timeout {
+ stream: Fuse::new(stream),
+ deadline,
+ duration,
+ poll_deadline: true,
+ }
+ }
+}
+
+impl<S: Stream> Stream for Timeout<S> {
+ type Item = Result<S::Item, Elapsed>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
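+        // Poll the underlying stream first; every yielded item re-arms the
+        // deadline for the next one.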
+ match self.as_mut().project().stream.poll_next(cx) {
+ Poll::Ready(v) => {
+ if v.is_some() {
+ let next = Instant::now() + self.duration;
+ self.as_mut().project().deadline.reset(next);
+ *self.as_mut().project().poll_deadline = true;
+ }
+ return Poll::Ready(v.map(Ok));
+ }
+ Poll::Pending => {}
+ };
+
+ if self.poll_deadline {
+ ready!(Pin::new(self.as_mut().project().deadline).poll(cx));
+ *self.as_mut().project().poll_deadline = false;
+ return Poll::Ready(Some(Err(Elapsed::new())));
+ }
+
+ Poll::Pending
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
diff --git a/third_party/rust/tokio/src/stream/try_next.rs b/third_party/rust/tokio/src/stream/try_next.rs
new file mode 100644
index 0000000000..59e0eb1a41
--- /dev/null
+++ b/third_party/rust/tokio/src/stream/try_next.rs
@@ -0,0 +1,30 @@
+use crate::stream::{Next, Stream};
+
+use core::future::Future;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// Future for the [`try_next`](super::StreamExt::try_next) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct TryNext<'a, St: ?Sized> {
+ inner: Next<'a, St>,
+}
+
+impl<St: ?Sized + Unpin> Unpin for TryNext<'_, St> {}
+
+impl<'a, St: ?Sized> TryNext<'a, St> {
+ pub(super) fn new(stream: &'a mut St) -> Self {
+ Self {
+ inner: Next::new(stream),
+ }
+ }
+}
+
+impl<T, E, St: ?Sized + Stream<Item = Result<T, E>> + Unpin> Future for TryNext<'_, St> {
+ type Output = Result<Option<T>, E>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
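+        // `Next` yields `Option<Result<T, E>>`; `transpose` converts it to
+        // `Result<Option<T>, E>`.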
+ Pin::new(&mut self.inner).poll(cx).map(Option::transpose)
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/barrier.rs b/third_party/rust/tokio/src/sync/barrier.rs
new file mode 100644
index 0000000000..628633493a
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/barrier.rs
@@ -0,0 +1,136 @@
+use crate::sync::watch;
+
+use std::sync::Mutex;
+
+/// A barrier enables multiple threads to synchronize the beginning of some computation.
+///
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use tokio::sync::Barrier;
+///
+/// use futures::future::join_all;
+/// use std::sync::Arc;
+///
+/// let mut handles = Vec::with_capacity(10);
+/// let barrier = Arc::new(Barrier::new(10));
+/// for _ in 0..10 {
+/// let c = barrier.clone();
+/// // The same messages will be printed together.
+/// // You will NOT see any interleaving.
+/// handles.push(async move {
+/// println!("before wait");
+/// let wr = c.wait().await;
+/// println!("after wait");
+/// wr
+/// });
+/// }
+/// // Will not resolve until all "before wait" messages have been printed
+/// let wrs = join_all(handles).await;
+/// // Exactly one barrier will resolve as the "leader"
+/// assert_eq!(wrs.into_iter().filter(|wr| wr.is_leader()).count(), 1);
+/// # }
+/// ```
+#[derive(Debug)]
+pub struct Barrier {
+ state: Mutex<BarrierState>,
+ wait: watch::Receiver<usize>,
+ n: usize,
+}
+
+#[derive(Debug)]
+struct BarrierState {
+ waker: watch::Sender<usize>,
+ arrived: usize,
+ generation: usize,
+}
+
+impl Barrier {
+ /// Creates a new barrier that can block a given number of threads.
+ ///
+    /// A barrier will block `n - 1` threads which call [`Barrier::wait`] and then wake up all
+    /// threads at once when the `n`th thread calls `wait`.
+ pub fn new(mut n: usize) -> Barrier {
+ let (waker, wait) = crate::sync::watch::channel(0);
+
+ if n == 0 {
+ // if n is 0, it's not clear what behavior the user wants.
+ // in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every
+ // .wait() immediately unblocks, so we adopt that here as well.
+ n = 1;
+ }
+
+ Barrier {
+ state: Mutex::new(BarrierState {
+ waker,
+ arrived: 0,
+ generation: 1,
+ }),
+ n,
+ wait,
+ }
+ }
+
+ /// Does not resolve until all tasks have rendezvoused here.
+ ///
+    /// Barriers are reusable after all threads have rendezvoused once, and can
+ /// be used continuously.
+ ///
+    /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from
+    /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other futures
+    /// will receive a result that returns `false` from `is_leader`.
+ pub async fn wait(&self) -> BarrierWaitResult {
+ // NOTE: we are taking a _synchronous_ lock here.
+ // It is okay to do so because the critical section is fast and never yields, so it cannot
+ // deadlock even if another future is concurrently holding the lock.
+        // It is _desirable_ to do so as synchronous mutexes are, at least in theory, faster than
+        // their asynchronous counterparts, so we should use them where possible [citation needed].
+ // NOTE: the extra scope here is so that the compiler doesn't think `state` is held across
+ // a yield point, and thus marks the returned future as !Send.
+ let generation = {
+ let mut state = self.state.lock().unwrap();
+ let generation = state.generation;
+ state.arrived += 1;
+ if state.arrived == self.n {
+ // we are the leader for this generation
+ // wake everyone, increment the generation, and return
+ state
+ .waker
+ .broadcast(state.generation)
+ .expect("there is at least one receiver");
+ state.arrived = 0;
+ state.generation += 1;
+ return BarrierWaitResult(true);
+ }
+
+ generation
+ };
+
+ // we're going to have to wait for the last of the generation to arrive
+ let mut wait = self.wait.clone();
+
+ loop {
+ // note that the first time through the loop, this _will_ yield a generation
+ // immediately, since we cloned a receiver that has never seen any values.
+ if wait.recv().await.expect("sender hasn't been closed") >= generation {
+ break;
+ }
+ }
+
+ BarrierWaitResult(false)
+ }
+}
+
+/// A `BarrierWaitResult` is returned by `wait` when all threads in the `Barrier` have rendezvoused.
+#[derive(Debug, Clone)]
+pub struct BarrierWaitResult(bool);
+
+impl BarrierWaitResult {
+    /// Returns `true` if this thread is the "leader thread" for the call to `wait`.
+    ///
+    /// Only one thread will have `true` returned from its result; all other threads will have
+    /// `false` returned.
+ pub fn is_leader(&self) -> bool {
+ self.0
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/batch_semaphore.rs b/third_party/rust/tokio/src/sync/batch_semaphore.rs
new file mode 100644
index 0000000000..436737a670
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/batch_semaphore.rs
@@ -0,0 +1,547 @@
+//! # Implementation Details
+//!
+//! The semaphore is implemented using an intrusive linked list of waiters. An
+//! atomic counter tracks the number of available permits. If the semaphore does
+//! not contain the required number of permits, the task attempting to acquire
+//! permits places its waker at the end of a queue. When new permits are made
+//! available (such as by releasing an initial acquisition), they are assigned
+//! to the task at the front of the queue, waking that task if its requested
+//! number of permits is met.
+//!
+//! Because waiters are enqueued at the back of the linked list and dequeued
+//! from the front, the semaphore is fair. Tasks trying to acquire large numbers
+//! of permits at a time will always be woken eventually, even if many other
+//! tasks are acquiring smaller numbers of permits. This means that in a
+//! use-case like tokio's read-write lock, writers will not be starved by
+//! readers.
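+//!
+//! As a sketch of how the crate-internal API fits together (marked `ignore`
+//! since the type is not public; the permit counts are illustrative):
+//!
+//! ```ignore
+//! let semaphore = Semaphore::new(1);
+//!
+//! // One permit is available, so this succeeds immediately.
+//! assert!(semaphore.try_acquire(1).is_ok());
+//!
+//! // No permits remain, so this fails without queueing a waiter.
+//! assert!(semaphore.try_acquire(1).is_err());
+//!
+//! // Returning the permit makes it available to the next caller.
+//! semaphore.release(1);
+//! assert!(semaphore.try_acquire(1).is_ok());
+//! ```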
+use crate::loom::cell::UnsafeCell;
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::{Mutex, MutexGuard};
+use crate::util::linked_list::{self, LinkedList};
+
+use std::future::Future;
+use std::marker::PhantomPinned;
+use std::pin::Pin;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::*;
+use std::task::Poll::*;
+use std::task::{Context, Poll, Waker};
+use std::{cmp, fmt};
+
+/// An asynchronous counting semaphore which permits waiting on multiple permits at once.
+pub(crate) struct Semaphore {
+ waiters: Mutex<Waitlist>,
+ /// The current number of available permits in the semaphore.
+ permits: AtomicUsize,
+}
+
+struct Waitlist {
+ queue: LinkedList<Waiter>,
+ closed: bool,
+}
+
+/// Error returned by `Semaphore::try_acquire`.
+#[derive(Debug)]
+pub(crate) enum TryAcquireError {
+ Closed,
+ NoPermits,
+}
+/// Error returned by `Semaphore::acquire`.
+#[derive(Debug)]
+pub(crate) struct AcquireError(());
+
+pub(crate) struct Acquire<'a> {
+ node: Waiter,
+ semaphore: &'a Semaphore,
+ num_permits: u16,
+ queued: bool,
+}
+
+/// An entry in the wait queue.
+struct Waiter {
+ /// The current state of the waiter.
+ ///
+ /// This is either the number of remaining permits required by
+ /// the waiter, or a flag indicating that the waiter is not yet queued.
+ state: AtomicUsize,
+
+ /// The waker to notify the task awaiting permits.
+ ///
+ /// # Safety
+ ///
+ /// This may only be accessed while the wait queue is locked.
+ waker: UnsafeCell<Option<Waker>>,
+
+ /// Intrusive linked-list pointers.
+ ///
+ /// # Safety
+ ///
+ /// This may only be accessed while the wait queue is locked.
+ ///
+ /// TODO: Ideally, we would be able to use loom to enforce that
+ /// this isn't accessed concurrently. However, it is difficult to
+ /// use a `UnsafeCell` here, since the `Link` trait requires _returning_
+ /// references to `Pointers`, and `UnsafeCell` requires that checked access
+ /// take place inside a closure. We should consider changing `Pointers` to
+ /// use `UnsafeCell` internally.
+ pointers: linked_list::Pointers<Waiter>,
+
+ /// Should not be `Unpin`.
+ _p: PhantomPinned,
+}
+
+impl Semaphore {
+ /// The maximum number of permits which a semaphore can hold.
+ ///
+ /// Note that this reserves three bits of flags in the permit counter, but
+ /// we only actually use one of them. However, the previous semaphore
+ /// implementation used three bits, so we will continue to reserve them to
+    /// avoid a breaking change if additional flags need to be added in the
+ /// future.
+ pub(crate) const MAX_PERMITS: usize = std::usize::MAX >> 3;
+ const CLOSED: usize = 1;
+ const PERMIT_SHIFT: usize = 1;
+
+ /// Creates a new semaphore with the initial number of permits
+ pub(crate) fn new(permits: usize) -> Self {
+ assert!(
+ permits <= Self::MAX_PERMITS,
+ "a semaphore may not have more than MAX_PERMITS permits ({})",
+ Self::MAX_PERMITS
+ );
+ Self {
+ permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT),
+ waiters: Mutex::new(Waitlist {
+ queue: LinkedList::new(),
+ closed: false,
+ }),
+ }
+ }
+
+ /// Returns the current number of available permits
+ pub(crate) fn available_permits(&self) -> usize {
+ self.permits.load(Acquire) >> Self::PERMIT_SHIFT
+ }
+
+    /// Adds `added` new permits to the semaphore.
+ pub(crate) fn release(&self, added: usize) {
+ if added == 0 {
+ return;
+ }
+
+ // Assign permits to the wait queue
+ self.add_permits_locked(added, self.waiters.lock().unwrap());
+ }
+
+ /// Closes the semaphore. This prevents the semaphore from issuing new
+ /// permits and notifies all pending waiters.
+ // This will be used once the bounded MPSC is updated to use the new
+ // semaphore implementation.
+ #[allow(dead_code)]
+ pub(crate) fn close(&self) {
+ let mut waiters = self.waiters.lock().unwrap();
+ // If the semaphore's permits counter has enough permits for an
+ // unqueued waiter to acquire all the permits it needs immediately,
+ // it won't touch the wait list. Therefore, we have to set a bit on
+ // the permit counter as well. However, we must do this while
+ // holding the lock --- otherwise, if we set the bit and then wait
+ // to acquire the lock we'll enter an inconsistent state where the
+ // permit counter is closed, but the wait list is not.
+ self.permits.fetch_or(Self::CLOSED, Release);
+ waiters.closed = true;
+ while let Some(mut waiter) = waiters.queue.pop_back() {
+ let waker = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) };
+ if let Some(waker) = waker {
+ waker.wake();
+ }
+ }
+ }
+
+ pub(crate) fn try_acquire(&self, num_permits: u16) -> Result<(), TryAcquireError> {
+ let mut curr = self.permits.load(Acquire);
+ let num_permits = (num_permits as usize) << Self::PERMIT_SHIFT;
+ loop {
+            // Has the semaphore closed?
+ if curr & Self::CLOSED > 0 {
+ return Err(TryAcquireError::Closed);
+ }
+
+ // Are there enough permits remaining?
+ if curr < num_permits {
+ return Err(TryAcquireError::NoPermits);
+ }
+
+ let next = curr - num_permits;
+
+ match self.permits.compare_exchange(curr, next, AcqRel, Acquire) {
+ Ok(_) => return Ok(()),
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+
+ pub(crate) fn acquire(&self, num_permits: u16) -> Acquire<'_> {
+ Acquire::new(self, num_permits)
+ }
+
+ /// Release `rem` permits to the semaphore's wait list, starting from the
+ /// end of the queue.
+ ///
+ /// If `rem` exceeds the number of permits needed by the wait list, the
+ /// remainder are assigned back to the semaphore.
+ fn add_permits_locked(&self, mut rem: usize, waiters: MutexGuard<'_, Waitlist>) {
+ let mut wakers: [Option<Waker>; 8] = Default::default();
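+        // Collect up to 8 wakers per pass and invoke them only after the lock
+        // has been dropped, bounding how long the wait list stays locked.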
+ let mut lock = Some(waiters);
+ let mut is_empty = false;
+ while rem > 0 {
+ let mut waiters = lock.take().unwrap_or_else(|| self.waiters.lock().unwrap());
+ 'inner: for slot in &mut wakers[..] {
+ // Was the waiter assigned enough permits to wake it?
+ match waiters.queue.last() {
+ Some(waiter) => {
+ if !waiter.assign_permits(&mut rem) {
+ break 'inner;
+ }
+ }
+ None => {
+ is_empty = true;
+ // If we assigned permits to all the waiters in the queue, and there are
+ // still permits left over, assign them back to the semaphore.
+ break 'inner;
+ }
+ };
+ let mut waiter = waiters.queue.pop_back().unwrap();
+ *slot = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) };
+ }
+
+ if rem > 0 && is_empty {
+ let permits = rem << Self::PERMIT_SHIFT;
+ assert!(
+ permits < Self::MAX_PERMITS,
+ "cannot add more than MAX_PERMITS permits ({})",
+ Self::MAX_PERMITS
+ );
+ let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release);
+ assert!(
+ prev + permits <= Self::MAX_PERMITS,
+ "number of added permits ({}) would overflow MAX_PERMITS ({})",
+ rem,
+ Self::MAX_PERMITS
+ );
+ rem = 0;
+ }
+
+ drop(waiters); // release the lock
+
+ wakers
+ .iter_mut()
+ .filter_map(Option::take)
+ .for_each(Waker::wake);
+ }
+
+ assert_eq!(rem, 0);
+ }
+
+ fn poll_acquire(
+ &self,
+ cx: &mut Context<'_>,
+ num_permits: u16,
+ node: Pin<&mut Waiter>,
+ queued: bool,
+ ) -> Poll<Result<(), AcquireError>> {
+ let mut acquired = 0;
+
+ let needed = if queued {
+ node.state.load(Acquire) << Self::PERMIT_SHIFT
+ } else {
+ (num_permits as usize) << Self::PERMIT_SHIFT
+ };
+
+ let mut lock = None;
+ // First, try to take the requested number of permits from the
+ // semaphore.
+ let mut curr = self.permits.load(Acquire);
+ let mut waiters = loop {
+ // Has the semaphore closed?
+ if curr & Self::CLOSED > 0 {
+ return Ready(Err(AcquireError::closed()));
+ }
+
+ let mut remaining = 0;
+ let total = curr
+ .checked_add(acquired)
+ .expect("number of permits must not overflow");
+ let (next, acq) = if total >= needed {
+ let next = curr - (needed - acquired);
+ (next, needed >> Self::PERMIT_SHIFT)
+ } else {
+ remaining = (needed - acquired) - curr;
+ (0, curr >> Self::PERMIT_SHIFT)
+ };
+
+ if remaining > 0 && lock.is_none() {
+                // No permits were immediately available, so this task will
+ // (probably) need to wait. We'll need to acquire a lock on the
+ // wait queue before continuing. We need to do this _before_ the
+ // CAS that sets the new value of the semaphore's `permits`
+ // counter. Otherwise, if we subtract the permits and then
+ // acquire the lock, we might miss additional permits being
+ // added while waiting for the lock.
+ lock = Some(self.waiters.lock().unwrap());
+ }
+
+ match self.permits.compare_exchange(curr, next, AcqRel, Acquire) {
+ Ok(_) => {
+ acquired += acq;
+ if remaining == 0 {
+ if !queued {
+ return Ready(Ok(()));
+ } else if lock.is_none() {
+ break self.waiters.lock().unwrap();
+ }
+ }
+ break lock.expect("lock must be acquired before waiting");
+ }
+ Err(actual) => curr = actual,
+ }
+ };
+
+ if waiters.closed {
+ return Ready(Err(AcquireError::closed()));
+ }
+
+ if node.assign_permits(&mut acquired) {
+ self.add_permits_locked(acquired, waiters);
+ return Ready(Ok(()));
+ }
+
+ assert_eq!(acquired, 0);
+
+ // Otherwise, register the waker & enqueue the node.
+ node.waker.with_mut(|waker| {
+ // Safety: the wait list is locked, so we may modify the waker.
+ let waker = unsafe { &mut *waker };
+ // Do we need to register the new waker?
+ if waker
+ .as_ref()
+ .map(|waker| !waker.will_wake(cx.waker()))
+ .unwrap_or(true)
+ {
+ *waker = Some(cx.waker().clone());
+ }
+ });
+
+ // If the waiter is not already in the wait queue, enqueue it.
+ if !queued {
+ let node = unsafe {
+ let node = Pin::into_inner_unchecked(node) as *mut _;
+ NonNull::new_unchecked(node)
+ };
+
+ waiters.queue.push_front(node);
+ }
+
+ Pending
+ }
+}
+
+impl fmt::Debug for Semaphore {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Semaphore")
+ .field("permits", &self.permits.load(Relaxed))
+ .finish()
+ }
+}
+
+impl Waiter {
+ fn new(num_permits: u16) -> Self {
+ Waiter {
+ waker: UnsafeCell::new(None),
+ state: AtomicUsize::new(num_permits as usize),
+ pointers: linked_list::Pointers::new(),
+ _p: PhantomPinned,
+ }
+ }
+
+ /// Assign permits to the waiter.
+ ///
+ /// Returns `true` if the waiter should be removed from the queue
+ fn assign_permits(&self, n: &mut usize) -> bool {
+ let mut curr = self.state.load(Acquire);
+ loop {
+ let assign = cmp::min(curr, *n);
+ let next = curr - assign;
+ match self.state.compare_exchange(curr, next, AcqRel, Acquire) {
+ Ok(_) => {
+ *n -= assign;
+ return next == 0;
+ }
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+}
+
+impl Future for Acquire<'_> {
+ type Output = Result<(), AcquireError>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let (node, semaphore, needed, queued) = self.project();
+ match semaphore.poll_acquire(cx, needed, node, *queued) {
+ Pending => {
+ *queued = true;
+ Pending
+ }
+ Ready(r) => {
+ r?;
+ *queued = false;
+ Ready(Ok(()))
+ }
+ }
+ }
+}
+
+impl<'a> Acquire<'a> {
+ fn new(semaphore: &'a Semaphore, num_permits: u16) -> Self {
+ Self {
+ node: Waiter::new(num_permits),
+ semaphore,
+ num_permits,
+ queued: false,
+ }
+ }
+
+ fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, u16, &mut bool) {
+ fn is_unpin<T: Unpin>() {}
+ unsafe {
+ // Safety: all fields other than `node` are `Unpin`
+
+ is_unpin::<&Semaphore>();
+ is_unpin::<&mut bool>();
+ is_unpin::<u16>();
+
+ let this = self.get_unchecked_mut();
+ (
+ Pin::new_unchecked(&mut this.node),
+ &this.semaphore,
+ this.num_permits,
+ &mut this.queued,
+ )
+ }
+ }
+}
+
+impl Drop for Acquire<'_> {
+ fn drop(&mut self) {
+ // If the future is completed, there is no node in the wait list, so we
+ // can skip acquiring the lock.
+ if !self.queued {
+ return;
+ }
+
+ // This is where we ensure safety. The future is being dropped,
+ // which means we must ensure that the waiter entry is no longer stored
+ // in the linked list.
+ let mut waiters = match self.semaphore.waiters.lock() {
+ Ok(lock) => lock,
+ // Removing the node from the linked list is necessary to ensure
+ // safety. Even if the lock was poisoned, we need to make sure it is
+ // removed from the linked list before dropping it --- otherwise,
+ // the list will contain a dangling pointer to this node.
+ Err(e) => e.into_inner(),
+ };
+
+ // remove the entry from the list
+ let node = NonNull::from(&mut self.node);
+ // Safety: we have locked the wait list.
+ unsafe { waiters.queue.remove(node) };
+
+ let acquired_permits = self.num_permits as usize - self.node.state.load(Acquire);
+ if acquired_permits > 0 {
+ self.semaphore.add_permits_locked(acquired_permits, waiters);
+ }
+ }
+}
+
+// Safety: the `Acquire` future is not `Sync` automatically because it contains
+// a `Waiter`, which, in turn, contains an `UnsafeCell`. However, the
+// `UnsafeCell` is only accessed when the future is borrowed mutably (either in
+// `poll` or in `drop`). Therefore, it is safe (although not particularly
+// _useful_) for the future to be borrowed immutably across threads.
+unsafe impl Sync for Acquire<'_> {}
+
+// ===== impl AcquireError ====
+
+impl AcquireError {
+ fn closed() -> AcquireError {
+ AcquireError(())
+ }
+}
+
+impl fmt::Display for AcquireError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "semaphore closed")
+ }
+}
+
+impl std::error::Error for AcquireError {}
+
+// ===== impl TryAcquireError =====
+
+impl TryAcquireError {
+ /// Returns `true` if the error was caused by a closed semaphore.
+ #[allow(dead_code)] // may be used later!
+ pub(crate) fn is_closed(&self) -> bool {
+ match self {
+ TryAcquireError::Closed => true,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if the error was caused by calling `try_acquire` on a
+ /// semaphore with no available permits.
+ #[allow(dead_code)] // may be used later!
+ pub(crate) fn is_no_permits(&self) -> bool {
+ match self {
+ TryAcquireError::NoPermits => true,
+ _ => false,
+ }
+ }
+}
+
+impl fmt::Display for TryAcquireError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+            TryAcquireError::Closed => write!(fmt, "semaphore closed"),
+            TryAcquireError::NoPermits => write!(fmt, "no permits available"),
+ }
+ }
+}
+
+impl std::error::Error for TryAcquireError {}
+
+/// # Safety
+///
+/// `Waiter` is forced to be !Unpin.
+unsafe impl linked_list::Link for Waiter {
+ // XXX: ideally, we would be able to use `Pin` here, to enforce the
+ // invariant that list entries may not move while in the list. However, we
+ // can't do this currently, as using `Pin<&'a mut Waiter>` as the `Handle`
+ // type would require `Semaphore` to be generic over a lifetime. We can't
+ // use `Pin<*mut Waiter>`, as raw pointers are `Unpin` regardless of whether
+ // or not they dereference to an `!Unpin` target.
+ type Handle = NonNull<Waiter>;
+ type Target = Waiter;
+
+ fn as_raw(handle: &Self::Handle) -> NonNull<Waiter> {
+ *handle
+ }
+
+ unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> {
+ ptr
+ }
+
+ unsafe fn pointers(mut target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
+ NonNull::from(&mut target.as_mut().pointers)
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/broadcast.rs b/third_party/rust/tokio/src/sync/broadcast.rs
new file mode 100644
index 0000000000..05a58070ee
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/broadcast.rs
@@ -0,0 +1,1046 @@
+//! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by
+//! all consumers.
+//!
+//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`]
+//! values. [`Sender`] handles are clone-able, allowing concurrent send and
+//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as
+//! long as `T` is also `Send` or `Sync` respectively.
+//!
+//! When a value is sent, **all** [`Receiver`] handles are notified and will
+//! receive the value. The value is stored once inside the channel and cloned on
+//! demand for each receiver. Once all receivers have received a clone of the
+//! value, the value is released from the channel.
+//!
+//! A channel is created by calling [`channel`], specifying the maximum number
+//! of messages the channel can retain at any given time.
+//!
+//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The
+//! returned [`Receiver`] will receive values sent **after** the call to
+//! `subscribe`.
+//!
+//! ## Lagging
+//!
+//! As sent messages must be retained until **all** [`Receiver`] handles receive
+//! a clone, broadcast channels are susceptible to the "slow receiver" problem.
+//! In this case, all but one receiver are able to receive values at the rate
+//! they are sent. Because one receiver is stalled, the channel starts to fill
+//! up.
+//!
+//! This broadcast channel implementation handles this case by setting a hard
+//! upper bound on the number of values the channel may retain at any given
+//! time. This upper bound is passed to the [`channel`] function as an argument.
+//!
+//! If a value is sent when the channel is at capacity, the oldest value
+//! currently held by the channel is released. This frees up space for the new
+//! value. Any receiver that has not yet seen the released value will return
+//! [`RecvError::Lagged`] the next time [`recv`] is called.
+//!
+//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is
+//! updated to the oldest value contained by the channel. The next call to
+//! [`recv`] will return this value.
+//!
+//! This behavior enables a receiver to detect when it has lagged so far behind
+//! that data has been dropped. The caller may decide how to respond to this:
+//! either by aborting its task or by tolerating lost messages and resuming
+//! consumption of the channel.
+//!
+//! ## Closing
+//!
+//! When **all** [`Sender`] handles have been dropped, no new values may be
+//! sent. At this point, the channel is "closed". Once a receiver has received
+//! all values retained by the channel, the next call to [`recv`] will return
+//! with [`RecvError::Closed`].
+//!
+//! [`Sender`]: crate::sync::broadcast::Sender
+//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
+//! [`Receiver`]: crate::sync::broadcast::Receiver
+//! [`channel`]: crate::sync::broadcast::channel
+//! [`RecvError::Lagged`]: crate::sync::broadcast::RecvError::Lagged
+//! [`RecvError::Closed`]: crate::sync::broadcast::RecvError::Closed
+//! [`recv`]: crate::sync::broadcast::Receiver::recv
+//!
+//! # Examples
+//!
+//! Basic usage
+//!
+//! ```
+//! use tokio::sync::broadcast;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, mut rx1) = broadcast::channel(16);
+//! let mut rx2 = tx.subscribe();
+//!
+//! tokio::spawn(async move {
+//! assert_eq!(rx1.recv().await.unwrap(), 10);
+//! assert_eq!(rx1.recv().await.unwrap(), 20);
+//! });
+//!
+//! tokio::spawn(async move {
+//! assert_eq!(rx2.recv().await.unwrap(), 10);
+//! assert_eq!(rx2.recv().await.unwrap(), 20);
+//! });
+//!
+//! tx.send(10).unwrap();
+//! tx.send(20).unwrap();
+//! }
+//! ```
+//!
+//! Handling lag
+//!
+//! ```
+//! use tokio::sync::broadcast;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, mut rx) = broadcast::channel(2);
+//!
+//! tx.send(10).unwrap();
+//! tx.send(20).unwrap();
+//! tx.send(30).unwrap();
+//!
+//! // The receiver lagged behind
+//! assert!(rx.recv().await.is_err());
+//!
+//! // At this point, we can abort or continue with lost messages
+//!
+//! assert_eq!(20, rx.recv().await.unwrap());
+//! assert_eq!(30, rx.recv().await.unwrap());
+//! }
+//! ```
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::{spin_loop_hint, AtomicBool, AtomicPtr, AtomicUsize};
+use crate::loom::sync::{Arc, Condvar, Mutex};
+
+use std::fmt;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::Ordering::SeqCst;
+use std::task::{Context, Poll, Waker};
+use std::usize;
+
+/// Sending-half of the [`broadcast`] channel.
+///
+/// May be used from many threads. Messages can be sent with
+/// [`send`][Sender::send].
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, mut rx1) = broadcast::channel(16);
+/// let mut rx2 = tx.subscribe();
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx1.recv().await.unwrap(), 10);
+/// assert_eq!(rx1.recv().await.unwrap(), 20);
+/// });
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx2.recv().await.unwrap(), 10);
+/// assert_eq!(rx2.recv().await.unwrap(), 20);
+/// });
+///
+/// tx.send(10).unwrap();
+/// tx.send(20).unwrap();
+/// }
+/// ```
+///
+/// [`broadcast`]: crate::sync::broadcast
+pub struct Sender<T> {
+ shared: Arc<Shared<T>>,
+}
+
+/// Receiving-half of the [`broadcast`] channel.
+///
+/// Must not be used concurrently. Messages may be retrieved using
+/// [`recv`][Receiver::recv].
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, mut rx1) = broadcast::channel(16);
+/// let mut rx2 = tx.subscribe();
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx1.recv().await.unwrap(), 10);
+/// assert_eq!(rx1.recv().await.unwrap(), 20);
+/// });
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx2.recv().await.unwrap(), 10);
+/// assert_eq!(rx2.recv().await.unwrap(), 20);
+/// });
+///
+/// tx.send(10).unwrap();
+/// tx.send(20).unwrap();
+/// }
+/// ```
+///
+/// [`broadcast`]: crate::sync::broadcast
+pub struct Receiver<T> {
+ /// State shared with all receivers and senders.
+ shared: Arc<Shared<T>>,
+
+ /// Next position to read from
+ next: u64,
+
+ /// Waiter state
+ wait: Arc<WaitNode>,
+}
+
+/// Error returned by [`Sender::send`][Sender::send].
+///
+/// A **send** operation can only fail if there are no active receivers,
+/// implying that the message could never be received. The error contains the
+/// message being sent as a payload so it can be recovered.
+#[derive(Debug)]
+pub struct SendError<T>(pub T);
+
+/// An error returned from the [`recv`] function on a [`Receiver`].
+///
+/// [`recv`]: crate::sync::broadcast::Receiver::recv
+/// [`Receiver`]: crate::sync::broadcast::Receiver
+#[derive(Debug, PartialEq)]
+pub enum RecvError {
+ /// There are no more active senders implying no further messages will ever
+ /// be sent.
+ Closed,
+
+ /// The receiver lagged too far behind. Attempting to receive again will
+ /// return the oldest message still retained by the channel.
+ ///
+ /// Includes the number of skipped messages.
+ Lagged(u64),
+}
+
+/// An error returned from the [`try_recv`] function on a [`Receiver`].
+///
+/// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
+/// [`Receiver`]: crate::sync::broadcast::Receiver
+#[derive(Debug, PartialEq)]
+pub enum TryRecvError {
+ /// The channel is currently empty. There are still active
+ /// [`Sender`][Sender] handles, so data may yet become available.
+ Empty,
+
+ /// There are no more active senders implying no further messages will ever
+ /// be sent.
+ Closed,
+
+ /// The receiver lagged too far behind and has been forcibly disconnected.
+ /// Attempting to receive again will return the oldest message still
+ /// retained by the channel.
+ ///
+ /// Includes the number of skipped messages.
+ Lagged(u64),
+}
+
+/// Data shared between senders and receivers
+struct Shared<T> {
+ /// slots in the channel
+ buffer: Box<[Slot<T>]>,
+
+ /// Mask a position -> index
+ mask: usize,
+
+ /// Tail of the queue
+ tail: Mutex<Tail>,
+
+ /// Notifies a sender that the slot is unlocked
+ condvar: Condvar,
+
+ /// Stack of pending waiters
+ wait_stack: AtomicPtr<WaitNode>,
+
+ /// Number of outstanding Sender handles
+ num_tx: AtomicUsize,
+}
+
+/// Next position to write a value
+struct Tail {
+ /// Next position to write to
+ pos: u64,
+
+ /// Number of active receivers
+ rx_cnt: usize,
+}
+
+/// Slot in the buffer
+struct Slot<T> {
+ /// Remaining number of receivers that are expected to see this value.
+ ///
+ /// When this goes to zero, the value is released.
+ rem: AtomicUsize,
+
+ /// Used to lock the `write` field.
+ lock: AtomicUsize,
+
+ /// The value being broadcast
+ ///
+ /// Synchronized by `state`
+ write: Write<T>,
+}
+
+/// A write in the buffer
+struct Write<T> {
+ /// Uniquely identifies this write
+ pos: UnsafeCell<u64>,
+
+ /// The written value
+ val: UnsafeCell<Option<T>>,
+}
+
+/// Tracks a waiting receiver
+#[derive(Debug)]
+struct WaitNode {
+ /// `true` if queued
+ queued: AtomicBool,
+
+ /// Task to wake when a permit is made available.
+ waker: AtomicWaker,
+
+ /// Next pointer in the stack of waiting senders.
+ next: UnsafeCell<*const WaitNode>,
+}
+
+struct RecvGuard<'a, T> {
+ slot: &'a Slot<T>,
+ tail: &'a Mutex<Tail>,
+ condvar: &'a Condvar,
+}
+
+/// Max number of receivers. Reserve space to lock.
+const MAX_RECEIVERS: usize = usize::MAX >> 1;
+
+/// Create a bounded, multi-producer, multi-consumer channel where each sent
+/// value is broadcast to all active receivers.
+///
+/// All data sent on [`Sender`] will become available on every active
+/// [`Receiver`] in the same order as it was sent.
+///
+/// The `Sender` can be cloned to `send` to the same channel from multiple
+/// points in the process or it can be used concurrently from an `Arc`. New
+/// `Receiver` handles are created by calling [`Sender::subscribe`].
+///
+/// If all [`Receiver`] handles are dropped, the `send` method will return a
+/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`]
+/// method will return a [`RecvError`].
+///
+/// [`Sender`]: crate::sync::broadcast::Sender
+/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
+/// [`Receiver`]: crate::sync::broadcast::Receiver
+/// [`recv`]: crate::sync::broadcast::Receiver::recv
+/// [`SendError`]: crate::sync::broadcast::SendError
+/// [`RecvError`]: crate::sync::broadcast::RecvError
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::broadcast;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, mut rx1) = broadcast::channel(16);
+/// let mut rx2 = tx.subscribe();
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx1.recv().await.unwrap(), 10);
+/// assert_eq!(rx1.recv().await.unwrap(), 20);
+/// });
+///
+/// tokio::spawn(async move {
+/// assert_eq!(rx2.recv().await.unwrap(), 10);
+/// assert_eq!(rx2.recv().await.unwrap(), 20);
+/// });
+///
+/// tx.send(10).unwrap();
+/// tx.send(20).unwrap();
+/// }
+/// ```
+pub fn channel<T>(mut capacity: usize) -> (Sender<T>, Receiver<T>) {
+    assert!(capacity > 0, "capacity cannot be zero");
+ assert!(capacity <= usize::MAX >> 1, "requested capacity too large");
+
+ // Round to a power of two
+ capacity = capacity.next_power_of_two();
+
+ let mut buffer = Vec::with_capacity(capacity);
+
+ for i in 0..capacity {
+ buffer.push(Slot {
+ rem: AtomicUsize::new(0),
+ lock: AtomicUsize::new(0),
+ write: Write {
+ pos: UnsafeCell::new((i as u64).wrapping_sub(capacity as u64)),
+ val: UnsafeCell::new(None),
+ },
+ });
+ }
+
+ let shared = Arc::new(Shared {
+ buffer: buffer.into_boxed_slice(),
+ mask: capacity - 1,
+ tail: Mutex::new(Tail { pos: 0, rx_cnt: 1 }),
+ condvar: Condvar::new(),
+ wait_stack: AtomicPtr::new(ptr::null_mut()),
+ num_tx: AtomicUsize::new(1),
+ });
+
+ let rx = Receiver {
+ shared: shared.clone(),
+ next: 0,
+ wait: Arc::new(WaitNode {
+ queued: AtomicBool::new(false),
+ waker: AtomicWaker::new(),
+ next: UnsafeCell::new(ptr::null()),
+ }),
+ };
+
+ let tx = Sender { shared };
+
+ (tx, rx)
+}
+
+unsafe impl<T: Send> Send for Sender<T> {}
+unsafe impl<T: Send> Sync for Sender<T> {}
+
+unsafe impl<T: Send> Send for Receiver<T> {}
+unsafe impl<T: Send> Sync for Receiver<T> {}
+
+impl<T> Sender<T> {
+    /// Attempts to send a value to all active [`Receiver`] handles, returning
+    /// it if it could not be sent.
+ ///
+ /// A successful send occurs when there is at least one active [`Receiver`]
+ /// handle. An unsuccessful send would be one where all associated
+ /// [`Receiver`] handles have already been dropped.
+ ///
+ /// # Return
+ ///
+ /// On success, the number of subscribed [`Receiver`] handles is returned.
+ /// This does not mean that this number of receivers will see the message as
+ /// a receiver may drop before receiving the message.
+ ///
+ /// # Note
+ ///
+ /// A return value of `Ok` **does not** mean that the sent value will be
+ /// observed by all or any of the active [`Receiver`] handles. [`Receiver`]
+ /// handles may be dropped before receiving the sent message.
+ ///
+ /// A return value of `Err` **does not** mean that future calls to `send`
+ /// will fail. New [`Receiver`] handles may be created by calling
+ /// [`subscribe`].
+ ///
+ /// [`Receiver`]: crate::sync::broadcast::Receiver
+ /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx1) = broadcast::channel(16);
+ /// let mut rx2 = tx.subscribe();
+ ///
+ /// tokio::spawn(async move {
+ /// assert_eq!(rx1.recv().await.unwrap(), 10);
+ /// assert_eq!(rx1.recv().await.unwrap(), 20);
+ /// });
+ ///
+ /// tokio::spawn(async move {
+ /// assert_eq!(rx2.recv().await.unwrap(), 10);
+ /// assert_eq!(rx2.recv().await.unwrap(), 20);
+ /// });
+ ///
+ /// tx.send(10).unwrap();
+ /// tx.send(20).unwrap();
+ /// }
+ /// ```
+ pub fn send(&self, value: T) -> Result<usize, SendError<T>> {
+ self.send2(Some(value))
+ .map_err(|SendError(maybe_v)| SendError(maybe_v.unwrap()))
+ }
+
+ /// Creates a new [`Receiver`] handle that will receive values sent **after**
+ /// this call to `subscribe`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, _rx) = broadcast::channel(16);
+ ///
+ /// // Will not be seen
+ /// tx.send(10).unwrap();
+ ///
+ /// let mut rx = tx.subscribe();
+ ///
+ /// tx.send(20).unwrap();
+ ///
+ /// let value = rx.recv().await.unwrap();
+ /// assert_eq!(20, value);
+ /// }
+ /// ```
+ pub fn subscribe(&self) -> Receiver<T> {
+ let shared = self.shared.clone();
+
+ let mut tail = shared.tail.lock().unwrap();
+
+ if tail.rx_cnt == MAX_RECEIVERS {
+ panic!("max receivers");
+ }
+
+ tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow");
+ let next = tail.pos;
+
+ drop(tail);
+
+ Receiver {
+ shared,
+ next,
+ wait: Arc::new(WaitNode {
+ queued: AtomicBool::new(false),
+ waker: AtomicWaker::new(),
+ next: UnsafeCell::new(ptr::null()),
+ }),
+ }
+ }
+
+ /// Returns the number of active receivers
+ ///
+ /// An active receiver is a [`Receiver`] handle returned from [`channel`] or
+ /// [`subscribe`]. These are the handles that will receive values sent on
+ /// this [`Sender`].
+ ///
+ /// # Note
+ ///
+ /// It is not guaranteed that a sent message will reach this number of
+ /// receivers. Active receivers may never call [`recv`] again before
+ /// dropping.
+ ///
+ /// [`recv`]: crate::sync::broadcast::Receiver::recv
+ /// [`Receiver`]: crate::sync::broadcast::Receiver
+ /// [`Sender`]: crate::sync::broadcast::Sender
+ /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
+ /// [`channel`]: crate::sync::broadcast::channel
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, _rx1) = broadcast::channel(16);
+ ///
+ /// assert_eq!(1, tx.receiver_count());
+ ///
+ /// let mut _rx2 = tx.subscribe();
+ ///
+ /// assert_eq!(2, tx.receiver_count());
+ ///
+ /// tx.send(10).unwrap();
+ /// }
+ /// ```
+ pub fn receiver_count(&self) -> usize {
+ let tail = self.shared.tail.lock().unwrap();
+ tail.rx_cnt
+ }
+
+ fn send2(&self, value: Option<T>) -> Result<usize, SendError<Option<T>>> {
+ let mut tail = self.shared.tail.lock().unwrap();
+
+ if tail.rx_cnt == 0 {
+ return Err(SendError(value));
+ }
+
+ // Position to write into
+ let pos = tail.pos;
+ let rem = tail.rx_cnt;
+ let idx = (pos & self.shared.mask as u64) as usize;
+
+ // Update the tail position
+ tail.pos = tail.pos.wrapping_add(1);
+
+ // Get the slot
+ let slot = &self.shared.buffer[idx];
+
+ // Acquire the write lock
+ let mut prev = slot.lock.fetch_or(1, SeqCst);
+
+ while prev & !1 != 0 {
+ // Concurrent readers, we must go to sleep
+ tail = self.shared.condvar.wait(tail).unwrap();
+
+ prev = slot.lock.load(SeqCst);
+
+ if prev & 1 == 0 {
+ // The writer lock bit was cleared while this thread was
+ // sleeping. This can only happen if a newer write happened on
+ // this slot by another thread. Bail early as an optimization,
+ // there is nothing left to do.
+ return Ok(rem);
+ }
+ }
+
+ if tail.pos.wrapping_sub(pos) > self.shared.buffer.len() as u64 {
+ // There is a newer pending write to the same slot.
+ return Ok(rem);
+ }
+
+ // Slot lock acquired
+ slot.write.pos.with_mut(|ptr| unsafe { *ptr = pos });
+ slot.write.val.with_mut(|ptr| unsafe { *ptr = value });
+
+ // Set remaining receivers
+ slot.rem.store(rem, SeqCst);
+
+ // Release the slot lock
+ slot.lock.store(0, SeqCst);
+
+ // Release the mutex. This must happen after the slot lock is released,
+ // otherwise the writer lock bit could be cleared while another thread
+ // is in the critical section.
+ drop(tail);
+
+ // Notify waiting receivers
+ self.notify_rx();
+
+ Ok(rem)
+ }
+
+ fn notify_rx(&self) {
+ let mut curr = self.shared.wait_stack.swap(ptr::null_mut(), SeqCst) as *const WaitNode;
+
+ while !curr.is_null() {
+ let waiter = unsafe { Arc::from_raw(curr) };
+
+ // Update `curr` before toggling `queued` and waking
+ curr = waiter.next.with(|ptr| unsafe { *ptr });
+
+ // Unset queued
+ waiter.queued.store(false, SeqCst);
+
+ // Wake
+ waiter.waker.wake();
+ }
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ let shared = self.shared.clone();
+ shared.num_tx.fetch_add(1, SeqCst);
+
+ Sender { shared }
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ if 1 == self.shared.num_tx.fetch_sub(1, SeqCst) {
+ let _ = self.send2(None);
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ /// Locks the next value if there is one.
+ ///
+ /// The caller is responsible for unlocking the slot.
+ fn recv_ref(&mut self, spin: bool) -> Result<RecvGuard<'_, T>, TryRecvError> {
+ let idx = (self.next & self.shared.mask as u64) as usize;
+
+ // The slot holding the next value to read
+ let slot = &self.shared.buffer[idx];
+
+ // Lock the slot
+ if !slot.try_rx_lock() {
+ if spin {
+ while !slot.try_rx_lock() {
+ spin_loop_hint();
+ }
+ } else {
+ return Err(TryRecvError::Empty);
+ }
+ }
+
+ let guard = RecvGuard {
+ slot,
+ tail: &self.shared.tail,
+ condvar: &self.shared.condvar,
+ };
+
+ if guard.pos() != self.next {
+ let pos = guard.pos();
+
+ guard.drop_no_rem_dec();
+
+ if pos.wrapping_add(self.shared.buffer.len() as u64) == self.next {
+ return Err(TryRecvError::Empty);
+ } else {
+ let tail = self.shared.tail.lock().unwrap();
+
+ // `tail.pos` points to the slot the **next** send writes to.
+ // Because a receiver is lagging, this slot also holds the
+ // oldest value. To make the positions match, we subtract the
+ // capacity.
+ let next = tail.pos.wrapping_sub(self.shared.buffer.len() as u64);
+ let missed = next.wrapping_sub(self.next);
+
+ self.next = next;
+
+ return Err(TryRecvError::Lagged(missed));
+ }
+ }
+
+ self.next = self.next.wrapping_add(1);
+
+ Ok(guard)
+ }
+}
+
+impl<T> Receiver<T>
+where
+ T: Clone,
+{
+ /// Attempts to return a pending value on this receiver without awaiting.
+ ///
+ /// This is useful for a flavor of "optimistic check" before deciding to
+ /// await on a receiver.
+ ///
+ /// Compared with [`recv`], this function has three failure cases instead of
+ /// two (one for a closed channel, one for an empty buffer, and one for a
+ /// lagging receiver).
+ ///
+ /// `Err(TryRecvError::Closed)` is returned when all `Sender` halves have been
+ /// dropped, indicating that no further values can be sent on the channel.
+ ///
+ /// If the [`Receiver`] handle falls behind, once the channel is full, newly
+ /// sent values will overwrite old values. At this point, a call to [`recv`]
+ /// will return with `Err(TryRecvError::Lagged)` and the [`Receiver`]'s
+ /// internal cursor is updated to point to the oldest value still held by
+ /// the channel. A subsequent call to [`try_recv`] will return this value
+ /// **unless** it has since been overwritten. If there are no values to
+ /// receive, `Err(TryRecvError::Empty)` is returned.
+ ///
+ /// [`recv`]: crate::sync::broadcast::Receiver::recv
+ /// [`Receiver`]: crate::sync::broadcast::Receiver
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = broadcast::channel(16);
+ ///
+ /// assert!(rx.try_recv().is_err());
+ ///
+ /// tx.send(10).unwrap();
+ ///
+ /// let value = rx.try_recv().unwrap();
+ /// assert_eq!(10, value);
+ /// }
+ /// ```
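+ ///
+ /// A sketch of the lagging behavior described above (illustrative, not
+ /// from the original docs):
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ ///     let (tx, mut rx) = broadcast::channel(2);
+ ///
+ ///     tx.send(10).unwrap();
+ ///     tx.send(20).unwrap();
+ ///     tx.send(30).unwrap();
+ ///
+ ///     // The receiver lagged: the oldest value was overwritten.
+ ///     assert!(rx.try_recv().is_err());
+ ///
+ ///     // The cursor now points at the oldest value still held.
+ ///     assert_eq!(20, rx.try_recv().unwrap());
+ /// }
+ /// ```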
+ pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
+ let guard = self.recv_ref(false)?;
+ guard.clone_value().ok_or(TryRecvError::Closed)
+ }
+
+ #[doc(hidden)] // TODO: document
+ pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> {
+ if let Some(value) = ok_empty(self.try_recv())? {
+ return Poll::Ready(Ok(value));
+ }
+
+ self.register_waker(cx.waker());
+
+ if let Some(value) = ok_empty(self.try_recv())? {
+ Poll::Ready(Ok(value))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ /// Receives the next value for this receiver.
+ ///
+ /// Each [`Receiver`] handle will receive a clone of all values sent
+ /// **after** it has subscribed.
+ ///
+ /// `Err(RecvError::Closed)` is returned when all `Sender` halves have been
+ /// dropped, indicating that no further values can be sent on the channel.
+ ///
+ /// If the [`Receiver`] handle falls behind, once the channel is full, newly
+ /// sent values will overwrite old values. At this point, a call to [`recv`]
+ /// will return with `Err(RecvError::Lagged)` and the [`Receiver`]'s
+ /// internal cursor is updated to point to the oldest value still held by
+ /// the channel. A subsequent call to [`recv`] will return this value
+ /// **unless** it has since been overwritten.
+ ///
+ /// [`Receiver`]: crate::sync::broadcast::Receiver
+ /// [`recv`]: crate::sync::broadcast::Receiver::recv
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx1) = broadcast::channel(16);
+ /// let mut rx2 = tx.subscribe();
+ ///
+ /// tokio::spawn(async move {
+ /// assert_eq!(rx1.recv().await.unwrap(), 10);
+ /// assert_eq!(rx1.recv().await.unwrap(), 20);
+ /// });
+ ///
+ /// tokio::spawn(async move {
+ /// assert_eq!(rx2.recv().await.unwrap(), 10);
+ /// assert_eq!(rx2.recv().await.unwrap(), 20);
+ /// });
+ ///
+ /// tx.send(10).unwrap();
+ /// tx.send(20).unwrap();
+ /// }
+ /// ```
+ ///
+ /// Handling lag
+ ///
+ /// ```
+ /// use tokio::sync::broadcast;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = broadcast::channel(2);
+ ///
+ /// tx.send(10).unwrap();
+ /// tx.send(20).unwrap();
+ /// tx.send(30).unwrap();
+ ///
+ /// // The receiver lagged behind
+ /// assert!(rx.recv().await.is_err());
+ ///
+ /// // At this point, we can abort or continue with lost messages
+ ///
+ /// assert_eq!(20, rx.recv().await.unwrap());
+ /// assert_eq!(30, rx.recv().await.unwrap());
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Result<T, RecvError> {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ fn register_waker(&self, cx: &Waker) {
+ self.wait.waker.register_by_ref(cx);
+
+ if !self.wait.queued.load(SeqCst) {
+ // Set `queued` before queuing.
+ self.wait.queued.store(true, SeqCst);
+
+ let mut curr = self.shared.wait_stack.load(SeqCst);
+
+ // The ref count is decremented in `notify_rx` when all nodes are
+ // removed from the waiter stack.
+ let node = Arc::into_raw(self.wait.clone()) as *mut _;
+
+ loop {
+ // Safety: `queued == false` means the caller has exclusive
+ // access to `self.wait.next`.
+ self.wait.next.with_mut(|ptr| unsafe { *ptr = curr });
+
+ let res = self
+ .shared
+ .wait_stack
+ .compare_exchange(curr, node, SeqCst, SeqCst);
+
+ match res {
+ Ok(_) => return,
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+ }
+}
+
+#[cfg(feature = "stream")]
+impl<T> crate::stream::Stream for Receiver<T>
+where
+ T: Clone,
+{
+ type Item = Result<T, RecvError>;
+
+ fn poll_next(
+ mut self: std::pin::Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<T, RecvError>>> {
+ self.poll_recv(cx).map(|v| match v {
+ Ok(v) => Some(Ok(v)),
+ lag @ Err(RecvError::Lagged(_)) => Some(lag),
+ Err(RecvError::Closed) => None,
+ })
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ let mut tail = self.shared.tail.lock().unwrap();
+
+ tail.rx_cnt -= 1;
+ let until = tail.pos;
+
+ drop(tail);
+
+ while self.next != until {
+ match self.recv_ref(true) {
+ // Ignore the value
+ Ok(_) => {}
+ // The channel is closed
+ Err(TryRecvError::Closed) => break,
+ // Ignore lagging, we will catch up
+ Err(TryRecvError::Lagged(..)) => {}
+ // Can't be empty
+ Err(TryRecvError::Empty) => panic!("unexpected empty broadcast channel"),
+ }
+ }
+ }
+}
+
+impl<T> Drop for Shared<T> {
+ fn drop(&mut self) {
+ // Clear the wait stack
+ let mut curr = self.wait_stack.with_mut(|ptr| *ptr as *const WaitNode);
+
+ while !curr.is_null() {
+ let waiter = unsafe { Arc::from_raw(curr) };
+ curr = waiter.next.with(|ptr| unsafe { *ptr });
+ }
+ }
+}
+
+impl<T> fmt::Debug for Sender<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "broadcast::Sender")
+ }
+}
+
+impl<T> fmt::Debug for Receiver<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "broadcast::Receiver")
+ }
+}
+
+impl<T> Slot<T> {
+ /// Tries to lock the slot for a receiver. If `false`, then a sender holds the
+ /// lock and the calling task will be notified once the sender has released
+ /// the lock.
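+ ///
+ /// The lock field packs two things into one word: bit 0 is the sender's
+ /// write-lock bit, and the higher bits count active readers in increments
+ /// of two. As a worked example (illustrative, not from the original
+ /// comments), a value of `5` (`0b101`) means two readers hold the slot
+ /// while a sender holds the write lock.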
+ fn try_rx_lock(&self) -> bool {
+ let mut curr = self.lock.load(SeqCst);
+
+ loop {
+ if curr & 1 == 1 {
+ // Locked by sender
+ return false;
+ }
+
+ // Only increment (by 2) if the LSB "lock" bit is not set.
+ let res = self.lock.compare_exchange(curr, curr + 2, SeqCst, SeqCst);
+
+ match res {
+ Ok(_) => return true,
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+
+ fn rx_unlock(&self, tail: &Mutex<Tail>, condvar: &Condvar, rem_dec: bool) {
+ if rem_dec {
+ // Decrement the remaining counter
+ if 1 == self.rem.fetch_sub(1, SeqCst) {
+ // Last receiver, drop the value
+ self.write.val.with_mut(|ptr| unsafe { *ptr = None });
+ }
+ }
+
+ if 1 == self.lock.fetch_sub(2, SeqCst) - 2 {
+ // First acquire the lock to make sure our sender is waiting on the
+ // condition variable, otherwise the notification could be lost.
+ mem::drop(tail.lock().unwrap());
+ // Wake up senders
+ condvar.notify_all();
+ }
+ }
+}
+
+impl<'a, T> RecvGuard<'a, T> {
+ fn pos(&self) -> u64 {
+ self.slot.write.pos.with(|ptr| unsafe { *ptr })
+ }
+
+ fn clone_value(&self) -> Option<T>
+ where
+ T: Clone,
+ {
+ self.slot.write.val.with(|ptr| unsafe { (*ptr).clone() })
+ }
+
+ fn drop_no_rem_dec(self) {
+ self.slot.rx_unlock(self.tail, self.condvar, false);
+
+ mem::forget(self);
+ }
+}
+
+impl<'a, T> Drop for RecvGuard<'a, T> {
+ fn drop(&mut self) {
+ self.slot.rx_unlock(self.tail, self.condvar, true)
+ }
+}
+
+fn ok_empty<T>(res: Result<T, TryRecvError>) -> Result<Option<T>, RecvError> {
+ match res {
+ Ok(value) => Ok(Some(value)),
+ Err(TryRecvError::Empty) => Ok(None),
+ Err(TryRecvError::Lagged(n)) => Err(RecvError::Lagged(n)),
+ Err(TryRecvError::Closed) => Err(RecvError::Closed),
+ }
+}
+
+impl fmt::Display for RecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ RecvError::Closed => write!(f, "channel closed"),
+ RecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
+ }
+ }
+}
+
+impl std::error::Error for RecvError {}
+
+impl fmt::Display for TryRecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ TryRecvError::Empty => write!(f, "channel empty"),
+ TryRecvError::Closed => write!(f, "channel closed"),
+ TryRecvError::Lagged(amt) => write!(f, "channel lagged by {}", amt),
+ }
+ }
+}
+
+impl std::error::Error for TryRecvError {}
diff --git a/third_party/rust/tokio/src/sync/mod.rs b/third_party/rust/tokio/src/sync/mod.rs
new file mode 100644
index 0000000000..0607f78ad4
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mod.rs
@@ -0,0 +1,472 @@
+#![cfg_attr(loom, allow(dead_code, unreachable_pub, unused_imports))]
+
+//! Synchronization primitives for use in asynchronous contexts.
+//!
+//! Tokio programs tend to be organized as a set of [tasks] where each task
+//! operates independently and may be executed on separate physical threads. The
+//! synchronization primitives provided in this module permit these independent
+//! tasks to communicate together.
+//!
+//! [tasks]: crate::task
+//!
+//! # Message passing
+//!
+//! The most common form of synchronization in a Tokio program is message
+//! passing. Two tasks operate independently and send messages to each other to
+//! synchronize. Doing so has the advantage of avoiding shared state.
+//!
+//! Message passing is implemented using channels. A channel supports sending a
+//! message from one producer task to one or more consumer tasks. There are a
+//! few flavors of channels provided by Tokio. Each channel flavor supports
+//! different message passing patterns. When a channel supports multiple
+//! producers, many separate tasks may **send** messages. When a channel
+//! supports multiple consumers, many separate tasks may **receive**
+//! messages.
+//!
+//! Tokio provides many different channel flavors as different message passing
+//! patterns are best handled with different implementations.
+//!
+//! ## `oneshot` channel
+//!
+//! The [`oneshot` channel][oneshot] supports sending a **single** value from a
+//! single producer to a single consumer. This channel is usually used to send
+//! the result of a computation to a waiter.
+//!
+//! **Example:** using a `oneshot` channel to receive the result of a
+//! computation.
+//!
+//! ```
+//! use tokio::sync::oneshot;
+//!
+//! async fn some_computation() -> String {
+//! "represents the result of the computation".to_string()
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, rx) = oneshot::channel();
+//!
+//! tokio::spawn(async move {
+//! let res = some_computation().await;
+//! tx.send(res).unwrap();
+//! });
+//!
+//! // Do other work while the computation is happening in the background
+//!
+//! // Wait for the computation result
+//! let res = rx.await.unwrap();
+//! }
+//! ```
+//!
+//! Note, if the task produces the computation result as its final action
+//! before terminating, the [`JoinHandle`] can be used to receive the
+//! computation result instead of allocating resources for the `oneshot`
+//! channel. Awaiting on [`JoinHandle`] returns `Result`. If the task panics,
+//! the [`JoinHandle`] yields `Err` with the panic cause.
+//!
+//! **Example:**
+//!
+//! ```
+//! async fn some_computation() -> String {
+//! "the result of the computation".to_string()
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let join_handle = tokio::spawn(async move {
+//! some_computation().await
+//! });
+//!
+//! // Do other work while the computation is happening in the background
+//!
+//! // Wait for the computation result
+//! let res = join_handle.await.unwrap();
+//! }
+//! ```
+//!
+//! [`JoinHandle`]: crate::task::JoinHandle
+//!
+//! ## `mpsc` channel
+//!
+//! The [`mpsc` channel][mpsc] supports sending **many** values from **many**
+//! producers to a single consumer. This channel is often used to send work to a
+//! task or to receive the result of many computations.
+//!
+//! **Example:** using an mpsc to incrementally stream the results of a series
+//! of computations.
+//!
+//! ```
+//! use tokio::sync::mpsc;
+//!
+//! async fn some_computation(input: u32) -> String {
+//! format!("the result of computation {}", input)
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (mut tx, mut rx) = mpsc::channel(100);
+//!
+//! tokio::spawn(async move {
+//! for i in 0..10 {
+//! let res = some_computation(i).await;
+//! tx.send(res).await.unwrap();
+//! }
+//! });
+//!
+//! while let Some(res) = rx.recv().await {
+//! println!("got = {}", res);
+//! }
+//! }
+//! ```
+//!
+//! The argument to `mpsc::channel` is the channel capacity. This is the maximum
+//! number of values that can be stored in the channel pending receipt at any
+//! given time. Properly setting this value is key to implementing robust
+//! programs, as the channel capacity plays a critical part in handling back
+//! pressure.
+//!
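+//! For instance (a brief sketch, not part of the original text), with a
+//! capacity of 1 a second `send` waits until the first value is received:
+//!
+//! ```
+//! use tokio::sync::mpsc;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let (mut tx, mut rx) = mpsc::channel(1);
+//!
+//!     tx.send(1).await.unwrap();
+//!
+//!     // The buffer is now full, so this send waits for capacity. It is
+//!     // spawned so that the receiver below can make progress.
+//!     tokio::spawn(async move {
+//!         tx.send(2).await.unwrap();
+//!     });
+//!
+//!     assert_eq!(Some(1), rx.recv().await);
+//!     assert_eq!(Some(2), rx.recv().await);
+//! }
+//! ```
+//!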
+//! A common concurrency pattern for resource management is to spawn a task
+//! dedicated to managing that resource and using message passing between other
+//! tasks to interact with the resource. The resource may be anything that may
+//! not be concurrently used. Some examples include a socket and program state.
+//! For example, if multiple tasks need to send data over a single socket, spawn
+//! a task to manage the socket and use a channel to synchronize.
+//!
+//! **Example:** sending data from many tasks over a single socket using message
+//! passing.
+//!
+//! ```no_run
+//! use tokio::io::{self, AsyncWriteExt};
+//! use tokio::net::TcpStream;
+//! use tokio::sync::mpsc;
+//!
+//! #[tokio::main]
+//! async fn main() -> io::Result<()> {
+//! let mut socket = TcpStream::connect("www.example.com:1234").await?;
+//! let (tx, mut rx) = mpsc::channel(100);
+//!
+//! for _ in 0..10 {
+//! // Each task needs its own `tx` handle. This is done by cloning the
+//! // original handle.
+//! let mut tx = tx.clone();
+//!
+//! tokio::spawn(async move {
+//! tx.send(&b"data to write"[..]).await.unwrap();
+//! });
+//! }
+//!
+//! // The `rx` half of the channel returns `None` once **all** `tx` clones
+//! // drop. To ensure `None` is returned, drop the handle owned by the
+//! // current task. If this `tx` handle is not dropped, there will always
+//! // be a single outstanding `tx` handle.
+//! drop(tx);
+//!
+//! while let Some(res) = rx.recv().await {
+//! socket.write_all(res).await?;
+//! }
+//!
+//! Ok(())
+//! }
+//! ```
+//!
+//! The [`mpsc`][mpsc] and [`oneshot`][oneshot] channels can be combined to
+//! provide a request / response type synchronization pattern with a shared
+//! resource. A task is spawned to synchronize a resource and waits on commands
+//! received on an [`mpsc`][mpsc] channel. Each command includes a
+//! [`oneshot`][oneshot] `Sender` on which the result of the command is sent.
+//!
+//! **Example:** use a task to synchronize a `u64` counter. Each task sends a
+//! "fetch and increment" command. The counter value **before** the increment is
+//! sent over the provided `oneshot` channel.
+//!
+//! ```
+//! use tokio::sync::{oneshot, mpsc};
+//! use Command::Increment;
+//!
+//! enum Command {
+//! Increment,
+//! // Other commands can be added here
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (cmd_tx, mut cmd_rx) = mpsc::channel::<(Command, oneshot::Sender<u64>)>(100);
+//!
+//! // Spawn a task to manage the counter
+//! tokio::spawn(async move {
+//! let mut counter: u64 = 0;
+//!
+//! while let Some((cmd, response)) = cmd_rx.recv().await {
+//! match cmd {
+//! Increment => {
+//! let prev = counter;
+//! counter += 1;
+//! response.send(prev).unwrap();
+//! }
+//! }
+//! }
+//! });
+//!
+//! let mut join_handles = vec![];
+//!
+//! // Spawn tasks that will send the increment command.
+//! for _ in 0..10 {
+//! let mut cmd_tx = cmd_tx.clone();
+//!
+//! join_handles.push(tokio::spawn(async move {
+//! let (resp_tx, resp_rx) = oneshot::channel();
+//!
+//! cmd_tx.send((Increment, resp_tx)).await.ok().unwrap();
+//! let res = resp_rx.await.unwrap();
+//!
+//! println!("previous value = {}", res);
+//! }));
+//! }
+//!
+//! // Wait for all tasks to complete
+//! for join_handle in join_handles.drain(..) {
+//! join_handle.await.unwrap();
+//! }
+//! }
+//! ```
+//!
+//! ## `broadcast` channel
+//!
+//! The [`broadcast` channel][broadcast] supports sending **many** values from
+//! **many** producers to **many** consumers. Each consumer will receive
+//! **each** value. This channel can be used to implement "fan out" style
+//! patterns common with pub / sub or "chat" systems.
+//!
+//! This channel tends to be used less often than `oneshot` and `mpsc` but still
+//! has its use cases.
+//!
+//! Basic usage
+//!
+//! ```
+//! use tokio::sync::broadcast;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, mut rx1) = broadcast::channel(16);
+//! let mut rx2 = tx.subscribe();
+//!
+//! tokio::spawn(async move {
+//! assert_eq!(rx1.recv().await.unwrap(), 10);
+//! assert_eq!(rx1.recv().await.unwrap(), 20);
+//! });
+//!
+//! tokio::spawn(async move {
+//! assert_eq!(rx2.recv().await.unwrap(), 10);
+//! assert_eq!(rx2.recv().await.unwrap(), 20);
+//! });
+//!
+//! tx.send(10).unwrap();
+//! tx.send(20).unwrap();
+//! }
+//! ```
+//!
+//! ## `watch` channel
+//!
+//! The [`watch` channel][watch] supports sending **many** values from a
+//! **single** producer to **many** consumers. However, only the **most recent**
+//! value is stored in the channel. Consumers are notified when a new value is
+//! sent, but there is no guarantee that consumers will see **all** values.
+//!
+//! The [`watch` channel] is similar to a [`broadcast` channel] with capacity 1.
+//!
+//! Use cases for the [`watch` channel] include broadcasting configuration
+//! changes or signalling program state changes, such as transitioning to
+//! shutdown.
+//!
+//! **Example:** use a `watch` channel to notify tasks of configuration changes.
+//! In this example, a configuration file is checked periodically. When the file
+//! changes, the configuration changes are signalled to consumers.
+//!
+//! ```
+//! use tokio::sync::watch;
+//! use tokio::time::{self, Duration, Instant};
+//!
+//! use std::io;
+//!
+//! #[derive(Debug, Clone, Eq, PartialEq)]
+//! struct Config {
+//! timeout: Duration,
+//! }
+//!
+//! impl Config {
+//! async fn load_from_file() -> io::Result<Config> {
+//! // file loading and deserialization logic here
+//! # Ok(Config { timeout: Duration::from_secs(1) })
+//! }
+//! }
+//!
+//! async fn my_async_operation() {
+//! // Do something here
+//! }
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! // Load initial configuration value
+//! let mut config = Config::load_from_file().await.unwrap();
+//!
+//! // Create the watch channel, initialized with the loaded configuration
+//! let (tx, rx) = watch::channel(config.clone());
+//!
+//! // Spawn a task to monitor the file.
+//! tokio::spawn(async move {
+//! loop {
+//! // Wait 10 seconds between checks
+//! time::delay_for(Duration::from_secs(10)).await;
+//!
+//! // Load the configuration file
+//! let new_config = Config::load_from_file().await.unwrap();
+//!
+//! // If the configuration changed, send the new config value
+//! // on the watch channel.
+//! if new_config != config {
+//! tx.broadcast(new_config.clone()).unwrap();
+//! config = new_config;
+//! }
+//! }
+//! });
+//!
+//! let mut handles = vec![];
+//!
+//! // Spawn tasks that run the async operation for at most `timeout`. If
+//! // the timeout elapses, restart the operation.
+//! //
+//! // The task simultaneously watches the `Config` for changes. When the
+//! // timeout duration changes, the timeout is updated without restarting
+//! // the in-flight operation.
+//! for _ in 0..5 {
+//! // Clone a config watch handle for use in this task
+//! let mut rx = rx.clone();
+//!
+//! let handle = tokio::spawn(async move {
+//! // Start the initial operation and pin the future to the stack.
+//! // Pinning to the stack is required to resume the operation
+//! // across multiple calls to `select!`
+//! let op = my_async_operation();
+//! tokio::pin!(op);
+//!
+//! // Receive the **initial** configuration value. As this is the
+//! // first time the config is received from the watch, it will
+//! // always complete immediately.
+//! let mut conf = rx.recv().await.unwrap();
+//!
+//! let mut op_start = Instant::now();
+//! let mut delay = time::delay_until(op_start + conf.timeout);
+//!
+//! loop {
+//! tokio::select! {
+//! _ = &mut delay => {
+//! // The operation elapsed. Restart it
+//! op.set(my_async_operation());
+//!
+//! // Track the new start time
+//! op_start = Instant::now();
+//!
+//! // Restart the timeout
+//! delay = time::delay_until(op_start + conf.timeout);
+//! }
+//! new_conf = rx.recv() => {
+//! conf = new_conf.unwrap();
+//!
+//! // The configuration has been updated. Update the
+//! // `delay` using the new `timeout` value.
+//! delay.reset(op_start + conf.timeout);
+//! }
+//! _ = &mut op => {
+//! // The operation completed!
+//! return
+//! }
+//! }
+//! }
+//! });
+//!
+//! handles.push(handle);
+//! }
+//!
+//! for handle in handles.drain(..) {
+//! handle.await.unwrap();
+//! }
+//! }
+//! ```
+//!
+//! # State synchronization
+//!
+//! The remaining synchronization primitives focus on synchronizing state.
+//! These are asynchronous equivalents to versions provided by `std`. They
+//! operate in a similar way to their `std` counterparts but will wait
+//! asynchronously instead of blocking the thread.
+//!
+//! * [`Barrier`][Barrier] Ensures multiple tasks will wait for each other to
+//! reach a point in the program, before continuing execution all together.
+//!
+//! * [`Mutex`][Mutex] Mutual Exclusion mechanism, which ensures that at most
+//!   one thread at a time is able to access some data (a brief sketch follows
+//!   this list).
+//!
+//! * [`Notify`][Notify] Basic task notification. `Notify` supports notifying a
+//! receiving task without sending data. In this case, the task wakes up and
+//! resumes processing.
+//!
+//! * [`RwLock`][RwLock] Provides a mutual exclusion mechanism which allows
+//! multiple readers at the same time, while allowing only one writer at a
+//! time. In some cases, this can be more efficient than a mutex.
+//!
+//! * [`Semaphore`][Semaphore] Limits the amount of concurrency. A semaphore
+//!   holds a number of permits, which tasks may request in order to enter a
+//!   critical section. Semaphores are useful for implementing limiting or
+//!   bounding of any kind.
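+//!
+//! As a small sketch of the [`Mutex`][Mutex] mentioned above (illustrative,
+//! not part of the original text), several tasks can share and update a
+//! single value:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use tokio::sync::Mutex;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//!     let data = Arc::new(Mutex::new(0u32));
+//!     let mut handles = vec![];
+//!
+//!     for _ in 0..4 {
+//!         let data = data.clone();
+//!         handles.push(tokio::spawn(async move {
+//!             // Awaiting the lock yields to the scheduler rather than
+//!             // blocking the thread.
+//!             *data.lock().await += 1;
+//!         }));
+//!     }
+//!
+//!     for handle in handles {
+//!         handle.await.unwrap();
+//!     }
+//!
+//!     assert_eq!(4, *data.lock().await);
+//! }
+//! ```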
+
+cfg_sync! {
+ mod barrier;
+ pub use barrier::{Barrier, BarrierWaitResult};
+
+ pub mod broadcast;
+
+ pub mod mpsc;
+
+ mod mutex;
+ pub use mutex::{Mutex, MutexGuard};
+
+ mod notify;
+ pub use notify::Notify;
+
+ pub mod oneshot;
+
+ pub(crate) mod batch_semaphore;
+ pub(crate) mod semaphore_ll;
+ mod semaphore;
+ pub use semaphore::{Semaphore, SemaphorePermit};
+
+ mod rwlock;
+ pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+ mod task;
+ pub(crate) use task::AtomicWaker;
+
+ pub mod watch;
+}
+
+cfg_not_sync! {
+ cfg_atomic_waker_impl! {
+ mod task;
+ pub(crate) use task::AtomicWaker;
+ }
+
+ #[cfg(any(
+ feature = "rt-core",
+ feature = "process",
+ feature = "signal"))]
+ pub(crate) mod oneshot;
+
+ cfg_signal! {
+ pub(crate) mod mpsc;
+ pub(crate) mod semaphore_ll;
+ }
+}
+
+/// Unit tests
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/tokio/src/sync/mpsc/block.rs b/third_party/rust/tokio/src/sync/mpsc/block.rs
new file mode 100644
index 0000000000..7bf161967b
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/block.rs
@@ -0,0 +1,387 @@
+use crate::loom::{
+ cell::UnsafeCell,
+ sync::atomic::{AtomicPtr, AtomicUsize},
+ thread,
+};
+
+use std::mem::MaybeUninit;
+use std::ops;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release};
+
+/// A block in a linked list.
+///
+/// Each block in the list can hold up to `BLOCK_CAP` messages.
+pub(crate) struct Block<T> {
+ /// The start index of this block.
+ ///
+ /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`.
+ start_index: usize,
+
+ /// The next block in the linked list.
+ next: AtomicPtr<Block<T>>,
+
+ /// Bitfield tracking slots that are ready to have their values consumed.
+ ready_slots: AtomicUsize,
+
+ /// The observed `tail_position` value *after* the block has been passed by
+ /// `block_tail`.
+ observed_tail_position: UnsafeCell<usize>,
+
+ /// Array containing values pushed into the block. Values are stored in a
+ /// contiguous array in order to improve cache line behavior when reading.
+ /// The values must be manually dropped.
+ values: Values<T>,
+}
+
+pub(crate) enum Read<T> {
+ Value(T),
+ Closed,
+}
+
+struct Values<T>([UnsafeCell<MaybeUninit<T>>; BLOCK_CAP]);
+
+use super::BLOCK_CAP;
+
+/// Masks an index to get the block identifier.
+const BLOCK_MASK: usize = !(BLOCK_CAP - 1);
+
+/// Masks an index to get the value offset in a block.
+const SLOT_MASK: usize = BLOCK_CAP - 1;
+
+/// Flag tracking that a block has gone through the sender's release routine.
+///
+/// When this is set, the receiver may consider freeing the block.
+const RELEASED: usize = 1 << BLOCK_CAP;
+
+/// Flag tracking all senders dropped.
+///
+/// When this flag is set, the send half of the channel has closed.
+const TX_CLOSED: usize = RELEASED << 1;
+
+/// Mask covering all bits used to track slot readiness.
+const READY_MASK: usize = RELEASED - 1;
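+
+// Layout sketch (assuming `BLOCK_CAP = 32`, the usual 64-bit value): bits
+// 0..32 are the per-slot ready bits covered by `READY_MASK`, bit 32 is
+// `RELEASED`, and bit 33 is `TX_CLOSED`.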
+
+/// Returns the index of the first slot in the block referenced by `slot_index`.
+#[inline(always)]
+pub(crate) fn start_index(slot_index: usize) -> usize {
+ BLOCK_MASK & slot_index
+}
+
+/// Returns the offset into the block referenced by `slot_index`.
+#[inline(always)]
+pub(crate) fn offset(slot_index: usize) -> usize {
+ SLOT_MASK & slot_index
+}
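+
+// Worked example (again assuming `BLOCK_CAP = 32`): slot index 70 lives in
+// the block starting at index 64 (`start_index(70) == 64`), at offset 6
+// within that block (`offset(70) == 6`).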
+
+impl<T> Block<T> {
+ pub(crate) fn new(start_index: usize) -> Block<T> {
+ Block {
+ // The absolute index in the channel of the first slot in the block.
+ start_index,
+
+ // Pointer to the next block in the linked list.
+ next: AtomicPtr::new(ptr::null_mut()),
+
+ ready_slots: AtomicUsize::new(0),
+
+ observed_tail_position: UnsafeCell::new(0),
+
+ // Value storage
+ values: unsafe { Values::uninitialized() },
+ }
+ }
+
+ /// Returns `true` if the block matches the given index
+ pub(crate) fn is_at_index(&self, index: usize) -> bool {
+ debug_assert!(offset(index) == 0);
+ self.start_index == index
+ }
+
+ /// Returns the number of blocks between `self` and the block at the
+ /// specified index.
+ ///
+ /// `other_index` must represent a block *after* `self`.
+ pub(crate) fn distance(&self, other_index: usize) -> usize {
+ debug_assert!(offset(other_index) == 0);
+ other_index.wrapping_sub(self.start_index) / BLOCK_CAP
+ }
+
+ /// Reads the value at the given offset.
+ ///
+ /// Returns `None` if the slot is empty.
+ ///
+ /// # Safety
+ ///
+ /// To maintain safety, the caller must ensure:
+ ///
+ /// * No concurrent access to the slot.
+ pub(crate) unsafe fn read(&self, slot_index: usize) -> Option<Read<T>> {
+ let offset = offset(slot_index);
+
+ let ready_bits = self.ready_slots.load(Acquire);
+
+ if !is_ready(ready_bits, offset) {
+ if is_tx_closed(ready_bits) {
+ return Some(Read::Closed);
+ }
+
+ return None;
+ }
+
+ // Get the value
+ let value = self.values[offset].with(|ptr| ptr::read(ptr));
+
+ Some(Read::Value(value.assume_init()))
+ }
+
+ /// Writes a value to the block at the given offset.
+ ///
+ /// # Safety
+ ///
+ /// To maintain safety, the caller must ensure:
+ ///
+ /// * The slot is empty.
+ /// * No concurrent access to the slot.
+ pub(crate) unsafe fn write(&self, slot_index: usize, value: T) {
+ // Get the offset into the block
+ let slot_offset = offset(slot_index);
+
+ self.values[slot_offset].with_mut(|ptr| {
+ ptr::write(ptr, MaybeUninit::new(value));
+ });
+
+ // Release the value. After this point, the slot ref may no longer
+ // be used. It is possible for the receiver to free the memory at
+ // any point.
+ self.set_ready(slot_offset);
+ }
+
+ /// Signal to the receiver that the sender half of the list is closed.
+ pub(crate) unsafe fn tx_close(&self) {
+ self.ready_slots.fetch_or(TX_CLOSED, Release);
+ }
+
+ /// Resets the block to a blank state. This enables reusing blocks in the
+ /// channel.
+ ///
+ /// # Safety
+ ///
+ /// To maintain safety, the caller must ensure:
+ ///
+ /// * All slots are empty.
+ /// * The caller holds a unique pointer to the block.
+ pub(crate) unsafe fn reclaim(&mut self) {
+ self.start_index = 0;
+ self.next = AtomicPtr::new(ptr::null_mut());
+ self.ready_slots = AtomicUsize::new(0);
+ }
+
+ /// Releases the block to the rx half for freeing.
+ ///
+ /// This function is called by the tx half once it can be guaranteed that no
+ /// more senders will attempt to access the block.
+ ///
+ /// # Safety
+ ///
+ /// To maintain safety, the caller must ensure:
+ ///
+ /// * The block will no longer be accessed by any sender.
+ pub(crate) unsafe fn tx_release(&self, tail_position: usize) {
+ // Track the observed tail_position. Any sender targeting a greater
+ // tail_position is guaranteed to not access this block.
+ self.observed_tail_position
+ .with_mut(|ptr| *ptr = tail_position);
+
+ // Set the released bit, signalling to the receiver that it is safe to
+ // free the block's memory as soon as all slots **prior** to
+ // `observed_tail_position` have been filled.
+ self.ready_slots.fetch_or(RELEASED, Release);
+ }
+
+ /// Mark a slot as ready
+ fn set_ready(&self, slot: usize) {
+ let mask = 1 << slot;
+ self.ready_slots.fetch_or(mask, Release);
+ }
+
+ /// Returns `true` when all slots have their `ready` bits set.
+ ///
+ /// This indicates that the block is in its final state and will no longer
+ /// be mutated.
+ ///
+ /// # Implementation
+ ///
+ /// The implementation walks each slot checking the `ready` flag. It might
+ /// be that it would make more sense to coalesce ready flags as bits in a
+ /// single atomic cell. However, this could have negative impact on cache
+ /// behavior as there would be many more mutations to a single slot.
+ pub(crate) fn is_final(&self) -> bool {
+ self.ready_slots.load(Acquire) & READY_MASK == READY_MASK
+ }
+
+ /// Returns the `observed_tail_position` value, if set
+ pub(crate) fn observed_tail_position(&self) -> Option<usize> {
+ if 0 == RELEASED & self.ready_slots.load(Acquire) {
+ None
+ } else {
+ Some(self.observed_tail_position.with(|ptr| unsafe { *ptr }))
+ }
+ }
+
+ /// Loads the next block
+ pub(crate) fn load_next(&self, ordering: Ordering) -> Option<NonNull<Block<T>>> {
+ let ret = NonNull::new(self.next.load(ordering));
+
+ debug_assert!(unsafe {
+ ret.map(|block| block.as_ref().start_index == self.start_index.wrapping_add(BLOCK_CAP))
+ .unwrap_or(true)
+ });
+
+ ret
+ }
+
+ /// Pushes `block` as the next block in the link.
+ ///
+ /// Returns `Ok` if successful; otherwise, a pointer to the next block in
+ /// the list is returned.
+ ///
+ /// This requires that the next pointer is null.
+ ///
+ /// # Ordering
+ ///
+ /// This performs a compare-and-swap on `next` using AcqRel ordering.
+ ///
+ /// # Safety
+ ///
+ /// To maintain safety, the caller must ensure:
+ ///
+ /// * `block` is not freed until it has been removed from the list.
+ pub(crate) unsafe fn try_push(
+ &self,
+ block: &mut NonNull<Block<T>>,
+ ordering: Ordering,
+ ) -> Result<(), NonNull<Block<T>>> {
+ block.as_mut().start_index = self.start_index.wrapping_add(BLOCK_CAP);
+
+ let next_ptr = self
+ .next
+ .compare_and_swap(ptr::null_mut(), block.as_ptr(), ordering);
+
+ match NonNull::new(next_ptr) {
+ Some(next_ptr) => Err(next_ptr),
+ None => Ok(()),
+ }
+ }
+
+ /// Grows the `Block` linked list by allocating and appending a new block.
+ ///
+ /// The next block in the linked list is returned. This may or may not be
+ /// the one allocated by the function call.
+ ///
+ /// # Implementation
+ ///
+ /// It is assumed that `self.next` is null. A new block is allocated with
+ /// `start_index` set to be the next block. A compare-and-swap is performed
+ /// with AcqRel memory ordering. If the compare-and-swap is successful, the
+ /// newly allocated block is released to other threads walking the block
+ /// linked list. If the compare-and-swap fails, the current thread acquires
+ /// the next block in the linked list, allowing the current thread to access
+ /// the slots.
+ pub(crate) fn grow(&self) -> NonNull<Block<T>> {
+ // Create the new block. It is assumed that the block will become the
+ // next one after `&self`. If this turns out to not be the case,
+ // `start_index` is updated accordingly.
+ let new_block = Box::new(Block::new(self.start_index + BLOCK_CAP));
+
+ let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) };
+
+ // Attempt to store the block. The first compare-and-swap attempt is
+ // "unrolled" due to minor differences in logic
+ //
+ // `AcqRel` is used as the ordering **only** when attempting the
+ // compare-and-swap on self.next.
+ //
+ // If the compare-and-swap fails, then the actual value of the cell is
+ // returned from this function and accessed by the caller. Given this,
+ // the memory must be acquired.
+ //
+ // `Release` ensures that the newly allocated block is available to
+ // other threads acquiring the next pointer.
+ let next = NonNull::new(self.next.compare_and_swap(
+ ptr::null_mut(),
+ new_block.as_ptr(),
+ AcqRel,
+ ));
+
+ let next = match next {
+ Some(next) => next,
+ None => {
+ // The compare-and-swap succeeded and the newly allocated block
+ // is successfully pushed.
+ return new_block;
+ }
+ };
+
+ // There already is a next block in the linked list. The newly allocated
+ // block could be dropped and the discovered next block returned;
+ // however, that would be wasteful. Instead, the linked list is walked
+ // by repeatedly attempting to compare-and-swap the pointer into the
+ // `next` register until the compare-and-swap succeeds.
+ //
+ // Care is taken to update new_block's start_index field as appropriate.
+
+ let mut curr = next;
+
+ // TODO: Should this iteration be capped?
+ loop {
+ let actual = unsafe { curr.as_ref().try_push(&mut new_block, AcqRel) };
+
+ curr = match actual {
+ Ok(_) => {
+ return next;
+ }
+ Err(curr) => curr,
+ };
+
+ // When running outside of loom, this calls `spin_loop_hint`.
+ thread::yield_now();
+ }
+ }
+}
+
+/// Returns `true` if the specified slot has a value ready to be consumed.
+fn is_ready(bits: usize, slot: usize) -> bool {
+ let mask = 1 << slot;
+ mask == mask & bits
+}
+
+/// Returns `true` if the closed flag has been set.
+fn is_tx_closed(bits: usize) -> bool {
+ TX_CLOSED == bits & TX_CLOSED
+}
+
+impl<T> Values<T> {
+ unsafe fn uninitialized() -> Values<T> {
+ let mut vals = MaybeUninit::uninit();
+
+ // When fuzzing, `UnsafeCell` needs to be initialized.
+ if_loom! {
+ let p = vals.as_mut_ptr() as *mut UnsafeCell<MaybeUninit<T>>;
+ for i in 0..BLOCK_CAP {
+ p.add(i)
+ .write(UnsafeCell::new(MaybeUninit::uninit()));
+ }
+ }
+
+ Values(vals.assume_init())
+ }
+}
+
+impl<T> ops::Index<usize> for Values<T> {
+ type Output = UnsafeCell<MaybeUninit<T>>;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ self.0.index(index)
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/bounded.rs b/third_party/rust/tokio/src/sync/mpsc/bounded.rs
new file mode 100644
index 0000000000..afca8c524d
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/bounded.rs
@@ -0,0 +1,479 @@
+use crate::sync::mpsc::chan;
+use crate::sync::mpsc::error::{ClosedError, SendError, TryRecvError, TrySendError};
+use crate::sync::semaphore_ll as semaphore;
+
+cfg_time! {
+ use crate::sync::mpsc::error::SendTimeoutError;
+ use crate::time::Duration;
+}
+
+use std::fmt;
+use std::task::{Context, Poll};
+
+/// Send values to the associated `Receiver`.
+///
+/// Instances are created by the [`channel`](channel) function.
+pub struct Sender<T> {
+ chan: chan::Tx<T, Semaphore>,
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Self {
+ Sender {
+ chan: self.chan.clone(),
+ }
+ }
+}
+
+impl<T> fmt::Debug for Sender<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Sender")
+ .field("chan", &self.chan)
+ .finish()
+ }
+}
+
+/// Receive values from the associated `Sender`.
+///
+/// Instances are created by the [`channel`](channel) function.
+pub struct Receiver<T> {
+ /// The channel receiver
+ chan: chan::Rx<T, Semaphore>,
+}
+
+impl<T> fmt::Debug for Receiver<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Receiver")
+ .field("chan", &self.chan)
+ .finish()
+ }
+}
+
+/// Creates a bounded mpsc channel for communicating between asynchronous tasks,
+/// returning the sender/receiver halves.
+///
+/// All data sent on `Sender` will become available on `Receiver` in the same
+/// order as it was sent.
+///
+/// The `Sender` can be cloned to `send` to the same channel from multiple code
+/// locations. Only one `Receiver` is supported.
+///
+/// If the `Receiver` is disconnected while trying to `send`, the `send` method
+/// will return a `SendError`. Similarly, if all `Sender` handles have been
+/// dropped while trying to `recv`, the `recv` method will return `None`.
+///
+/// # Examples
+///
+/// ```rust
+/// use tokio::sync::mpsc;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (mut tx, mut rx) = mpsc::channel(100);
+///
+/// tokio::spawn(async move {
+/// for i in 0..10 {
+/// if let Err(_) = tx.send(i).await {
+/// println!("receiver dropped");
+/// return;
+/// }
+/// }
+/// });
+///
+/// while let Some(i) = rx.recv().await {
+/// println!("got = {}", i);
+/// }
+/// }
+/// ```
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
+ assert!(buffer > 0, "mpsc bounded channel requires buffer > 0");
+ let semaphore = (semaphore::Semaphore::new(buffer), buffer);
+ let (tx, rx) = chan::channel(semaphore);
+
+ let tx = Sender::new(tx);
+ let rx = Receiver::new(rx);
+
+ (tx, rx)
+}
+
+/// Channel semaphore is a tuple of the semaphore implementation and a `usize`
+/// representing the channel bound.
+type Semaphore = (semaphore::Semaphore, usize);
+
+impl<T> Receiver<T> {
+ pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> Receiver<T> {
+ Receiver { chan }
+ }
+
+ /// Receives the next value for this receiver.
+ ///
+ /// `None` is returned when all `Sender` halves have dropped, indicating
+ /// that no further values can be sent on the channel.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, mut rx) = mpsc::channel(100);
+ ///
+ /// tokio::spawn(async move {
+ /// tx.send("hello").await.unwrap();
+ /// });
+ ///
+ /// assert_eq!(Some("hello"), rx.recv().await);
+ /// assert_eq!(None, rx.recv().await);
+ /// }
+ /// ```
+ ///
+ /// Values are buffered:
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, mut rx) = mpsc::channel(100);
+ ///
+ /// tx.send("hello").await.unwrap();
+ /// tx.send("world").await.unwrap();
+ ///
+ /// assert_eq!(Some("hello"), rx.recv().await);
+ /// assert_eq!(Some("world"), rx.recv().await);
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Option<T> {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ #[doc(hidden)] // TODO: document
+ pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ self.chan.recv(cx)
+ }
+
+ /// Attempts to return a pending value on this receiver without blocking.
+ ///
+ /// This method will never block the caller in order to wait for data to
+ /// become available. Instead, this will always return immediately,
+ /// yielding a pending value if one is present on the channel.
+ ///
+ /// This is useful for a flavor of "optimistic check" before deciding to
+ /// await on a receiver.
+ ///
+ /// Compared with `recv`, this function has two failure cases instead of
+ /// one (one for disconnection, one for an empty buffer).
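+ ///
+ /// A short sketch of the optimistic check described above (illustrative,
+ /// not from the original docs):
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ ///     let (mut tx, mut rx) = mpsc::channel(100);
+ ///
+ ///     // Nothing has been sent yet, so the optimistic check fails.
+ ///     assert!(rx.try_recv().is_err());
+ ///
+ ///     tx.send("hello").await.unwrap();
+ ///
+ ///     // A value is buffered, so it can be taken without awaiting.
+ ///     assert_eq!("hello", rx.try_recv().unwrap());
+ /// }
+ /// ```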
+ pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
+ self.chan.try_recv()
+ }
+
+ /// Closes the receiving half of a channel, without dropping it.
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
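+ ///
+ /// A brief sketch (not from the original docs): after `close`, sends fail,
+ /// but values already in the buffer can still be drained.
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ ///     let (mut tx, mut rx) = mpsc::channel(8);
+ ///
+ ///     tx.send(1).await.unwrap();
+ ///     rx.close();
+ ///
+ ///     // The channel no longer accepts new messages...
+ ///     assert!(tx.send(2).await.is_err());
+ ///
+ ///     // ...but the buffered value is still delivered, followed by `None`.
+ ///     assert_eq!(Some(1), rx.recv().await);
+ ///     assert_eq!(None, rx.recv().await);
+ /// }
+ /// ```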
+ pub fn close(&mut self) {
+ self.chan.close();
+ }
+}
+
+impl<T> Unpin for Receiver<T> {}
+
+cfg_stream! {
+ impl<T> crate::stream::Stream for Receiver<T> {
+ type Item = T;
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ self.poll_recv(cx)
+ }
+ }
+}
+
+impl<T> Sender<T> {
+ pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> Sender<T> {
+ Sender { chan }
+ }
+
+ /// Sends a value, waiting until there is capacity.
+ ///
+ /// A successful send occurs when it is determined that the other end of the
+ /// channel has not hung up already. An unsuccessful send would be one where
+ /// the corresponding receiver has already been closed. Note that a return
+ /// value of `Err` means that the data will never be received, but a return
+ /// value of `Ok` does not mean that the data will be received. It is
+ /// possible for the corresponding receiver to hang up immediately after
+ /// this function returns `Ok`.
+ ///
+ /// # Errors
+ ///
+ /// If the receive half of the channel is closed, either due to [`close`]
+ /// being called or the [`Receiver`] handle dropping, the function returns
+ /// an error. The error includes the value passed to `send`.
+ ///
+ /// [`close`]: Receiver::close
+ /// [`Receiver`]: Receiver
+ ///
+ /// # Examples
+ ///
+ /// In the following example, each call to `send` will block until the
+ /// previously sent value has been received.
+ ///
+ /// ```rust
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, mut rx) = mpsc::channel(1);
+ ///
+ /// tokio::spawn(async move {
+ /// for i in 0..10 {
+ /// if let Err(_) = tx.send(i).await {
+ /// println!("receiver dropped");
+ /// return;
+ /// }
+ /// }
+ /// });
+ ///
+ /// while let Some(i) = rx.recv().await {
+ /// println!("got = {}", i);
+ /// }
+ /// }
+ /// ```
+ pub async fn send(&mut self, value: T) -> Result<(), SendError<T>> {
+ use crate::future::poll_fn;
+
+ if poll_fn(|cx| self.poll_ready(cx)).await.is_err() {
+ return Err(SendError(value));
+ }
+
+ match self.try_send(value) {
+ Ok(()) => Ok(()),
+ Err(TrySendError::Full(_)) => unreachable!(),
+ Err(TrySendError::Closed(value)) => Err(SendError(value)),
+ }
+ }
+
+ /// Attempts to immediately send a message on this `Sender`
+ ///
+ /// This method differs from [`send`] by returning immediately if the channel's
+ /// buffer is full or no receiver is waiting to acquire some data. Compared
+ /// with [`send`], this function has two failure cases instead of one (one for
+ /// disconnection, one for a full buffer).
+ ///
+ /// This function may be paired with [`poll_ready`] in order to wait for
+ /// channel capacity before trying to send a value.
+ ///
+ /// # Errors
+ ///
+ /// If the channel capacity has been reached, i.e., the channel has `n`
+ /// buffered values where `n` is the argument passed to [`channel`], then an
+ /// error is returned.
+ ///
+ /// If the receive half of the channel is closed, either due to [`close`]
+ /// being called or the [`Receiver`] handle dropping, the function returns
+ /// an error. The error includes the value passed to `send`.
+ ///
+ /// [`send`]: Sender::send
+ /// [`poll_ready`]: Sender::poll_ready
+ /// [`channel`]: channel
+ /// [`close`]: Receiver::close
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// // Create a channel with buffer size 1
+ /// let (mut tx1, mut rx) = mpsc::channel(1);
+ /// let mut tx2 = tx1.clone();
+ ///
+ /// tokio::spawn(async move {
+ /// tx1.send(1).await.unwrap();
+ /// tx1.send(2).await.unwrap();
+ /// // task waits until the receiver receives a value.
+ /// });
+ ///
+ /// tokio::spawn(async move {
+ /// // This will return an error and send
+ /// // no message if the buffer is full
+ /// let _ = tx2.try_send(3);
+ /// });
+ ///
+ /// let mut msg;
+ /// msg = rx.recv().await.unwrap();
+ /// println!("message {} received", msg);
+ ///
+ /// msg = rx.recv().await.unwrap();
+ /// println!("message {} received", msg);
+ ///
+ /// // Third message may have never been sent
+ /// match rx.recv().await {
+ /// Some(msg) => println!("message {} received", msg),
+ /// None => println!("the third message was never sent"),
+ /// }
+ /// }
+ /// ```
+ pub fn try_send(&mut self, message: T) -> Result<(), TrySendError<T>> {
+ self.chan.try_send(message)?;
+ Ok(())
+ }
+
+ /// Sends a value, waiting until there is capacity, but only for a limited time.
+ ///
+ /// Shares the same success and error conditions as [`send`], adding one more
+ /// condition for an unsuccessful send, which is when the provided timeout has
+ /// elapsed, and there is no capacity available.
+ ///
+ /// [`send`]: Sender::send
+ ///
+ /// # Errors
+ ///
+ /// If the receive half of the channel is closed, either due to [`close`]
+ /// being called or the [`Receiver`] having been dropped,
+ /// the function returns an error. The error includes the value passed to `send`.
+ ///
+ /// [`close`]: Receiver::close
+ /// [`Receiver`]: Receiver
+ ///
+ /// # Examples
+ ///
+ /// In the following example, each call to `send_timeout` will block until the
+ /// previously sent value has been received, unless the timeout has elapsed.
+ ///
+ /// ```rust
+ /// use tokio::sync::mpsc;
+ /// use tokio::time::{delay_for, Duration};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, mut rx) = mpsc::channel(1);
+ ///
+ /// tokio::spawn(async move {
+ /// for i in 0..10 {
+ /// if let Err(e) = tx.send_timeout(i, Duration::from_millis(100)).await {
+ /// println!("send error: #{:?}", e);
+ /// return;
+ /// }
+ /// }
+ /// });
+ ///
+ /// while let Some(i) = rx.recv().await {
+ /// println!("got = {}", i);
+ /// delay_for(Duration::from_millis(200)).await;
+ /// }
+ /// }
+ /// ```
+ #[cfg(feature = "time")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "time")))]
+ pub async fn send_timeout(
+ &mut self,
+ value: T,
+ timeout: Duration,
+ ) -> Result<(), SendTimeoutError<T>> {
+ use crate::future::poll_fn;
+
+ match crate::time::timeout(timeout, poll_fn(|cx| self.poll_ready(cx))).await {
+ Err(_) => {
+ return Err(SendTimeoutError::Timeout(value));
+ }
+ Ok(Err(_)) => {
+ return Err(SendTimeoutError::Closed(value));
+ }
+ Ok(_) => {}
+ }
+
+ match self.try_send(value) {
+ Ok(()) => Ok(()),
+ Err(TrySendError::Full(_)) => unreachable!(),
+ Err(TrySendError::Closed(value)) => Err(SendTimeoutError::Closed(value)),
+ }
+ }
+
+ /// Returns `Poll::Ready(Ok(()))` when the channel is able to accept another item.
+ ///
+ /// If the channel is full, then `Poll::Pending` is returned and the task is notified when a
+ /// slot becomes available.
+ ///
+ /// Once `poll_ready` returns `Poll::Ready(Ok(()))`, a call to `try_send` will succeed unless
+ /// the channel has since been closed. To provide this guarantee, the channel reserves one slot
+ /// in the channel for the coming send. This reserved slot is not available to other `Sender`
+ /// instances, so you need to be careful to not end up with deadlocks by blocking after calling
+ /// `poll_ready` but before sending an element.
+ ///
+ /// If, after `poll_ready` succeeds, you decide you do not wish to send an item after all, you
+ /// can use [`disarm`](Sender::disarm) to release the reserved slot.
+ ///
+ /// Until an item is sent or [`disarm`](Sender::disarm) is called, repeated calls to
+ /// `poll_ready` will return `Poll::Ready(Ok(()))`, or `Poll::Ready(Err(_))` if the
+ /// channel has been closed.
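+ ///
+ /// As a sketch of the intended pairing with [`try_send`] (assuming a `tx`
+ /// handle and an `item` to send; not from the original docs):
+ ///
+ /// ```rust,ignore
+ /// use futures::future::poll_fn;
+ ///
+ /// // Wait until a slot is reserved for this sender...
+ /// poll_fn(|cx| tx.poll_ready(cx)).await?;
+ /// // ...then this send cannot fail with `Full`.
+ /// tx.try_send(item)?;
+ /// ```
+ ///
+ /// [`try_send`]: Sender::try_send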
+ pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ClosedError>> {
+ self.chan.poll_ready(cx).map_err(|_| ClosedError::new())
+ }
+
+ /// Undo a successful call to `poll_ready`.
+ ///
+ /// Once a call to `poll_ready` returns `Poll::Ready(Ok(()))`, it holds up one slot in the
+ /// channel to make room for the coming send. `disarm` allows you to give up that slot if you
+ /// decide you do not wish to send an item after all. After calling `disarm`, you must call
+ /// `poll_ready` until it returns `Poll::Ready(Ok(()))` before attempting to send again.
+ ///
+ /// Returns `false` if no slot is reserved for this sender (usually because `poll_ready` was
+ /// not previously called, or did not succeed).
+ ///
+ /// # Motivation
+ ///
+ /// Since `poll_ready` takes up one of the finite number of slots in a bounded channel, callers
+ /// need to send an item shortly after `poll_ready` succeeds. If they do not, idle senders may
+ /// take up all the slots of the channel, and prevent active senders from getting any requests
+ /// through. Consider this code that forwards from one channel to another:
+ ///
+ /// ```rust,ignore
+ /// loop {
+ /// ready!(tx.poll_ready(cx))?;
+ /// if let Some(item) = ready!(rx.poll_recv(cx)) {
+ /// tx.try_send(item)?;
+ /// } else {
+ /// break;
+ /// }
+ /// }
+ /// ```
+ ///
+ /// If many such forwarders exist, and they all forward into a single (cloned) `Sender`, then
+ /// any number of forwarders may be waiting for `rx.poll_recv` at the same time. While they do,
+ /// they are effectively each reducing the channel's capacity by 1. If enough of these
+ /// forwarders are idle, forwarders whose `rx` _do_ have elements will be unable to find a spot
+ /// for them through `poll_ready`, and the system will deadlock.
+ ///
+ /// `disarm` solves this problem by allowing you to give up the reserved slot if you find that
+ /// you have to block. We can then fix the code above by writing:
+ ///
+ /// ```rust,ignore
+ /// loop {
+ /// ready!(tx.poll_ready(cx))?;
+ /// let item = rx.poll_recv(cx);
+ /// if let Poll::Ready(Ok(_)) = item {
+ /// // we're going to send the item below, so don't disarm
+ /// } else {
+ /// // give up our send slot, we won't need it for a while
+ /// tx.disarm();
+ /// }
+ /// if let Some(item) = ready!(item) {
+ /// tx.try_send(item)?;
+ /// } else {
+ /// break;
+ /// }
+ /// }
+ /// ```
+ pub fn disarm(&mut self) -> bool {
+ if self.chan.is_ready() {
+ self.chan.disarm();
+ true
+ } else {
+ false
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/chan.rs b/third_party/rust/tokio/src/sync/mpsc/chan.rs
new file mode 100644
index 0000000000..3466395788
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/chan.rs
@@ -0,0 +1,524 @@
+use crate::loom::cell::UnsafeCell;
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::Arc;
+use crate::sync::mpsc::error::{ClosedError, TryRecvError};
+use crate::sync::mpsc::{error, list};
+
+use std::fmt;
+use std::process;
+use std::sync::atomic::Ordering::{AcqRel, Relaxed};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll};
+
+/// Channel sender
+pub(crate) struct Tx<T, S: Semaphore> {
+ inner: Arc<Chan<T, S>>,
+ permit: S::Permit,
+}
+
+impl<T, S: Semaphore> fmt::Debug for Tx<T, S>
+where
+ S::Permit: fmt::Debug,
+ S: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Tx")
+ .field("inner", &self.inner)
+ .field("permit", &self.permit)
+ .finish()
+ }
+}
+
+/// Channel receiver
+pub(crate) struct Rx<T, S: Semaphore> {
+ inner: Arc<Chan<T, S>>,
+}
+
+impl<T, S: Semaphore> fmt::Debug for Rx<T, S>
+where
+ S: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Rx").field("inner", &self.inner).finish()
+ }
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub(crate) enum TrySendError {
+ Closed,
+ Full,
+}
+
+impl<T> From<(T, TrySendError)> for error::SendError<T> {
+ fn from(src: (T, TrySendError)) -> error::SendError<T> {
+ match src.1 {
+ TrySendError::Closed => error::SendError(src.0),
+ TrySendError::Full => unreachable!(),
+ }
+ }
+}
+
+impl<T> From<(T, TrySendError)> for error::TrySendError<T> {
+ fn from(src: (T, TrySendError)) -> error::TrySendError<T> {
+ match src.1 {
+ TrySendError::Closed => error::TrySendError::Closed(src.0),
+ TrySendError::Full => error::TrySendError::Full(src.0),
+ }
+ }
+}
+
+pub(crate) trait Semaphore {
+ type Permit;
+
+ fn new_permit() -> Self::Permit;
+
+ /// The permit is dropped without a value being sent. In this case, the
+ /// permit must be returned to the semaphore.
+ fn drop_permit(&self, permit: &mut Self::Permit);
+
+ fn is_idle(&self) -> bool;
+
+ fn add_permit(&self);
+
+ fn poll_acquire(
+ &self,
+ cx: &mut Context<'_>,
+ permit: &mut Self::Permit,
+ ) -> Poll<Result<(), ClosedError>>;
+
+ fn try_acquire(&self, permit: &mut Self::Permit) -> Result<(), TrySendError>;
+
+ /// A value was sent into the channel and the permit held by `tx` is
+ /// dropped. In this case, the permit should not immediately be returned to
+ /// the semaphore. Instead, the permit is returned to the semaphore once
+ /// the sent value is read by the rx handle.
+ fn forget(&self, permit: &mut Self::Permit);
+
+ fn close(&self);
+}
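+
+// A permit's lifecycle, as driven by `Tx` below: `new_permit` creates an
+// empty permit, `poll_acquire`/`try_acquire` bind channel capacity to it,
+// `forget` consumes it when a value is actually sent (that capacity is handed
+// back via `add_permit` once the receiver reads the value), and `drop_permit`
+// returns unused capacity when a sender disarms or is dropped.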
+
+struct Chan<T, S> {
+ /// Handle to the push half of the lock-free list.
+ tx: list::Tx<T>,
+
+ /// Coordinates access to channel's capacity.
+ semaphore: S,
+
+ /// Receiver waker. Notified when a value is pushed into the channel.
+ rx_waker: AtomicWaker,
+
+ /// Tracks the number of outstanding sender handles.
+ ///
+ /// When this drops to zero, the send half of the channel is closed.
+ tx_count: AtomicUsize,
+
+ /// Only accessed by `Rx` handle.
+ rx_fields: UnsafeCell<RxFields<T>>,
+}
+
+impl<T, S> fmt::Debug for Chan<T, S>
+where
+ S: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Chan")
+ .field("tx", &self.tx)
+ .field("semaphore", &self.semaphore)
+ .field("rx_waker", &self.rx_waker)
+ .field("tx_count", &self.tx_count)
+ .field("rx_fields", &"...")
+ .finish()
+ }
+}
+
+/// Fields only accessed by `Rx` handle.
+struct RxFields<T> {
+ /// Channel receiver. This field is only accessed by the `Receiver` type.
+ list: list::Rx<T>,
+
+ /// `true` if `Rx::close` is called.
+ rx_closed: bool,
+}
+
+impl<T> fmt::Debug for RxFields<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("RxFields")
+ .field("list", &self.list)
+ .field("rx_closed", &self.rx_closed)
+ .finish()
+ }
+}
+
+unsafe impl<T: Send, S: Send> Send for Chan<T, S> {}
+unsafe impl<T: Send, S: Sync> Sync for Chan<T, S> {}
+
+pub(crate) fn channel<T, S>(semaphore: S) -> (Tx<T, S>, Rx<T, S>)
+where
+ S: Semaphore,
+{
+ let (tx, rx) = list::channel();
+
+ let chan = Arc::new(Chan {
+ tx,
+ semaphore,
+ rx_waker: AtomicWaker::new(),
+ tx_count: AtomicUsize::new(1),
+ rx_fields: UnsafeCell::new(RxFields {
+ list: rx,
+ rx_closed: false,
+ }),
+ });
+
+ (Tx::new(chan.clone()), Rx::new(chan))
+}
+
+// ===== impl Tx =====
+
+impl<T, S> Tx<T, S>
+where
+ S: Semaphore,
+{
+ fn new(chan: Arc<Chan<T, S>>) -> Tx<T, S> {
+ Tx {
+ inner: chan,
+ permit: S::new_permit(),
+ }
+ }
+
+ pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ClosedError>> {
+ self.inner.semaphore.poll_acquire(cx, &mut self.permit)
+ }
+
+ pub(crate) fn disarm(&mut self) {
+ // TODO: should this error if not acquired?
+ self.inner.semaphore.drop_permit(&mut self.permit)
+ }
+
+ /// Send a message and notify the receiver.
+ pub(crate) fn try_send(&mut self, value: T) -> Result<(), (T, TrySendError)> {
+ self.inner.try_send(value, &mut self.permit)
+ }
+}
+
+impl<T> Tx<T, (crate::sync::semaphore_ll::Semaphore, usize)> {
+ pub(crate) fn is_ready(&self) -> bool {
+ self.permit.is_acquired()
+ }
+}
+
+impl<T> Tx<T, AtomicUsize> {
+ pub(crate) fn send_unbounded(&self, value: T) -> Result<(), (T, TrySendError)> {
+ self.inner.try_send(value, &mut ())
+ }
+}
+
+impl<T, S> Clone for Tx<T, S>
+where
+ S: Semaphore,
+{
+ fn clone(&self) -> Tx<T, S> {
+ // Using a Relaxed ordering here is sufficient as the caller holds a
+ // strong ref to `self`, preventing a concurrent decrement to zero.
+ self.inner.tx_count.fetch_add(1, Relaxed);
+
+ Tx {
+ inner: self.inner.clone(),
+ permit: S::new_permit(),
+ }
+ }
+}
+
+impl<T, S> Drop for Tx<T, S>
+where
+ S: Semaphore,
+{
+ fn drop(&mut self) {
+ self.inner.semaphore.drop_permit(&mut self.permit);
+
+ if self.inner.tx_count.fetch_sub(1, AcqRel) != 1 {
+ return;
+ }
+
+ // Close the list, which sends a `Close` message
+ self.inner.tx.close();
+
+ // Notify the receiver
+ self.inner.rx_waker.wake();
+ }
+}
+
+// ===== impl Rx =====
+
+impl<T, S> Rx<T, S>
+where
+ S: Semaphore,
+{
+ fn new(chan: Arc<Chan<T, S>>) -> Rx<T, S> {
+ Rx { inner: chan }
+ }
+
+ pub(crate) fn close(&mut self) {
+ self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+ let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+ if rx_fields.rx_closed {
+ return;
+ }
+
+ rx_fields.rx_closed = true;
+ });
+
+ self.inner.semaphore.close();
+ }
+
+ /// Receive the next value
+ pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ use super::block::Read::*;
+
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+ let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+ macro_rules! try_recv {
+ () => {
+ match rx_fields.list.pop(&self.inner.tx) {
+ Some(Value(value)) => {
+ self.inner.semaphore.add_permit();
+ return Ready(Some(value));
+ }
+ Some(Closed) => {
+ // TODO: This check may not be required as it most
+ // likely can only return `true` at this point. A
+ // channel is closed when all tx handles are
+ // dropped. Dropping a tx handle releases memory,
+ // which ensures that if dropping the tx handle is
+ // visible, then all messages sent are also visible.
+ assert!(self.inner.semaphore.is_idle());
+ return Ready(None);
+ }
+ None => {} // fall through
+ }
+ };
+ }
+
+ try_recv!();
+
+ self.inner.rx_waker.register_by_ref(cx.waker());
+
+ // It is possible that a value was pushed between attempting to read
+ // and registering the task, so we have to check the channel a
+ // second time here.
+ try_recv!();
+
+ if rx_fields.rx_closed && self.inner.semaphore.is_idle() {
+ Ready(None)
+ } else {
+ Pending
+ }
+ })
+ }
+
+ /// Receives the next value without blocking
+ pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
+ use super::block::Read::*;
+ self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+ let rx_fields = unsafe { &mut *rx_fields_ptr };
+ match rx_fields.list.pop(&self.inner.tx) {
+ Some(Value(value)) => {
+ self.inner.semaphore.add_permit();
+ Ok(value)
+ }
+ Some(Closed) => Err(TryRecvError::Closed),
+ None => Err(TryRecvError::Empty),
+ }
+ })
+ }
+}
+
+impl<T, S> Drop for Rx<T, S>
+where
+ S: Semaphore,
+{
+ fn drop(&mut self) {
+ use super::block::Read::Value;
+
+ self.close();
+
+ self.inner.rx_fields.with_mut(|rx_fields_ptr| {
+ let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+ while let Some(Value(_)) = rx_fields.list.pop(&self.inner.tx) {
+ self.inner.semaphore.add_permit();
+ }
+ })
+ }
+}
+
+// ===== impl Chan =====
+
+impl<T, S> Chan<T, S>
+where
+ S: Semaphore,
+{
+ fn try_send(&self, value: T, permit: &mut S::Permit) -> Result<(), (T, TrySendError)> {
+ if let Err(e) = self.semaphore.try_acquire(permit) {
+ return Err((value, e));
+ }
+
+ // Push the value
+ self.tx.push(value);
+
+ // Notify the rx task
+ self.rx_waker.wake();
+
+ // Release the permit
+ self.semaphore.forget(permit);
+
+ Ok(())
+ }
+}
+
+impl<T, S> Drop for Chan<T, S> {
+ fn drop(&mut self) {
+ use super::block::Read::Value;
+
+ // Safety: the only owner of the rx fields is Chan, and being
+ // inside its own Drop means we're the last ones to touch it.
+ self.rx_fields.with_mut(|rx_fields_ptr| {
+ let rx_fields = unsafe { &mut *rx_fields_ptr };
+
+ while let Some(Value(_)) = rx_fields.list.pop(&self.tx) {}
+ unsafe { rx_fields.list.free_blocks() };
+ });
+ }
+}
+
+use crate::sync::semaphore_ll::TryAcquireError;
+
+impl From<TryAcquireError> for TrySendError {
+ fn from(src: TryAcquireError) -> TrySendError {
+ if src.is_closed() {
+ TrySendError::Closed
+ } else if src.is_no_permits() {
+ TrySendError::Full
+ } else {
+ unreachable!();
+ }
+ }
+}
+
+// ===== impl Semaphore for (::Semaphore, capacity) =====
+
+use crate::sync::semaphore_ll::Permit;
+
+impl Semaphore for (crate::sync::semaphore_ll::Semaphore, usize) {
+ type Permit = Permit;
+
+ fn new_permit() -> Permit {
+ Permit::new()
+ }
+
+ fn drop_permit(&self, permit: &mut Permit) {
+ permit.release(1, &self.0);
+ }
+
+ fn add_permit(&self) {
+ self.0.add_permits(1)
+ }
+
+ fn is_idle(&self) -> bool {
+ self.0.available_permits() == self.1
+ }
+
+ fn poll_acquire(
+ &self,
+ cx: &mut Context<'_>,
+ permit: &mut Permit,
+ ) -> Poll<Result<(), ClosedError>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ permit
+ .poll_acquire(cx, 1, &self.0)
+ .map_err(|_| ClosedError::new())
+ }
+
+ fn try_acquire(&self, permit: &mut Permit) -> Result<(), TrySendError> {
+ permit.try_acquire(1, &self.0)?;
+ Ok(())
+ }
+
+ fn forget(&self, permit: &mut Self::Permit) {
+ permit.forget(1);
+ }
+
+ fn close(&self) {
+ self.0.close();
+ }
+}
+
+// ===== impl Semaphore for AtomicUsize =====
+
+use std::sync::atomic::Ordering::{Acquire, Release};
+use std::usize;
+
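+// For the unbounded channel, the semaphore is just a counter: the number of
+// in-flight messages is kept in the upper bits (each send adds 2, each value
+// read by the receiver subtracts 2 via `add_permit`), while the
+// least-significant bit is a "closed" flag set by `close`.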
+impl Semaphore for AtomicUsize {
+ type Permit = ();
+
+ fn new_permit() {}
+
+ fn drop_permit(&self, _permit: &mut ()) {}
+
+ fn add_permit(&self) {
+ let prev = self.fetch_sub(2, Release);
+
+ if prev >> 1 == 0 {
+ // Something went wrong
+ process::abort();
+ }
+ }
+
+ fn is_idle(&self) -> bool {
+ self.load(Acquire) >> 1 == 0
+ }
+
+ fn poll_acquire(
+ &self,
+ _cx: &mut Context<'_>,
+ permit: &mut (),
+ ) -> Poll<Result<(), ClosedError>> {
+ Ready(self.try_acquire(permit).map_err(|_| ClosedError::new()))
+ }
+
+ fn try_acquire(&self, _permit: &mut ()) -> Result<(), TrySendError> {
+ let mut curr = self.load(Acquire);
+
+ loop {
+ if curr & 1 == 1 {
+ return Err(TrySendError::Closed);
+ }
+
+ if curr == usize::MAX ^ 1 {
+ // Overflowed the message count. There is no safe way to recover, so
+ // abort the process. In practice, this should never happen.
+ process::abort()
+ }
+
+ match self.compare_exchange(curr, curr + 2, AcqRel, Acquire) {
+ Ok(_) => return Ok(()),
+ Err(actual) => {
+ curr = actual;
+ }
+ }
+ }
+ }
+
+ fn forget(&self, _permit: &mut ()) {}
+
+ fn close(&self) {
+ self.fetch_or(1, Release);
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/error.rs b/third_party/rust/tokio/src/sync/mpsc/error.rs
new file mode 100644
index 0000000000..72c42aa53e
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/error.rs
@@ -0,0 +1,146 @@
+//! Channel error types
+
+use std::error::Error;
+use std::fmt;
+
+/// Error returned by the `Sender`.
+#[derive(Debug)]
+pub struct SendError<T>(pub T);
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "channel closed")
+ }
+}
+
+impl<T: fmt::Debug> std::error::Error for SendError<T> {}
+
+// ===== TrySendError =====
+
+/// This enumeration is the list of the possible error outcomes for the
+/// [try_send](super::Sender::try_send) method.
+#[derive(Debug)]
+pub enum TrySendError<T> {
+ /// The data could not be sent on the channel because the channel is
+ /// currently full and sending would require blocking.
+ Full(T),
+
+ /// The receive half of the channel was explicitly closed or has been
+ /// dropped.
+ Closed(T),
+}
+
+impl<T: fmt::Debug> Error for TrySendError<T> {}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "{}",
+ match self {
+ TrySendError::Full(..) => "no available capacity",
+ TrySendError::Closed(..) => "channel closed",
+ }
+ )
+ }
+}
+
+impl<T> From<SendError<T>> for TrySendError<T> {
+ fn from(src: SendError<T>) -> TrySendError<T> {
+ TrySendError::Closed(src.0)
+ }
+}
+
+// ===== RecvError =====
+
+/// Error returned by `Receiver`.
+#[derive(Debug)]
+pub struct RecvError(());
+
+impl fmt::Display for RecvError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "channel closed")
+ }
+}
+
+impl Error for RecvError {}
+
+// ===== TryRecvError =====
+
+/// This enumeration is the list of the possible reasons that try_recv
+/// could not return data when called.
+#[derive(Debug, PartialEq)]
+pub enum TryRecvError {
+ /// This channel is currently empty, but the Sender(s) have not yet
+ /// disconnected, so data may yet become available.
+ Empty,
+ /// The channel's sending half has been closed, and there will
+ /// never be any more data received on it.
+ Closed,
+}
+
+impl fmt::Display for TryRecvError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "{}",
+ match self {
+ TryRecvError::Empty => "channel empty",
+ TryRecvError::Closed => "channel closed",
+ }
+ )
+ }
+}
+
+impl Error for TryRecvError {}
+
+// ===== ClosedError =====
+
+/// Error returned by [`Sender::poll_ready`](super::Sender::poll_ready).
+#[derive(Debug)]
+pub struct ClosedError(());
+
+impl ClosedError {
+ pub(crate) fn new() -> ClosedError {
+ ClosedError(())
+ }
+}
+
+impl fmt::Display for ClosedError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "channel closed")
+ }
+}
+
+impl Error for ClosedError {}
+
+cfg_time! {
+ // ===== SendTimeoutError =====
+
+ #[derive(Debug)]
+ /// Error returned by [`Sender::send_timeout`](super::Sender::send_timeout).
+ pub enum SendTimeoutError<T> {
+ /// The data could not be sent on the channel because the channel is
+ /// full, and the timeout to send has elapsed.
+ Timeout(T),
+
+ /// The receive half of the channel was explicitly closed or has been
+ /// dropped.
+ Closed(T),
+ }
+
+ impl<T: fmt::Debug> Error for SendTimeoutError<T> {}
+
+ impl<T> fmt::Display for SendTimeoutError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "{}",
+ match self {
+ SendTimeoutError::Timeout(..) => "timed out waiting on send operation",
+ SendTimeoutError::Closed(..) => "channel closed",
+ }
+ )
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/list.rs b/third_party/rust/tokio/src/sync/mpsc/list.rs
new file mode 100644
index 0000000000..53f82a25ef
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/list.rs
@@ -0,0 +1,341 @@
+//! A concurrent, lock-free, FIFO list.
+
+use crate::loom::{
+ sync::atomic::{AtomicPtr, AtomicUsize},
+ thread,
+};
+use crate::sync::mpsc::block::{self, Block};
+
+use std::fmt;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
+
+/// List queue transmit handle
+pub(crate) struct Tx<T> {
+ /// Tail in the `Block` mpmc list.
+ block_tail: AtomicPtr<Block<T>>,
+
+ /// Position to push the next message. This references a block and an
+ /// offset into the block.
+ tail_position: AtomicUsize,
+}
+
+/// List queue receive handle
+pub(crate) struct Rx<T> {
+ /// Pointer to the block being processed
+ head: NonNull<Block<T>>,
+
+ /// Next slot index to process
+ index: usize,
+
+ /// Pointer to the next block pending release
+ free_head: NonNull<Block<T>>,
+}
+
+pub(crate) fn channel<T>() -> (Tx<T>, Rx<T>) {
+ // Create the initial block shared between the tx and rx halves.
+ let initial_block = Box::new(Block::new(0));
+ let initial_block_ptr = Box::into_raw(initial_block);
+
+ let tx = Tx {
+ block_tail: AtomicPtr::new(initial_block_ptr),
+ tail_position: AtomicUsize::new(0),
+ };
+
+ let head = NonNull::new(initial_block_ptr).unwrap();
+
+ let rx = Rx {
+ head,
+ index: 0,
+ free_head: head,
+ };
+
+ (tx, rx)
+}
+
+impl<T> Tx<T> {
+ /// Pushes a value into the list.
+ pub(crate) fn push(&self, value: T) {
+ // First, claim a slot for the value. `Acquire` is used here to
+ // synchronize with the `fetch_add` in `reclaim_blocks`.
+ let slot_index = self.tail_position.fetch_add(1, Acquire);
+
+ // Load the current block and write the value
+ let block = self.find_block(slot_index);
+
+ unsafe {
+ // Write the value to the block
+ block.as_ref().write(slot_index, value);
+ }
+ }
+
+ /// Closes the send half of the list
+ ///
+ /// The process is similar to pushing a value, but instead of writing the
+ /// value and setting the ready flag, the TX_CLOSED flag is set on the block.
+ pub(crate) fn close(&self) {
+ // First, claim a slot for the value. This is the last slot that will be
+ // claimed.
+ let slot_index = self.tail_position.fetch_add(1, Acquire);
+
+ let block = self.find_block(slot_index);
+
+ unsafe { block.as_ref().tx_close() }
+ }
+
+ fn find_block(&self, slot_index: usize) -> NonNull<Block<T>> {
+ // The start index of the block that contains `index`.
+ let start_index = block::start_index(slot_index);
+
+ // The index offset into the block
+ let offset = block::offset(slot_index);
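+ // E.g. with `BLOCK_CAP = 32` (the 64-bit default), slot index 70 maps to
+ // the block starting at index 64, at offset 6 within that block.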
+
+ // Load the current tail of the block list
+ let mut block_ptr = self.block_tail.load(Acquire);
+
+ let block = unsafe { &*block_ptr };
+
+ // Calculate the distance between the tail ptr and the target block
+ let distance = block.distance(start_index);
+
+ // Decide if this call to `find_block` should attempt to update the
+ // `block_tail` pointer.
+ //
+ // Updating `block_tail` is not always performed in order to reduce
+ // contention.
+ //
+ // When set, as the routine walks the linked list, it attempts to update
+ // `block_tail`. If the update cannot be performed, `try_updating_tail`
+ // is unset.
+ let mut try_updating_tail = distance > offset;
+
+ // Walk the linked list of blocks until the block with `start_index` is
+ // found.
+ loop {
+ let block = unsafe { &(*block_ptr) };
+
+ if block.is_at_index(start_index) {
+ return unsafe { NonNull::new_unchecked(block_ptr) };
+ }
+
+ let next_block = block
+ .load_next(Acquire)
+ // There is no allocated next block, grow the linked list.
+ .unwrap_or_else(|| block.grow());
+
+ // If the block is **not** final, then the tail pointer cannot be
+ // advanced any more.
+ try_updating_tail &= block.is_final();
+
+ if try_updating_tail {
+ // Advancing `block_tail` must happen when walking the linked
+ // list. `block_tail` may not advance past any blocks that are
+ // not "final". At the point a block is finalized, it is unknown
+ // if there are any prior blocks that are unfinalized, which
+ // makes it impossible to advance `block_tail`.
+ //
+ // While walking the linked list, `block_tail` can be advanced
+ // as long as finalized blocks are traversed.
+ //
+ // Release ordering is used to ensure that any subsequent reads
+ // are able to see the memory pointed to by `block_tail`.
+ //
+ // Acquire is not needed, as no "actual" value is accessed here.
+ // At this point, the linked list is walked to acquire blocks.
+ let actual =
+ self.block_tail
+ .compare_and_swap(block_ptr, next_block.as_ptr(), Release);
+
+ if actual == block_ptr {
+ // Synchronize with any senders
+ let tail_position = self.tail_position.fetch_add(0, Release);
+
+ unsafe {
+ block.tx_release(tail_position);
+ }
+ } else {
+ // A concurrent sender is also working on advancing
+ // `block_tail` and this thread is falling behind.
+ //
+ // Stop trying to advance the tail pointer
+ try_updating_tail = false;
+ }
+ }
+
+ block_ptr = next_block.as_ptr();
+
+ thread::yield_now();
+ }
+ }
+
+ pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull<Block<T>>) {
+ // The block has been removed from the linked list and ownership
+ // is reclaimed.
+ //
+ // Before dropping the block, see if it can be reused by
+ // inserting it back at the end of the linked list.
+ //
+ // First, reset the data
+ block.as_mut().reclaim();
+
+ let mut reused = false;
+
+ // Attempt to insert the block at the end of the linked list,
+ // walking forward at most three nodes before giving up.
+ let curr_ptr = self.block_tail.load(Acquire);
+
+ // The pointer can never be null
+ debug_assert!(!curr_ptr.is_null());
+
+ let mut curr = NonNull::new_unchecked(curr_ptr);
+
+ // TODO: Unify this logic with Block::grow
+ for _ in 0..3 {
+ match curr.as_ref().try_push(&mut block, AcqRel) {
+ Ok(_) => {
+ reused = true;
+ break;
+ }
+ Err(next) => {
+ curr = next;
+ }
+ }
+ }
+
+ if !reused {
+ let _ = Box::from_raw(block.as_ptr());
+ }
+ }
+}
+
+impl<T> fmt::Debug for Tx<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Tx")
+ .field("block_tail", &self.block_tail.load(Relaxed))
+ .field("tail_position", &self.tail_position.load(Relaxed))
+ .finish()
+ }
+}
+
+impl<T> Rx<T> {
+ /// Pops the next value off the queue
+ pub(crate) fn pop(&mut self, tx: &Tx<T>) -> Option<block::Read<T>> {
+ // Advance `head`, if needed
+ if !self.try_advancing_head() {
+ return None;
+ }
+
+ self.reclaim_blocks(tx);
+
+ unsafe {
+ let block = self.head.as_ref();
+
+ let ret = block.read(self.index);
+
+ if let Some(block::Read::Value(..)) = ret {
+ self.index = self.index.wrapping_add(1);
+ }
+
+ ret
+ }
+ }
+
+ /// Tries advancing the block pointer to the block referenced by `self.index`.
+ ///
+ /// Returns `true` if successful, `false` if there is no next block to load.
+ fn try_advancing_head(&mut self) -> bool {
+ let block_index = block::start_index(self.index);
+
+ loop {
+ let next_block = {
+ let block = unsafe { self.head.as_ref() };
+
+ if block.is_at_index(block_index) {
+ return true;
+ }
+
+ block.load_next(Acquire)
+ };
+
+ let next_block = match next_block {
+ Some(next_block) => next_block,
+ None => {
+ return false;
+ }
+ };
+
+ self.head = next_block;
+
+ thread::yield_now();
+ }
+ }
+
+ fn reclaim_blocks(&mut self, tx: &Tx<T>) {
+ while self.free_head != self.head {
+ unsafe {
+ // Get a handle to the block that will be freed and update
+ // `free_head` to point to the next block.
+ let block = self.free_head;
+
+ let observed_tail_position = block.as_ref().observed_tail_position();
+
+ let required_index = match observed_tail_position {
+ Some(i) => i,
+ None => return,
+ };
+
+ if required_index > self.index {
+ return;
+ }
+
+ // We may read the next pointer with `Relaxed` ordering as it is
+ // guaranteed that the `reclaim_blocks` routine trails the `recv`
+ // routine. Any memory accessed by `reclaim_blocks` has already
+ // been acquired by `recv`.
+ let next_block = block.as_ref().load_next(Relaxed);
+
+ // Update the free list head
+ self.free_head = next_block.unwrap();
+
+ // Push the emptied block onto the back of the queue, making it
+ // available to senders.
+ tx.reclaim_block(block);
+ }
+
+ thread::yield_now();
+ }
+ }
+
+ /// Effectively `Drop`s all the blocks. Should only be called once, when
+ /// the list is dropping.
+ pub(super) unsafe fn free_blocks(&mut self) {
+ debug_assert_ne!(self.free_head, NonNull::dangling());
+
+ let mut cur = Some(self.free_head);
+
+ #[cfg(debug_assertions)]
+ {
+ // Dangle the pointers so that the debug assert above catches a
+ // second call to `free_blocks`.
+ self.free_head = NonNull::dangling();
+ self.head = NonNull::dangling();
+ }
+
+ while let Some(block) = cur {
+ cur = block.as_ref().load_next(Relaxed);
+ drop(Box::from_raw(block.as_ptr()));
+ }
+ }
+}
+
+impl<T> fmt::Debug for Rx<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Rx")
+ .field("head", &self.head)
+ .field("index", &self.index)
+ .field("free_head", &self.free_head)
+ .finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mpsc/mod.rs b/third_party/rust/tokio/src/sync/mpsc/mod.rs
new file mode 100644
index 0000000000..4cfd6150f3
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/mod.rs
@@ -0,0 +1,64 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
+//! A multi-producer, single-consumer queue for sending values across
+//! asynchronous tasks.
+//!
+//! Similar to `std`, channel creation provides [`Receiver`] and [`Sender`]
+//! handles. [`Receiver`] implements `Stream` and allows a task to read values
+//! out of the channel. If there is no message to read, the current task will be
+//! notified when a new value is sent. [`Sender`] allows sending messages into
+//! the channel. If the channel is at capacity,
+//! the send is rejected and the task will be notified when additional capacity
+//! is available. In other words, the channel provides backpressure.
+//!
+//! Unbounded channels are also available using the `unbounded_channel`
+//! constructor.
+//!
+//! # Disconnection
+//!
+//! When all [`Sender`] handles have been dropped, it is no longer
+//! possible to send values into the channel. This is considered the termination
+//! event of the stream. As such, `Receiver::poll_recv` returns `Ready(None)`.
+//!
+//! If the [`Receiver`] handle is dropped, then messages can no longer
+//! be read out of the channel. In this case, all further attempts to send will
+//! result in an error.
+//!
+//! # Clean Shutdown
+//!
+//! When the [`Receiver`] is dropped, it is possible for unprocessed messages to
+//! remain in the channel. Instead, it is usually desirable to perform a "clean"
+//! shutdown. To do this, the receiver first calls `close`, which will prevent
+//! any further messages from being sent into the channel. Then, the receiver
+//! consumes the channel to completion, at which point the receiver can be
+//! dropped.
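+//!
+//! A minimal sketch of such a shutdown (using a bounded channel of `u32`s):
+//!
+//! ```
+//! use tokio::sync::mpsc;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (mut tx, mut rx) = mpsc::channel::<u32>(8);
+//! tx.send(1).await.unwrap();
+//!
+//! // `close` prevents further sends...
+//! rx.close();
+//!
+//! // ...but values already in the channel can still be drained.
+//! while let Some(value) = rx.recv().await {
+//! println!("draining {}", value);
+//! }
+//! }
+//! ```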
+//!
+//! [`Sender`]: crate::sync::mpsc::Sender
+//! [`Receiver`]: crate::sync::mpsc::Receiver
+
+pub(super) mod block;
+
+mod bounded;
+pub use self::bounded::{channel, Receiver, Sender};
+
+mod chan;
+
+pub(super) mod list;
+
+mod unbounded;
+pub use self::unbounded::{unbounded_channel, UnboundedReceiver, UnboundedSender};
+
+pub mod error;
+
+/// The number of values a block can contain.
+///
+/// This value must be a power of 2. It also must be smaller than the number of
+/// bits in `usize`.
+#[cfg(all(target_pointer_width = "64", not(loom)))]
+const BLOCK_CAP: usize = 32;
+
+#[cfg(all(not(target_pointer_width = "64"), not(loom)))]
+const BLOCK_CAP: usize = 16;
+
+#[cfg(loom)]
+const BLOCK_CAP: usize = 2;
diff --git a/third_party/rust/tokio/src/sync/mpsc/unbounded.rs b/third_party/rust/tokio/src/sync/mpsc/unbounded.rs
new file mode 100644
index 0000000000..ba543fe4c8
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mpsc/unbounded.rs
@@ -0,0 +1,176 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::sync::mpsc::chan;
+use crate::sync::mpsc::error::{SendError, TryRecvError};
+
+use std::fmt;
+use std::task::{Context, Poll};
+
+/// Send values to the associated `UnboundedReceiver`.
+///
+/// Instances are created by the
+/// [`unbounded_channel`](unbounded_channel) function.
+pub struct UnboundedSender<T> {
+ chan: chan::Tx<T, Semaphore>,
+}
+
+impl<T> Clone for UnboundedSender<T> {
+ fn clone(&self) -> Self {
+ UnboundedSender {
+ chan: self.chan.clone(),
+ }
+ }
+}
+
+impl<T> fmt::Debug for UnboundedSender<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("UnboundedSender")
+ .field("chan", &self.chan)
+ .finish()
+ }
+}
+
+/// Receive values from the associated `UnboundedSender`.
+///
+/// Instances are created by the
+/// [`unbounded_channel`](unbounded_channel) function.
+pub struct UnboundedReceiver<T> {
+ /// The channel receiver
+ chan: chan::Rx<T, Semaphore>,
+}
+
+impl<T> fmt::Debug for UnboundedReceiver<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("UnboundedReceiver")
+ .field("chan", &self.chan)
+ .finish()
+ }
+}
+
+/// Creates an unbounded mpsc channel for communicating between asynchronous
+/// tasks.
+///
+/// A `send` on this channel will always succeed as long as the receive half has
+/// not been closed. If the receiver falls behind, messages will be arbitrarily
+/// buffered.
+///
+/// **Note** that the amount of available system memory is an implicit bound on
+/// the channel. Using an `unbounded` channel can cause the process to run out
+/// of memory. In this case, the process will be aborted.
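+///
+/// # Examples
+///
+/// A minimal sketch:
+///
+/// ```
+/// use tokio::sync::mpsc;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, mut rx) = mpsc::unbounded_channel();
+///
+/// tx.send("hello").unwrap();
+/// assert_eq!(Some("hello"), rx.recv().await);
+/// }
+/// ```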
+pub fn unbounded_channel<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+ let (tx, rx) = chan::channel(AtomicUsize::new(0));
+
+ let tx = UnboundedSender::new(tx);
+ let rx = UnboundedReceiver::new(rx);
+
+ (tx, rx)
+}
+
+/// No capacity
+type Semaphore = AtomicUsize;
+
+impl<T> UnboundedReceiver<T> {
+ pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> UnboundedReceiver<T> {
+ UnboundedReceiver { chan }
+ }
+
+ #[doc(hidden)] // TODO: doc
+ pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ self.chan.recv(cx)
+ }
+
+ /// Receives the next value for this receiver.
+ ///
+ /// `None` is returned when all `Sender` halves have been dropped, indicating
+ /// that no further values can be sent on the channel.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = mpsc::unbounded_channel();
+ ///
+ /// tokio::spawn(async move {
+ /// tx.send("hello").unwrap();
+ /// });
+ ///
+ /// assert_eq!(Some("hello"), rx.recv().await);
+ /// assert_eq!(None, rx.recv().await);
+ /// }
+ /// ```
+ ///
+ /// Values are buffered:
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = mpsc::unbounded_channel();
+ ///
+ /// tx.send("hello").unwrap();
+ /// tx.send("world").unwrap();
+ ///
+ /// assert_eq!(Some("hello"), rx.recv().await);
+ /// assert_eq!(Some("world"), rx.recv().await);
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Option<T> {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ /// Attempts to return a pending value on this receiver without blocking.
+ ///
+ /// This method will never block the caller in order to wait for data to
+ /// become available. Instead, it always returns immediately with any value
+ /// that is currently pending on the channel.
+ ///
+ /// This is useful for a flavor of "optimistic check" before deciding to
+ /// block on a receiver.
+ ///
+ /// Compared with `recv`, this function has two failure cases instead of
+ /// one (one for disconnection, one for an empty buffer).
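+ ///
+ /// A minimal sketch (no runtime is needed, since `try_recv` never waits):
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ /// use tokio::sync::mpsc::error::TryRecvError;
+ ///
+ /// let (tx, mut rx) = mpsc::unbounded_channel();
+ ///
+ /// assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
+ /// tx.send(7).unwrap();
+ /// assert_eq!(Ok(7), rx.try_recv());
+ /// ```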
+ pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
+ self.chan.try_recv()
+ }
+
+ /// Closes the receiving half of a channel, without dropping it.
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ self.chan.close();
+ }
+}
+
+#[cfg(feature = "stream")]
+impl<T> crate::stream::Stream for UnboundedReceiver<T> {
+ type Item = T;
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ self.poll_recv(cx)
+ }
+}
+
+impl<T> UnboundedSender<T> {
+ pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> UnboundedSender<T> {
+ UnboundedSender { chan }
+ }
+
+ /// Attempts to send a message on this `UnboundedSender` without blocking.
+ ///
+ /// If the receive half of the channel is closed, either due to [`close`]
+ /// being called or the [`UnboundedReceiver`] having been dropped,
+ /// the function returns an error. The error includes the value passed to `send`.
+ ///
+ /// [`close`]: UnboundedReceiver::close
+ /// [`UnboundedReceiver`]: UnboundedReceiver
+ pub fn send(&self, message: T) -> Result<(), SendError<T>> {
+ self.chan.send_unbounded(message)?;
+ Ok(())
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/mutex.rs b/third_party/rust/tokio/src/sync/mutex.rs
new file mode 100644
index 0000000000..7167906de1
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/mutex.rs
@@ -0,0 +1,228 @@
+//! An asynchronous `Mutex`-like type.
+//!
+//! This module provides [`Mutex`], a type that acts similarly to `std::sync::Mutex`, with one
+//! major difference: `lock` is an `async` method, so the [`MutexGuard`] returned by `lock` can be
+//! held across `.await` points. This enables you to acquire a lock, pass that guard into a
+//! future, and release it at some later point in time.
+//!
+//! This allows you to do something along the lines of:
+//!
+//! ```rust,no_run
+//! use tokio::sync::Mutex;
+//! use std::sync::Arc;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let data1 = Arc::new(Mutex::new(0));
+//! let data2 = Arc::clone(&data1);
+//!
+//! tokio::spawn(async move {
+//! let mut lock = data2.lock().await;
+//! *lock += 1;
+//! });
+//!
+//! let mut lock = data1.lock().await;
+//! *lock += 1;
+//! }
+//! ```
+//!
+//! Another example
+//! ```rust,no_run
+//! #![warn(rust_2018_idioms)]
+//!
+//! use tokio::sync::Mutex;
+//! use std::sync::Arc;
+//!
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let count = Arc::new(Mutex::new(0));
+//!
+//! for _ in 0..5 {
+//! let my_count = Arc::clone(&count);
+//! tokio::spawn(async move {
+//! for _ in 0..10 {
+//! let mut lock = my_count.lock().await;
+//! *lock += 1;
+//! println!("{}", lock);
+//! }
+//! });
+//! }
+//!
+//! loop {
+//! if *count.lock().await >= 50 {
+//! break;
+//! }
+//! }
+//! println!("Count hit 50.");
+//! }
+//! ```
+//! There are a few things of note here to pay attention to in this example.
+//! 1. The mutex is wrapped in an [`std::sync::Arc`] to allow it to be shared across threads.
+//! 2. Each spawned task obtains a lock and releases it on every iteration.
+//! 3. Mutation of the data the Mutex is protecting is done by dereferencing the obtained lock,
+//! as seen on lines 23 and 30.
+//!
+//! Tokio's Mutex works in a simple FIFO (first in, first out) style: as requests for a lock are
+//! made, Tokio queues them up and provides the lock when it is that requester's turn. In that way
+//! the Mutex is "fair" and predictable in how it distributes access to the inner data, which is
+//! why the output of this program is an in-order count to 50. Locks are released and reacquired
+//! after every iteration, so in effect each task goes to the back of the line after it increments
+//! the value once. Also, since there is only a single valid lock at any given time, there is no
+//! possibility of a race condition when mutating the inner value.
+//!
+//! Note that in contrast to `std::sync::Mutex`, this implementation does not
+//! poison the mutex when a thread holding the `MutexGuard` panics. In such a
+//! case, the mutex will be unlocked. If the panic is caught, this might leave
+//! the data protected by the mutex in an inconsistent state.
+//!
+//! [`Mutex`]: struct@Mutex
+//! [`MutexGuard`]: struct@MutexGuard
+use crate::coop::CoopFutureExt;
+use crate::sync::batch_semaphore as semaphore;
+
+use std::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
+use std::ops::{Deref, DerefMut};
+
+/// An asynchronous mutual exclusion primitive useful for protecting shared data
+///
+/// Each mutex has a type parameter (`T`) which represents the data that it is protecting. The data
+/// can only be accessed through the RAII guards returned from `lock`, which
+/// guarantees that the data is only ever accessed when the mutex is locked.
+#[derive(Debug)]
+pub struct Mutex<T> {
+ c: UnsafeCell<T>,
+ s: semaphore::Semaphore,
+}
+
+/// A handle to a held `Mutex`.
+///
+/// As long as you have this guard, you have exclusive access to the underlying `T`. The guard
+/// internally borrows the original `Mutex`, so the guard cannot outlive the
+/// `Mutex` from which it was created.
+///
+/// The lock is automatically released whenever the guard is dropped, at which point `lock`
+/// will succeed yet again.
+pub struct MutexGuard<'a, T> {
+ lock: &'a Mutex<T>,
+}
+
+// As long as T: Send, it's fine to send and share Mutex<T> between threads.
+// If T was not Send, sending and sharing a Mutex<T> would be bad, since you can access T through
+// Mutex<T>.
+unsafe impl<T> Send for Mutex<T> where T: Send {}
+unsafe impl<T> Sync for Mutex<T> where T: Send {}
+unsafe impl<'a, T> Sync for MutexGuard<'a, T> where T: Send + Sync {}
+
+/// Error returned from the [`Mutex::try_lock`] function.
+///
+/// A `try_lock` operation can only fail if the mutex is already locked.
+///
+/// [`Mutex::try_lock`]: Mutex::try_lock
+#[derive(Debug)]
+pub struct TryLockError(());
+
+impl fmt::Display for TryLockError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "operation would block")
+ }
+}
+
+impl Error for TryLockError {}
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+ fn check_send<T: Send>() {}
+ fn check_unpin<T: Unpin>() {}
+ // This has to take a value, since the async fn's return type is unnameable.
+ fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+ fn check_send_sync<T: Send + Sync>() {}
+ check_send::<MutexGuard<'_, u32>>();
+ check_unpin::<Mutex<u32>>();
+ check_send_sync::<Mutex<u32>>();
+
+ let mutex = Mutex::new(1);
+ check_send_sync_val(mutex.lock());
+}
+
+impl<T> Mutex<T> {
+ /// Creates a new lock in an unlocked state ready for use.
+ pub fn new(t: T) -> Self {
+ Self {
+ c: UnsafeCell::new(t),
+ s: semaphore::Semaphore::new(1),
+ }
+ }
+
+ /// A future that resolves on acquiring the lock and returns the `MutexGuard`.
+ pub async fn lock(&self) -> MutexGuard<'_, T> {
+ self.s.acquire(1).cooperate().await.unwrap_or_else(|_| {
+ // The semaphore was closed, but we never explicitly close it,
+ // which means that this can never happen.
+ unreachable!()
+ });
+ MutexGuard { lock: self }
+ }
+
+ /// Tries to acquire the lock.
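+ ///
+ /// If the lock is currently held, this returns [`TryLockError`] instead of
+ /// waiting. A minimal sketch:
+ ///
+ /// ```
+ /// use tokio::sync::Mutex;
+ ///
+ /// let mutex = Mutex::new(1);
+ ///
+ /// let mut guard = mutex.try_lock().unwrap();
+ /// *guard += 1;
+ ///
+ /// // A second `try_lock` fails while the first guard is still alive.
+ /// assert!(mutex.try_lock().is_err());
+ /// ```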
+ pub fn try_lock(&self) -> Result<MutexGuard<'_, T>, TryLockError> {
+ match self.s.try_acquire(1) {
+ Ok(_) => Ok(MutexGuard { lock: self }),
+ Err(_) => Err(TryLockError(())),
+ }
+ }
+
+ /// Consumes the mutex, returning the underlying data.
+ pub fn into_inner(self) -> T {
+ self.c.into_inner()
+ }
+}
+
+impl<'a, T> Drop for MutexGuard<'a, T> {
+ fn drop(&mut self) {
+ self.lock.s.release(1)
+ }
+}
+
+impl<T> From<T> for Mutex<T> {
+ fn from(s: T) -> Self {
+ Self::new(s)
+ }
+}
+
+impl<T> Default for Mutex<T>
+where
+ T: Default,
+{
+ fn default() -> Self {
+ Self::new(T::default())
+ }
+}
+
+impl<'a, T> Deref for MutexGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.lock.c.get() }
+ }
+}
+
+impl<'a, T> DerefMut for MutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *self.lock.c.get() }
+ }
+}
+
+impl<'a, T: fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: fmt::Display> fmt::Display for MutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/notify.rs b/third_party/rust/tokio/src/sync/notify.rs
new file mode 100644
index 0000000000..5cb41e89ea
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/notify.rs
@@ -0,0 +1,556 @@
+use crate::loom::sync::atomic::AtomicU8;
+use crate::loom::sync::Mutex;
+use crate::util::linked_list::{self, LinkedList};
+
+use std::cell::UnsafeCell;
+use std::future::Future;
+use std::marker::PhantomPinned;
+use std::pin::Pin;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::SeqCst;
+use std::task::{Context, Poll, Waker};
+
+/// Notify a single task to wake up.
+///
+/// `Notify` provides a basic mechanism to notify a single task of an event.
+/// `Notify` itself does not carry any data. Instead, it is to be used to signal
+/// another task to perform an operation.
+///
+/// `Notify` can be thought of as a [`Semaphore`] starting with 0 permits.
+/// [`notified().await`] waits for a permit to become available, and [`notify()`]
+/// sets a permit **if there currently are no available permits**.
+///
+/// The synchronization details of `Notify` are similar to
+/// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`]
+/// value contains a single permit. [`notified().await`] waits for the permit to
+/// be made available, consumes the permit, and resumes. [`notify()`] sets the
+/// permit, waking a pending task if there is one.
+///
+/// If `notify()` is called **before** `notified().await`, then the next call to
+/// `notified().await` will complete immediately, consuming the permit. Any
+/// subsequent calls to `notified().await` will wait for a new permit.
+///
+/// If `notify()` is called **multiple** times before `notified().await`, only a
+/// **single** permit is stored. The next call to `notified().await` will
+/// complete immediately, but the one after will wait for a new permit.
+///
+/// # Examples
+///
+/// Basic usage.
+///
+/// ```
+/// use tokio::sync::Notify;
+/// use std::sync::Arc;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let notify = Arc::new(Notify::new());
+/// let notify2 = notify.clone();
+///
+/// tokio::spawn(async move {
+/// notify2.notified().await;
+/// println!("received notification");
+/// });
+///
+/// println!("sending notification");
+/// notify.notify();
+/// }
+/// ```
+///
+/// Unbounded mpsc channel.
+///
+/// ```
+/// use tokio::sync::Notify;
+///
+/// use std::collections::VecDeque;
+/// use std::sync::Mutex;
+///
+/// struct Channel<T> {
+/// values: Mutex<VecDeque<T>>,
+/// notify: Notify,
+/// }
+///
+/// impl<T> Channel<T> {
+/// pub fn send(&self, value: T) {
+/// self.values.lock().unwrap()
+/// .push_back(value);
+///
+/// // Notify the consumer a value is available
+/// self.notify.notify();
+/// }
+///
+/// pub async fn recv(&self) -> T {
+/// loop {
+/// // Drain values
+/// if let Some(value) = self.values.lock().unwrap().pop_front() {
+/// return value;
+/// }
+///
+/// // Wait for values to be available
+/// self.notify.notified().await;
+/// }
+/// }
+/// }
+/// ```
+///
+/// [park]: std::thread::park
+/// [unpark]: std::thread::Thread::unpark
+/// [`notified().await`]: Notify::notified()
+/// [`notify()`]: Notify::notify()
+/// [`Semaphore`]: crate::sync::Semaphore
+#[derive(Debug)]
+pub struct Notify {
+ state: AtomicU8,
+ waiters: Mutex<LinkedList<Waiter>>,
+}
+
+#[derive(Debug)]
+struct Waiter {
+ /// Intrusive linked-list pointers
+ pointers: linked_list::Pointers<Waiter>,
+
+ /// Waiting task's waker
+ waker: Option<Waker>,
+
+ /// `true` if the notification has been assigned to this waiter.
+ notified: bool,
+
+ /// Should not be `Unpin`.
+ _p: PhantomPinned,
+}
+
+/// Future returned from `notified()`
+#[derive(Debug)]
+struct Notified<'a> {
+ /// The `Notify` being received on.
+ notify: &'a Notify,
+
+ /// The current state of the receiving process.
+ state: State,
+
+ /// Entry in the waiter `LinkedList`.
+ waiter: UnsafeCell<Waiter>,
+}
+
+unsafe impl<'a> Send for Notified<'a> {}
+unsafe impl<'a> Sync for Notified<'a> {}
+
+#[derive(Debug)]
+enum State {
+ Init,
+ Waiting,
+ Done,
+}
+
+/// Initial "idle" state
+const EMPTY: u8 = 0;
+
+/// One or more threads are currently waiting to be notified.
+const WAITING: u8 = 1;
+
+/// Pending notification
+const NOTIFIED: u8 = 2;
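+
+// State transitions, roughly: `notify()` moves EMPTY -> NOTIFIED (storing the
+// permit) or, when in WAITING, hands the permit directly to a parked waiter;
+// `notified().await` consumes NOTIFIED -> EMPTY, or parks the task and moves
+// EMPTY -> WAITING.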
+
+impl Notify {
+ /// Create a new `Notify`, initialized without a permit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::Notify;
+ ///
+ /// let notify = Notify::new();
+ /// ```
+ pub fn new() -> Notify {
+ Notify {
+ state: AtomicU8::new(0),
+ waiters: Mutex::new(LinkedList::new()),
+ }
+ }
+
+ /// Wait for a notification.
+ ///
+ /// Each `Notify` value holds a single permit. If a permit is available from
+ /// an earlier call to [`notify()`], then `notified().await` will complete
+ /// immediately, consuming that permit. Otherwise, `notified().await` waits
+ /// for a permit to be made available by the next call to `notify()`.
+ ///
+ /// [`notify()`]: Notify::notify
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::Notify;
+ /// use std::sync::Arc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let notify = Arc::new(Notify::new());
+ /// let notify2 = notify.clone();
+ ///
+ /// tokio::spawn(async move {
+ /// notify2.notified().await;
+ /// println!("received notification");
+ /// });
+ ///
+ /// println!("sending notification");
+ /// notify.notify();
+ /// }
+ /// ```
+ pub async fn notified(&self) {
+ Notified {
+ notify: self,
+ state: State::Init,
+ waiter: UnsafeCell::new(Waiter {
+ pointers: linked_list::Pointers::new(),
+ waker: None,
+ notified: false,
+ _p: PhantomPinned,
+ }),
+ }
+ .await
+ }
+
+ /// Notifies a waiting task
+ ///
+ /// If a task is currently waiting, that task is notified. Otherwise, a
+ /// permit is stored in this `Notify` value and the **next** call to
+ /// [`notified().await`] will complete immediately consuming the permit made
+ /// available by this call to `notify()`.
+ ///
+ /// At most one permit may be stored by `Notify`. Many sequential calls to
+ /// `notify` will result in a single permit being stored. The next call to
+ /// `notified().await` will complete immediately, but the one after that
+ /// will wait.
+ ///
+ /// [`notified().await`]: Notify::notified()
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::Notify;
+ /// use std::sync::Arc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let notify = Arc::new(Notify::new());
+ /// let notify2 = notify.clone();
+ ///
+ /// tokio::spawn(async move {
+ /// notify2.notified().await;
+ /// println!("received notification");
+ /// });
+ ///
+ /// println!("sending notification");
+ /// notify.notify();
+ /// }
+ /// ```
+ pub fn notify(&self) {
+ // Load the current state
+ let mut curr = self.state.load(SeqCst);
+
+ // If the state is `EMPTY`, transition to `NOTIFIED` and return.
+ while let EMPTY | NOTIFIED = curr {
+ // The compare-exchange from `NOTIFIED` -> `NOTIFIED` is intended. A
+ // happens-before synchronization must happen between this atomic
+ // operation and a task calling `notified().await`.
+ let res = self.state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst);
+
+ match res {
+ // No waiters, no further work to do
+ Ok(_) => return,
+ Err(actual) => {
+ curr = actual;
+ }
+ }
+ }
+
+ // There are waiters, the lock must be acquired to notify.
+ let mut waiters = self.waiters.lock().unwrap();
+
+ // The state must be reloaded while the lock is held. The state may only
+ // transition out of WAITING while the lock is held.
+ curr = self.state.load(SeqCst);
+
+ if let Some(waker) = notify_locked(&mut waiters, &self.state, curr) {
+ drop(waiters);
+ waker.wake();
+ }
+ }
+}
+
+impl Default for Notify {
+ fn default() -> Notify {
+ Notify::new()
+ }
+}
+
+fn notify_locked(waiters: &mut LinkedList<Waiter>, state: &AtomicU8, curr: u8) -> Option<Waker> {
+ loop {
+ match curr {
+ EMPTY | NOTIFIED => {
+ let res = state.compare_exchange(curr, NOTIFIED, SeqCst, SeqCst);
+
+ match res {
+ Ok(_) => return None,
+ Err(actual) => {
+ assert!(actual == EMPTY || actual == NOTIFIED);
+ state.store(NOTIFIED, SeqCst);
+ return None;
+ }
+ }
+ }
+ WAITING => {
+ // At this point, it is guaranteed that the state will not
+ // concurrently change as holding the lock is required to
+ // transition **out** of `WAITING`.
+ //
+ // Get a pending waiter
+ let mut waiter = waiters.pop_back().unwrap();
+
+ // Safety: `waiters` lock is still held.
+ let waiter = unsafe { waiter.as_mut() };
+
+ assert!(!waiter.notified);
+
+ waiter.notified = true;
+ let waker = waiter.waker.take();
+
+ if waiters.is_empty() {
+ // As this is the **final** waiter in the list, the state
+ // must be transitioned to `EMPTY`. As transitioning
+ // **from** `WAITING` requires the lock to be held, a
+ // `store` is sufficient.
+ state.store(EMPTY, SeqCst);
+ }
+
+ return waker;
+ }
+ _ => unreachable!(),
+ }
+ }
+}
+
+// ===== impl Notified =====
+
+impl Notified<'_> {
+ /// A custom `project` implementation is used in place of `pin-project-lite`
+ /// as a custom drop implementation is needed.
+ fn project(self: Pin<&mut Self>) -> (&Notify, &mut State, &UnsafeCell<Waiter>) {
+ unsafe {
+ // Safety: both `notify` and `state` are `Unpin`.
+
+ is_unpin::<&Notify>();
+ is_unpin::<AtomicU8>();
+
+ let me = self.get_unchecked_mut();
+ (&me.notify, &mut me.state, &me.waiter)
+ }
+ }
+}
+
+impl Future for Notified<'_> {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ use State::*;
+
+ let (notify, state, waiter) = self.project();
+
+ loop {
+ match *state {
+ Init => {
+ // Optimistically try acquiring a pending notification
+ let res = notify
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst);
+
+ if res.is_ok() {
+ // Acquired the notification
+ *state = Done;
+ return Poll::Ready(());
+ }
+
+ // Acquire the lock and attempt to transition to the waiting
+ // state.
+ let mut waiters = notify.waiters.lock().unwrap();
+
+ // Reload the state with the lock held
+ let mut curr = notify.state.load(SeqCst);
+
+ // Transition the state to WAITING.
+ loop {
+ match curr {
+ EMPTY => {
+ // Transition to WAITING
+ let res = notify
+ .state
+ .compare_exchange(EMPTY, WAITING, SeqCst, SeqCst);
+
+ if let Err(actual) = res {
+ assert_eq!(actual, NOTIFIED);
+ curr = actual;
+ } else {
+ break;
+ }
+ }
+ WAITING => break,
+ NOTIFIED => {
+ // Try consuming the notification
+ let res = notify
+ .state
+ .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst);
+
+ match res {
+ Ok(_) => {
+ // Acquired the notification
+ *state = Done;
+ return Poll::Ready(());
+ }
+ Err(actual) => {
+ assert_eq!(actual, EMPTY);
+ curr = actual;
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ // Safety: called while locked.
+ unsafe {
+ (*waiter.get()).waker = Some(cx.waker().clone());
+ }
+
+ // Insert the waiter into the linked list
+ //
+ // safety: pointers from `UnsafeCell` are never null.
+ waiters.push_front(unsafe { NonNull::new_unchecked(waiter.get()) });
+
+ *state = Waiting;
+ }
+ Waiting => {
+ // Currently in the "Waiting" state, implying the caller has
+ // a waiter stored in the waiter list (guarded by
+ // `notify.waiters`). In order to access the waker fields,
+ // we must hold the lock.
+
+ let waiters = notify.waiters.lock().unwrap();
+
+ // Safety: called while locked
+ let w = unsafe { &mut *waiter.get() };
+
+ if w.notified {
+ // Our waker has been notified. Reset the fields and
+ // remove it from the list.
+ w.waker = None;
+ w.notified = false;
+
+ *state = Done;
+ } else {
+ // Update the waker, if necessary.
+ if !w.waker.as_ref().unwrap().will_wake(cx.waker()) {
+ w.waker = Some(cx.waker().clone());
+ }
+
+ return Poll::Pending;
+ }
+
+ // Explicit drop of the lock to indicate the scope that the
+ // lock is held. Because holding the lock is required to
+ // ensure safe access to fields not held within the lock, it
+ // is helpful to visualize the scope of the critical
+ // section.
+ drop(waiters);
+ }
+ Done => {
+ return Poll::Ready(());
+ }
+ }
+ }
+ }
+}
+
+impl Drop for Notified<'_> {
+ fn drop(&mut self) {
+ use State::*;
+
+ // Safety: The type only transitions to a "Waiting" state when pinned.
+ let (notify, state, waiter) = unsafe { Pin::new_unchecked(self).project() };
+
+ // This is where we ensure safety. The `Notified` value is being
+ // dropped, which means we must ensure that the waiter entry is no
+ // longer stored in the linked list.
+ if let Waiting = *state {
+ let mut notify_state = WAITING;
+ let mut waiters = notify.waiters.lock().unwrap();
+
+ // `Notify.state` may be in any of the three states (Empty, Waiting,
+ // Notified). It doesn't actually matter what the atomic is set to
+ // at this point. We hold the lock and will ensure the atomic is in
+ // the correct state once the lock is dropped.
+ //
+ // Because the atomic state is not checked, at first glance, it may
+ // seem like this routine does not handle the case where the
+ // receiver is notified but has not yet observed the notification.
+ // If this happens, no matter how many notifications happen between
+ // this receiver being notified and the receive future dropping, all
+ // we need to do is ensure that one notification is returned back to
+ // the `Notify`. This is done by calling `notify_locked` if `self`
+ // has the `notified` flag set.
+
+ // remove the entry from the list
+ //
+ // safety: the waiter is only added to `waiters` by virtue of it
+ // being the only `LinkedList` available to the type.
+ unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) };
+
+ if waiters.is_empty() {
+ notify_state = EMPTY;
+ // If the state *should* be `NOTIFIED`, the call to
+ // `notify_locked` below will end up doing the
+ // `store(NOTIFIED)`. If a concurrent receiver races and
+ // observes the incorrect `EMPTY` state, it will then obtain the
+ // lock and block until `notify.state` is in the correct final
+ // state.
+ notify.state.store(EMPTY, SeqCst);
+ }
+
+ // See if the node was notified but not received. In this case, the
+ // notification must be sent to another waiter.
+ //
+ // Safety: with the entry removed from the linked list, there can be
+ // no concurrent access to the entry
+ let notified = unsafe { (*waiter.get()).notified };
+
+ if notified {
+ if let Some(waker) = notify_locked(&mut waiters, &notify.state, notify_state) {
+ drop(waiters);
+ waker.wake();
+ }
+ }
+ }
+ }
+}
+
+/// # Safety
+///
+/// `Waiter` is forced to be !Unpin.
+unsafe impl linked_list::Link for Waiter {
+ type Handle = NonNull<Waiter>;
+ type Target = Waiter;
+
+ fn as_raw(handle: &NonNull<Waiter>) -> NonNull<Waiter> {
+ *handle
+ }
+
+ unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> {
+ ptr
+ }
+
+ unsafe fn pointers(mut target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
+ NonNull::from(&mut target.as_mut().pointers)
+ }
+}
+
+fn is_unpin<T: Unpin>() {}
diff --git a/third_party/rust/tokio/src/sync/oneshot.rs b/third_party/rust/tokio/src/sync/oneshot.rs
new file mode 100644
index 0000000000..62ad484eec
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/oneshot.rs
@@ -0,0 +1,784 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
+//! A channel for sending a single message between asynchronous tasks.
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::Arc;
+
+use std::fmt;
+use std::future::Future;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::sync::atomic::Ordering::{self, AcqRel, Acquire};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll, Waker};
+
+/// Sends a value to the associated `Receiver`.
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Sender<T> {
+ inner: Option<Arc<Inner<T>>>,
+}
+
+/// Receive a value from the associated `Sender`.
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Receiver<T> {
+ inner: Option<Arc<Inner<T>>>,
+}
+
+pub mod error {
+ //! Oneshot error types
+
+ use std::fmt;
+
+ /// Error returned by the `Future` implementation for `Receiver`.
+ #[derive(Debug, Eq, PartialEq)]
+ pub struct RecvError(pub(super) ());
+
+ /// Error returned by the `try_recv` function on `Receiver`.
+ #[derive(Debug, Eq, PartialEq)]
+ pub enum TryRecvError {
+ /// The send half of the channel has not yet sent a value.
+ Empty,
+
+ /// The send half of the channel was dropped without sending a value.
+ Closed,
+ }
+
+ // ===== impl RecvError =====
+
+ impl fmt::Display for RecvError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "channel closed")
+ }
+ }
+
+ impl std::error::Error for RecvError {}
+
+ // ===== impl TryRecvError =====
+
+ impl fmt::Display for TryRecvError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ TryRecvError::Empty => write!(fmt, "channel empty"),
+ TryRecvError::Closed => write!(fmt, "channel closed"),
+ }
+ }
+ }
+
+ impl std::error::Error for TryRecvError {}
+}
+
+use self::error::*;
+
+struct Inner<T> {
+ /// Manages the state of the inner cell
+ state: AtomicUsize,
+
+ /// The value. This is set by `Sender` and read by `Receiver`. The state of
+ /// the cell is tracked by `state`.
+ value: UnsafeCell<Option<T>>,
+
+ /// The task to notify when the receiver drops without consuming the value.
+ tx_task: UnsafeCell<MaybeUninit<Waker>>,
+
+ /// The task to notify when the value is sent.
+ rx_task: UnsafeCell<MaybeUninit<Waker>>,
+}
+
+#[derive(Clone, Copy)]
+struct State(usize);
+
+/// Create a new one-shot channel for sending single values across asynchronous
+/// tasks.
+///
+/// The function returns separate "send" and "receive" handles. The `Sender`
+/// handle is used by the producer to send the value. The `Receiver` handle is
+/// used by the consumer to receive the value.
+///
+/// Each handle can be used on separate tasks.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::oneshot;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, rx) = oneshot::channel();
+///
+/// tokio::spawn(async move {
+/// if let Err(_) = tx.send(3) {
+/// println!("the receiver dropped");
+/// }
+/// });
+///
+/// match rx.await {
+/// Ok(v) => println!("got = {:?}", v),
+/// Err(_) => println!("the sender dropped"),
+/// }
+/// }
+/// ```
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ #[allow(deprecated)]
+ let inner = Arc::new(Inner {
+ state: AtomicUsize::new(State::new().as_usize()),
+ value: UnsafeCell::new(None),
+ tx_task: UnsafeCell::new(MaybeUninit::uninit()),
+ rx_task: UnsafeCell::new(MaybeUninit::uninit()),
+ });
+
+ let tx = Sender {
+ inner: Some(inner.clone()),
+ };
+ let rx = Receiver { inner: Some(inner) };
+
+ (tx, rx)
+}
+
+impl<T> Sender<T> {
+ /// Attempts to send a value on this channel, returning it back if it could
+ /// not be sent.
+ ///
+ /// The function consumes `self` as only one value may ever be sent on a
+ /// one-shot channel.
+ ///
+ /// A successful send occurs when it is determined that the other end of the
+ /// channel has not hung up already. An unsuccessful send would be one where
+ /// the corresponding receiver has already been deallocated. Note that a
+ /// return value of `Err` means that the data will never be received, but
+ /// a return value of `Ok` does *not* mean that the data will be received.
+ /// It is possible for the corresponding receiver to hang up immediately
+ /// after this function returns `Ok`.
+ ///
+ /// # Examples
+ ///
+ /// Send a value to another task
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, rx) = oneshot::channel();
+ ///
+ /// tokio::spawn(async move {
+ /// if let Err(_) = tx.send(3) {
+ /// println!("the receiver dropped");
+ /// }
+ /// });
+ ///
+ /// match rx.await {
+ /// Ok(v) => println!("got = {:?}", v),
+ /// Err(_) => println!("the sender dropped"),
+ /// }
+ /// }
+ /// ```
+ pub fn send(mut self, t: T) -> Result<(), T> {
+ let inner = self.inner.take().unwrap();
+
+ inner.value.with_mut(|ptr| unsafe {
+ *ptr = Some(t);
+ });
+
+ if !inner.complete() {
+ return Err(inner
+ .value
+ .with_mut(|ptr| unsafe { (*ptr).take() }.unwrap()));
+ }
+
+ Ok(())
+ }
+
+ #[doc(hidden)] // TODO: remove
+ pub fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ let inner = self.inner.as_ref().unwrap();
+
+ let mut state = State::load(&inner.state, Acquire);
+
+ if state.is_closed() {
+ return Poll::Ready(());
+ }
+
+ if state.is_tx_task_set() {
+ let will_notify = unsafe { inner.with_tx_task(|w| w.will_wake(cx.waker())) };
+
+ if !will_notify {
+ state = State::unset_tx_task(&inner.state);
+
+ if state.is_closed() {
+ // Set the flag again so that the waker is released in drop
+ State::set_tx_task(&inner.state);
+ return Ready(());
+ } else {
+ unsafe { inner.drop_tx_task() };
+ }
+ }
+ }
+
+ if !state.is_tx_task_set() {
+ // Attempt to set the task
+ unsafe {
+ inner.set_tx_task(cx);
+ }
+
+ // Update the state
+ state = State::set_tx_task(&inner.state);
+
+ if state.is_closed() {
+ return Ready(());
+ }
+ }
+
+ Pending
+ }
+
+ /// Waits for the associated [`Receiver`] handle to close.
+ ///
+ /// A [`Receiver`] is closed either by calling [`close`] explicitly or by
+ /// dropping the [`Receiver`] value.
+ ///
+ /// This function is useful when paired with `select!` to abort a
+ /// computation when the receiver is no longer interested in the result.
+ ///
+ /// # Return
+ ///
+ /// Returns a `Future` which must be awaited on.
+ ///
+ /// [`Receiver`]: Receiver
+ /// [`close`]: Receiver::close
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, rx) = oneshot::channel::<()>();
+ ///
+ /// tokio::spawn(async move {
+ /// drop(rx);
+ /// });
+ ///
+ /// tx.closed().await;
+ /// println!("the receiver dropped");
+ /// }
+ /// ```
+ ///
+ /// Paired with select
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ /// use tokio::time::{self, Duration};
+ ///
+ /// use futures::{select, FutureExt};
+ ///
+ /// async fn compute() -> String {
+ /// // Complex computation returning a `String`
+ /// # "hello".to_string()
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (mut tx, rx) = oneshot::channel();
+ ///
+ /// tokio::spawn(async move {
+ /// select! {
+ /// _ = tx.closed().fuse() => {
+ /// // The receiver dropped, no need to do any further work
+ /// }
+ /// value = compute().fuse() => {
+ /// tx.send(value).unwrap()
+ /// }
+ /// }
+ /// });
+ ///
+ /// // Wait for up to 10 seconds
+ /// let _ = time::timeout(Duration::from_secs(10), rx).await;
+ /// }
+ /// ```
+ pub async fn closed(&mut self) {
+ use crate::future::poll_fn;
+
+ poll_fn(|cx| self.poll_closed(cx)).await
+ }
+
+ /// Returns `true` if the associated [`Receiver`] handle has been dropped.
+ ///
+ /// A [`Receiver`] is closed either by calling [`close`] explicitly or by
+ /// dropping the [`Receiver`] value.
+ ///
+ /// If `true` is returned, a call to `send` will always result in an error.
+ ///
+ /// [`Receiver`]: Receiver
+ /// [`close`]: Receiver::close
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, rx) = oneshot::channel();
+ ///
+ /// assert!(!tx.is_closed());
+ ///
+ /// drop(rx);
+ ///
+ /// assert!(tx.is_closed());
+ /// assert!(tx.send("never received").is_err());
+ /// }
+ /// ```
+ pub fn is_closed(&self) -> bool {
+ let inner = self.inner.as_ref().unwrap();
+
+ let state = State::load(&inner.state, Acquire);
+ state.is_closed()
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ if let Some(inner) = self.inner.as_ref() {
+ inner.complete();
+ }
+ }
+}
+
+impl<T> Receiver<T> {
+ /// Prevents the associated [`Sender`] handle from sending a value.
+ ///
+ /// Any `send` operation which happens after calling `close` is guaranteed
+ /// to fail. After calling `close`, `Receiver::poll` should be called to
+ /// receive a value if one was sent **before** the call to `close`
+ /// completed.
+ ///
+ /// This function is useful to perform a graceful shutdown and ensure that a
+ /// value will not be sent into the channel and never received.
+ ///
+ /// [`Sender`]: Sender
+ ///
+ /// # Examples
+ ///
+ /// Prevent a value from being sent
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ /// use tokio::sync::oneshot::error::TryRecvError;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = oneshot::channel();
+ ///
+ /// assert!(!tx.is_closed());
+ ///
+ /// rx.close();
+ ///
+ /// assert!(tx.is_closed());
+ /// assert!(tx.send("never received").is_err());
+ ///
+ /// match rx.try_recv() {
+ /// Err(TryRecvError::Closed) => {}
+ /// _ => unreachable!(),
+ /// }
+ /// }
+ /// ```
+ ///
+ /// Receive a value sent **before** calling `close`
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = oneshot::channel();
+ ///
+ /// assert!(tx.send("will receive").is_ok());
+ ///
+ /// rx.close();
+ ///
+ /// let msg = rx.try_recv().unwrap();
+ /// assert_eq!(msg, "will receive");
+ /// }
+ /// ```
+ pub fn close(&mut self) {
+ let inner = self.inner.as_ref().unwrap();
+ inner.close();
+ }
+
+ /// Attempts to receive a value.
+ ///
+ /// If a pending value exists in the channel, it is returned. If no value
+ /// has been sent, the current task **will not** be registered for
+ /// future notification.
+ ///
+ /// This function is useful to call from outside the context of an
+ /// asynchronous task.
+ ///
+ /// # Return
+ ///
+ /// - `Ok(T)` if a value is pending in the channel.
+ /// - `Err(TryRecvError::Empty)` if no value has been sent yet.
+ /// - `Err(TryRecvError::Closed)` if the sender has dropped without sending
+ /// a value.
+ ///
+ /// # Examples
+ ///
+ /// `try_recv` before a value is sent, then after.
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ /// use tokio::sync::oneshot::error::TryRecvError;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = oneshot::channel();
+ ///
+ /// match rx.try_recv() {
+ /// // The channel is currently empty
+ /// Err(TryRecvError::Empty) => {}
+ /// _ => unreachable!(),
+ /// }
+ ///
+ /// // Send a value
+ /// tx.send("hello").unwrap();
+ ///
+ /// match rx.try_recv() {
+ /// Ok(value) => assert_eq!(value, "hello"),
+ /// _ => unreachable!(),
+ /// }
+ /// }
+ /// ```
+ ///
+ /// `try_recv` when the sender dropped before sending a value
+ ///
+ /// ```
+ /// use tokio::sync::oneshot;
+ /// use tokio::sync::oneshot::error::TryRecvError;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = oneshot::channel::<()>();
+ ///
+ /// drop(tx);
+ ///
+ /// match rx.try_recv() {
+ /// // The channel will never receive a value.
+ /// Err(TryRecvError::Closed) => {}
+ /// _ => unreachable!(),
+ /// }
+ /// }
+ /// ```
+ pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
+ let result = if let Some(inner) = self.inner.as_ref() {
+ let state = State::load(&inner.state, Acquire);
+
+ if state.is_complete() {
+ match unsafe { inner.consume_value() } {
+ Some(value) => Ok(value),
+ None => Err(TryRecvError::Closed),
+ }
+ } else if state.is_closed() {
+ Err(TryRecvError::Closed)
+ } else {
+ // Not ready, this does not clear `inner`
+ return Err(TryRecvError::Empty);
+ }
+ } else {
+ panic!("called after complete");
+ };
+
+ self.inner = None;
+ result
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ if let Some(inner) = self.inner.as_ref() {
+ inner.close();
+ }
+ }
+}
+
+impl<T> Future for Receiver<T> {
+ type Output = Result<T, RecvError>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // If `inner` is `None`, then `poll()` has already completed.
+ let ret = if let Some(inner) = self.as_ref().get_ref().inner.as_ref() {
+ ready!(inner.poll_recv(cx))?
+ } else {
+ panic!("called after complete");
+ };
+
+ self.inner = None;
+ Ready(Ok(ret))
+ }
+}
+
+impl<T> Inner<T> {
+ fn complete(&self) -> bool {
+ let prev = State::set_complete(&self.state);
+
+ if prev.is_closed() {
+ return false;
+ }
+
+ if prev.is_rx_task_set() {
+ // TODO: Consume waker?
+ unsafe {
+ self.with_rx_task(Waker::wake_by_ref);
+ }
+ }
+
+ true
+ }
+
+ fn poll_recv(&self, cx: &mut Context<'_>) -> Poll<Result<T, RecvError>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ // Load the state
+ let mut state = State::load(&self.state, Acquire);
+
+ if state.is_complete() {
+ match unsafe { self.consume_value() } {
+ Some(value) => Ready(Ok(value)),
+ None => Ready(Err(RecvError(()))),
+ }
+ } else if state.is_closed() {
+ Ready(Err(RecvError(())))
+ } else {
+ if state.is_rx_task_set() {
+ let will_notify = unsafe { self.with_rx_task(|w| w.will_wake(cx.waker())) };
+
+ // Check if the task is still the same
+ if !will_notify {
+ // Unset the task
+ state = State::unset_rx_task(&self.state);
+ if state.is_complete() {
+ // Set the flag again so that the waker is released in drop
+ State::set_rx_task(&self.state);
+
+ return match unsafe { self.consume_value() } {
+ Some(value) => Ready(Ok(value)),
+ None => Ready(Err(RecvError(()))),
+ };
+ } else {
+ unsafe { self.drop_rx_task() };
+ }
+ }
+ }
+
+ if !state.is_rx_task_set() {
+ // Attempt to set the task
+ unsafe {
+ self.set_rx_task(cx);
+ }
+
+ // Update the state
+ state = State::set_rx_task(&self.state);
+
+ if state.is_complete() {
+ match unsafe { self.consume_value() } {
+ Some(value) => Ready(Ok(value)),
+ None => Ready(Err(RecvError(()))),
+ }
+ } else {
+ Pending
+ }
+ } else {
+ Pending
+ }
+ }
+ }
+
+ /// Called by `Receiver` to indicate that the value will never be received.
+ fn close(&self) {
+ let prev = State::set_closed(&self.state);
+
+ if prev.is_tx_task_set() && !prev.is_complete() {
+ unsafe {
+ self.with_tx_task(Waker::wake_by_ref);
+ }
+ }
+ }
+
+ /// Consumes the value. This function does not check `state`.
+ unsafe fn consume_value(&self) -> Option<T> {
+ self.value.with_mut(|ptr| (*ptr).take())
+ }
+
+ unsafe fn with_rx_task<F, R>(&self, f: F) -> R
+ where
+ F: FnOnce(&Waker) -> R,
+ {
+ self.rx_task.with(|ptr| {
+ let waker: *const Waker = (&*ptr).as_ptr();
+ f(&*waker)
+ })
+ }
+
+ unsafe fn with_tx_task<F, R>(&self, f: F) -> R
+ where
+ F: FnOnce(&Waker) -> R,
+ {
+ self.tx_task.with(|ptr| {
+ let waker: *const Waker = (&*ptr).as_ptr();
+ f(&*waker)
+ })
+ }
+
+ unsafe fn drop_rx_task(&self) {
+ self.rx_task.with_mut(|ptr| {
+ let ptr: *mut Waker = (&mut *ptr).as_mut_ptr();
+ ptr.drop_in_place();
+ });
+ }
+
+ unsafe fn drop_tx_task(&self) {
+ self.tx_task.with_mut(|ptr| {
+ let ptr: *mut Waker = (&mut *ptr).as_mut_ptr();
+ ptr.drop_in_place();
+ });
+ }
+
+ unsafe fn set_rx_task(&self, cx: &mut Context<'_>) {
+ self.rx_task.with_mut(|ptr| {
+ let ptr: *mut Waker = (&mut *ptr).as_mut_ptr();
+ ptr.write(cx.waker().clone());
+ });
+ }
+
+ unsafe fn set_tx_task(&self, cx: &mut Context<'_>) {
+ self.tx_task.with_mut(|ptr| {
+ let ptr: *mut Waker = (&mut *ptr).as_mut_ptr();
+ ptr.write(cx.waker().clone());
+ });
+ }
+}
+
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ let state = State(self.state.with_mut(|v| *v));
+
+ if state.is_rx_task_set() {
+ unsafe {
+ self.drop_rx_task();
+ }
+ }
+
+ if state.is_tx_task_set() {
+ unsafe {
+ self.drop_tx_task();
+ }
+ }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Inner<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use std::sync::atomic::Ordering::Relaxed;
+
+ fmt.debug_struct("Inner")
+ .field("state", &State::load(&self.state, Relaxed))
+ .finish()
+ }
+}
+
+const RX_TASK_SET: usize = 0b00001;
+const VALUE_SENT: usize = 0b00010;
+const CLOSED: usize = 0b00100;
+const TX_TASK_SET: usize = 0b01000;
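+
+// These flags combine bitwise; for example, a channel whose value has been
+// sent and whose receiver has registered a waker holds the state
+// `VALUE_SENT | RX_TASK_SET` (0b00011). The helpers below read and update
+// individual flags atomically.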
+
+impl State {
+ fn new() -> State {
+ State(0)
+ }
+
+ fn is_complete(self) -> bool {
+ self.0 & VALUE_SENT == VALUE_SENT
+ }
+
+ fn set_complete(cell: &AtomicUsize) -> State {
+ // TODO: This could be `Release`, followed by an `Acquire` fence *if*
+ // the `RX_TASK_SET` flag is set. However, `loom` does not support
+ // fences yet.
+ let val = cell.fetch_or(VALUE_SENT, AcqRel);
+ State(val)
+ }
+
+ fn is_rx_task_set(self) -> bool {
+ self.0 & RX_TASK_SET == RX_TASK_SET
+ }
+
+ fn set_rx_task(cell: &AtomicUsize) -> State {
+ let val = cell.fetch_or(RX_TASK_SET, AcqRel);
+ State(val | RX_TASK_SET)
+ }
+
+ fn unset_rx_task(cell: &AtomicUsize) -> State {
+ let val = cell.fetch_and(!RX_TASK_SET, AcqRel);
+ State(val & !RX_TASK_SET)
+ }
+
+ fn is_closed(self) -> bool {
+ self.0 & CLOSED == CLOSED
+ }
+
+ fn set_closed(cell: &AtomicUsize) -> State {
+ // Acquire because we want all later writes (attempting to poll) to be
+ // ordered after this.
+ let val = cell.fetch_or(CLOSED, Acquire);
+ State(val)
+ }
+
+ fn set_tx_task(cell: &AtomicUsize) -> State {
+ let val = cell.fetch_or(TX_TASK_SET, AcqRel);
+ State(val | TX_TASK_SET)
+ }
+
+ fn unset_tx_task(cell: &AtomicUsize) -> State {
+ let val = cell.fetch_and(!TX_TASK_SET, AcqRel);
+ State(val & !TX_TASK_SET)
+ }
+
+ fn is_tx_task_set(self) -> bool {
+ self.0 & TX_TASK_SET == TX_TASK_SET
+ }
+
+ fn as_usize(self) -> usize {
+ self.0
+ }
+
+ fn load(cell: &AtomicUsize, order: Ordering) -> State {
+ let val = cell.load(order);
+ State(val)
+ }
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("State")
+ .field("is_complete", &self.is_complete())
+ .field("is_closed", &self.is_closed())
+ .field("is_rx_task_set", &self.is_rx_task_set())
+ .field("is_tx_task_set", &self.is_tx_task_set())
+ .finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/rwlock.rs b/third_party/rust/tokio/src/sync/rwlock.rs
new file mode 100644
index 0000000000..68cf710e84
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/rwlock.rs
@@ -0,0 +1,294 @@
+use crate::coop::CoopFutureExt;
+use crate::sync::batch_semaphore::{AcquireError, Semaphore};
+use std::cell::UnsafeCell;
+use std::ops;
+
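+// The write half acquires all `MAX_READS` permits at once (see `write`
+// below). Loom model-checking explores every interleaving, so a smaller
+// permit count is used under loom to keep the state space tractable.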
+#[cfg(not(loom))]
+const MAX_READS: usize = 32;
+
+#[cfg(loom)]
+const MAX_READS: usize = 10;
+
+/// An asynchronous reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// In comparison, a [`Mutex`] does not distinguish between readers or writers
+/// that acquire the lock, therefore causing any tasks waiting for the lock to
+/// become available to yield. An `RwLock` will allow many readers (up to
+/// `MAX_READS`, 32 in this implementation) to acquire the lock concurrently,
+/// as long as a writer is not holding the lock.
+///
+/// The priority policy of Tokio's read-write lock is _fair_ (or
+/// [_write-preferring_]), in order to ensure that readers cannot starve
+/// writers. Fairness is ensured using a first-in, first-out queue for the tasks
+/// awaiting the lock; if a task that wishes to acquire the write lock is at the
+/// head of the queue, read locks will not be given out until the write lock has
+/// been released. This is in contrast to the Rust standard library's
+/// `std::sync::RwLock`, where the priority policy is dependent on the
+/// operating system's implementation.
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards
+/// returned from the locking methods implement [`Deref`](https://doc.rust-lang.org/std/ops/trait.Deref.html)
+/// (and [`DerefMut`](https://doc.rust-lang.org/std/ops/trait.DerefMut.html)
+/// for the `write` methods) to allow access to the content of the lock.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::RwLock;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let lock = RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+/// let r1 = lock.read().await;
+/// let r2 = lock.read().await;
+/// assert_eq!(*r1, 5);
+/// assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+/// let mut w = lock.write().await;
+/// *w += 1;
+/// assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// }
+/// ```
+///
+/// [`Mutex`]: struct@super::Mutex
+/// [`RwLock`]: struct@RwLock
+/// [`RwLockReadGuard`]: struct@RwLockReadGuard
+/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
+/// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html
+/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
+#[derive(Debug)]
+pub struct RwLock<T> {
+ //semaphore to coordinate read and write access to T
+ s: Semaphore,
+
+ //inner data T
+ c: UnsafeCell<T>,
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] method on
+/// [`RwLock`].
+///
+/// [`read`]: method@RwLock::read
+#[derive(Debug)]
+pub struct RwLockReadGuard<'a, T> {
+ permit: ReleasingPermit<'a, T>,
+ lock: &'a RwLock<T>,
+}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] method
+/// on [`RwLock`].
+///
+/// [`write`]: method@RwLock::write
+/// [`RwLock`]: struct@RwLock
+#[derive(Debug)]
+pub struct RwLockWriteGuard<'a, T> {
+ permit: ReleasingPermit<'a, T>,
+ lock: &'a RwLock<T>,
+}
+
+// Wrapper around a permit that releases it back to the semaphore on Drop
+#[derive(Debug)]
+struct ReleasingPermit<'a, T> {
+ num_permits: u16,
+ lock: &'a RwLock<T>,
+}
+
+impl<'a, T> ReleasingPermit<'a, T> {
+ async fn acquire(
+ lock: &'a RwLock<T>,
+ num_permits: u16,
+ ) -> Result<ReleasingPermit<'a, T>, AcquireError> {
+ lock.s.acquire(num_permits).cooperate().await?;
+ Ok(Self { num_permits, lock })
+ }
+}
+
+impl<'a, T> Drop for ReleasingPermit<'a, T> {
+ fn drop(&mut self) {
+ self.lock.s.release(self.num_permits as usize);
+ }
+}
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+ fn check_send<T: Send>() {}
+ fn check_sync<T: Sync>() {}
+ fn check_unpin<T: Unpin>() {}
+ // This has to take a value, since the async fn's return type is unnameable.
+ fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+
+ check_send::<RwLock<u32>>();
+ check_sync::<RwLock<u32>>();
+ check_unpin::<RwLock<u32>>();
+
+ check_sync::<RwLockReadGuard<'_, u32>>();
+ check_unpin::<RwLockReadGuard<'_, u32>>();
+
+ check_sync::<RwLockWriteGuard<'_, u32>>();
+ check_unpin::<RwLockWriteGuard<'_, u32>>();
+
+ let rwlock = RwLock::new(0);
+ check_send_sync_val(rwlock.read());
+ check_send_sync_val(rwlock.write());
+}
+
+// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
+// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
+// RwLock<T>.
+unsafe impl<T> Send for RwLock<T> where T: Send {}
+unsafe impl<T> Sync for RwLock<T> where T: Send + Sync {}
+unsafe impl<'a, T> Sync for RwLockReadGuard<'a, T> where T: Send + Sync {}
+unsafe impl<'a, T> Sync for RwLockWriteGuard<'a, T> where T: Send + Sync {}
+
+impl<T> RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// ```
+ pub fn new(value: T) -> RwLock<T> {
+ RwLock {
+ c: UnsafeCell::new(value),
+ s: Semaphore::new(MAX_READS),
+ }
+ }
+
+ /// Locks this rwlock with shared read access, causing the current task
+ /// to yield until the lock has been acquired.
+ ///
+ /// The calling task will yield until there are no more writers which
+ /// hold the lock. There may be other readers currently inside the lock when
+ /// this method returns.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = lock.clone();
+ ///
+ /// let n = lock.read().await;
+ /// assert_eq!(*n, 1);
+ ///
+ /// tokio::spawn(async move {
+ /// // While main has an active read lock, we acquire one too.
+ /// let r = c_lock.read().await;
+ /// assert_eq!(*r, 1);
+ /// }).await.expect("The spawned task has panicked");
+ ///
+ /// // Drop the guard after the spawned task finishes.
+ /// drop(n);
+ /// }
+ /// ```
+ pub async fn read(&self) -> RwLockReadGuard<'_, T> {
+ let permit = ReleasingPermit::acquire(self, 1).await.unwrap_or_else(|_| {
+ // Acquiring can only fail if the semaphore was closed, but we never
+ // explicitly close it, so this can never happen.
+ unreachable!()
+ });
+ RwLockReadGuard { lock: self, permit }
+ }
+
+ /// Locks this rwlock with exclusive write access, causing the current task
+ /// to yield until the lock has been acquired.
+ ///
+ /// This function will not return while other writers or other readers
+ /// currently have access to the lock.
+ ///
+ /// Returns an RAII guard which will drop the write access of this rwlock
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = RwLock::new(1);
+ ///
+ /// let mut n = lock.write().await;
+ /// *n = 2;
+ /// }
+ /// ```
+ pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
+ let permit = ReleasingPermit::acquire(self, MAX_READS as u16)
+ .await
+ .unwrap_or_else(|_| {
+ // Acquiring can only fail if the semaphore was closed, but we never
+ // explicitly close it, so this can never happen.
+ unreachable!()
+ });
+
+ RwLockWriteGuard { lock: self, permit }
+ }
+
+ /// Consumes the lock, returning the underlying data.
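+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (added for illustration):
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// assert_eq!(lock.into_inner(), 5);
+ /// ```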
+ pub fn into_inner(self) -> T {
+ self.c.into_inner()
+ }
+}
+
+impl<T> ops::Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.c.get() }
+ }
+}
+
+impl<T> ops::Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.c.get() }
+ }
+}
+
+impl<T> ops::DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.lock.c.get() }
+ }
+}
+
+impl<T> From<T> for RwLock<T> {
+ fn from(s: T) -> Self {
+ Self::new(s)
+ }
+}
+
+impl<T> Default for RwLock<T>
+where
+ T: Default,
+{
+ fn default() -> Self {
+ Self::new(T::default())
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/semaphore.rs b/third_party/rust/tokio/src/sync/semaphore.rs
new file mode 100644
index 0000000000..4cce7e8f5b
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/semaphore.rs
@@ -0,0 +1,105 @@
+use super::batch_semaphore as ll; // low level implementation
+use crate::coop::CoopFutureExt;
+
+/// Counting semaphore performing asynchronous permit acquisition.
+///
+/// A semaphore maintains a set of permits. Permits are used to synchronize
+/// access to a shared resource. A semaphore differs from a mutex in that it
+/// can allow more than one concurrent caller to access the shared resource at a
+/// time.
+///
+/// When `acquire` is called and the semaphore has remaining permits, the
+/// function immediately returns a permit. However, if no remaining permits are
+/// available, `acquire` (asynchronously) waits until an outstanding permit is
+/// dropped. At this point, the freed permit is assigned to the caller.
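+///
+/// # Examples
+///
+/// A minimal usage sketch (added for illustration):
+///
+/// ```
+/// use tokio::sync::Semaphore;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let semaphore = Semaphore::new(3);
+///
+/// let permit = semaphore.acquire().await;
+/// assert_eq!(semaphore.available_permits(), 2);
+///
+/// // Dropping the permit releases it back to the semaphore.
+/// drop(permit);
+/// assert_eq!(semaphore.available_permits(), 3);
+/// }
+/// ```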
+#[derive(Debug)]
+pub struct Semaphore {
+ /// The low level semaphore
+ ll_sem: ll::Semaphore,
+}
+
+/// A permit from the semaphore
+#[must_use]
+#[derive(Debug)]
+pub struct SemaphorePermit<'a> {
+ sem: &'a Semaphore,
+ permits: u16,
+}
+
+/// Error returned from the [`Semaphore::try_acquire`] function.
+///
+/// A `try_acquire` operation can only fail if the semaphore has no available
+/// permits.
+///
+/// [`Semaphore::try_acquire`]: Semaphore::try_acquire
+#[derive(Debug)]
+pub struct TryAcquireError(());
+
+#[test]
+#[cfg(not(loom))]
+fn bounds() {
+ fn check_unpin<T: Unpin>() {}
+ // This has to take a value, since the async fn's return type is unnameable.
+ fn check_send_sync_val<T: Send + Sync>(_t: T) {}
+ fn check_send_sync<T: Send + Sync>() {}
+ check_unpin::<Semaphore>();
+ check_unpin::<SemaphorePermit<'_>>();
+ check_send_sync::<Semaphore>();
+
+ let semaphore = Semaphore::new(0);
+ check_send_sync_val(semaphore.acquire());
+}
+
+impl Semaphore {
+ /// Creates a new semaphore with the initial number of permits
+ pub fn new(permits: usize) -> Self {
+ Self {
+ ll_sem: ll::Semaphore::new(permits),
+ }
+ }
+
+ /// Returns the current number of available permits
+ pub fn available_permits(&self) -> usize {
+ self.ll_sem.available_permits()
+ }
+
+ /// Adds `n` new permits to the semaphore.
+ pub fn add_permits(&self, n: usize) {
+ self.ll_sem.release(n);
+ }
+
+ /// Acquires permit from the semaphore
+ pub async fn acquire(&self) -> SemaphorePermit<'_> {
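+ // Acquiring can only fail if the semaphore is closed, and this wrapper
+ // never closes it, so the unwrap below cannot panic.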
+ self.ll_sem.acquire(1).cooperate().await.unwrap();
+ SemaphorePermit {
+ sem: self,
+ permits: 1,
+ }
+ }
+
+ /// Tries to acquire a permit from the semaphore
+ pub fn try_acquire(&self) -> Result<SemaphorePermit<'_>, TryAcquireError> {
+ match self.ll_sem.try_acquire(1) {
+ Ok(_) => Ok(SemaphorePermit {
+ sem: self,
+ permits: 1,
+ }),
+ Err(_) => Err(TryAcquireError(())),
+ }
+ }
+}
+
+impl<'a> SemaphorePermit<'a> {
+ /// Forgets the permit **without** releasing it back to the semaphore.
+ /// This can be used to reduce the number of permits available from a
+ /// semaphore.
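+ ///
+ /// A short sketch (added for illustration):
+ ///
+ /// ```
+ /// use tokio::sync::Semaphore;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let semaphore = Semaphore::new(2);
+ /// let permit = semaphore.acquire().await;
+ ///
+ /// // The permit is leaked instead of being returned on drop.
+ /// permit.forget();
+ /// assert_eq!(semaphore.available_permits(), 1);
+ /// }
+ /// ```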
+ pub fn forget(mut self) {
+ self.permits = 0;
+ }
+}
+
+impl Drop for SemaphorePermit<'_> {
+ fn drop(&mut self) {
+ self.sem.add_permits(self.permits as usize);
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/semaphore_ll.rs b/third_party/rust/tokio/src/sync/semaphore_ll.rs
new file mode 100644
index 0000000000..0bdc4e2761
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/semaphore_ll.rs
@@ -0,0 +1,1220 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
+//! Thread-safe, asynchronous counting semaphore.
+//!
+//! A `Semaphore` instance holds a set of permits. Permits are used to
+//! synchronize access to a shared resource.
+//!
+//! Before accessing the shared resource, callers acquire a permit from the
+//! semaphore. Once the permit is acquired, the caller then enters the critical
+//! section. If no permits are available, then acquiring the semaphore returns
+//! `Pending`. The task is woken once a permit becomes available.
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::future::AtomicWaker;
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize};
+use crate::loom::thread;
+
+use std::cmp;
+use std::fmt;
+use std::ptr::{self, NonNull};
+use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed, Release};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll};
+use std::usize;
+
+/// Futures-aware semaphore.
+pub(crate) struct Semaphore {
+ /// Tracks both the waiter queue tail pointer and the number of remaining
+ /// permits.
+ state: AtomicUsize,
+
+ /// Waiter queue head pointer.
+ head: UnsafeCell<NonNull<Waiter>>,
+
+ /// Coordinates access to the queue head.
+ rx_lock: AtomicUsize,
+
+ /// Stub waiter node used as part of the MPSC channel algorithm.
+ stub: Box<Waiter>,
+}
+
+/// A semaphore permit
+///
+/// Tracks the lifecycle of a semaphore permit.
+///
+/// An instance of `Permit` is intended to be used with a **single** instance of
+/// `Semaphore`. Using a single instance of `Permit` with multiple semaphore
+/// instances will result in unexpected behavior.
+///
+/// `Permit` does **not** release the permit back to the semaphore on drop. It
+/// is the user's responsibility to ensure that `Permit::release` is called
+/// before dropping the permit.
+#[derive(Debug)]
+pub(crate) struct Permit {
+ waiter: Option<Box<Waiter>>,
+ state: PermitState,
+}
+
+/// Error returned by `Permit::poll_acquire`.
+#[derive(Debug)]
+pub(crate) struct AcquireError(());
+
+/// Error returned by `Permit::try_acquire`.
+#[derive(Debug)]
+pub(crate) enum TryAcquireError {
+ Closed,
+ NoPermits,
+}
+
+/// Node used to notify the semaphore waiter when permit is available.
+#[derive(Debug)]
+struct Waiter {
+ /// Stores waiter state.
+ ///
+ /// See `WaiterState` for more details.
+ state: AtomicUsize,
+
+ /// Task to wake when a permit is made available.
+ waker: AtomicWaker,
+
+ /// Next pointer in the queue of waiters.
+ next: AtomicPtr<Waiter>,
+}
+
+/// Semaphore state
+///
+/// The 2 low bits are flags:
+///
+/// - `NUM_FLAG`: the state holds a permit count rather than a waiter pointer
+/// - `CLOSED_FLAG`: the semaphore is closed
+///
+/// When the number flag is set, the rest of the `usize` tracks the number of
+/// available permits. Otherwise, the rest of the `usize` is a pointer to the
+/// tail of the waiter queue.
+#[derive(Copy, Clone)]
+struct SemState(usize);
+
+/// Permit state
+#[derive(Debug, Copy, Clone)]
+enum PermitState {
+ /// Currently waiting for permits to be made available and assigned to the
+ /// waiter.
+ Waiting(u16),
+
+ /// The number of acquired permits
+ Acquired(u16),
+}
+
+/// State for an individual waker node
+#[derive(Debug, Copy, Clone)]
+struct WaiterState(usize);
+
+/// Waiter node is in the semaphore queue
+const QUEUED: usize = 0b001;
+
+/// Semaphore has been closed, no more permits will be issued.
+const CLOSED: usize = 0b10;
+
+/// The permit that owns the `Waiter` dropped.
+const DROPPED: usize = 0b100;
+
+/// Represents "one requested permit" in the waiter state
+const PERMIT_ONE: usize = 0b1000;
+
+/// Masks the waiter state to only contain bits tracking number of requested
+/// permits.
+const PERMIT_MASK: usize = usize::MAX - (PERMIT_ONE - 1);
+
+/// How much to shift a permit count to pack it into the waker state
+const PERMIT_SHIFT: u32 = PERMIT_ONE.trailing_zeros();
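+
+// Example encoding (illustrative): a waiter that is queued and still needs
+// two permits stores `(2 << PERMIT_SHIFT) | QUEUED` = 0b10001.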
+
+/// Flag differentiating between available permits and waiter pointers.
+///
+/// If we assume pointers are properly aligned, then the least significant bit
+/// will always be zero. So, we use that bit to track if the value represents a
+/// number.
+const NUM_FLAG: usize = 0b01;
+
+/// Signal the semaphore is closed
+const CLOSED_FLAG: usize = 0b10;
+
+/// Maximum number of permits a semaphore can manage
+const MAX_PERMITS: usize = usize::MAX >> NUM_SHIFT;
+
+/// When representing "numbers", the state has to be shifted this much (to get
+/// rid of the flag bit).
+const NUM_SHIFT: usize = 2;
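+
+// Example encoding (illustrative): a state holding 5 available permits is
+// `(5 << NUM_SHIFT) | NUM_FLAG` = 0b10101; a state holding waiters stores
+// the (aligned) tail pointer bits directly, optionally with `CLOSED_FLAG`
+// set.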
+
+// ===== impl Semaphore =====
+
+impl Semaphore {
+ /// Creates a new semaphore with the initial number of permits
+ ///
+ /// # Panics
+ ///
+ /// Panics if `permits` exceeds `MAX_PERMITS`.
+ pub(crate) fn new(permits: usize) -> Semaphore {
+ let stub = Box::new(Waiter::new());
+ let ptr = NonNull::from(&*stub);
+
+ // Allocations are aligned
+ debug_assert!(ptr.as_ptr() as usize & NUM_FLAG == 0);
+
+ let state = SemState::new(permits, &stub);
+
+ Semaphore {
+ state: AtomicUsize::new(state.to_usize()),
+ head: UnsafeCell::new(ptr),
+ rx_lock: AtomicUsize::new(0),
+ stub,
+ }
+ }
+
+ /// Returns the current number of available permits
+ pub(crate) fn available_permits(&self) -> usize {
+ let curr = SemState(self.state.load(Acquire));
+ curr.available_permits()
+ }
+
+ /// Tries to acquire the requested number of permits, registering the waiter
+ /// if not enough permits are available.
+ fn poll_acquire(
+ &self,
+ cx: &mut Context<'_>,
+ num_permits: u16,
+ permit: &mut Permit,
+ ) -> Poll<Result<(), AcquireError>> {
+ self.poll_acquire2(num_permits, || {
+ let waiter = permit.waiter.get_or_insert_with(|| Box::new(Waiter::new()));
+
+ waiter.waker.register_by_ref(cx.waker());
+
+ Some(NonNull::from(&**waiter))
+ })
+ }
+
+ fn try_acquire(&self, num_permits: u16) -> Result<(), TryAcquireError> {
+ match self.poll_acquire2(num_permits, || None) {
+ Poll::Ready(res) => res.map_err(to_try_acquire),
+ Poll::Pending => Err(TryAcquireError::NoPermits),
+ }
+ }
+
+ /// Polls for a permit
+ ///
+ /// Tries to acquire available permits first. If unable to acquire a
+ /// sufficient number of permits, the caller's waiter is pushed onto the
+ /// semaphore's wait queue.
+ fn poll_acquire2<F>(
+ &self,
+ num_permits: u16,
+ mut get_waiter: F,
+ ) -> Poll<Result<(), AcquireError>>
+ where
+ F: FnMut() -> Option<NonNull<Waiter>>,
+ {
+ let num_permits = num_permits as usize;
+
+ // Load the current state
+ let mut curr = SemState(self.state.load(Acquire));
+
+ // Saves a ref to the waiter node
+ let mut maybe_waiter: Option<NonNull<Waiter>> = None;
+
+ /// Used in branches where we attempt to push the waiter into the wait
+ /// queue but fail due to permits becoming available or the wait queue
+ /// transitioning to "closed". In this case, the waiter must be
+ /// transitioned back to the "idle" state.
+ macro_rules! revert_to_idle {
+ () => {
+ if let Some(waiter) = maybe_waiter {
+ unsafe { waiter.as_ref() }.revert_to_idle();
+ }
+ };
+ }
+
+ loop {
+ let mut next = curr;
+
+ if curr.is_closed() {
+ revert_to_idle!();
+ return Ready(Err(AcquireError::closed()));
+ }
+
+ let acquired = next.acquire_permits(num_permits, &self.stub);
+
+ if !acquired {
+ // There are not enough available permits to satisfy the
+ // request. The permit transitions to a waiting state.
+ debug_assert!(curr.waiter().is_some() || curr.available_permits() < num_permits);
+
+ if let Some(waiter) = maybe_waiter.as_ref() {
+ // Safety: the caller owns the waiter.
+ let w = unsafe { waiter.as_ref() };
+ w.set_permits_to_acquire(num_permits - curr.available_permits());
+ } else {
+ // Get the waiter for the permit.
+ if let Some(waiter) = get_waiter() {
+ // Safety: the caller owns the waiter.
+ let w = unsafe { waiter.as_ref() };
+
+ // If there are any currently available permits, the
+ // waiter acquires those immediately and waits for the
+ // remaining permits to become available.
+ if !w.to_queued(num_permits - curr.available_permits()) {
+ // The node is already queued, so there is no further
+ // work to do.
+ return Pending;
+ }
+
+ maybe_waiter = Some(waiter);
+ } else {
+ // No waiter, this indicates the caller does not wish to
+ // "wait", so there is nothing left to do.
+ return Pending;
+ }
+ }
+
+ next.set_waiter(maybe_waiter.unwrap());
+ }
+
+ debug_assert_ne!(curr.0, 0);
+ debug_assert_ne!(next.0, 0);
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => {
+ if acquired {
+ // Successfully acquire permits **without** queuing the
+ // waiter node. The waiter node is not currently in the
+ // queue.
+ revert_to_idle!();
+ return Ready(Ok(()));
+ } else {
+ // The node is pushed into the queue, the final step is
+ // to set the node's "next" pointer to return the wait
+ // queue into a consistent state.
+
+ let prev_waiter =
+ curr.waiter().unwrap_or_else(|| NonNull::from(&*self.stub));
+
+ let waiter = maybe_waiter.unwrap();
+
+ // Link the nodes.
+ //
+ // Safety: the mpsc algorithm guarantees the old tail of
+ // the queue is not removed from the queue during the
+ // push process.
+ unsafe {
+ prev_waiter.as_ref().store_next(waiter);
+ }
+
+ return Pending;
+ }
+ }
+ Err(actual) => {
+ curr = SemState(actual);
+ }
+ }
+ }
+ }
+
+ /// Closes the semaphore. This prevents the semaphore from issuing new
+ /// permits and notifies all pending waiters.
+ pub(crate) fn close(&self) {
+ // Acquire the `rx_lock`, setting the "closed" flag on the lock.
+ let prev = self.rx_lock.fetch_or(1, AcqRel);
+
+ if prev != 0 {
+ // Another thread has the lock and will be responsible for notifying
+ // pending waiters.
+ return;
+ }
+
+ self.add_permits_locked(0, true);
+ }
+
+ /// Adds `n` new permits to the semaphore.
+ pub(crate) fn add_permits(&self, n: usize) {
+ if n == 0 {
+ return;
+ }
+
+ // TODO: Handle overflow. A panic is not sufficient, the process must
+ // abort.
+ let prev = self.rx_lock.fetch_add(n << 1, AcqRel);
+
+ if prev != 0 {
+ // Another thread has the lock and will be responsible for notifying
+ // pending waiters.
+ return;
+ }
+
+ self.add_permits_locked(n, false);
+ }
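+
+ // Note on the `rx_lock` encoding (descriptive): bit 0 is the "closed"
+ // request flag and the remaining bits count permits pending release, so
+ // `fetch_add(n << 1)` queues `n` permits and `fetch_or(1)` requests a
+ // close. A non-zero previous value means another thread already holds the
+ // lock and will observe the update in its drain loop.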
+
+ fn add_permits_locked(&self, mut rem: usize, mut closed: bool) {
+ while rem > 0 || closed {
+ if closed {
+ SemState::fetch_set_closed(&self.state, AcqRel);
+ }
+
+ // Release the permits and notify
+ self.add_permits_locked2(rem, closed);
+
+ let n = rem << 1;
+
+ let actual = if closed {
+ let actual = self.rx_lock.fetch_sub(n | 1, AcqRel);
+ closed = false;
+ actual
+ } else {
+ let actual = self.rx_lock.fetch_sub(n, AcqRel);
+ closed = actual & 1 == 1;
+ actual
+ };
+
+ rem = (actual >> 1) - rem;
+ }
+ }
+
+ /// Releases a specific amount of permits to the semaphore
+ ///
+ /// This function is called by `add_permits` after the add lock has been
+ /// acquired.
+ fn add_permits_locked2(&self, mut n: usize, closed: bool) {
+ // If closing the semaphore, we want to drain the entire queue. The
+ // number of permits being assigned doesn't matter.
+ if closed {
+ n = usize::MAX;
+ }
+
+ 'outer: while n > 0 {
+ unsafe {
+ let mut head = self.head.with(|head| *head);
+ let mut next_ptr = head.as_ref().next.load(Acquire);
+
+ let stub = self.stub();
+
+ if head == stub {
+ // The stub node indicates an empty queue. Any remaining
+ // permits get assigned back to the semaphore.
+ let next = match NonNull::new(next_ptr) {
+ Some(next) => next,
+ None => {
+ // This loop is not part of the standard intrusive mpsc
+ // channel algorithm. This is where we atomically pop
+ // the last task and add `n` to the remaining capacity.
+ //
+ // This modification to the pop algorithm works because,
+ // at this point, we have not done any work (only done
+ // reading). We have a *pretty* good idea that there is
+ // no concurrent pusher.
+ //
+ // The capacity is then atomically added by doing an
+ // AcqRel CAS on `state`. The `state` cell is the
+ // linchpin of the algorithm.
+ //
+ // By successfully CASing `head` w/ AcqRel, we ensure
+ // that, if any thread was racing and entered a push, we
+ // see that and abort pop, retrying as it is
+ // "inconsistent".
+ let mut curr = SemState::load(&self.state, Acquire);
+
+ loop {
+ if curr.has_waiter(&self.stub) {
+ // A waiter is being added concurrently.
+ // This is the MPSC queue's "inconsistent"
+ // state and we must loop and try again.
+ thread::yield_now();
+ continue 'outer;
+ }
+
+ // If closing, nothing more to do.
+ if closed {
+ debug_assert!(curr.is_closed(), "state = {:?}", curr);
+ return;
+ }
+
+ let mut next = curr;
+ next.release_permits(n, &self.stub);
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => return,
+ Err(actual) => {
+ curr = SemState(actual);
+ }
+ }
+ }
+ }
+ };
+
+ self.head.with_mut(|head| *head = next);
+ head = next;
+ next_ptr = next.as_ref().next.load(Acquire);
+ }
+
+ // `head` points to a waiter; assign permits to that waiter. If
+ // all requested permits are satisfied, then we can continue;
+ // otherwise the node stays in the wait queue.
+ if !head.as_ref().assign_permits(&mut n, closed) {
+ assert_eq!(n, 0);
+ return;
+ }
+
+ if let Some(next) = NonNull::new(next_ptr) {
+ self.head.with_mut(|head| *head = next);
+
+ self.remove_queued(head, closed);
+ continue 'outer;
+ }
+
+ let state = SemState::load(&self.state, Acquire);
+
+ // This must always be a pointer as the wait list is not empty.
+ let tail = state.waiter().unwrap();
+
+ if tail != head {
+ // Inconsistent
+ thread::yield_now();
+ continue 'outer;
+ }
+
+ self.push_stub(closed);
+
+ next_ptr = head.as_ref().next.load(Acquire);
+
+ if let Some(next) = NonNull::new(next_ptr) {
+ self.head.with_mut(|head| *head = next);
+
+ self.remove_queued(head, closed);
+ continue 'outer;
+ }
+
+ // Inconsistent state, loop
+ thread::yield_now();
+ }
+ }
+ }
+
+ /// The wait node has had all of its permits assigned and has been removed
+ /// from the wait queue.
+ ///
+ /// Attempts to remove the QUEUED bit from the node. If additional permits
+ /// are concurrently requested, the node must be pushed back into the wait
+ /// queue.
+ fn remove_queued(&self, waiter: NonNull<Waiter>, closed: bool) {
+ let mut curr = WaiterState(unsafe { waiter.as_ref() }.state.load(Acquire));
+
+ loop {
+ if curr.is_dropped() {
+ // The Permit dropped, it is on us to release the memory
+ let _ = unsafe { Box::from_raw(waiter.as_ptr()) };
+ return;
+ }
+
+ // The node is removed from the queue. We attempt to unset the
+ // queued bit, but concurrently the waiter has requested more
+ // permits. When the waiter requested more permits, it saw the
+ // queued bit set so took no further action. This requires us to
+ // push the node back into the queue.
+ if curr.permits_to_acquire() > 0 {
+ // More permits are requested. The waiter must be re-queued
+ unsafe {
+ self.push_waiter(waiter, closed);
+ }
+ return;
+ }
+
+ let mut next = curr;
+ next.unset_queued();
+
+ let w = unsafe { waiter.as_ref() };
+
+ match w.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => return,
+ Err(actual) => {
+ curr = WaiterState(actual);
+ }
+ }
+ }
+ }
+
+ unsafe fn push_stub(&self, closed: bool) {
+ self.push_waiter(self.stub(), closed);
+ }
+
+ unsafe fn push_waiter(&self, waiter: NonNull<Waiter>, closed: bool) {
+ // Set the next pointer. This does not require an atomic operation as
+ // this node is not accessible. The write will be flushed with the next
+ // operation
+ waiter.as_ref().next.store(ptr::null_mut(), Relaxed);
+
+ // Update the tail to point to the new node. We need to see the previous
+ // node in order to update the next pointer as well as release `task`
+ // to any other threads calling `push`.
+ let next = SemState::new_ptr(waiter, closed);
+ let prev = SemState(self.state.swap(next.0, AcqRel));
+
+ debug_assert_eq!(closed, prev.is_closed());
+
+ // This function is only called when there are pending tasks. Because of
+ // this, the state must *always* be in pointer mode.
+ let prev = prev.waiter().unwrap();
+
+ // No cycles plz
+ debug_assert_ne!(prev, waiter);
+
+ // Release `task` to the consume end.
+ prev.as_ref().next.store(waiter.as_ptr(), Release);
+ }
+
+ fn stub(&self) -> NonNull<Waiter> {
+ unsafe { NonNull::new_unchecked(&*self.stub as *const _ as *mut _) }
+ }
+}
+
+impl Drop for Semaphore {
+ fn drop(&mut self) {
+ self.close();
+ }
+}
+
+impl fmt::Debug for Semaphore {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Semaphore")
+ .field("state", &SemState::load(&self.state, Relaxed))
+ .field("head", &self.head.with(|ptr| ptr))
+ .field("rx_lock", &self.rx_lock.load(Relaxed))
+ .field("stub", &self.stub)
+ .finish()
+ }
+}
+
+unsafe impl Send for Semaphore {}
+unsafe impl Sync for Semaphore {}
+
+// ===== impl Permit =====
+
+impl Permit {
+ /// Creates a new `Permit`.
+ ///
+ /// The permit begins in the "unacquired" state.
+ pub(crate) fn new() -> Permit {
+ use PermitState::Acquired;
+
+ Permit {
+ waiter: None,
+ state: Acquired(0),
+ }
+ }
+
+ /// Returns `true` if the permit has been acquired
+ #[allow(dead_code)] // may be used later
+ pub(crate) fn is_acquired(&self) -> bool {
+ match self.state {
+ PermitState::Acquired(num) if num > 0 => true,
+ _ => false,
+ }
+ }
+
+ /// Tries to acquire the permit. If no permits are available, the current task
+ /// is notified once a new permit becomes available.
+ pub(crate) fn poll_acquire(
+ &mut self,
+ cx: &mut Context<'_>,
+ num_permits: u16,
+ semaphore: &Semaphore,
+ ) -> Poll<Result<(), AcquireError>> {
+ use std::cmp::Ordering::*;
+ use PermitState::*;
+
+ match self.state {
+ Waiting(requested) => {
+ // There must be a waiter
+ let waiter = self.waiter.as_ref().unwrap();
+
+ match requested.cmp(&num_permits) {
+ Less => {
+ let delta = num_permits - requested;
+
+ // Request additional permits. If the waiter has been
+ // dequeued, it must be re-queued.
+ if !waiter.try_inc_permits_to_acquire(delta as usize) {
+ let waiter = NonNull::from(&**waiter);
+
+ // Ignore the result. The check for
+ // `permits_to_acquire()` will converge the state as
+ // needed
+ let _ = semaphore.poll_acquire2(delta, || Some(waiter))?;
+ }
+
+ self.state = Waiting(num_permits);
+ }
+ Greater => {
+ let delta = requested - num_permits;
+ let to_release = waiter.try_dec_permits_to_acquire(delta as usize);
+
+ semaphore.add_permits(to_release);
+ self.state = Waiting(num_permits);
+ }
+ Equal => {}
+ }
+
+ if waiter.permits_to_acquire()? == 0 {
+ self.state = Acquired(requested);
+ return Ready(Ok(()));
+ }
+
+ waiter.waker.register_by_ref(cx.waker());
+
+ if waiter.permits_to_acquire()? == 0 {
+ self.state = Acquired(requested);
+ return Ready(Ok(()));
+ }
+
+ Pending
+ }
+ Acquired(acquired) => {
+ if acquired >= num_permits {
+ Ready(Ok(()))
+ } else {
+ match semaphore.poll_acquire(cx, num_permits - acquired, self)? {
+ Ready(()) => {
+ self.state = Acquired(num_permits);
+ Ready(Ok(()))
+ }
+ Pending => {
+ self.state = Waiting(num_permits);
+ Pending
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Tries to acquire the permit.
+ pub(crate) fn try_acquire(
+ &mut self,
+ num_permits: u16,
+ semaphore: &Semaphore,
+ ) -> Result<(), TryAcquireError> {
+ use PermitState::*;
+
+ match self.state {
+ Waiting(requested) => {
+ // There must be a waiter
+ let waiter = self.waiter.as_ref().unwrap();
+
+ if requested > num_permits {
+ let delta = requested - num_permits;
+ let to_release = waiter.try_dec_permits_to_acquire(delta as usize);
+
+ semaphore.add_permits(to_release);
+ self.state = Waiting(num_permits);
+ }
+
+ let res = waiter.permits_to_acquire().map_err(to_try_acquire)?;
+
+ if res == 0 {
+ if requested < num_permits {
+ // Try to acquire the additional permits
+ semaphore.try_acquire(num_permits - requested)?;
+ }
+
+ self.state = Acquired(num_permits);
+ Ok(())
+ } else {
+ Err(TryAcquireError::NoPermits)
+ }
+ }
+ Acquired(acquired) => {
+ if acquired < num_permits {
+ semaphore.try_acquire(num_permits - acquired)?;
+ self.state = Acquired(num_permits);
+ }
+
+ Ok(())
+ }
+ }
+ }
+
+ /// Releases a permit back to the semaphore
+ pub(crate) fn release(&mut self, n: u16, semaphore: &Semaphore) {
+ let n = self.forget(n);
+ semaphore.add_permits(n as usize);
+ }
+
+ /// Forgets the permit **without** releasing it back to the semaphore.
+ ///
+ /// After calling `forget`, `poll_acquire` is able to acquire a new permit
+ /// from the semaphore.
+ ///
+ /// Repeatedly calling `forget` without associated calls to `add_permits`
+ /// will result in the semaphore losing all permits.
+ ///
+ /// Will forget **at most** the number of acquired permits. This number is
+ /// returned.
+ pub(crate) fn forget(&mut self, n: u16) -> u16 {
+ use PermitState::*;
+
+ match self.state {
+ Waiting(requested) => {
+ let n = cmp::min(n, requested);
+
+ // Decrement
+ let acquired = self
+ .waiter
+ .as_ref()
+ .unwrap()
+ .try_dec_permits_to_acquire(n as usize) as u16;
+
+ if n == requested {
+ self.state = Acquired(0);
+ } else if acquired == requested - n {
+ self.state = Waiting(acquired);
+ } else {
+ self.state = Waiting(requested - n);
+ }
+
+ acquired
+ }
+ Acquired(acquired) => {
+ let n = cmp::min(n, acquired);
+ self.state = Acquired(acquired - n);
+ n
+ }
+ }
+ }
+}
+
+impl Default for Permit {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Drop for Permit {
+ fn drop(&mut self) {
+ if let Some(waiter) = self.waiter.take() {
+ // Set the dropped flag
+ let state = WaiterState(waiter.state.fetch_or(DROPPED, AcqRel));
+
+ if state.is_queued() {
+ // The waiter is stored in the queue. The semaphore will drop it
+ std::mem::forget(waiter);
+ }
+ }
+ }
+}
+
+// ===== impl AcquireError ====
+
+impl AcquireError {
+ fn closed() -> AcquireError {
+ AcquireError(())
+ }
+}
+
+fn to_try_acquire(_: AcquireError) -> TryAcquireError {
+ TryAcquireError::Closed
+}
+
+impl fmt::Display for AcquireError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "semaphore closed")
+ }
+}
+
+impl std::error::Error for AcquireError {}
+
+// ===== impl TryAcquireError =====
+
+impl TryAcquireError {
+ /// Returns `true` if the error was caused by a closed semaphore.
+ pub(crate) fn is_closed(&self) -> bool {
+ match self {
+ TryAcquireError::Closed => true,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if the error was caused by calling `try_acquire` on a
+ /// semaphore with no available permits.
+ pub(crate) fn is_no_permits(&self) -> bool {
+ match self {
+ TryAcquireError::NoPermits => true,
+ _ => false,
+ }
+ }
+}
+
+impl fmt::Display for TryAcquireError {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ TryAcquireError::Closed => write!(fmt, "semaphore closed"),
+ TryAcquireError::NoPermits => write!(fmt, "no permits available"),
+ }
+ }
+}
+
+impl std::error::Error for TryAcquireError {}
+
+// ===== impl Waiter =====
+
+impl Waiter {
+ fn new() -> Waiter {
+ Waiter {
+ state: AtomicUsize::new(0),
+ waker: AtomicWaker::new(),
+ next: AtomicPtr::new(ptr::null_mut()),
+ }
+ }
+
+ fn permits_to_acquire(&self) -> Result<usize, AcquireError> {
+ let state = WaiterState(self.state.load(Acquire));
+
+ if state.is_closed() {
+ Err(AcquireError(()))
+ } else {
+ Ok(state.permits_to_acquire())
+ }
+ }
+
+ /// Only increments the number of permits *if* the waiter is currently
+ /// queued.
+ ///
+ /// # Returns
+ ///
+ /// `true` if the number of permits to acquire has been incremented. `false`
+ /// otherwise. On `false`, the caller should use `Semaphore::poll_acquire`.
+ fn try_inc_permits_to_acquire(&self, n: usize) -> bool {
+ let mut curr = WaiterState(self.state.load(Acquire));
+
+ loop {
+ if !curr.is_queued() {
+ assert_eq!(0, curr.permits_to_acquire());
+ return false;
+ }
+
+ let mut next = curr;
+ next.set_permits_to_acquire(n + curr.permits_to_acquire());
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => return true,
+ Err(actual) => curr = WaiterState(actual),
+ }
+ }
+ }
+
+ /// Tries to decrement the number of permits to acquire. This returns the
+ /// actual number of permits that were decremented. The delta between `n`
+ /// and the return has been assigned to the permit and the caller must
+ /// assign these back to the semaphore.
+ fn try_dec_permits_to_acquire(&self, n: usize) -> usize {
+ let mut curr = WaiterState(self.state.load(Acquire));
+
+ loop {
+ if !curr.is_queued() {
+ assert_eq!(0, curr.permits_to_acquire());
+ }
+
+ let delta = cmp::min(n, curr.permits_to_acquire());
+ let rem = curr.permits_to_acquire() - delta;
+
+ let mut next = curr;
+ next.set_permits_to_acquire(rem);
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => return n - delta,
+ Err(actual) => curr = WaiterState(actual),
+ }
+ }
+ }
+
+ /// Store the number of remaining permits needed to satisfy the waiter and
+ /// transition to the "QUEUED" state.
+ ///
+ /// # Returns
+ ///
+ /// `true` if the `QUEUED` bit was set as part of the transition.
+ fn to_queued(&self, num_permits: usize) -> bool {
+ let mut curr = WaiterState(self.state.load(Acquire));
+
+ // The waiter should **not** be waiting for any permits.
+ debug_assert_eq!(curr.permits_to_acquire(), 0);
+
+ loop {
+ let mut next = curr;
+ next.set_permits_to_acquire(num_permits);
+ next.set_queued();
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => {
+ if curr.is_queued() {
+ return false;
+ } else {
+ // Make sure the next pointer is null
+ self.next.store(ptr::null_mut(), Relaxed);
+ return true;
+ }
+ }
+ Err(actual) => curr = WaiterState(actual),
+ }
+ }
+ }
+
+ /// Set the number of permits to acquire.
+ ///
+ /// This function is only called when the waiter is being inserted into the
+ /// wait queue. Because of this, there are no concurrent threads that can
+ /// modify the state and using `store` is safe.
+ fn set_permits_to_acquire(&self, num_permits: usize) {
+ debug_assert!(WaiterState(self.state.load(Acquire)).is_queued());
+
+ let mut state = WaiterState(QUEUED);
+ state.set_permits_to_acquire(num_permits);
+
+ self.state.store(state.0, Release);
+ }
+
+ /// Assign permits to the waiter.
+ ///
+ /// Returns `true` if the waiter should be removed from the queue
+ fn assign_permits(&self, n: &mut usize, closed: bool) -> bool {
+ let mut curr = WaiterState(self.state.load(Acquire));
+
+ loop {
+ let mut next = curr;
+
+ // Number of permits to assign to this waiter
+ let assign = cmp::min(curr.permits_to_acquire(), *n);
+
+ // Assign the permits
+ next.set_permits_to_acquire(curr.permits_to_acquire() - assign);
+
+ if closed {
+ next.set_closed();
+ }
+
+ match self.state.compare_exchange(curr.0, next.0, AcqRel, Acquire) {
+ Ok(_) => {
+ // Update `n`
+ *n -= assign;
+
+ if next.permits_to_acquire() == 0 {
+ if curr.permits_to_acquire() > 0 {
+ self.waker.wake();
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+ Err(actual) => curr = WaiterState(actual),
+ }
+ }
+ }
+
+ fn revert_to_idle(&self) {
+ // An idle node is not waiting on any permits
+ self.state.store(0, Relaxed);
+ }
+
+ fn store_next(&self, next: NonNull<Waiter>) {
+ self.next.store(next.as_ptr(), Release);
+ }
+}
+
+// ===== impl SemState =====
+
+impl SemState {
+ /// Returns a new default `State` value.
+ fn new(permits: usize, stub: &Waiter) -> SemState {
+ assert!(permits <= MAX_PERMITS);
+
+ if permits > 0 {
+ SemState((permits << NUM_SHIFT) | NUM_FLAG)
+ } else {
+ SemState(stub as *const _ as usize)
+ }
+ }
+
+ /// Returns a `State` tracking `ptr` as the tail of the queue.
+ fn new_ptr(tail: NonNull<Waiter>, closed: bool) -> SemState {
+ let mut val = tail.as_ptr() as usize;
+
+ if closed {
+ val |= CLOSED_FLAG;
+ }
+
+ SemState(val)
+ }
+
+ /// Returns the amount of remaining capacity
+ fn available_permits(self) -> usize {
+ if !self.has_available_permits() {
+ return 0;
+ }
+
+ self.0 >> NUM_SHIFT
+ }
+
+ /// Returns `true` if the state has permits that can be claimed by a waiter.
+ fn has_available_permits(self) -> bool {
+ self.0 & NUM_FLAG == NUM_FLAG
+ }
+
+ fn has_waiter(self, stub: &Waiter) -> bool {
+ !self.has_available_permits() && !self.is_stub(stub)
+ }
+
+ /// Tries to atomically acquire specified number of permits.
+ ///
+ /// # Return
+ ///
+ /// Returns `true` if the specified number of permits were acquired, `false`
+ /// otherwise. Returning false does not mean that there are no more
+ /// available permits.
+ fn acquire_permits(&mut self, num: usize, stub: &Waiter) -> bool {
+ debug_assert!(num > 0);
+
+ if self.available_permits() < num {
+ return false;
+ }
+
+ debug_assert!(self.waiter().is_none());
+
+ self.0 -= num << NUM_SHIFT;
+
+ if self.0 == NUM_FLAG {
+ // Set the state to the stub pointer.
+ self.0 = stub as *const _ as usize;
+ }
+
+ true
+ }
+
+ /// Releases permits
+ ///
+ /// Returns `true` if the permits were accepted.
+ fn release_permits(&mut self, permits: usize, stub: &Waiter) {
+ debug_assert!(permits > 0);
+
+ if self.is_stub(stub) {
+ self.0 = (permits << NUM_SHIFT) | NUM_FLAG | (self.0 & CLOSED_FLAG);
+ return;
+ }
+
+ debug_assert!(self.has_available_permits());
+
+ self.0 += permits << NUM_SHIFT;
+ }
+
+ fn is_waiter(self) -> bool {
+ self.0 & NUM_FLAG == 0
+ }
+
+ /// Returns the waiter, if one is set.
+ fn waiter(self) -> Option<NonNull<Waiter>> {
+ if self.is_waiter() {
+ let waiter = NonNull::new(self.as_ptr()).expect("null pointer stored");
+
+ Some(waiter)
+ } else {
+ None
+ }
+ }
+
+    /// Assumes `self` represents a pointer; masks off the `CLOSED_FLAG` bit.
+ fn as_ptr(self) -> *mut Waiter {
+ (self.0 & !CLOSED_FLAG) as *mut Waiter
+ }
+
+    /// Sets the state to a pointer to a waiter.
+ ///
+ /// This can only be done from the full state.
+ fn set_waiter(&mut self, waiter: NonNull<Waiter>) {
+ let waiter = waiter.as_ptr() as usize;
+ debug_assert!(!self.is_closed());
+
+ self.0 = waiter;
+ }
+
+ fn is_stub(self, stub: &Waiter) -> bool {
+ self.as_ptr() as usize == stub as *const _ as usize
+ }
+
+    /// Loads the state from an `AtomicUsize`.
+ fn load(cell: &AtomicUsize, ordering: Ordering) -> SemState {
+ let value = cell.load(ordering);
+ SemState(value)
+ }
+
+ fn fetch_set_closed(cell: &AtomicUsize, ordering: Ordering) -> SemState {
+ let value = cell.fetch_or(CLOSED_FLAG, ordering);
+ SemState(value)
+ }
+
+ fn is_closed(self) -> bool {
+ self.0 & CLOSED_FLAG == CLOSED_FLAG
+ }
+
+ /// Converts the state into a `usize` representation.
+ fn to_usize(self) -> usize {
+ self.0
+ }
+}
+
+impl fmt::Debug for SemState {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut fmt = fmt.debug_struct("SemState");
+
+ if self.is_waiter() {
+ fmt.field("state", &"<waiter>");
+ } else {
+ fmt.field("permits", &self.available_permits());
+ }
+
+ fmt.finish()
+ }
+}
+
+// ===== impl WaiterState =====
+
+impl WaiterState {
+ fn permits_to_acquire(self) -> usize {
+ self.0 >> PERMIT_SHIFT
+ }
+
+ fn set_permits_to_acquire(&mut self, val: usize) {
+ self.0 = (val << PERMIT_SHIFT) | (self.0 & !PERMIT_MASK)
+ }
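+
+    // Illustrative example (assumed layout; the real constants are defined
+    // earlier in this file): with the QUEUED/CLOSED/DROPPED flags packed
+    // below `PERMIT_SHIFT`, a queued waiter needing 3 more permits holds
+    // `(3 << PERMIT_SHIFT) | QUEUED`, and `permits_to_acquire` recovers the 3
+    // by shifting the flag bits back out.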
+
+ fn is_queued(self) -> bool {
+ self.0 & QUEUED == QUEUED
+ }
+
+ fn set_queued(&mut self) {
+ self.0 |= QUEUED;
+ }
+
+ fn is_closed(self) -> bool {
+ self.0 & CLOSED == CLOSED
+ }
+
+ fn set_closed(&mut self) {
+ self.0 |= CLOSED;
+ }
+
+ fn unset_queued(&mut self) {
+ assert!(self.is_queued());
+ self.0 -= QUEUED;
+ }
+
+ fn is_dropped(self) -> bool {
+ self.0 & DROPPED == DROPPED
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/task/atomic_waker.rs b/third_party/rust/tokio/src/sync/task/atomic_waker.rs
new file mode 100644
index 0000000000..73b1745f1a
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/task/atomic_waker.rs
@@ -0,0 +1,318 @@
+#![cfg_attr(any(loom, not(feature = "sync")), allow(dead_code, unreachable_pub))]
+
+use crate::loom::cell::UnsafeCell;
+use crate::loom::sync::atomic::{self, AtomicUsize};
+
+use std::fmt;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
+use std::task::Waker;
+
+/// A synchronization primitive for task waking.
+///
+/// `AtomicWaker` will coordinate concurrent wakes with the consumer
+/// potentially "waking" the underlying task. This is useful in scenarios
+/// where a computation completes in another thread and wants to wake the
+/// consumer, but the consumer is in the process of being migrated to a new
+/// logical task.
+///
+/// Consumers should call `register` before checking the result of a computation
+/// and producers should call `wake` after producing the computation (this
+/// differs from the usual `thread::park` pattern). It is also permitted for
+/// `wake` to be called **before** `register`. This results in a no-op.
+///
+/// A single `AtomicWaker` may be reused for any number of calls to `register` or
+/// `wake`.
+pub(crate) struct AtomicWaker {
+ state: AtomicUsize,
+ waker: UnsafeCell<Option<Waker>>,
+}
+
+// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell
+// stores a `Waker` value produced by calls to `register` and many threads can
+// race to take the waker by calling `wake`.
+//
+// If a new `Waker` instance is produced by calling `register` before an existing
+// one is consumed, then the existing one is overwritten.
+//
+// While `AtomicWaker` is single-producer, the implementation ensures memory
+// safety. In the event of concurrent calls to `register`, there will be a
+// single winner whose waker will get stored in the cell. The losers will not
+// have their tasks woken. As such, callers should ensure to add synchronization
+// to calls to `register`.
+//
+// The implementation uses a single `AtomicUsize` value to coordinate access to
+// the `Waker` cell. There are two bits that are operated on independently. These
+// are represented by `REGISTERING` and `WAKING`.
+//
+// The `REGISTERING` bit is set when a producer enters the critical section. The
+// `WAKING` bit is set when a consumer enters the critical section. Neither
+// bit being set is represented by `WAITING`.
+//
+// A thread obtains an exclusive lock on the waker cell by transitioning the
+// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the
+// operation the thread wishes to perform. When this transition is made, it is
+// guaranteed that no other thread will access the waker cell.
+//
+// # Registering
+//
+// On a call to `register`, an attempt to transition the state from `WAITING`
+// to `REGISTERING` is made. On success, the caller obtains a lock on the
+// waker cell.
+//
+// If the lock is obtained, then the thread sets the waker cell to the waker
+// provided as an argument. Then it attempts to transition the state back from
+// `REGISTERING` -> `WAITING`.
+//
+// If this transition is successful, then the registering process is complete
+// and the next call to `wake` will observe the waker.
+//
+// If the transition fails, then there was a concurrent call to `wake` that
+// was unable to access the waker cell (due to the registering thread holding the
+// lock). To handle this, the registering thread removes the waker it just set
+// from the cell and calls `wake` on it. This call to wake represents the
+// attempt to wake by the other thread (that set the `WAKING` bit). The
+// state is then transitioned from `REGISTERING | WAKING` back to `WAITING`.
+// This transition must succeed because, at this point, the state cannot be
+// transitioned by another thread.
+//
+// # Waking
+//
+// On a call to `wake`, an attempt to transition the state from `WAITING` to
+// `WAKING` is made. On success, the caller obtains a lock on the waker cell.
+//
+// If the lock is obtained, then the thread takes ownership of the current value
+// in the waker cell, and calls `wake` on it. The state is then transitioned
+// back to `WAITING`. This transition must succeed as, at this point, the state
+// cannot be transitioned by another thread.
+//
+// If the thread is unable to obtain the lock, the `WAKING` bit is still set.
+// This is because it has either been set by the current thread but the previous
+// value included the `REGISTERING` bit **or** a concurrent thread is in the
+// `WAKING` critical section. Either way, no action must be taken.
+//
+// If the current thread is the only concurrent call to `wake` and another
+// thread is in the `register` critical section, when the other thread **exits**
+// the `register` critical section, it will observe the `WAKING` bit and
+// handle the waker itself.
+//
+// If another thread is in the `wake` critical section, then it will handle
+// waking the caller task.
+//
+// # A potential race (safely handled)
+//
+// Imagine the following situation:
+//
+// * Thread A obtains the `wake` lock and wakes a task.
+//
+// * Before thread A releases the `wake` lock, the woken task is scheduled.
+//
+// * Thread B attempts to wake the task. In theory this should result in the
+// task being woken, but it cannot because thread A still holds the wake
+// lock.
+//
+// This case is handled by requiring users of `AtomicWaker` to call `register`
+// **before** attempting to observe the application state change that resulted
+// in the task being woken. Producers also change the application state
+// before calling `wake`.
+//
+// Because of this, the task will do one of two things.
+//
+// 1) Observe the application state change that Thread B is waking on. In
+// this case, it is OK for Thread B's wake to be lost.
+//
+// 2) Call register before attempting to observe the application state. Since
+// Thread A still holds the `wake` lock, the call to `register` will result
+// in the task waking itself and get scheduled again.
+
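+// The protocol above implies the following usage sketch. `Chan` is a
+// hypothetical example type; only `register_by_ref` and `wake` come from this
+// module, and the orderings shown are illustrative:
+//
+//     struct Chan {
+//         ready: AtomicBool,
+//         waker: AtomicWaker,
+//     }
+//
+//     // Consumer: register *before* observing the state.
+//     fn poll_ready(chan: &Chan, cx: &mut Context<'_>) -> Poll<()> {
+//         chan.waker.register_by_ref(cx.waker());
+//         if chan.ready.load(Acquire) {
+//             Poll::Ready(())
+//         } else {
+//             Poll::Pending
+//         }
+//     }
+//
+//     // Producer: change the state *before* calling `wake`.
+//     fn complete(chan: &Chan) {
+//         chan.ready.store(true, Release);
+//         chan.waker.wake();
+//     }
+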
+/// Idle state
+const WAITING: usize = 0;
+
+/// A new waker value is being registered with the `AtomicWaker` cell.
+const REGISTERING: usize = 0b01;
+
+/// The task currently registered with the `AtomicWaker` cell is being woken.
+const WAKING: usize = 0b10;
+
+impl AtomicWaker {
+ /// Create an `AtomicWaker`
+ pub(crate) fn new() -> AtomicWaker {
+ AtomicWaker {
+ state: AtomicUsize::new(WAITING),
+ waker: UnsafeCell::new(None),
+ }
+ }
+
+    /// Registers the provided waker to be notified on calls to `wake`.
+    ///
+    /// This is the by-value counterpart of `register_by_ref`.
+ #[cfg(feature = "io-driver")]
+ pub(crate) fn register(&self, waker: Waker) {
+ self.do_register(waker);
+ }
+
+ /// Registers the provided waker to be notified on calls to `wake`.
+ ///
+    /// The new waker will take the place of any previous wakers that were registered
+ /// by previous calls to `register`. Any calls to `wake` that happen after
+ /// a call to `register` (as defined by the memory ordering rules), will
+ /// wake the `register` caller's task.
+ ///
+ /// It is safe to call `register` with multiple other threads concurrently
+ /// calling `wake`. This will result in the `register` caller's current
+ /// task being woken once.
+ ///
+ /// This function is safe to call concurrently, but this is generally a bad
+ /// idea. Concurrent calls to `register` will attempt to register different
+ /// tasks to be woken. One of the callers will win and have its task set,
+ /// but there is no guarantee as to which caller will succeed.
+ pub(crate) fn register_by_ref(&self, waker: &Waker) {
+ self.do_register(waker);
+ }
+
+ fn do_register<W>(&self, waker: W)
+ where
+ W: WakerRef,
+ {
+ match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) {
+ WAITING => {
+ unsafe {
+                    // Lock acquired; update the waker cell.
+ self.waker.with_mut(|t| *t = Some(waker.into_waker()));
+
+ // Release the lock. If the state transitioned to include
+                    // the `WAKING` bit, it means that `wake` was called
+                    // concurrently, so we have to remove the waker and
+                    // wake it.
+ //
+ // Start by assuming that the state is `REGISTERING` as this
+                    // is what we just set it to.
+ let res = self
+ .state
+ .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => {}
+ Err(actual) => {
+ // This branch can only be reached if a
+ // concurrent thread called `wake`. In this
+                            // case, `actual` **must** be
+                            // `REGISTERING | WAKING`.
+ debug_assert_eq!(actual, REGISTERING | WAKING);
+
+ // Take the waker to wake once the atomic operation has
+ // completed.
+ let waker = self.waker.with_mut(|t| (*t).take()).unwrap();
+
+                            // Just swap, because no one can change the state
+                            // while it is `REGISTERING | WAKING`.
+ self.state.swap(WAITING, AcqRel);
+
+                            // The atomic swap is complete; now
+                            // wake the waker and return.
+ waker.wake();
+ }
+ }
+ }
+ }
+ WAKING => {
+ // Currently in the process of waking the task, i.e.,
+ // `wake` is currently being called on the old waker.
+ // So, we call wake on the new waker.
+ waker.wake();
+
+ // This is equivalent to a spin lock, so use a spin hint.
+ atomic::spin_loop_hint();
+ }
+ state => {
+ // In this case, a concurrent thread is holding the
+ // "registering" lock. This probably indicates a bug in the
+ // caller's code as racing to call `register` doesn't make much
+ // sense.
+ //
+ // We just want to maintain memory safety. It is ok to drop the
+ // call to `register`.
+ debug_assert!(state == REGISTERING || state == REGISTERING | WAKING);
+ }
+ }
+ }
+
+ /// Wakes the task that last called `register`.
+ ///
+ /// If `register` has not been called yet, then this does nothing.
+ pub(crate) fn wake(&self) {
+ if let Some(waker) = self.take_waker() {
+ waker.wake();
+ }
+ }
+
+ /// Attempts to take the `Waker` value out of the `AtomicWaker` with the
+ /// intention that the caller will wake the task later.
+ pub(crate) fn take_waker(&self) -> Option<Waker> {
+ // AcqRel ordering is used in order to acquire the value of the `waker`
+ // cell as well as to establish a `release` ordering with whatever
+ // memory the `AtomicWaker` is associated with.
+ match self.state.fetch_or(WAKING, AcqRel) {
+ WAITING => {
+ // The waking lock has been acquired.
+ let waker = unsafe { self.waker.with_mut(|t| (*t).take()) };
+
+ // Release the lock
+ self.state.fetch_and(!WAKING, Release);
+
+ waker
+ }
+ state => {
+ // There is a concurrent thread currently updating the
+ // associated waker.
+ //
+ // Nothing more to do as the `WAKING` bit has been set. It
+ // doesn't matter if there are concurrent registering threads or
+ // not.
+ //
+ debug_assert!(
+ state == REGISTERING || state == REGISTERING | WAKING || state == WAKING
+ );
+ None
+ }
+ }
+ }
+}
+
+impl Default for AtomicWaker {
+ fn default() -> Self {
+ AtomicWaker::new()
+ }
+}
+
+impl fmt::Debug for AtomicWaker {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "AtomicWaker")
+ }
+}
+
+unsafe impl Send for AtomicWaker {}
+unsafe impl Sync for AtomicWaker {}
+
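+// `WakerRef` lets `do_register` accept either an owned `Waker`, which is
+// moved into the cell, or a borrowed `&Waker`, which is cloned only when it
+// actually has to be stored (and woken by reference otherwise), without
+// duplicating the registration logic.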
+trait WakerRef {
+ fn wake(self);
+ fn into_waker(self) -> Waker;
+}
+
+impl WakerRef for Waker {
+ fn wake(self) {
+ self.wake()
+ }
+
+ fn into_waker(self) -> Waker {
+ self
+ }
+}
+
+impl WakerRef for &Waker {
+ fn wake(self) {
+ self.wake_by_ref()
+ }
+
+ fn into_waker(self) -> Waker {
+ self.clone()
+ }
+}
diff --git a/third_party/rust/tokio/src/sync/task/mod.rs b/third_party/rust/tokio/src/sync/task/mod.rs
new file mode 100644
index 0000000000..a6bc6ed06e
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/task/mod.rs
@@ -0,0 +1,4 @@
+//! Thread-safe task notification primitives.
+
+mod atomic_waker;
+pub(crate) use self::atomic_waker::AtomicWaker;
diff --git a/third_party/rust/tokio/src/sync/tests/atomic_waker.rs b/third_party/rust/tokio/src/sync/tests/atomic_waker.rs
new file mode 100644
index 0000000000..c832d62e9a
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/atomic_waker.rs
@@ -0,0 +1,34 @@
+use crate::sync::AtomicWaker;
+use tokio_test::task;
+
+use std::task::Waker;
+
+trait AssertSend: Send {}
+trait AssertSync: Sync {}
+
+impl AssertSend for AtomicWaker {}
+impl AssertSync for AtomicWaker {}
+
+impl AssertSend for Waker {}
+impl AssertSync for Waker {}
+
+#[test]
+fn basic_usage() {
+ let mut waker = task::spawn(AtomicWaker::new());
+
+ waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
+ waker.wake();
+
+ assert!(waker.is_woken());
+}
+
+#[test]
+fn wake_without_register() {
+ let mut waker = task::spawn(AtomicWaker::new());
+ waker.wake();
+
+ // Registering should not result in a notification
+ waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
+
+ assert!(!waker.is_woken());
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs b/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs
new file mode 100644
index 0000000000..c148bcbe11
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_atomic_waker.rs
@@ -0,0 +1,45 @@
+use crate::sync::task::AtomicWaker;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::sync::atomic::AtomicUsize;
+use loom::thread;
+use std::sync::atomic::Ordering::Relaxed;
+use std::sync::Arc;
+use std::task::Poll::{Pending, Ready};
+
+struct Chan {
+ num: AtomicUsize,
+ task: AtomicWaker,
+}
+
+#[test]
+fn basic_notification() {
+ const NUM_NOTIFY: usize = 2;
+
+ loom::model(|| {
+ let chan = Arc::new(Chan {
+ num: AtomicUsize::new(0),
+ task: AtomicWaker::new(),
+ });
+
+ for _ in 0..NUM_NOTIFY {
+ let chan = chan.clone();
+
+ thread::spawn(move || {
+ chan.num.fetch_add(1, Relaxed);
+ chan.task.wake();
+ });
+ }
+
+ block_on(poll_fn(move |cx| {
+ chan.task.register_by_ref(cx.waker());
+
+ if NUM_NOTIFY == chan.num.load(Relaxed) {
+ return Ready(());
+ }
+
+ Pending
+ }));
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs b/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs
new file mode 100644
index 0000000000..da12fb9ff0
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_broadcast.rs
@@ -0,0 +1,180 @@
+use crate::sync::broadcast;
+use crate::sync::broadcast::RecvError::{Closed, Lagged};
+
+use loom::future::block_on;
+use loom::sync::Arc;
+use loom::thread;
+use tokio_test::{assert_err, assert_ok};
+
+#[test]
+fn broadcast_send() {
+ loom::model(|| {
+ let (tx1, mut rx) = broadcast::channel(2);
+ let tx1 = Arc::new(tx1);
+ let tx2 = tx1.clone();
+
+ let th1 = thread::spawn(move || {
+ block_on(async {
+ assert_ok!(tx1.send("one"));
+ assert_ok!(tx1.send("two"));
+ assert_ok!(tx1.send("three"));
+ });
+ });
+
+ let th2 = thread::spawn(move || {
+ block_on(async {
+ assert_ok!(tx2.send("eins"));
+ assert_ok!(tx2.send("zwei"));
+ assert_ok!(tx2.send("drei"));
+ });
+ });
+
+ block_on(async {
+ let mut num = 0;
+ loop {
+ match rx.recv().await {
+ Ok(_) => num += 1,
+ Err(Closed) => break,
+ Err(Lagged(n)) => num += n as usize,
+ }
+ }
+ assert_eq!(num, 6);
+ });
+
+ assert_ok!(th1.join());
+ assert_ok!(th2.join());
+ });
+}
+
+// An `Arc` is used as the value in order to detect memory leaks.
+#[test]
+fn broadcast_two() {
+ loom::model(|| {
+ let (tx, mut rx1) = broadcast::channel::<Arc<&'static str>>(16);
+ let mut rx2 = tx.subscribe();
+
+ let th1 = thread::spawn(move || {
+ block_on(async {
+ let v = assert_ok!(rx1.recv().await);
+ assert_eq!(*v, "hello");
+
+ let v = assert_ok!(rx1.recv().await);
+ assert_eq!(*v, "world");
+
+ match assert_err!(rx1.recv().await) {
+ Closed => {}
+ _ => panic!(),
+ }
+ });
+ });
+
+ let th2 = thread::spawn(move || {
+ block_on(async {
+ let v = assert_ok!(rx2.recv().await);
+ assert_eq!(*v, "hello");
+
+ let v = assert_ok!(rx2.recv().await);
+ assert_eq!(*v, "world");
+
+ match assert_err!(rx2.recv().await) {
+ Closed => {}
+ _ => panic!(),
+ }
+ });
+ });
+
+ assert_ok!(tx.send(Arc::new("hello")));
+ assert_ok!(tx.send(Arc::new("world")));
+ drop(tx);
+
+ assert_ok!(th1.join());
+ assert_ok!(th2.join());
+ });
+}
+
+#[test]
+fn broadcast_wrap() {
+ loom::model(|| {
+ let (tx, mut rx1) = broadcast::channel(2);
+ let mut rx2 = tx.subscribe();
+
+ let th1 = thread::spawn(move || {
+ block_on(async {
+ let mut num = 0;
+
+ loop {
+ match rx1.recv().await {
+ Ok(_) => num += 1,
+ Err(Closed) => break,
+ Err(Lagged(n)) => num += n as usize,
+ }
+ }
+
+ assert_eq!(num, 3);
+ });
+ });
+
+ let th2 = thread::spawn(move || {
+ block_on(async {
+ let mut num = 0;
+
+ loop {
+ match rx2.recv().await {
+ Ok(_) => num += 1,
+ Err(Closed) => break,
+ Err(Lagged(n)) => num += n as usize,
+ }
+ }
+
+ assert_eq!(num, 3);
+ });
+ });
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+ assert_ok!(tx.send("three"));
+
+ drop(tx);
+
+ assert_ok!(th1.join());
+ assert_ok!(th2.join());
+ });
+}
+
+#[test]
+fn drop_rx() {
+ loom::model(|| {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let rx2 = tx.subscribe();
+
+ let th1 = thread::spawn(move || {
+ block_on(async {
+ let v = assert_ok!(rx1.recv().await);
+ assert_eq!(v, "one");
+
+ let v = assert_ok!(rx1.recv().await);
+ assert_eq!(v, "two");
+
+ let v = assert_ok!(rx1.recv().await);
+ assert_eq!(v, "three");
+
+ match assert_err!(rx1.recv().await) {
+ Closed => {}
+ _ => panic!(),
+ }
+ });
+ });
+
+ let th2 = thread::spawn(move || {
+ drop(rx2);
+ });
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+ assert_ok!(tx.send("three"));
+ drop(tx);
+
+ assert_ok!(th1.join());
+ assert_ok!(th2.join());
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_list.rs b/third_party/rust/tokio/src/sync/tests/loom_list.rs
new file mode 100644
index 0000000000..4067f865ce
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_list.rs
@@ -0,0 +1,48 @@
+use crate::sync::mpsc::list;
+
+use loom::thread;
+use std::sync::Arc;
+
+#[test]
+fn smoke() {
+ use crate::sync::mpsc::block::Read::*;
+
+ const NUM_TX: usize = 2;
+ const NUM_MSG: usize = 2;
+
+ loom::model(|| {
+ let (tx, mut rx) = list::channel();
+ let tx = Arc::new(tx);
+
+ for th in 0..NUM_TX {
+ let tx = tx.clone();
+
+ thread::spawn(move || {
+ for i in 0..NUM_MSG {
+ tx.push((th, i));
+ }
+ });
+ }
+
+ let mut next = vec![0; NUM_TX];
+
+ loop {
+ match rx.pop(&tx) {
+ Some(Value((th, v))) => {
+ assert_eq!(v, next[th]);
+ next[th] += 1;
+
+ if next.iter().all(|&i| i == NUM_MSG) {
+ break;
+ }
+ }
+ Some(Closed) => {
+ panic!();
+ }
+ None => {
+ thread::yield_now();
+ }
+ }
+ }
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs b/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs
new file mode 100644
index 0000000000..6a1a6abedd
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_mpsc.rs
@@ -0,0 +1,77 @@
+use crate::sync::mpsc;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::thread;
+
+#[test]
+fn closing_tx() {
+ loom::model(|| {
+ let (mut tx, mut rx) = mpsc::channel(16);
+
+ thread::spawn(move || {
+ tx.try_send(()).unwrap();
+ drop(tx);
+ });
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_some());
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_none());
+ });
+}
+
+#[test]
+fn closing_unbounded_tx() {
+ loom::model(|| {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ thread::spawn(move || {
+ tx.send(()).unwrap();
+ drop(tx);
+ });
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_some());
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_none());
+ });
+}
+
+#[test]
+fn dropping_tx() {
+ loom::model(|| {
+ let (tx, mut rx) = mpsc::channel::<()>(16);
+
+ for _ in 0..2 {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ drop(tx);
+ });
+ }
+ drop(tx);
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_none());
+ });
+}
+
+#[test]
+fn dropping_unbounded_tx() {
+ loom::model(|| {
+ let (tx, mut rx) = mpsc::unbounded_channel::<()>();
+
+ for _ in 0..2 {
+ let tx = tx.clone();
+ thread::spawn(move || {
+ drop(tx);
+ });
+ }
+ drop(tx);
+
+ let v = block_on(poll_fn(|cx| rx.poll_recv(cx)));
+ assert!(v.is_none());
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_notify.rs b/third_party/rust/tokio/src/sync/tests/loom_notify.rs
new file mode 100644
index 0000000000..60981d4669
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_notify.rs
@@ -0,0 +1,90 @@
+use crate::sync::Notify;
+
+use loom::future::block_on;
+use loom::sync::Arc;
+use loom::thread;
+
+#[test]
+fn notify_one() {
+ loom::model(|| {
+ let tx = Arc::new(Notify::new());
+ let rx = tx.clone();
+
+ let th = thread::spawn(move || {
+ block_on(async {
+ rx.notified().await;
+ });
+ });
+
+ tx.notify();
+ th.join().unwrap();
+ });
+}
+
+#[test]
+fn notify_multi() {
+ loom::model(|| {
+ let notify = Arc::new(Notify::new());
+
+ let mut ths = vec![];
+
+ for _ in 0..2 {
+ let notify = notify.clone();
+
+ ths.push(thread::spawn(move || {
+ block_on(async {
+ notify.notified().await;
+ notify.notify();
+ })
+ }));
+ }
+
+ notify.notify();
+
+ for th in ths.drain(..) {
+ th.join().unwrap();
+ }
+
+ block_on(async {
+ notify.notified().await;
+ });
+ });
+}
+
+#[test]
+fn notify_drop() {
+ use crate::future::poll_fn;
+ use std::future::Future;
+ use std::task::Poll;
+
+ loom::model(|| {
+ let notify = Arc::new(Notify::new());
+ let rx1 = notify.clone();
+ let rx2 = notify.clone();
+
+ let th1 = thread::spawn(move || {
+ let mut recv = Box::pin(rx1.notified());
+
+ block_on(poll_fn(|cx| {
+ if recv.as_mut().poll(cx).is_ready() {
+ rx1.notify();
+ }
+ Poll::Ready(())
+ }));
+ });
+
+ let th2 = thread::spawn(move || {
+ block_on(async {
+ rx2.notified().await;
+ // Trigger second notification
+ rx2.notify();
+ rx2.notified().await;
+ });
+ });
+
+ notify.notify();
+
+ th1.join().unwrap();
+ th2.join().unwrap();
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs b/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs
new file mode 100644
index 0000000000..dfa7459da7
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_oneshot.rs
@@ -0,0 +1,109 @@
+use crate::sync::oneshot;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::thread;
+use std::task::Poll::{Pending, Ready};
+
+#[test]
+fn smoke() {
+ loom::model(|| {
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ tx.send(1).unwrap();
+ });
+
+ let value = block_on(rx).unwrap();
+ assert_eq!(1, value);
+ });
+}
+
+#[test]
+fn changing_rx_task() {
+ loom::model(|| {
+ let (tx, mut rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ tx.send(1).unwrap();
+ });
+
+ let rx = thread::spawn(move || {
+ let ready = block_on(poll_fn(|cx| match Pin::new(&mut rx).poll(cx) {
+ Ready(Ok(value)) => {
+ assert_eq!(1, value);
+ Ready(true)
+ }
+ Ready(Err(_)) => unimplemented!(),
+ Pending => Ready(false),
+ }));
+
+ if ready {
+ None
+ } else {
+ Some(rx)
+ }
+ })
+ .join()
+ .unwrap();
+
+ if let Some(rx) = rx {
+ // Previous task parked, use a new task...
+ let value = block_on(rx).unwrap();
+ assert_eq!(1, value);
+ }
+ });
+}
+
+// TODO: Move this into `oneshot` proper.
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+struct OnClose<'a> {
+ tx: &'a mut oneshot::Sender<i32>,
+}
+
+impl<'a> OnClose<'a> {
+ fn new(tx: &'a mut oneshot::Sender<i32>) -> Self {
+ OnClose { tx }
+ }
+}
+
+impl Future for OnClose<'_> {
+ type Output = bool;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> {
+ let res = self.get_mut().tx.poll_closed(cx);
+ Ready(res.is_ready())
+ }
+}
+
+#[test]
+fn changing_tx_task() {
+ loom::model(|| {
+ let (mut tx, rx) = oneshot::channel::<i32>();
+
+ thread::spawn(move || {
+ drop(rx);
+ });
+
+ let tx = thread::spawn(move || {
+ let t1 = block_on(OnClose::new(&mut tx));
+
+ if t1 {
+ None
+ } else {
+ Some(tx)
+ }
+ })
+ .join()
+ .unwrap();
+
+ if let Some(mut tx) = tx {
+ // Previous task parked, use a new task...
+ block_on(OnClose::new(&mut tx));
+ }
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs b/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs
new file mode 100644
index 0000000000..48d06e1d5f
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_rwlock.rs
@@ -0,0 +1,78 @@
+use crate::sync::rwlock::*;
+
+use loom::future::block_on;
+use loom::thread;
+use std::sync::Arc;
+
+#[test]
+fn concurrent_write() {
+ let mut b = loom::model::Builder::new();
+
+ b.check(|| {
+ let rwlock = Arc::new(RwLock::<u32>::new(0));
+
+ let rwclone = rwlock.clone();
+ let t1 = thread::spawn(move || {
+ block_on(async {
+ let mut guard = rwclone.write().await;
+ *guard += 5;
+ });
+ });
+
+ let rwclone = rwlock.clone();
+ let t2 = thread::spawn(move || {
+ block_on(async {
+ let mut guard = rwclone.write().await;
+ *guard += 5;
+ });
+ });
+
+ t1.join().expect("thread 1 write should not panic");
+ t2.join().expect("thread 2 write should not panic");
+        // When all threads have finished, the value in the lock should be 10.
+ let guard = block_on(rwlock.read());
+ assert_eq!(10, *guard);
+ });
+}
+
+#[test]
+fn concurrent_read_write() {
+ let mut b = loom::model::Builder::new();
+
+ b.check(|| {
+ let rwlock = Arc::new(RwLock::<u32>::new(0));
+
+ let rwclone = rwlock.clone();
+ let t1 = thread::spawn(move || {
+ block_on(async {
+ let mut guard = rwclone.write().await;
+ *guard += 5;
+ });
+ });
+
+ let rwclone = rwlock.clone();
+ let t2 = thread::spawn(move || {
+ block_on(async {
+ let mut guard = rwclone.write().await;
+ *guard += 5;
+ });
+ });
+
+ let rwclone = rwlock.clone();
+ let t3 = thread::spawn(move || {
+ block_on(async {
+ let guard = rwclone.read().await;
+                // At this point the value in the lock may be 0, 5, or 10.
+ assert!(*guard == 0 || *guard == 5 || *guard == 10);
+ });
+ });
+
+ t1.join().expect("thread 1 write should not panic");
+ t2.join().expect("thread 2 write should not panic");
+ t3.join().expect("thread 3 read should not panic");
+
+ let guard = block_on(rwlock.read());
+        // When all threads have finished, the value in the lock should be 10.
+ assert_eq!(10, *guard);
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs b/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs
new file mode 100644
index 0000000000..76a1bc0062
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_semaphore_batch.rs
@@ -0,0 +1,215 @@
+use crate::sync::batch_semaphore::*;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::sync::atomic::AtomicUsize;
+use loom::thread;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::Arc;
+use std::task::Poll::Ready;
+use std::task::{Context, Poll};
+
+#[test]
+fn basic_usage() {
+ const NUM: usize = 2;
+
+ struct Shared {
+ semaphore: Semaphore,
+ active: AtomicUsize,
+ }
+
+ async fn actor(shared: Arc<Shared>) {
+ shared.semaphore.acquire(1).await.unwrap();
+ let actual = shared.active.fetch_add(1, SeqCst);
+ assert!(actual <= NUM - 1);
+
+ let actual = shared.active.fetch_sub(1, SeqCst);
+ assert!(actual <= NUM);
+ shared.semaphore.release(1);
+ }
+
+ loom::model(|| {
+ let shared = Arc::new(Shared {
+ semaphore: Semaphore::new(NUM),
+ active: AtomicUsize::new(0),
+ });
+
+ for _ in 0..NUM {
+ let shared = shared.clone();
+
+ thread::spawn(move || {
+ block_on(actor(shared));
+ });
+ }
+
+ block_on(actor(shared));
+ });
+}
+
+#[test]
+fn release() {
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || {
+ block_on(semaphore.acquire(1)).unwrap();
+ semaphore.release(1);
+ });
+ }
+
+ block_on(semaphore.acquire(1)).unwrap();
+
+ semaphore.release(1);
+ });
+}
+
+#[test]
+fn basic_closing() {
+ const NUM: usize = 2;
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ for _ in 0..NUM {
+ let semaphore = semaphore.clone();
+
+ thread::spawn(move || {
+ for _ in 0..2 {
+ block_on(semaphore.acquire(1)).map_err(|_| ())?;
+
+ semaphore.release(1);
+ }
+
+ Ok::<(), ()>(())
+ });
+ }
+
+ semaphore.close();
+ });
+}
+
+#[test]
+fn concurrent_close() {
+ const NUM: usize = 3;
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ for _ in 0..NUM {
+ let semaphore = semaphore.clone();
+
+ thread::spawn(move || {
+ block_on(semaphore.acquire(1)).map_err(|_| ())?;
+ semaphore.release(1);
+ semaphore.close();
+
+ Ok::<(), ()>(())
+ });
+ }
+ });
+}
+
+#[test]
+fn concurrent_cancel() {
+ async fn poll_and_cancel(semaphore: Arc<Semaphore>) {
+ let mut acquire1 = Some(semaphore.acquire(1));
+ let mut acquire2 = Some(semaphore.acquire(1));
+ poll_fn(|cx| {
+ // poll the acquire future once, and then immediately throw
+ // it away. this simulates a situation where a future is
+ // polled and then cancelled, such as by a timeout.
+ if let Some(acquire) = acquire1.take() {
+ pin!(acquire);
+ let _ = acquire.poll(cx);
+ }
+ if let Some(acquire) = acquire2.take() {
+ pin!(acquire);
+ let _ = acquire.poll(cx);
+ }
+ Poll::Ready(())
+ })
+ .await
+ }
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(0));
+ let t1 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+ let t2 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+ let t3 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+
+ t1.join().unwrap();
+ semaphore.release(10);
+ t2.join().unwrap();
+ t3.join().unwrap();
+ });
+}
+
+#[test]
+fn batch() {
+ let mut b = loom::model::Builder::new();
+ b.preemption_bound = Some(1);
+
+ b.check(|| {
+ let semaphore = Arc::new(Semaphore::new(10));
+ let active = Arc::new(AtomicUsize::new(0));
+ let mut ths = vec![];
+
+ for _ in 0..2 {
+ let semaphore = semaphore.clone();
+ let active = active.clone();
+
+ ths.push(thread::spawn(move || {
+ for n in &[4, 10, 8] {
+ block_on(semaphore.acquire(*n)).unwrap();
+
+ active.fetch_add(*n as usize, SeqCst);
+
+ let num_active = active.load(SeqCst);
+ assert!(num_active <= 10);
+
+ thread::yield_now();
+
+ active.fetch_sub(*n as usize, SeqCst);
+
+ semaphore.release(*n as usize);
+ }
+ }));
+ }
+
+ for th in ths.into_iter() {
+ th.join().unwrap();
+ }
+
+ assert_eq!(10, semaphore.available_permits());
+ });
+}
+
+#[test]
+fn release_during_acquire() {
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(10));
+ semaphore
+ .try_acquire(8)
+ .expect("try_acquire should succeed; semaphore uncontended");
+ let semaphore2 = semaphore.clone();
+ let thread = thread::spawn(move || block_on(semaphore2.acquire(4)).unwrap());
+
+ semaphore.release(8);
+ thread.join().unwrap();
+ semaphore.release(4);
+ assert_eq!(10, semaphore.available_permits());
+ })
+}
diff --git a/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs b/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs
new file mode 100644
index 0000000000..b5e5efba82
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/loom_semaphore_ll.rs
@@ -0,0 +1,192 @@
+use crate::sync::semaphore_ll::*;
+
+use futures::future::poll_fn;
+use loom::future::block_on;
+use loom::thread;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::Arc;
+use std::task::Poll::Ready;
+use std::task::{Context, Poll};
+
+#[test]
+fn basic_usage() {
+ const NUM: usize = 2;
+
+ struct Actor {
+ waiter: Permit,
+ shared: Arc<Shared>,
+ }
+
+ struct Shared {
+ semaphore: Semaphore,
+ active: AtomicUsize,
+ }
+
+ impl Future for Actor {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let me = &mut *self;
+
+ ready!(me.waiter.poll_acquire(cx, 1, &me.shared.semaphore)).unwrap();
+
+ let actual = me.shared.active.fetch_add(1, SeqCst);
+ assert!(actual <= NUM - 1);
+
+ let actual = me.shared.active.fetch_sub(1, SeqCst);
+ assert!(actual <= NUM);
+
+ me.waiter.release(1, &me.shared.semaphore);
+
+ Ready(())
+ }
+ }
+
+ loom::model(|| {
+ let shared = Arc::new(Shared {
+ semaphore: Semaphore::new(NUM),
+ active: AtomicUsize::new(0),
+ });
+
+ for _ in 0..NUM {
+ let shared = shared.clone();
+
+ thread::spawn(move || {
+ block_on(Actor {
+ waiter: Permit::new(),
+ shared,
+ });
+ });
+ }
+
+ block_on(Actor {
+ waiter: Permit::new(),
+ shared,
+ });
+ });
+}
+
+#[test]
+fn release() {
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || {
+ let mut permit = Permit::new();
+
+ block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap();
+
+ permit.release(1, &semaphore);
+ });
+ }
+
+ let mut permit = Permit::new();
+
+ block_on(poll_fn(|cx| permit.poll_acquire(cx, 1, &semaphore))).unwrap();
+
+ permit.release(1, &semaphore);
+ });
+}
+
+#[test]
+fn basic_closing() {
+ const NUM: usize = 2;
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ for _ in 0..NUM {
+ let semaphore = semaphore.clone();
+
+ thread::spawn(move || {
+ let mut permit = Permit::new();
+
+ for _ in 0..2 {
+ block_on(poll_fn(|cx| {
+ permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ())
+ }))?;
+
+ permit.release(1, &semaphore);
+ }
+
+ Ok::<(), ()>(())
+ });
+ }
+
+ semaphore.close();
+ });
+}
+
+#[test]
+fn concurrent_close() {
+ const NUM: usize = 3;
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(1));
+
+ for _ in 0..NUM {
+ let semaphore = semaphore.clone();
+
+ thread::spawn(move || {
+ let mut permit = Permit::new();
+
+ block_on(poll_fn(|cx| {
+ permit.poll_acquire(cx, 1, &semaphore).map_err(|_| ())
+ }))?;
+
+ permit.release(1, &semaphore);
+
+ semaphore.close();
+
+ Ok::<(), ()>(())
+ });
+ }
+ });
+}
+
+#[test]
+fn batch() {
+ let mut b = loom::model::Builder::new();
+ b.preemption_bound = Some(1);
+
+ b.check(|| {
+ let semaphore = Arc::new(Semaphore::new(10));
+ let active = Arc::new(AtomicUsize::new(0));
+ let mut ths = vec![];
+
+ for _ in 0..2 {
+ let semaphore = semaphore.clone();
+ let active = active.clone();
+
+ ths.push(thread::spawn(move || {
+ let mut permit = Permit::new();
+
+ for n in &[4, 10, 8] {
+ block_on(poll_fn(|cx| permit.poll_acquire(cx, *n, &semaphore))).unwrap();
+
+ active.fetch_add(*n as usize, SeqCst);
+
+ let num_active = active.load(SeqCst);
+ assert!(num_active <= 10);
+
+ thread::yield_now();
+
+ active.fetch_sub(*n as usize, SeqCst);
+
+ permit.release(*n, &semaphore);
+ }
+ }));
+ }
+
+ for th in ths.into_iter() {
+ th.join().unwrap();
+ }
+
+ assert_eq!(10, semaphore.available_permits());
+ });
+}
diff --git a/third_party/rust/tokio/src/sync/tests/mod.rs b/third_party/rust/tokio/src/sync/tests/mod.rs
new file mode 100644
index 0000000000..d571754c01
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/mod.rs
@@ -0,0 +1,16 @@
+cfg_not_loom! {
+ mod atomic_waker;
+ mod semaphore_ll;
+ mod semaphore_batch;
+}
+
+cfg_loom! {
+ mod loom_atomic_waker;
+ mod loom_broadcast;
+ mod loom_list;
+ mod loom_mpsc;
+ mod loom_notify;
+ mod loom_oneshot;
+ mod loom_semaphore_batch;
+ mod loom_semaphore_ll;
+}
diff --git a/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs b/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs
new file mode 100644
index 0000000000..60f3f231e7
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/semaphore_batch.rs
@@ -0,0 +1,250 @@
+use crate::sync::batch_semaphore::Semaphore;
+use tokio_test::*;
+
+#[test]
+fn poll_acquire_one_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ assert_ready_ok!(task::spawn(s.acquire(1)).poll());
+ assert_eq!(s.available_permits(), 99);
+}
+
+#[test]
+fn poll_acquire_many_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ assert_ready_ok!(task::spawn(s.acquire(5)).poll());
+ assert_eq!(s.available_permits(), 95);
+
+ assert_ready_ok!(task::spawn(s.acquire(5)).poll());
+ assert_eq!(s.available_permits(), 90);
+}
+
+#[test]
+fn try_acquire_one_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ assert_ok!(s.try_acquire(1));
+ assert_eq!(s.available_permits(), 99);
+
+ assert_ok!(s.try_acquire(1));
+ assert_eq!(s.available_permits(), 98);
+}
+
+#[test]
+fn try_acquire_many_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ assert_ok!(s.try_acquire(5));
+ assert_eq!(s.available_permits(), 95);
+
+ assert_ok!(s.try_acquire(5));
+ assert_eq!(s.available_permits(), 90);
+}
+
+#[test]
+fn poll_acquire_one_unavailable() {
+ let s = Semaphore::new(1);
+
+ // Acquire the first permit
+ assert_ready_ok!(task::spawn(s.acquire(1)).poll());
+ assert_eq!(s.available_permits(), 0);
+
+ let mut acquire_2 = task::spawn(s.acquire(1));
+ // Try to acquire the second permit
+ assert_pending!(acquire_2.poll());
+ assert_eq!(s.available_permits(), 0);
+
+ s.release(1);
+
+ assert_eq!(s.available_permits(), 0);
+ assert!(acquire_2.is_woken());
+ assert_ready_ok!(acquire_2.poll());
+ assert_eq!(s.available_permits(), 0);
+
+ s.release(1);
+ assert_eq!(s.available_permits(), 1);
+}
+
+#[test]
+fn poll_acquire_many_unavailable() {
+ let s = Semaphore::new(5);
+
+ // Acquire the first permit
+ assert_ready_ok!(task::spawn(s.acquire(1)).poll());
+ assert_eq!(s.available_permits(), 4);
+
+    // Try to acquire five more permits
+ let mut acquire_2 = task::spawn(s.acquire(5));
+ assert_pending!(acquire_2.poll());
+ assert_eq!(s.available_permits(), 0);
+
+    // Try to acquire three more permits
+ let mut acquire_3 = task::spawn(s.acquire(3));
+ assert_pending!(acquire_3.poll());
+ assert_eq!(s.available_permits(), 0);
+
+ s.release(1);
+
+ assert_eq!(s.available_permits(), 0);
+ assert!(acquire_2.is_woken());
+ assert_ready_ok!(acquire_2.poll());
+
+ assert!(!acquire_3.is_woken());
+ assert_eq!(s.available_permits(), 0);
+
+ s.release(1);
+ assert!(!acquire_3.is_woken());
+ assert_eq!(s.available_permits(), 0);
+
+ s.release(2);
+ assert!(acquire_3.is_woken());
+
+ assert_ready_ok!(acquire_3.poll());
+}
+
+#[test]
+fn try_acquire_one_unavailable() {
+ let s = Semaphore::new(1);
+
+ // Acquire the first permit
+ assert_ok!(s.try_acquire(1));
+ assert_eq!(s.available_permits(), 0);
+
+ assert_err!(s.try_acquire(1));
+
+ s.release(1);
+
+ assert_eq!(s.available_permits(), 1);
+ assert_ok!(s.try_acquire(1));
+
+ s.release(1);
+ assert_eq!(s.available_permits(), 1);
+}
+
+#[test]
+fn try_acquire_many_unavailable() {
+ let s = Semaphore::new(5);
+
+ // Acquire the first permit
+ assert_ok!(s.try_acquire(1));
+ assert_eq!(s.available_permits(), 4);
+
+ assert_err!(s.try_acquire(5));
+
+ s.release(1);
+ assert_eq!(s.available_permits(), 5);
+
+ assert_ok!(s.try_acquire(5));
+
+ s.release(1);
+ assert_eq!(s.available_permits(), 1);
+
+ s.release(1);
+ assert_eq!(s.available_permits(), 2);
+}
+
+#[test]
+fn poll_acquire_one_zero_permits() {
+ let s = Semaphore::new(0);
+ assert_eq!(s.available_permits(), 0);
+
+ // Try to acquire the permit
+ let mut acquire = task::spawn(s.acquire(1));
+ assert_pending!(acquire.poll());
+
+ s.release(1);
+
+ assert!(acquire.is_woken());
+ assert_ready_ok!(acquire.poll());
+}
+
+#[test]
+#[should_panic]
+fn validates_max_permits() {
+ use std::usize;
+ Semaphore::new((usize::MAX >> 2) + 1);
+}
+
+#[test]
+fn close_semaphore_prevents_acquire() {
+ let s = Semaphore::new(5);
+ s.close();
+
+ assert_eq!(5, s.available_permits());
+
+ assert_ready_err!(task::spawn(s.acquire(1)).poll());
+ assert_eq!(5, s.available_permits());
+
+ assert_ready_err!(task::spawn(s.acquire(1)).poll());
+ assert_eq!(5, s.available_permits());
+}
+
+#[test]
+fn close_semaphore_notifies_permit1() {
+ let s = Semaphore::new(0);
+ let mut acquire = task::spawn(s.acquire(1));
+
+ assert_pending!(acquire.poll());
+
+ s.close();
+
+ assert!(acquire.is_woken());
+ assert_ready_err!(acquire.poll());
+}
+
+#[test]
+fn close_semaphore_notifies_permit2() {
+ let s = Semaphore::new(2);
+
+ // Acquire a couple of permits
+ assert_ready_ok!(task::spawn(s.acquire(1)).poll());
+ assert_ready_ok!(task::spawn(s.acquire(1)).poll());
+
+ let mut acquire3 = task::spawn(s.acquire(1));
+ let mut acquire4 = task::spawn(s.acquire(1));
+ assert_pending!(acquire3.poll());
+ assert_pending!(acquire4.poll());
+
+ s.close();
+
+ assert!(acquire3.is_woken());
+ assert!(acquire4.is_woken());
+
+ assert_ready_err!(acquire3.poll());
+ assert_ready_err!(acquire4.poll());
+
+ assert_eq!(0, s.available_permits());
+
+ s.release(1);
+
+ assert_eq!(1, s.available_permits());
+
+ assert_ready_err!(task::spawn(s.acquire(1)).poll());
+
+ s.release(1);
+
+ assert_eq!(2, s.available_permits());
+}
+
+#[test]
+fn cancel_acquire_releases_permits() {
+ let s = Semaphore::new(10);
+ let _permit1 = s.try_acquire(4).expect("uncontended try_acquire succeeds");
+ assert_eq!(6, s.available_permits());
+
+ let mut acquire = task::spawn(s.acquire(8));
+ assert_pending!(acquire.poll());
+
+ assert_eq!(0, s.available_permits());
+ drop(acquire);
+
+ assert_eq!(6, s.available_permits());
+ assert_ok!(s.try_acquire(6));
+}
diff --git a/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs b/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs
new file mode 100644
index 0000000000..bfb075780b
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/tests/semaphore_ll.rs
@@ -0,0 +1,470 @@
+use crate::sync::semaphore_ll::{Permit, Semaphore};
+use tokio_test::*;
+
+#[test]
+fn poll_acquire_one_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ let mut permit = task::spawn(Permit::new());
+ assert!(!permit.is_acquired());
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 99);
+ assert!(permit.is_acquired());
+
+ // Polling again on the same waiter does not claim a new permit
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 99);
+ assert!(permit.is_acquired());
+}
+
+#[test]
+fn poll_acquire_many_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ let mut permit = task::spawn(Permit::new());
+ assert!(!permit.is_acquired());
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s)));
+ assert_eq!(s.available_permits(), 95);
+ assert!(permit.is_acquired());
+
+ // Polling again on the same waiter does not claim a new permit
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 95);
+ assert!(permit.is_acquired());
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 5, &s)));
+ assert_eq!(s.available_permits(), 95);
+ assert!(permit.is_acquired());
+
+ // Polling for a larger number of permits acquires more
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 8, &s)));
+ assert_eq!(s.available_permits(), 92);
+ assert!(permit.is_acquired());
+}
+
+#[test]
+fn try_acquire_one_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ let mut permit = Permit::new();
+ assert!(!permit.is_acquired());
+
+ assert_ok!(permit.try_acquire(1, &s));
+ assert_eq!(s.available_permits(), 99);
+ assert!(permit.is_acquired());
+
+ // Polling again on the same waiter does not claim a new permit
+ assert_ok!(permit.try_acquire(1, &s));
+ assert_eq!(s.available_permits(), 99);
+ assert!(permit.is_acquired());
+}
+
+#[test]
+fn try_acquire_many_available() {
+ let s = Semaphore::new(100);
+ assert_eq!(s.available_permits(), 100);
+
+ // Polling for a permit succeeds immediately
+ let mut permit = Permit::new();
+ assert!(!permit.is_acquired());
+
+ assert_ok!(permit.try_acquire(5, &s));
+ assert_eq!(s.available_permits(), 95);
+ assert!(permit.is_acquired());
+
+ // Polling again on the same waiter does not claim a new permit
+ assert_ok!(permit.try_acquire(5, &s));
+ assert_eq!(s.available_permits(), 95);
+ assert!(permit.is_acquired());
+}
+
+#[test]
+fn poll_acquire_one_unavailable() {
+ let s = Semaphore::new(1);
+
+ let mut permit_1 = task::spawn(Permit::new());
+ let mut permit_2 = task::spawn(Permit::new());
+
+ // Acquire the first permit
+ assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 0);
+
+ permit_2.enter(|cx, mut p| {
+ // Try to acquire the second permit
+ assert_pending!(p.poll_acquire(cx, 1, &s));
+ });
+
+ permit_1.release(1, &s);
+
+ assert_eq!(s.available_permits(), 0);
+ assert!(permit_2.is_woken());
+ assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ permit_2.release(1, &s);
+ assert_eq!(s.available_permits(), 1);
+}
+
+#[test]
+fn forget_acquired() {
+ let s = Semaphore::new(1);
+
+ // Polling for a permit succeeds immediately
+ let mut permit = task::spawn(Permit::new());
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ assert_eq!(s.available_permits(), 0);
+
+ permit.forget(1);
+ assert_eq!(s.available_permits(), 0);
+}
+
+#[test]
+fn forget_waiting() {
+ let s = Semaphore::new(0);
+
+    // Polling for a permit queues the waiter
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ assert_eq!(s.available_permits(), 0);
+
+ permit.forget(1);
+
+ s.add_permits(1);
+
+ assert!(!permit.is_woken());
+ assert_eq!(s.available_permits(), 1);
+}
+
+#[test]
+fn poll_acquire_many_unavailable() {
+ let s = Semaphore::new(5);
+
+ let mut permit_1 = task::spawn(Permit::new());
+ let mut permit_2 = task::spawn(Permit::new());
+ let mut permit_3 = task::spawn(Permit::new());
+
+ // Acquire the first permit
+ assert_ready_ok!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 4);
+
+ permit_2.enter(|cx, mut p| {
+        // Try to acquire five more permits
+ assert_pending!(p.poll_acquire(cx, 5, &s));
+ });
+
+ assert_eq!(s.available_permits(), 0);
+
+ permit_3.enter(|cx, mut p| {
+        // Try to acquire three more permits
+ assert_pending!(p.poll_acquire(cx, 3, &s));
+ });
+
+ permit_1.release(1, &s);
+
+ assert_eq!(s.available_permits(), 0);
+ assert!(permit_2.is_woken());
+ assert_ready_ok!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 5, &s)));
+
+ assert!(!permit_3.is_woken());
+ assert_eq!(s.available_permits(), 0);
+
+ permit_2.release(1, &s);
+ assert!(!permit_3.is_woken());
+ assert_eq!(s.available_permits(), 0);
+
+ permit_2.release(2, &s);
+ assert!(permit_3.is_woken());
+
+ assert_ready_ok!(permit_3.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+}
+
+#[test]
+fn try_acquire_one_unavailable() {
+ let s = Semaphore::new(1);
+
+ let mut permit_1 = Permit::new();
+ let mut permit_2 = Permit::new();
+
+ // Acquire the first permit
+ assert_ok!(permit_1.try_acquire(1, &s));
+ assert_eq!(s.available_permits(), 0);
+
+ assert_err!(permit_2.try_acquire(1, &s));
+
+ permit_1.release(1, &s);
+
+ assert_eq!(s.available_permits(), 1);
+ assert_ok!(permit_2.try_acquire(1, &s));
+
+ permit_2.release(1, &s);
+ assert_eq!(s.available_permits(), 1);
+}
+
+#[test]
+fn try_acquire_many_unavailable() {
+ let s = Semaphore::new(5);
+
+ let mut permit_1 = Permit::new();
+ let mut permit_2 = Permit::new();
+
+ // Acquire the first permit
+ assert_ok!(permit_1.try_acquire(1, &s));
+ assert_eq!(s.available_permits(), 4);
+
+ assert_err!(permit_2.try_acquire(5, &s));
+
+ permit_1.release(1, &s);
+ assert_eq!(s.available_permits(), 5);
+
+ assert_ok!(permit_2.try_acquire(5, &s));
+
+ permit_2.release(1, &s);
+ assert_eq!(s.available_permits(), 1);
+
+ permit_2.release(1, &s);
+ assert_eq!(s.available_permits(), 2);
+}
+
+#[test]
+fn poll_acquire_one_zero_permits() {
+ let s = Semaphore::new(0);
+ assert_eq!(s.available_permits(), 0);
+
+ let mut permit = task::spawn(Permit::new());
+
+ // Try to acquire the permit
+ permit.enter(|cx, mut p| {
+ assert_pending!(p.poll_acquire(cx, 1, &s));
+ });
+
+ s.add_permits(1);
+
+ assert!(permit.is_woken());
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+}
+
+#[test]
+#[should_panic]
+fn validates_max_permits() {
+ use std::usize;
+ Semaphore::new((usize::MAX >> 2) + 1);
+}
+
+#[test]
+fn close_semaphore_prevents_acquire() {
+ let s = Semaphore::new(5);
+ s.close();
+
+ assert_eq!(5, s.available_permits());
+
+ let mut permit_1 = task::spawn(Permit::new());
+ let mut permit_2 = task::spawn(Permit::new());
+
+ assert_ready_err!(permit_1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(5, s.available_permits());
+
+ assert_ready_err!(permit_2.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ assert_eq!(5, s.available_permits());
+}
+
+#[test]
+fn close_semaphore_notifies_permit1() {
+ let s = Semaphore::new(0);
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ s.close();
+
+ assert!(permit.is_woken());
+ assert_ready_err!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+}
+
+#[test]
+fn close_semaphore_notifies_permit2() {
+ let s = Semaphore::new(2);
+
+ let mut permit1 = task::spawn(Permit::new());
+ let mut permit2 = task::spawn(Permit::new());
+ let mut permit3 = task::spawn(Permit::new());
+ let mut permit4 = task::spawn(Permit::new());
+
+ // Acquire a couple of permits
+ assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ assert_pending!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_pending!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ s.close();
+
+ assert!(permit3.is_woken());
+ assert!(permit4.is_woken());
+
+ assert_ready_err!(permit3.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_ready_err!(permit4.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ assert_eq!(0, s.available_permits());
+
+ permit1.release(1, &s);
+
+ assert_eq!(1, s.available_permits());
+
+ assert_ready_err!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ permit2.release(1, &s);
+
+ assert_eq!(2, s.available_permits());
+}
+
+#[test]
+fn poll_acquire_additional_permits_while_waiting_before_assigned() {
+ let s = Semaphore::new(1);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+
+ s.add_permits(1);
+ assert!(!permit.is_woken());
+
+ s.add_permits(1);
+ assert!(permit.is_woken());
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+}
+
+#[test]
+fn try_acquire_additional_permits_while_waiting_before_assigned() {
+ let s = Semaphore::new(1);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+
+ assert_err!(permit.enter(|_, mut p| p.try_acquire(3, &s)));
+
+ s.add_permits(1);
+ assert!(permit.is_woken());
+
+ assert_ok!(permit.enter(|_, mut p| p.try_acquire(2, &s)));
+}
+
+#[test]
+fn poll_acquire_additional_permits_while_waiting_after_assigned_success() {
+ let s = Semaphore::new(1);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+
+ s.add_permits(2);
+
+ assert!(permit.is_woken());
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+}
+
+#[test]
+fn poll_acquire_additional_permits_while_waiting_after_assigned_requeue() {
+ let s = Semaphore::new(1);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+
+ s.add_permits(2);
+
+ assert!(permit.is_woken());
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s)));
+
+ s.add_permits(1);
+
+ assert!(permit.is_woken());
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 4, &s)));
+}
+
+#[test]
+fn poll_acquire_fewer_permits_while_waiting() {
+ let s = Semaphore::new(1);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ assert_eq!(s.available_permits(), 0);
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+ assert_eq!(s.available_permits(), 0);
+}
+
+#[test]
+fn poll_acquire_fewer_permits_after_assigned() {
+ let s = Semaphore::new(1);
+
+ let mut permit1 = task::spawn(Permit::new());
+ let mut permit2 = task::spawn(Permit::new());
+
+ assert_pending!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 5, &s)));
+ assert_eq!(s.available_permits(), 0);
+
+ assert_pending!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ s.add_permits(4);
+ assert!(permit1.is_woken());
+ assert!(!permit2.is_woken());
+
+ assert_ready_ok!(permit1.enter(|cx, mut p| p.poll_acquire(cx, 3, &s)));
+
+ assert!(permit2.is_woken());
+ assert_eq!(s.available_permits(), 1);
+
+ assert_ready_ok!(permit2.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+}
+
+#[test]
+fn forget_partial_1() {
+ let s = Semaphore::new(0);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ s.add_permits(1);
+
+ assert_eq!(0, s.available_permits());
+
+ permit.release(1, &s);
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 1, &s)));
+
+ assert_eq!(s.available_permits(), 0);
+}
+
+#[test]
+fn forget_partial_2() {
+ let s = Semaphore::new(0);
+
+ let mut permit = task::spawn(Permit::new());
+
+ assert_pending!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ s.add_permits(1);
+
+ assert_eq!(0, s.available_permits());
+
+ permit.release(1, &s);
+
+ s.add_permits(1);
+
+ assert_ready_ok!(permit.enter(|cx, mut p| p.poll_acquire(cx, 2, &s)));
+ assert_eq!(s.available_permits(), 0);
+}
diff --git a/third_party/rust/tokio/src/sync/watch.rs b/third_party/rust/tokio/src/sync/watch.rs
new file mode 100644
index 0000000000..ba609a8c6d
--- /dev/null
+++ b/third_party/rust/tokio/src/sync/watch.rs
@@ -0,0 +1,432 @@
+//! A single-producer, multi-consumer channel that only retains the *last* sent
+//! value.
+//!
+//! This channel is useful for watching for changes to a value from multiple
+//! points in the code base, for example, changes to configuration values.
+//!
+//! # Usage
+//!
+//! [`channel`] returns a [`Sender`] / [`Receiver`] pair. These are
+//! the producer and consumer halves of the channel. The channel is
+//! created with an initial value. [`Receiver::recv`] will always
+//! be ready upon creation and will yield either this initial value or
+//! the latest value that has been sent by `Sender`.
+//!
+//! Calls to [`Receiver::recv`] will always yield the latest value.
+//!
+//! # Examples
+//!
+//! ```
+//! use tokio::sync::watch;
+//!
+//! # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+//! let (tx, mut rx) = watch::channel("hello");
+//!
+//! tokio::spawn(async move {
+//! while let Some(value) = rx.recv().await {
+//! println!("received = {:?}", value);
+//! }
+//! });
+//!
+//! tx.broadcast("world")?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! # Closing
+//!
+//! [`Sender::closed`] allows the producer to detect when all [`Receiver`]
+//! handles have been dropped. This indicates that there is no further interest
+//! in the values being produced and work can be stopped.
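+//!
+//! A minimal sketch of waiting for closure (the shutdown message below is
+//! illustrative):
+//!
+//! ```
+//! use tokio::sync::watch;
+//!
+//! # async fn dox() {
+//! let (mut tx, rx) = watch::channel("hello");
+//!
+//! tokio::spawn(async move {
+//!     // Completes once every `Receiver`, including `rx`, has been dropped.
+//!     tx.closed().await;
+//!     println!("no more receivers; stopping work");
+//! });
+//! # drop(rx);
+//! # }
+//! ```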
+//!
+//! # Thread safety
+//!
+//! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other
+//! threads and can be used in a concurrent environment. Clones of [`Receiver`]
+//! handles may be moved to separate threads and also used concurrently.
+//!
+//! [`Sender`]: crate::sync::watch::Sender
+//! [`Receiver`]: crate::sync::watch::Receiver
+//! [`Receiver::recv`]: crate::sync::watch::Receiver::recv
+//! [`channel`]: crate::sync::watch::channel
+//! [`Sender::closed`]: crate::sync::watch::Sender::closed
+
+use crate::future::poll_fn;
+use crate::sync::task::AtomicWaker;
+
+use fnv::FnvHashSet;
+use std::ops;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst};
+use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, Weak};
+use std::task::Poll::{Pending, Ready};
+use std::task::{Context, Poll};
+
+/// Receives values from the associated [`Sender`](struct@Sender).
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Receiver<T> {
+ /// Pointer to the shared state
+ shared: Arc<Shared<T>>,
+
+ /// Pointer to the watcher's internal state
+ inner: Watcher,
+}
+
+/// Sends values to the associated [`Receiver`](struct@Receiver).
+///
+/// Instances are created by the [`channel`](fn@channel) function.
+#[derive(Debug)]
+pub struct Sender<T> {
+ shared: Weak<Shared<T>>,
+}
+
+/// A reference to the inner value.
+///
+/// Outstanding borrows hold a read lock on the inner value. This means that
+/// long lived borrows could cause the producer half to block. It is recommended
+/// to keep the borrow as short lived as possible.
+#[derive(Debug)]
+pub struct Ref<'a, T> {
+ inner: RwLockReadGuard<'a, T>,
+}
+
+pub mod error {
+ //! Watch error types
+
+ use std::fmt;
+
+ /// Error produced when sending a value fails.
+ #[derive(Debug)]
+ pub struct SendError<T> {
+ pub(crate) inner: T,
+ }
+
+ // ===== impl SendError =====
+
+ impl<T: fmt::Debug> fmt::Display for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "channel closed")
+ }
+ }
+
+ impl<T: fmt::Debug> std::error::Error for SendError<T> {}
+}
+
+#[derive(Debug)]
+struct Shared<T> {
+ /// The most recent value
+ value: RwLock<T>,
+
+ /// The current version
+ ///
+ /// The lowest bit represents a "closed" state. The rest of the bits
+ /// represent the current version.
+ version: AtomicUsize,
+
+ /// All watchers
+ watchers: Mutex<Watchers>,
+
+ /// Task to notify when all watchers drop
+ cancel: AtomicWaker,
+}
+
+type Watchers = FnvHashSet<Watcher>;
+
+/// The watcher's ID is based on the Arc's pointer.
+#[derive(Clone, Debug)]
+struct Watcher(Arc<WatchInner>);
+
+#[derive(Debug)]
+struct WatchInner {
+ /// Last observed version
+ version: AtomicUsize,
+ waker: AtomicWaker,
+}
+
+const CLOSED: usize = 1;
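+// Example decoding: with `CLOSED == 1`, a state of `0b101` has the closed bit
+// set (`state & CLOSED == 1`) and version bits `0b100` (`state & !CLOSED`);
+// `broadcast` always increments the state by 2, leaving the low bit untouched.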
+
+/// Creates a new watch channel, returning the "send" and "receive" handles.
+///
+/// All values sent by [`Sender`] will become visible to the [`Receiver`] handles.
+/// Only the last value sent is made available to the [`Receiver`] half. All
+/// intermediate values are dropped.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::watch;
+///
+/// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
+/// let (tx, mut rx) = watch::channel("hello");
+///
+/// tokio::spawn(async move {
+/// while let Some(value) = rx.recv().await {
+/// println!("received = {:?}", value);
+/// }
+/// });
+///
+/// tx.broadcast("world")?;
+/// # Ok(())
+/// # }
+/// ```
+///
+/// [`Sender`]: struct@Sender
+/// [`Receiver`]: struct@Receiver
+pub fn channel<T: Clone>(init: T) -> (Sender<T>, Receiver<T>) {
+ const VERSION_0: usize = 0;
+ const VERSION_1: usize = 2;
+
+    // The watcher starts at VERSION_0 while the channel starts at VERSION_1,
+    // so the initial value is observed as unseen by the first `recv`.
+ let inner = Watcher::new_version(VERSION_0);
+
+ // Insert the watcher
+ let mut watchers = FnvHashSet::with_capacity_and_hasher(0, Default::default());
+ watchers.insert(inner.clone());
+
+ let shared = Arc::new(Shared {
+ value: RwLock::new(init),
+ version: AtomicUsize::new(VERSION_1),
+ watchers: Mutex::new(watchers),
+ cancel: AtomicWaker::new(),
+ });
+
+ let tx = Sender {
+ shared: Arc::downgrade(&shared),
+ };
+
+ let rx = Receiver { shared, inner };
+
+ (tx, rx)
+}
+
+impl<T> Receiver<T> {
+ /// Returns a reference to the most recently sent value
+ ///
+ /// Outstanding borrows hold a read lock. This means that long lived borrows
+ /// could cause the send half to block. It is recommended to keep the borrow
+ /// as short lived as possible.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::watch;
+ ///
+ /// let (_, rx) = watch::channel("hello");
+ /// assert_eq!(*rx.borrow(), "hello");
+ /// ```
+ pub fn borrow(&self) -> Ref<'_, T> {
+ let inner = self.shared.value.read().unwrap();
+ Ref { inner }
+ }
+
+ // TODO: document
+ #[doc(hidden)]
+ pub fn poll_recv_ref<'a>(&'a mut self, cx: &mut Context<'_>) -> Poll<Option<Ref<'a, T>>> {
+ // Make sure the task is up to date
+ self.inner.waker.register_by_ref(cx.waker());
+
+ let state = self.shared.version.load(SeqCst);
+ let version = state & !CLOSED;
+
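+        // Record the latest version as observed; if it differs from the
+        // previously observed version, a new value is available to return.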
+ if self.inner.version.swap(version, Relaxed) != version {
+ let inner = self.shared.value.read().unwrap();
+
+ return Ready(Some(Ref { inner }));
+ }
+
+ if CLOSED == state & CLOSED {
+            // The `Sender` handle has been dropped.
+ return Ready(None);
+ }
+
+ Pending
+ }
+}
+
+impl<T: Clone> Receiver<T> {
+ /// Attempts to clone the latest value sent via the channel.
+ ///
+ /// If this is the first time the function is called on a `Receiver`
+ /// instance, then the function completes immediately with the **current**
+ /// value held by the channel. On the next call, the function waits until
+ /// a new value is sent in the channel.
+ ///
+ /// `None` is returned if the `Sender` half is dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::watch;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = watch::channel("hello");
+ ///
+ /// let v = rx.recv().await.unwrap();
+ /// assert_eq!(v, "hello");
+ ///
+ /// tokio::spawn(async move {
+ /// tx.broadcast("goodbye").unwrap();
+ /// });
+ ///
+ /// // Waits for the new task to spawn and send the value.
+ /// let v = rx.recv().await.unwrap();
+ /// assert_eq!(v, "goodbye");
+ ///
+ /// let v = rx.recv().await;
+ /// assert!(v.is_none());
+ /// }
+ /// ```
+ pub async fn recv(&mut self) -> Option<T> {
+ poll_fn(|cx| {
+ let v_ref = ready!(self.poll_recv_ref(cx));
+ Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone()))
+ })
+ .await
+ }
+}
+
+#[cfg(feature = "stream")]
+impl<T: Clone> crate::stream::Stream for Receiver<T> {
+ type Item = T;
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ let v_ref = ready!(self.poll_recv_ref(cx));
+
+ Poll::Ready(v_ref.map(|v_ref| (*v_ref).clone()))
+ }
+}
+
+impl<T> Clone for Receiver<T> {
+ fn clone(&self) -> Self {
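+        // The new watcher inherits this receiver's observed version, so the
+        // clone will not report the current value as a fresh change.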
+ let ver = self.inner.version.load(Relaxed);
+ let inner = Watcher::new_version(ver);
+ let shared = self.shared.clone();
+
+ shared.watchers.lock().unwrap().insert(inner.clone());
+
+ Receiver { shared, inner }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ self.shared.watchers.lock().unwrap().remove(&self.inner);
+ }
+}
+
+impl<T> Sender<T> {
+ /// Broadcasts a new value via the channel, notifying all receivers.
+ pub fn broadcast(&self, value: T) -> Result<(), error::SendError<T>> {
+ let shared = match self.shared.upgrade() {
+ Some(shared) => shared,
+            // All `Receiver` handles have been dropped.
+ None => return Err(error::SendError { inner: value }),
+ };
+
+ // Replace the value
+ {
+ let mut lock = shared.value.write().unwrap();
+ *lock = value;
+ }
+
+ // Update the version. 2 is used so that the CLOSED bit is not set.
+ shared.version.fetch_add(2, SeqCst);
+
+ // Notify all watchers
+ notify_all(&*shared);
+
+        // The value has been replaced and all watchers notified.
+ Ok(())
+ }
+
+ /// Completes when all receivers have dropped.
+ ///
+ /// This allows the producer to get notified when interest in the produced
+ /// values is canceled and immediately stop doing work.
+ pub async fn closed(&mut self) {
+ poll_fn(|cx| self.poll_close(cx)).await
+ }
+
+ fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ match self.shared.upgrade() {
+ Some(shared) => {
+ shared.cancel.register_by_ref(cx.waker());
+ Pending
+ }
+ None => Ready(()),
+ }
+ }
+}
+
+/// Notifies all watchers of a change
+fn notify_all<T>(shared: &Shared<T>) {
+ let watchers = shared.watchers.lock().unwrap();
+
+ for watcher in watchers.iter() {
+ // Notify the task
+ watcher.waker.wake();
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ if let Some(shared) = self.shared.upgrade() {
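+            // Set the CLOSED bit and wake all watchers so pending `recv`
+            // calls observe the closure and yield `None`.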
+ shared.version.fetch_or(CLOSED, SeqCst);
+ notify_all(&*shared);
+ }
+ }
+}
+
+// ===== impl Ref =====
+
+impl<T> ops::Deref for Ref<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ self.inner.deref()
+ }
+}
+
+// ===== impl Shared =====
+
+impl<T> Drop for Shared<T> {
+ fn drop(&mut self) {
+ self.cancel.wake();
+ }
+}
+
+// ===== impl Watcher =====
+
+impl Watcher {
+ fn new_version(version: usize) -> Self {
+ Watcher(Arc::new(WatchInner {
+ version: AtomicUsize::new(version),
+ waker: AtomicWaker::new(),
+ }))
+ }
+}
+
+impl std::cmp::PartialEq for Watcher {
+ fn eq(&self, other: &Watcher) -> bool {
+ Arc::ptr_eq(&self.0, &other.0)
+ }
+}
+
+impl std::cmp::Eq for Watcher {}
+
+impl std::hash::Hash for Watcher {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ (&*self.0 as *const WatchInner).hash(state)
+ }
+}
+
+impl std::ops::Deref for Watcher {
+ type Target = WatchInner;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
diff --git a/third_party/rust/tokio/src/task/blocking.rs b/third_party/rust/tokio/src/task/blocking.rs
new file mode 100644
index 0000000000..0069b10ada
--- /dev/null
+++ b/third_party/rust/tokio/src/task/blocking.rs
@@ -0,0 +1,71 @@
+use crate::task::JoinHandle;
+
+cfg_rt_threaded! {
+ /// Runs the provided blocking function without blocking the executor.
+ ///
+ /// In general, issuing a blocking call or performing a lot of compute in a
+ /// future without yielding is not okay, as it may prevent the executor from
+ /// driving other futures forward. If you run a closure through this method,
+ /// the current executor thread will relegate all its executor duties to another
+ /// (possibly new) thread, and only then poll the task. Note that this requires
+ /// additional synchronization.
+ ///
+ /// # Note
+ ///
+ /// This function can only be called from a spawned task when working with
+ /// the [threaded scheduler](https://docs.rs/tokio/0.2.10/tokio/runtime/index.html#threaded-scheduler).
+ /// Consider using [tokio::task::spawn_blocking](https://docs.rs/tokio/0.2.10/tokio/task/fn.spawn_blocking.html).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::task;
+ ///
+ /// # async fn docs() {
+ /// task::block_in_place(move || {
+ /// // do some compute-heavy work or call synchronous code
+ /// });
+ /// # }
+ /// ```
+ #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))]
+ pub fn block_in_place<F, R>(f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ use crate::runtime::{enter, thread_pool};
+
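+        // Leave the runtime "enter" guard while the thread pool hands this
+        // worker's tasks off to another thread, then run `f` in place.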
+ enter::exit(|| thread_pool::block_in_place(f))
+ }
+}
+
+cfg_blocking! {
+ /// Runs the provided closure on a thread where blocking is acceptable.
+ ///
+ /// In general, issuing a blocking call or performing a lot of compute in a future without
+ /// yielding is not okay, as it may prevent the executor from driving other futures forward.
+ /// A closure that is run through this method will instead be run on a dedicated thread pool for
+ /// such blocking tasks without holding up the main futures executor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::task;
+ ///
+ /// # async fn docs() -> Result<(), Box<dyn std::error::Error>>{
+ /// let res = task::spawn_blocking(move || {
+ /// // do some compute-heavy work or call synchronous code
+ /// "done computing"
+ /// }).await?;
+ ///
+ /// assert_eq!(res, "done computing");
+ /// # Ok(())
+ /// # }
+ /// ```
+ pub fn spawn_blocking<F, R>(f: F) -> JoinHandle<R>
+ where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static,
+ {
+ crate::runtime::spawn_blocking(f)
+ }
+}
diff --git a/third_party/rust/tokio/src/task/local.rs b/third_party/rust/tokio/src/task/local.rs
new file mode 100644
index 0000000000..fce467079f
--- /dev/null
+++ b/third_party/rust/tokio/src/task/local.rs
@@ -0,0 +1,584 @@
+//! Runs `!Send` futures on the current thread.
+use crate::runtime::task::{self, JoinHandle, Task};
+use crate::sync::AtomicWaker;
+use crate::util::linked_list::LinkedList;
+
+use std::cell::{Cell, RefCell};
+use std::collections::VecDeque;
+use std::fmt;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::Poll;
+
+use pin_project_lite::pin_project;
+
+cfg_rt_util! {
+ /// A set of tasks which are executed on the same thread.
+ ///
+ /// In some cases, it is necessary to run one or more futures that do not
+ /// implement [`Send`] and thus are unsafe to send between threads. In these
+ /// cases, a [local task set] may be used to schedule one or more `!Send`
+ /// futures to run together on the same thread.
+ ///
+ /// For example, the following code will not compile:
+ ///
+ /// ```rust,compile_fail
+ /// use std::rc::Rc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// // `Rc` does not implement `Send`, and thus may not be sent between
+ /// // threads safely.
+ /// let unsend_data = Rc::new("my unsend data...");
+ ///
+ /// let unsend_data = unsend_data.clone();
+ /// // Because the `async` block here moves `unsend_data`, the future is `!Send`.
+ /// // Since `tokio::spawn` requires the spawned future to implement `Send`, this
+ /// // will not compile.
+ /// tokio::spawn(async move {
+ /// println!("{}", unsend_data);
+ /// // ...
+ /// }).await.unwrap();
+ /// }
+ /// ```
+ /// In order to spawn `!Send` futures, we can use a local task set to
+ /// schedule them on the thread calling [`Runtime::block_on`]. When running
+ /// inside of the local task set, we can use [`task::spawn_local`], which can
+ /// spawn `!Send` futures. For example:
+ ///
+ /// ```rust
+ /// use std::rc::Rc;
+ /// use tokio::task;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let unsend_data = Rc::new("my unsend data...");
+ ///
+ /// // Construct a local task set that can run `!Send` futures.
+ /// let local = task::LocalSet::new();
+ ///
+ /// // Run the local task set.
+ /// local.run_until(async move {
+ /// let unsend_data = unsend_data.clone();
+ /// // `spawn_local` ensures that the future is spawned on the local
+ /// // task set.
+ /// task::spawn_local(async move {
+ /// println!("{}", unsend_data);
+ /// // ...
+ /// }).await.unwrap();
+ /// }).await;
+ /// }
+ /// ```
+ ///
+ /// ## Awaiting a `LocalSet`
+ ///
+ /// Additionally, a `LocalSet` itself implements `Future`, completing when
+ /// *all* tasks spawned on the `LocalSet` complete. This can be used to run
+ /// several futures on a `LocalSet` and drive the whole set until they
+ /// complete. For example,
+ ///
+ /// ```rust
+ /// use tokio::{task, time};
+ /// use std::rc::Rc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let unsend_data = Rc::new("world");
+ /// let local = task::LocalSet::new();
+ ///
+ /// let unsend_data2 = unsend_data.clone();
+ /// local.spawn_local(async move {
+ /// // ...
+ /// println!("hello {}", unsend_data2)
+ /// });
+ ///
+ /// local.spawn_local(async move {
+ /// time::delay_for(time::Duration::from_millis(100)).await;
+ /// println!("goodbye {}", unsend_data)
+ /// });
+ ///
+ /// // ...
+ ///
+ /// local.await;
+ /// }
+ /// ```
+ ///
+ /// [`Send`]: https://doc.rust-lang.org/std/marker/trait.Send.html
+ /// [local task set]: struct@LocalSet
+ /// [`Runtime::block_on`]: ../struct.Runtime.html#method.block_on
+ /// [`task::spawn_local`]: fn@spawn_local
+ pub struct LocalSet {
+ /// Current scheduler tick
+ tick: Cell<u8>,
+
+ /// State available from thread-local
+ context: Context,
+
+ /// This type should not be Send.
+ _not_send: PhantomData<*const ()>,
+ }
+}
+
+/// State available from the thread-local
+struct Context {
+ /// Owned task set and local run queue
+ tasks: RefCell<Tasks>,
+
+ /// State shared between threads.
+ shared: Arc<Shared>,
+}
+
+struct Tasks {
+ /// Collection of all active tasks spawned onto this executor.
+ owned: LinkedList<Task<Arc<Shared>>>,
+
+ /// Local run queue sender and receiver.
+ queue: VecDeque<task::Notified<Arc<Shared>>>,
+}
+
+/// LocalSet state shared between threads.
+struct Shared {
+ /// Remote run queue sender
+ queue: Mutex<VecDeque<task::Notified<Arc<Shared>>>>,
+
+ /// Wake the `LocalSet` task
+ waker: AtomicWaker,
+}
+
+pin_project! {
+ #[derive(Debug)]
+ struct RunUntil<'a, F> {
+ local_set: &'a LocalSet,
+ #[pin]
+ future: F,
+ }
+}
+
+scoped_thread_local!(static CURRENT: Context);
+
+cfg_rt_util! {
+ /// Spawns a `!Send` future on the local task set.
+ ///
+    /// The spawned future will be run on the same thread that called `spawn_local`.
+ /// This may only be called from the context of a local task set.
+ ///
+ /// # Panics
+ ///
+ /// - This function panics if called outside of a local task set.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::rc::Rc;
+ /// use tokio::task;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let unsend_data = Rc::new("my unsend data...");
+ ///
+ /// let local = task::LocalSet::new();
+ ///
+ /// // Run the local task set.
+ /// local.run_until(async move {
+ /// let unsend_data = unsend_data.clone();
+ /// task::spawn_local(async move {
+ /// println!("{}", unsend_data);
+ /// // ...
+ /// }).await.unwrap();
+ /// }).await;
+ /// }
+ /// ```
+ pub fn spawn_local<F>(future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static,
+ {
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx
+ .expect("`spawn_local` called from outside of a `task::LocalSet`");
+
+ // Safety: Tasks are only polled and dropped from the thread that
+ // spawns them.
+ let (task, handle) = unsafe { task::joinable_local(future) };
+ cx.tasks.borrow_mut().queue.push_back(task);
+ handle
+ })
+ }
+}
+
+/// Initial queue capacity
+const INITIAL_CAPACITY: usize = 64;
+
+/// Max number of tasks to poll per tick.
+const MAX_TASKS_PER_TICK: usize = 61;
+
+/// How often (in ticks) to check the remote queue first.
+const REMOTE_FIRST_INTERVAL: u8 = 31;
+
+impl LocalSet {
+ /// Returns a new local task set.
+ pub fn new() -> LocalSet {
+ LocalSet {
+ tick: Cell::new(0),
+ context: Context {
+ tasks: RefCell::new(Tasks {
+ owned: LinkedList::new(),
+ queue: VecDeque::with_capacity(INITIAL_CAPACITY),
+ }),
+ shared: Arc::new(Shared {
+ queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)),
+ waker: AtomicWaker::new(),
+ }),
+ },
+ _not_send: PhantomData,
+ }
+ }
+
+ /// Spawns a `!Send` task onto the local task set.
+ ///
+ /// This task is guaranteed to be run on the current thread.
+ ///
+ /// Unlike the free function [`spawn_local`], this method may be used to
+ /// spawn local tasks when the task set is _not_ running. For example:
+ /// ```rust
+ /// use tokio::task;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let local = task::LocalSet::new();
+ ///
+ /// // Spawn a future on the local set. This future will be run when
+ /// // we call `run_until` to drive the task set.
+ /// local.spawn_local(async {
+ /// // ...
+ /// });
+ ///
+ /// // Run the local task set.
+ /// local.run_until(async move {
+ /// // ...
+ /// }).await;
+ ///
+ /// // When `run` finishes, we can spawn _more_ futures, which will
+ /// // run in subsequent calls to `run_until`.
+ /// local.spawn_local(async {
+ /// // ...
+ /// });
+ ///
+ /// local.run_until(async move {
+ /// // ...
+ /// }).await;
+ /// }
+ /// ```
+ /// [`spawn_local`]: fn@spawn_local
+ pub fn spawn_local<F>(&self, future: F) -> JoinHandle<F::Output>
+ where
+ F: Future + 'static,
+ F::Output: 'static,
+ {
+ let (task, handle) = unsafe { task::joinable_local(future) };
+ self.context.tasks.borrow_mut().queue.push_back(task);
+ handle
+ }
+
+ /// Runs a future to completion on the provided runtime, driving any local
+ /// futures spawned on this task set on the current thread.
+ ///
+ /// This runs the given future on the runtime, blocking until it is
+ /// complete, and yielding its resolved result. Any tasks or timers which
+ /// the future spawns internally will be executed on the runtime. The future
+    /// may also call [`spawn_local`] to spawn additional local futures on the
+ /// current thread.
+ ///
+ /// This method should not be called from an asynchronous context.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the executor is at capacity, if the provided
+ /// future panics, or if called within an asynchronous execution context.
+ ///
+ /// # Notes
+ ///
+ /// Since this function internally calls [`Runtime::block_on`], and drives
+ /// futures in the local task set inside that call to `block_on`, the local
+ /// futures may not use [in-place blocking]. If a blocking call needs to be
+ /// issued from a local task, the [`spawn_blocking`] API may be used instead.
+ ///
+ /// For example, this will panic:
+ /// ```should_panic
+ /// use tokio::runtime::Runtime;
+ /// use tokio::task;
+ ///
+ /// let mut rt = Runtime::new().unwrap();
+ /// let local = task::LocalSet::new();
+ /// local.block_on(&mut rt, async {
+ /// let join = task::spawn_local(async {
+ /// let blocking_result = task::block_in_place(|| {
+ /// // ...
+ /// });
+ /// // ...
+ /// });
+ /// join.await.unwrap();
+ /// })
+ /// ```
+ /// This, however, will not panic:
+ /// ```
+ /// use tokio::runtime::Runtime;
+ /// use tokio::task;
+ ///
+ /// let mut rt = Runtime::new().unwrap();
+ /// let local = task::LocalSet::new();
+ /// local.block_on(&mut rt, async {
+ /// let join = task::spawn_local(async {
+ /// let blocking_result = task::spawn_blocking(|| {
+ /// // ...
+ /// }).await;
+ /// // ...
+ /// });
+ /// join.await.unwrap();
+ /// })
+ /// ```
+ ///
+ /// [`spawn_local`]: fn@spawn_local
+ /// [`Runtime::block_on`]: ../struct.Runtime.html#method.block_on
+ /// [in-place blocking]: ../blocking/fn.in_place.html
+ /// [`spawn_blocking`]: ../blocking/fn.spawn_blocking.html
+ pub fn block_on<F>(&self, rt: &mut crate::runtime::Runtime, future: F) -> F::Output
+ where
+ F: Future,
+ {
+ rt.block_on(self.run_until(future))
+ }
+
+ /// Run a future to completion on the local set, returning its output.
+ ///
+ /// This returns a future that runs the given future with a local set,
+ /// allowing it to call [`spawn_local`] to spawn additional `!Send` futures.
+ /// Any local futures spawned on the local set will be driven in the
+ /// background until the future passed to `run_until` completes. When the future
+    /// passed to `run_until` finishes, any local futures which have not completed
+ /// will remain on the local set, and will be driven on subsequent calls to
+ /// `run_until` or when [awaiting the local set] itself.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::task;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// task::LocalSet::new().run_until(async {
+ /// task::spawn_local(async move {
+ /// // ...
+ /// }).await.unwrap();
+ /// // ...
+ /// }).await;
+ /// }
+ /// ```
+ ///
+ /// [`spawn_local`]: fn@spawn_local
+ /// [awaiting the local set]: #awaiting-a-localset
+ pub async fn run_until<F>(&self, future: F) -> F::Output
+ where
+ F: Future,
+ {
+ let run_until = RunUntil {
+ future,
+ local_set: self,
+ };
+ run_until.await
+ }
+
+ /// Tick the scheduler, returning whether the local future needs to be
+ /// notified again.
+ fn tick(&self) -> bool {
+ for _ in 0..MAX_TASKS_PER_TICK {
+ match self.next_task() {
+ // Run the task
+ //
+ // Safety: As spawned tasks are `!Send`, `run_unchecked` must be
+ // used. We are responsible for maintaining the invariant that
+ // `run_unchecked` is only called on threads that spawned the
+ // task initially. Because `LocalSet` itself is `!Send`, and
+ // `spawn_local` spawns into the `LocalSet` on the current
+ // thread, the invariant is maintained.
+ Some(task) => crate::coop::budget(|| task.run()),
+ // We have fully drained the queue of notified tasks, so the
+                // local future doesn't need to be notified again; it can wait
+ // until something else wakes a task in the local set.
+ None => return false,
+ }
+ }
+
+ true
+ }
+
+ fn next_task(&self) -> Option<task::Notified<Arc<Shared>>> {
+ let tick = self.tick.get();
+ self.tick.set(tick.wrapping_add(1));
+
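+        // Every `REMOTE_FIRST_INTERVAL` ticks, drain the remote queue first so
+        // tasks woken from other threads cannot be starved by the local queue.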
+ if tick % REMOTE_FIRST_INTERVAL == 0 {
+ self.context
+ .shared
+ .queue
+ .lock()
+ .unwrap()
+ .pop_front()
+ .or_else(|| self.context.tasks.borrow_mut().queue.pop_front())
+ } else {
+ self.context
+ .tasks
+ .borrow_mut()
+ .queue
+ .pop_front()
+ .or_else(|| self.context.shared.queue.lock().unwrap().pop_front())
+ }
+ }
+
+ fn with<T>(&self, f: impl FnOnce() -> T) -> T {
+ CURRENT.set(&self.context, f)
+ }
+}
+
+impl fmt::Debug for LocalSet {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("LocalSet").finish()
+ }
+}
+
+impl Future for LocalSet {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+ // Register the waker before starting to work
+ self.context.shared.waker.register_by_ref(cx.waker());
+
+ if self.with(|| self.tick()) {
+ // If `tick` returns true, we need to notify the local future again:
+ // there are still tasks remaining in the run queue.
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ } else if self.context.tasks.borrow().owned.is_empty() {
+ // If the scheduler has no remaining futures, we're done!
+ Poll::Ready(())
+ } else {
+ // There are still futures in the local set, but we've polled all the
+ // futures in the run queue. Therefore, we can just return Pending
+ // since the remaining futures will be woken from somewhere else.
+ Poll::Pending
+ }
+ }
+}
+
+impl Default for LocalSet {
+ fn default() -> LocalSet {
+ LocalSet::new()
+ }
+}
+
+impl Drop for LocalSet {
+ fn drop(&mut self) {
+ self.with(|| {
+ // Loop required here to ensure borrow is dropped between iterations
+ #[allow(clippy::while_let_loop)]
+ loop {
+ let task = match self.context.tasks.borrow_mut().owned.pop_back() {
+ Some(task) => task,
+ None => break,
+ };
+
+ // Safety: same as `run_unchecked`.
+ task.shutdown();
+ }
+
+ for task in self.context.tasks.borrow_mut().queue.drain(..) {
+ task.shutdown();
+ }
+
+ for task in self.context.shared.queue.lock().unwrap().drain(..) {
+ task.shutdown();
+ }
+
+ assert!(self.context.tasks.borrow().owned.is_empty());
+ });
+ }
+}
+
+// === impl LocalFuture ===
+
+impl<T: Future> Future for RunUntil<'_, T> {
+ type Output = T::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+ let me = self.project();
+
+ me.local_set.with(|| {
+ me.local_set
+ .context
+ .shared
+ .waker
+ .register_by_ref(cx.waker());
+
+ if let Poll::Ready(output) = me.future.poll(cx) {
+ return Poll::Ready(output);
+ }
+
+ if me.local_set.tick() {
+ // If `tick` returns `true`, we need to notify the local future again:
+ // there are still tasks remaining in the run queue.
+ cx.waker().wake_by_ref();
+ }
+
+ Poll::Pending
+ })
+ }
+}
+
+impl Shared {
+ /// Schedule the provided task on the scheduler.
+ fn schedule(&self, task: task::Notified<Arc<Self>>) {
+ CURRENT.with(|maybe_cx| match maybe_cx {
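+            // When called from the thread that owns this `LocalSet`, push
+            // directly onto the (cheaper) local queue; otherwise go through
+            // the shared queue and wake the `LocalSet`.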
+ Some(cx) if cx.shared.ptr_eq(self) => {
+ cx.tasks.borrow_mut().queue.push_back(task);
+ }
+ _ => {
+ self.queue.lock().unwrap().push_back(task);
+ self.waker.wake();
+ }
+ });
+ }
+
+ fn ptr_eq(&self, other: &Shared) -> bool {
+ self as *const _ == other as *const _
+ }
+}
+
+impl task::Schedule for Arc<Shared> {
+ fn bind(task: Task<Self>) -> Arc<Shared> {
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+ cx.tasks.borrow_mut().owned.push_front(task);
+ cx.shared.clone()
+ })
+ }
+
+ fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
+ use std::ptr::NonNull;
+
+ CURRENT.with(|maybe_cx| {
+ let cx = maybe_cx.expect("scheduler context missing");
+
+ assert!(cx.shared.ptr_eq(self));
+
+ let ptr = NonNull::from(task.header());
+ // safety: task must be contained by list. It is inserted into the
+ // list in `bind`.
+ unsafe { cx.tasks.borrow_mut().owned.remove(ptr) }
+ })
+ }
+
+ fn schedule(&self, task: task::Notified<Self>) {
+ Shared::schedule(self, task);
+ }
+}
diff --git a/third_party/rust/tokio/src/task/mod.rs b/third_party/rust/tokio/src/task/mod.rs
new file mode 100644
index 0000000000..5c89393a5e
--- /dev/null
+++ b/third_party/rust/tokio/src/task/mod.rs
@@ -0,0 +1,242 @@
+//! Asynchronous green-threads.
+//!
+//! ## What are Tasks?
+//!
+//! A _task_ is a lightweight, non-blocking unit of execution. A task is similar
+//! to an OS thread, but rather than being managed by the OS scheduler, they are
+//! managed by the [Tokio runtime][rt]. Another name for this general pattern is
+//! [green threads]. If you are familiar with [Go's goroutines], [Kotlin's
+//! coroutines], or [Erlang's processes], you can think of Tokio's tasks as
+//! something similar.
+//!
+//! Key points about tasks include:
+//!
+//! * Tasks are **lightweight**. Because tasks are scheduled by the Tokio
+//! runtime rather than the operating system, creating new tasks or switching
+//! between tasks does not require a context switch and has fairly low
+//! overhead. Creating, running, and destroying large numbers of tasks is
+//! quite cheap, especially compared to OS threads.
+//!
+//! * Tasks are scheduled **cooperatively**. Most operating systems implement
+//! _preemptive multitasking_. This is a scheduling technique where the
+//! operating system allows each thread to run for a period of time, and then
+//! _preempts_ it, temporarily pausing that thread and switching to another.
+//! Tasks, on the other hand, implement _cooperative multitasking_. In
+//! cooperative multitasking, a task is allowed to run until it _yields_,
+//! indicating to the Tokio runtime's scheduler that it cannot currently
+//! continue executing. When a task yields, the Tokio runtime switches to
+//! executing the next task.
+//!
+//! * Tasks are **non-blocking**. Typically, when an OS thread performs I/O or
+//! must synchronize with another thread, it _blocks_, allowing the OS to
+//! schedule another thread. When a task cannot continue executing, it must
+//! yield instead, allowing the Tokio runtime to schedule another task. Tasks
+//! should generally not perform system calls or other operations that could
+//! block a thread, as this would prevent other tasks running on the same
+//! thread from executing as well. Instead, this module provides APIs for
+//! running blocking operations in an asynchronous context.
+//!
+//! [rt]: crate::runtime
+//! [green threads]: https://en.wikipedia.org/wiki/Green_threads
+//! [Go's goroutines]: https://tour.golang.org/concurrency/1
+//! [Kotlin's coroutines]: https://kotlinlang.org/docs/reference/coroutines-overview.html
+//! [Erlang's processes]: http://erlang.org/doc/getting_started/conc_prog.html#processes
+//!
+//! ## Working with Tasks
+//!
+//! This module provides the following APIs for working with tasks:
+//!
+//! ### Spawning
+//!
+//! Perhaps the most important function in this module is [`task::spawn`]. This
+//! function can be thought of as an async equivalent to the standard library's
+//! [`thread::spawn`][`std::thread::spawn`]. It takes an `async` block or other
+//! [future], and creates a new task to run that work concurrently:
+//!
+//! ```
+//! use tokio::task;
+//!
+//! # async fn doc() {
+//! task::spawn(async {
+//! // perform some work here...
+//! });
+//! # }
+//! ```
+//!
+//! Like [`std::thread::spawn`], `task::spawn` returns a [`JoinHandle`] struct.
+//! A `JoinHandle` is itself a future which may be used to await the output of
+//! the spawned task. For example:
+//!
+//! ```
+//! use tokio::task;
+//!
+//! # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> {
+//! let join = task::spawn(async {
+//! // ...
+//! "hello world!"
+//! });
+//!
+//! // ...
+//!
+//! // Await the result of the spawned task.
+//! let result = join.await?;
+//! assert_eq!(result, "hello world!");
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Again, like `std::thread`'s [`JoinHandle` type][thread_join], if the spawned
+//! task panics, awaiting its `JoinHandle` will return a [`JoinError`]. For
+//! example:
+//!
+//! ```
+//! use tokio::task;
+//!
+//! # #[tokio::main] async fn main() {
+//! let join = task::spawn(async {
+//! panic!("something bad happened!")
+//! });
+//!
+//! // The returned result indicates that the task failed.
+//! assert!(join.await.is_err());
+//! # }
+//! ```
+//!
+//! `spawn`, `JoinHandle`, and `JoinError` are present when the "rt-core"
+//! feature flag is enabled.
+//!
+//! [`task::spawn`]: crate::task::spawn()
+//! [future]: std::future::Future
+//! [`std::thread::spawn`]: std::thread::spawn
+//! [`JoinHandle`]: crate::task::JoinHandle
+//! [thread_join]: std::thread::JoinHandle
+//! [`JoinError`]: crate::task::JoinError
+//!
+//! ### Blocking and Yielding
+//!
+//! As we discussed above, code running in asynchronous tasks should not perform
+//! operations that can block. A blocking operation performed in a task running
+//! on a thread that is also running other tasks would block the entire thread,
+//! preventing other tasks from running.
+//!
+//! Instead, Tokio provides two APIs for running blocking operations in an
+//! asynchronous context: [`task::spawn_blocking`] and [`task::block_in_place`].
+//!
+//! #### spawn_blocking
+//!
+//! The `task::spawn_blocking` function is similar to the `task::spawn` function
+//! discussed in the previous section, but rather than spawning an
+//! _non-blocking_ future on the Tokio runtime, it instead spawns a
+//! _blocking_ function on a dedicated thread pool for blocking tasks. For
+//! example:
+//!
+//! ```
+//! use tokio::task;
+//!
+//! # async fn docs() {
+//! task::spawn_blocking(|| {
+//! // do some compute-heavy work or call synchronous code
+//! });
+//! # }
+//! ```
+//!
+//! Just like `task::spawn`, `task::spawn_blocking` returns a `JoinHandle`
+//! which we can use to await the result of the blocking operation:
+//!
+//! ```rust
+//! # use tokio::task;
+//! # async fn docs() -> Result<(), Box<dyn std::error::Error>>{
+//! let join = task::spawn_blocking(|| {
+//! // do some compute-heavy work or call synchronous code
+//! "blocking completed"
+//! });
+//!
+//! let result = join.await?;
+//! assert_eq!(result, "blocking completed");
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! #### block_in_place
+//!
+//! When using the [threaded runtime][rt-threaded], the [`task::block_in_place`]
+//! function is also available. Like `task::spawn_blocking`, this function
+//! allows running a blocking operation from an asynchronous context. Unlike
+//! `spawn_blocking`, however, `block_in_place` works by transitioning the
+//! _current_ worker thread to a blocking thread, moving other tasks running on
+//! that thread to another worker thread. This can improve performance by avoiding
+//! context switches.
+//!
+//! For example:
+//!
+//! ```
+//! use tokio::task;
+//!
+//! # async fn docs() {
+//! let result = task::block_in_place(|| {
+//! // do some compute-heavy work or call synchronous code
+//! "blocking completed"
+//! });
+//!
+//! assert_eq!(result, "blocking completed");
+//! # }
+//! ```
+//!
+//! #### yield_now
+//!
+//! In addition, this module provides a [`task::yield_now`] async function
+//! that is analogous to the standard library's [`thread::yield_now`]. Calling
+//! and `await`ing this function will cause the current task to yield to the
+//! Tokio runtime's scheduler, allowing other tasks to be
+//! scheduled. Eventually, the yielding task will be polled again, allowing it
+//! to execute. For example:
+//!
+//! ```rust
+//! use tokio::task;
+//!
+//! # #[tokio::main] async fn main() {
+//! async {
+//! task::spawn(async {
+//! // ...
+//! println!("spawned task done!")
+//! });
+//!
+//! // Yield, allowing the newly-spawned task to execute first.
+//! task::yield_now().await;
+//! println!("main task done!");
+//! }
+//! # .await;
+//! # }
+//! ```
+//!
+//! [`task::spawn_blocking`]: crate::task::spawn_blocking
+//! [`task::block_in_place`]: crate::task::block_in_place
+//! [rt-threaded]: ../runtime/index.html#threaded-scheduler
+//! [`task::yield_now`]: crate::task::yield_now()
+//! [`thread::yield_now`]: std::thread::yield_now
+cfg_blocking! {
+ mod blocking;
+ pub use blocking::spawn_blocking;
+
+ cfg_rt_threaded! {
+ pub use blocking::block_in_place;
+ }
+}
+
+cfg_rt_core! {
+ pub use crate::runtime::task::{JoinError, JoinHandle};
+
+ mod spawn;
+ pub use spawn::spawn;
+
+ mod yield_now;
+ pub use yield_now::yield_now;
+}
+
+cfg_rt_util! {
+ mod local;
+ pub use local::{spawn_local, LocalSet};
+
+ mod task_local;
+ pub use task_local::LocalKey;
+}
diff --git a/third_party/rust/tokio/src/task/spawn.rs b/third_party/rust/tokio/src/task/spawn.rs
new file mode 100644
index 0000000000..fa5ff13b01
--- /dev/null
+++ b/third_party/rust/tokio/src/task/spawn.rs
@@ -0,0 +1,134 @@
+use crate::runtime;
+use crate::task::JoinHandle;
+
+use std::future::Future;
+
+doc_rt_core! {
+ /// Spawns a new asynchronous task, returning a
+ /// [`JoinHandle`](super::JoinHandle) for it.
+ ///
+ /// Spawning a task enables the task to execute concurrently to other tasks. The
+ /// spawned task may execute on the current thread, or it may be sent to a
+ /// different thread to be executed. The specifics depend on the current
+ /// [`Runtime`](crate::runtime::Runtime) configuration.
+ ///
+ /// There is no guarantee that a spawned task will execute to completion.
+ /// When a runtime is shutdown, all outstanding tasks are dropped,
+ /// regardless of the lifecycle of that task.
+ ///
+ /// This function must be called from the context of a Tokio runtime. Tasks running on
+ /// the Tokio runtime are always inside its context, but you can also enter the context
+ /// using the [`Handle::enter`](crate::runtime::Handle::enter()) method.
+ ///
+ /// # Examples
+ ///
+ /// In this example, a server is started and `spawn` is used to start a new task
+ /// that processes each received connection.
+ ///
+ /// ```no_run
+ /// use tokio::net::{TcpListener, TcpStream};
+ ///
+ /// use std::io;
+ ///
+ /// async fn process(socket: TcpStream) {
+ /// // ...
+ /// # drop(socket);
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut listener = TcpListener::bind("127.0.0.1:8080").await?;
+ ///
+ /// loop {
+ /// let (socket, _) = listener.accept().await?;
+ ///
+ /// tokio::spawn(async move {
+ /// // Process each socket concurrently.
+ /// process(socket).await
+ /// });
+ /// }
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if called from **outside** of the Tokio runtime.
+ ///
+ /// # Using `!Send` values from a task
+ ///
+ /// The task supplied to `spawn` must implement `Send`. However, it is
+ /// possible to **use** `!Send` values from the task as long as they only
+ /// exist between calls to `.await`.
+ ///
+ /// For example, this will work:
+ ///
+ /// ```
+ /// use tokio::task;
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// fn use_rc(rc: Rc<()>) {
+ /// // Do stuff w/ rc
+ /// # drop(rc);
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// tokio::spawn(async {
+ /// // Force the `Rc` to stay in a scope with no `.await`
+ /// {
+ /// let rc = Rc::new(());
+ /// use_rc(rc.clone());
+ /// }
+ ///
+ /// task::yield_now().await;
+ /// }).await.unwrap();
+ /// }
+ /// ```
+ ///
+ /// This will **not** work:
+ ///
+ /// ```compile_fail
+ /// use tokio::task;
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// fn use_rc(rc: Rc<()>) {
+ /// // Do stuff w/ rc
+ /// # drop(rc);
+ /// }
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// tokio::spawn(async {
+ /// let rc = Rc::new(());
+ ///
+ /// task::yield_now().await;
+ ///
+ /// use_rc(rc.clone());
+ /// }).await.unwrap();
+ /// }
+ /// ```
+ ///
+ /// Holding on to a `!Send` value across calls to `.await` will result in
+ /// an unfriendly compile error message similar to:
+ ///
+ /// ```text
+ /// `[... some type ...]` cannot be sent between threads safely
+ /// ```
+ ///
+ /// or:
+ ///
+ /// ```text
+ /// error[E0391]: cycle detected when processing `main`
+ /// ```
+ pub fn spawn<T>(task: T) -> JoinHandle<T::Output>
+ where
+ T: Future + Send + 'static,
+ T::Output: Send + 'static,
+ {
+ let spawn_handle = runtime::context::spawn_handle()
+ .expect("must be called from the context of Tokio runtime configured with either `basic_scheduler` or `threaded_scheduler`");
+ spawn_handle.spawn(task)
+ }
+}
diff --git a/third_party/rust/tokio/src/task/task_local.rs b/third_party/rust/tokio/src/task/task_local.rs
new file mode 100644
index 0000000000..f3341b6a7e
--- /dev/null
+++ b/third_party/rust/tokio/src/task/task_local.rs
@@ -0,0 +1,240 @@
+use pin_project_lite::pin_project;
+use std::cell::RefCell;
+use std::error::Error;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{fmt, thread};
+
+/// Declares a new task-local key of type [`tokio::task::LocalKey`].
+///
+/// # Syntax
+///
+/// The macro wraps any number of static declarations and makes them local to the current task.
+/// Publicity and attributes for each static are preserved.
+///
+/// # Examples
+///
+/// ```
+/// # use tokio::task_local;
+/// task_local! {
+/// pub static ONE: u32;
+///
+/// #[allow(unused)]
+/// static TWO: f32;
+/// }
+/// # fn main() {}
+/// ```
+///
+/// See [LocalKey documentation][`tokio::task::LocalKey`] for more
+/// information.
+///
+/// [`tokio::task::LocalKey`]: ../tokio/task/struct.LocalKey.html
+#[macro_export]
+macro_rules! task_local {
+ // empty (base case for the recursion)
+ () => {};
+
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty; $($rest:tt)*) => {
+ $crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
+ $crate::task_local!($($rest)*);
+ };
+
+ ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty) => {
+ $crate::__task_local_inner!($(#[$attr])* $vis $name, $t);
+ }
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __task_local_inner {
+ ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty) => {
+ static $name: $crate::task::LocalKey<$t> = {
+ std::thread_local! {
+ static __KEY: std::cell::RefCell<Option<$t>> = std::cell::RefCell::new(None);
+ }
+
+ $crate::task::LocalKey { inner: __KEY }
+ };
+ };
+}
+
+/// A key for task-local data.
+///
+/// This type is generated by the `task_local!` macro.
+///
+/// Unlike [`std::thread::LocalKey`], `tokio::task::LocalKey` will
+/// _not_ lazily initialize the value on first access. Instead, the
+/// value is first initialized when the future containing
+/// the task-local is first polled by a futures executor, like Tokio.
+///
+/// # Examples
+///
+/// ```
+/// # async fn dox() {
+/// tokio::task_local! {
+/// static NUMBER: u32;
+/// }
+///
+/// NUMBER.scope(1, async move {
+/// assert_eq!(NUMBER.get(), 1);
+/// }).await;
+///
+/// NUMBER.scope(2, async move {
+/// assert_eq!(NUMBER.get(), 2);
+///
+/// NUMBER.scope(3, async move {
+/// assert_eq!(NUMBER.get(), 3);
+/// }).await;
+/// }).await;
+/// # }
+/// ```
+/// [`std::thread::LocalKey`]: https://doc.rust-lang.org/std/thread/struct.LocalKey.html
+pub struct LocalKey<T: 'static> {
+ #[doc(hidden)]
+ pub inner: thread::LocalKey<RefCell<Option<T>>>,
+}
+
+impl<T: 'static> LocalKey<T> {
+ /// Sets a value `T` as the task-local value for the future `F`.
+ ///
+ /// On completion of `scope`, the task-local will be dropped.
+ ///
+ /// ### Examples
+ ///
+ /// ```
+ /// # async fn dox() {
+ /// tokio::task_local! {
+ /// static NUMBER: u32;
+ /// }
+ ///
+ /// NUMBER.scope(1, async move {
+ /// println!("task local value: {}", NUMBER.get());
+ /// }).await;
+ /// # }
+ /// ```
+ pub async fn scope<F>(&'static self, value: T, f: F) -> F::Output
+ where
+ F: Future,
+ {
+ TaskLocalFuture {
+ local: &self,
+ slot: Some(value),
+ future: f,
+ }
+ .await
+ }
+
+ /// Accesses the current task-local and runs the provided closure.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if not called within the context
+ /// of a future containing a task-local with the corresponding key.
+ pub fn with<F, R>(&'static self, f: F) -> R
+ where
+ F: FnOnce(&T) -> R,
+ {
+ self.try_with(f).expect(
+ "cannot access a Task Local Storage value \
+ without setting it via `LocalKey::set`",
+ )
+ }
+
+ /// Accesses the current task-local and runs the provided closure.
+ ///
+    /// If the task-local with the associated key is not present, this
+ /// method will return an `AccessError`. For a panicking variant,
+ /// see `with`.
+ pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
+ where
+ F: FnOnce(&T) -> R,
+ {
+ self.inner.with(|v| {
+ if let Some(val) = v.borrow().as_ref() {
+ Ok(f(val))
+ } else {
+ Err(AccessError { _private: () })
+ }
+ })
+ }
+}
+
+impl<T: Copy + 'static> LocalKey<T> {
+ /// Returns a copy of the task-local value
+ /// if the task-local value implements `Copy`.
+ pub fn get(&'static self) -> T {
+ self.with(|v| *v)
+ }
+}
+
+impl<T: 'static> fmt::Debug for LocalKey<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("LocalKey { .. }")
+ }
+}
+
+pin_project! {
+ struct TaskLocalFuture<T: StaticLifetime, F> {
+ local: &'static LocalKey<T>,
+ slot: Option<T>,
+ #[pin]
+ future: F,
+ }
+}
+
+impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> {
+ type Output = F::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ struct Guard<'a, T: 'static> {
+ local: &'static LocalKey<T>,
+ slot: &'a mut Option<T>,
+ prev: Option<T>,
+ }
+
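+        // On drop (including on panic in `poll`), restore the previous
+        // task-local value and stash the in-scope value back into the slot so
+        // it survives until the next poll.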
+ impl<T> Drop for Guard<'_, T> {
+ fn drop(&mut self) {
+ let value = self.local.inner.with(|c| c.replace(self.prev.take()));
+ *self.slot = value;
+ }
+ }
+
+ let mut project = self.project();
+ let val = project.slot.take();
+
+ let prev = project.local.inner.with(|c| c.replace(val));
+
+ let _guard = Guard {
+ prev,
+ slot: &mut project.slot,
+ local: *project.local,
+ };
+
+ project.future.poll(cx)
+ }
+}
+
+// Required to make `pin_project` happy.
+trait StaticLifetime: 'static {}
+impl<T: 'static> StaticLifetime for T {}
+
+/// An error returned by [`LocalKey::try_with`](method@LocalKey::try_with).
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub struct AccessError {
+ _private: (),
+}
+
+impl fmt::Debug for AccessError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AccessError").finish()
+ }
+}
+
+impl fmt::Display for AccessError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("task-local value not set", f)
+ }
+}
+
+impl Error for AccessError {}
diff --git a/third_party/rust/tokio/src/task/yield_now.rs b/third_party/rust/tokio/src/task/yield_now.rs
new file mode 100644
index 0000000000..e0e20841c9
--- /dev/null
+++ b/third_party/rust/tokio/src/task/yield_now.rs
@@ -0,0 +1,38 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+doc_rt_core! {
+ /// Yields execution back to the Tokio runtime.
+ ///
+ /// A task yields by awaiting on `yield_now()`, and may resume when that
+    /// future completes (with no output). The current task will be re-added as
+ /// a pending task at the _back_ of the pending queue. Any other pending
+ /// tasks will be scheduled. No other waking is required for the task to
+ /// continue.
+ ///
+ /// See also the usage example in the [task module](index.html#yield_now).
+ #[must_use = "yield_now does nothing unless polled/`await`-ed"]
+ pub async fn yield_now() {
+ /// Yield implementation
+ struct YieldNow {
+ yielded: bool,
+ }
+
+ impl Future for YieldNow {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ if self.yielded {
+ return Poll::Ready(());
+ }
+
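+                // Wake ourselves immediately so the task is re-queued at the
+                // back of the run queue, then return `Pending` exactly once.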
+ self.yielded = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+
+ YieldNow { yielded: false }.await
+ }
+}
diff --git a/third_party/rust/tokio/src/time/clock.rs b/third_party/rust/tokio/src/time/clock.rs
new file mode 100644
index 0000000000..4ac24af3d0
--- /dev/null
+++ b/third_party/rust/tokio/src/time/clock.rs
@@ -0,0 +1,164 @@
+//! Source of time abstraction.
+//!
+//! By default, `std::time::Instant::now()` is used. However, when the
+//! `test-util` feature flag is enabled, the values returned for `now()` are
+//! configurable.
+
+cfg_not_test_util! {
+ use crate::time::{Duration, Instant};
+
+ #[derive(Debug, Clone)]
+ pub(crate) struct Clock {}
+
+ pub(crate) fn now() -> Instant {
+ Instant::from_std(std::time::Instant::now())
+ }
+
+ impl Clock {
+ pub(crate) fn new() -> Clock {
+ Clock {}
+ }
+
+ pub(crate) fn now(&self) -> Instant {
+ now()
+ }
+
+ pub(crate) fn is_paused(&self) -> bool {
+ false
+ }
+
+ pub(crate) fn advance(&self, _dur: Duration) {
+ unreachable!();
+ }
+ }
+}
+
+cfg_test_util! {
+ use crate::time::{Duration, Instant};
+ use std::sync::{Arc, Mutex};
+ use crate::runtime::context;
+
+ /// A handle to a source of time.
+ #[derive(Debug, Clone)]
+ pub(crate) struct Clock {
+ inner: Arc<Mutex<Inner>>,
+ }
+
+ #[derive(Debug)]
+ struct Inner {
+ /// Instant to use as the clock's base instant.
+ base: std::time::Instant,
+
+ /// Instant at which the clock was last unfrozen
+ unfrozen: Option<std::time::Instant>,
+ }
+
+ /// Pause time
+ ///
+ /// The current value of `Instant::now()` is saved and all subsequent calls
+ /// to `Instant::now()` will return the saved value. This is useful for
+ /// running tests that are dependent on time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if time is already frozen or if called from outside of the Tokio
+ /// runtime.
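+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assumes the `test-util` and `macros` features are
+    /// enabled):
+    ///
+    /// ```
+    /// use tokio::time::{self, Duration, Instant};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     time::pause();
+    ///
+    ///     let start = Instant::now();
+    ///     // While paused, `advance` moves the clock forward without sleeping.
+    ///     time::advance(Duration::from_secs(5)).await;
+    ///     assert!(Instant::now() - start >= Duration::from_secs(5));
+    /// }
+    /// ```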
+ pub fn pause() {
+ let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime");
+ clock.pause();
+ }
+
+ /// Resume time
+ ///
+ /// Clears the saved `Instant::now()` value. Subsequent calls to
+ /// `Instant::now()` will return the value returned by the system call.
+ ///
+ /// # Panics
+ ///
+ /// Panics if time is not frozen or if called from outside of the Tokio
+ /// runtime.
+ pub fn resume() {
+ let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime");
+ let mut inner = clock.inner.lock().unwrap();
+
+ if inner.unfrozen.is_some() {
+ panic!("time is not frozen");
+ }
+
+ inner.unfrozen = Some(std::time::Instant::now());
+ }
+
+ /// Advance time
+ ///
+ /// Increments the saved `Instant::now()` value by `duration`. Subsequent
+ /// calls to `Instant::now()` will return the result of the increment.
+ ///
+ /// # Panics
+ ///
+ /// Panics if time is not frozen or if called from outside of the Tokio
+ /// runtime.
+ pub async fn advance(duration: Duration) {
+ let clock = context::clock().expect("time cannot be frozen from outside the Tokio runtime");
+ clock.advance(duration);
+ crate::task::yield_now().await;
+ }
+
+    /// Returns the current instant, factoring in frozen time.
+ pub(crate) fn now() -> Instant {
+ if let Some(clock) = context::clock() {
+ clock.now()
+ } else {
+ Instant::from_std(std::time::Instant::now())
+ }
+ }
+
+ impl Clock {
+        /// Returns a new `Clock` instance that uses the current execution context's
+ /// source of time.
+ pub(crate) fn new() -> Clock {
+ let now = std::time::Instant::now();
+
+ Clock {
+ inner: Arc::new(Mutex::new(Inner {
+ base: now,
+ unfrozen: Some(now),
+ })),
+ }
+ }
+
+ pub(crate) fn pause(&self) {
+ let mut inner = self.inner.lock().unwrap();
+
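+            // Fold the wall-clock time elapsed since the last unfreeze into
+            // `base`, then stop tracking real time until `resume` is called.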
+ let elapsed = inner.unfrozen.as_ref().expect("time is already frozen").elapsed();
+ inner.base += elapsed;
+ inner.unfrozen = None;
+ }
+
+ pub(crate) fn is_paused(&self) -> bool {
+ let inner = self.inner.lock().unwrap();
+ inner.unfrozen.is_none()
+ }
+
+ pub(crate) fn advance(&self, duration: Duration) {
+ let mut inner = self.inner.lock().unwrap();
+
+ if inner.unfrozen.is_some() {
+ panic!("time is not frozen");
+ }
+
+ inner.base += duration;
+ }
+
+ pub(crate) fn now(&self) -> Instant {
+ let inner = self.inner.lock().unwrap();
+
+ let mut ret = inner.base;
+
+ if let Some(unfrozen) = inner.unfrozen {
+ ret += unfrozen.elapsed();
+ }
+
+ Instant::from_std(ret)
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/time/delay.rs b/third_party/rust/tokio/src/time/delay.rs
new file mode 100644
index 0000000000..8088c9955c
--- /dev/null
+++ b/third_party/rust/tokio/src/time/delay.rs
@@ -0,0 +1,99 @@
+use crate::time::driver::Registration;
+use crate::time::{Duration, Instant};
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+/// Waits until `deadline` is reached.
+///
+/// No work is performed while awaiting on the delay to complete. The delay
+/// operates at millisecond granularity and should not be used for tasks that
+/// require high-resolution timers.
+///
+/// # Cancellation
+///
+/// Canceling a delay is done by dropping the returned future. No additional
+/// cleanup work is required.
+pub fn delay_until(deadline: Instant) -> Delay {
+ let registration = Registration::new(deadline, Duration::from_millis(0));
+ Delay { registration }
+}
+
+/// Waits until `duration` has elapsed.
+///
+/// Equivalent to `delay_until(Instant::now() + duration)`. An asynchronous
+/// analog to `std::thread::sleep`.
+///
+/// No work is performed while awaiting on the delay to complete. The delay
+/// operates at millisecond granularity and should not be used for tasks that
+/// require high-resolution timers.
+///
+/// # Cancellation
+///
+/// Canceling a delay is done by dropping the returned future. No additional
+/// cleanup work is required.
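+///
+/// # Examples
+///
+/// A minimal sketch (assumes the `time` and `macros` features are enabled):
+///
+/// ```
+/// use tokio::time::{delay_for, Duration};
+///
+/// #[tokio::main]
+/// async fn main() {
+///     // Yields to the runtime for roughly 100ms without blocking the thread.
+///     delay_for(Duration::from_millis(100)).await;
+///     println!("100 ms have elapsed");
+/// }
+/// ```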
+pub fn delay_for(duration: Duration) -> Delay {
+ delay_until(Instant::now() + duration)
+}
+
+/// Future returned by [`delay_until`](delay_until) and
+/// [`delay_for`](delay_for).
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Delay {
+ /// The link between the `Delay` instance and the timer that drives it.
+ ///
+ /// This also stores the `deadline` value.
+ registration: Registration,
+}
+
+impl Delay {
+ pub(crate) fn new_timeout(deadline: Instant, duration: Duration) -> Delay {
+ let registration = Registration::new(deadline, duration);
+ Delay { registration }
+ }
+
+ /// Returns the instant at which the future will complete.
+ pub fn deadline(&self) -> Instant {
+ self.registration.deadline()
+ }
+
+ /// Returns `true` if the `Delay` has elapsed
+ ///
+ /// A `Delay` is elapsed when the requested duration has elapsed.
+ pub fn is_elapsed(&self) -> bool {
+ self.registration.is_elapsed()
+ }
+
+ /// Resets the `Delay` instance to a new deadline.
+ ///
+ /// Calling this function allows changing the instant at which the `Delay`
+ /// future completes without having to create new associated state.
+ ///
+ /// This function can be called both before and after the future has
+ /// completed.
+ pub fn reset(&mut self, deadline: Instant) {
+ self.registration.reset(deadline);
+ }
+}
+
+impl Future for Delay {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ // `poll_elapsed` can return an error in two cases:
+ //
+        //   - AtCapacity: this is a pathological case where far too many
+        //     delays have been scheduled.
+        //   - Shutdown: No timer has been set up, which is a misuse error.
+ //
+ // Both cases are extremely rare, and pretty accurately fit into
+ // "logic errors", so we just panic in this case. A user couldn't
+ // really do much better if we passed the error onwards.
+ match ready!(self.registration.poll_elapsed(cx)) {
+ Ok(()) => Poll::Ready(()),
+ Err(e) => panic!("timer error: {}", e),
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/time/delay_queue.rs b/third_party/rust/tokio/src/time/delay_queue.rs
new file mode 100644
index 0000000000..59f901a95d
--- /dev/null
+++ b/third_party/rust/tokio/src/time/delay_queue.rs
@@ -0,0 +1,887 @@
+//! A queue of delayed elements.
+//!
+//! See [`DelayQueue`] for more details.
+//!
+//! [`DelayQueue`]: struct@DelayQueue
+
+use crate::time::wheel::{self, Wheel};
+use crate::time::{delay_until, Delay, Duration, Error, Instant};
+
+use slab::Slab;
+use std::cmp;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+/// A queue of delayed elements.
+///
+/// Once an element is inserted into the `DelayQueue`, it is yielded once the
+/// specified deadline has been reached.
+///
+/// # Usage
+///
+/// Elements are inserted into `DelayQueue` using the [`insert`] or
+/// [`insert_at`] methods. A deadline is provided with the item and a [`Key`] is
+/// returned. The key is used to remove the entry or to change the deadline at
+/// which it should be yielded back.
+///
+/// Once delays have been configured, the `DelayQueue` is used via its
+/// [`Stream`] implementation. [`poll`] is called. If an entry has reached its
+/// deadline, it is returned. If not, `Poll::Pending` is returned, indicating
+/// that the current task will be notified once the deadline has been reached.
+///
+/// # `Stream` implementation
+///
+/// Items are retrieved from the queue via [`Stream::poll`]. If no delays have
+/// expired, no items are returned. In this case, `Poll::Pending` is returned
+/// and the current task is registered to be notified once the next item's
+/// delay has expired.
+///
+/// If no items are in the queue, i.e. `is_empty()` returns `true`, then `poll`
+/// returns `Poll::Ready(None)`. This indicates that the stream has reached an
+/// end. However, if a new item is inserted *after*, `poll` will once again
+/// start returning items or `Poll::Pending`.
+///
+/// Items are returned ordered by their expirations. Items that are configured
+/// to expire first will be returned first. There are no ordering guarantees
+/// for items configured to expire the same instant. Also note that delays are
+/// rounded to the closest millisecond.
+///
+/// # Implementation
+///
+/// The `DelayQueue` is backed by the same hashed timing wheel implementation as
+/// [`Timer`]; as such, it offers the same performance benefits. See [`Timer`]
+/// for further implementation notes.
+///
+/// State associated with each entry is stored in a [`slab`]. This allows
+/// amortizing the cost of allocation. Space created for expired entries is
+/// reused when inserting new entries.
+///
+/// Capacity can be checked using [`capacity`] and allocated preemptively by using
+/// the [`reserve`] method.
+///
+/// # Examples
+///
+/// Using `DelayQueue` to manage cache entries:
+///
+/// ```rust,no_run
+/// use tokio::time::{delay_queue, DelayQueue, Error};
+///
+/// use futures::ready;
+/// use std::collections::HashMap;
+/// use std::task::{Context, Poll};
+/// use std::time::Duration;
+/// # type CacheKey = String;
+/// # type Value = String;
+///
+/// struct Cache {
+/// entries: HashMap<CacheKey, (Value, delay_queue::Key)>,
+/// expirations: DelayQueue<CacheKey>,
+/// }
+///
+/// const TTL_SECS: u64 = 30;
+///
+/// impl Cache {
+/// fn insert(&mut self, key: CacheKey, value: Value) {
+/// let delay = self.expirations
+/// .insert(key.clone(), Duration::from_secs(TTL_SECS));
+///
+/// self.entries.insert(key, (value, delay));
+/// }
+///
+/// fn get(&self, key: &CacheKey) -> Option<&Value> {
+/// self.entries.get(key)
+/// .map(|&(ref v, _)| v)
+/// }
+///
+/// fn remove(&mut self, key: &CacheKey) {
+/// if let Some((_, cache_key)) = self.entries.remove(key) {
+/// self.expirations.remove(&cache_key);
+/// }
+/// }
+///
+/// fn poll_purge(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Error>> {
+/// while let Some(res) = ready!(self.expirations.poll_expired(cx)) {
+/// let entry = res?;
+/// self.entries.remove(entry.get_ref());
+/// }
+///
+/// Poll::Ready(Ok(()))
+/// }
+/// }
+/// ```
+///
+/// [`insert`]: #method.insert
+/// [`insert_at`]: #method.insert_at
+/// [`Key`]: struct@Key
+/// [`Stream`]: https://docs.rs/futures/0.1/futures/stream/trait.Stream.html
+/// [`poll`]: #method.poll
+/// [`Stream::poll`]: #method.poll
+/// [`Timer`]: ../struct.Timer.html
+/// [`slab`]: https://docs.rs/slab
+/// [`capacity`]: #method.capacity
+/// [`reserve`]: #method.reserve
+#[derive(Debug)]
+pub struct DelayQueue<T> {
+ /// Stores data associated with entries
+ slab: Slab<Data<T>>,
+
+ /// Lookup structure tracking all delays in the queue
+ wheel: Wheel<Stack<T>>,
+
+ /// Delays that were inserted when already expired. These cannot be stored
+ /// in the wheel
+ expired: Stack<T>,
+
+ /// Delay expiring when the *first* item in the queue expires
+ delay: Option<Delay>,
+
+ /// Wheel polling state
+ poll: wheel::Poll,
+
+ /// Instant at which the timer starts
+ start: Instant,
+}
+
+/// An entry in `DelayQueue` that has expired and been removed.
+///
+/// Values are returned by [`DelayQueue::poll`].
+///
+/// [`DelayQueue::poll`]: method@DelayQueue::poll
+#[derive(Debug)]
+pub struct Expired<T> {
+ /// The data stored in the queue
+ data: T,
+
+ /// The expiration time
+ deadline: Instant,
+
+ /// The key associated with the entry
+ key: Key,
+}
+
+/// Token to a value stored in a `DelayQueue`.
+///
+/// Instances of `Key` are returned by [`DelayQueue::insert`]. See [`DelayQueue`]
+/// documentation for more details.
+///
+/// [`DelayQueue`]: struct@DelayQueue
+/// [`DelayQueue::insert`]: method@DelayQueue::insert
+#[derive(Debug, Clone)]
+pub struct Key {
+ index: usize,
+}
+
+#[derive(Debug)]
+struct Stack<T> {
+ /// Head of the stack
+ head: Option<usize>,
+ _p: PhantomData<fn() -> T>,
+}
+
+#[derive(Debug)]
+struct Data<T> {
+ /// The data stored in the queue; it will be returned at the requested
+ /// instant.
+ inner: T,
+
+ /// The instant at which the item is returned.
+ when: u64,
+
+ /// Set to true when stored in the `expired` queue
+ expired: bool,
+
+ /// Next entry in the stack
+ next: Option<usize>,
+
+ /// Previous entry in the stack
+ prev: Option<usize>,
+}
+
+/// Maximum number of entries the queue can handle
+const MAX_ENTRIES: usize = (1 << 30) - 1;
+
+impl<T> DelayQueue<T> {
+ /// Creates a new, empty, `DelayQueue`
+ ///
+ /// The queue will not allocate storage until items are inserted into it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use tokio::time::DelayQueue;
+ /// let delay_queue: DelayQueue<u32> = DelayQueue::new();
+ /// ```
+ pub fn new() -> DelayQueue<T> {
+ DelayQueue::with_capacity(0)
+ }
+
+ /// Creates a new, empty, `DelayQueue` with the specified capacity.
+ ///
+ /// The queue will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the queue will not allocate for
+ /// storage.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use tokio::time::DelayQueue;
+ /// # use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::with_capacity(10);
+ ///
+ /// // These insertions are done without further allocation
+ /// for i in 0..10 {
+ /// delay_queue.insert(i, Duration::from_secs(i));
+ /// }
+ ///
+ /// // This will make the queue allocate additional storage
+ /// delay_queue.insert(11, Duration::from_secs(11));
+ /// # }
+ /// ```
+ pub fn with_capacity(capacity: usize) -> DelayQueue<T> {
+ DelayQueue {
+ wheel: Wheel::new(),
+ slab: Slab::with_capacity(capacity),
+ expired: Stack::default(),
+ delay: None,
+ poll: wheel::Poll::new(0),
+ start: Instant::now(),
+ }
+ }
+
+ /// Inserts `value` into the queue set to expire at a specific instant in
+ /// time.
+ ///
+ /// This function is identical to `insert`, but takes an `Instant` instead
+ /// of a `Duration`.
+ ///
+ /// `value` is stored in the queue until `when` is reached, at which point
+ /// `value` will be returned from [`poll`]. If `when` has already been
+ /// reached, then `value` is immediately made available to poll.
+ ///
+ /// The return value represents the insertion and is used as an argument to
+ /// [`remove`] and [`reset`]. Note that [`Key`] is a token and is reused once
+ /// `value` is removed from the queue either by calling [`poll`] after
+ /// `when` is reached or by calling [`remove`]. At this point, the caller
+ /// must take care to not use the returned [`Key`] again as it may reference
+ /// a different item in the queue.
+ ///
+ /// See [type] level documentation for more details.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `when` is too far in the future.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::{DelayQueue, Duration, Instant};
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert_at(
+ /// "foo", Instant::now() + Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ ///
+ /// [`poll`]: #method.poll
+ /// [`remove`]: #method.remove
+ /// [`reset`]: #method.reset
+ /// [`Key`]: struct@Key
+ /// [type]: #
+ pub fn insert_at(&mut self, value: T, when: Instant) -> Key {
+ assert!(self.slab.len() < MAX_ENTRIES, "max entries exceeded");
+
+ // Normalize the deadline. Values cannot be set to expire in the past.
+ let when = self.normalize_deadline(when);
+
+ // Insert the value in the store
+ let key = self.slab.insert(Data {
+ inner: value,
+ when,
+ expired: false,
+ next: None,
+ prev: None,
+ });
+
+ self.insert_idx(when, key);
+
+ // Set a new delay if the current delay's deadline is later than that of the new item
+ let should_set_delay = if let Some(ref delay) = self.delay {
+ let current_exp = self.normalize_deadline(delay.deadline());
+ current_exp > when
+ } else {
+ true
+ };
+
+ if should_set_delay {
+ let delay_time = self.start + Duration::from_millis(when);
+ if let Some(ref mut delay) = &mut self.delay {
+ delay.reset(delay_time);
+ } else {
+ self.delay = Some(delay_until(delay_time));
+ }
+ }
+
+ Key::new(key)
+ }
+
+ /// Attempts to pull out the next value of the delay queue, registering the
+ /// current task for wakeup if the value is not yet available, and returning
+ /// None if the queue is exhausted.
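+ ///
+ /// # Examples
+ ///
+ /// A sketch of awaiting the next expired item by adapting this poll-based
+ /// API with `futures::future::poll_fn` (the `futures` crate is assumed to
+ /// be available, as in the other examples in this module):
+ ///
+ /// ```rust
+ /// use tokio::time::{DelayQueue, Duration};
+ /// use futures::future::poll_fn;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut queue = DelayQueue::new();
+ /// queue.insert("tick", Duration::from_millis(10));
+ ///
+ /// let next = poll_fn(|cx| queue.poll_expired(cx)).await;
+ /// assert_eq!(*next.unwrap().unwrap().get_ref(), "tick");
+ /// # }
+ /// ```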
+ pub fn poll_expired(
+ &mut self,
+ cx: &mut task::Context<'_>,
+ ) -> Poll<Option<Result<Expired<T>, Error>>> {
+ let item = ready!(self.poll_idx(cx));
+ Poll::Ready(item.map(|result| {
+ result.map(|idx| {
+ let data = self.slab.remove(idx);
+ debug_assert!(data.next.is_none());
+ debug_assert!(data.prev.is_none());
+
+ Expired {
+ key: Key::new(idx),
+ data: data.inner,
+ deadline: self.start + Duration::from_millis(data.when),
+ }
+ })
+ }))
+ }
+
+ /// Inserts `value` into the queue set to expire after the requested duration
+ /// elapses.
+ ///
+ /// This function is identical to `insert_at`, but takes a `Duration`
+ /// instead of an `Instant`.
+ ///
+ /// `value` is stored in the queue until `when` is reached, at which point
+ /// `value` will be returned from [`poll`]. If `when` has already been
+ /// reached, then `value` is immediately made available to poll.
+ ///
+ /// The return value represents the insertion and is used as an argument to
+ /// [`remove`] and [`reset`]. Note that [`Key`] is a token and is reused once
+ /// `value` is removed from the queue either by calling [`poll`] after
+ /// `when` is reached or by calling [`remove`]. At this point, the caller
+ /// must take care to not use the returned [`Key`] again as it may reference
+ /// a different item in the queue.
+ ///
+ /// See [type] level documentation for more details.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `timeout` is greater than the maximum supported
+ /// duration.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ ///
+ /// [`poll`]: #method.poll
+ /// [`remove`]: #method.remove
+ /// [`reset`]: #method.reset
+ /// [`Key`]: struct@Key
+ /// [type]: #
+ pub fn insert(&mut self, value: T, timeout: Duration) -> Key {
+ self.insert_at(value, Instant::now() + timeout)
+ }
+
+ fn insert_idx(&mut self, when: u64, key: usize) {
+ use self::wheel::{InsertError, Stack};
+
+ // Register the deadline with the timer wheel
+ match self.wheel.insert(when, key, &mut self.slab) {
+ Ok(_) => {}
+ Err((_, InsertError::Elapsed)) => {
+ self.slab[key].expired = true;
+ // The delay is already expired, store it in the expired queue
+ self.expired.push(key, &mut self.slab);
+ }
+ Err((_, err)) => panic!("invalid deadline; err={:?}", err),
+ }
+ }
+
+ /// Removes the item associated with `key` from the queue.
+ ///
+ /// There must be an item associated with `key`. The function returns the
+ /// removed item as well as the `Instant` at which the delay would have
+ /// expired.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `key` is not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // Remove the entry
+ /// let item = delay_queue.remove(&key);
+ /// assert_eq!(*item.get_ref(), "foo");
+ /// # }
+ /// ```
+ pub fn remove(&mut self, key: &Key) -> Expired<T> {
+ use crate::time::wheel::Stack;
+
+ // Special case the `expired` queue
+ if self.slab[key.index].expired {
+ self.expired.remove(&key.index, &mut self.slab);
+ } else {
+ self.wheel.remove(&key.index, &mut self.slab);
+ }
+
+ let data = self.slab.remove(key.index);
+
+ Expired {
+ key: Key::new(key.index),
+ data: data.inner,
+ deadline: self.start + Duration::from_millis(data.when),
+ }
+ }
+
+ /// Sets the delay of the item associated with `key` to expire at `when`.
+ ///
+ /// This function is identical to `reset` but takes an `Instant` instead of
+ /// a `Duration`.
+ ///
+ /// The item remains in the queue but the delay is set to expire at `when`.
+ /// If `when` is in the past, then the item is immediately made available to
+ /// the caller.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `when` is too far in the future or if `key` is
+ /// not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::{DelayQueue, Duration, Instant};
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // "foo" is scheduled to be returned in 5 seconds
+ ///
+ /// delay_queue.reset_at(&key, Instant::now() + Duration::from_secs(10));
+ ///
+ /// // "foo"is now scheduled to be returned in 10 seconds
+ /// # }
+ /// ```
+ pub fn reset_at(&mut self, key: &Key, when: Instant) {
+ self.wheel.remove(&key.index, &mut self.slab);
+
+ // Normalize the deadline. Values cannot be set to expire in the past.
+ let when = self.normalize_deadline(when);
+
+ self.slab[key.index].when = when;
+ self.insert_idx(when, key.index);
+
+ let next_deadline = self.next_deadline();
+ if let (Some(ref mut delay), Some(deadline)) = (&mut self.delay, next_deadline) {
+ delay.reset(deadline);
+ }
+ }
+
+ /// Returns the next poll time as determined by the wheel
+ fn next_deadline(&mut self) -> Option<Instant> {
+ self.wheel
+ .poll_at()
+ .map(|poll_at| self.start + Duration::from_millis(poll_at))
+ }
+
+ /// Sets the delay of the item associated with `key` to expire after
+ /// `timeout`.
+ ///
+ /// This function is identical to `reset_at` but takes a `Duration` instead
+ /// of an `Instant`.
+ ///
+ /// The item remains in the queue but the delay is set to expire after
+ /// `timeout`. If `timeout` is zero, then the item is immediately made
+ /// available to the caller.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `timeout` is greater than the maximum supported
+ /// duration or if `key` is not contained by the queue.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// let key = delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// // "foo" is scheduled to be returned in 5 seconds
+ ///
+ /// delay_queue.reset(&key, Duration::from_secs(10));
+ ///
+ /// // "foo"is now scheduled to be returned in 10 seconds
+ /// # }
+ /// ```
+ pub fn reset(&mut self, key: &Key, timeout: Duration) {
+ self.reset_at(key, Instant::now() + timeout);
+ }
+
+ /// Clears the queue, removing all items.
+ ///
+ /// After calling `clear`, [`poll`] will return `Poll::Ready(None)`.
+ ///
+ /// Note that this method has no effect on the allocated capacity.
+ ///
+ /// [`poll`]: #method.poll
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ ///
+ /// delay_queue.insert("foo", Duration::from_secs(5));
+ ///
+ /// assert!(!delay_queue.is_empty());
+ ///
+ /// delay_queue.clear();
+ ///
+ /// assert!(delay_queue.is_empty());
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.slab.clear();
+ self.expired = Stack::default();
+ self.wheel = Wheel::new();
+ self.delay = None;
+ }
+
+ /// Returns the number of elements the queue can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ ///
+ /// let delay_queue: DelayQueue<i32> = DelayQueue::with_capacity(10);
+ /// assert_eq!(delay_queue.capacity(), 10);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.slab.capacity()
+ }
+
+ /// Returns the number of elements currently in the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue: DelayQueue<i32> = DelayQueue::with_capacity(10);
+ /// assert_eq!(delay_queue.len(), 0);
+ /// delay_queue.insert(3, Duration::from_secs(5));
+ /// assert_eq!(delay_queue.len(), 1);
+ /// # }
+ /// ```
+ pub fn len(&self) -> usize {
+ self.slab.len()
+ }
+
+ /// Reserves capacity for at least `additional` more items to be queued
+ /// without allocating.
+ ///
+ /// `reserve` does nothing if the queue already has sufficient capacity for
+ /// `additional` more values. If more capacity is required, a new segment of
+ /// memory will be allocated and all existing values will be copied into it.
+ /// As such, if the queue is already very large, a call to `reserve` can end
+ /// up being expensive.
+ ///
+ /// The queue may reserve more than `additional` extra space in order to
+ /// avoid frequent reallocations.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds the maximum number of entries the
+ /// queue can contain.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ ///
+ /// delay_queue.insert("hello", Duration::from_secs(10));
+ /// delay_queue.reserve(10);
+ ///
+ /// assert!(delay_queue.capacity() >= 11);
+ /// # }
+ /// ```
+ pub fn reserve(&mut self, additional: usize) {
+ self.slab.reserve(additional);
+ }
+
+ /// Returns `true` if there are no items in the queue.
+ ///
+ /// Note that this function returns `false` even if all items have not yet
+ /// expired and a call to `poll` will return `Poll::Pending`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::DelayQueue;
+ /// use std::time::Duration;
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let mut delay_queue = DelayQueue::new();
+ /// assert!(delay_queue.is_empty());
+ ///
+ /// delay_queue.insert("hello", Duration::from_secs(5));
+ /// assert!(!delay_queue.is_empty());
+ /// # }
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.slab.is_empty()
+ }
+
+ /// Polls the queue, returning the index of the next slot in the slab that
+ /// should be returned.
+ ///
+ /// A slot should be returned when the associated deadline has been reached.
+ fn poll_idx(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<Result<usize, Error>>> {
+ use self::wheel::Stack;
+
+ let expired = self.expired.pop(&mut self.slab);
+
+ if expired.is_some() {
+ return Poll::Ready(expired.map(Ok));
+ }
+
+ loop {
+ if let Some(ref mut delay) = self.delay {
+ if !delay.is_elapsed() {
+ ready!(Pin::new(&mut *delay).poll(cx));
+ }
+
+ let now = crate::time::ms(delay.deadline() - self.start, crate::time::Round::Down);
+
+ self.poll = wheel::Poll::new(now);
+ }
+
+ // We poll the wheel to get the next value out before finding the next deadline.
+ let wheel_idx = self.wheel.poll(&mut self.poll, &mut self.slab);
+
+ self.delay = self.next_deadline().map(delay_until);
+
+ if let Some(idx) = wheel_idx {
+ return Poll::Ready(Some(Ok(idx)));
+ }
+
+ if self.delay.is_none() {
+ return Poll::Ready(None);
+ }
+ }
+ }
+
+ fn normalize_deadline(&self, when: Instant) -> u64 {
+ let when = if when < self.start {
+ 0
+ } else {
+ crate::time::ms(when - self.start, crate::time::Round::Up)
+ };
+
+ cmp::max(when, self.wheel.elapsed())
+ }
+}
+
+// We never put `T` in a `Pin`...
+impl<T> Unpin for DelayQueue<T> {}
+
+impl<T> Default for DelayQueue<T> {
+ fn default() -> DelayQueue<T> {
+ DelayQueue::new()
+ }
+}
+
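+// With the `stream` feature enabled, a `DelayQueue` can be consumed with
+// `futures::StreamExt`. A sketch of hypothetical caller code (not part of
+// this module):
+//
+//     use futures::StreamExt;
+//
+//     while let Some(entry) = delay_queue.next().await {
+//         let entry = entry?;
+//         println!("expired: {:?}", entry.into_inner());
+//     }
+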
+#[cfg(feature = "stream")]
+impl<T> futures_core::Stream for DelayQueue<T> {
+ // `DelayQueue` is more specialized than `Delay`: a user may care that it
+ // has reached capacity, so return those errors instead of panicking.
+ type Item = Result<Expired<T>, Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
+ DelayQueue::poll_expired(self.get_mut(), cx)
+ }
+}
+
+impl<T> wheel::Stack for Stack<T> {
+ type Owned = usize;
+ type Borrowed = usize;
+ type Store = Slab<Data<T>>;
+
+ fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ fn push(&mut self, item: Self::Owned, store: &mut Self::Store) {
+ // Ensure the entry is not already in a stack.
+ debug_assert!(store[item].next.is_none());
+ debug_assert!(store[item].prev.is_none());
+
+ // Remove the old head entry
+ let old = self.head.take();
+
+ if let Some(idx) = old {
+ store[idx].prev = Some(item);
+ }
+
+ store[item].next = old;
+ self.head = Some(item)
+ }
+
+ fn pop(&mut self, store: &mut Self::Store) -> Option<Self::Owned> {
+ if let Some(idx) = self.head {
+ self.head = store[idx].next;
+
+ if let Some(idx) = self.head {
+ store[idx].prev = None;
+ }
+
+ store[idx].next = None;
+ debug_assert!(store[idx].prev.is_none());
+
+ Some(idx)
+ } else {
+ None
+ }
+ }
+
+ fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store) {
+ assert!(store.contains(*item));
+
+ // Ensure that the entry is in fact contained by the stack
+ debug_assert!({
+ // This walks the full linked list even if an entry is found.
+ let mut next = self.head;
+ let mut contains = false;
+
+ while let Some(idx) = next {
+ if idx == *item {
+ debug_assert!(!contains);
+ contains = true;
+ }
+
+ next = store[idx].next;
+ }
+
+ contains
+ });
+
+ if let Some(next) = store[*item].next {
+ store[next].prev = store[*item].prev;
+ }
+
+ if let Some(prev) = store[*item].prev {
+ store[prev].next = store[*item].next;
+ } else {
+ self.head = store[*item].next;
+ }
+
+ store[*item].next = None;
+ store[*item].prev = None;
+ }
+
+ fn when(item: &Self::Borrowed, store: &Self::Store) -> u64 {
+ store[*item].when
+ }
+}
+
+impl<T> Default for Stack<T> {
+ fn default() -> Stack<T> {
+ Stack {
+ head: None,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl Key {
+ pub(crate) fn new(index: usize) -> Key {
+ Key { index }
+ }
+}
+
+impl<T> Expired<T> {
+ /// Returns a reference to the inner value.
+ pub fn get_ref(&self) -> &T {
+ &self.data
+ }
+
+ /// Returns a mutable reference to the inner value.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.data
+ }
+
+ /// Consumes `self` and returns the inner value.
+ pub fn into_inner(self) -> T {
+ self.data
+ }
+
+ /// Returns the deadline that the expiration was set to.
+ pub fn deadline(&self) -> Instant {
+ self.deadline
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/atomic_stack.rs b/third_party/rust/tokio/src/time/driver/atomic_stack.rs
new file mode 100644
index 0000000000..7e5a83fa52
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/atomic_stack.rs
@@ -0,0 +1,124 @@
+use crate::time::driver::Entry;
+use crate::time::Error;
+
+use std::ptr;
+use std::sync::atomic::AtomicPtr;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::Arc;
+
+/// A stack of `Entry` nodes
+#[derive(Debug)]
+pub(crate) struct AtomicStack {
+ /// Stack head
+ head: AtomicPtr<Entry>,
+}
+
+/// Entries that were removed from the stack
+#[derive(Debug)]
+pub(crate) struct AtomicStackEntries {
+ ptr: *mut Entry,
+}
+
+/// Used to indicate that the timer has shutdown.
+const SHUTDOWN: *mut Entry = 1 as *mut _;
+
+impl AtomicStack {
+ pub(crate) fn new() -> AtomicStack {
+ AtomicStack {
+ head: AtomicPtr::new(ptr::null_mut()),
+ }
+ }
+
+ /// Pushes an entry onto the stack.
+ ///
+ /// Returns `true` if the entry was pushed, `false` if the entry is already
+ /// on the stack, `Err` if the timer is shutdown.
+ pub(crate) fn push(&self, entry: &Arc<Entry>) -> Result<bool, Error> {
+ // First, set the queued bit on the entry
+ let queued = entry.queued.fetch_or(true, SeqCst);
+
+ if queued {
+ // Already queued, nothing more to do
+ return Ok(false);
+ }
+
+ let ptr = Arc::into_raw(entry.clone()) as *mut _;
+
+ let mut curr = self.head.load(SeqCst);
+
+ loop {
+ if curr == SHUTDOWN {
+ // Don't leak the entry node
+ let _ = unsafe { Arc::from_raw(ptr) };
+
+ return Err(Error::shutdown());
+ }
+
+ // Update the `next` pointer. This is safe because setting the queued
+ // bit is a "lock" on this field.
+ unsafe {
+ *(entry.next_atomic.get()) = curr;
+ }
+
+ let actual = self.head.compare_and_swap(curr, ptr, SeqCst);
+
+ if actual == curr {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ Ok(true)
+ }
+
+ /// Takes all entries from the stack
+ pub(crate) fn take(&self) -> AtomicStackEntries {
+ let ptr = self.head.swap(ptr::null_mut(), SeqCst);
+ AtomicStackEntries { ptr }
+ }
+
+ /// Drains all remaining nodes in the stack and prevents any new nodes from
+ /// being pushed onto the stack.
+ pub(crate) fn shutdown(&self) {
+ // Shutdown the processing queue
+ let ptr = self.head.swap(SHUTDOWN, SeqCst);
+
+ // Let the drop fn of `AtomicStackEntries` handle draining the stack
+ drop(AtomicStackEntries { ptr });
+ }
+}
+
+// ===== impl AtomicStackEntries =====
+
+impl Iterator for AtomicStackEntries {
+ type Item = Arc<Entry>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.ptr.is_null() {
+ return None;
+ }
+
+ // Convert the pointer to an `Arc<Entry>`
+ let entry = unsafe { Arc::from_raw(self.ptr) };
+
+ // Update `self.ptr` to point to the next element of the stack
+ self.ptr = unsafe { *entry.next_atomic.get() };
+
+ // Unset the queued flag
+ let res = entry.queued.fetch_and(false, SeqCst);
+ debug_assert!(res);
+
+ // Return the entry
+ Some(entry)
+ }
+}
+
+impl Drop for AtomicStackEntries {
+ fn drop(&mut self) {
+ for entry in self {
+ // Flag the entry as errored
+ entry.error();
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/entry.rs b/third_party/rust/tokio/src/time/driver/entry.rs
new file mode 100644
index 0000000000..20cc824019
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/entry.rs
@@ -0,0 +1,345 @@
+use crate::loom::sync::atomic::AtomicU64;
+use crate::sync::AtomicWaker;
+use crate::time::driver::{Handle, Inner};
+use crate::time::{Duration, Error, Instant};
+
+use std::cell::UnsafeCell;
+use std::ptr;
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Weak};
+use std::task::{self, Poll};
+use std::u64;
+
+/// Internal state shared between a `Delay` instance and the timer.
+///
+/// This struct is used as a node in two intrusive data structures:
+///
+/// * An atomic stack used to signal to the timer thread that the entry state
+/// has changed. The timer thread will observe the entry on this stack and
+/// perform any actions as necessary.
+///
+/// * A doubly linked list used **only** by the timer thread. Each slot in the
+/// timer wheel is a head pointer to the list of entries that must be
+/// processed during that timer tick.
+#[derive(Debug)]
+pub(crate) struct Entry {
+ /// Only accessed from `Registration`.
+ time: CachePadded<UnsafeCell<Time>>,
+
+ /// Timer internals. Using a weak pointer allows the timer to shutdown
+ /// without all `Delay` instances having completed.
+ ///
+ /// When `None`, the entry has not yet been linked with a timer instance.
+ inner: Weak<Inner>,
+
+ /// Tracks the entry state. This value contains the following information:
+ ///
+ /// * The deadline at which the entry must be "fired".
+ /// * A flag indicating if the entry has already been fired.
+ /// * Whether or not the entry transitioned to the error state.
+ ///
+ /// When an `Entry` is created, `state` is initialized to the instant at
+ /// which the entry must be fired. When a timer is reset to a different
+ /// instant, this value is changed.
+ state: AtomicU64,
+
+ /// Task to notify once the deadline is reached.
+ waker: AtomicWaker,
+
+ /// True when the entry is queued in the "process" stack. This value
+ /// is set before pushing the value and unset after popping the value.
+ ///
+ /// TODO: This could possibly be rolled up into `state`.
+ pub(super) queued: AtomicBool,
+
+ /// Next entry in the "process" linked list.
+ ///
+ /// Access to this field is coordinated by the `queued` flag.
+ ///
+ /// Represents a strong Arc ref.
+ pub(super) next_atomic: UnsafeCell<*mut Entry>,
+
+ /// When the entry expires, relative to the `start` of the timer
+ /// (Inner::start). This is only used by the timer.
+ ///
+ /// A `Delay` instance can be reset to a different deadline by the thread
+ /// that owns the `Delay` instance. In this case, the timer thread will not
+ /// immediately know that this has happened. The timer thread must know the
+ /// last deadline that it saw as it uses this value to locate the entry in
+ /// its wheel.
+ ///
+ /// Once the timer thread observes that the instant has changed, it updates
+ /// the wheel and sets this value. The idea is that this value eventually
+ /// converges to the value of `state` as the timer thread makes updates.
+ when: UnsafeCell<Option<u64>>,
+
+ /// Next entry in the State's linked list.
+ ///
+ /// This is only accessed by the timer
+ pub(super) next_stack: UnsafeCell<Option<Arc<Entry>>>,
+
+ /// Previous entry in the State's linked list.
+ ///
+ /// This is only accessed by the timer and is used to unlink a canceled
+ /// entry.
+ ///
+ /// This is a weak reference.
+ pub(super) prev_stack: UnsafeCell<*const Entry>,
+}
+
+/// Stores the info for `Delay`.
+#[derive(Debug)]
+pub(crate) struct Time {
+ pub(crate) deadline: Instant,
+ pub(crate) duration: Duration,
+}
+
+/// Flag indicating a timer entry has elapsed
+const ELAPSED: u64 = 1 << 63;
+
+/// Flag indicating a timer entry has reached an error state
+const ERROR: u64 = u64::MAX;
+
+// ===== impl Entry =====
+
+impl Entry {
+ pub(crate) fn new(handle: &Handle, deadline: Instant, duration: Duration) -> Arc<Entry> {
+ let inner = handle.inner().unwrap();
+ let entry: Entry;
+
+ // Increment the number of active timeouts
+ if inner.increment().is_err() {
+ entry = Entry::new2(deadline, duration, Weak::new(), ERROR)
+ } else {
+ let when = inner.normalize_deadline(deadline);
+ let state = if when <= inner.elapsed() {
+ ELAPSED
+ } else {
+ when
+ };
+ entry = Entry::new2(deadline, duration, Arc::downgrade(&inner), state);
+ }
+
+ let entry = Arc::new(entry);
+ if inner.queue(&entry).is_err() {
+ entry.error();
+ }
+
+ entry
+ }
+
+ /// Only called by `Registration`
+ pub(crate) fn time_ref(&self) -> &Time {
+ unsafe { &*self.time.0.get() }
+ }
+
+ /// Only called by `Registration`
+ #[allow(clippy::mut_from_ref)] // https://github.com/rust-lang/rust-clippy/issues/4281
+ pub(crate) unsafe fn time_mut(&self) -> &mut Time {
+ &mut *self.time.0.get()
+ }
+
+ /// The current entry state as known by the timer. This is not the value of
+ /// `state`, but lets the timer know how to converge its state to `state`.
+ pub(crate) fn when_internal(&self) -> Option<u64> {
+ unsafe { *self.when.get() }
+ }
+
+ pub(crate) fn set_when_internal(&self, when: Option<u64>) {
+ unsafe {
+ *self.when.get() = when;
+ }
+ }
+
+ /// Called by `Timer` to load the current value of `state` for processing
+ pub(crate) fn load_state(&self) -> Option<u64> {
+ let state = self.state.load(SeqCst);
+
+ if is_elapsed(state) {
+ None
+ } else {
+ Some(state)
+ }
+ }
+
+ pub(crate) fn is_elapsed(&self) -> bool {
+ let state = self.state.load(SeqCst);
+ is_elapsed(state)
+ }
+
+ pub(crate) fn fire(&self, when: u64) {
+ let mut curr = self.state.load(SeqCst);
+
+ loop {
+ if is_elapsed(curr) || curr > when {
+ return;
+ }
+
+ let next = ELAPSED | curr;
+ let actual = self.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ self.waker.wake();
+ }
+
+ pub(crate) fn error(&self) {
+ // Only transition to the error state if not currently elapsed
+ let mut curr = self.state.load(SeqCst);
+
+ loop {
+ if is_elapsed(curr) {
+ return;
+ }
+
+ let next = ERROR;
+
+ let actual = self.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ self.waker.wake();
+ }
+
+ pub(crate) fn cancel(entry: &Arc<Entry>) {
+ let state = entry.state.fetch_or(ELAPSED, SeqCst);
+
+ if is_elapsed(state) {
+ // Nothing more to do
+ return;
+ }
+
+ // If registered with a timer instance, try to upgrade the Arc.
+ let inner = match entry.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ let _ = inner.queue(entry);
+ }
+
+ pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {
+ let mut curr = self.state.load(SeqCst);
+
+ if is_elapsed(curr) {
+ return Poll::Ready(if curr == ERROR {
+ Err(Error::shutdown())
+ } else {
+ Ok(())
+ });
+ }
+
+ self.waker.register_by_ref(cx.waker());
+
+ curr = self.state.load(SeqCst);
+
+ if is_elapsed(curr) {
+ return Poll::Ready(if curr == ERROR {
+ Err(Error::shutdown())
+ } else {
+ Ok(())
+ });
+ }
+
+ Poll::Pending
+ }
+
+ /// Only called by `Registration`
+ pub(crate) fn reset(entry: &mut Arc<Entry>) {
+ let inner = match entry.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ let deadline = entry.time_ref().deadline;
+ let when = inner.normalize_deadline(deadline);
+ let elapsed = inner.elapsed();
+
+ let mut curr = entry.state.load(SeqCst);
+ let mut notify;
+
+ loop {
+ // In these two cases, there is no work to do when resetting the
+ // timer. If the `Entry` is in an error state, then it cannot be
+ // used anymore. If resetting the entry to the current value, then
+ // the reset is a noop.
+ if curr == ERROR || curr == when {
+ return;
+ }
+
+ let next;
+
+ if when <= elapsed {
+ next = ELAPSED;
+ notify = !is_elapsed(curr);
+ } else {
+ next = when;
+ notify = true;
+ }
+
+ let actual = entry.state.compare_and_swap(curr, next, SeqCst);
+
+ if curr == actual {
+ break;
+ }
+
+ curr = actual;
+ }
+
+ if notify {
+ let _ = inner.queue(entry);
+ }
+ }
+
+ fn new2(deadline: Instant, duration: Duration, inner: Weak<Inner>, state: u64) -> Self {
+ Self {
+ time: CachePadded(UnsafeCell::new(Time { deadline, duration })),
+ inner,
+ waker: AtomicWaker::new(),
+ state: AtomicU64::new(state),
+ queued: AtomicBool::new(false),
+ next_atomic: UnsafeCell::new(ptr::null_mut()),
+ when: UnsafeCell::new(None),
+ next_stack: UnsafeCell::new(None),
+ prev_stack: UnsafeCell::new(ptr::null_mut()),
+ }
+ }
+
+ fn upgrade_inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+fn is_elapsed(state: u64) -> bool {
+ state & ELAPSED == ELAPSED
+}
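+
+// Illustrative sanity check of the state encoding described above: the top
+// bit marks "elapsed", so the all-ones `ERROR` value also reads as elapsed.
+#[cfg(test)]
+mod state_encoding_tests {
+ use super::{is_elapsed, ELAPSED, ERROR};
+
+ #[test]
+ fn elapsed_bit_semantics() {
+ let fired = ELAPSED | 1_234; // a deadline of 1234 ms that has fired
+ assert!(is_elapsed(fired));
+ assert!(is_elapsed(ERROR)); // the error state also reads as elapsed
+ assert!(!is_elapsed(1_234)); // a pending deadline is not elapsed
+ }
+}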
+
+impl Drop for Entry {
+ fn drop(&mut self) {
+ let inner = match self.upgrade_inner() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ inner.decrement();
+ }
+}
+
+unsafe impl Send for Entry {}
+unsafe impl Sync for Entry {}
+
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+#[derive(Debug)]
+struct CachePadded<T>(T);
diff --git a/third_party/rust/tokio/src/time/driver/handle.rs b/third_party/rust/tokio/src/time/driver/handle.rs
new file mode 100644
index 0000000000..38b1761c8e
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/handle.rs
@@ -0,0 +1,38 @@
+use crate::runtime::context;
+use crate::time::driver::Inner;
+use std::fmt;
+use std::sync::{Arc, Weak};
+
+/// Handle to time driver instance.
+#[derive(Clone)]
+pub(crate) struct Handle {
+ inner: Weak<Inner>,
+}
+
+impl Handle {
+ /// Creates a new timer `Handle` from a shared `Inner` timer state.
+ pub(crate) fn new(inner: Weak<Inner>) -> Self {
+ Handle { inner }
+ }
+
+ /// Tries to get a handle to the current timer.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is no current timer set.
+ pub(crate) fn current() -> Self {
+ context::time_handle()
+ .expect("there is no timer running, must be called from the context of Tokio runtime")
+ }
+
+ /// Tries to return a strong ref to the inner
+ pub(crate) fn inner(&self) -> Option<Arc<Inner>> {
+ self.inner.upgrade()
+ }
+}
+
+impl fmt::Debug for Handle {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Handle")
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/mod.rs b/third_party/rust/tokio/src/time/driver/mod.rs
new file mode 100644
index 0000000000..4616816f3f
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/mod.rs
@@ -0,0 +1,391 @@
+//! Time driver
+
+mod atomic_stack;
+use self::atomic_stack::AtomicStack;
+
+mod entry;
+pub(super) use self::entry::Entry;
+
+mod handle;
+pub(crate) use self::handle::Handle;
+
+mod registration;
+pub(crate) use self::registration::Registration;
+
+mod stack;
+use self::stack::Stack;
+
+use crate::loom::sync::atomic::{AtomicU64, AtomicUsize};
+use crate::park::{Park, Unpark};
+use crate::time::{wheel, Error};
+use crate::time::{Clock, Duration, Instant};
+
+use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
+
+use std::sync::Arc;
+use std::usize;
+use std::{cmp, fmt};
+
+/// Time implementation that drives [`Delay`], [`Interval`], and [`Timeout`].
+///
+/// A `Driver` instance tracks the state necessary for managing time and
+/// notifying the [`Delay`] instances once their deadlines are reached.
+///
+/// It is expected that a single instance manages many individual [`Delay`]
+/// instances. The `Driver` implementation is thread-safe and, as such, is able
+/// to handle callers from across threads.
+///
+/// After creating the `Driver` instance, the caller must repeatedly call
+/// [`turn`]. The time driver will perform no work unless [`turn`] is called
+/// repeatedly.
+///
+/// The driver has a resolution of one millisecond. Any unit of time that falls
+/// between milliseconds is rounded up to the next millisecond.
+///
+/// When an instance is dropped, any outstanding [`Delay`] instance that has not
+/// elapsed will be notified with an error. At this point, calling `poll` on the
+/// [`Delay`] instance will result in `Err` being returned.
+///
+/// # Implementation
+///
+/// The time driver is based on the [paper by Varghese and Lauck][paper].
+///
+/// A hashed timing wheel is a vector of slots, where each slot handles a time
+/// slice. As time progresses, the timer walks over the slot for the current
+/// instant, and processes each entry for that slot. When the timer reaches the
+/// end of the wheel, it starts again at the beginning.
+///
+/// The implementation maintains six wheels arranged in a set of levels. As the
+/// levels go up, the slots of the associated wheel represent larger intervals
+/// of time. At each level, the wheel has 64 slots. Each slot covers a range of
+/// time equal to the wheel at the lower level. At level zero, each slot
+/// represents one millisecond of time.
+///
+/// The wheels are:
+///
+/// * Level 0: 64 x 1 millisecond slots.
+/// * Level 1: 64 x 64 millisecond slots.
+/// * Level 2: 64 x ~4 second slots.
+/// * Level 3: 64 x ~4 minute slots.
+/// * Level 4: 64 x ~4 hour slots.
+/// * Level 5: 64 x ~12 day slots.
+///
+/// When the timer processes entries at level zero, it will notify all the
+/// `Delay` instances as their deadlines have been reached. For all higher
+/// levels, all entries will be redistributed across the wheel at the next level
+/// down. Eventually, as time progresses, [`Delay`] instances will either be
+/// canceled (dropped) or their associated entries will reach level zero and
+/// be notified.
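+///
+/// As a quick sketch of the arithmetic implied by the list above (64 slots
+/// per level, 1 ms resolution at level zero), the span covered by a single
+/// slot at each level is:
+///
+/// ```
+/// // Span of one slot at `level`, in milliseconds.
+/// fn slot_span_ms(level: u32) -> u64 {
+///     64u64.pow(level)
+/// }
+///
+/// assert_eq!(slot_span_ms(0), 1);       // 1 ms
+/// assert_eq!(slot_span_ms(1), 64);      // 64 ms
+/// assert_eq!(slot_span_ms(2), 4_096);   // ~4 s
+/// assert_eq!(slot_span_ms(3), 262_144); // ~4.4 min
+/// ```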
+#[derive(Debug)]
+pub(crate) struct Driver<T> {
+ /// Shared state
+ inner: Arc<Inner>,
+
+ /// Timer wheel
+ wheel: wheel::Wheel<Stack>,
+
+ /// Thread parker. The `Driver` park implementation delegates to this.
+ park: T,
+
+ /// Source of "now" instances
+ clock: Clock,
+}
+
+/// Timer state shared between `Driver`, `Handle`, and `Registration`.
+pub(crate) struct Inner {
+ /// The instant at which the timer started running.
+ start: Instant,
+
+ /// The last published timer `elapsed` value.
+ elapsed: AtomicU64,
+
+ /// Number of active timeouts
+ num: AtomicUsize,
+
+ /// Head of the "process" linked list.
+ process: AtomicStack,
+
+ /// Unparks the timer thread.
+ unpark: Box<dyn Unpark>,
+}
+
+/// Maximum number of timeouts the system can handle concurrently.
+const MAX_TIMEOUTS: usize = usize::MAX >> 1;
+
+// ===== impl Driver =====
+
+impl<T> Driver<T>
+where
+ T: Park,
+{
+ /// Creates a new `Driver` instance that uses `park` to block the current
+ /// thread and `clock` to get the current `Instant`.
+ ///
+ /// Specifying the source of time is useful when testing.
+ pub(crate) fn new(park: T, clock: Clock) -> Driver<T> {
+ let unpark = Box::new(park.unpark());
+
+ Driver {
+ inner: Arc::new(Inner::new(clock.now(), unpark)),
+ wheel: wheel::Wheel::new(),
+ park,
+ clock,
+ }
+ }
+
+ /// Returns a handle to the timer.
+ ///
+ /// The `Handle` is how `Delay` instances are created. The `Delay` instances
+ /// can either be created directly or the `Handle` instance can be passed to
+ /// `with_default`, setting the timer as the default timer for the execution
+ /// context.
+ pub(crate) fn handle(&self) -> Handle {
+ Handle::new(Arc::downgrade(&self.inner))
+ }
+
+ /// Converts a `when` value (milliseconds since the timer start) to an `Instant`.
+ fn expiration_instant(&self, when: u64) -> Instant {
+ self.inner.start + Duration::from_millis(when)
+ }
+
+ /// Runs timer related logic
+ fn process(&mut self) {
+ let now = crate::time::ms(
+ self.clock.now() - self.inner.start,
+ crate::time::Round::Down,
+ );
+ let mut poll = wheel::Poll::new(now);
+
+ while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) {
+ let when = entry.when_internal().expect("invalid internal entry state");
+
+ // Fire the entry
+ entry.fire(when);
+
+ // Track that the entry has been fired
+ entry.set_when_internal(None);
+ }
+
+ // Update the elapsed cache
+ self.inner.elapsed.store(self.wheel.elapsed(), SeqCst);
+ }
+
+ /// Processes the entry queue
+ ///
+ /// This handles adding and canceling timeouts.
+ fn process_queue(&mut self) {
+ for entry in self.inner.process.take() {
+ match (entry.when_internal(), entry.load_state()) {
+ (None, None) => {
+ // Nothing to do
+ }
+ (Some(_), None) => {
+ // Remove the entry
+ self.clear_entry(&entry);
+ }
+ (None, Some(when)) => {
+ // Queue the entry
+ self.add_entry(entry, when);
+ }
+ (Some(_), Some(next)) => {
+ self.clear_entry(&entry);
+ self.add_entry(entry, next);
+ }
+ }
+ }
+ }
+
+ fn clear_entry(&mut self, entry: &Arc<Entry>) {
+ self.wheel.remove(entry, &mut ());
+ entry.set_when_internal(None);
+ }
+
+ /// Fires the entry if its deadline has already elapsed, otherwise queues
+ /// it in the wheel to be processed later.
+ fn add_entry(&mut self, entry: Arc<Entry>, when: u64) {
+ use crate::time::wheel::InsertError;
+
+ entry.set_when_internal(Some(when));
+
+ match self.wheel.insert(when, entry, &mut ()) {
+ Ok(_) => {}
+ Err((entry, InsertError::Elapsed)) => {
+ // The entry's deadline has elapsed, so fire it and update the
+ // internal state accordingly.
+ entry.set_when_internal(None);
+ entry.fire(when);
+ }
+ Err((entry, InsertError::Invalid)) => {
+ // The entry's deadline is invalid, so error it and update the
+ // internal state accordingly.
+ entry.set_when_internal(None);
+ entry.error();
+ }
+ }
+ }
+}
+
+impl<T> Park for Driver<T>
+where
+ T: Park,
+{
+ type Unpark = T::Unpark;
+ type Error = T::Error;
+
+ fn unpark(&self) -> Self::Unpark {
+ self.park.unpark()
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ self.process_queue();
+
+ match self.wheel.poll_at() {
+ Some(when) => {
+ let now = self.clock.now();
+ let deadline = self.expiration_instant(when);
+
+ if deadline > now {
+ let dur = deadline - now;
+
+ if self.clock.is_paused() {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ self.clock.advance(dur);
+ } else {
+ self.park.park_timeout(dur)?;
+ }
+ } else {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ }
+ }
+ None => {
+ self.park.park()?;
+ }
+ }
+
+ self.process();
+
+ Ok(())
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ self.process_queue();
+
+ match self.wheel.poll_at() {
+ Some(when) => {
+ let now = self.clock.now();
+ let deadline = self.expiration_instant(when);
+
+ if deadline > now {
+ let duration = cmp::min(deadline - now, duration);
+
+ if self.clock.is_paused() {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ self.clock.advance(duration);
+ } else {
+ self.park.park_timeout(duration)?;
+ }
+ } else {
+ self.park.park_timeout(Duration::from_secs(0))?;
+ }
+ }
+ None => {
+ self.park.park_timeout(duration)?;
+ }
+ }
+
+ self.process();
+
+ Ok(())
+ }
+}
+
+impl<T> Drop for Driver<T> {
+ fn drop(&mut self) {
+ use std::u64;
+
+ // Shutdown the stack of entries to process, preventing any new entries
+ // from being pushed.
+ self.inner.process.shutdown();
+
+ // Clear the wheel; using u64::MAX allows us to drain everything
+ let mut poll = wheel::Poll::new(u64::MAX);
+
+ while let Some(entry) = self.wheel.poll(&mut poll, &mut ()) {
+ entry.error();
+ }
+ }
+}
+
+// ===== impl Inner =====
+
+impl Inner {
+ fn new(start: Instant, unpark: Box<dyn Unpark>) -> Inner {
+ Inner {
+ num: AtomicUsize::new(0),
+ elapsed: AtomicU64::new(0),
+ process: AtomicStack::new(),
+ start,
+ unpark,
+ }
+ }
+
+ fn elapsed(&self) -> u64 {
+ self.elapsed.load(SeqCst)
+ }
+
+ #[cfg(all(test, loom))]
+ fn num(&self, ordering: std::sync::atomic::Ordering) -> usize {
+ self.num.load(ordering)
+ }
+
+ /// Increments the number of active timeouts
+ fn increment(&self) -> Result<(), Error> {
+ let mut curr = self.num.load(Relaxed);
+ loop {
+ if curr == MAX_TIMEOUTS {
+ return Err(Error::at_capacity());
+ }
+
+ match self
+ .num
+ .compare_exchange_weak(curr, curr + 1, Release, Relaxed)
+ {
+ Ok(_) => return Ok(()),
+ Err(next) => curr = next,
+ }
+ }
+ }
+
+ /// Decrements the number of active timeouts
+ fn decrement(&self) {
+ let prev = self.num.fetch_sub(1, Acquire);
+ debug_assert!(prev <= MAX_TIMEOUTS);
+ }
+
+ fn queue(&self, entry: &Arc<Entry>) -> Result<(), Error> {
+ if self.process.push(entry)? {
+ // The timer is notified so that it can process the timeout
+ self.unpark.unpark();
+ }
+
+ Ok(())
+ }
+
+ fn normalize_deadline(&self, deadline: Instant) -> u64 {
+ if deadline < self.start {
+ return 0;
+ }
+
+ crate::time::ms(deadline - self.start, crate::time::Round::Up)
+ }
+}
+
+impl fmt::Debug for Inner {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Inner").finish()
+ }
+}
+
+#[cfg(all(test, loom))]
+mod tests;
diff --git a/third_party/rust/tokio/src/time/driver/registration.rs b/third_party/rust/tokio/src/time/driver/registration.rs
new file mode 100644
index 0000000000..b77357e735
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/registration.rs
@@ -0,0 +1,53 @@
+use crate::time::driver::{Entry, Handle};
+use crate::time::{Duration, Error, Instant};
+
+use std::sync::Arc;
+use std::task::{self, Poll};
+
+/// Registration with a timer.
+///
+/// The association between a `Delay` instance and a timer is done lazily in
+/// `poll`.
+#[derive(Debug)]
+pub(crate) struct Registration {
+ entry: Arc<Entry>,
+}
+
+impl Registration {
+ pub(crate) fn new(deadline: Instant, duration: Duration) -> Registration {
+ let handle = Handle::current();
+
+ Registration {
+ entry: Entry::new(&handle, deadline, duration),
+ }
+ }
+
+ pub(crate) fn deadline(&self) -> Instant {
+ self.entry.time_ref().deadline
+ }
+
+ pub(crate) fn reset(&mut self, deadline: Instant) {
+ unsafe {
+ self.entry.time_mut().deadline = deadline;
+ }
+
+ Entry::reset(&mut self.entry);
+ }
+
+ pub(crate) fn is_elapsed(&self) -> bool {
+ self.entry.is_elapsed()
+ }
+
+ pub(crate) fn poll_elapsed(&self, cx: &mut task::Context<'_>) -> Poll<Result<(), Error>> {
+ // Keep track of task budget
+ ready!(crate::coop::poll_proceed(cx));
+
+ self.entry.poll_elapsed(cx)
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ Entry::cancel(&self.entry);
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/stack.rs b/third_party/rust/tokio/src/time/driver/stack.rs
new file mode 100644
index 0000000000..3e2924f265
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/stack.rs
@@ -0,0 +1,121 @@
+use crate::time::driver::Entry;
+use crate::time::wheel;
+
+use std::ptr;
+use std::sync::Arc;
+
+/// A doubly linked stack
+#[derive(Debug)]
+pub(crate) struct Stack {
+ head: Option<Arc<Entry>>,
+}
+
+impl Default for Stack {
+ fn default() -> Stack {
+ Stack { head: None }
+ }
+}
+
+impl wheel::Stack for Stack {
+ type Owned = Arc<Entry>;
+ type Borrowed = Entry;
+ type Store = ();
+
+ fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ fn push(&mut self, entry: Self::Owned, _: &mut Self::Store) {
+ // Get a pointer to the entry for the prev link
+ let ptr: *const Entry = &*entry as *const _;
+
+ // Remove the old head entry
+ let old = self.head.take();
+
+ unsafe {
+ // Ensure the entry is not already in a stack.
+ debug_assert!((*entry.next_stack.get()).is_none());
+ debug_assert!((*entry.prev_stack.get()).is_null());
+
+ if let Some(ref entry) = old.as_ref() {
+ debug_assert!({
+ // The head is not already set to the entry
+ ptr != &***entry as *const _
+ });
+
+ // Set the previous link on the old head
+ *entry.prev_stack.get() = ptr;
+ }
+
+ // Set this entry's next pointer
+ *entry.next_stack.get() = old;
+ }
+
+ // Update the head pointer
+ self.head = Some(entry);
+ }
+
+ /// Pops an item from the stack
+ fn pop(&mut self, _: &mut ()) -> Option<Arc<Entry>> {
+ let entry = self.head.take();
+
+ unsafe {
+ if let Some(entry) = entry.as_ref() {
+ self.head = (*entry.next_stack.get()).take();
+
+ if let Some(entry) = self.head.as_ref() {
+ *entry.prev_stack.get() = ptr::null();
+ }
+
+ *entry.prev_stack.get() = ptr::null();
+ }
+ }
+
+ entry
+ }
+
+ fn remove(&mut self, entry: &Entry, _: &mut ()) {
+ unsafe {
+ // Ensure that the entry is in fact contained by the stack
+ debug_assert!({
+ // This walks the full linked list even if an entry is found.
+ let mut next = self.head.as_ref();
+ let mut contains = false;
+
+ while let Some(n) = next {
+ if entry as *const _ == &**n as *const _ {
+ debug_assert!(!contains);
+ contains = true;
+ }
+
+ next = (*n.next_stack.get()).as_ref();
+ }
+
+ contains
+ });
+
+ // Unlink `entry` from the next node
+ let next = (*entry.next_stack.get()).take();
+
+ if let Some(next) = next.as_ref() {
+ (*next.prev_stack.get()) = *entry.prev_stack.get();
+ }
+
+ // Unlink `entry` from the prev node
+
+ if let Some(prev) = (*entry.prev_stack.get()).as_ref() {
+ *prev.next_stack.get() = next;
+ } else {
+ // It is the head
+ self.head = next;
+ }
+
+ // Unset the prev pointer
+ *entry.prev_stack.get() = ptr::null();
+ }
+ }
+
+ fn when(item: &Entry, _: &()) -> u64 {
+ item.when_internal().expect("invalid internal state")
+ }
+}
diff --git a/third_party/rust/tokio/src/time/driver/tests/mod.rs b/third_party/rust/tokio/src/time/driver/tests/mod.rs
new file mode 100644
index 0000000000..88ff5525da
--- /dev/null
+++ b/third_party/rust/tokio/src/time/driver/tests/mod.rs
@@ -0,0 +1,55 @@
+use crate::park::Unpark;
+use crate::time::driver::Inner;
+use crate::time::Instant;
+
+use loom::thread;
+
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+struct MockUnpark;
+
+impl Unpark for MockUnpark {
+ fn unpark(&self) {}
+}
+
+#[test]
+fn balanced_incr_and_decr() {
+ const OPS: usize = 5;
+
+ fn incr(inner: Arc<Inner>) {
+ for _ in 0..OPS {
+ inner.increment().expect("increment should not have failed");
+ thread::yield_now();
+ }
+ }
+
+ fn decr(inner: Arc<Inner>) {
+ let mut ops_performed = 0;
+ while ops_performed < OPS {
+ if inner.num(Ordering::Relaxed) > 0 {
+ ops_performed += 1;
+ inner.decrement();
+ }
+ thread::yield_now();
+ }
+ }
+
+ loom::model(|| {
+ let unpark = Box::new(MockUnpark);
+ let instant = Instant::now();
+
+ let inner = Arc::new(Inner::new(instant, unpark));
+
+ let incr_inner = inner.clone();
+ let decr_inner = inner.clone();
+
+ let incr_handle = thread::spawn(move || incr(incr_inner));
+ let decr_handle = thread::spawn(move || decr(decr_inner));
+
+ incr_handle.join().expect("should never fail");
+ decr_handle.join().expect("should never fail");
+
+ assert_eq!(inner.num(Ordering::SeqCst), 0);
+ })
+}
diff --git a/third_party/rust/tokio/src/time/error.rs b/third_party/rust/tokio/src/time/error.rs
new file mode 100644
index 0000000000..0667b97ac1
--- /dev/null
+++ b/third_party/rust/tokio/src/time/error.rs
@@ -0,0 +1,72 @@
+use self::Kind::*;
+use std::error;
+use std::fmt;
+
+/// Errors encountered by the timer implementation.
+///
+/// Currently, there are two different errors that can occur:
+///
+/// * `shutdown` occurs when a timer operation is attempted, but the timer
+/// instance has been dropped. In this case, the operation will never be able
+/// to complete and the `shutdown` error is returned. This is a permanent
+/// error, i.e., once this error is observed, timer operations will never
+/// succeed in the future.
+///
+/// * `at_capacity` occurs when a timer operation is attempted, but the timer
+/// instance is currently handling its maximum number of outstanding delays.
+/// In this case, the operation is not able to be performed at the current
+/// moment, and `at_capacity` is returned. This is a transient error, i.e., at
+/// some point in the future, if the operation is attempted again, it might
+/// succeed. Callers that observe this error should attempt to [shed load]. One
+/// way to do this would be dropping the future that issued the timer operation.
+///
+/// [shed load]: https://en.wikipedia.org/wiki/Load_Shedding
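+///
+/// # Examples
+///
+/// A sketch of distinguishing the two error kinds when handling a timer
+/// failure (the `describe` helper is hypothetical):
+///
+/// ```
+/// use tokio::time::Error;
+///
+/// fn describe(e: &Error) -> &'static str {
+///     if e.is_shutdown() {
+///         "the timer was dropped"
+///     } else if e.is_at_capacity() {
+///         "too many outstanding delays; shed load and retry"
+///     } else {
+///         "unknown timer error"
+///     }
+/// }
+///
+/// assert_eq!(describe(&Error::shutdown()), "the timer was dropped");
+/// ```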
+#[derive(Debug)]
+pub struct Error(Kind);
+
+#[derive(Debug)]
+enum Kind {
+ Shutdown,
+ AtCapacity,
+}
+
+impl Error {
+ /// Creates an error representing a shutdown timer.
+ pub fn shutdown() -> Error {
+ Error(Shutdown)
+ }
+
+ /// Returns `true` if the error was caused by the timer being shutdown.
+ pub fn is_shutdown(&self) -> bool {
+ match self.0 {
+ Kind::Shutdown => true,
+ _ => false,
+ }
+ }
+
+ /// Creates an error representing a timer at capacity.
+ pub fn at_capacity() -> Error {
+ Error(AtCapacity)
+ }
+
+ /// Returns `true` if the error was caused by the timer being at capacity.
+ pub fn is_at_capacity(&self) -> bool {
+ match self.0 {
+ Kind::AtCapacity => true,
+ _ => false,
+ }
+ }
+}
+
+impl error::Error for Error {}
+
+impl fmt::Display for Error {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::Kind::*;
+ let descr = match self.0 {
+ Shutdown => "the timer is shutdown, must be called from the context of Tokio runtime",
+ AtCapacity => "timer is at capacity and cannot create a new entry",
+ };
+ write!(fmt, "{}", descr)
+ }
+}
diff --git a/third_party/rust/tokio/src/time/instant.rs b/third_party/rust/tokio/src/time/instant.rs
new file mode 100644
index 0000000000..f2cb4bc97d
--- /dev/null
+++ b/third_party/rust/tokio/src/time/instant.rs
@@ -0,0 +1,199 @@
+#![allow(clippy::trivially_copy_pass_by_ref)]
+
+use std::fmt;
+use std::ops;
+use std::time::Duration;
+
+/// A measurement of a monotonically nondecreasing clock.
+/// Opaque and useful only with `Duration`.
+#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash)]
+pub struct Instant {
+ std: std::time::Instant,
+}
+
+impl Instant {
+ /// Returns an instant corresponding to "now".
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::Instant;
+ ///
+ /// let now = Instant::now();
+ /// ```
+ pub fn now() -> Instant {
+ variant::now()
+ }
+
+ /// Create a `tokio::time::Instant` from a `std::time::Instant`.
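+ ///
+ /// An illustrative round-trip through `from_std` and `into_std`:
+ ///
+ /// ```
+ /// use tokio::time::Instant;
+ ///
+ /// let std_now = std::time::Instant::now();
+ /// let now = Instant::from_std(std_now);
+ /// assert_eq!(now.into_std(), std_now);
+ /// ```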
+ pub fn from_std(std: std::time::Instant) -> Instant {
+ Instant { std }
+ }
+
+ /// Convert the value into a `std::time::Instant`.
+ pub fn into_std(self) -> std::time::Instant {
+ self.std
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `earlier` is later than `self`.
+ pub fn duration_since(&self, earlier: Instant) -> Duration {
+ self.std.duration_since(earlier.std)
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one, or
+ /// None if that instant is later than this one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::{Duration, Instant, delay_for};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let now = Instant::now();
+ /// delay_for(Duration::new(1, 0)).await;
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.checked_duration_since(now));
+ /// println!("{:?}", now.checked_duration_since(new_now)); // None
+ /// }
+ /// ```
+ pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
+ self.std.checked_duration_since(earlier.std)
+ }
+
+ /// Returns the amount of time elapsed from another instant to this one, or
+ /// zero duration if that instant is earlier than this one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::{Duration, Instant, delay_for};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let now = Instant::now();
+ /// delay_for(Duration::new(1, 0)).await;
+ /// let new_now = Instant::now();
+ /// println!("{:?}", new_now.saturating_duration_since(now));
+ /// println!("{:?}", now.saturating_duration_since(new_now)); // 0ns
+ /// }
+ /// ```
+ pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
+ self.std.saturating_duration_since(earlier.std)
+ }
+
+ /// Returns the amount of time elapsed since this instant was created.
+ ///
+ /// # Panics
+ ///
+ /// This function may panic if the current time is earlier than this
+ /// instant, which is something that can happen if an `Instant` is
+ /// produced synthetically.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time::{Duration, Instant, delay_for};
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let instant = Instant::now();
+ /// let three_secs = Duration::from_secs(3);
+ /// delay_for(three_secs).await;
+ /// assert!(instant.elapsed() >= three_secs);
+ /// }
+ /// ```
+ pub fn elapsed(&self) -> Duration {
+ Instant::now() - *self
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be
+ /// represented as `Instant` (which means it's inside the bounds of the
+ /// underlying data structure), `None` otherwise.
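+ ///
+ /// A brief illustrative example:
+ ///
+ /// ```
+ /// use tokio::time::{Duration, Instant};
+ ///
+ /// let now = Instant::now();
+ /// // A small addition fits; an addition that overflows the underlying
+ /// // representation would yield `None`.
+ /// assert!(now.checked_add(Duration::from_secs(5)).is_some());
+ /// ```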
+ pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
+ self.std.checked_add(duration).map(Instant::from_std)
+ }
+
+ /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be
+ /// represented as `Instant` (which means it's inside the bounds of the
+ /// underlying data structure), `None` otherwise.
+ pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
+ self.std.checked_sub(duration).map(Instant::from_std)
+ }
+}
+
+impl From<std::time::Instant> for Instant {
+ fn from(time: std::time::Instant) -> Instant {
+ Instant::from_std(time)
+ }
+}
+
+impl From<Instant> for std::time::Instant {
+ fn from(time: Instant) -> std::time::Instant {
+ time.into_std()
+ }
+}
+
+impl ops::Add<Duration> for Instant {
+ type Output = Instant;
+
+ fn add(self, other: Duration) -> Instant {
+ Instant::from_std(self.std + other)
+ }
+}
+
+impl ops::AddAssign<Duration> for Instant {
+ fn add_assign(&mut self, rhs: Duration) {
+ *self = *self + rhs;
+ }
+}
+
+impl ops::Sub for Instant {
+ type Output = Duration;
+
+ fn sub(self, rhs: Instant) -> Duration {
+ self.std - rhs.std
+ }
+}
+
+impl ops::Sub<Duration> for Instant {
+ type Output = Instant;
+
+ fn sub(self, rhs: Duration) -> Instant {
+ Instant::from_std(self.std - rhs)
+ }
+}
+
+impl ops::SubAssign<Duration> for Instant {
+ fn sub_assign(&mut self, rhs: Duration) {
+ *self = *self - rhs;
+ }
+}
+
+impl fmt::Debug for Instant {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.std.fmt(fmt)
+ }
+}
+
+#[cfg(not(feature = "test-util"))]
+mod variant {
+ use super::Instant;
+
+ pub(super) fn now() -> Instant {
+ Instant::from_std(std::time::Instant::now())
+ }
+}
+
+#[cfg(feature = "test-util")]
+mod variant {
+ use super::Instant;
+
+ pub(super) fn now() -> Instant {
+ crate::time::clock::now()
+ }
+}
diff --git a/third_party/rust/tokio/src/time/interval.rs b/third_party/rust/tokio/src/time/interval.rs
new file mode 100644
index 0000000000..090e2d1f05
--- /dev/null
+++ b/third_party/rust/tokio/src/time/interval.rs
@@ -0,0 +1,139 @@
+use crate::future::poll_fn;
+use crate::time::{delay_until, Delay, Duration, Instant};
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Creates a new `Interval` that yields with an interval of `period`. The first
+/// tick completes immediately.
+///
+/// An interval will tick indefinitely. At any time, the `Interval` value can be
+/// dropped. This cancels the interval.
+///
+/// This function is equivalent to `interval_at(Instant::now(), period)`.
+///
+/// # Panics
+///
+/// This function panics if `period` is zero.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::time::{self, Duration};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let mut interval = time::interval(Duration::from_millis(10));
+///
+/// interval.tick().await;
+/// interval.tick().await;
+/// interval.tick().await;
+///
+/// // approximately 20ms have elapsed.
+/// }
+/// ```
+pub fn interval(period: Duration) -> Interval {
+ assert!(period > Duration::new(0, 0), "`period` must be non-zero.");
+
+ interval_at(Instant::now(), period)
+}
+
+/// Creates a new `Interval` that yields with an interval of `period`, with
+/// the first tick completing at `start`.
+///
+/// An interval will tick indefinitely. At any time, the `Interval` value can be
+/// dropped. This cancels the interval.
+///
+/// # Panics
+///
+/// This function panics if `period` is zero.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::time::{interval_at, Duration, Instant};
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let start = Instant::now() + Duration::from_millis(50);
+/// let mut interval = interval_at(start, Duration::from_millis(10));
+///
+/// interval.tick().await;
+/// interval.tick().await;
+/// interval.tick().await;
+///
+/// // approximately 70ms have elapsed.
+/// }
+/// ```
+pub fn interval_at(start: Instant, period: Duration) -> Interval {
+ assert!(period > Duration::new(0, 0), "`period` must be non-zero.");
+
+ Interval {
+ delay: delay_until(start),
+ period,
+ }
+}
+
+/// Stream returned by [`interval`](interval) and [`interval_at`](interval_at).
+#[derive(Debug)]
+pub struct Interval {
+ /// Future that completes the next time the `Interval` yields a value.
+ delay: Delay,
+
+ /// The duration between values yielded by `Interval`.
+ period: Duration,
+}
+
+impl Interval {
+ #[doc(hidden)] // TODO: document
+ pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<Instant> {
+ // Wait for the delay to be done
+ ready!(Pin::new(&mut self.delay).poll(cx));
+
+ // Get the `now` by looking at the `delay` deadline
+ let now = self.delay.deadline();
+
+ // The next interval value is `duration` after the one that just
+ // yielded.
+ let next = now + self.period;
+ self.delay.reset(next);
+
+ // Return the current instant
+ Poll::Ready(now)
+ }
+
+ /// Completes when the next instant in the interval has been reached.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::time;
+ ///
+ /// use std::time::Duration;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let mut interval = time::interval(Duration::from_millis(10));
+ ///
+ /// interval.tick().await;
+ /// interval.tick().await;
+ /// interval.tick().await;
+ ///
+ /// // approximately 20ms have elapsed.
+ /// }
+ /// ```
+ #[allow(clippy::should_implement_trait)] // TODO: rename (tokio-rs/tokio#1261)
+ pub async fn tick(&mut self) -> Instant {
+ poll_fn(|cx| self.poll_tick(cx)).await
+ }
+}
+
+#[cfg(feature = "stream")]
+impl crate::stream::Stream for Interval {
+ type Item = Instant;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Instant>> {
+ Poll::Ready(Some(ready!(self.poll_tick(cx))))
+ }
+}
diff --git a/third_party/rust/tokio/src/time/mod.rs b/third_party/rust/tokio/src/time/mod.rs
new file mode 100644
index 0000000000..7070d6b257
--- /dev/null
+++ b/third_party/rust/tokio/src/time/mod.rs
@@ -0,0 +1,130 @@
+//! Utilities for tracking time.
+//!
+//! This module provides a number of types for executing code after a set period
+//! of time.
+//!
+//! * `Delay` is a future that does no work and completes at a specific `Instant`
+//! in time.
+//!
+//! * `Interval` is a stream yielding a value at a fixed period. It is
+//! initialized with a `Duration` and repeatedly yields each time the duration
+//! elapses.
+//!
+//! * `Timeout`: Wraps a future or stream, setting an upper bound to the amount
+//! of time it is allowed to execute. If the future or stream does not
+//! complete in time, then it is canceled and an error is returned.
+//!
+//! * `DelayQueue`: A queue where items are returned once the requested delay
+//! has expired.
+//!
+//! These types are sufficient for handling a large number of scenarios
+//! involving time.
+//!
+//! These types must be used from within the context of the `Runtime`.
+//!
+//! # Examples
+//!
+//! Wait 100ms and print "Hello World!"
+//!
+//! ```
+//! use tokio::time::delay_for;
+//!
+//! use std::time::Duration;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! delay_for(Duration::from_millis(100)).await;
+//! println!("100 ms have elapsed");
+//! }
+//! ```
+//!
+//! Require that an operation takes no more than one second. Note that this
+//! uses the `timeout` function from this module.
+//!
+//! ```
+//! use tokio::time::{timeout, Duration};
+//!
+//! async fn long_future() {
+//! // do work here
+//! }
+//!
+//! # async fn dox() {
+//! let res = timeout(Duration::from_secs(1), long_future()).await;
+//!
+//! if res.is_err() {
+//! println!("operation timed out");
+//! }
+//! # }
+//! ```
+
+mod clock;
+pub(crate) use self::clock::Clock;
+#[cfg(feature = "test-util")]
+pub use clock::{advance, pause, resume};
+
+pub mod delay_queue;
+#[doc(inline)]
+pub use delay_queue::DelayQueue;
+
+mod delay;
+pub use delay::{delay_for, delay_until, Delay};
+
+pub(crate) mod driver;
+
+mod error;
+pub use error::Error;
+
+mod instant;
+pub use self::instant::Instant;
+
+mod interval;
+pub use interval::{interval, interval_at, Interval};
+
+mod timeout;
+#[doc(inline)]
+pub use timeout::{timeout, timeout_at, Elapsed, Timeout};
+
+cfg_stream! {
+ mod throttle;
+ pub use throttle::{throttle, Throttle};
+}
+
+mod wheel;
+
+#[cfg(test)]
+#[cfg(not(loom))]
+mod tests;
+
+// Re-export for convenience
+pub use std::time::Duration;
+
+// ===== Internal utils =====
+
+enum Round {
+ Up,
+ Down,
+}
+
+/// Convert a `Duration` to milliseconds, rounding up and saturating at
+/// `u64::MAX`.
+///
+/// Saturating is fine because `u64::MAX` milliseconds is still many millions
+/// of years.
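+///
+/// A sketch of the rounding behavior: `ms(Duration::new(1, 1), Round::Up)`
+/// yields `1001`, while `ms(Duration::new(1, 1), Round::Down)` yields `1000`.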
+#[inline]
+fn ms(duration: Duration, round: Round) -> u64 {
+ const NANOS_PER_MILLI: u32 = 1_000_000;
+ const MILLIS_PER_SEC: u64 = 1_000;
+
+ // Convert the sub-second nanoseconds, rounding as requested.
+ let millis = match round {
+ Round::Up => (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI,
+ Round::Down => duration.subsec_millis(),
+ };
+
+ duration
+ .as_secs()
+ .saturating_mul(MILLIS_PER_SEC)
+ .saturating_add(u64::from(millis))
+}
diff --git a/third_party/rust/tokio/src/time/tests/mod.rs b/third_party/rust/tokio/src/time/tests/mod.rs
new file mode 100644
index 0000000000..4710d470f7
--- /dev/null
+++ b/third_party/rust/tokio/src/time/tests/mod.rs
@@ -0,0 +1,22 @@
+mod test_delay;
+
+use crate::time::{self, Instant};
+use std::time::Duration;
+
+fn assert_send<T: Send>() {}
+fn assert_sync<T: Sync>() {}
+
+#[test]
+fn registration_is_send_and_sync() {
+ use crate::time::driver::Registration;
+
+ assert_send::<Registration>();
+ assert_sync::<Registration>();
+}
+
+#[test]
+#[should_panic]
+fn delay_is_eager() {
+ let when = Instant::now() + Duration::from_millis(100);
+ let _ = time::delay_until(when);
+}
diff --git a/third_party/rust/tokio/src/time/tests/test_delay.rs b/third_party/rust/tokio/src/time/tests/test_delay.rs
new file mode 100644
index 0000000000..f843434be4
--- /dev/null
+++ b/third_party/rust/tokio/src/time/tests/test_delay.rs
@@ -0,0 +1,447 @@
+#![warn(rust_2018_idioms)]
+
+use crate::park::{Park, Unpark};
+use crate::time::driver::{Driver, Entry, Handle};
+use crate::time::Clock;
+use crate::time::{Duration, Instant};
+
+use tokio_test::task;
+use tokio_test::{assert_ok, assert_pending, assert_ready_ok};
+
+use std::sync::Arc;
+
+macro_rules! poll {
+ ($e:expr) => {
+ $e.enter(|cx, e| e.poll_elapsed(cx))
+ };
+}
+
+#[test]
+fn frozen_utility_returns_correct_advanced_duration() {
+ let clock = Clock::new();
+ clock.pause();
+ let start = clock.now();
+
+ clock.advance(ms(10));
+ assert_eq!(clock.now() - start, ms(10));
+}
+
+#[test]
+fn immediate_delay() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ let when = clock.now();
+ let mut e = task::spawn(delay_until(&handle, when));
+
+ assert_ready_ok!(poll!(e));
+
+ assert_ok!(driver.park_timeout(Duration::from_millis(1000)));
+
+ // The mock park advanced the clock by the full requested timeout.
+ assert_eq!(clock.now() - start, ms(1000));
+}
+
+#[test]
+fn delayed_delay_level_0() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ for &i in &[1, 10, 60] {
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, start + ms(i)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(i));
+
+ assert_ready_ok!(poll!(e));
+ }
+}
+
+#[test]
+fn sub_ms_delayed_delay() {
+ let (mut driver, clock, handle) = setup();
+
+ for _ in 0..5 {
+ let deadline = clock.now() + ms(1) + Duration::new(0, 1);
+
+ let mut e = task::spawn(delay_until(&handle, deadline));
+
+ assert_pending!(poll!(e));
+
+ assert_ok!(driver.park());
+ assert_ready_ok!(poll!(e));
+
+ assert!(clock.now() >= deadline);
+
+ clock.advance(Duration::new(0, 1));
+ }
+}
+
+#[test]
+fn delayed_delay_wrapping_level_0() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ assert_ok!(driver.park_timeout(ms(5)));
+ assert_eq!(clock.now() - start, ms(5));
+
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(60)));
+
+ assert_pending!(poll!(e));
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(64));
+ assert_pending!(poll!(e));
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(65));
+
+ assert_ready_ok!(poll!(e));
+}
+
+#[test]
+fn timer_wrapping_with_higher_levels() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Set delay to hit level 1
+ let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(64)));
+ assert_pending!(poll!(e1));
+
+ // Turn a bit
+ assert_ok!(driver.park_timeout(ms(5)));
+
+ // Set timeout such that it will hit level 0, but wrap
+ let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(60)));
+ assert_pending!(poll!(e2));
+
+ // This should result in s1 firing
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(64));
+
+ assert_ready_ok!(poll!(e1));
+ assert_pending!(poll!(e2));
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(65));
+
+ assert_ready_ok!(poll!(e1));
+}
+
+#[test]
+fn delay_with_deadline_in_past() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create `Delay` that elapsed immediately.
+ let mut e = task::spawn(delay_until(&handle, clock.now() - ms(100)));
+
+ // The deadline is already in the past, so the delay is ready immediately.
+ assert_ready_ok!(poll!(e));
+
+ // Turn the timer; it runs for the elapsed time.
+ assert_ok!(driver.park_timeout(ms(1000)));
+
+ // The mock park advanced the clock by the full requested timeout.
+ assert_eq!(clock.now() - start, ms(1000));
+}
+
+#[test]
+fn delayed_delay_level_1() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ // Turn the timer, this will wake up to cascade the timer down.
+ assert_ok!(driver.park_timeout(ms(1000)));
+ assert_eq!(clock.now() - start, ms(192));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ // Turn the timer again
+ assert_ok!(driver.park_timeout(ms(1000)));
+ assert_eq!(clock.now() - start, ms(234));
+
+ // The delay has elapsed.
+ assert_ready_ok!(poll!(e));
+
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(234)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ // Turn the timer with a smaller timeout than the cascade.
+ assert_ok!(driver.park_timeout(ms(100)));
+ assert_eq!(clock.now() - start, ms(100));
+
+ assert_pending!(poll!(e));
+
+ // Turn the timer, this will wake up to cascade the timer down.
+ assert_ok!(driver.park_timeout(ms(1000)));
+ assert_eq!(clock.now() - start, ms(192));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ // Turn the timer again
+ assert_ok!(driver.park_timeout(ms(1000)));
+ assert_eq!(clock.now() - start, ms(234));
+
+ // The delay has elapsed.
+ assert_ready_ok!(poll!(e));
+}
+
+#[test]
+fn concurrently_set_two_timers_second_one_shorter() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(500)));
+ let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(200)));
+
+ // The delay has not elapsed
+ assert_pending!(poll!(e1));
+ assert_pending!(poll!(e2));
+
+ // Delay until a cascade
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(192));
+
+ // Delay until the second timer.
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(200));
+
+ // The shorter delay fires
+ assert_ready_ok!(poll!(e2));
+ assert_pending!(poll!(e1));
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(448));
+
+ assert_pending!(poll!(e1));
+
+ // Turn again, this time the time will advance to the second delay
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(500));
+
+ assert_ready_ok!(poll!(e1));
+}
+
+#[test]
+fn short_delay() {
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ // Turn the timer; the driver advances straight to the deadline.
+ assert_ok!(driver.park());
+
+ // The delay has elapsed.
+ assert_ready_ok!(poll!(e));
+
+ // The time has advanced to the point of the delay elapsing.
+ assert_eq!(clock.now() - start, ms(1));
+}
+
+#[test]
+fn sorta_long_delay_until() {
+ const MIN_5: u64 = 5 * 60 * 1000;
+
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MIN_5)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ let cascades = &[262_144, 262_144 + 9 * 4096, 262_144 + 9 * 4096 + 15 * 64];
+
+ for &elapsed in cascades {
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(elapsed));
+
+ assert_pending!(poll!(e));
+ }
+
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(MIN_5));
+
+ // The delay has elapsed.
+ assert_ready_ok!(poll!(e));
+}
+
+#[test]
+fn very_long_delay() {
+ const MO_5: u64 = 5 * 30 * 24 * 60 * 60 * 1000;
+
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ // Create a `Delay` that elapses in the future
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(MO_5)));
+
+ // The delay has not elapsed.
+ assert_pending!(poll!(e));
+
+ let cascades = &[
+ 12_884_901_888,
+ 12_952_010_752,
+ 12_959_875_072,
+ 12_959_997_952,
+ ];
+
+ for &elapsed in cascades {
+ assert_ok!(driver.park());
+ assert_eq!(clock.now() - start, ms(elapsed));
+
+ assert_pending!(poll!(e));
+ }
+
+ // Turn the timer; the final park advances to the deadline.
+ assert_ok!(driver.park());
+
+ // The time has advanced to the point of the delay elapsing.
+ assert_eq!(clock.now() - start, ms(MO_5));
+
+ // The delay has elapsed.
+ assert_ready_ok!(poll!(e));
+}
+
+#[test]
+fn unpark_is_delayed() {
+ // A special park that will take much longer than the requested duration
+ struct MockPark(Clock);
+
+ struct MockUnpark;
+
+ impl Park for MockPark {
+ type Unpark = MockUnpark;
+ type Error = ();
+
+ fn unpark(&self) -> Self::Unpark {
+ MockUnpark
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ panic!("parking forever");
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ assert_eq!(duration, ms(0));
+ self.0.advance(ms(436));
+ Ok(())
+ }
+ }
+
+ impl Unpark for MockUnpark {
+ fn unpark(&self) {}
+ }
+
+ let clock = Clock::new();
+ clock.pause();
+ let start = clock.now();
+ let mut driver = Driver::new(MockPark(clock.clone()), clock.clone());
+ let handle = driver.handle();
+
+ let mut e1 = task::spawn(delay_until(&handle, clock.now() + ms(100)));
+ let mut e2 = task::spawn(delay_until(&handle, clock.now() + ms(101)));
+ let mut e3 = task::spawn(delay_until(&handle, clock.now() + ms(200)));
+
+ assert_pending!(poll!(e1));
+ assert_pending!(poll!(e2));
+ assert_pending!(poll!(e3));
+
+ assert_ok!(driver.park());
+
+ assert_eq!(clock.now() - start, ms(500));
+
+ assert_ready_ok!(poll!(e1));
+ assert_ready_ok!(poll!(e2));
+ assert_ready_ok!(poll!(e3));
+}
+
+#[test]
+fn set_timeout_at_deadline_greater_than_max_timer() {
+ const YR_1: u64 = 365 * 24 * 60 * 60 * 1000;
+ const YR_5: u64 = 5 * YR_1;
+
+ let (mut driver, clock, handle) = setup();
+ let start = clock.now();
+
+ for _ in 0..5 {
+ assert_ok!(driver.park_timeout(ms(YR_1)));
+ }
+
+ let mut e = task::spawn(delay_until(&handle, clock.now() + ms(1)));
+ assert_pending!(poll!(e));
+
+ assert_ok!(driver.park_timeout(ms(1000)));
+ assert_eq!(clock.now() - start, ms(YR_5) + ms(1));
+
+ assert_ready_ok!(poll!(e));
+}
+
+fn setup() -> (Driver<MockPark>, Clock, Handle) {
+ let clock = Clock::new();
+ clock.pause();
+ let driver = Driver::new(MockPark(clock.clone()), clock.clone());
+ let handle = driver.handle();
+
+ (driver, clock, handle)
+}
+
+fn delay_until(handle: &Handle, when: Instant) -> Arc<Entry> {
+ Entry::new(&handle, when, ms(0))
+}
+
+struct MockPark(Clock);
+
+struct MockUnpark;
+
+impl Park for MockPark {
+ type Unpark = MockUnpark;
+ type Error = ();
+
+ fn unpark(&self) -> Self::Unpark {
+ MockUnpark
+ }
+
+ fn park(&mut self) -> Result<(), Self::Error> {
+ panic!("parking forever");
+ }
+
+ fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> {
+ self.0.advance(duration);
+ Ok(())
+ }
+}
+
+impl Unpark for MockUnpark {
+ fn unpark(&self) {}
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/src/time/throttle.rs b/third_party/rust/tokio/src/time/throttle.rs
new file mode 100644
index 0000000000..435bef6381
--- /dev/null
+++ b/third_party/rust/tokio/src/time/throttle.rs
@@ -0,0 +1,117 @@
+//! Slow down a stream by enforcing a delay between items.
+
+use crate::stream::Stream;
+use crate::time::{Delay, Duration, Instant};
+
+use std::future::Future;
+use std::marker::Unpin;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+use pin_project_lite::pin_project;
+
+/// Slows down a stream by enforcing a delay between items.
+/// Items will be produced no more often than the specified interval.
+///
+/// # Example
+///
+/// Create a throttled stream.
+/// ```rust,no_run
+/// use std::time::Duration;
+/// use tokio::stream::StreamExt;
+/// use tokio::time::throttle;
+///
+/// # async fn dox() {
+/// let mut item_stream = throttle(Duration::from_secs(2), futures::stream::repeat("one"));
+///
+/// loop {
+/// // The string will be produced at most every 2 seconds
+/// println!("{:?}", item_stream.next().await);
+/// }
+/// # }
+/// ```
+pub fn throttle<T>(duration: Duration, stream: T) -> Throttle<T>
+where
+ T: Stream,
+{
+ let delay = if duration == Duration::from_millis(0) {
+ None
+ } else {
+ Some(Delay::new_timeout(Instant::now() + duration, duration))
+ };
+
+ Throttle {
+ delay,
+ duration,
+ has_delayed: true,
+ stream,
+ }
+}
+
+pin_project! {
+ /// Stream for the [`throttle`](throttle) function.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Throttle<T> {
+ // `None` when duration is zero.
+ delay: Option<Delay>,
+ duration: Duration,
+
+ // Set to true when `delay` has returned ready, but `stream` hasn't.
+ has_delayed: bool,
+
+ // The stream to throttle
+ #[pin]
+ stream: T,
+ }
+}
+
+// XXX: are these safe if `T: !Unpin`?
+impl<T: Unpin> Throttle<T> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &T {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this combinator
+ /// is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the stream
+ /// which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so care
+ /// should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> T {
+ self.stream
+ }
+}
+
+impl<T: Stream> Stream for Throttle<T> {
+ type Item = T::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
+ if !self.has_delayed && self.delay.is_some() {
+ ready!(Pin::new(self.as_mut().project().delay.as_mut().unwrap()).poll(cx));
+ *self.as_mut().project().has_delayed = true;
+ }
+
+ let value = ready!(self.as_mut().project().stream.poll_next(cx));
+
+ if value.is_some() {
+ let dur = self.duration;
+ if let Some(ref mut delay) = self.as_mut().project().delay {
+ delay.reset(Instant::now() + dur);
+ }
+
+ *self.as_mut().project().has_delayed = false;
+ }
+
+ Poll::Ready(value)
+ }
+}
diff --git a/third_party/rust/tokio/src/time/timeout.rs b/third_party/rust/tokio/src/time/timeout.rs
new file mode 100644
index 0000000000..401856a881
--- /dev/null
+++ b/third_party/rust/tokio/src/time/timeout.rs
@@ -0,0 +1,185 @@
+//! Allows a future to execute for a maximum amount of time.
+//!
+//! See [`Timeout`] documentation for more details.
+//!
+//! [`Timeout`]: struct@Timeout
+
+use crate::time::{delay_until, Delay, Duration, Instant};
+
+use std::fmt;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{self, Poll};
+
+/// Require a `Future` to complete before the specified duration has elapsed.
+///
+/// If the future completes before the duration has elapsed, then the completed
+/// value is returned. Otherwise, an error is returned and the future is
+/// canceled.
+///
+/// # Cancelation
+///
+/// Cancelling a timeout is done by dropping the future. No additional cleanup
+/// or other work is required.
+///
+/// The original future may be obtained by calling [`Timeout::into_inner`]. This
+/// consumes the `Timeout`.
+///
+/// # Examples
+///
+/// Create a new `Timeout` set to expire in 10 milliseconds.
+///
+/// ```rust
+/// use tokio::time::timeout;
+/// use tokio::sync::oneshot;
+///
+/// use std::time::Duration;
+///
+/// # async fn dox() {
+/// let (tx, rx) = oneshot::channel();
+/// # tx.send(()).unwrap();
+///
+/// // Wrap the future with a `Timeout` set to expire in 10 milliseconds.
+/// if let Err(_) = timeout(Duration::from_millis(10), rx).await {
+/// println!("did not receive value within 10 ms");
+/// }
+/// # }
+/// ```
+pub fn timeout<T>(duration: Duration, future: T) -> Timeout<T>
+where
+ T: Future,
+{
+ let delay = Delay::new_timeout(Instant::now() + duration, duration);
+ Timeout::new_with_delay(future, delay)
+}
+
+/// Require a `Future` to complete before the specified instant in time.
+///
+/// If the future completes before the instant is reached, then the completed
+/// value is returned. Otherwise, an error is returned.
+///
+/// # Cancelation
+///
+/// Cancelling a timeout is done by dropping the future. No additional cleanup
+/// or other work is required.
+///
+/// The original future may be obtained by calling [`Timeout::into_inner`]. This
+/// consumes the `Timeout`.
+///
+/// # Examples
+///
+/// Create a new `Timeout` set to expire in 10 milliseconds.
+///
+/// ```rust
+/// use tokio::time::{Instant, timeout_at};
+/// use tokio::sync::oneshot;
+///
+/// use std::time::Duration;
+///
+/// # async fn dox() {
+/// let (tx, rx) = oneshot::channel();
+/// # tx.send(()).unwrap();
+///
+/// // Wrap the future with a `Timeout` set to expire 10 milliseconds into the
+/// // future.
+/// if let Err(_) = timeout_at(Instant::now() + Duration::from_millis(10), rx).await {
+/// println!("did not receive value within 10 ms");
+/// }
+/// # }
+/// ```
+pub fn timeout_at<T>(deadline: Instant, future: T) -> Timeout<T>
+where
+ T: Future,
+{
+ let delay = delay_until(deadline);
+
+ Timeout {
+ value: future,
+ delay,
+ }
+}
+
+/// Future returned by [`timeout`](timeout) and [`timeout_at`](timeout_at).
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct Timeout<T> {
+ value: T,
+ delay: Delay,
+}
+
+/// Error returned by `Timeout`.
+#[derive(Debug, PartialEq)]
+pub struct Elapsed(());
+
+impl Elapsed {
+ // Used by StreamExt::timeout
+ #[allow(unused)]
+ pub(crate) fn new() -> Self {
+ Elapsed(())
+ }
+}
+
+impl<T> Timeout<T> {
+ pub(crate) fn new_with_delay(value: T, delay: Delay) -> Timeout<T> {
+ Timeout { value, delay }
+ }
+
+ /// Gets a reference to the underlying value in this timeout.
+ pub fn get_ref(&self) -> &T {
+ &self.value
+ }
+
+ /// Gets a mutable reference to the underlying value in this timeout.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+
+ /// Consumes this timeout, returning the underlying value.
+ pub fn into_inner(self) -> T {
+ self.value
+ }
+}
+
+impl<T> Future for Timeout<T>
+where
+ T: Future,
+{
+ type Output = Result<T::Output, Elapsed>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ // First, try polling the future
+
+ // Safety: we never move `self.value`
+ unsafe {
+ let p = self.as_mut().map_unchecked_mut(|me| &mut me.value);
+ if let Poll::Ready(v) = p.poll(cx) {
+ return Poll::Ready(Ok(v));
+ }
+ }
+
+ // Now check the timer
+ // Safety: we never move `self.delay`
+ unsafe {
+ match self.map_unchecked_mut(|me| &mut me.delay).poll(cx) {
+ Poll::Ready(()) => Poll::Ready(Err(Elapsed(()))),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+ }
+}
+
+// ===== impl Elapsed =====
+
+impl fmt::Display for Elapsed {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "deadline has elapsed".fmt(fmt)
+ }
+}
+
+impl std::error::Error for Elapsed {}
+
+impl From<Elapsed> for std::io::Error {
+ fn from(_err: Elapsed) -> std::io::Error {
+ std::io::ErrorKind::TimedOut.into()
+ }
+}
diff --git a/third_party/rust/tokio/src/time/wheel/level.rs b/third_party/rust/tokio/src/time/wheel/level.rs
new file mode 100644
index 0000000000..49f9bfb9cf
--- /dev/null
+++ b/third_party/rust/tokio/src/time/wheel/level.rs
@@ -0,0 +1,255 @@
+use crate::time::wheel::Stack;
+
+use std::fmt;
+
+/// Wheel for a single level in the timer. This wheel contains 64 slots.
+pub(crate) struct Level<T> {
+ level: usize,
+
+ /// Bit field tracking which slots currently contain entries.
+ ///
+ /// Using a bit field to track slots that contain entries allows avoiding a
+ /// scan to find entries. This field is updated when entries are added or
+ /// removed from a slot.
+ ///
+ /// The least-significant bit represents slot zero.
+ occupied: u64,
+
+ /// Slots
+ slot: [T; LEVEL_MULT],
+}
+
+/// Indicates when a slot must be processed next.
+#[derive(Debug)]
+pub(crate) struct Expiration {
+ /// The level containing the slot.
+ pub(crate) level: usize,
+
+ /// The slot index.
+ pub(crate) slot: usize,
+
+ /// The instant at which the slot needs to be processed.
+ pub(crate) deadline: u64,
+}
+
+/// Level multiplier.
+///
+/// This must be a power of two so that slot math reduces to shifts and masks.
+const LEVEL_MULT: usize = 64;
+
+impl<T: Stack> Level<T> {
+ pub(crate) fn new(level: usize) -> Level<T> {
+ // Rust's derived implementations for arrays require that the value
+ // contained by the array be `Copy`. So, here we have to manually
+ // initialize every single slot.
+ macro_rules! s {
+ () => {
+ T::default()
+ };
+ };
+
+ Level {
+ level,
+ occupied: 0,
+ slot: [
+ // It does not look like the necessary traits are
+ // derived for [T; 64].
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ s!(),
+ ],
+ }
+ }
+
+ /// Finds the slot that needs to be processed next and returns the slot and
+ /// `Instant` at which this slot must be processed.
+ pub(crate) fn next_expiration(&self, now: u64) -> Option<Expiration> {
+ // Use the `occupied` bit field to get the index of the next slot that
+ // needs to be processed.
+ let slot = match self.next_occupied_slot(now) {
+ Some(slot) => slot,
+ None => return None,
+ };
+
+ // From the slot index, calculate the `Instant` at which it needs to be
+ // processed. This value *must* be in the future with respect to `now`.
+
+ let level_range = level_range(self.level);
+ let slot_range = slot_range(self.level);
+
+ // TODO: This can probably be simplified w/ power of 2 math
+ let level_start = now - (now % level_range);
+ let deadline = level_start + slot as u64 * slot_range;
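+
+ // For example (a sketch): at level 1, `slot_range` is 64 and
+ // `level_range` is 4096. With `now = 5000`, `level_start` is 4096, so an
+ // occupied slot 20 maps to `deadline = 4096 + 20 * 64 = 5376`.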
+
+ debug_assert!(
+ deadline >= now,
+ "deadline={}; now={}; level={}; slot={}; occupied={:b}",
+ deadline,
+ now,
+ self.level,
+ slot,
+ self.occupied
+ );
+
+ Some(Expiration {
+ level: self.level,
+ slot,
+ deadline,
+ })
+ }
+
+ fn next_occupied_slot(&self, now: u64) -> Option<usize> {
+ if self.occupied == 0 {
+ return None;
+ }
+
+ // Compute the slot index for `now`, then rotate the occupied bit field
+ // so that slot becomes bit zero; the number of trailing zeros is the
+ // distance to the next occupied slot at or after `now`.
+ let now_slot = (now / slot_range(self.level)) as usize;
+ let occupied = self.occupied.rotate_right(now_slot as u32);
+ let zeros = occupied.trailing_zeros() as usize;
+ let slot = (zeros + now_slot) % 64;
+
+ Some(slot)
+ }
+
+ pub(crate) fn add_entry(&mut self, when: u64, item: T::Owned, store: &mut T::Store) {
+ let slot = slot_for(when, self.level);
+
+ self.slot[slot].push(item, store);
+ self.occupied |= occupied_bit(slot);
+ }
+
+ pub(crate) fn remove_entry(&mut self, when: u64, item: &T::Borrowed, store: &mut T::Store) {
+ let slot = slot_for(when, self.level);
+
+ self.slot[slot].remove(item, store);
+
+ if self.slot[slot].is_empty() {
+ // The bit is currently set
+ debug_assert!(self.occupied & occupied_bit(slot) != 0);
+
+ // Unset the bit
+ self.occupied ^= occupied_bit(slot);
+ }
+ }
+
+ pub(crate) fn pop_entry_slot(&mut self, slot: usize, store: &mut T::Store) -> Option<T::Owned> {
+ let ret = self.slot[slot].pop(store);
+
+ if ret.is_some() && self.slot[slot].is_empty() {
+ // The bit is currently set
+ debug_assert!(self.occupied & occupied_bit(slot) != 0);
+
+ self.occupied ^= occupied_bit(slot);
+ }
+
+ ret
+ }
+}
+
+impl<T> fmt::Debug for Level<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Level")
+ .field("occupied", &self.occupied)
+ .finish()
+ }
+}
+
+fn occupied_bit(slot: usize) -> u64 {
+ 1 << slot
+}
+
+fn slot_range(level: usize) -> u64 {
+ LEVEL_MULT.pow(level as u32) as u64
+}
+
+fn level_range(level: usize) -> u64 {
+ LEVEL_MULT as u64 * slot_range(level)
+}
+
+/// Convert a duration (milliseconds) and a level to a slot position
+fn slot_for(duration: u64, level: usize) -> usize {
+ ((duration >> (level * 6)) % LEVEL_MULT as u64) as usize
+}
+
+/*
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_slot_for() {
+ for pos in 1..64 {
+ assert_eq!(pos as usize, slot_for(pos, 0));
+ }
+
+ for level in 1..5 {
+ for pos in level..64 {
+ let a = pos * 64_usize.pow(level as u32);
+ assert_eq!(pos as usize, slot_for(a as u64, level));
+ }
+ }
+ }
+}
+*/
diff --git a/third_party/rust/tokio/src/time/wheel/mod.rs b/third_party/rust/tokio/src/time/wheel/mod.rs
new file mode 100644
index 0000000000..a2ef27fc6c
--- /dev/null
+++ b/third_party/rust/tokio/src/time/wheel/mod.rs
@@ -0,0 +1,314 @@
+mod level;
+pub(crate) use self::level::Expiration;
+use self::level::Level;
+
+mod stack;
+pub(crate) use self::stack::Stack;
+
+use std::borrow::Borrow;
+use std::usize;
+
+/// Timing wheel implementation.
+///
+/// This type provides the hashed timing wheel implementation that backs `Timer`
+/// and `DelayQueue`.
+///
+/// The structure is generic over `T: Stack`. This allows handling timeout data
+/// being stored on the heap or in a slab. In order to support the latter case,
+/// the slab must be passed into each function allowing the implementation to
+/// look up timer entries.
+///
+/// See `Timer` documentation for some implementation notes.
+#[derive(Debug)]
+pub(crate) struct Wheel<T> {
+ /// The number of milliseconds elapsed since the wheel started.
+ elapsed: u64,
+
+ /// Timer wheel.
+ ///
+ /// Levels:
+ ///
+ /// * 1 ms slots / 64 ms range
+ /// * 64 ms slots / ~ 4 sec range
+ /// * ~ 4 sec slots / ~ 4 min range
+ /// * ~ 4 min slots / ~ 4 hr range
+ /// * ~ 4 hr slots / ~ 12 day range
+ /// * ~ 12 day slots / ~ 2 yr range
+ levels: Vec<Level<T>>,
+}
+
+/// Number of levels. Each level has 64 slots. By using 6 levels with 64 slots
+/// each, the timer is able to track time up to 2 years into the future with a
+/// precision of 1 millisecond.
+const NUM_LEVELS: usize = 6;
+
+/// The maximum duration of a delay
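+///
+/// With `NUM_LEVELS = 6` this is `2^36 - 1` milliseconds, or roughly 2.2
+/// years.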
+const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;
+
+#[derive(Debug)]
+pub(crate) enum InsertError {
+ Elapsed,
+ Invalid,
+}
+
+/// Poll expirations from the wheel
+#[derive(Debug, Default)]
+pub(crate) struct Poll {
+ now: u64,
+ expiration: Option<Expiration>,
+}
+
+impl<T> Wheel<T>
+where
+ T: Stack,
+{
+ /// Create a new timing wheel
+ pub(crate) fn new() -> Wheel<T> {
+ let levels = (0..NUM_LEVELS).map(Level::new).collect();
+
+ Wheel { elapsed: 0, levels }
+ }
+
+ /// Return the number of milliseconds that have elapsed since the timing
+ /// wheel's creation.
+ pub(crate) fn elapsed(&self) -> u64 {
+ self.elapsed
+ }
+
+ /// Insert an entry into the timing wheel.
+ ///
+ /// # Arguments
+ ///
+ /// * `when`: is the instant at which the entry should be fired. It is
+ /// represented as the number of milliseconds since the creation
+ /// of the timing wheel.
+ ///
+ /// * `item`: The item to insert into the wheel.
+ ///
+ /// * `store`: The slab or `()` when using heap storage.
+ ///
+ /// # Return
+ ///
+ /// Returns `Ok` when the item is successfully inserted, `Err` otherwise.
+ ///
+ /// `Err(Elapsed)` indicates that `when` represents an instant that has
+ /// already passed. In this case, the caller should fire the timeout
+ /// immediately.
+ ///
+ /// `Err(Invalid)` indicates an invalid `when` argument has been supplied.
+ pub(crate) fn insert(
+ &mut self,
+ when: u64,
+ item: T::Owned,
+ store: &mut T::Store,
+ ) -> Result<(), (T::Owned, InsertError)> {
+ if when <= self.elapsed {
+ return Err((item, InsertError::Elapsed));
+ } else if when - self.elapsed > MAX_DURATION {
+ return Err((item, InsertError::Invalid));
+ }
+
+ // Get the level at which the entry should be stored
+ let level = self.level_for(when);
+
+ self.levels[level].add_entry(when, item, store);
+
+ debug_assert!({
+ self.levels[level]
+ .next_expiration(self.elapsed)
+ .map(|e| e.deadline >= self.elapsed)
+ .unwrap_or(true)
+ });
+
+ Ok(())
+ }
+
+ /// Remove `item` from the timing wheel.
+ pub(crate) fn remove(&mut self, item: &T::Borrowed, store: &mut T::Store) {
+ let when = T::when(item, store);
+ let level = self.level_for(when);
+
+ self.levels[level].remove_entry(when, item, store);
+ }
+
+ /// Instant at which to poll
+ pub(crate) fn poll_at(&self) -> Option<u64> {
+ self.next_expiration().map(|expiration| expiration.deadline)
+ }
+
+ pub(crate) fn poll(&mut self, poll: &mut Poll, store: &mut T::Store) -> Option<T::Owned> {
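+ // Repeatedly take the next expiration due at or before `poll.now`,
+ // drain entries from its slot (entries above level zero cascade down a
+ // level via `poll_expiration`), and advance `elapsed` as each slot is
+ // fully processed.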
+ loop {
+ if poll.expiration.is_none() {
+ poll.expiration = self.next_expiration().and_then(|expiration| {
+ if expiration.deadline > poll.now {
+ None
+ } else {
+ Some(expiration)
+ }
+ });
+ }
+
+ match poll.expiration {
+ Some(ref expiration) => {
+ if let Some(item) = self.poll_expiration(expiration, store) {
+ return Some(item);
+ }
+
+ self.set_elapsed(expiration.deadline);
+ }
+ None => {
+ self.set_elapsed(poll.now);
+ return None;
+ }
+ }
+
+ poll.expiration = None;
+ }
+ }
+
+ /// Returns the instant at which the next timeout expires.
+ fn next_expiration(&self) -> Option<Expiration> {
+ // Check all levels
+ for level in 0..NUM_LEVELS {
+ if let Some(expiration) = self.levels[level].next_expiration(self.elapsed) {
+ // There cannot be any expirations at a higher level that happen
+ // before this one.
+ debug_assert!(self.no_expirations_before(level + 1, expiration.deadline));
+
+ return Some(expiration);
+ }
+ }
+
+ None
+ }
+
+ /// Used for debug assertions
+ fn no_expirations_before(&self, start_level: usize, before: u64) -> bool {
+ let mut res = true;
+
+ for l2 in start_level..NUM_LEVELS {
+ if let Some(e2) = self.levels[l2].next_expiration(self.elapsed) {
+ if e2.deadline < before {
+ res = false;
+ }
+ }
+ }
+
+ res
+ }
+
+ pub(crate) fn poll_expiration(
+ &mut self,
+ expiration: &Expiration,
+ store: &mut T::Store,
+ ) -> Option<T::Owned> {
+ while let Some(item) = self.pop_entry(expiration, store) {
+ if expiration.level == 0 {
+ debug_assert_eq!(T::when(item.borrow(), store), expiration.deadline);
+
+ return Some(item);
+ } else {
+ let when = T::when(item.borrow(), store);
+
+ let next_level = expiration.level - 1;
+
+ self.levels[next_level].add_entry(when, item, store);
+ }
+ }
+
+ None
+ }
+
+ fn set_elapsed(&mut self, when: u64) {
+ assert!(
+ self.elapsed <= when,
+ "elapsed={:?}; when={:?}",
+ self.elapsed,
+ when
+ );
+
+ if when > self.elapsed {
+ self.elapsed = when;
+ }
+ }
+
+ fn pop_entry(&mut self, expiration: &Expiration, store: &mut T::Store) -> Option<T::Owned> {
+ self.levels[expiration.level].pop_entry_slot(expiration.slot, store)
+ }
+
+ fn level_for(&self, when: u64) -> usize {
+ level_for(self.elapsed, when)
+ }
+}
+
+fn level_for(elapsed: u64, when: u64) -> usize {
+ let masked = elapsed ^ when;
+
+ assert!(masked != 0, "elapsed={}; when={}", elapsed, when);
+
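+ // Each level spans 6 bits of the millisecond timestamp: the most
+ // significant differing bit between `elapsed` and `when` selects the
+ // level (bits 0-5 map to level 0, bits 6-11 to level 1, and so on).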
+ let leading_zeros = masked.leading_zeros() as usize;
+ let significant = 63 - leading_zeros;
+ significant / 6
+}
+
+impl Poll {
+ pub(crate) fn new(now: u64) -> Poll {
+ Poll {
+ now,
+ expiration: None,
+ }
+ }
+}
+
+#[cfg(all(test, not(loom)))]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_level_for() {
+ for pos in 1..64 {
+ assert_eq!(
+ 0,
+ level_for(0, pos),
+ "level_for({}) -- binary = {:b}",
+ pos,
+ pos
+ );
+ }
+
+ for level in 1..5 {
+ for pos in level..64 {
+ let a = pos * 64_usize.pow(level as u32);
+ assert_eq!(
+ level,
+ level_for(0, a as u64),
+ "level_for({}) -- binary = {:b}",
+ a,
+ a
+ );
+
+ if pos > level {
+ let a = a - 1;
+ assert_eq!(
+ level,
+ level_for(0, a as u64),
+ "level_for({}) -- binary = {:b}",
+ a,
+ a
+ );
+ }
+
+ if pos < 64 {
+ let a = a + 1;
+ assert_eq!(
+ level,
+ level_for(0, a as u64),
+ "level_for({}) -- binary = {:b}",
+ a,
+ a
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/time/wheel/stack.rs b/third_party/rust/tokio/src/time/wheel/stack.rs
new file mode 100644
index 0000000000..6e55c38ccd
--- /dev/null
+++ b/third_party/rust/tokio/src/time/wheel/stack.rs
@@ -0,0 +1,26 @@
+use std::borrow::Borrow;
+
+/// Abstracts the stack operations needed to track timeouts.
+pub(crate) trait Stack: Default {
+ /// Type of the item stored in the stack
+ type Owned: Borrow<Self::Borrowed>;
+
+ /// Borrowed item
+ type Borrowed;
+
+ /// Item storage, this allows a slab to be used instead of just the heap
+ type Store;
+
+ /// Returns `true` if the stack is empty
+ fn is_empty(&self) -> bool;
+
+ /// Push an item onto the stack
+ fn push(&mut self, item: Self::Owned, store: &mut Self::Store);
+
+ /// Pop an item from the stack
+ fn pop(&mut self, store: &mut Self::Store) -> Option<Self::Owned>;
+
+ fn remove(&mut self, item: &Self::Borrowed, store: &mut Self::Store);
+
+ fn when(item: &Self::Borrowed, store: &Self::Store) -> u64;
+}
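+
+// A sketch of a minimal heap-backed implementation (illustrative only; the
+// real implementations live elsewhere in this crate):
+//
+// #[derive(Default)]
+// struct VecStack(Vec<u64>);
+//
+// impl Stack for VecStack {
+//     type Owned = u64;
+//     type Borrowed = u64;
+//     type Store = ();
+//
+//     fn is_empty(&self) -> bool { self.0.is_empty() }
+//     fn push(&mut self, item: u64, _: &mut ()) { self.0.push(item) }
+//     fn pop(&mut self, _: &mut ()) -> Option<u64> { self.0.pop() }
+//     fn remove(&mut self, item: &u64, _: &mut ()) { self.0.retain(|x| x != item) }
+//     fn when(item: &u64, _: &()) -> u64 { *item }
+// }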
diff --git a/third_party/rust/tokio/src/util/bit.rs b/third_party/rust/tokio/src/util/bit.rs
new file mode 100644
index 0000000000..e61ac2165a
--- /dev/null
+++ b/third_party/rust/tokio/src/util/bit.rs
@@ -0,0 +1,85 @@
+use std::fmt;
+
+#[derive(Clone, Copy)]
+pub(crate) struct Pack {
+ mask: usize,
+ shift: u32,
+}
+
+impl Pack {
+ /// Value is packed in the `width` most-significant bits.
+ pub(crate) const fn most_significant(width: u32) -> Pack {
+ let mask = mask_for(width).reverse_bits();
+
+ Pack {
+ mask,
+ shift: mask.trailing_zeros(),
+ }
+ }
+
+ /// Value is packed in the `width` least-significant bits.
+ pub(crate) const fn least_significant(width: u32) -> Pack {
+ let mask = mask_for(width);
+
+ Pack { mask, shift: 0 }
+ }
+
+ /// Value is packed in the `width` more-significant bits.
+ pub(crate) const fn then(&self, width: u32) -> Pack {
+ let shift = pointer_width() - self.mask.leading_zeros();
+ let mask = mask_for(width) << shift;
+
+ Pack { mask, shift }
+ }
+
+ /// Mask used to unpack value
+ #[cfg(all(test, loom))]
+ pub(crate) const fn mask(&self) -> usize {
+ self.mask
+ }
+
+ /// Width, in bits, dedicated to storing the value.
+ pub(crate) const fn width(&self) -> u32 {
+ pointer_width() - (self.mask >> self.shift).leading_zeros()
+ }
+
+ /// Max representable value
+ pub(crate) const fn max_value(&self) -> usize {
+ (1 << self.width()) - 1
+ }
+
+ pub(crate) fn pack(&self, value: usize, base: usize) -> usize {
+ assert!(value <= self.max_value());
+ (base & !self.mask) | (value << self.shift)
+ }
+
+ pub(crate) fn unpack(&self, src: usize) -> usize {
+ unpack(src, self.mask, self.shift)
+ }
+}
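+
+// A sketch of how `Pack` composes (illustrative values): a 4-bit field in
+// the low bits, then an 8-bit field directly above it.
+//
+// let lo = Pack::least_significant(4);
+// let hi = lo.then(8);
+// let word = hi.pack(0xAB, lo.pack(0xC, 0));
+// assert_eq!(lo.unpack(word), 0xC);
+// assert_eq!(hi.unpack(word), 0xAB);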
+
+impl fmt::Debug for Pack {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "Pack {{ mask: {:b}, shift: {} }}",
+ self.mask, self.shift
+ )
+ }
+}
+
+/// Returns the width of a pointer in bits
+pub(crate) const fn pointer_width() -> u32 {
+ std::mem::size_of::<usize>() as u32 * 8
+}
+
+/// Returns a `usize` with the right-most `n` bits set.
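+///
+/// For example, `mask_for(3)` is `0b111`. The wrapping shift keeps the
+/// result well-defined even when `n` equals the pointer width.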
+pub(crate) const fn mask_for(n: u32) -> usize {
+ let shift = 1usize.wrapping_shl(n - 1);
+ shift | (shift - 1)
+}
+
+/// Unpack a value using a mask & shift
+pub(crate) const fn unpack(src: usize, mask: usize, shift: u32) -> usize {
+ (src & mask) >> shift
+}
diff --git a/third_party/rust/tokio/src/util/linked_list.rs b/third_party/rust/tokio/src/util/linked_list.rs
new file mode 100644
index 0000000000..aa3ce77188
--- /dev/null
+++ b/third_party/rust/tokio/src/util/linked_list.rs
@@ -0,0 +1,585 @@
+//! An intrusive double linked list of data
+//!
+//! The data structure supports tracking pinned nodes. Most of the data
+//! structure's APIs are `unsafe` as they require the caller to ensure the
+//! specified node is actually contained by the list.
+
+use core::fmt;
+use core::mem::ManuallyDrop;
+use core::ptr::NonNull;
+
+/// An intrusive linked list.
+///
+/// Currently, the list is not emptied on drop. It is the caller's
+/// responsibility to ensure the list is empty before dropping it.
+pub(crate) struct LinkedList<T: Link> {
+ /// Linked list head
+ head: Option<NonNull<T::Target>>,
+
+ /// Linked list tail
+ tail: Option<NonNull<T::Target>>,
+}
+
+unsafe impl<T: Link> Send for LinkedList<T> where T::Target: Send {}
+unsafe impl<T: Link> Sync for LinkedList<T> where T::Target: Sync {}
+
+/// Defines how a type is tracked within a linked list.
+///
+/// In order to support storing a single type within multiple lists, accessing
+/// the list pointers is decoupled from the entry type.
+///
+/// # Safety
+///
+/// Implementations must guarantee that `Target` types are pinned in memory. In
+/// other words, when a node is inserted, the value will not be moved as long as
+/// it is stored in the list.
+pub(crate) unsafe trait Link {
+ /// Handle to the list entry.
+ ///
+ /// This is usually a pointer-ish type.
+ type Handle;
+
+ /// Node type
+ type Target;
+
+ /// Convert the handle to a raw pointer without consuming the handle
+ fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target>;
+
+ /// Convert the raw pointer to a handle
+ unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle;
+
+ /// Return the pointers for a node
+ unsafe fn pointers(target: NonNull<Self::Target>) -> NonNull<Pointers<Self::Target>>;
+}
+
+/// Previous / next pointers
+pub(crate) struct Pointers<T> {
+ /// The previous node in the list. null if there is no previous node.
+ prev: Option<NonNull<T>>,
+
+ /// The next node in the list. null if there is no next node.
+ next: Option<NonNull<T>>,
+}
+
+unsafe impl<T: Send> Send for Pointers<T> {}
+unsafe impl<T: Sync> Sync for Pointers<T> {}
+
+// ===== impl LinkedList =====
+
+impl<T: Link> LinkedList<T> {
+ /// Creates an empty linked list
+ pub(crate) fn new() -> LinkedList<T> {
+ LinkedList {
+ head: None,
+ tail: None,
+ }
+ }
+
+ /// Adds an element first in the list.
+ pub(crate) fn push_front(&mut self, val: T::Handle) {
+ // The value should not be dropped, it is being inserted into the list
+ let val = ManuallyDrop::new(val);
+ let ptr = T::as_raw(&*val);
+ assert_ne!(self.head, Some(ptr));
+ unsafe {
+ T::pointers(ptr).as_mut().next = self.head;
+ T::pointers(ptr).as_mut().prev = None;
+
+ if let Some(head) = self.head {
+ T::pointers(head).as_mut().prev = Some(ptr);
+ }
+
+ self.head = Some(ptr);
+
+ if self.tail.is_none() {
+ self.tail = Some(ptr);
+ }
+ }
+ }
+
+ /// Removes the last element from a list and returns it, or None if it is
+ /// empty.
+ pub(crate) fn pop_back(&mut self) -> Option<T::Handle> {
+ unsafe {
+ let last = self.tail?;
+ self.tail = T::pointers(last).as_ref().prev;
+
+ if let Some(prev) = T::pointers(last).as_ref().prev {
+ T::pointers(prev).as_mut().next = None;
+ } else {
+ self.head = None
+ }
+
+ T::pointers(last).as_mut().prev = None;
+ T::pointers(last).as_mut().next = None;
+
+ Some(T::from_raw(last))
+ }
+ }
+
+ /// Returns whether the linked list does not contain any node.
+ pub(crate) fn is_empty(&self) -> bool {
+ if self.head.is_some() {
+ return false;
+ }
+
+ assert!(self.tail.is_none());
+ true
+ }
+
+ /// Removes the specified node from the list
+ ///
+ /// # Safety
+ ///
+ /// The caller **must** ensure that `node` is currently contained by
+ /// `self` or not contained by any other list.
+ pub(crate) unsafe fn remove(&mut self, node: NonNull<T::Target>) -> Option<T::Handle> {
+ if let Some(prev) = T::pointers(node).as_ref().prev {
+ debug_assert_eq!(T::pointers(prev).as_ref().next, Some(node));
+ T::pointers(prev).as_mut().next = T::pointers(node).as_ref().next;
+ } else {
+ if self.head != Some(node) {
+ return None;
+ }
+
+ self.head = T::pointers(node).as_ref().next;
+ }
+
+ if let Some(next) = T::pointers(node).as_ref().next {
+ debug_assert_eq!(T::pointers(next).as_ref().prev, Some(node));
+ T::pointers(next).as_mut().prev = T::pointers(node).as_ref().prev;
+ } else {
+ // This might be the last item in the list
+ if self.tail != Some(node) {
+ return None;
+ }
+
+ self.tail = T::pointers(node).as_ref().prev;
+ }
+
+ T::pointers(node).as_mut().next = None;
+ T::pointers(node).as_mut().prev = None;
+
+ Some(T::from_raw(node))
+ }
+}
+
+impl<T: Link> fmt::Debug for LinkedList<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("LinkedList")
+ .field("head", &self.head)
+ .field("tail", &self.tail)
+ .finish()
+ }
+}
+
+cfg_sync! {
+ impl<T: Link> LinkedList<T> {
+ pub(crate) fn last(&self) -> Option<&T::Target> {
+ let tail = self.tail.as_ref()?;
+ unsafe {
+ Some(&*tail.as_ptr())
+ }
+ }
+ }
+}
+
+// ===== impl Iter =====
+
+cfg_rt_threaded! {
+ pub(crate) struct Iter<'a, T: Link> {
+ curr: Option<NonNull<T::Target>>,
+ _p: core::marker::PhantomData<&'a T>,
+ }
+
+ impl<T: Link> LinkedList<T> {
+ pub(crate) fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ curr: self.head,
+ _p: core::marker::PhantomData,
+ }
+ }
+ }
+
+ impl<'a, T: Link> Iterator for Iter<'a, T> {
+ type Item = &'a T::Target;
+
+ fn next(&mut self) -> Option<&'a T::Target> {
+ let curr = self.curr?;
+ // safety: the pointer references data contained by the list
+ self.curr = unsafe { T::pointers(curr).as_ref() }.next;
+
+ // safety: the value is still owned by the linked list.
+ Some(unsafe { &*curr.as_ptr() })
+ }
+ }
+}
+
+// ===== impl Pointers =====
+
+impl<T> Pointers<T> {
+ /// Create a new set of empty pointers
+ pub(crate) fn new() -> Pointers<T> {
+ Pointers {
+ prev: None,
+ next: None,
+ }
+ }
+}
+
+impl<T> fmt::Debug for Pointers<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Pointers")
+ .field("prev", &self.prev)
+ .field("next", &self.next)
+ .finish()
+ }
+}
+
+#[cfg(test)]
+#[cfg(not(loom))]
+mod tests {
+ use super::*;
+
+ use std::pin::Pin;
+
+ #[derive(Debug)]
+ struct Entry {
+ pointers: Pointers<Entry>,
+ val: i32,
+ }
+
+ unsafe impl<'a> Link for &'a Entry {
+ type Handle = Pin<&'a Entry>;
+ type Target = Entry;
+
+ fn as_raw(handle: &Pin<&'_ Entry>) -> NonNull<Entry> {
+ NonNull::from(handle.get_ref())
+ }
+
+ unsafe fn from_raw(ptr: NonNull<Entry>) -> Pin<&'a Entry> {
+ Pin::new(&*ptr.as_ptr())
+ }
+
+ unsafe fn pointers(mut target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
+ NonNull::from(&mut target.as_mut().pointers)
+ }
+ }
+
+ fn entry(val: i32) -> Pin<Box<Entry>> {
+ Box::pin(Entry {
+ pointers: Pointers::new(),
+ val,
+ })
+ }
+
+ fn ptr(r: &Pin<Box<Entry>>) -> NonNull<Entry> {
+ r.as_ref().get_ref().into()
+ }
+
+ fn collect_list(list: &mut LinkedList<&'_ Entry>) -> Vec<i32> {
+ let mut ret = vec![];
+
+ while let Some(entry) = list.pop_back() {
+ ret.push(entry.val);
+ }
+
+ ret
+ }
+
+ fn push_all<'a>(list: &mut LinkedList<&'a Entry>, entries: &[Pin<&'a Entry>]) {
+ for entry in entries.iter() {
+ list.push_front(*entry);
+ }
+ }
+
+ macro_rules! assert_clean {
+ ($e:ident) => {{
+ assert!($e.pointers.next.is_none());
+ assert!($e.pointers.prev.is_none());
+ }};
+ }
+
+ macro_rules! assert_ptr_eq {
+ ($a:expr, $b:expr) => {{
+ // Deal with mapping a Pin<&mut T> -> Option<NonNull<T>>
+ assert_eq!(Some($a.as_ref().get_ref().into()), $b)
+ }};
+ }
+
+ #[test]
+ fn push_and_drain() {
+ let a = entry(5);
+ let b = entry(7);
+ let c = entry(31);
+
+ let mut list = LinkedList::new();
+ assert!(list.is_empty());
+
+ list.push_front(a.as_ref());
+ assert!(!list.is_empty());
+ list.push_front(b.as_ref());
+ list.push_front(c.as_ref());
+
+ let items: Vec<i32> = collect_list(&mut list);
+ assert_eq!([5, 7, 31].to_vec(), items);
+
+ assert!(list.is_empty());
+ }
+
+ #[test]
+ fn push_pop_push_pop() {
+ let a = entry(5);
+ let b = entry(7);
+
+ let mut list = LinkedList::<&Entry>::new();
+
+ list.push_front(a.as_ref());
+
+ let entry = list.pop_back().unwrap();
+ assert_eq!(5, entry.val);
+ assert!(list.is_empty());
+
+ list.push_front(b.as_ref());
+
+ let entry = list.pop_back().unwrap();
+ assert_eq!(7, entry.val);
+
+ assert!(list.is_empty());
+ assert!(list.pop_back().is_none());
+ }
+
+ #[test]
+ fn remove_by_address() {
+ let a = entry(5);
+ let b = entry(7);
+ let c = entry(31);
+
+ unsafe {
+ // Remove first
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+ assert!(list.remove(ptr(&a)).is_some());
+ assert_clean!(a);
+            // `a` should no longer be there and can't be removed twice
+ assert!(list.remove(ptr(&a)).is_none());
+ assert!(!list.is_empty());
+
+ assert!(list.remove(ptr(&b)).is_some());
+ assert_clean!(b);
+            // `b` should no longer be there and can't be removed twice
+ assert!(list.remove(ptr(&b)).is_none());
+ assert!(!list.is_empty());
+
+ assert!(list.remove(ptr(&c)).is_some());
+ assert_clean!(c);
+            // `c` should no longer be there and can't be removed twice
+ assert!(list.remove(ptr(&c)).is_none());
+ assert!(list.is_empty());
+ }
+
+ unsafe {
+            // Remove the head, leaving the rest of the list linked
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+ assert!(list.remove(ptr(&a)).is_some());
+ assert_clean!(a);
+
+ assert_ptr_eq!(b, list.head);
+ assert_ptr_eq!(c, b.pointers.next);
+ assert_ptr_eq!(b, c.pointers.prev);
+
+ let items = collect_list(&mut list);
+ assert_eq!([31, 7].to_vec(), items);
+ }
+
+ unsafe {
+ // Remove middle
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+ assert!(list.remove(ptr(&b)).is_some());
+ assert_clean!(b);
+
+ assert_ptr_eq!(c, a.pointers.next);
+ assert_ptr_eq!(a, c.pointers.prev);
+
+ let items = collect_list(&mut list);
+ assert_eq!([31, 5].to_vec(), items);
+ }
+
+ unsafe {
+            // Remove last
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[c.as_ref(), b.as_ref(), a.as_ref()]);
+
+ assert!(list.remove(ptr(&c)).is_some());
+ assert_clean!(c);
+
+ assert!(b.pointers.next.is_none());
+ assert_ptr_eq!(b, list.tail);
+
+ let items = collect_list(&mut list);
+ assert_eq!([7, 5].to_vec(), items);
+ }
+
+ unsafe {
+ // Remove first of two
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[b.as_ref(), a.as_ref()]);
+
+ assert!(list.remove(ptr(&a)).is_some());
+
+ assert_clean!(a);
+
+ // a should be no longer there and can't be removed twice
+ assert!(list.remove(ptr(&a)).is_none());
+
+ assert_ptr_eq!(b, list.head);
+ assert_ptr_eq!(b, list.tail);
+
+ assert!(b.pointers.next.is_none());
+ assert!(b.pointers.prev.is_none());
+
+ let items = collect_list(&mut list);
+ assert_eq!([7].to_vec(), items);
+ }
+
+ unsafe {
+ // Remove last of two
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[b.as_ref(), a.as_ref()]);
+
+ assert!(list.remove(ptr(&b)).is_some());
+
+ assert_clean!(b);
+
+ assert_ptr_eq!(a, list.head);
+ assert_ptr_eq!(a, list.tail);
+
+ assert!(a.pointers.next.is_none());
+ assert!(a.pointers.prev.is_none());
+
+ let items = collect_list(&mut list);
+ assert_eq!([5].to_vec(), items);
+ }
+
+ unsafe {
+ // Remove last item
+ let mut list = LinkedList::new();
+
+ push_all(&mut list, &[a.as_ref()]);
+
+ assert!(list.remove(ptr(&a)).is_some());
+ assert_clean!(a);
+
+ assert!(list.head.is_none());
+ assert!(list.tail.is_none());
+ let items = collect_list(&mut list);
+ assert!(items.is_empty());
+ }
+
+ unsafe {
+ // Remove missing
+ let mut list = LinkedList::<&Entry>::new();
+
+ list.push_front(b.as_ref());
+ list.push_front(a.as_ref());
+
+ assert!(list.remove(ptr(&c)).is_none());
+ }
+ }
+
+ #[test]
+ fn iter() {
+ let a = entry(5);
+ let b = entry(7);
+
+ let mut list = LinkedList::<&Entry>::new();
+
+ assert_eq!(0, list.iter().count());
+
+ list.push_front(a.as_ref());
+ list.push_front(b.as_ref());
+
+ let mut i = list.iter();
+ assert_eq!(7, i.next().unwrap().val);
+ assert_eq!(5, i.next().unwrap().val);
+ assert!(i.next().is_none());
+ }
+
+ proptest::proptest! {
+ #[test]
+ fn fuzz_linked_list(ops: Vec<usize>) {
+ run_fuzz(ops);
+ }
+ }
+
+ fn run_fuzz(ops: Vec<usize>) {
+ use std::collections::VecDeque;
+
+ #[derive(Debug)]
+ enum Op {
+ Push,
+ Pop,
+ Remove(usize),
+ }
+
+ let ops = ops
+ .iter()
+ .map(|i| match i % 3 {
+ 0 => Op::Push,
+ 1 => Op::Pop,
+ 2 => Op::Remove(i / 3),
+ _ => unreachable!(),
+ })
+ .collect::<Vec<_>>();
+
+ let mut ll = LinkedList::<&Entry>::new();
+ let mut reference = VecDeque::new();
+
+ let entries: Vec<_> = (0..ops.len()).map(|i| entry(i as i32)).collect();
+
+ for (i, op) in ops.iter().enumerate() {
+ match op {
+ Op::Push => {
+ reference.push_front(i as i32);
+ assert_eq!(entries[i].val, i as i32);
+
+ ll.push_front(entries[i].as_ref());
+ }
+ Op::Pop => {
+ if reference.is_empty() {
+ assert!(ll.is_empty());
+ continue;
+ }
+
+ let v = reference.pop_back();
+ assert_eq!(v, ll.pop_back().map(|v| v.val));
+ }
+ Op::Remove(n) => {
+ if reference.is_empty() {
+ assert!(ll.is_empty());
+ continue;
+ }
+
+ let idx = n % reference.len();
+ let expect = reference.remove(idx).unwrap();
+
+ unsafe {
+ let entry = ll.remove(ptr(&entries[expect as usize])).unwrap();
+ assert_eq!(expect, entry.val);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/util/mod.rs b/third_party/rust/tokio/src/util/mod.rs
new file mode 100644
index 0000000000..a093395c02
--- /dev/null
+++ b/third_party/rust/tokio/src/util/mod.rs
@@ -0,0 +1,24 @@
+cfg_io_driver! {
+ pub(crate) mod bit;
+ pub(crate) mod slab;
+}
+
+#[cfg(any(feature = "sync", feature = "rt-core"))]
+pub(crate) mod linked_list;
+
+#[cfg(any(feature = "rt-threaded", feature = "macros", feature = "stream"))]
+mod rand;
+
+mod wake;
+pub(crate) use wake::{waker_ref, Wake};
+
+cfg_rt_threaded! {
+ pub(crate) use rand::FastRand;
+
+ mod try_lock;
+ pub(crate) use try_lock::TryLock;
+}
+
+#[cfg(any(feature = "macros", feature = "stream"))]
+#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
+pub use rand::thread_rng_n;
diff --git a/third_party/rust/tokio/src/util/pad.rs b/third_party/rust/tokio/src/util/pad.rs
new file mode 100644
index 0000000000..bf0913ca85
--- /dev/null
+++ b/third_party/rust/tokio/src/util/pad.rs
@@ -0,0 +1,52 @@
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+
+#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
+// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
+// lines at a time, so we have to align to 128 bytes rather than 64.
+//
+// Sources:
+// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+pub(crate) struct CachePadded<T> {
+ value: T,
+}
+
+unsafe impl<T: Send> Send for CachePadded<T> {}
+unsafe impl<T: Sync> Sync for CachePadded<T> {}
+
+impl<T> CachePadded<T> {
+ pub(crate) fn new(t: T) -> CachePadded<T> {
+ CachePadded::<T> { value: t }
+ }
+}
+
+impl<T> Deref for CachePadded<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.value
+ }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CachePadded")
+ .field("value", &self.value)
+ .finish()
+ }
+}
+
+impl<T> From<T> for CachePadded<T> {
+ fn from(t: T) -> Self {
+ CachePadded::new(t)
+ }
+}
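+
+// Illustration (added here as a hedged sketch; not part of upstream tokio):
+// two adjacent atomics normally share one cache line, so writers on different
+// cores invalidate each other's line ("false sharing"). Wrapping each value in
+// `CachePadded` gives it a whole aligned slot of its own.
+#[cfg(all(test, target_arch = "x86_64"))]
+mod illustration {
+    use super::CachePadded;
+    use std::mem::{align_of, size_of};
+    use std::sync::atomic::AtomicUsize;
+
+    #[test]
+    fn padded_values_do_not_share_a_cache_line() {
+        // On x86_64, `CachePadded<T>` is aligned to 128 bytes, so its size is
+        // also rounded up to 128 bytes...
+        assert_eq!(align_of::<CachePadded<AtomicUsize>>(), 128);
+        assert_eq!(size_of::<CachePadded<AtomicUsize>>(), 128);
+        // ...and a pair of padded atomics occupies 256 bytes instead of 16,
+        // guaranteeing the two values land on different cache lines.
+        assert_eq!(size_of::<[CachePadded<AtomicUsize>; 2]>(), 256);
+    }
+}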
diff --git a/third_party/rust/tokio/src/util/rand.rs b/third_party/rust/tokio/src/util/rand.rs
new file mode 100644
index 0000000000..4b72b4b110
--- /dev/null
+++ b/third_party/rust/tokio/src/util/rand.rs
@@ -0,0 +1,64 @@
+use std::cell::Cell;
+
+/// Fast random number generator.
+///
+/// Implements xorshift64+: two 32-bit xorshift sequences added together.
+/// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
+/// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+/// This generator passes the SmallCrush suite, part of TestU01 framework:
+/// http://simul.iro.umontreal.ca/testu01/tu01.html
+#[derive(Debug)]
+pub(crate) struct FastRand {
+ one: Cell<u32>,
+ two: Cell<u32>,
+}
+
+impl FastRand {
+ /// Initialize a new, thread-local, fast random number generator.
+ pub(crate) fn new(seed: u64) -> FastRand {
+ let one = (seed >> 32) as u32;
+ let mut two = seed as u32;
+
+ if two == 0 {
+ // This value cannot be zero
+ two = 1;
+ }
+
+ FastRand {
+ one: Cell::new(one),
+ two: Cell::new(two),
+ }
+ }
+
+ pub(crate) fn fastrand_n(&self, n: u32) -> u32 {
+ // This is similar to fastrand() % n, but faster.
+ // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+ let mul = (self.fastrand() as u64).wrapping_mul(n as u64);
+ (mul >> 32) as u32
+ }
+
+ fn fastrand(&self) -> u32 {
+ let mut s1 = self.one.get();
+ let s0 = self.two.get();
+
+ s1 ^= s1 << 17;
+        s1 = s1 ^ s0 ^ (s1 >> 7) ^ (s0 >> 16);
+
+ self.one.set(s0);
+ self.two.set(s1);
+
+ s0.wrapping_add(s1)
+ }
+}
+
+// Used by the select macro and `StreamMap`
+#[cfg(any(feature = "macros", feature = "stream"))]
+#[doc(hidden)]
+#[cfg_attr(not(feature = "macros"), allow(unreachable_pub))]
+pub fn thread_rng_n(n: u32) -> u32 {
+ thread_local! {
+ static THREAD_RNG: FastRand = FastRand::new(crate::loom::rand::seed());
+ }
+
+ THREAD_RNG.with(|rng| rng.fastrand_n(n))
+}
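+
+// Worked example (added as an illustrative sketch; not part of upstream
+// tokio): `fastrand_n` avoids `%` by using the multiply-shift reduction.
+// Since `x < 2^32`, the product `x * n` is below `n * 2^32`, so its upper
+// 32 bits are always a value in `[0, n)`.
+#[cfg(test)]
+mod illustration {
+    #[test]
+    fn multiply_shift_reduction_stays_in_range() {
+        let n: u32 = 10;
+        for &x in &[0u32, 1, 12_345, u32::MAX / 2, u32::MAX] {
+            // Same computation as `fastrand_n`, with fixed inputs instead of
+            // the generator's output.
+            let reduced = ((x as u64).wrapping_mul(n as u64) >> 32) as u32;
+            assert!(reduced < n);
+        }
+        // The extremes map to the ends of the range.
+        assert_eq!((0u64.wrapping_mul(n as u64) >> 32) as u32, 0);
+        assert_eq!(((u32::MAX as u64).wrapping_mul(n as u64) >> 32) as u32, n - 1);
+    }
+}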
diff --git a/third_party/rust/tokio/src/util/slab/addr.rs b/third_party/rust/tokio/src/util/slab/addr.rs
new file mode 100644
index 0000000000..c14e32e909
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/addr.rs
@@ -0,0 +1,154 @@
+//! Tracks the location of an entry in a slab.
+//!
+//! # Index packing
+//!
+//! A slab index consists of multiple indices packed into a single `usize` value
+//! that correspond to different parts of the slab.
+//!
+//! The least significant `MAX_PAGES + INITIAL_PAGE_SIZE.trailing_zeros() + 1`
+//! bits store the address within a shard, starting at 0 for the first slot on
+//! the first page. To index a slot within a shard, we first find the index of
+//! the page that the address falls on, and then the offset of the slot within
+//! that page.
+//!
+//! Since every page is twice as large as the previous page, and all page sizes
+//! are powers of two, we can determine the page index that contains a given
+//! address by shifting the address down by the smallest page size and looking
+//! at how many twos places are necessary to represent that number, telling us what
+//! power of two page size it fits inside of. We can determine the number of
+//! twos places by counting the number of leading zeros (unused twos places) in
+//! the number's binary representation, and subtracting that count from the
+//! total number of bits in a word.
+//!
+//! Once we know what page contains an address, we can subtract the size of all
+//! previous pages from the address to determine the offset within the page.
+//!
+//! After the page address, the next `MAX_THREADS.trailing_zeros() + 1` least
+//! significant bits are the thread ID. These are used to index the array of
+//! shards to find which shard a slot belongs to. If an entry is being removed
+//! and the thread ID of its index matches that of the current thread, we can
+//! use the `remove_local` fast path; otherwise, we have to use the synchronized
+//! `remove_remote` path.
+//!
+//! Finally, a generation value is packed into the index. The `RESERVED_BITS`
+//! most significant bits are left unused, and the remaining bits between the
+//! last bit of the thread ID and the first reserved bit are used to store the
+//! generation. The generation is used as part of an atomic read-modify-write
+//! loop every time a `ScheduledIo`'s readiness is modified, or when the
+//! resource is removed, to guard against the ABA problem.
+//!
+//! Visualized:
+//!
+//! ```text
+//! ┌──────────┬───────────────┬──────────────────┬──────────────────────────┐
+//! │ reserved │  generation   │    thread ID     │         address          │
+//! └▲─────────┴▲──────────────┴▲─────────────────┴▲────────────────────────▲┘
+//!  │          │               │                  │                        │
+//! bits(usize) │               bits(MAX_THREADS)  │                        0
+//!             │                                  │
+//!             bits(usize) - RESERVED             MAX_PAGES + bits(INITIAL_PAGE_SIZE)
+//! ```
+
+use crate::util::bit;
+use crate::util::slab::{Generation, INITIAL_PAGE_SIZE, MAX_PAGES, MAX_THREADS};
+
+use std::usize;
+
+/// References the location at which an entry is stored in a slab.
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct Address(usize);
+
+const PAGE_INDEX_SHIFT: u32 = INITIAL_PAGE_SIZE.trailing_zeros() + 1;
+
+/// Address in the shard
+const SLOT: bit::Pack = bit::Pack::least_significant(MAX_PAGES as u32 + PAGE_INDEX_SHIFT);
+
+/// Masks the thread identifier
+const THREAD: bit::Pack = SLOT.then(MAX_THREADS.trailing_zeros() + 1);
+
+/// Masks the generation
+const GENERATION: bit::Pack = THREAD
+ .then(bit::pointer_width().wrapping_sub(RESERVED.width() + THREAD.width() + SLOT.width()));
+
+// Chosen arbitrarily
+const RESERVED: bit::Pack = bit::Pack::most_significant(5);
+
+impl Address {
+ /// Represents no entry, picked to avoid collision with Mio's internals.
+ /// This value should not be passed to mio.
+ pub(crate) const NULL: usize = usize::MAX >> 1;
+
+ /// Re-exported by `Generation`.
+ pub(super) const GENERATION_WIDTH: u32 = GENERATION.width();
+
+ pub(super) fn new(shard_index: usize, generation: Generation) -> Address {
+ let mut repr = 0;
+
+ repr = SLOT.pack(shard_index, repr);
+ repr = GENERATION.pack(generation.to_usize(), repr);
+
+ Address(repr)
+ }
+
+ /// Convert from a `usize` representation.
+ pub(crate) fn from_usize(src: usize) -> Address {
+ assert_ne!(src, Self::NULL);
+
+ Address(src)
+ }
+
+ /// Convert to a `usize` representation
+ pub(crate) fn to_usize(self) -> usize {
+ self.0
+ }
+
+ pub(crate) fn generation(self) -> Generation {
+ Generation::new(GENERATION.unpack(self.0))
+ }
+
+ /// Returns the page index
+ pub(super) fn page(self) -> usize {
+ // Since every page is twice as large as the previous page, and all page
+ // sizes are powers of two, we can determine the page index that
+ // contains a given address by shifting the address down by the smallest
+        // page size and looking at how many twos places are necessary to
+        // represent that number, telling us what power of two page size it fits inside
+ // of. We can determine the number of twos places by counting the number
+ // of leading zeros (unused twos places) in the number's binary
+ // representation, and subtracting that count from the total number of
+ // bits in a word.
+ let slot_shifted = (self.slot() + INITIAL_PAGE_SIZE) >> PAGE_INDEX_SHIFT;
+ (bit::pointer_width() - slot_shifted.leading_zeros()) as usize
+ }
+
+ /// Returns the slot index
+ pub(super) fn slot(self) -> usize {
+ SLOT.unpack(self.0)
+ }
+}
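+
+// Worked example (added as an illustrative sketch; not part of upstream
+// tokio): with `INITIAL_PAGE_SIZE = 32`, pages hold 32, 64, 128, ... slots,
+// so shard addresses 0..32 live on page 0, 32..96 on page 1, and 96..224 on
+// page 2. `page()` recovers this with one shift and a leading-zeros count
+// instead of looping over page sizes.
+#[cfg(all(test, not(loom)))]
+mod page_index_illustration {
+    use super::*;
+
+    fn page_of(slot: usize) -> usize {
+        Address::new(slot, Generation::new(0)).page()
+    }
+
+    #[test]
+    fn page_boundaries() {
+        assert_eq!(page_of(0), 0);
+        assert_eq!(page_of(31), 0); // last slot of the 32-slot first page
+        assert_eq!(page_of(32), 1); // first slot of the 64-slot second page
+        assert_eq!(page_of(95), 1);
+        assert_eq!(page_of(96), 2); // 32 + 64 slots precede page 2
+    }
+}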
+
+#[cfg(test)]
+cfg_not_loom! {
+ use proptest::proptest;
+
+ #[test]
+ fn test_pack_format() {
+ assert_eq!(5, RESERVED.width());
+ assert_eq!(0b11111, RESERVED.max_value());
+ }
+
+ proptest! {
+ #[test]
+ fn address_roundtrips(
+ slot in 0usize..SLOT.max_value(),
+ generation in 0usize..Generation::MAX,
+ ) {
+ let address = Address::new(slot, Generation::new(generation));
+ // Round trip
+ let address = Address::from_usize(address.to_usize());
+
+ assert_eq!(address.slot(), slot);
+ assert_eq!(address.generation().to_usize(), generation);
+ }
+ }
+}
diff --git a/third_party/rust/tokio/src/util/slab/entry.rs b/third_party/rust/tokio/src/util/slab/entry.rs
new file mode 100644
index 0000000000..2e0b10b0fd
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/entry.rs
@@ -0,0 +1,7 @@
+use crate::util::slab::Generation;
+
+pub(crate) trait Entry: Default {
+ fn generation(&self) -> Generation;
+
+ fn reset(&self, generation: Generation) -> bool;
+}
diff --git a/third_party/rust/tokio/src/util/slab/generation.rs b/third_party/rust/tokio/src/util/slab/generation.rs
new file mode 100644
index 0000000000..4b16b2caf6
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/generation.rs
@@ -0,0 +1,32 @@
+use crate::util::bit;
+use crate::util::slab::Address;
+
+/// A mutation identifier for a slot in the slab. The generation helps prevent
+/// accessing an entry with an outdated token.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
+pub(crate) struct Generation(usize);
+
+impl Generation {
+ pub(crate) const WIDTH: u32 = Address::GENERATION_WIDTH;
+
+ pub(super) const MAX: usize = bit::mask_for(Address::GENERATION_WIDTH);
+
+ /// Create a new generation
+ ///
+ /// # Panics
+ ///
+ /// Panics if `value` is greater than max generation.
+ pub(crate) fn new(value: usize) -> Generation {
+ assert!(value <= Self::MAX);
+ Generation(value)
+ }
+
+ /// Returns the next generation value
+ pub(crate) fn next(self) -> Generation {
+ Generation((self.0 + 1) & Self::MAX)
+ }
+
+ pub(crate) fn to_usize(self) -> usize {
+ self.0
+ }
+}
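+
+// Sketch (added for illustration; not part of upstream tokio): the generation
+// is a fixed-width counter that wraps to zero rather than growing without
+// bound. That is all the ABA guard needs: an address is stale as soon as its
+// generation no longer matches the slot's.
+#[cfg(test)]
+mod illustration {
+    use super::Generation;
+
+    #[test]
+    fn next_wraps_to_zero() {
+        assert_eq!(Generation::new(0).next().to_usize(), 1);
+        // Advancing the maximum generation wraps around instead of panicking.
+        assert_eq!(Generation::new(Generation::MAX).next().to_usize(), 0);
+    }
+}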
diff --git a/third_party/rust/tokio/src/util/slab/mod.rs b/third_party/rust/tokio/src/util/slab/mod.rs
new file mode 100644
index 0000000000..5082970507
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/mod.rs
@@ -0,0 +1,107 @@
+//! A lock-free concurrent slab.
+
+mod addr;
+pub(crate) use addr::Address;
+
+mod entry;
+pub(crate) use entry::Entry;
+
+mod generation;
+pub(crate) use generation::Generation;
+
+mod page;
+
+mod shard;
+use shard::Shard;
+
+mod slot;
+use slot::Slot;
+
+mod stack;
+use stack::TransferStack;
+
+#[cfg(all(loom, test))]
+mod tests;
+
+use crate::loom::sync::Mutex;
+use crate::util::bit;
+
+use std::fmt;
+
+#[cfg(target_pointer_width = "64")]
+const MAX_THREADS: usize = 4096;
+
+#[cfg(target_pointer_width = "32")]
+const MAX_THREADS: usize = 2048;
+
+/// Max number of pages per slab
+const MAX_PAGES: usize = bit::pointer_width() as usize / 4;
+
+cfg_not_loom! {
+ /// Size of first page
+ const INITIAL_PAGE_SIZE: usize = 32;
+}
+
+cfg_loom! {
+ const INITIAL_PAGE_SIZE: usize = 2;
+}
+
+/// A sharded slab.
+pub(crate) struct Slab<T> {
+    // Single shard for now. Eventually there will be more.
+ shard: Shard<T>,
+ local: Mutex<()>,
+}
+
+unsafe impl<T: Send> Send for Slab<T> {}
+unsafe impl<T: Sync> Sync for Slab<T> {}
+
+impl<T: Entry> Slab<T> {
+ /// Returns a new slab with the default configuration parameters.
+ pub(crate) fn new() -> Slab<T> {
+ Slab {
+ shard: Shard::new(),
+ local: Mutex::new(()),
+ }
+ }
+
+    /// Allocates a value into the slab, returning a key that can be used to
+ /// access it.
+ ///
+ /// If this function returns `None`, then the shard for the current thread
+ /// is full and no items can be added until some are removed, or the maximum
+ /// number of shards has been reached.
+ pub(crate) fn alloc(&self) -> Option<Address> {
+ // we must lock the slab to alloc an item.
+ let _local = self.local.lock().unwrap();
+ self.shard.alloc()
+ }
+
+ /// Removes the value associated with the given key from the slab.
+ pub(crate) fn remove(&self, idx: Address) {
+ // try to lock the slab so that we can use `remove_local`.
+ let lock = self.local.try_lock();
+
+ // if we were able to lock the slab, we are "local" and can use the fast
+ // path; otherwise, we will use `remove_remote`.
+ if lock.is_ok() {
+ self.shard.remove_local(idx)
+ } else {
+ self.shard.remove_remote(idx)
+ }
+ }
+
+ /// Return a reference to the value associated with the given key.
+ ///
+ /// If the slab does not contain a value for the given key, `None` is
+ /// returned instead.
+ pub(crate) fn get(&self, token: Address) -> Option<&T> {
+ self.shard.get(token)
+ }
+}
+
+impl<T> fmt::Debug for Slab<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Slab").field("shard", &self.shard).finish()
+ }
+}
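+
+// Sketch (added for illustration; not part of upstream tokio): a minimal
+// `Entry` that just tracks its generation, showing the alloc/get/remove
+// lifecycle and how removal invalidates outstanding addresses. The real entry
+// type (`ScheduledIo`) packs its generation into the readiness word and does
+// the update atomically; this version is simplified and not safe against
+// concurrent resets.
+#[cfg(all(test, not(loom)))]
+mod illustration {
+    use super::{Entry, Generation, Slab};
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    #[derive(Default)]
+    struct GenEntry {
+        generation: AtomicUsize,
+    }
+
+    impl Entry for GenEntry {
+        fn generation(&self) -> Generation {
+            Generation::new(self.generation.load(Ordering::SeqCst))
+        }
+
+        fn reset(&self, generation: Generation) -> bool {
+            // Refuse to reset through a stale address; on success, advance
+            // the generation so older addresses stop matching.
+            if self.generation.load(Ordering::SeqCst) != generation.to_usize() {
+                return false;
+            }
+            self.generation
+                .store(generation.next().to_usize(), Ordering::SeqCst);
+            true
+        }
+    }
+
+    #[test]
+    fn remove_invalidates_outstanding_addresses() {
+        let slab = Slab::<GenEntry>::new();
+
+        let addr = slab.alloc().expect("slab has room");
+        assert_eq!(
+            slab.get(addr).expect("slot is live").generation(),
+            addr.generation()
+        );
+
+        // `remove` resets the slot, bumping its generation, so the old
+        // address no longer matches the entry stored there. Callers compare
+        // generations (as `ScheduledIo` does) to reject stale addresses.
+        slab.remove(addr);
+        assert_ne!(
+            slab.get(addr).expect("slot storage is reused, not freed").generation(),
+            addr.generation()
+        );
+    }
+}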
diff --git a/third_party/rust/tokio/src/util/slab/page.rs b/third_party/rust/tokio/src/util/slab/page.rs
new file mode 100644
index 0000000000..0000e934de
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/page.rs
@@ -0,0 +1,187 @@
+use crate::loom::cell::UnsafeCell;
+use crate::util::slab::{Address, Entry, Slot, TransferStack, INITIAL_PAGE_SIZE};
+
+use std::fmt;
+
+/// Data accessed only by the thread that owns the shard.
+pub(crate) struct Local {
+ head: UnsafeCell<usize>,
+}
+
+/// Data accessed by any thread.
+pub(crate) struct Shared<T> {
+ remote: TransferStack,
+ size: usize,
+ prev_sz: usize,
+ slab: UnsafeCell<Option<Box<[Slot<T>]>>>,
+}
+
+/// Returns the size of the page at index `n`
+pub(super) fn size(n: usize) -> usize {
+ INITIAL_PAGE_SIZE << n
+}
+
+impl Local {
+ pub(crate) fn new() -> Self {
+ Self {
+ head: UnsafeCell::new(0),
+ }
+ }
+
+ fn head(&self) -> usize {
+ self.head.with(|head| unsafe { *head })
+ }
+
+ fn set_head(&self, new_head: usize) {
+ self.head.with_mut(|head| unsafe {
+ *head = new_head;
+ })
+ }
+}
+
+impl<T: Entry> Shared<T> {
+ pub(crate) fn new(size: usize, prev_sz: usize) -> Shared<T> {
+ Self {
+ prev_sz,
+ size,
+ remote: TransferStack::new(),
+ slab: UnsafeCell::new(None),
+ }
+ }
+
+    /// Allocates storage for this page if it does not already exist.
+ ///
+ /// This requires unique access to the page (e.g. it is called from the
+ /// thread that owns the page, or, in the case of `SingleShard`, while the
+ /// lock is held). In order to indicate this, a reference to the page's
+ /// `Local` data is taken by this function; the `Local` argument is not
+ /// actually used, but requiring it ensures that this is only called when
+ /// local access is held.
+ #[cold]
+ fn alloc_page(&self, _: &Local) {
+ debug_assert!(self.slab.with(|s| unsafe { (*s).is_none() }));
+
+ let mut slab = Vec::with_capacity(self.size);
+ slab.extend((1..self.size).map(Slot::new));
+ slab.push(Slot::new(Address::NULL));
+
+ self.slab.with_mut(|s| {
+ // this mut access is safe — it only occurs to initially
+ // allocate the page, which only happens on this thread; if the
+ // page has not yet been allocated, other threads will not try
+ // to access it yet.
+ unsafe {
+ *s = Some(slab.into_boxed_slice());
+ }
+ });
+ }
+
+ pub(crate) fn alloc(&self, local: &Local) -> Option<Address> {
+ let head = local.head();
+
+ // are there any items on the local free list? (fast path)
+ let head = if head < self.size {
+ head
+ } else {
+ // if the local free list is empty, pop all the items on the remote
+ // free list onto the local free list.
+ self.remote.pop_all()?
+ };
+
+ // if the head is still null, both the local and remote free lists are
+ // empty --- we can't fit any more items on this page.
+ if head == Address::NULL {
+ return None;
+ }
+
+ // do we need to allocate storage for this page?
+ let page_needs_alloc = self.slab.with(|s| unsafe { (*s).is_none() });
+ if page_needs_alloc {
+ self.alloc_page(local);
+ }
+
+ let gen = self.slab.with(|slab| {
+ let slab = unsafe { &*(slab) }
+ .as_ref()
+ .expect("page must have been allocated to alloc!");
+
+ let slot = &slab[head];
+
+ local.set_head(slot.next());
+ slot.generation()
+ });
+
+ let index = head + self.prev_sz;
+
+ Some(Address::new(index, gen))
+ }
+
+ pub(crate) fn get(&self, addr: Address) -> Option<&T> {
+ let page_offset = addr.slot() - self.prev_sz;
+
+ self.slab
+ .with(|slab| unsafe { &*slab }.as_ref()?.get(page_offset))
+ .map(|slot| slot.get())
+ }
+
+ pub(crate) fn remove_local(&self, local: &Local, addr: Address) {
+ let offset = addr.slot() - self.prev_sz;
+
+ self.slab.with(|slab| {
+ let slab = unsafe { &*slab }.as_ref();
+
+ let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
+ slot
+ } else {
+ return;
+ };
+
+ if slot.reset(addr.generation()) {
+ slot.set_next(local.head());
+ local.set_head(offset);
+ }
+ })
+ }
+
+ pub(crate) fn remove_remote(&self, addr: Address) {
+ let offset = addr.slot() - self.prev_sz;
+
+ self.slab.with(|slab| {
+ let slab = unsafe { &*slab }.as_ref();
+
+ let slot = if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
+ slot
+ } else {
+ return;
+ };
+
+ if !slot.reset(addr.generation()) {
+ return;
+ }
+
+ self.remote.push(offset, |next| slot.set_next(next));
+ })
+ }
+}
+
+impl fmt::Debug for Local {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.head.with(|head| {
+ let head = unsafe { *head };
+ f.debug_struct("Local")
+ .field("head", &format_args!("{:#0x}", head))
+ .finish()
+ })
+ }
+}
+
+impl<T> fmt::Debug for Shared<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Shared")
+ .field("remote", &self.remote)
+ .field("prev_sz", &self.prev_sz)
+ .field("size", &self.size)
+ // .field("slab", &self.slab)
+ .finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/util/slab/shard.rs b/third_party/rust/tokio/src/util/slab/shard.rs
new file mode 100644
index 0000000000..eaca6f656a
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/shard.rs
@@ -0,0 +1,105 @@
+use crate::util::slab::{page, Address, Entry, MAX_PAGES};
+
+use std::fmt;
+
+// ┌─────────────┐      ┌────────┐
+// │ page 1      │      │        │
+// ├─────────────┤ ┌───▶│  next──┼─┐
+// │ page 2      │ │    ├────────┤ │
+// │             │ │    │XXXXXXXX│ │
+// │ local_free──┼─┘    ├────────┤ │
+// │ global_free─┼─┐    │        │◀┘
+// ├─────────────┤ └───▶│  next──┼─┐
+// │ page 3      │      ├────────┤ │
+// └─────────────┘      │XXXXXXXX│ │
+//       ...            ├────────┤ │
+// ┌─────────────┐      │XXXXXXXX│ │
+// │ page n      │      ├────────┤ │
+// └─────────────┘      │        │◀┘
+//                      │  next──┼───▶
+//                      ├────────┤
+//                      │XXXXXXXX│
+//                      └────────┘
+//                          ...
+pub(super) struct Shard<T> {
+ /// The local free list for each page.
+ ///
+ /// These are only ever accessed from this shard's thread, so they are
+ /// stored separately from the shared state for the page that can be
+ /// accessed concurrently, to minimize false sharing.
+ local: Box<[page::Local]>,
+ /// The shared state for each page in this shard.
+ ///
+ /// This consists of the page's metadata (size, previous size), remote free
+ /// list, and a pointer to the actual array backing that page.
+ shared: Box<[page::Shared<T>]>,
+}
+
+impl<T: Entry> Shard<T> {
+ pub(super) fn new() -> Shard<T> {
+ let mut total_sz = 0;
+ let shared = (0..MAX_PAGES)
+ .map(|page_num| {
+ let sz = page::size(page_num);
+ let prev_sz = total_sz;
+ total_sz += sz;
+ page::Shared::new(sz, prev_sz)
+ })
+ .collect();
+
+ let local = (0..MAX_PAGES).map(|_| page::Local::new()).collect();
+
+ Shard { local, shared }
+ }
+
+ pub(super) fn alloc(&self) -> Option<Address> {
+ // Can we fit the value into an existing page?
+ for (page_idx, page) in self.shared.iter().enumerate() {
+ let local = self.local(page_idx);
+
+ if let Some(page_offset) = page.alloc(local) {
+ return Some(page_offset);
+ }
+ }
+
+ None
+ }
+
+ pub(super) fn get(&self, addr: Address) -> Option<&T> {
+ let page_idx = addr.page();
+
+        if page_idx >= self.shared.len() {
+ return None;
+ }
+
+ self.shared[page_idx].get(addr)
+ }
+
+ /// Remove an item on the shard's local thread.
+ pub(super) fn remove_local(&self, addr: Address) {
+ let page_idx = addr.page();
+
+ if let Some(page) = self.shared.get(page_idx) {
+ page.remove_local(self.local(page_idx), addr);
+ }
+ }
+
+ /// Remove an item, while on a different thread from the shard's local thread.
+ pub(super) fn remove_remote(&self, addr: Address) {
+ if let Some(page) = self.shared.get(addr.page()) {
+ page.remove_remote(addr);
+ }
+ }
+
+ fn local(&self, i: usize) -> &page::Local {
+ &self.local[i]
+ }
+}
+
+impl<T> fmt::Debug for Shard<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Shard")
+ .field("shared", &self.shared)
+ .finish()
+ }
+}
diff --git a/third_party/rust/tokio/src/util/slab/slot.rs b/third_party/rust/tokio/src/util/slab/slot.rs
new file mode 100644
index 0000000000..0608b26189
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/slot.rs
@@ -0,0 +1,42 @@
+use crate::loom::cell::UnsafeCell;
+use crate::util::slab::{Entry, Generation};
+
+/// Stores an entry in the slab.
+pub(super) struct Slot<T> {
+ next: UnsafeCell<usize>,
+ entry: T,
+}
+
+impl<T: Entry> Slot<T> {
+ /// Initialize a new `Slot` linked to `next`.
+ ///
+ /// The entry is initialized to a default value.
+ pub(super) fn new(next: usize) -> Slot<T> {
+ Slot {
+ next: UnsafeCell::new(next),
+ entry: T::default(),
+ }
+ }
+
+ pub(super) fn get(&self) -> &T {
+ &self.entry
+ }
+
+ pub(super) fn generation(&self) -> Generation {
+ self.entry.generation()
+ }
+
+ pub(super) fn reset(&self, generation: Generation) -> bool {
+ self.entry.reset(generation)
+ }
+
+ pub(super) fn next(&self) -> usize {
+ self.next.with(|next| unsafe { *next })
+ }
+
+ pub(super) fn set_next(&self, next: usize) {
+ self.next.with_mut(|n| unsafe {
+ (*n) = next;
+ })
+ }
+}
diff --git a/third_party/rust/tokio/src/util/slab/stack.rs b/third_party/rust/tokio/src/util/slab/stack.rs
new file mode 100644
index 0000000000..0ae0d71006
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/stack.rs
@@ -0,0 +1,58 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::util::slab::Address;
+
+use std::fmt;
+use std::sync::atomic::Ordering;
+use std::usize;
+
+pub(super) struct TransferStack {
+ head: AtomicUsize,
+}
+
+impl TransferStack {
+ pub(super) fn new() -> Self {
+ Self {
+ head: AtomicUsize::new(Address::NULL),
+ }
+ }
+
+ pub(super) fn pop_all(&self) -> Option<usize> {
+ let val = self.head.swap(Address::NULL, Ordering::Acquire);
+
+ if val == Address::NULL {
+ None
+ } else {
+ Some(val)
+ }
+ }
+
+ pub(super) fn push(&self, value: usize, before: impl Fn(usize)) {
+ let mut next = self.head.load(Ordering::Relaxed);
+
+ loop {
+ before(next);
+
+ match self
+ .head
+ .compare_exchange(next, value, Ordering::AcqRel, Ordering::Acquire)
+ {
+ // lost the race!
+ Err(actual) => next = actual,
+ Ok(_) => return,
+ }
+ }
+ }
+}
+
+impl fmt::Debug for TransferStack {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Loom likes to dump all its internal state in `fmt::Debug` impls, so
+ // we override this to just print the current value in tests.
+ f.debug_struct("TransferStack")
+ .field(
+ "head",
+ &format_args!("{:#x}", self.head.load(Ordering::Relaxed)),
+ )
+ .finish()
+ }
+}
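+
+// Sketch (added for illustration; not part of upstream tokio): each pushed
+// value is an index, and the stack threads its list through caller-owned
+// "next" slots via the `before` callback. `pop_all` detaches the whole chain
+// at once and hands back the head index. Here plain `Cell`s stand in for the
+// slots' next fields.
+#[cfg(all(test, not(loom)))]
+mod illustration {
+    use super::TransferStack;
+    use crate::util::slab::Address;
+    use std::cell::Cell;
+
+    #[test]
+    fn push_then_drain() {
+        let stack = TransferStack::new();
+        let next: [Cell<usize>; 3] = Default::default();
+
+        for i in 0..3 {
+            // `before` records the previous head in our slot before the
+            // compare-exchange publishes `i` as the new head.
+            stack.push(i, |prev| next[i].set(prev));
+        }
+
+        // The most recent push is the head...
+        let head = stack.pop_all().expect("stack is non-empty");
+        assert_eq!(head, 2);
+
+        // ...and following the recorded `next` values walks back to the
+        // first push, ending at the NULL sentinel.
+        assert_eq!(next[head].get(), 1);
+        assert_eq!(next[1].get(), 0);
+        assert_eq!(next[0].get(), Address::NULL);
+
+        // After `pop_all`, the stack is empty again.
+        assert!(stack.pop_all().is_none());
+    }
+}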
diff --git a/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs b/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs
new file mode 100644
index 0000000000..48e94f0034
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/loom_slab.rs
@@ -0,0 +1,327 @@
+use crate::io::driver::ScheduledIo;
+use crate::util::slab::{Address, Slab};
+
+use loom::sync::{Arc, Condvar, Mutex};
+use loom::thread;
+
+#[test]
+fn local_remove() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+
+ let s = slab.clone();
+ let t1 = thread::spawn(move || {
+ let idx = store_val(&s, 1);
+ assert_eq!(get_val(&s, idx), Some(1));
+ s.remove(idx);
+ assert_eq!(get_val(&s, idx), None);
+ let idx = store_val(&s, 2);
+ assert_eq!(get_val(&s, idx), Some(2));
+ s.remove(idx);
+ assert_eq!(get_val(&s, idx), None);
+ });
+
+ let s = slab.clone();
+ let t2 = thread::spawn(move || {
+ let idx = store_val(&s, 3);
+ assert_eq!(get_val(&s, idx), Some(3));
+ s.remove(idx);
+ assert_eq!(get_val(&s, idx), None);
+ let idx = store_val(&s, 4);
+ s.remove(idx);
+ assert_eq!(get_val(&s, idx), None);
+ });
+
+ let s = slab;
+ let idx1 = store_val(&s, 5);
+ assert_eq!(get_val(&s, idx1), Some(5));
+ let idx2 = store_val(&s, 6);
+ assert_eq!(get_val(&s, idx2), Some(6));
+ s.remove(idx1);
+ assert_eq!(get_val(&s, idx1), None);
+ assert_eq!(get_val(&s, idx2), Some(6));
+ s.remove(idx2);
+ assert_eq!(get_val(&s, idx2), None);
+
+ t1.join().expect("thread 1 should not panic");
+ t2.join().expect("thread 2 should not panic");
+ });
+}
+
+#[test]
+fn remove_remote() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+
+ let idx1 = store_val(&slab, 1);
+ assert_eq!(get_val(&slab, idx1), Some(1));
+
+ let idx2 = store_val(&slab, 2);
+ assert_eq!(get_val(&slab, idx2), Some(2));
+
+ let idx3 = store_val(&slab, 3);
+ assert_eq!(get_val(&slab, idx3), Some(3));
+
+ let s = slab.clone();
+ let t1 = thread::spawn(move || {
+ assert_eq!(get_val(&s, idx2), Some(2));
+ s.remove(idx2);
+ assert_eq!(get_val(&s, idx2), None);
+ });
+
+ let s = slab.clone();
+ let t2 = thread::spawn(move || {
+ assert_eq!(get_val(&s, idx3), Some(3));
+ s.remove(idx3);
+ assert_eq!(get_val(&s, idx3), None);
+ });
+
+ t1.join().expect("thread 1 should not panic");
+ t2.join().expect("thread 2 should not panic");
+
+ assert_eq!(get_val(&slab, idx1), Some(1));
+ assert_eq!(get_val(&slab, idx2), None);
+ assert_eq!(get_val(&slab, idx3), None);
+ });
+}
+
+#[test]
+fn remove_remote_and_reuse() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+
+ let idx1 = store_val(&slab, 1);
+ let idx2 = store_val(&slab, 2);
+
+ assert_eq!(get_val(&slab, idx1), Some(1));
+ assert_eq!(get_val(&slab, idx2), Some(2));
+
+ let s = slab.clone();
+ let t1 = thread::spawn(move || {
+ s.remove(idx1);
+ let value = get_val(&s, idx1);
+
+ // We may or may not see the new value yet, depending on when
+ // this occurs, but we must either see the new value or `None`;
+ // the old value has been removed!
+ assert!(value == None || value == Some(3));
+ });
+
+ let idx3 = store_when_free(&slab, 3);
+ t1.join().expect("thread 1 should not panic");
+
+ assert_eq!(get_val(&slab, idx3), Some(3));
+ assert_eq!(get_val(&slab, idx2), Some(2));
+ });
+}
+
+#[test]
+fn concurrent_alloc_remove() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+ let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+ let slab2 = slab.clone();
+ let pair2 = pair.clone();
+ let remover = thread::spawn(move || {
+ let (lock, cvar) = &*pair2;
+ for _ in 0..2 {
+ let mut next = lock.lock().unwrap();
+ while next.is_none() {
+ next = cvar.wait(next).unwrap();
+ }
+ let key = next.take().unwrap();
+ slab2.remove(key);
+ assert_eq!(get_val(&slab2, key), None);
+ cvar.notify_one();
+ }
+ });
+
+ let (lock, cvar) = &*pair;
+ for i in 0..2 {
+ let key = store_val(&slab, i);
+
+ let mut next = lock.lock().unwrap();
+ *next = Some(key);
+ cvar.notify_one();
+
+ // Wait for the item to be removed.
+ while next.is_some() {
+ next = cvar.wait(next).unwrap();
+ }
+
+ assert_eq!(get_val(&slab, key), None);
+ }
+
+ remover.join().unwrap();
+ })
+}
+
+#[test]
+fn concurrent_remove_remote_and_reuse() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+
+ let idx1 = store_val(&slab, 1);
+ let idx2 = store_val(&slab, 2);
+
+ assert_eq!(get_val(&slab, idx1), Some(1));
+ assert_eq!(get_val(&slab, idx2), Some(2));
+
+ let s = slab.clone();
+ let s2 = slab.clone();
+ let t1 = thread::spawn(move || {
+ s.remove(idx1);
+ });
+
+ let t2 = thread::spawn(move || {
+ s2.remove(idx2);
+ });
+
+ let idx3 = store_when_free(&slab, 3);
+ t1.join().expect("thread 1 should not panic");
+ t2.join().expect("thread 1 should not panic");
+
+ assert!(get_val(&slab, idx1).is_none());
+ assert!(get_val(&slab, idx2).is_none());
+ assert_eq!(get_val(&slab, idx3), Some(3));
+ });
+}
+
+#[test]
+fn alloc_remove_get() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+ let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+ let slab2 = slab.clone();
+ let pair2 = pair.clone();
+ let t1 = thread::spawn(move || {
+ let slab = slab2;
+ let (lock, cvar) = &*pair2;
+ // allocate one entry just so that we have to use the final one for
+ // all future allocations.
+ let _key0 = store_val(&slab, 0);
+ let key = store_val(&slab, 1);
+
+ let mut next = lock.lock().unwrap();
+ *next = Some(key);
+ cvar.notify_one();
+ // remove the second entry
+ slab.remove(key);
+ // store a new readiness at the same location (since the slab
+ // already has an entry in slot 0)
+ store_val(&slab, 2);
+ });
+
+ let (lock, cvar) = &*pair;
+ // wait for the second entry to be stored...
+ let mut next = lock.lock().unwrap();
+ while next.is_none() {
+ next = cvar.wait(next).unwrap();
+ }
+ let key = next.unwrap();
+
+ // our generation will be stale when the second store occurs at that
+ // index, we must not see the value of that store.
+ let val = get_val(&slab, key);
+ assert_ne!(val, Some(2), "generation must have advanced!");
+
+ t1.join().unwrap();
+ })
+}
+
+#[test]
+fn alloc_remove_set() {
+ loom::model(|| {
+ let slab = Arc::new(Slab::new());
+ let pair = Arc::new((Mutex::new(None), Condvar::new()));
+
+ let slab2 = slab.clone();
+ let pair2 = pair.clone();
+ let t1 = thread::spawn(move || {
+ let slab = slab2;
+ let (lock, cvar) = &*pair2;
+ // allocate one entry just so that we have to use the final one for
+ // all future allocations.
+ let _key0 = store_val(&slab, 0);
+ let key = store_val(&slab, 1);
+
+ let mut next = lock.lock().unwrap();
+ *next = Some(key);
+ cvar.notify_one();
+
+ slab.remove(key);
+ // remove the old entry and insert a new one, with a new generation.
+ let key2 = slab.alloc().expect("store key 2");
+ // after the remove, we must not see the value written with the
+ // stale index.
+ assert_eq!(
+ get_val(&slab, key),
+ None,
+ "stale set must no longer be visible"
+ );
+ assert_eq!(get_val(&slab, key2), Some(0));
+ key2
+ });
+
+ let (lock, cvar) = &*pair;
+
+ // wait for the second entry to be stored. the index we get from the
+ // other thread may become stale after a write.
+ let mut next = lock.lock().unwrap();
+ while next.is_none() {
+ next = cvar.wait(next).unwrap();
+ }
+ let key = next.unwrap();
+
+ // try to write to the index with our generation
+ slab.get(key).map(|val| val.set_readiness(key, |_| 2));
+
+ let key2 = t1.join().unwrap();
+ // after the remove, we must not see the value written with the
+ // stale index either.
+ assert_eq!(
+ get_val(&slab, key),
+ None,
+ "stale set must no longer be visible"
+ );
+ assert_eq!(get_val(&slab, key2), Some(0));
+ });
+}
+
+fn get_val(slab: &Arc<Slab<ScheduledIo>>, address: Address) -> Option<usize> {
+ slab.get(address).and_then(|s| s.get_readiness(address))
+}
+
+fn store_val(slab: &Arc<Slab<ScheduledIo>>, readiness: usize) -> Address {
+ let key = slab.alloc().expect("allocate slot");
+
+ if let Some(slot) = slab.get(key) {
+ slot.set_readiness(key, |_| readiness)
+ .expect("generation should still be valid!");
+ } else {
+ panic!("slab did not contain a value for {:?}", key);
+ }
+
+ key
+}
+
+fn store_when_free(slab: &Arc<Slab<ScheduledIo>>, readiness: usize) -> Address {
+ let key = loop {
+ if let Some(key) = slab.alloc() {
+ break key;
+ }
+
+ thread::yield_now();
+ };
+
+ if let Some(slot) = slab.get(key) {
+ slot.set_readiness(key, |_| readiness)
+ .expect("generation should still be valid!");
+ } else {
+ panic!("slab did not contain a value for {:?}", key);
+ }
+
+ key
+}
diff --git a/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs b/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs
new file mode 100644
index 0000000000..47ad46d3a1
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/loom_stack.rs
@@ -0,0 +1,88 @@
+use crate::util::slab::TransferStack;
+
+use loom::cell::UnsafeCell;
+use loom::sync::Arc;
+use loom::thread;
+
+#[test]
+fn transfer_stack() {
+ loom::model(|| {
+ let causalities = [UnsafeCell::new(None), UnsafeCell::new(None)];
+ let shared = Arc::new((causalities, TransferStack::new()));
+ let shared1 = shared.clone();
+ let shared2 = shared.clone();
+
+ // Spawn two threads that both try to push to the stack.
+ let t1 = thread::spawn(move || {
+ let (causalities, stack) = &*shared1;
+ stack.push(0, |prev| {
+ causalities[0].with_mut(|c| unsafe {
+ *c = Some(prev);
+ });
+ });
+ });
+
+ let t2 = thread::spawn(move || {
+ let (causalities, stack) = &*shared2;
+ stack.push(1, |prev| {
+ causalities[1].with_mut(|c| unsafe {
+ *c = Some(prev);
+ });
+ });
+ });
+
+ let (causalities, stack) = &*shared;
+
+ // Try to pop from the stack...
+ let mut idx = stack.pop_all();
+        while idx.is_none() {
+ idx = stack.pop_all();
+ thread::yield_now();
+ }
+ let idx = idx.unwrap();
+
+ let saw_both = causalities[idx].with(|val| {
+ let val = unsafe { *val };
+ assert!(
+ val.is_some(),
+ "UnsafeCell write must happen-before index is pushed to the stack!",
+ );
+ // were there two entries in the stack? if so, check that
+ // both saw a write.
+ if let Some(c) = causalities.get(val.unwrap()) {
+ c.with(|val| {
+ let val = unsafe { *val };
+ assert!(
+ val.is_some(),
+ "UnsafeCell write must happen-before index is pushed to the stack!",
+ );
+ });
+ true
+ } else {
+ false
+ }
+ });
+
+ // We only saw one push. Ensure that the other push happens too.
+ if !saw_both {
+ // Try to pop from the stack...
+ let mut idx = stack.pop_all();
+            while idx.is_none() {
+ idx = stack.pop_all();
+ thread::yield_now();
+ }
+ let idx = idx.unwrap();
+
+ causalities[idx].with(|val| {
+ let val = unsafe { *val };
+ assert!(
+ val.is_some(),
+ "UnsafeCell write must happen-before index is pushed to the stack!",
+ );
+ });
+ }
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+ });
+}
diff --git a/third_party/rust/tokio/src/util/slab/tests/mod.rs b/third_party/rust/tokio/src/util/slab/tests/mod.rs
new file mode 100644
index 0000000000..7f79354466
--- /dev/null
+++ b/third_party/rust/tokio/src/util/slab/tests/mod.rs
@@ -0,0 +1,2 @@
+mod loom_slab;
+mod loom_stack;
diff --git a/third_party/rust/tokio/src/util/try_lock.rs b/third_party/rust/tokio/src/util/try_lock.rs
new file mode 100644
index 0000000000..8b0edb4a87
--- /dev/null
+++ b/third_party/rust/tokio/src/util/try_lock.rs
@@ -0,0 +1,80 @@
+use crate::loom::sync::atomic::AtomicBool;
+
+use std::cell::UnsafeCell;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use std::sync::atomic::Ordering::SeqCst;
+
+pub(crate) struct TryLock<T> {
+ locked: AtomicBool,
+ data: UnsafeCell<T>,
+}
+
+pub(crate) struct LockGuard<'a, T> {
+ lock: &'a TryLock<T>,
+ _p: PhantomData<std::rc::Rc<()>>,
+}
+
+unsafe impl<T: Send> Send for TryLock<T> {}
+unsafe impl<T: Send> Sync for TryLock<T> {}
+
+unsafe impl<T: Sync> Sync for LockGuard<'_, T> {}
+
+macro_rules! new {
+ ($data:ident) => {
+ TryLock {
+ locked: AtomicBool::new(false),
+ data: UnsafeCell::new($data),
+ }
+ };
+}
+
+impl<T> TryLock<T> {
+ #[cfg(not(loom))]
+ /// Create a new `TryLock`
+ pub(crate) const fn new(data: T) -> TryLock<T> {
+ new!(data)
+ }
+
+ #[cfg(loom)]
+ /// Create a new `TryLock`
+ pub(crate) fn new(data: T) -> TryLock<T> {
+ new!(data)
+ }
+
+ /// Attempt to acquire lock
+ pub(crate) fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ if self
+ .locked
+ .compare_exchange(false, true, SeqCst, SeqCst)
+ .is_err()
+ {
+ return None;
+ }
+
+ Some(LockGuard {
+ lock: self,
+ _p: PhantomData,
+ })
+ }
+}
+
+impl<T> Deref for LockGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+impl<T> DerefMut for LockGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+impl<T> Drop for LockGuard<'_, T> {
+ fn drop(&mut self) {
+ self.lock.locked.store(false, SeqCst);
+ }
+}
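+
+// Sketch (added for illustration; not part of upstream tokio): `TryLock`
+// never blocks. `try_lock` either returns a guard or `None`, and dropping
+// the guard stores `false` back into the flag, releasing the lock.
+#[cfg(all(test, not(loom)))]
+mod illustration {
+    use super::TryLock;
+
+    #[test]
+    fn lock_is_held_until_guard_drops() {
+        let lock = TryLock::new(0u32);
+
+        let mut guard = lock.try_lock().expect("first lock succeeds");
+        *guard += 1;
+
+        // While a guard is alive, further attempts fail instead of blocking.
+        assert!(lock.try_lock().is_none());
+        drop(guard);
+
+        // Dropping the guard released the lock.
+        assert_eq!(*lock.try_lock().expect("lock is free again"), 1);
+    }
+}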
diff --git a/third_party/rust/tokio/src/util/wake.rs b/third_party/rust/tokio/src/util/wake.rs
new file mode 100644
index 0000000000..e49f1e895d
--- /dev/null
+++ b/third_party/rust/tokio/src/util/wake.rs
@@ -0,0 +1,83 @@
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::ops::Deref;
+use std::sync::Arc;
+use std::task::{RawWaker, RawWakerVTable, Waker};
+
+/// Simplified waking interface based on Arcs.
+pub(crate) trait Wake: Send + Sync {
+ /// Wake by value
+ fn wake(self: Arc<Self>);
+
+ /// Wake by reference
+ fn wake_by_ref(arc_self: &Arc<Self>);
+}
+
+/// A `Waker` that is only valid for a given lifetime.
+#[derive(Debug)]
+pub(crate) struct WakerRef<'a> {
+ waker: ManuallyDrop<Waker>,
+ _p: PhantomData<&'a ()>,
+}
+
+impl Deref for WakerRef<'_> {
+ type Target = Waker;
+
+ fn deref(&self) -> &Waker {
+ &self.waker
+ }
+}
+
+/// Creates a reference to a `Waker` from a reference to `Arc<impl Wake>`.
+pub(crate) fn waker_ref<W: Wake>(wake: &Arc<W>) -> WakerRef<'_> {
+ let ptr = &**wake as *const _ as *const ();
+
+ let waker = unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::<W>())) };
+
+ WakerRef {
+ waker: ManuallyDrop::new(waker),
+ _p: PhantomData,
+ }
+}
+
+fn waker_vtable<W: Wake>() -> &'static RawWakerVTable {
+ &RawWakerVTable::new(
+ clone_arc_raw::<W>,
+ wake_arc_raw::<W>,
+ wake_by_ref_arc_raw::<W>,
+ drop_arc_raw::<W>,
+ )
+}
+
+unsafe fn inc_ref_count<T: Wake>(data: *const ()) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+
+ // Now increase refcount, but don't drop new refcount either
+ let arc_clone: ManuallyDrop<_> = arc.clone();
+
+ // Drop explicitly to avoid clippy warnings
+ drop(arc);
+ drop(arc_clone);
+}
+
+unsafe fn clone_arc_raw<T: Wake>(data: *const ()) -> RawWaker {
+ inc_ref_count::<T>(data);
+ RawWaker::new(data, waker_vtable::<T>())
+}
+
+unsafe fn wake_arc_raw<T: Wake>(data: *const ()) {
+ let arc: Arc<T> = Arc::from_raw(data as *const T);
+ Wake::wake(arc);
+}
+
+// used by `waker_ref`
+unsafe fn wake_by_ref_arc_raw<T: Wake>(data: *const ()) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+ Wake::wake_by_ref(&arc);
+}
+
+unsafe fn drop_arc_raw<T: Wake>(data: *const ()) {
+ drop(Arc::<T>::from_raw(data as *const T))
+}
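+
+// Sketch (added for illustration; not part of upstream tokio): a scheduler
+// type implements `Wake`, and `waker_ref` lends it out as a `std::task::Waker`
+// without cloning the `Arc` up front; the refcount only changes if the waker
+// itself is cloned.
+#[cfg(test)]
+mod illustration {
+    use super::{waker_ref, Wake};
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::Arc;
+
+    struct CountingWake {
+        wakes: AtomicUsize,
+    }
+
+    impl Wake for CountingWake {
+        fn wake(self: Arc<Self>) {
+            Self::wake_by_ref(&self);
+        }
+
+        fn wake_by_ref(arc_self: &Arc<Self>) {
+            arc_self.wakes.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    #[test]
+    fn waker_ref_wakes_without_consuming_the_arc() {
+        let wake = Arc::new(CountingWake {
+            wakes: AtomicUsize::new(0),
+        });
+
+        let waker = waker_ref(&wake);
+        waker.wake_by_ref();
+        waker.wake_by_ref();
+        drop(waker);
+
+        assert_eq!(wake.wakes.load(Ordering::SeqCst), 2);
+        // `waker_ref` only borrowed the Arc; we still hold the only reference.
+        assert_eq!(Arc::strong_count(&wake), 1);
+    }
+}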
diff --git a/third_party/rust/tokio/tests/_require_full.rs b/third_party/rust/tokio/tests/_require_full.rs
new file mode 100644
index 0000000000..98455bedef
--- /dev/null
+++ b/third_party/rust/tokio/tests/_require_full.rs
@@ -0,0 +1,2 @@
+#![cfg(not(feature = "full"))]
+compile_error!("run main Tokio tests with `--features full`");
diff --git a/third_party/rust/tokio/tests/async_send_sync.rs b/third_party/rust/tokio/tests/async_send_sync.rs
new file mode 100644
index 0000000000..1fea19c2a7
--- /dev/null
+++ b/third_party/rust/tokio/tests/async_send_sync.rs
@@ -0,0 +1,258 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::cell::Cell;
+use std::io::Cursor;
+use std::net::SocketAddr;
+use std::rc::Rc;
+use tokio::net::TcpStream;
+use tokio::time::{Duration, Instant};
+
+#[allow(dead_code)]
+type BoxFutureSync<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + Sync>>;
+#[allow(dead_code)]
+type BoxFutureSend<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send>>;
+#[allow(dead_code)]
+type BoxFuture<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T>>>;
+
+#[allow(dead_code)]
+fn require_send<T: Send>(_t: &T) {}
+#[allow(dead_code)]
+fn require_sync<T: Sync>(_t: &T) {}
+
+#[allow(dead_code)]
+struct Invalid;
+
+trait AmbiguousIfSend<A> {
+ fn some_item(&self) {}
+}
+impl<T: ?Sized> AmbiguousIfSend<()> for T {}
+impl<T: ?Sized + Send> AmbiguousIfSend<Invalid> for T {}
+
+trait AmbiguousIfSync<A> {
+ fn some_item(&self) {}
+}
+impl<T: ?Sized> AmbiguousIfSync<()> for T {}
+impl<T: ?Sized + Sync> AmbiguousIfSync<Invalid> for T {}
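+
+// How the two blanket impls above work: every type gets the `()` impl, and
+// `Sync` types additionally get the `Invalid` impl. A call such as
+// `AmbiguousIfSync::some_item(&f)` therefore only type-checks when exactly
+// one impl applies, i.e. when `f` is *not* Sync; for a Sync type the call is
+// ambiguous and compilation fails. The same trick with `AmbiguousIfSend`
+// turns "this future is !Send" into a compile-time assertion.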
+
+macro_rules! into_todo {
+ ($typ:ty) => {{
+ let x: $typ = todo!();
+ x
+ }};
+}
+macro_rules! assert_value {
+ ($type:ty: Send & Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f: $type = todo!();
+ require_send(&f);
+ require_sync(&f);
+ };
+ };
+ ($type:ty: !Send & Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f: $type = todo!();
+ AmbiguousIfSend::some_item(&f);
+ require_sync(&f);
+ };
+ };
+ ($type:ty: Send & !Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f: $type = todo!();
+ require_send(&f);
+ AmbiguousIfSync::some_item(&f);
+ };
+ };
+ ($type:ty: !Send & !Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f: $type = todo!();
+ AmbiguousIfSend::some_item(&f);
+ AmbiguousIfSync::some_item(&f);
+ };
+ };
+}
+macro_rules! async_assert_fn {
+ ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Send & Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
+ require_send(&f);
+ require_sync(&f);
+ };
+ };
+ ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): Send & !Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
+ require_send(&f);
+ AmbiguousIfSync::some_item(&f);
+ };
+ };
+ ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
+ AmbiguousIfSend::some_item(&f);
+ require_sync(&f);
+ };
+ };
+ ($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): !Send & !Sync) => {
+ #[allow(unreachable_code)]
+ #[allow(unused_variables)]
+ const _: fn() = || {
+ let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
+ AmbiguousIfSend::some_item(&f);
+ AmbiguousIfSync::some_item(&f);
+ };
+ };
+}
+
+async_assert_fn!(tokio::io::copy(&mut TcpStream, &mut TcpStream): Send & Sync);
+async_assert_fn!(tokio::io::empty(): Send & Sync);
+async_assert_fn!(tokio::io::repeat(u8): Send & Sync);
+async_assert_fn!(tokio::io::sink(): Send & Sync);
+async_assert_fn!(tokio::io::split(TcpStream): Send & Sync);
+async_assert_fn!(tokio::io::stderr(): Send & Sync);
+async_assert_fn!(tokio::io::stdin(): Send & Sync);
+async_assert_fn!(tokio::io::stdout(): Send & Sync);
+async_assert_fn!(tokio::io::Split<Cursor<Vec<u8>>>::next_segment(_): Send & Sync);
+
+async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync);
+async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync);
+async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync);
+async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync);
+async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync);
+async_assert_fn!(tokio::fs::metadata(&str): Send & Sync);
+async_assert_fn!(tokio::fs::read(&str): Send & Sync);
+async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync);
+async_assert_fn!(tokio::fs::read_link(&str): Send & Sync);
+async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync);
+async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync);
+async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync);
+async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync);
+async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync);
+async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync);
+async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync);
+async_assert_fn!(tokio::fs::write(&str, Vec<u8>): Send & Sync);
+async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync);
+async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync);
+async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync);
+async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync);
+
+async_assert_fn!(tokio::fs::File::open(&str): Send & Sync);
+async_assert_fn!(tokio::fs::File::create(&str): Send & Sync);
+async_assert_fn!(tokio::fs::File::seek(_, std::io::SeekFrom): Send & Sync);
+async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync);
+async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync);
+async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync);
+async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync);
+async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync);
+async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync);
+async_assert_fn!(tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync);
+
+async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync);
+async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync);
+async_assert_fn!(tokio::net::UdpSocket::recv_from(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::udp::RecvHalf::recv(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::udp::RecvHalf::recv_from(_, &mut [u8]): Send & Sync);
+async_assert_fn!(tokio::net::udp::SendHalf::send(_, &[u8]): Send & Sync);
+async_assert_fn!(tokio::net::udp::SendHalf::send_to(_, &[u8], &SocketAddr): Send & Sync);
+
+#[cfg(unix)]
+mod unix_datagram {
+ use super::*;
+ async_assert_fn!(tokio::net::UnixListener::bind(&str): Send & Sync);
+ async_assert_fn!(tokio::net::UnixListener::accept(_): Send & Sync);
+ async_assert_fn!(tokio::net::UnixDatagram::send(_, &[u8]): Send & Sync);
+ async_assert_fn!(tokio::net::UnixDatagram::recv(_, &mut [u8]): Send & Sync);
+ async_assert_fn!(tokio::net::UnixDatagram::send_to(_, &[u8], &str): Send & Sync);
+ async_assert_fn!(tokio::net::UnixDatagram::recv_from(_, &mut [u8]): Send & Sync);
+ async_assert_fn!(tokio::net::UnixStream::connect(&str): Send & Sync);
+}
+
+async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync);
+async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync);
+#[cfg(unix)]
+async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync);
+
+async_assert_fn!(tokio::stream::empty<Rc<u8>>(): Send & Sync);
+async_assert_fn!(tokio::stream::pending<Rc<u8>>(): Send & Sync);
+async_assert_fn!(tokio::stream::iter(std::vec::IntoIter<u8>): Send & Sync);
+
+async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync);
+async_assert_fn!(tokio::sync::Mutex<u8>::lock(_): Send & Sync);
+async_assert_fn!(tokio::sync::Mutex<Cell<u8>>::lock(_): Send & Sync);
+async_assert_fn!(tokio::sync::Mutex<Rc<u8>>::lock(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::Notify::notified(_): Send & !Sync);
+async_assert_fn!(tokio::sync::RwLock<u8>::read(_): Send & Sync);
+async_assert_fn!(tokio::sync::RwLock<u8>::write(_): Send & Sync);
+async_assert_fn!(tokio::sync::RwLock<Cell<u8>>::read(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::RwLock<Cell<u8>>::write(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::RwLock<Rc<u8>>::read(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::RwLock<Rc<u8>>::write(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync);
+
+async_assert_fn!(tokio::sync::broadcast::Receiver<u8>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::broadcast::Receiver<Cell<u8>>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::broadcast::Receiver<Rc<u8>>::recv(_): !Send & !Sync);
+
+async_assert_fn!(tokio::sync::mpsc::Receiver<u8>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::mpsc::Receiver<Cell<u8>>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::mpsc::Receiver<Rc<u8>>::recv(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::mpsc::Sender<u8>::send(_, u8): Send & Sync);
+async_assert_fn!(tokio::sync::mpsc::Sender<Cell<u8>>::send(_, Cell<u8>): Send & !Sync);
+async_assert_fn!(tokio::sync::mpsc::Sender<Rc<u8>>::send(_, Rc<u8>): !Send & !Sync);
+
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<u8>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<Cell<u8>>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<Rc<u8>>::recv(_): !Send & !Sync);
+
+async_assert_fn!(tokio::sync::watch::Receiver<u8>::recv(_): Send & Sync);
+async_assert_fn!(tokio::sync::watch::Receiver<Cell<u8>>::recv(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::watch::Receiver<Rc<u8>>::recv(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::watch::Sender<u8>::closed(_): Send & Sync);
+async_assert_fn!(tokio::sync::watch::Sender<Cell<u8>>::closed(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::watch::Sender<Rc<u8>>::closed(_): !Send & !Sync);
+
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync);
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync);
+async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync);
+async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync);
+assert_value!(tokio::task::LocalSet: !Send & !Sync);
+
+async_assert_fn!(tokio::time::advance(Duration): Send & Sync);
+async_assert_fn!(tokio::time::delay_for(Duration): Send & Sync);
+async_assert_fn!(tokio::time::delay_until(Instant): Send & Sync);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync);
+async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync);
+async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync);
+async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync);
diff --git a/third_party/rust/tokio/tests/buffered.rs b/third_party/rust/tokio/tests/buffered.rs
new file mode 100644
index 0000000000..595f855a0f
--- /dev/null
+++ b/third_party/rust/tokio/tests/buffered.rs
@@ -0,0 +1,51 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio::prelude::*;
+use tokio_test::assert_ok;
+
+use std::io::prelude::*;
+use std::net::TcpStream;
+use std::thread;
+
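+// Echo topology: two blocking std clients connect from helper threads; one
+// writes `msg` N times while the async server copies the first accepted
+// stream into the second, so the reading client sees every byte echoed back.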
+#[tokio::test]
+async fn echo_server() {
+ const N: usize = 1024;
+
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ let msg = "foo bar baz";
+
+ let t = thread::spawn(move || {
+ let mut s = assert_ok!(TcpStream::connect(&addr));
+
+ let t2 = thread::spawn(move || {
+ let mut s = assert_ok!(TcpStream::connect(&addr));
+ let mut b = vec![0; msg.len() * N];
+ assert_ok!(s.read_exact(&mut b));
+ b
+ });
+
+ let mut expected = Vec::<u8>::new();
+ for _i in 0..N {
+ expected.extend(msg.as_bytes());
+ let res = assert_ok!(s.write(msg.as_bytes()));
+ assert_eq!(res, msg.len());
+ }
+
+ (expected, t2)
+ });
+
+ let (mut a, _) = assert_ok!(srv.accept().await);
+ let (mut b, _) = assert_ok!(srv.accept().await);
+
+ let n = assert_ok!(io::copy(&mut a, &mut b).await);
+
+ let (expected, t2) = t.join().unwrap();
+ let actual = t2.join().unwrap();
+
+ assert!(expected == actual);
+    assert_eq!(n, msg.len() as u64 * N as u64);
+}
diff --git a/third_party/rust/tokio/tests/fs.rs b/third_party/rust/tokio/tests/fs.rs
new file mode 100644
index 0000000000..13c44c08d6
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs.rs
@@ -0,0 +1,20 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn path_read_write() {
+ let temp = tempdir();
+ let dir = temp.path();
+
+ assert_ok!(fs::write(dir.join("bar"), b"bytes").await);
+ let out = assert_ok!(fs::read(dir.join("bar")).await);
+
+ assert_eq!(out, b"bytes");
+}
+
+fn tempdir() -> tempfile::TempDir {
+ tempfile::tempdir().unwrap()
+}
diff --git a/third_party/rust/tokio/tests/fs_copy.rs b/third_party/rust/tokio/tests/fs_copy.rs
new file mode 100644
index 0000000000..8d1632013e
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_copy.rs
@@ -0,0 +1,39 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tempfile::tempdir;
+use tokio::fs;
+
+#[tokio::test]
+async fn copy() {
+ let dir = tempdir().unwrap();
+
+ let source_path = dir.path().join("foo.txt");
+ let dest_path = dir.path().join("bar.txt");
+
+ fs::write(&source_path, b"Hello File!").await.unwrap();
+ fs::copy(&source_path, &dest_path).await.unwrap();
+
+ let from = fs::read(&source_path).await.unwrap();
+ let to = fs::read(&dest_path).await.unwrap();
+
+ assert_eq!(from, to);
+}
+
+#[tokio::test]
+async fn copy_permissions() {
+ let dir = tempdir().unwrap();
+ let from_path = dir.path().join("foo.txt");
+ let to_path = dir.path().join("bar.txt");
+
+ let from = tokio::fs::File::create(&from_path).await.unwrap();
+ let mut from_perms = from.metadata().await.unwrap().permissions();
+ from_perms.set_readonly(true);
+ from.set_permissions(from_perms.clone()).await.unwrap();
+
+ tokio::fs::copy(from_path, &to_path).await.unwrap();
+
+ let to_perms = tokio::fs::metadata(to_path).await.unwrap().permissions();
+
+ assert_eq!(from_perms, to_perms);
+}
diff --git a/third_party/rust/tokio/tests/fs_dir.rs b/third_party/rust/tokio/tests/fs_dir.rs
new file mode 100644
index 0000000000..eaff59da4f
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_dir.rs
@@ -0,0 +1,102 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+use tokio_test::assert_ok;
+
+use std::sync::{Arc, Mutex};
+use tempfile::tempdir;
+
+#[tokio::test]
+async fn create_dir() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo");
+ let new_dir_2 = new_dir.clone();
+
+ assert_ok!(fs::create_dir(new_dir).await);
+
+ assert!(new_dir_2.is_dir());
+}
+
+#[tokio::test]
+async fn create_all() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo").join("bar");
+ let new_dir_2 = new_dir.clone();
+
+ assert_ok!(fs::create_dir_all(new_dir).await);
+ assert!(new_dir_2.is_dir());
+}
+
+#[tokio::test]
+async fn remove() {
+ let base_dir = tempdir().unwrap();
+ let new_dir = base_dir.path().join("foo");
+ let new_dir_2 = new_dir.clone();
+
+ std::fs::create_dir(new_dir.clone()).unwrap();
+
+ assert_ok!(fs::remove_dir(new_dir).await);
+ assert!(!new_dir_2.exists());
+}
+
+#[tokio::test]
+async fn read_inherent() {
+ let base_dir = tempdir().unwrap();
+
+ let p = base_dir.path();
+ std::fs::create_dir(p.join("aa")).unwrap();
+ std::fs::create_dir(p.join("bb")).unwrap();
+ std::fs::create_dir(p.join("cc")).unwrap();
+
+ let files = Arc::new(Mutex::new(Vec::new()));
+
+ let f = files.clone();
+ let p = p.to_path_buf();
+
+ let mut entries = fs::read_dir(p).await.unwrap();
+
+ while let Some(e) = assert_ok!(entries.next_entry().await) {
+ let s = e.file_name().to_str().unwrap().to_string();
+ f.lock().unwrap().push(s);
+ }
+
+ let mut files = files.lock().unwrap();
+ files.sort(); // because the order is not guaranteed
+ assert_eq!(
+ *files,
+ vec!["aa".to_string(), "bb".to_string(), "cc".to_string()]
+ );
+}
+
+#[tokio::test]
+async fn read_stream() {
+ use tokio::stream::StreamExt;
+
+ let base_dir = tempdir().unwrap();
+
+ let p = base_dir.path();
+ std::fs::create_dir(p.join("aa")).unwrap();
+ std::fs::create_dir(p.join("bb")).unwrap();
+ std::fs::create_dir(p.join("cc")).unwrap();
+
+ let files = Arc::new(Mutex::new(Vec::new()));
+
+ let f = files.clone();
+ let p = p.to_path_buf();
+
+ let mut entries = fs::read_dir(p).await.unwrap();
+
+ while let Some(res) = entries.next().await {
+ let e = assert_ok!(res);
+ let s = e.file_name().to_str().unwrap().to_string();
+ f.lock().unwrap().push(s);
+ }
+
+ let mut files = files.lock().unwrap();
+ files.sort(); // because the order is not guaranteed
+ assert_eq!(
+ *files,
+ vec!["aa".to_string(), "bb".to_string(), "cc".to_string()]
+ );
+}
diff --git a/third_party/rust/tokio/tests/fs_file.rs b/third_party/rust/tokio/tests/fs_file.rs
new file mode 100644
index 0000000000..eee9a5b5c5
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_file.rs
@@ -0,0 +1,87 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs::File;
+use tokio::prelude::*;
+use tokio_test::task;
+
+use std::io::prelude::*;
+use tempfile::NamedTempFile;
+
+const HELLO: &[u8] = b"hello world...";
+
+#[tokio::test]
+async fn basic_read() {
+ let mut tempfile = tempfile();
+ tempfile.write_all(HELLO).unwrap();
+
+ let mut file = File::open(tempfile.path()).await.unwrap();
+
+ let mut buf = [0; 1024];
+ let n = file.read(&mut buf).await.unwrap();
+
+ assert_eq!(n, HELLO.len());
+ assert_eq!(&buf[..n], HELLO);
+}
+
+#[tokio::test]
+async fn basic_write() {
+ let tempfile = tempfile();
+
+ let mut file = File::create(tempfile.path()).await.unwrap();
+
+ file.write_all(HELLO).await.unwrap();
+ file.flush().await.unwrap();
+
+ let file = std::fs::read(tempfile.path()).unwrap();
+ assert_eq!(file, HELLO);
+}
+
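+// tokio's cooperative scheduling budget should force even this infinite
+// read/seek loop to yield: within 1_000 polls the task must return
+// `Pending` at least once rather than starve the executor.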
+#[tokio::test]
+async fn coop() {
+ let mut tempfile = tempfile();
+ tempfile.write_all(HELLO).unwrap();
+
+ let mut task = task::spawn(async {
+ let mut file = File::open(tempfile.path()).await.unwrap();
+
+ let mut buf = [0; 1024];
+
+ loop {
+ file.read(&mut buf).await.unwrap();
+ file.seek(std::io::SeekFrom::Start(0)).await.unwrap();
+ }
+ });
+
+ for _ in 0..1_000 {
+ if task.poll().is_pending() {
+ return;
+ }
+ }
+
+ panic!("did not yield");
+}
+
+fn tempfile() -> NamedTempFile {
+ NamedTempFile::new().unwrap()
+}
+
+#[tokio::test]
+#[cfg(unix)]
+async fn unix_fd() {
+ use std::os::unix::io::AsRawFd;
+ let tempfile = tempfile();
+
+ let file = File::create(tempfile.path()).await.unwrap();
+ assert!(file.as_raw_fd() as u64 > 0);
+}
+
+#[tokio::test]
+#[cfg(windows)]
+async fn windows_handle() {
+ use std::os::windows::io::AsRawHandle;
+ let tempfile = tempfile();
+
+ let file = File::create(tempfile.path()).await.unwrap();
+ assert!(file.as_raw_handle() as u64 > 0);
+}
diff --git a/third_party/rust/tokio/tests/fs_file_mocked.rs b/third_party/rust/tokio/tests/fs_file_mocked.rs
new file mode 100644
index 0000000000..0c5722404e
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_file_mocked.rs
@@ -0,0 +1,777 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+macro_rules! ready {
+ ($e:expr $(,)?) => {
+ match $e {
+ std::task::Poll::Ready(t) => t,
+ std::task::Poll::Pending => return std::task::Poll::Pending,
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! cfg_fs {
+ ($($item:item)*) => { $($item)* }
+}
+
+#[macro_export]
+macro_rules! cfg_io_std {
+ ($($item:item)*) => { $($item)* }
+}
+
+use futures::future;
+
+// Load source
+#[allow(warnings)]
+#[path = "../src/fs/file.rs"]
+mod file;
+use file::File;
+
+#[allow(warnings)]
+#[path = "../src/io/blocking.rs"]
+mod blocking;
+
+// Load mocked types
+mod support {
+ pub(crate) mod mock_file;
+ pub(crate) mod mock_pool;
+}
+pub(crate) use support::mock_pool as pool;
+
+// Place them where the source expects them
+pub(crate) mod io {
+ pub(crate) use tokio::io::*;
+
+ pub(crate) use crate::blocking;
+
+ pub(crate) mod sys {
+ pub(crate) use crate::support::mock_pool::{run, Blocking};
+ }
+}
+pub(crate) mod fs {
+ pub(crate) mod sys {
+ pub(crate) use crate::support::mock_file::File;
+ pub(crate) use crate::support::mock_pool::{run, Blocking};
+ }
+
+ pub(crate) use crate::support::mock_pool::asyncify;
+}
+use fs::sys;
+
+use tokio::prelude::*;
+use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok, task};
+
+use std::io::SeekFrom;
+
+const HELLO: &[u8] = b"hello world...";
+const FOO: &[u8] = b"foo bar baz...";
+
+#[test]
+fn open_read() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO);
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 1024];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_eq!(0, pool::len());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, mock.remaining());
+ assert_eq!(1, pool::len());
+
+ pool::run_one();
+
+ assert_eq!(0, mock.remaining());
+ assert!(t.is_woken());
+
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, HELLO.len());
+ assert_eq!(&buf[..n], HELLO);
+}
+
+#[test]
+fn read_twice_before_dispatch() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO);
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 1024];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_pending!(t.poll());
+ assert_pending!(t.poll());
+
+ assert_eq!(pool::len(), 1);
+ pool::run_one();
+
+ assert!(t.is_woken());
+
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(&buf[..n], HELLO);
+}
+
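+// Dropping an in-flight read future must not lose data: the dispatched
+// blocking read still completes into the file's internal buffer, and later
+// reads drain that buffer before touching the file again.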
+#[test]
+fn read_with_smaller_buf() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO);
+
+ let mut file = File::from_std(file);
+
+ {
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+ }
+
+ pool::run_one();
+
+ {
+ let mut buf = [0; 4];
+ let mut t = task::spawn(file.read(&mut buf));
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, 4);
+ assert_eq!(&buf[..], &HELLO[..n]);
+ }
+
+ // Calling again immediately succeeds with the rest of the buffer
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, 10);
+ assert_eq!(&buf[..n], &HELLO[4..]);
+
+ assert_eq!(0, pool::len());
+}
+
+#[test]
+fn read_with_bigger_buf() {
+ let (mock, file) = sys::File::mock();
+ mock.read(&HELLO[..4]).read(&HELLO[4..]);
+
+ let mut file = File::from_std(file);
+
+ {
+ let mut buf = [0; 4];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+ }
+
+ pool::run_one();
+
+ {
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, 4);
+ assert_eq!(&buf[..n], &HELLO[..n]);
+ }
+
+    // The rest of the data needs another trip to the pool: this read starts
+    // out pending, and only after the second mocked read runs does it yield
+    // the remaining bytes.
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, 10);
+ assert_eq!(&buf[..n], &HELLO[4..]);
+
+ assert_eq!(0, pool::len());
+}
+
+#[test]
+fn read_err_then_read_success() {
+ let (mock, file) = sys::File::mock();
+ mock.read_err().read(&HELLO);
+
+ let mut file = File::from_std(file);
+
+ {
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ assert_ready_err!(t.poll());
+ }
+
+ {
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let n = assert_ready_ok!(t.poll());
+
+ assert_eq!(n, HELLO.len());
+ assert_eq!(&buf[..n], HELLO);
+ }
+}
+
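+// Writes land in the file's internal buffer and report Ready immediately;
+// the blocking write runs on the pool afterwards, so completion never needs
+// to wake the task.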
+#[test]
+fn open_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+
+ assert_eq!(0, pool::len());
+ assert_ready_ok!(t.poll());
+
+ assert_eq!(1, mock.remaining());
+ assert_eq!(1, pool::len());
+
+ pool::run_one();
+
+ assert_eq!(0, mock.remaining());
+ assert!(!t.is_woken());
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn flush_while_idle() {
+ let (_mock, file) = sys::File::mock();
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+}
+
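+// Each dispatched blocking read is capped at 16 KiB (the `a` chunk size), so
+// filling a larger buffer takes several pool round-trips, one chunk each.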
+#[test]
+fn read_with_buffer_larger_than_max() {
+ // Chunks
+ let a = 16 * 1024;
+ let b = a * 2;
+ let c = a * 3;
+ let d = a * 4;
+
+ assert_eq!(d / 1024, 64);
+
+ let mut data = vec![];
+ for i in 0..(d - 1) {
+ data.push((i % 151) as u8);
+ }
+
+ let (mock, file) = sys::File::mock();
+ mock.read(&data[0..a])
+ .read(&data[a..b])
+ .read(&data[b..c])
+ .read(&data[c..]);
+
+ let mut file = File::from_std(file);
+
+ let mut actual = vec![0; d];
+ let mut pos = 0;
+
+ while pos < data.len() {
+ let mut t = task::spawn(file.read(&mut actual[pos..]));
+
+ assert_pending!(t.poll());
+ pool::run_one();
+ assert!(t.is_woken());
+
+ let n = assert_ready_ok!(t.poll());
+ assert!(n <= a);
+
+ pos += n;
+ }
+
+ assert_eq!(mock.remaining(), 0);
+ assert_eq!(data, &actual[..data.len()]);
+}
+
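+// Mirror of the read case: a write larger than the 16 KiB cap reaches the
+// mock file in multiple chunks, each dispatched to the pool in turn.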
+#[test]
+fn write_with_buffer_larger_than_max() {
+ // Chunks
+ let a = 16 * 1024;
+ let b = a * 2;
+ let c = a * 3;
+ let d = a * 4;
+
+ assert_eq!(d / 1024, 64);
+
+ let mut data = vec![];
+ for i in 0..(d - 1) {
+ data.push((i % 151) as u8);
+ }
+
+ let (mock, file) = sys::File::mock();
+ mock.write(&data[0..a])
+ .write(&data[a..b])
+ .write(&data[b..c])
+ .write(&data[c..]);
+
+ let mut file = File::from_std(file);
+
+ let mut rem = &data[..];
+
+ let mut first = true;
+
+ while !rem.is_empty() {
+ let mut t = task::spawn(file.write(rem));
+
+ if !first {
+ assert_pending!(t.poll());
+ pool::run_one();
+ assert!(t.is_woken());
+ }
+
+ first = false;
+
+ let n = assert_ready_ok!(t.poll());
+
+ rem = &rem[n..];
+ }
+
+ pool::run_one();
+
+ assert_eq!(mock.remaining(), 0);
+}
+
+#[test]
+fn write_twice_before_dispatch() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).write(FOO);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_pending!(t.poll());
+
+ assert_eq!(pool::len(), 1);
+ pool::run_one();
+
+ assert!(t.is_woken());
+
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.flush());
+ assert_pending!(t.poll());
+
+ assert_eq!(pool::len(), 1);
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_ready_ok!(t.poll());
+}
+
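+// Writing after an unconsumed buffered read requires seeking the file back
+// over the read-ahead bytes first; the mock encodes that expected
+// `seek_current` between the read and the write.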
+#[test]
+fn incomplete_read_followed_by_write() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO)
+ .seek_current_ok(-(HELLO.len() as i64), 0)
+ .write(FOO);
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 32];
+
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_ok!(t.poll());
+
+ assert_eq!(pool::len(), 1);
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn incomplete_partial_read_followed_by_write() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO).seek_current_ok(-10, 0).write(FOO);
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 32];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let mut buf = [0; 4];
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_ok!(t.poll());
+
+ assert_eq!(pool::len(), 1);
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn incomplete_read_followed_by_flush() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO)
+ .seek_current_ok(-(HELLO.len() as i64), 0)
+ .write(FOO);
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 32];
+
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+}
+
+#[test]
+fn incomplete_flush_followed_by_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).write(FOO);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ let n = assert_ready_ok!(t.poll());
+ assert_eq!(n, HELLO.len());
+
+ let mut t = task::spawn(file.flush());
+ assert_pending!(t.poll());
+
+ // TODO: Move under write
+ pool::run_one();
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn read_err() {
+ let (mock, file) = sys::File::mock();
+ mock.read_err();
+
+ let mut file = File::from_std(file);
+
+ let mut buf = [0; 1024];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_pending!(t.poll());
+
+ pool::run_one();
+ assert!(t.is_woken());
+
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn write_write_err() {
+ let (mock, file) = sys::File::mock();
+ mock.write_err();
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn write_read_write_err() {
+ let (mock, file) = sys::File::mock();
+ mock.write_err().read(HELLO);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ let mut buf = [0; 1024];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn write_read_flush_err() {
+ let (mock, file) = sys::File::mock();
+ mock.write_err().read(HELLO);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ let mut buf = [0; 1024];
+ let mut t = task::spawn(file.read(&mut buf));
+
+ assert_pending!(t.poll());
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn write_seek_write_err() {
+ let (mock, file) = sys::File::mock();
+ mock.write_err().seek_start_ok(0);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ {
+ let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
+ assert_pending!(t.poll());
+ }
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.write(FOO));
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn write_seek_flush_err() {
+ let (mock, file) = sys::File::mock();
+ mock.write_err().seek_start_ok(0);
+
+ let mut file = File::from_std(file);
+
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ pool::run_one();
+
+ {
+ let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
+ assert_pending!(t.poll());
+ }
+
+ pool::run_one();
+
+ let mut t = task::spawn(file.flush());
+ assert_ready_err!(t.poll());
+}
+
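+// `sync_all` must be ordered after the still-buffered write: the first pool
+// run flushes the write, the second performs the sync, hence two rounds of
+// `Pending` before the future resolves.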
+#[test]
+fn sync_all_ordered_after_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).sync_all();
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.sync_all());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn sync_all_err_ordered_after_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).sync_all_err();
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.sync_all());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn sync_data_ordered_after_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).sync_data();
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.sync_data());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn sync_data_err_ordered_after_write() {
+ let (mock, file) = sys::File::mock();
+ mock.write(HELLO).sync_data_err();
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.write(HELLO));
+ assert_ready_ok!(t.poll());
+
+ let mut t = task::spawn(file.sync_data());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_pending!(t.poll());
+
+ assert_eq!(1, pool::len());
+ pool::run_one();
+
+ assert!(t.is_woken());
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn open_set_len_ok() {
+ let (mock, file) = sys::File::mock();
+ mock.set_len(123);
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.set_len(123));
+
+ assert_pending!(t.poll());
+ assert_eq!(1, mock.remaining());
+
+ pool::run_one();
+ assert_eq!(0, mock.remaining());
+
+ assert!(t.is_woken());
+ assert_ready_ok!(t.poll());
+}
+
+#[test]
+fn open_set_len_err() {
+ let (mock, file) = sys::File::mock();
+ mock.set_len_err(123);
+
+ let mut file = File::from_std(file);
+ let mut t = task::spawn(file.set_len(123));
+
+ assert_pending!(t.poll());
+ assert_eq!(1, mock.remaining());
+
+ pool::run_one();
+ assert_eq!(0, mock.remaining());
+
+ assert!(t.is_woken());
+ assert_ready_err!(t.poll());
+}
+
+#[test]
+fn partial_read_set_len_ok() {
+ let (mock, file) = sys::File::mock();
+ mock.read(HELLO)
+ .seek_current_ok(-14, 0)
+ .set_len(123)
+ .read(FOO);
+
+ let mut buf = [0; 32];
+ let mut file = File::from_std(file);
+
+ {
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+ }
+
+ pool::run_one();
+
+ {
+ let mut t = task::spawn(file.set_len(123));
+
+ assert_pending!(t.poll());
+ pool::run_one();
+ assert_ready_ok!(t.poll());
+ }
+
+ let mut t = task::spawn(file.read(&mut buf));
+ assert_pending!(t.poll());
+ pool::run_one();
+ let n = assert_ready_ok!(t.poll());
+
+ assert_eq!(n, FOO.len());
+ assert_eq!(&buf[..n], FOO);
+}
diff --git a/third_party/rust/tokio/tests/fs_link.rs b/third_party/rust/tokio/tests/fs_link.rs
new file mode 100644
index 0000000000..cbbe27efe4
--- /dev/null
+++ b/third_party/rust/tokio/tests/fs_link.rs
@@ -0,0 +1,70 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::fs;
+
+use std::io::prelude::*;
+use std::io::BufReader;
+use tempfile::tempdir;
+
+#[tokio::test]
+async fn test_hard_link() {
+ let dir = tempdir().unwrap();
+ let src = dir.path().join("src.txt");
+ let dst = dir.path().join("dst.txt");
+
+ {
+ let mut file = std::fs::File::create(&src).unwrap();
+ file.write_all(b"hello").unwrap();
+ }
+
+ let dst_2 = dst.clone();
+
+ assert!(fs::hard_link(src, dst_2.clone()).await.is_ok());
+
+ let mut content = String::new();
+
+ {
+ let file = std::fs::File::open(dst).unwrap();
+ let mut reader = BufReader::new(file);
+ reader.read_to_string(&mut content).unwrap();
+ }
+
+ assert!(content == "hello");
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn test_symlink() {
+ let dir = tempdir().unwrap();
+ let src = dir.path().join("src.txt");
+ let dst = dir.path().join("dst.txt");
+
+ {
+ let mut file = std::fs::File::create(&src).unwrap();
+ file.write_all(b"hello").unwrap();
+ }
+
+ let src_2 = src.clone();
+ let dst_2 = dst.clone();
+
+ assert!(fs::os::unix::symlink(src_2.clone(), dst_2.clone())
+ .await
+ .is_ok());
+
+ let mut content = String::new();
+
+ {
+ let file = std::fs::File::open(dst.clone()).unwrap();
+ let mut reader = BufReader::new(file);
+ reader.read_to_string(&mut content).unwrap();
+ }
+
+ assert!(content == "hello");
+
+ let read = fs::read_link(dst.clone()).await.unwrap();
+ assert!(read == src);
+
+ let symlink_meta = fs::symlink_metadata(dst.clone()).await.unwrap();
+ assert!(symlink_meta.file_type().is_symlink());
+}
diff --git a/third_party/rust/tokio/tests/io_async_read.rs b/third_party/rust/tokio/tests/io_async_read.rs
new file mode 100644
index 0000000000..20440bbde3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_async_read.rs
@@ -0,0 +1,148 @@
+#![allow(clippy::transmute_ptr_to_ptr)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncRead;
+use tokio_test::task;
+use tokio_test::{assert_ready_err, assert_ready_ok};
+
+use bytes::{BufMut, BytesMut};
+use std::io;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[test]
+fn assert_obj_safe() {
+ fn _assert<T>() {}
+ _assert::<Box<dyn AsyncRead>>();
+}
+
+#[test]
+fn read_buf_success() {
+ struct Rd;
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ buf[0..11].copy_from_slice(b"hello world");
+ Poll::Ready(Ok(11))
+ }
+ }
+
+ let mut buf = BytesMut::with_capacity(65);
+
+ task::spawn(Rd).enter(|cx, rd| {
+ let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf));
+
+ assert_eq!(11, n);
+ assert_eq!(buf[..], b"hello world"[..]);
+ });
+}
+
+#[test]
+fn read_buf_error() {
+ struct Rd;
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ let err = io::ErrorKind::Other.into();
+ Poll::Ready(Err(err))
+ }
+ }
+
+ let mut buf = BytesMut::with_capacity(65);
+
+ task::spawn(Rd).enter(|cx, rd| {
+ let err = assert_ready_err!(rd.poll_read_buf(cx, &mut buf));
+ assert_eq!(err.kind(), io::ErrorKind::Other);
+ });
+}
+
+#[test]
+fn read_buf_no_capacity() {
+ struct Rd;
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ unimplemented!();
+ }
+ }
+
+ let mut buf = [0u8; 0];
+
+ task::spawn(Rd).enter(|cx, rd| {
+ let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut &mut buf[..]));
+ assert_eq!(0, n);
+ });
+}
+
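+// With the default `prepare_uninitialized_buffer` (which zeroes the buffer),
+// the spare capacity handed to `poll_read` must be all zeroes.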
+#[test]
+fn read_buf_no_uninitialized() {
+ struct Rd;
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ for b in buf {
+ assert_eq!(0, *b);
+ }
+
+ Poll::Ready(Ok(0))
+ }
+ }
+
+ let mut buf = BytesMut::with_capacity(64);
+
+ task::spawn(Rd).enter(|cx, rd| {
+ let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf));
+ assert_eq!(0, n);
+ });
+}
+
+#[test]
+fn read_buf_uninitialized_ok() {
+ struct Rd;
+
+ impl AsyncRead for Rd {
+ unsafe fn prepare_uninitialized_buffer(&self, _: &mut [MaybeUninit<u8>]) -> bool {
+ false
+ }
+
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ assert_eq!(buf[0..11], b"hello world"[..]);
+ Poll::Ready(Ok(0))
+ }
+ }
+
+    // `prepare_uninitialized_buffer` returns false, so `poll_read_buf` skips
+    // zeroing and passes the spare capacity through as-is; pre-fill that
+    // memory so `poll_read` can verify it was left untouched.
+ let mut buf = BytesMut::with_capacity(64);
+
+ unsafe {
+ let b: &mut [u8] = std::mem::transmute(buf.bytes_mut());
+ b[0..11].copy_from_slice(b"hello world");
+ }
+
+ task::spawn(Rd).enter(|cx, rd| {
+ let n = assert_ready_ok!(rd.poll_read_buf(cx, &mut buf));
+ assert_eq!(0, n);
+ });
+}
diff --git a/third_party/rust/tokio/tests/io_chain.rs b/third_party/rust/tokio/tests/io_chain.rs
new file mode 100644
index 0000000000..e2d59411a1
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_chain.rs
@@ -0,0 +1,16 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn chain() {
+ let mut buf = Vec::new();
+ let rd1: &[u8] = b"hello ";
+ let rd2: &[u8] = b"world";
+
+ let mut rd = rd1.chain(rd2);
+ assert_ok!(rd.read_to_end(&mut buf).await);
+ assert_eq!(buf, b"hello world");
+}
diff --git a/third_party/rust/tokio/tests/io_copy.rs b/third_party/rust/tokio/tests/io_copy.rs
new file mode 100644
index 0000000000..c1c6df4eb3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_copy.rs
@@ -0,0 +1,36 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{self, AsyncRead};
+use tokio_test::assert_ok;
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn copy() {
+ struct Rd(bool);
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.0 {
+ buf[0..11].copy_from_slice(b"hello world");
+ self.0 = false;
+ Poll::Ready(Ok(11))
+ } else {
+ Poll::Ready(Ok(0))
+ }
+ }
+ }
+
+ let mut rd = Rd(true);
+ let mut wr = Vec::new();
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, 11);
+ assert_eq!(wr, b"hello world");
+}
diff --git a/third_party/rust/tokio/tests/io_driver.rs b/third_party/rust/tokio/tests/io_driver.rs
new file mode 100644
index 0000000000..b85abd8c2a
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_driver.rs
@@ -0,0 +1,88 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio::runtime;
+use tokio_test::{assert_ok, assert_pending};
+
+use futures::task::{waker_ref, ArcWake};
+use std::future::Future;
+use std::net::TcpStream;
+use std::pin::Pin;
+use std::sync::{mpsc, Arc, Mutex};
+use std::task::Context;
+
+struct Task<T> {
+ future: Mutex<Pin<Box<T>>>,
+}
+
+impl<T: Send> ArcWake for Task<T> {
+ fn wake_by_ref(_: &Arc<Self>) {
+ // Do nothing...
+ }
+}
+
+impl<T> Task<T> {
+ fn new(future: T) -> Task<T> {
+ Task {
+ future: Mutex::new(Box::pin(future)),
+ }
+ }
+}
+
+#[test]
+fn test_drop_on_notify() {
+ // When the reactor receives a kernel notification, it notifies the
+ // task that holds the associated socket. If this notification results in
+ // the task being dropped, the socket will also be dropped.
+ //
+ // Previously, there was a deadlock scenario where the reactor, while
+ // notifying, held a lock and the task being dropped attempted to acquire
+ // that same lock in order to clean up state.
+ //
+ // To simulate this case, we create a fake executor that does nothing when
+ // the task is notified. This simulates an executor in the process of
+ // shutting down. Then, when the task handle is dropped, the task itself is
+ // dropped.
+
+ let mut rt = runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap();
+
+ let (addr_tx, addr_rx) = mpsc::channel();
+
+ // Define a task that just drains the listener
+ let task = Arc::new(Task::new(async move {
+ // Create a listener
+ let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Send the address
+ let addr = listener.local_addr().unwrap();
+ addr_tx.send(addr).unwrap();
+
+ loop {
+ let _ = listener.accept().await;
+ }
+ }));
+
+ {
+ rt.enter(|| {
+ let waker = waker_ref(&task);
+ let mut cx = Context::from_waker(&waker);
+ assert_pending!(task.future.lock().unwrap().as_mut().poll(&mut cx));
+ });
+ }
+
+ // Get the address
+ let addr = addr_rx.recv().unwrap();
+
+ drop(task);
+
+ // Establish a connection to the acceptor
+ let _s = TcpStream::connect(&addr).unwrap();
+
+ // Force the reactor to turn
+ rt.block_on(async {});
+}
diff --git a/third_party/rust/tokio/tests/io_driver_drop.rs b/third_party/rust/tokio/tests/io_driver_drop.rs
new file mode 100644
index 0000000000..0a5ce62513
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_driver_drop.rs
@@ -0,0 +1,53 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+use tokio::runtime;
+use tokio_test::{assert_err, assert_pending, assert_ready, task};
+
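+// Dropping the runtime also drops its I/O driver; resources bound to it must
+// fail fast (or wake already-pending tasks) instead of blocking forever.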
+#[test]
+fn tcp_doesnt_block() {
+ let rt = rt();
+
+ let mut listener = rt.enter(|| {
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ TcpListener::from_std(listener).unwrap()
+ });
+
+ drop(rt);
+
+ let mut task = task::spawn(async move {
+ assert_err!(listener.accept().await);
+ });
+
+ assert_ready!(task.poll());
+}
+
+#[test]
+fn drop_wakes() {
+ let rt = rt();
+
+ let mut listener = rt.enter(|| {
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+ TcpListener::from_std(listener).unwrap()
+ });
+
+ let mut task = task::spawn(async move {
+ assert_err!(listener.accept().await);
+ });
+
+ assert_pending!(task.poll());
+
+ drop(rt);
+
+ assert!(task.is_woken());
+ assert_ready!(task.poll());
+}
+
+fn rt() -> runtime::Runtime {
+ runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/io_lines.rs b/third_party/rust/tokio/tests/io_lines.rs
new file mode 100644
index 0000000000..2f6b3393b9
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_lines.rs
@@ -0,0 +1,35 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncBufReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn lines_inherent() {
+ let rd: &[u8] = b"hello\r\nworld\n\n";
+ let mut st = rd.lines();
+
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "hello");
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "world");
+ let b = assert_ok!(st.next_line().await).unwrap();
+ assert_eq!(b, "");
+ assert!(assert_ok!(st.next_line().await).is_none());
+}
+
+#[tokio::test]
+async fn lines_stream() {
+ use tokio::stream::StreamExt;
+
+ let rd: &[u8] = b"hello\r\nworld\n\n";
+ let mut st = rd.lines();
+
+ let b = assert_ok!(st.next().await.unwrap());
+ assert_eq!(b, "hello");
+ let b = assert_ok!(st.next().await.unwrap());
+ assert_eq!(b, "world");
+ let b = assert_ok!(st.next().await.unwrap());
+ assert_eq!(b, "");
+ assert!(st.next().await.is_none());
+}
diff --git a/third_party/rust/tokio/tests/io_read.rs b/third_party/rust/tokio/tests/io_read.rs
new file mode 100644
index 0000000000..4791c9a661
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read.rs
@@ -0,0 +1,60 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncRead, AsyncReadExt};
+use tokio_test::assert_ok;
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn read() {
+ #[derive(Default)]
+ struct Rd {
+ poll_cnt: usize,
+ }
+
+ impl AsyncRead for Rd {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ assert_eq!(0, self.poll_cnt);
+ self.poll_cnt += 1;
+
+ buf[0..11].copy_from_slice(b"hello world");
+ Poll::Ready(Ok(11))
+ }
+ }
+
+ let mut buf = Box::new([0; 11]);
+ let mut rd = Rd::default();
+
+ let n = assert_ok!(rd.read(&mut buf[..]).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], b"hello world"[..]);
+}
+
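+// A misbehaving reader that claims to have filled twice as many bytes as the
+// buffer holds; `read_buf` must panic rather than advance the buffer past
+// memory that was never initialized.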
+struct BadAsyncRead;
+
+impl AsyncRead for BadAsyncRead {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ for b in &mut *buf {
+ *b = b'a';
+ }
+ Poll::Ready(Ok(buf.len() * 2))
+ }
+}
+
+#[tokio::test]
+#[should_panic]
+async fn read_buf_bad_async_read() {
+ let mut buf = Vec::with_capacity(10);
+ BadAsyncRead.read_buf(&mut buf).await.unwrap();
+}
diff --git a/third_party/rust/tokio/tests/io_read_exact.rs b/third_party/rust/tokio/tests/io_read_exact.rs
new file mode 100644
index 0000000000..d0e659bd33
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_exact.rs
@@ -0,0 +1,15 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_exact() {
+ let mut buf = Box::new([0; 8]);
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_exact(&mut buf[..]).await);
+ assert_eq!(n, 8);
+ assert_eq!(buf[..], b"hello wo"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_read_line.rs b/third_party/rust/tokio/tests/io_read_line.rs
new file mode 100644
index 0000000000..57ae37cef3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_line.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncBufReadExt;
+use tokio_test::assert_ok;
+
+use std::io::Cursor;
+
+#[tokio::test]
+async fn read_line() {
+ let mut buf = String::new();
+ let mut rd = Cursor::new(b"hello\nworld\n\n");
+
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, "hello\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, "world\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 1);
+ assert_eq!(buf, "\n");
+ buf.clear();
+ let n = assert_ok!(rd.read_line(&mut buf).await);
+ assert_eq!(n, 0);
+ assert_eq!(buf, "");
+}
diff --git a/third_party/rust/tokio/tests/io_read_to_end.rs b/third_party/rust/tokio/tests/io_read_to_end.rs
new file mode 100644
index 0000000000..ee636ba596
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_to_end.rs
@@ -0,0 +1,15 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_to_end() {
+ let mut buf = vec![];
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_to_end(&mut buf).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], b"hello world"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_read_to_string.rs b/third_party/rust/tokio/tests/io_read_to_string.rs
new file mode 100644
index 0000000000..6b384b8910
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_to_string.rs
@@ -0,0 +1,15 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_to_string() {
+ let mut buf = String::new();
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_to_string(&mut buf).await);
+ assert_eq!(n, 11);
+ assert_eq!(buf[..], "hello world"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_read_until.rs b/third_party/rust/tokio/tests/io_read_until.rs
new file mode 100644
index 0000000000..4e0e0d10d3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_read_until.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncBufReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn read_until() {
+ let mut buf = vec![];
+ let mut rd: &[u8] = b"hello world";
+
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 6);
+ assert_eq!(buf, b"hello ");
+ buf.clear();
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 5);
+ assert_eq!(buf, b"world");
+ buf.clear();
+ let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
+ assert_eq!(n, 0);
+ assert_eq!(buf, []);
+}
diff --git a/third_party/rust/tokio/tests/io_split.rs b/third_party/rust/tokio/tests/io_split.rs
new file mode 100644
index 0000000000..e54bf24852
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_split.rs
@@ -0,0 +1,78 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{split, AsyncRead, AsyncWrite, ReadHalf, WriteHalf};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+struct RW;
+
+impl AsyncRead for RW {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(1))
+ }
+}
+
+impl AsyncWrite for RW {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ Poll::Ready(Ok(1))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[test]
+fn is_send_and_sync() {
+ fn assert_bound<T: Send + Sync>() {}
+
+ assert_bound::<ReadHalf<RW>>();
+ assert_bound::<WriteHalf<RW>>();
+}
+
+#[test]
+fn split_stream_id() {
+ let (r1, w1) = split(RW);
+ let (r2, w2) = split(RW);
+    assert!(r1.is_pair_of(&w1));
+    assert!(!r1.is_pair_of(&w2));
+    assert!(r2.is_pair_of(&w2));
+    assert!(!r2.is_pair_of(&w1));
+}
+
+#[test]
+fn unsplit_ok() {
+ let (r, w) = split(RW);
+ r.unsplit(w);
+}
+
+#[test]
+#[should_panic]
+fn unsplit_err1() {
+ let (r, _) = split(RW);
+ let (_, w) = split(RW);
+ r.unsplit(w);
+}
+
+#[test]
+#[should_panic]
+fn unsplit_err2() {
+ let (_, w) = split(RW);
+ let (r, _) = split(RW);
+ r.unsplit(w);
+}
diff --git a/third_party/rust/tokio/tests/io_take.rs b/third_party/rust/tokio/tests/io_take.rs
new file mode 100644
index 0000000000..683606f727
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_take.rs
@@ -0,0 +1,16 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn take() {
+ let mut buf = [0; 6];
+ let rd: &[u8] = b"hello world";
+
+ let mut rd = rd.take(4);
+ let n = assert_ok!(rd.read(&mut buf).await);
+ assert_eq!(n, 4);
+ assert_eq!(&buf, &b"hell\0\0"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_write.rs b/third_party/rust/tokio/tests/io_write.rs
new file mode 100644
index 0000000000..96cebc3313
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write.rs
@@ -0,0 +1,58 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::assert_ok;
+
+use bytes::BytesMut;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ assert_eq!(self.cnt, 0);
+ self.buf.extend(&buf[0..4]);
+ Ok(4).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ let n = assert_ok!(wr.write(b"hello world").await);
+ assert_eq!(n, 4);
+ assert_eq!(wr.buf, b"hell"[..]);
+}
+
+#[tokio::test]
+async fn write_cursor() {
+ use std::io::Cursor;
+
+ let mut wr = Cursor::new(Vec::new());
+
+ let n = assert_ok!(wr.write(b"hello world").await);
+ assert_eq!(n, 11);
+ assert_eq!(wr.get_ref().as_slice(), &b"hello world"[..]);
+}
diff --git a/third_party/rust/tokio/tests/io_write_all.rs b/third_party/rust/tokio/tests/io_write_all.rs
new file mode 100644
index 0000000000..7ca02228a3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_all.rs
@@ -0,0 +1,51 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+use tokio_test::assert_ok;
+
+use bytes::BytesMut;
+use std::cmp;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_all() {
+ struct Wr {
+ buf: BytesMut,
+ cnt: usize,
+ }
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ let n = cmp::min(4, buf.len());
+ let buf = &buf[0..n];
+
+ self.cnt += 1;
+ self.buf.extend(buf);
+ Ok(buf.len()).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {
+ buf: BytesMut::with_capacity(64),
+ cnt: 0,
+ };
+
+ assert_ok!(wr.write_all(b"hello world").await);
+ assert_eq!(wr.buf, b"hello world"[..]);
+ assert_eq!(wr.cnt, 3);
+}
diff --git a/third_party/rust/tokio/tests/io_write_int.rs b/third_party/rust/tokio/tests/io_write_int.rs
new file mode 100644
index 0000000000..48a583d8c3
--- /dev/null
+++ b/third_party/rust/tokio/tests/io_write_int.rs
@@ -0,0 +1,37 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncWrite, AsyncWriteExt};
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+#[tokio::test]
+async fn write_int_should_err_if_write_count_0() {
+ struct Wr {}
+
+ impl AsyncWrite for Wr {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ _buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Ok(0).into()
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+
+ fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Ok(()).into()
+ }
+ }
+
+ let mut wr = Wr {};
+
+    // Testing these two is enough; the other integer widths are expanded
+    // from the same macro.
+ assert!(wr.write_i8(0).await.is_err());
+ assert!(wr.write_i32(12).await.is_err());
+}
diff --git a/third_party/rust/tokio/tests/macros_join.rs b/third_party/rust/tokio/tests/macros_join.rs
new file mode 100644
index 0000000000..d9b748d9a7
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_join.rs
@@ -0,0 +1,71 @@
+use tokio::sync::oneshot;
+use tokio_test::{assert_pending, assert_ready, task};
+
+#[tokio::test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::join!(async { 1 },);
+
+ assert_eq!(foo, (1,));
+}
+
+#[tokio::test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::join!(async { 1 });
+
+ assert_eq!(foo, (1,));
+}
+
+#[tokio::test]
+async fn sync_two_lit_expr_comma() {
+ let foo = tokio::join!(async { 1 }, async { 2 },);
+
+ assert_eq!(foo, (1, 2));
+}
+
+#[tokio::test]
+async fn sync_two_lit_expr_no_comma() {
+ let foo = tokio::join!(async { 1 }, async { 2 });
+
+ assert_eq!(foo, (1, 2));
+}
+
+#[tokio::test]
+async fn two_await() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+
+ let mut join = task::spawn(async {
+ tokio::join!(async { rx1.await.unwrap() }, async { rx2.await.unwrap() })
+ });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ tx1.send("hello").unwrap();
+ assert!(join.is_woken());
+ let res = assert_ready!(join.poll());
+
+ assert_eq!(("hello", 123), res);
+}
+
+#[test]
+fn join_size() {
+ use futures::future;
+ use std::mem;
+
+ let fut = async {
+ let ready = future::ready(0i32);
+ tokio::join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+ tokio::join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
diff --git a/third_party/rust/tokio/tests/macros_pin.rs b/third_party/rust/tokio/tests/macros_pin.rs
new file mode 100644
index 0000000000..da6e0be6ed
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_pin.rs
@@ -0,0 +1,13 @@
+async fn one() {}
+async fn two() {}
+
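+// `tokio::pin!` pins each future to the stack, which is what makes awaiting
+// it through `&mut f1` legal for futures that are not `Unpin`.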
+#[tokio::test]
+async fn multi_pin() {
+ tokio::pin! {
+ let f1 = one();
+ let f2 = two();
+ }
+
+ (&mut f1).await;
+ (&mut f2).await;
+}
diff --git a/third_party/rust/tokio/tests/macros_select.rs b/third_party/rust/tokio/tests/macros_select.rs
new file mode 100644
index 0000000000..c08e816a01
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_select.rs
@@ -0,0 +1,447 @@
+use tokio::sync::{mpsc, oneshot};
+use tokio::task;
+use tokio_test::{assert_ok, assert_pending, assert_ready};
+
+use futures::future::poll_fn;
+use std::task::Poll::Ready;
+
+#[tokio::test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::select! {
+ foo = async { 1 } => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn nested_one() {
+ let foo = tokio::select! {
+ foo = async { 1 } => tokio::select! {
+ bar = async { foo } => bar,
+ },
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::select! {
+ foo = async { 1 } => foo
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn sync_one_lit_expr_block() {
+ let foo = tokio::select! {
+ foo = async { 1 } => { foo }
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn sync_one_await() {
+ let foo = tokio::select! {
+ foo = one() => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn sync_one_ident() {
+ let one = one();
+
+ let foo = tokio::select! {
+ foo = one => foo,
+ };
+
+ assert_eq!(foo, 1);
+}
+
+#[tokio::test]
+async fn sync_two() {
+ use std::cell::Cell;
+
+ let cnt = Cell::new(0);
+
+ let res = tokio::select! {
+ foo = async {
+ cnt.set(cnt.get() + 1);
+ 1
+ } => foo,
+ bar = async {
+ cnt.set(cnt.get() + 1);
+ 2
+ } => bar,
+ };
+
+ assert_eq!(1, cnt.get());
+ assert!(res == 1 || res == 2);
+}
+
+#[tokio::test]
+async fn drop_in_fut() {
+ let s = "hello".to_string();
+
+ let res = tokio::select! {
+ foo = async {
+ let v = one().await;
+ drop(s);
+ v
+ } => foo
+ };
+
+ assert_eq!(res, 1);
+}
+
+#[tokio::test]
+async fn one_ready() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (_tx2, rx2) = oneshot::channel::<i32>();
+
+ tx1.send(1).unwrap();
+
+ let v = tokio::select! {
+ res = rx1 => {
+ assert_ok!(res)
+ },
+ _ = rx2 => unreachable!(),
+ };
+
+ assert_eq!(1, v);
+}
+
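+// The `else` branch runs once every other branch is disabled, i.e. when both
+// channels are closed and `recv()` starts returning `None`.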
+#[tokio::test]
+async fn select_streams() {
+ let (tx1, mut rx1) = mpsc::unbounded_channel::<i32>();
+ let (tx2, mut rx2) = mpsc::unbounded_channel::<i32>();
+
+ tokio::spawn(async move {
+ assert_ok!(tx2.send(1));
+ task::yield_now().await;
+
+ assert_ok!(tx1.send(2));
+ task::yield_now().await;
+
+ assert_ok!(tx2.send(3));
+ task::yield_now().await;
+
+ drop((tx1, tx2));
+ });
+
+ let mut rem = true;
+ let mut msgs = vec![];
+
+ while rem {
+ tokio::select! {
+ Some(x) = rx1.recv() => {
+ msgs.push(x);
+ }
+ Some(y) = rx2.recv() => {
+ msgs.push(y);
+ }
+ else => {
+ rem = false;
+ }
+ }
+ }
+
+ msgs.sort();
+ assert_eq!(&msgs[..], &[1, 2, 3]);
+}
+
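+// Passing the receivers as `&mut` means `select!` only borrows them, so the
+// branch that lost the race can still be awaited afterwards.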
+#[tokio::test]
+async fn move_uncompleted_futures() {
+ let (tx1, mut rx1) = oneshot::channel::<i32>();
+ let (tx2, mut rx2) = oneshot::channel::<i32>();
+
+ tx1.send(1).unwrap();
+ tx2.send(2).unwrap();
+
+ let ran;
+
+ tokio::select! {
+ res = &mut rx1 => {
+ assert_eq!(1, assert_ok!(res));
+ assert_eq!(2, assert_ok!(rx2.await));
+ ran = true;
+ },
+ res = &mut rx2 => {
+ assert_eq!(2, assert_ok!(res));
+ assert_eq!(1, assert_ok!(rx1.await));
+ ran = true;
+ },
+ }
+
+ assert!(ran);
+}
+
+#[tokio::test]
+async fn nested() {
+ let res = tokio::select! {
+ x = async { 1 } => {
+ tokio::select! {
+ y = async { 2 } => x + y,
+ }
+ }
+ };
+
+ assert_eq!(res, 3);
+}
+
+#[tokio::test]
+async fn struct_size() {
+ use futures::future;
+ use std::mem;
+
+ let fut = async {
+ let ready = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 32);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready1 => {},
+ _ = ready2 => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 40);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+ let ready3 = future::ready(0i32);
+
+ tokio::select! {
+ _ = ready1 => {},
+ _ = ready2 => {},
+ _ = ready3 => {},
+ }
+ };
+
+ assert!(mem::size_of_val(&fut) <= 48);
+}
+
+#[tokio::test]
+async fn mutable_borrowing_future_with_same_borrow_in_block() {
+ let mut value = 234;
+
+ tokio::select! {
+ _ = require_mutable(&mut value) => { },
+ _ = async_noop() => {
+ value += 5;
+ },
+ }
+
+ assert!(value >= 234);
+}
+
+#[tokio::test]
+async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() {
+ let mut value = 234;
+
+ tokio::select! {
+ _ = require_mutable(&mut value) => { },
+ _ = async_noop() => {
+ value += 5;
+ },
+ else => {
+ value += 27;
+ },
+ }
+
+ assert!(value >= 234);
+}
+
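+// A branch whose pattern fails to match is disabled for the rest of the
+// `select!`; the closure asserts the future is polled exactly once even
+// though the enclosing task is polled repeatedly.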
+#[tokio::test]
+async fn future_panics_after_poll() {
+ use tokio_test::task;
+
+ let (tx, rx) = oneshot::channel();
+
+ let mut polled = false;
+
+ let f = poll_fn(|_| {
+ assert!(!polled);
+ polled = true;
+ Ready(None::<()>)
+ });
+
+ let mut f = task::spawn(async {
+ tokio::select! {
+ Some(_) = f => unreachable!(),
+ ret = rx => ret.unwrap(),
+ }
+ });
+
+ assert_pending!(f.poll());
+ assert_pending!(f.poll());
+
+ assert_ok!(tx.send(1));
+
+ let res = assert_ready!(f.poll());
+ assert_eq!(1, res);
+}
+
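+// An `if false` guard disables its branch before anything is polled, so the
+// always-panicking future must never run.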
+#[tokio::test]
+async fn disable_with_if() {
+ use tokio_test::task;
+
+ let f = poll_fn(|_| panic!());
+ let (tx, rx) = oneshot::channel();
+
+ let mut f = task::spawn(async {
+ tokio::select! {
+ _ = f, if false => unreachable!(),
+ _ = rx => (),
+ }
+ });
+
+ assert_pending!(f.poll());
+
+ assert_ok!(tx.send(()));
+ assert!(f.is_woken());
+
+ assert_ready!(f.poll());
+}
+
+#[tokio::test]
+async fn join_with_select() {
+ use tokio_test::task;
+
+ let (tx1, mut rx1) = oneshot::channel();
+ let (tx2, mut rx2) = oneshot::channel();
+
+ let mut f = task::spawn(async {
+ let mut a = None;
+ let mut b = None;
+
+ while a.is_none() || b.is_none() {
+ tokio::select! {
+ v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)),
+ v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2))
+ }
+ }
+
+ (a.unwrap(), b.unwrap())
+ });
+
+ assert_pending!(f.poll());
+
+ assert_ok!(tx1.send(123));
+ assert!(f.is_woken());
+ assert_pending!(f.poll());
+
+ assert_ok!(tx2.send(456));
+ assert!(f.is_woken());
+ let (a, b) = assert_ready!(f.poll());
+
+ assert_eq!(a, 123);
+ assert_eq!(b, 456);
+}
+
+#[tokio::test]
+async fn use_future_in_if_condition() {
+ use tokio::time::{self, Duration};
+
+ let mut delay = time::delay_for(Duration::from_millis(50));
+
+ tokio::select! {
+ _ = &mut delay, if !delay.is_elapsed() => {
+ }
+ _ = async { 1 } => {
+ }
+ }
+}
+
+#[tokio::test]
+async fn many_branches() {
+ let num = tokio::select! {
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ x = async { 1 } => x,
+ };
+
+ assert_eq!(1, num);
+}
+
+async fn one() -> usize {
+ 1
+}
+
+async fn require_mutable(_: &mut i32) {}
+async fn async_noop() {}
diff --git a/third_party/rust/tokio/tests/macros_try_join.rs b/third_party/rust/tokio/tests/macros_try_join.rs
new file mode 100644
index 0000000000..faa55421a2
--- /dev/null
+++ b/third_party/rust/tokio/tests/macros_try_join.rs
@@ -0,0 +1,100 @@
+use tokio::sync::oneshot;
+use tokio_test::{assert_pending, assert_ready, task};
+
+#[tokio::test]
+async fn sync_one_lit_expr_comma() {
+ let foo = tokio::try_join!(async { ok(1) },);
+
+ assert_eq!(foo, Ok((1,)));
+}
+
+#[tokio::test]
+async fn sync_one_lit_expr_no_comma() {
+ let foo = tokio::try_join!(async { ok(1) });
+
+ assert_eq!(foo, Ok((1,)));
+}
+
+#[tokio::test]
+async fn sync_two_lit_expr_comma() {
+ let foo = tokio::try_join!(async { ok(1) }, async { ok(2) },);
+
+ assert_eq!(foo, Ok((1, 2)));
+}
+
+#[tokio::test]
+async fn sync_two_lit_expr_no_comma() {
+ let foo = tokio::try_join!(async { ok(1) }, async { ok(2) });
+
+ assert_eq!(foo, Ok((1, 2)));
+}
+
+#[tokio::test]
+async fn two_await() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+
+ let mut join =
+ task::spawn(async { tokio::try_join!(async { rx1.await }, async { rx2.await }) });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ tx1.send("hello").unwrap();
+ assert!(join.is_woken());
+ let res: Result<(&str, u32), _> = assert_ready!(join.poll());
+
+ assert_eq!(Ok(("hello", 123)), res);
+}
+
+#[tokio::test]
+async fn err_abort_early() {
+ let (tx1, rx1) = oneshot::channel::<&str>();
+ let (tx2, rx2) = oneshot::channel::<u32>();
+ let (_tx3, rx3) = oneshot::channel::<u32>();
+
+ let mut join = task::spawn(async {
+ tokio::try_join!(async { rx1.await }, async { rx2.await }, async {
+ rx3.await
+ })
+ });
+
+ assert_pending!(join.poll());
+
+ tx2.send(123).unwrap();
+ assert!(join.is_woken());
+ assert_pending!(join.poll());
+
+ drop(tx1);
+ assert!(join.is_woken());
+
+ let res = assert_ready!(join.poll());
+
+ assert!(res.is_err());
+}
+
+#[test]
+fn join_size() {
+ use futures::future;
+ use std::mem;
+
+ let fut = async {
+ let ready = future::ready(ok(0i32));
+ tokio::try_join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(ok(0i32));
+ let ready2 = future::ready(ok(0i32));
+ tokio::try_join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
+
+fn ok<T>(val: T) -> Result<T, ()> {
+ Ok(val)
+}
diff --git a/third_party/rust/tokio/tests/net_bind_resource.rs b/third_party/rust/tokio/tests/net_bind_resource.rs
new file mode 100644
index 0000000000..d4a0b8dab0
--- /dev/null
+++ b/third_party/rust/tokio/tests/net_bind_resource.rs
@@ -0,0 +1,14 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::TcpListener;
+
+use std::convert::TryFrom;
+use std::net;
+
+#[test]
+#[should_panic]
+fn no_runtime_panics_binding_net_tcp_listener() {
+ let listener = net::TcpListener::bind("127.0.0.1:0").expect("failed to bind listener");
+ let _ = TcpListener::try_from(listener);
+}
diff --git a/third_party/rust/tokio/tests/net_lookup_host.rs b/third_party/rust/tokio/tests/net_lookup_host.rs
new file mode 100644
index 0000000000..4d06402988
--- /dev/null
+++ b/third_party/rust/tokio/tests/net_lookup_host.rs
@@ -0,0 +1,36 @@
+use tokio::net;
+use tokio_test::assert_ok;
+
+use std::io;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+#[tokio::test]
+async fn lookup_socket_addr() {
+ let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
+
+ let actual = assert_ok!(net::lookup_host(addr).await).collect::<Vec<_>>();
+ assert_eq!(vec![addr], actual);
+}
+
+#[tokio::test]
+async fn lookup_str_socket_addr() {
+ let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
+
+ let actual = assert_ok!(net::lookup_host("127.0.0.1:8000").await).collect::<Vec<_>>();
+ assert_eq!(vec![addr], actual);
+}
+
+#[tokio::test]
+async fn resolve_dns() -> io::Result<()> {
+ let mut hosts = net::lookup_host("localhost:3000").await?;
+ let host = hosts.next().unwrap();
+
+ let expected = if host.is_ipv4() {
+ SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000)
+ } else {
+ SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 3000)
+ };
+ assert_eq!(host, expected);
+
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/no_rt.rs b/third_party/rust/tokio/tests/no_rt.rs
new file mode 100644
index 0000000000..962eed7952
--- /dev/null
+++ b/third_party/rust/tokio/tests/no_rt.rs
@@ -0,0 +1,27 @@
+use tokio::net::TcpStream;
+use tokio::sync::oneshot;
+use tokio::time::{timeout, Duration};
+
+use futures::executor::block_on;
+
+use std::net::TcpListener;
+
+#[test]
+#[should_panic(expected = "no timer running")]
+fn panics_when_no_timer() {
+ block_on(timeout_value());
+}
+
+#[test]
+#[should_panic(expected = "no reactor running")]
+fn panics_when_no_reactor() {
+ let srv = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = srv.local_addr().unwrap();
+ block_on(TcpStream::connect(&addr)).unwrap();
+}
+
+async fn timeout_value() {
+ let (_tx, rx) = oneshot::channel::<()>();
+ let dur = Duration::from_millis(20);
+ let _ = timeout(dur, rx).await;
+}
diff --git a/third_party/rust/tokio/tests/process_issue_2174.rs b/third_party/rust/tokio/tests/process_issue_2174.rs
new file mode 100644
index 0000000000..b5a63ceee8
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_issue_2174.rs
@@ -0,0 +1,46 @@
+#![cfg(feature = "process")]
+#![warn(rust_2018_idioms)]
+// This test reveals a difference in kqueue's behavior on FreeBSD: when the
+// reader disconnects, no `EVFILT_WRITE` event seems to be returned.
+//
+// The expectation is that `EVFILT_WRITE` would be returned with either the
+// `EV_EOF` or `EV_ERROR` flag set; if either flag were set, a write would be
+// attempted, but that does not seem to occur.
+#![cfg(all(unix, not(target_os = "freebsd")))]
+
+use std::process::Stdio;
+use std::time::Duration;
+use tokio::prelude::*;
+use tokio::process::Command;
+use tokio::time;
+use tokio_test::assert_err;
+
+#[tokio::test]
+async fn issue_2174() {
+ let mut child = Command::new("sleep")
+ .arg("2")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::null())
+ .spawn()
+ .unwrap();
+ let mut input = child.stdin.take().unwrap();
+
+    // Writes will buffer up to 65_536 bytes. This *should* loop at least
+    // 8 times and then register interest.
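+    // (65_536 / 8_192 = 8, so eight full writes fit before the pipe backs up
+    // and the task parks awaiting writable readiness.)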
+ let handle = tokio::spawn(async move {
+ let data = [0u8; 8192];
+ loop {
+ input.write_all(&data).await.unwrap();
+ }
+ });
+
+    // Sleep long enough for the child process's stdin buffer to fill.
+ time::delay_for(Duration::from_secs(1)).await;
+
+ // Kill the child process.
+ child.kill().unwrap();
+ let _ = child.await;
+
+ assert_err!(handle.await);
+}
diff --git a/third_party/rust/tokio/tests/process_issue_42.rs b/third_party/rust/tokio/tests/process_issue_42.rs
new file mode 100644
index 0000000000..aa70af3b56
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_issue_42.rs
@@ -0,0 +1,36 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use futures::future::join_all;
+use std::process::Stdio;
+use tokio::process::Command;
+use tokio::task;
+
+#[tokio::test]
+async fn issue_42() {
+    // We spawn many batches of processes, which should exit at roughly the
+    // same time (modulo OS scheduling delays), to make sure that consuming
+    // a readiness event for one process doesn't inadvertently starve another.
+    // We then do this many times (in parallel) in an effort to stress test
+    // the implementation and ensure there are no race conditions.
+    // See alexcrichton/tokio-process#42 for background.
+ let join_handles = (0..10usize).map(|_| {
+ task::spawn(async {
+ let processes = (0..10usize).map(|i| {
+ Command::new("echo")
+ .arg(format!("I am spawned process #{}", i))
+ .stdin(Stdio::null())
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .kill_on_drop(true)
+ .spawn()
+ .unwrap()
+ });
+
+ join_all(processes).await;
+ })
+ });
+
+ join_all(join_handles).await;
+}
diff --git a/third_party/rust/tokio/tests/process_kill_on_drop.rs b/third_party/rust/tokio/tests/process_kill_on_drop.rs
new file mode 100644
index 0000000000..f376c15475
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_kill_on_drop.rs
@@ -0,0 +1,42 @@
+#![cfg(all(unix, feature = "process"))]
+#![warn(rust_2018_idioms)]
+
+use std::process::Stdio;
+use std::time::Duration;
+use tokio::io::AsyncReadExt;
+use tokio::process::Command;
+use tokio::time::delay_for;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn kill_on_drop() {
+ let mut cmd = Command::new("sh");
+ cmd.args(&[
+ "-c",
+ "
+ # Fork another child that won't get killed
+ sh -c 'sleep 1; echo child ran' &
+ disown -a
+
+ # Await our death
+ sleep 5
+ echo hello from beyond the grave
+ ",
+ ]);
+
+ let mut child = cmd
+ .kill_on_drop(true)
+ .stdout(Stdio::piped())
+ .spawn()
+ .unwrap();
+
+ delay_for(Duration::from_secs(2)).await;
+
+ let mut out = child.stdout.take().unwrap();
+ drop(child);
+
+ let mut msg = String::new();
+ assert_ok!(out.read_to_string(&mut msg).await);
+
+ assert_eq!("child ran\n", msg);
+}
diff --git a/third_party/rust/tokio/tests/process_smoke.rs b/third_party/rust/tokio/tests/process_smoke.rs
new file mode 100644
index 0000000000..d16d1d72c1
--- /dev/null
+++ b/third_party/rust/tokio/tests/process_smoke.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::process::Command;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn simple() {
+ let mut cmd;
+
+ if cfg!(windows) {
+ cmd = Command::new("cmd");
+ cmd.arg("/c");
+ } else {
+ cmd = Command::new("sh");
+ cmd.arg("-c");
+ }
+
+ let mut child = cmd.arg("exit 2").spawn().unwrap();
+
+ let id = child.id();
+ assert!(id > 0);
+
+ let status = assert_ok!((&mut child).await);
+ assert_eq!(status.code(), Some(2));
+
+ assert_eq!(child.id(), id);
+ drop(child.kill());
+}
diff --git a/third_party/rust/tokio/tests/rt_basic.rs b/third_party/rust/tokio/tests/rt_basic.rs
new file mode 100644
index 0000000000..b9e373b88f
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_basic.rs
@@ -0,0 +1,135 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::runtime::Runtime;
+use tokio::sync::{mpsc, oneshot};
+use tokio_test::{assert_err, assert_ok};
+
+use std::thread;
+use std::time::Duration;
+
+#[test]
+fn spawned_task_does_not_progress_without_block_on() {
+ let (tx, mut rx) = oneshot::channel();
+
+ let mut rt = rt();
+
+ rt.spawn(async move {
+ assert_ok!(tx.send("hello"));
+ });
+
+ thread::sleep(Duration::from_millis(50));
+
+ assert_err!(rx.try_recv());
+
+ let out = rt.block_on(async { assert_ok!(rx.await) });
+
+ assert_eq!(out, "hello");
+}
+
+#[test]
+fn no_extra_poll() {
+ use std::pin::Pin;
+ use std::sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Arc,
+ };
+ use std::task::{Context, Poll};
+ use tokio::stream::{Stream, StreamExt};
+
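+    // Wraps a stream and counts every `poll_next` call so the test can assert
+    // the scheduler never polls the task more often than necessary.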
+ struct TrackPolls<S> {
+ npolls: Arc<AtomicUsize>,
+ s: S,
+ }
+
+ impl<S> Stream for TrackPolls<S>
+ where
+ S: Stream,
+ {
+ type Item = S::Item;
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ // safety: we do not move s
+ let this = unsafe { self.get_unchecked_mut() };
+ this.npolls.fetch_add(1, SeqCst);
+ // safety: we are pinned, and so is s
+ unsafe { Pin::new_unchecked(&mut this.s) }.poll_next(cx)
+ }
+ }
+
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut rx = TrackPolls {
+ npolls: Arc::new(AtomicUsize::new(0)),
+ s: rx,
+ };
+ let npolls = Arc::clone(&rx.npolls);
+
+ let mut rt = rt();
+
+ rt.spawn(async move { while let Some(_) = rx.next().await {} });
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled exactly once: the initial poll
+ assert_eq!(npolls.load(SeqCst), 1);
+
+ tx.send(()).unwrap();
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled twice more: once to yield Some(), then once to yield Pending
+ assert_eq!(npolls.load(SeqCst), 1 + 2);
+
+ drop(tx);
+ rt.block_on(async {
+ tokio::task::yield_now().await;
+ });
+
+ // should have been polled once more: to yield None
+ assert_eq!(npolls.load(SeqCst), 1 + 2 + 1);
+}
+
+#[test]
+fn acquire_mutex_in_drop() {
+ use futures::future::pending;
+ use tokio::task;
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ let mut rt = rt();
+
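+    // No task here can complete: each awaits a oneshot whose sender lives in
+    // another stuck task. Dropping the runtime therefore drops tasks whose
+    // destructors wake other tasks mid-shutdown, which must not deadlock.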
+ rt.spawn(async move {
+ let _ = rx2.await;
+ unreachable!();
+ });
+
+ rt.spawn(async move {
+ let _ = rx1.await;
+ tx2.send(()).unwrap();
+ unreachable!();
+ });
+
+ // Spawn a task that will never notify
+ rt.spawn(async move {
+ pending::<()>().await;
+ tx1.send(()).unwrap();
+ });
+
+ // Tick the loop
+ rt.block_on(async {
+ task::yield_now().await;
+ });
+
+ // Drop the rt
+ drop(rt);
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/rt_common.rs b/third_party/rust/tokio/tests/rt_common.rs
new file mode 100644
index 0000000000..8dc0da3c5a
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_common.rs
@@ -0,0 +1,1009 @@
+#![allow(clippy::needless_range_loop)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// Tests to run on both current-thread & thread-pool runtime variants.
+
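+// Stamps the shared test items out once per scheduler flavor; each generated
+// module provides its own `rt()` constructor for the tests to call.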
+macro_rules! rt_test {
+ ($($t:tt)*) => {
+ mod basic_scheduler {
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_4_threads {
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .threaded_scheduler()
+ .core_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_1_thread {
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .threaded_scheduler()
+ .core_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+ }
+}
+
+#[test]
+fn send_sync_bound() {
+ use tokio::runtime::Runtime;
+ fn is_send<T: Send + Sync>() {}
+
+ is_send::<Runtime>();
+}
+
+rt_test! {
+ use tokio::net::{TcpListener, TcpStream, UdpSocket};
+ use tokio::prelude::*;
+ use tokio::runtime::Runtime;
+ use tokio::sync::oneshot;
+ use tokio::{task, time};
+ use tokio_test::{assert_err, assert_ok};
+
+ use futures::future::poll_fn;
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::sync::{mpsc, Arc};
+ use std::task::{Context, Poll};
+ use std::thread;
+ use std::time::{Duration, Instant};
+
+ #[test]
+ fn block_on_sync() {
+ let mut rt = rt();
+
+ let mut win = false;
+ rt.block_on(async {
+ win = true;
+ });
+
+ assert!(win);
+ }
+
+ #[test]
+ fn block_on_async() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ tx.send("ZOMG").unwrap();
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_one_bg() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ });
+
+ assert_ok!(rx.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_one_join() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ let handle = tokio::spawn(async move {
+ tx.send("ZOMG").unwrap();
+ "DONE"
+ });
+
+ let msg = assert_ok!(rx.await);
+
+ let out = assert_ok!(handle.await);
+ assert_eq!(out, "DONE");
+
+ msg
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_two() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+
+ tokio::spawn(async move {
+ assert_ok!(tx1.send("ZOMG"));
+ });
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx1.await);
+ assert_ok!(tx2.send(msg));
+ });
+
+ assert_ok!(rx2.await)
+ });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn spawn_many_from_block_on() {
+ use tokio::sync::mpsc;
+
+ const ITER: usize = 200;
+
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ let (done_tx, mut done_rx) = mpsc::unbounded_channel();
+
+ let mut txs = (0..ITER)
+ .map(|i| {
+ let (tx, rx) = oneshot::channel();
+ let done_tx = done_tx.clone();
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx.await);
+ assert_eq!(i, msg);
+ assert_ok!(done_tx.send(msg));
+ });
+
+ tx
+ })
+ .collect::<Vec<_>>();
+
+ drop(done_tx);
+
+ thread::spawn(move || {
+ for (i, tx) in txs.drain(..).enumerate() {
+ assert_ok!(tx.send(i));
+ }
+ });
+
+ let mut out = vec![];
+ while let Some(i) = done_rx.recv().await {
+ out.push(i);
+ }
+
+ out.sort();
+ out
+ });
+
+ assert_eq!(ITER, out.len());
+
+ for i in 0..ITER {
+ assert_eq!(i, out[i]);
+ }
+ }
+
+ #[test]
+ fn spawn_many_from_task() {
+ use tokio::sync::mpsc;
+
+ const ITER: usize = 500;
+
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ tokio::spawn(async move {
+ let (done_tx, mut done_rx) = mpsc::unbounded_channel();
+
+ /*
+ for _ in 0..100 {
+ tokio::spawn(async move { });
+ }
+
+ tokio::task::yield_now().await;
+ */
+
+ let mut txs = (0..ITER)
+ .map(|i| {
+ let (tx, rx) = oneshot::channel();
+ let done_tx = done_tx.clone();
+
+ tokio::spawn(async move {
+ let msg = assert_ok!(rx.await);
+ assert_eq!(i, msg);
+ assert_ok!(done_tx.send(msg));
+ });
+
+ tx
+ })
+ .collect::<Vec<_>>();
+
+ drop(done_tx);
+
+ thread::spawn(move || {
+ for (i, tx) in txs.drain(..).enumerate() {
+ assert_ok!(tx.send(i));
+ }
+ });
+
+ let mut out = vec![];
+ while let Some(i) = done_rx.recv().await {
+ out.push(i);
+ }
+
+ out.sort();
+ out
+ }).await.unwrap()
+ });
+
+ assert_eq!(ITER, out.len());
+
+ for i in 0..ITER {
+ assert_eq!(i, out[i]);
+ }
+ }
+
+ #[test]
+ fn spawn_await_chain() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async {
+ assert_ok!(tokio::spawn(async {
+ assert_ok!(tokio::spawn(async {
+ "hello"
+ }).await)
+ }).await)
+ });
+
+ assert_eq!(out, "hello");
+ }
+
+ #[test]
+ fn outstanding_tasks_dropped() {
+ let mut rt = rt();
+
+ let cnt = Arc::new(());
+
+ rt.block_on(async {
+ let cnt = cnt.clone();
+
+ tokio::spawn(poll_fn(move |_| {
+ assert_eq!(2, Arc::strong_count(&cnt));
+ Poll::<()>::Pending
+ }));
+ });
+
+ assert_eq!(2, Arc::strong_count(&cnt));
+
+ drop(rt);
+
+ assert_eq!(1, Arc::strong_count(&cnt));
+ }
+
+ #[test]
+ #[should_panic]
+ fn nested_rt() {
+ let mut rt1 = rt();
+ let mut rt2 = rt();
+
+ rt1.block_on(async { rt2.block_on(async { "hello" }) });
+ }
+
+ #[test]
+ fn create_rt_in_block_on() {
+ let mut rt1 = rt();
+ let mut rt2 = rt1.block_on(async { rt() });
+ let out = rt2.block_on(async { "ZOMG" });
+
+ assert_eq!(out, "ZOMG");
+ }
+
+ #[test]
+ fn complete_block_on_under_load() {
+ let mut rt = rt();
+
+ rt.block_on(async {
+ let (tx, rx) = oneshot::channel();
+
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn complete_task_under_load() {
+ let mut rt = rt();
+
+ rt.block_on(async {
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
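+        // Each branch is guarded off once its value has arrived, since polling
+        // a completed oneshot receiver again would panic.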
+
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+ assert_ok!(tx1.send(()));
+ });
+
+ tokio::spawn(async move {
+ assert_ok!(rx1.await);
+ assert_ok!(tx2.send(()));
+ });
+
+ assert_ok!(rx2.await);
+ });
+ }
+
+ #[test]
+ fn spawn_from_other_thread_idle() {
+ let mut rt = rt();
+ let handle = rt.handle().clone();
+
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(50));
+
+ handle.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+ });
+
+ rt.block_on(async move {
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn spawn_from_other_thread_under_load() {
+ let mut rt = rt();
+ let handle = rt.handle().clone();
+
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ handle.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+ });
+
+ rt.block_on(async move {
+ // Spin hard
+ tokio::spawn(async {
+ loop {
+ yield_once().await;
+ }
+ });
+
+ assert_ok!(rx.await);
+ });
+ }
+
+ #[test]
+ fn delay_at_root() {
+ let mut rt = rt();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(50);
+
+ rt.block_on(async move {
+ time::delay_for(dur).await;
+ });
+
+ assert!(now.elapsed() >= dur);
+ }
+
+ #[test]
+ fn delay_in_spawn() {
+ let mut rt = rt();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(50);
+
+ rt.block_on(async move {
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ time::delay_for(dur).await;
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rx.await);
+ });
+
+ assert!(now.elapsed() >= dur);
+ }
+
+ #[test]
+ fn block_on_socket() {
+ let mut rt = rt();
+
+ rt.block_on(async move {
+ let (tx, rx) = oneshot::channel();
+
+ let mut listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ tokio::spawn(async move {
+ let _ = listener.accept().await;
+ tx.send(()).unwrap();
+ });
+
+ TcpStream::connect(&addr).await.unwrap();
+ rx.await.unwrap();
+ });
+ }
+
+ #[test]
+ fn spawn_from_blocking() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async move {
+ let inner = assert_ok!(tokio::task::spawn_blocking(|| {
+ tokio::spawn(async move { "hello" })
+ }).await);
+
+ assert_ok!(inner.await)
+ });
+
+ assert_eq!(out, "hello")
+ }
+
+ #[test]
+ fn spawn_blocking_from_blocking() {
+ let mut rt = rt();
+
+ let out = rt.block_on(async move {
+ let inner = assert_ok!(tokio::task::spawn_blocking(|| {
+ tokio::task::spawn_blocking(|| "hello")
+ }).await);
+
+ assert_ok!(inner.await)
+ });
+
+ assert_eq!(out, "hello")
+ }
+
+ #[test]
+ fn delay_from_blocking() {
+ let mut rt = rt();
+
+ rt.block_on(async move {
+ assert_ok!(tokio::task::spawn_blocking(|| {
+ let now = std::time::Instant::now();
+ let dur = Duration::from_millis(1);
+
+            // Use the `futures` crate's `block_on` to make sure we aren't
+            // setting up any Tokio context.
+ futures::executor::block_on(async {
+ tokio::time::delay_for(dur).await;
+ });
+
+ assert!(now.elapsed() >= dur);
+ }).await);
+ });
+ }
+
+ #[test]
+ fn socket_from_blocking() {
+ let mut rt = rt();
+
+ rt.block_on(async move {
+ let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(listener.local_addr());
+
+ let peer = tokio::task::spawn_blocking(move || {
+                // Use the `futures` crate's `block_on` to make sure we aren't
+                // setting up any Tokio context.
+ futures::executor::block_on(async {
+ assert_ok!(TcpStream::connect(addr).await);
+ });
+ });
+
+ // Wait for the client to connect
+ let _ = assert_ok!(listener.accept().await);
+
+ assert_ok!(peer.await);
+ });
+ }
+
+ #[test]
+ fn spawn_blocking_after_shutdown() {
+ let rt = rt();
+ let handle = rt.handle().clone();
+
+ // Shutdown
+ drop(rt);
+
+ handle.enter(|| {
+ let res = task::spawn_blocking(|| unreachable!());
+
+ // Avoid using a tokio runtime
+ let out = futures::executor::block_on(res);
+ assert!(out.is_err());
+ });
+ }
+
+ #[test]
+ fn io_driver_called_when_under_load() {
+ let mut rt = rt();
+
+ // Create a lot of constant load. The scheduler will always be busy.
+ for _ in 0..100 {
+ rt.spawn(async {
+ loop {
+ tokio::task::yield_now().await;
+ }
+ });
+ }
+
+ // Do some I/O work
+ rt.block_on(async {
+ let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(listener.local_addr());
+
+ let srv = tokio::spawn(async move {
+ let (mut stream, _) = assert_ok!(listener.accept().await);
+ assert_ok!(stream.write_all(b"hello world").await);
+ });
+
+ let cli = tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(addr).await);
+ let mut dst = vec![0; 11];
+
+ assert_ok!(stream.read_exact(&mut dst).await);
+ assert_eq!(dst, b"hello world");
+ });
+
+ assert_ok!(srv.await);
+ assert_ok!(cli.await);
+ });
+ }
+
+ #[test]
+ fn client_server_block_on() {
+ let mut rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async move { client_server(tx).await });
+
+ assert_ok!(rx.try_recv());
+ assert_err!(rx.try_recv());
+ }
+
+ #[test]
+ fn panic_in_task() {
+ let mut rt = rt();
+ let (tx, rx) = oneshot::channel();
+
+ struct Boom(Option<oneshot::Sender<()>>);
+
+ impl Future for Boom {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ panic!();
+ }
+ }
+
+ impl Drop for Boom {
+ fn drop(&mut self) {
+ assert!(std::thread::panicking());
+ self.0.take().unwrap().send(()).unwrap();
+ }
+ }
+
+ rt.spawn(Boom(Some(tx)));
+ assert_ok!(rt.block_on(rx));
+ }
+
+ #[test]
+ #[should_panic]
+ fn panic_in_block_on() {
+ let mut rt = rt();
+ rt.block_on(async { panic!() });
+ }
+
+ async fn yield_once() {
+ let mut yielded = false;
+ poll_fn(|cx| {
+ if yielded {
+ Poll::Ready(())
+ } else {
+ yielded = true;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ })
+ .await
+ }
+
+ #[test]
+ fn enter_and_spawn() {
+ let mut rt = rt();
+ let handle = rt.enter(|| {
+ tokio::spawn(async {})
+ });
+
+ assert_ok!(rt.block_on(handle));
+ }
+
+ #[test]
+ fn eagerly_drops_futures_on_shutdown() {
+ use std::sync::mpsc;
+
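+        // A future that never completes but signals its own destruction,
+        // letting the test observe that shutdown drops pending tasks eagerly.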
+ struct Never {
+ drop_tx: mpsc::Sender<()>,
+ }
+
+ impl Future for Never {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+ }
+
+ impl Drop for Never {
+ fn drop(&mut self) {
+ self.drop_tx.send(()).unwrap();
+ }
+ }
+
+ let mut rt = rt();
+
+ let (drop_tx, drop_rx) = mpsc::channel();
+ let (run_tx, run_rx) = oneshot::channel();
+
+ rt.block_on(async move {
+ tokio::spawn(async move {
+ assert_ok!(run_tx.send(()));
+
+ Never { drop_tx }.await
+ });
+
+ assert_ok!(run_rx.await);
+ });
+
+ drop(rt);
+
+ assert_ok!(drop_rx.recv());
+ }
+
+ #[test]
+ fn wake_while_rt_is_dropping() {
+ use tokio::task;
+
+ struct OnDrop<F: FnMut()>(F);
+
+ impl<F: FnMut()> Drop for OnDrop<F> {
+ fn drop(&mut self) {
+ (self.0)()
+ }
+ }
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let (tx3, rx3) = oneshot::channel();
+
+ let mut rt = rt();
+
+ let h1 = rt.handle().clone();
+
+ rt.handle().spawn(async move {
+ // Ensure a waker gets stored in oneshot 1.
+ let _ = rx1.await;
+ tx3.send(()).unwrap();
+ });
+
+ rt.handle().spawn(async move {
+ // When this task is dropped, we'll be "closing remotes".
+ // We spawn a new task that owns the `tx1`, to move its Drop
+ // out of here.
+ //
+ // Importantly, the oneshot 1 has a waker already stored, so
+ // the eventual drop here will try to re-schedule again.
+ let mut opt_tx1 = Some(tx1);
+ let _d = OnDrop(move || {
+ let tx1 = opt_tx1.take().unwrap();
+ h1.spawn(async move {
+ tx1.send(()).unwrap();
+ });
+ });
+ let _ = rx2.await;
+ });
+
+ rt.handle().spawn(async move {
+ let _ = rx3.await;
+ // We'll never get here, but once task 3 drops, this will
+ // force task 2 to re-schedule since it's waiting on oneshot 2.
+ tx2.send(()).unwrap();
+ });
+
+ // Tick the loop
+ rt.block_on(async {
+ task::yield_now().await;
+ });
+
+ // Drop the rt
+ drop(rt);
+ }
+
+ #[test]
+ fn io_notify_while_shutting_down() {
+ use std::net::Ipv6Addr;
+
+ for _ in 1..10 {
+ let mut runtime = rt();
+
+ runtime.block_on(async {
+ let socket = UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)).await.unwrap();
+ let addr = socket.local_addr().unwrap();
+ let (mut recv_half, mut send_half) = socket.split();
+
+ tokio::spawn(async move {
+ let mut buf = [0];
+ loop {
+ recv_half.recv_from(&mut buf).await.unwrap();
+ std::thread::sleep(Duration::from_millis(2));
+ }
+ });
+
+ tokio::spawn(async move {
+ let buf = [0];
+ loop {
+ send_half.send_to(&buf, &addr).await.unwrap();
+ tokio::time::delay_for(Duration::from_millis(1)).await;
+ }
+ });
+
+ tokio::time::delay_for(Duration::from_millis(5)).await;
+ });
+ }
+ }
+
+ #[test]
+ fn shutdown_timeout() {
+ let (tx, rx) = oneshot::channel();
+ let mut runtime = rt();
+
+ runtime.block_on(async move {
+ task::spawn_blocking(move || {
+ tx.send(()).unwrap();
+ thread::sleep(Duration::from_secs(10_000));
+ });
+
+ rx.await.unwrap();
+ });
+
+ runtime.shutdown_timeout(Duration::from_millis(100));
+ }
+
+ #[test]
+ fn runtime_in_thread_local() {
+ use std::cell::RefCell;
+ use std::thread;
+
+ thread_local!(
+ static R: RefCell<Option<Runtime>> = RefCell::new(None);
+ );
+
+ thread::spawn(|| {
+ R.with(|cell| {
+ *cell.borrow_mut() = Some(rt());
+ });
+
+ let _rt = rt();
+ }).join().unwrap();
+ }
+
+ async fn client_server(tx: mpsc::Sender<()>) {
+ let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ tokio::spawn(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+ }
+
+ #[test]
+ fn local_set_block_on_socket() {
+ let mut rt = rt();
+ let local = task::LocalSet::new();
+
+ local.block_on(&mut rt, async move {
+ let (tx, rx) = oneshot::channel();
+
+ let mut listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ task::spawn_local(async move {
+ let _ = listener.accept().await;
+ tx.send(()).unwrap();
+ });
+
+ TcpStream::connect(&addr).await.unwrap();
+ rx.await.unwrap();
+ });
+ }
+
+ #[test]
+ fn local_set_client_server_block_on() {
+ let mut rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ let local = task::LocalSet::new();
+
+ local.block_on(&mut rt, async move { client_server_local(tx).await });
+
+ assert_ok!(rx.try_recv());
+ assert_err!(rx.try_recv());
+ }
+
+ async fn client_server_local(tx: mpsc::Sender<()>) {
+ let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ task::spawn_local(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+ }
+
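+    // Cooperative scheduling caps how much work one task may do per tick, so
+    // draining 1_000 join handles in a single poll is expected to exhaust the
+    // budget and leave at least one handle Pending.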
+ #[test]
+ fn coop() {
+ use std::task::Poll::Ready;
+
+ let mut rt = rt();
+
+ rt.block_on(async {
+ // Create a bunch of tasks
+ let mut tasks = (0..1_000).map(|_| {
+ tokio::spawn(async { })
+ }).collect::<Vec<_>>();
+
+ // Hope that all the tasks complete...
+ time::delay_for(Duration::from_millis(100)).await;
+
+ poll_fn(|cx| {
+ // At least one task should not be ready
+ for task in &mut tasks {
+ if Pin::new(task).poll(cx).is_pending() {
+ return Ready(());
+ }
+ }
+
+ panic!("did not yield");
+ }).await;
+ });
+ }
+
+ // Tests that the "next task" scheduler optimization is not able to starve
+ // other tasks.
+ #[test]
+ fn ping_pong_saturation() {
+ use tokio::sync::mpsc;
+
+ const NUM: usize = 100;
+
+ let mut rt = rt();
+
+ rt.block_on(async {
+ let (spawned_tx, mut spawned_rx) = mpsc::unbounded_channel();
+
+        // Spawn a bunch of tasks that ping-pong between each other to
+        // saturate the runtime.
+ for _ in 0..NUM {
+ let (tx1, mut rx1) = mpsc::unbounded_channel();
+ let (tx2, mut rx2) = mpsc::unbounded_channel();
+ let spawned_tx = spawned_tx.clone();
+
+ task::spawn(async move {
+ spawned_tx.send(()).unwrap();
+
+ tx1.send(()).unwrap();
+
+ loop {
+ rx2.recv().await.unwrap();
+ tx1.send(()).unwrap();
+ }
+ });
+
+ task::spawn(async move {
+ loop {
+ rx1.recv().await.unwrap();
+ tx2.send(()).unwrap();
+ }
+ });
+ }
+
+ for _ in 0..NUM {
+ spawned_rx.recv().await.unwrap();
+ }
+
+ // spawn another task and wait for it to complete
+ let handle = task::spawn(async {
+ for _ in 0..5 {
+ // Yielding forces it back into the local queue.
+ task::yield_now().await;
+ }
+ });
+ handle.await.unwrap();
+ });
+ }
+}
diff --git a/third_party/rust/tokio/tests/rt_threaded.rs b/third_party/rust/tokio/tests/rt_threaded.rs
new file mode 100644
index 0000000000..9c95afd5ae
--- /dev/null
+++ b/third_party/rust/tokio/tests/rt_threaded.rs
@@ -0,0 +1,327 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::runtime::{self, Runtime};
+use tokio::sync::oneshot;
+use tokio_test::{assert_err, assert_ok};
+
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::Relaxed;
+use std::sync::{mpsc, Arc};
+use std::task::{Context, Poll};
+
+#[test]
+fn single_thread() {
+ // No panic when starting a runtime w/ a single thread
+ let _ = runtime::Builder::new()
+ .threaded_scheduler()
+ .enable_all()
+ .core_threads(1)
+ .build();
+}
+
+#[test]
+fn many_oneshot_futures() {
+    // Number of spawned tasks; the last one to complete notifies the main thread.
+ const NUM: usize = 1_000;
+
+ for _ in 0..5 {
+ let (tx, rx) = mpsc::channel();
+
+ let rt = rt();
+ let cnt = Arc::new(AtomicUsize::new(0));
+
+ for _ in 0..NUM {
+ let cnt = cnt.clone();
+ let tx = tx.clone();
+
+ rt.spawn(async move {
+ let num = cnt.fetch_add(1, Relaxed) + 1;
+
+ if num == NUM {
+ tx.send(()).unwrap();
+ }
+ });
+ }
+
+ rx.recv().unwrap();
+
+        // Wait for the pool to shut down.
+ drop(rt);
+ }
+}
+
+#[test]
+fn many_multishot_futures() {
+ use tokio::sync::mpsc;
+
+ const CHAIN: usize = 200;
+ const CYCLES: usize = 5;
+ const TRACKS: usize = 50;
+
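+    // Topology: TRACKS independent chains of CHAIN forwarding tasks each; a
+    // message cycles through its chain CYCLES times before reaching final_rx.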
+ for _ in 0..50 {
+ let mut rt = rt();
+ let mut start_txs = Vec::with_capacity(TRACKS);
+ let mut final_rxs = Vec::with_capacity(TRACKS);
+
+ for _ in 0..TRACKS {
+ let (start_tx, mut chain_rx) = mpsc::channel(10);
+
+ for _ in 0..CHAIN {
+ let (mut next_tx, next_rx) = mpsc::channel(10);
+
+ // Forward all the messages
+ rt.spawn(async move {
+ while let Some(v) = chain_rx.recv().await {
+ next_tx.send(v).await.unwrap();
+ }
+ });
+
+ chain_rx = next_rx;
+ }
+
+ // This final task cycles if needed
+ let (mut final_tx, final_rx) = mpsc::channel(10);
+ let mut cycle_tx = start_tx.clone();
+ let mut rem = CYCLES;
+
+ rt.spawn(async move {
+ for _ in 0..CYCLES {
+ let msg = chain_rx.recv().await.unwrap();
+
+ rem -= 1;
+
+ if rem == 0 {
+ final_tx.send(msg).await.unwrap();
+ } else {
+ cycle_tx.send(msg).await.unwrap();
+ }
+ }
+ });
+
+ start_txs.push(start_tx);
+ final_rxs.push(final_rx);
+ }
+
+ {
+ rt.block_on(async move {
+ for mut start_tx in start_txs {
+ start_tx.send("ping").await.unwrap();
+ }
+
+ for mut final_rx in final_rxs {
+ final_rx.recv().await.unwrap();
+ }
+ });
+ }
+ }
+}
+
+#[test]
+fn spawn_shutdown() {
+ let mut rt = rt();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async {
+ tokio::spawn(client_server(tx.clone()));
+ });
+
+ // Use spawner
+ rt.spawn(client_server(tx));
+
+ assert_ok!(rx.recv());
+ assert_ok!(rx.recv());
+
+ drop(rt);
+ assert_err!(rx.try_recv());
+}
+
+async fn client_server(tx: mpsc::Sender<()>) {
+ let mut server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+
+ // Get the assigned address
+ let addr = assert_ok!(server.local_addr());
+
+ // Spawn the server
+ tokio::spawn(async move {
+ // Accept a socket
+ let (mut socket, _) = server.accept().await.unwrap();
+
+ // Write some data
+ socket.write_all(b"hello").await.unwrap();
+ });
+
+ let mut client = TcpStream::connect(&addr).await.unwrap();
+
+ let mut buf = vec![];
+ client.read_to_end(&mut buf).await.unwrap();
+
+ assert_eq!(buf, b"hello");
+ tx.send(()).unwrap();
+}
+
+#[test]
+fn drop_threadpool_drops_futures() {
+ for _ in 0..1_000 {
+ let num_inc = Arc::new(AtomicUsize::new(0));
+ let num_dec = Arc::new(AtomicUsize::new(0));
+ let num_drop = Arc::new(AtomicUsize::new(0));
+
+ struct Never(Arc<AtomicUsize>);
+
+ impl Future for Never {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+ }
+
+ impl Drop for Never {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Relaxed);
+ }
+ }
+
+ let a = num_inc.clone();
+ let b = num_dec.clone();
+
+ let rt = runtime::Builder::new()
+ .threaded_scheduler()
+ .enable_all()
+ .on_thread_start(move || {
+ a.fetch_add(1, Relaxed);
+ })
+ .on_thread_stop(move || {
+ b.fetch_add(1, Relaxed);
+ })
+ .build()
+ .unwrap();
+
+ rt.spawn(Never(num_drop.clone()));
+
+        // Wait for the pool to shut down.
+ drop(rt);
+
+        // Assert that at least one thread was spawned.
+ let a = num_inc.load(Relaxed);
+ assert!(a >= 1);
+
+ // Assert that all threads shutdown
+ let b = num_dec.load(Relaxed);
+ assert_eq!(a, b);
+
+ // Assert that the future was dropped
+ let c = num_drop.load(Relaxed);
+ assert_eq!(c, 1);
+ }
+}
+
+#[test]
+fn start_stop_callbacks_called() {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ let after_start = Arc::new(AtomicUsize::new(0));
+ let before_stop = Arc::new(AtomicUsize::new(0));
+
+ let after_inner = after_start.clone();
+ let before_inner = before_stop.clone();
+ let mut rt = tokio::runtime::Builder::new()
+ .threaded_scheduler()
+ .enable_all()
+ .on_thread_start(move || {
+ after_inner.clone().fetch_add(1, Ordering::Relaxed);
+ })
+ .on_thread_stop(move || {
+ before_inner.clone().fetch_add(1, Ordering::Relaxed);
+ })
+ .build()
+ .unwrap();
+
+ let (tx, rx) = oneshot::channel();
+
+ rt.spawn(async move {
+ assert_ok!(tx.send(()));
+ });
+
+ assert_ok!(rt.block_on(rx));
+
+ drop(rt);
+
+ assert!(after_start.load(Ordering::Relaxed) > 0);
+ assert!(before_stop.load(Ordering::Relaxed) > 0);
+}
+
+#[test]
+fn blocking() {
+    // Number of spawned tasks; the last one to complete notifies the main thread.
+ const NUM: usize = 1_000;
+
+ for _ in 0..10 {
+ let (tx, rx) = mpsc::channel();
+
+ let rt = rt();
+ let cnt = Arc::new(AtomicUsize::new(0));
+
+        // There are four workers in the pool, so if we run four blocking
+        // tasks, we know that a handoff must have happened.
+ let block = Arc::new(std::sync::Barrier::new(5));
+ for _ in 0..4 {
+ let block = block.clone();
+ rt.spawn(async move {
+ tokio::task::block_in_place(move || {
+ block.wait();
+ block.wait();
+ })
+ });
+ }
+ block.wait();
+
+ for _ in 0..NUM {
+ let cnt = cnt.clone();
+ let tx = tx.clone();
+
+ rt.spawn(async move {
+ let num = cnt.fetch_add(1, Relaxed) + 1;
+
+ if num == NUM {
+ tx.send(()).unwrap();
+ }
+ });
+ }
+
+ rx.recv().unwrap();
+
+        // Release the blocking tasks from the barrier's second round so the
+        // pool can shut down.
+ block.wait();
+ }
+}
+
+#[test]
+fn multi_threadpool() {
+ use tokio::sync::oneshot;
+
+ let rt1 = rt();
+ let rt2 = rt();
+
+ let (tx, rx) = oneshot::channel();
+ let (done_tx, done_rx) = mpsc::channel();
+
+ rt2.spawn(async move {
+ rx.await.unwrap();
+ done_tx.send(()).unwrap();
+ });
+
+ rt1.spawn(async move {
+ tx.send(()).unwrap();
+ });
+
+ done_rx.recv().unwrap();
+}
+
+fn rt() -> Runtime {
+ Runtime::new().unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_ctrl_c.rs b/third_party/rust/tokio/tests/signal_ctrl_c.rs
new file mode 100644
index 0000000000..4b057ee7e1
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_ctrl_c.rs
@@ -0,0 +1,30 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal;
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn ctrl_c() {
+ let ctrl_c = signal::ctrl_c();
+
+ let (fire, wait) = oneshot::channel();
+
+ // NB: simulate a signal coming in by exercising our signal handler
+ // to avoid complications with sending SIGINT to the test process
+ tokio::spawn(async {
+ wait.await.expect("wait failed");
+ send_signal(libc::SIGINT);
+ });
+
+ let _ = fire.send(());
+
+ assert_ok!(ctrl_c.await);
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_recv.rs b/third_party/rust/tokio/tests/signal_drop_recv.rs
new file mode 100644
index 0000000000..b0d9213e61
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_recv.rs
@@ -0,0 +1,22 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn drop_then_get_a_signal() {
+ let kind = SignalKind::user_defined1();
+ let sig = signal(kind).expect("failed to create first signal");
+ drop(sig);
+
+ send_signal(libc::SIGUSR1);
+ let mut sig = signal(kind).expect("failed to create second signal");
+
+ let _ = sig.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_rt.rs b/third_party/rust/tokio/tests/signal_drop_rt.rs
new file mode 100644
index 0000000000..aeedd96e4e
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_rt.rs
@@ -0,0 +1,45 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::runtime::Runtime;
+use tokio::signal::unix::{signal, SignalKind};
+
+#[test]
+fn dropping_loops_does_not_cause_starvation() {
+ let kind = SignalKind::user_defined1();
+
+ let mut first_rt = rt();
+ let mut first_signal =
+ first_rt.block_on(async { signal(kind).expect("failed to register first signal") });
+
+ let mut second_rt = rt();
+ let mut second_signal =
+ second_rt.block_on(async { signal(kind).expect("failed to register second signal") });
+
+ send_signal(libc::SIGUSR1);
+
+ first_rt
+ .block_on(first_signal.recv())
+ .expect("failed to await first signal");
+
+ drop(first_rt);
+ drop(first_signal);
+
+ send_signal(libc::SIGUSR1);
+
+ second_rt.block_on(second_signal.recv());
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_drop_signal.rs b/third_party/rust/tokio/tests/signal_drop_signal.rs
new file mode 100644
index 0000000000..92ac4050d5
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_drop_signal.rs
@@ -0,0 +1,26 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn dropping_signal_does_not_deregister_any_other_instances() {
+ let kind = SignalKind::user_defined1();
+
+ // Signals should not starve based on ordering
+ let first_duplicate_signal = signal(kind).expect("failed to register first duplicate signal");
+ let mut sig = signal(kind).expect("failed to register signal");
+ let second_duplicate_signal = signal(kind).expect("failed to register second duplicate signal");
+
+ drop(first_duplicate_signal);
+ drop(second_duplicate_signal);
+
+ send_signal(libc::SIGUSR1);
+ let _ = sig.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_multi_rt.rs b/third_party/rust/tokio/tests/signal_multi_rt.rs
new file mode 100644
index 0000000000..9d78469578
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_multi_rt.rs
@@ -0,0 +1,55 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::runtime::Runtime;
+use tokio::signal::unix::{signal, SignalKind};
+
+use std::sync::mpsc::channel;
+use std::thread;
+
+#[test]
+fn multi_loop() {
+ // An "ordinary" (non-future) channel
+ let (sender, receiver) = channel();
+ // Run multiple times, to make sure there are no race conditions
+ for _ in 0..10 {
+ // Run multiple event loops, each one in its own thread
+ let threads: Vec<_> = (0..4)
+ .map(|_| {
+ let sender = sender.clone();
+ thread::spawn(move || {
+ let mut rt = rt();
+ let _ = rt.block_on(async {
+ let mut signal = signal(SignalKind::hangup()).unwrap();
+ sender.send(()).unwrap();
+ signal.recv().await
+ });
+ })
+ })
+ .collect();
+ // Wait for them to declare they're ready
+ for &_ in threads.iter() {
+ receiver.recv().unwrap();
+ }
+ // Send a signal
+ send_signal(libc::SIGHUP);
+ // Make sure the threads terminated correctly
+ for t in threads {
+ t.join().unwrap();
+ }
+ }
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/signal_no_rt.rs b/third_party/rust/tokio/tests/signal_no_rt.rs
new file mode 100644
index 0000000000..b0f32b2d10
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_no_rt.rs
@@ -0,0 +1,11 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[test]
+#[should_panic]
+fn no_runtime_panics_creating_signals() {
+ let _ = signal(SignalKind::hangup());
+}
diff --git a/third_party/rust/tokio/tests/signal_notify_both.rs b/third_party/rust/tokio/tests/signal_notify_both.rs
new file mode 100644
index 0000000000..3481f808b3
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_notify_both.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn notify_both() {
+ let kind = SignalKind::user_defined2();
+
+ let mut signal1 = signal(kind).expect("failed to create signal1");
+ let mut signal2 = signal(kind).expect("failed to create signal2");
+
+ send_signal(libc::SIGUSR2);
+
+ signal1.recv().await;
+ signal2.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/signal_twice.rs b/third_party/rust/tokio/tests/signal_twice.rs
new file mode 100644
index 0000000000..8f33d22a82
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_twice.rs
@@ -0,0 +1,22 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+
+#[tokio::test]
+async fn twice() {
+ let kind = SignalKind::user_defined1();
+ let mut sig = signal(kind).expect("failed to get signal");
+
+ for _ in 0..2 {
+ send_signal(libc::SIGUSR1);
+
+ assert!(sig.recv().await.is_some());
+ }
+}
diff --git a/third_party/rust/tokio/tests/signal_usr1.rs b/third_party/rust/tokio/tests/signal_usr1.rs
new file mode 100644
index 0000000000..d74c7d31ab
--- /dev/null
+++ b/third_party/rust/tokio/tests/signal_usr1.rs
@@ -0,0 +1,23 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+mod support {
+ pub mod signal;
+}
+use support::signal::send_signal;
+
+use tokio::signal::unix::{signal, SignalKind};
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn signal_usr1() {
+ let mut signal = assert_ok!(
+ signal(SignalKind::user_defined1()),
+ "failed to create signal"
+ );
+
+ send_signal(libc::SIGUSR1);
+
+ signal.recv().await;
+}
diff --git a/third_party/rust/tokio/tests/stream_chain.rs b/third_party/rust/tokio/tests/stream_chain.rs
new file mode 100644
index 0000000000..0e14618b49
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_chain.rs
@@ -0,0 +1,71 @@
+use tokio::stream::{self, Stream, StreamExt};
+use tokio::sync::mpsc;
+use tokio_test::{assert_pending, assert_ready, task};
+
+#[tokio::test]
+async fn basic_usage() {
+ let one = stream::iter(vec![1, 2, 3]);
+ let two = stream::iter(vec![4, 5, 6]);
+
+ let mut stream = one.chain(two);
+
+ assert_eq!(stream.size_hint(), (6, Some(6)));
+ assert_eq!(stream.next().await, Some(1));
+
+ assert_eq!(stream.size_hint(), (5, Some(5)));
+ assert_eq!(stream.next().await, Some(2));
+
+ assert_eq!(stream.size_hint(), (4, Some(4)));
+ assert_eq!(stream.next().await, Some(3));
+
+ assert_eq!(stream.size_hint(), (3, Some(3)));
+ assert_eq!(stream.next().await, Some(4));
+
+ assert_eq!(stream.size_hint(), (2, Some(2)));
+ assert_eq!(stream.next().await, Some(5));
+
+ assert_eq!(stream.size_hint(), (1, Some(1)));
+ assert_eq!(stream.next().await, Some(6));
+
+ assert_eq!(stream.size_hint(), (0, Some(0)));
+ assert_eq!(stream.next().await, None);
+
+ assert_eq!(stream.size_hint(), (0, Some(0)));
+ assert_eq!(stream.next().await, None);
+}
+
+#[tokio::test]
+async fn pending_first() {
+ let (tx1, rx1) = mpsc::unbounded_channel();
+ let (tx2, rx2) = mpsc::unbounded_channel();
+
+ let mut stream = task::spawn(rx1.chain(rx2));
+ assert_eq!(stream.size_hint(), (0, None));
+
+ assert_pending!(stream.poll_next());
+
+ tx2.send(2).unwrap();
+ assert!(!stream.is_woken());
+
+ assert_pending!(stream.poll_next());
+
+ tx1.send(1).unwrap();
+ assert!(stream.is_woken());
+ assert_eq!(Some(1), assert_ready!(stream.poll_next()));
+
+ assert_pending!(stream.poll_next());
+
+ drop(tx1);
+
+ assert_eq!(stream.size_hint(), (0, None));
+
+ assert!(stream.is_woken());
+ assert_eq!(Some(2), assert_ready!(stream.poll_next()));
+
+ assert_eq!(stream.size_hint(), (0, None));
+
+ drop(tx2);
+
+ assert_eq!(stream.size_hint(), (0, None));
+ assert_eq!(None, assert_ready!(stream.poll_next()));
+}
diff --git a/third_party/rust/tokio/tests/stream_collect.rs b/third_party/rust/tokio/tests/stream_collect.rs
new file mode 100644
index 0000000000..70051e7f67
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_collect.rs
@@ -0,0 +1,172 @@
+use tokio::stream::{self, StreamExt};
+use tokio::sync::mpsc;
+use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok, task};
+
+use bytes::{Bytes, BytesMut};
+
+#[allow(clippy::let_unit_value)]
+#[tokio::test]
+async fn empty_unit() {
+ // Drains the stream.
+ let mut iter = vec![(), (), ()].into_iter();
+ let _: () = stream::iter(&mut iter).collect().await;
+ assert!(iter.next().is_none());
+}
+
+#[tokio::test]
+async fn empty_vec() {
+ let coll: Vec<u32> = stream::empty().collect().await;
+ assert!(coll.is_empty());
+}
+
+#[tokio::test]
+async fn empty_box_slice() {
+ let coll: Box<[u32]> = stream::empty().collect().await;
+ assert!(coll.is_empty());
+}
+
+#[tokio::test]
+async fn empty_bytes() {
+ let coll: Bytes = stream::empty::<&[u8]>().collect().await;
+ assert!(coll.is_empty());
+}
+
+#[tokio::test]
+async fn empty_bytes_mut() {
+ let coll: BytesMut = stream::empty::<&[u8]>().collect().await;
+ assert!(coll.is_empty());
+}
+
+#[tokio::test]
+async fn empty_string() {
+ let coll: String = stream::empty::<&str>().collect().await;
+ assert!(coll.is_empty());
+}
+
+#[tokio::test]
+async fn empty_result() {
+ let coll: Result<Vec<u32>, &str> = stream::empty().collect().await;
+ assert_eq!(Ok(vec![]), coll);
+}
+
+#[tokio::test]
+async fn collect_vec_items() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<Vec<i32>>());
+
+ assert_pending!(fut.poll());
+
+ tx.send(1).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send(2).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ drop(tx);
+ assert!(fut.is_woken());
+ let coll = assert_ready!(fut.poll());
+ assert_eq!(vec![1, 2], coll);
+}
+
+#[tokio::test]
+async fn collect_string_items() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<String>());
+
+ assert_pending!(fut.poll());
+
+ tx.send("hello ".to_string()).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send("world".to_string()).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ drop(tx);
+ assert!(fut.is_woken());
+ let coll = assert_ready!(fut.poll());
+ assert_eq!("hello world", coll);
+}
+
+#[tokio::test]
+async fn collect_str_items() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<String>());
+
+ assert_pending!(fut.poll());
+
+ tx.send("hello ").unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send("world").unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ drop(tx);
+ assert!(fut.is_woken());
+ let coll = assert_ready!(fut.poll());
+ assert_eq!("hello world", coll);
+}
+
+#[tokio::test]
+async fn collect_bytes() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<Bytes>());
+
+ assert_pending!(fut.poll());
+
+ tx.send(&b"hello "[..]).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send(&b"world"[..]).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ drop(tx);
+ assert!(fut.is_woken());
+ let coll = assert_ready!(fut.poll());
+ assert_eq!(&b"hello world"[..], coll);
+}
+
+#[tokio::test]
+async fn collect_results_ok() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<Result<String, &str>>());
+
+ assert_pending!(fut.poll());
+
+ tx.send(Ok("hello ")).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send(Ok("world")).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ drop(tx);
+ assert!(fut.is_woken());
+ let coll = assert_ready_ok!(fut.poll());
+ assert_eq!("hello world", coll);
+}
+
+#[tokio::test]
+async fn collect_results_err() {
+ let (tx, rx) = mpsc::unbounded_channel();
+ let mut fut = task::spawn(rx.collect::<Result<String, &str>>());
+
+ assert_pending!(fut.poll());
+
+ tx.send(Ok("hello ")).unwrap();
+ assert!(fut.is_woken());
+ assert_pending!(fut.poll());
+
+ tx.send(Err("oh no")).unwrap();
+ assert!(fut.is_woken());
+ let err = assert_ready_err!(fut.poll());
+ assert_eq!("oh no", err);
+}
diff --git a/third_party/rust/tokio/tests/stream_empty.rs b/third_party/rust/tokio/tests/stream_empty.rs
new file mode 100644
index 0000000000..f278076d1a
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_empty.rs
@@ -0,0 +1,11 @@
+use tokio::stream::{self, Stream, StreamExt};
+
+#[tokio::test]
+async fn basic_usage() {
+ let mut stream = stream::empty::<i32>();
+
+ for _ in 0..2 {
+ assert_eq!(stream.size_hint(), (0, Some(0)));
+ assert_eq!(None, stream.next().await);
+ }
+}
diff --git a/third_party/rust/tokio/tests/stream_fuse.rs b/third_party/rust/tokio/tests/stream_fuse.rs
new file mode 100644
index 0000000000..9d7d969f8b
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_fuse.rs
@@ -0,0 +1,50 @@
+use tokio::stream::{Stream, StreamExt};
+
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+// a stream which alternates between Some and None
+struct Alternate {
+ state: i32,
+}
+
+impl Stream for Alternate {
+ type Item = i32;
+
+ fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<i32>> {
+ let val = self.state;
+ self.state += 1;
+
+ // if it's even, Some(i32), else None
+ if val % 2 == 0 {
+ Poll::Ready(Some(val))
+ } else {
+ Poll::Ready(None)
+ }
+ }
+}
+
+#[tokio::test]
+async fn basic_usage() {
+ let mut stream = Alternate { state: 0 };
+
+ // the stream goes back and forth
+ assert_eq!(stream.next().await, Some(0));
+ assert_eq!(stream.next().await, None);
+ assert_eq!(stream.next().await, Some(2));
+ assert_eq!(stream.next().await, None);
+
+ // however, once it is fused
+ let mut stream = stream.fuse();
+
+ assert_eq!(stream.size_hint(), (0, None));
+ assert_eq!(stream.next().await, Some(4));
+
+ assert_eq!(stream.size_hint(), (0, None));
+ assert_eq!(stream.next().await, None);
+
+ // it will always return `None` after the first time.
+ assert_eq!(stream.size_hint(), (0, Some(0)));
+ assert_eq!(stream.next().await, None);
+ assert_eq!(stream.size_hint(), (0, Some(0)));
+}
diff --git a/third_party/rust/tokio/tests/stream_iter.rs b/third_party/rust/tokio/tests/stream_iter.rs
new file mode 100644
index 0000000000..45148a7a8b
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_iter.rs
@@ -0,0 +1,18 @@
+use tokio::stream;
+use tokio_test::task;
+
+use std::iter;
+
+#[tokio::test]
+async fn coop() {
+ let mut stream = task::spawn(stream::iter(iter::repeat(1)));
+
+ for _ in 0..10_000 {
+ if stream.poll_next().is_pending() {
+ assert!(stream.is_woken());
+ return;
+ }
+ }
+
+ panic!("did not yield");
+}
diff --git a/third_party/rust/tokio/tests/stream_merge.rs b/third_party/rust/tokio/tests/stream_merge.rs
new file mode 100644
index 0000000000..f0168d72ee
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_merge.rs
@@ -0,0 +1,54 @@
+use tokio::stream::{self, Stream, StreamExt};
+use tokio::sync::mpsc;
+use tokio_test::task;
+use tokio_test::{assert_pending, assert_ready};
+
+#[tokio::test]
+async fn merge_sync_streams() {
+ let mut s = stream::iter(vec![0, 2, 4, 6]).merge(stream::iter(vec![1, 3, 5]));
+
+ for i in 0..7 {
+ let rem = 7 - i;
+ assert_eq!(s.size_hint(), (rem, Some(rem)));
+ assert_eq!(Some(i), s.next().await);
+ }
+
+ assert!(s.next().await.is_none());
+}
+
+#[tokio::test]
+async fn merge_async_streams() {
+ let (tx1, rx1) = mpsc::unbounded_channel();
+ let (tx2, rx2) = mpsc::unbounded_channel();
+
+ let mut rx = task::spawn(rx1.merge(rx2));
+
+ assert_eq!(rx.size_hint(), (0, None));
+
+ assert_pending!(rx.poll_next());
+
+ tx1.send(1).unwrap();
+
+ assert!(rx.is_woken());
+ assert_eq!(Some(1), assert_ready!(rx.poll_next()));
+
+ assert_pending!(rx.poll_next());
+ tx2.send(2).unwrap();
+
+ assert!(rx.is_woken());
+ assert_eq!(Some(2), assert_ready!(rx.poll_next()));
+ assert_pending!(rx.poll_next());
+
+ drop(tx1);
+ assert!(rx.is_woken());
+ assert_pending!(rx.poll_next());
+
+ tx2.send(3).unwrap();
+ assert!(rx.is_woken());
+ assert_eq!(Some(3), assert_ready!(rx.poll_next()));
+ assert_pending!(rx.poll_next());
+
+ drop(tx2);
+ assert!(rx.is_woken());
+ assert_eq!(None, assert_ready!(rx.poll_next()));
+}
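
`merge` polls both inputs and yields items as they become ready, ending
only once both inputs have ended; with two already-ready `iter` streams,
this version alternates between them, as `merge_sync_streams` asserts. A
minimal sketch (illustrative, not part of the patch):

    use tokio::stream::{self, StreamExt};

    #[tokio::main]
    async fn main() {
        let evens = stream::iter(vec![0, 2, 4]);
        let odds = stream::iter(vec![1, 3]);

        // Alternates between the two ready streams.
        let merged: Vec<i32> = evens.merge(odds).collect().await;
        assert_eq!(merged, vec![0, 1, 2, 3, 4]);
    }
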
diff --git a/third_party/rust/tokio/tests/stream_once.rs b/third_party/rust/tokio/tests/stream_once.rs
new file mode 100644
index 0000000000..bb4635ac9e
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_once.rs
@@ -0,0 +1,12 @@
+use tokio::stream::{self, Stream, StreamExt};
+
+#[tokio::test]
+async fn basic_usage() {
+ let mut one = stream::once(1);
+
+ assert_eq!(one.size_hint(), (1, Some(1)));
+ assert_eq!(Some(1), one.next().await);
+
+ assert_eq!(one.size_hint(), (0, Some(0)));
+ assert_eq!(None, one.next().await);
+}
diff --git a/third_party/rust/tokio/tests/stream_pending.rs b/third_party/rust/tokio/tests/stream_pending.rs
new file mode 100644
index 0000000000..f4d3080de8
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_pending.rs
@@ -0,0 +1,14 @@
+use tokio::stream::{self, Stream, StreamExt};
+use tokio_test::{assert_pending, task};
+
+#[tokio::test]
+async fn basic_usage() {
+ let mut stream = stream::pending::<i32>();
+
+ for _ in 0..2 {
+ assert_eq!(stream.size_hint(), (0, None));
+
+ let mut next = task::spawn(async { stream.next().await });
+ assert_pending!(next.poll());
+ }
+}
diff --git a/third_party/rust/tokio/tests/stream_reader.rs b/third_party/rust/tokio/tests/stream_reader.rs
new file mode 100644
index 0000000000..8370df4dac
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_reader.rs
@@ -0,0 +1,35 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use bytes::Bytes;
+use tokio::io::{stream_reader, AsyncReadExt};
+use tokio::stream::iter;
+
+#[tokio::test]
+async fn test_stream_reader() -> std::io::Result<()> {
+ let stream = iter(vec![
+ Ok(Bytes::from_static(&[])),
+ Ok(Bytes::from_static(&[0, 1, 2, 3])),
+ Ok(Bytes::from_static(&[])),
+ Ok(Bytes::from_static(&[4, 5, 6, 7])),
+ Ok(Bytes::from_static(&[])),
+ Ok(Bytes::from_static(&[8, 9, 10, 11])),
+ Ok(Bytes::from_static(&[])),
+ ]);
+
+ let mut read = stream_reader(stream);
+
+ let mut buf = [0; 5];
+ read.read_exact(&mut buf).await?;
+ assert_eq!(buf, [0, 1, 2, 3, 4]);
+
+ assert_eq!(read.read(&mut buf).await?, 3);
+ assert_eq!(&buf[..3], [5, 6, 7]);
+
+ assert_eq!(read.read(&mut buf).await?, 4);
+ assert_eq!(&buf[..4], [8, 9, 10, 11]);
+
+ assert_eq!(read.read(&mut buf).await?, 0);
+
+ Ok(())
+}
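
`stream_reader` adapts a `Stream` of byte buffers into an `AsyncRead`,
flattening chunk boundaries and skipping empty chunks, which is exactly
what the reads above verify. A minimal sketch (illustrative, not part of
the patch):

    use bytes::Bytes;
    use tokio::io::{stream_reader, AsyncReadExt};
    use tokio::stream::iter;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let chunks = iter(vec![
            Ok(Bytes::from_static(b"hello ")),
            Ok(Bytes::from_static(b"world")),
        ]);

        // Reads see one contiguous byte sequence, not the chunking.
        let mut reader = stream_reader(chunks);
        let mut out = String::new();
        reader.read_to_string(&mut out).await?;
        assert_eq!(out, "hello world");
        Ok(())
    }
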
diff --git a/third_party/rust/tokio/tests/stream_stream_map.rs b/third_party/rust/tokio/tests/stream_stream_map.rs
new file mode 100644
index 0000000000..6b49803234
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_stream_map.rs
@@ -0,0 +1,374 @@
+use tokio::stream::{self, pending, Stream, StreamExt, StreamMap};
+use tokio::sync::mpsc;
+use tokio_test::{assert_ok, assert_pending, assert_ready, task};
+
+use std::pin::Pin;
+
+macro_rules! assert_ready_some {
+ ($($t:tt)*) => {
+ match assert_ready!($($t)*) {
+ Some(v) => v,
+ None => panic!("expected `Some`, got `None`"),
+ }
+ };
+}
+
+macro_rules! assert_ready_none {
+ ($($t:tt)*) => {
+ match assert_ready!($($t)*) {
+ None => {}
+ Some(v) => panic!("expected `None`, got `Some({:?})`", v),
+ }
+ };
+}
+
+#[tokio::test]
+async fn empty() {
+ let mut map = StreamMap::<&str, stream::Pending<()>>::new();
+
+ assert_eq!(map.len(), 0);
+ assert!(map.is_empty());
+
+ assert!(map.next().await.is_none());
+ assert!(map.next().await.is_none());
+
+ assert!(map.remove("foo").is_none());
+}
+
+#[tokio::test]
+async fn single_entry() {
+ let mut map = task::spawn(StreamMap::new());
+ let (tx, rx) = mpsc::unbounded_channel();
+
+ assert_ready_none!(map.poll_next());
+
+ assert!(map.insert("foo", rx).is_none());
+ assert!(map.contains_key("foo"));
+ assert!(!map.contains_key("bar"));
+
+ assert_eq!(map.len(), 1);
+ assert!(!map.is_empty());
+
+ assert_pending!(map.poll_next());
+
+ assert_ok!(tx.send(1));
+
+ assert!(map.is_woken());
+ let (k, v) = assert_ready_some!(map.poll_next());
+ assert_eq!(k, "foo");
+ assert_eq!(v, 1);
+
+ assert_pending!(map.poll_next());
+
+ assert_ok!(tx.send(2));
+
+ assert!(map.is_woken());
+ let (k, v) = assert_ready_some!(map.poll_next());
+ assert_eq!(k, "foo");
+ assert_eq!(v, 2);
+
+ assert_pending!(map.poll_next());
+ drop(tx);
+ assert!(map.is_woken());
+ assert_ready_none!(map.poll_next());
+}
+
+#[tokio::test]
+async fn multiple_entries() {
+ let mut map = task::spawn(StreamMap::new());
+ let (tx1, rx1) = mpsc::unbounded_channel();
+ let (tx2, rx2) = mpsc::unbounded_channel();
+
+ map.insert("foo", rx1);
+ map.insert("bar", rx2);
+
+ assert_pending!(map.poll_next());
+
+ assert_ok!(tx1.send(1));
+
+ assert!(map.is_woken());
+ let (k, v) = assert_ready_some!(map.poll_next());
+ assert_eq!(k, "foo");
+ assert_eq!(v, 1);
+
+ assert_pending!(map.poll_next());
+
+ assert_ok!(tx2.send(2));
+
+ assert!(map.is_woken());
+ let (k, v) = assert_ready_some!(map.poll_next());
+ assert_eq!(k, "bar");
+ assert_eq!(v, 2);
+
+ assert_pending!(map.poll_next());
+
+ assert_ok!(tx1.send(3));
+ assert_ok!(tx2.send(4));
+
+ assert!(map.is_woken());
+
+ // Given the randomization, there is no guarantee what order the values will
+ // be received in.
+ let mut v = (0..2)
+ .map(|_| assert_ready_some!(map.poll_next()))
+ .collect::<Vec<_>>();
+
+ assert_pending!(map.poll_next());
+
+ v.sort();
+ assert_eq!(v[0].0, "bar");
+ assert_eq!(v[0].1, 4);
+ assert_eq!(v[1].0, "foo");
+ assert_eq!(v[1].1, 3);
+
+ drop(tx1);
+ assert!(map.is_woken());
+ assert_pending!(map.poll_next());
+ drop(tx2);
+
+ assert_ready_none!(map.poll_next());
+}
+
+#[tokio::test]
+async fn insert_remove() {
+ let mut map = task::spawn(StreamMap::new());
+ let (tx, rx) = mpsc::unbounded_channel();
+
+ assert_ready_none!(map.poll_next());
+
+ assert!(map.insert("foo", rx).is_none());
+ let rx = map.remove("foo").unwrap();
+
+ assert_ok!(tx.send(1));
+
+ assert!(!map.is_woken());
+ assert_ready_none!(map.poll_next());
+
+ assert!(map.insert("bar", rx).is_none());
+
+ let v = assert_ready_some!(map.poll_next());
+ assert_eq!(v.0, "bar");
+ assert_eq!(v.1, 1);
+
+ assert!(map.remove("bar").is_some());
+ assert_ready_none!(map.poll_next());
+
+ assert!(map.is_empty());
+ assert_eq!(0, map.len());
+}
+
+#[tokio::test]
+async fn replace() {
+ let mut map = task::spawn(StreamMap::new());
+ let (tx1, rx1) = mpsc::unbounded_channel();
+ let (tx2, rx2) = mpsc::unbounded_channel();
+
+ assert!(map.insert("foo", rx1).is_none());
+
+ assert_pending!(map.poll_next());
+
+ let _rx1 = map.insert("foo", rx2).unwrap();
+
+ assert_pending!(map.poll_next());
+
+ tx1.send(1).unwrap();
+ assert_pending!(map.poll_next());
+
+ tx2.send(2).unwrap();
+ assert!(map.is_woken());
+ let v = assert_ready_some!(map.poll_next());
+ assert_eq!(v.0, "foo");
+ assert_eq!(v.1, 2);
+}
+
+#[test]
+fn size_hint_with_upper() {
+ let mut map = StreamMap::new();
+
+ map.insert("a", stream::iter(vec![1]));
+ map.insert("b", stream::iter(vec![1, 2]));
+ map.insert("c", stream::iter(vec![1, 2, 3]));
+
+ assert_eq!(3, map.len());
+ assert!(!map.is_empty());
+
+ let size_hint = map.size_hint();
+ assert_eq!(size_hint, (6, Some(6)));
+}
+
+#[test]
+fn size_hint_without_upper() {
+ let mut map = StreamMap::new();
+
+ map.insert("a", pin_box(stream::iter(vec![1])));
+ map.insert("b", pin_box(stream::iter(vec![1, 2])));
+ map.insert("c", pin_box(pending()));
+
+ let size_hint = map.size_hint();
+ assert_eq!(size_hint, (3, None));
+}
+
+#[test]
+fn new_capacity_zero() {
+ let map = StreamMap::<&str, stream::Pending<()>>::new();
+ assert_eq!(0, map.capacity());
+
+ let keys = map.keys().collect::<Vec<_>>();
+ assert!(keys.is_empty());
+}
+
+#[test]
+fn with_capacity() {
+ let map = StreamMap::<&str, stream::Pending<()>>::with_capacity(10);
+ assert!(10 <= map.capacity());
+
+ let keys = map.keys().collect::<Vec<_>>();
+ assert!(keys.is_empty());
+}
+
+#[test]
+fn iter_keys() {
+ let mut map = StreamMap::new();
+
+ map.insert("a", pending::<i32>());
+ map.insert("b", pending());
+ map.insert("c", pending());
+
+ let mut keys = map.keys().collect::<Vec<_>>();
+ keys.sort();
+
+ assert_eq!(&keys[..], &[&"a", &"b", &"c"]);
+}
+
+#[test]
+fn iter_values() {
+ let mut map = StreamMap::new();
+
+ map.insert("a", stream::iter(vec![1]));
+ map.insert("b", stream::iter(vec![1, 2]));
+ map.insert("c", stream::iter(vec![1, 2, 3]));
+
+ let mut size_hints = map.values().map(|s| s.size_hint().0).collect::<Vec<_>>();
+
+ size_hints.sort();
+
+ assert_eq!(&size_hints[..], &[1, 2, 3]);
+}
+
+#[test]
+fn iter_values_mut() {
+ let mut map = StreamMap::new();
+
+ map.insert("a", stream::iter(vec![1]));
+ map.insert("b", stream::iter(vec![1, 2]));
+ map.insert("c", stream::iter(vec![1, 2, 3]));
+
+ let mut size_hints = map
+ .values_mut()
+ .map(|s: &mut _| s.size_hint().0)
+ .collect::<Vec<_>>();
+
+ size_hints.sort();
+
+ assert_eq!(&size_hints[..], &[1, 2, 3]);
+}
+
+#[test]
+fn clear() {
+ let mut map = task::spawn(StreamMap::new());
+
+ map.insert("a", stream::iter(vec![1]));
+ map.insert("b", stream::iter(vec![1, 2]));
+ map.insert("c", stream::iter(vec![1, 2, 3]));
+
+ assert_ready_some!(map.poll_next());
+
+ map.clear();
+
+ assert_ready_none!(map.poll_next());
+ assert!(map.is_empty());
+}
+
+#[test]
+fn contains_key_borrow() {
+ let mut map = StreamMap::new();
+ map.insert("foo".to_string(), pending::<()>());
+
+ assert!(map.contains_key("foo"));
+}
+
+#[test]
+fn one_ready_many_none() {
+ // Run a few times because of randomness
+ for _ in 0..100 {
+ let mut map = task::spawn(StreamMap::new());
+
+ map.insert(0, pin_box(stream::empty()));
+ map.insert(1, pin_box(stream::empty()));
+ map.insert(2, pin_box(stream::once("hello")));
+ map.insert(3, pin_box(stream::pending()));
+
+ let v = assert_ready_some!(map.poll_next());
+ assert_eq!(v, (2, "hello"));
+ }
+}
+
+proptest::proptest! {
+ #[test]
+ fn fuzz_pending_complete_mix(kinds: Vec<bool>) {
+ use std::task::{Context, Poll};
+
+ struct DidPoll<T> {
+ did_poll: bool,
+ inner: T,
+ }
+
+ impl<T: Stream + Unpin> Stream for DidPoll<T> {
+ type Item = T::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>)
+ -> Poll<Option<T::Item>>
+ {
+ self.did_poll = true;
+ Pin::new(&mut self.inner).poll_next(cx)
+ }
+ }
+
+ for _ in 0..10 {
+ let mut map = task::spawn(StreamMap::new());
+ let mut expect = 0;
+
+ for (i, &is_empty) in kinds.iter().enumerate() {
+ let inner = if is_empty {
+ pin_box(stream::empty::<()>())
+ } else {
+ expect += 1;
+ pin_box(stream::pending::<()>())
+ };
+
+ let stream = DidPoll {
+ did_poll: false,
+ inner,
+ };
+
+ map.insert(i, stream);
+ }
+
+ if expect == 0 {
+ assert_ready_none!(map.poll_next());
+ } else {
+ assert_pending!(map.poll_next());
+
+ assert_eq!(expect, map.values().count());
+
+ for stream in map.values() {
+ assert!(stream.did_poll);
+ }
+ }
+ }
+ }
+}
+
+fn pin_box<T: Stream<Item = U> + 'static, U>(s: T) -> Pin<Box<dyn Stream<Item = U>>> {
+ Box::pin(s)
+}
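
A `StreamMap` multiplexes keyed streams: each yielded item is tagged with
the key of the stream it came from, exhausted streams are removed, and the
polling order is randomized for fairness (hence the sorting in the tests
above). A minimal sketch (illustrative, not part of the patch):

    use tokio::stream::{self, StreamExt, StreamMap};

    #[tokio::main]
    async fn main() {
        let mut map = StreamMap::new();
        map.insert("a", stream::iter(vec![1]));
        map.insert("b", stream::iter(vec![2]));

        // Drain the map; it ends once every inner stream has ended.
        let mut seen = Vec::new();
        while let Some((key, val)) = map.next().await {
            seen.push((key, val));
        }
        seen.sort();
        assert_eq!(seen, vec![("a", 1), ("b", 2)]);
    }
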
diff --git a/third_party/rust/tokio/tests/stream_timeout.rs b/third_party/rust/tokio/tests/stream_timeout.rs
new file mode 100644
index 0000000000..f65c835196
--- /dev/null
+++ b/third_party/rust/tokio/tests/stream_timeout.rs
@@ -0,0 +1,109 @@
+#![cfg(feature = "full")]
+
+use tokio::stream::{self, StreamExt};
+use tokio::time::{self, delay_for, Duration};
+use tokio_test::*;
+
+use futures::StreamExt as _;
+
+async fn maybe_delay(idx: i32) -> i32 {
+ if idx % 2 == 0 {
+ delay_for(ms(200)).await;
+ }
+ idx
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
+
+#[tokio::test]
+async fn basic_usage() {
+ time::pause();
+
+ // Items 2 and 4 time out. If we run the stream until it completes,
+ // we end up with the following items:
+ //
+ // [Ok(1), Err(Elapsed), Ok(2), Ok(3), Err(Elapsed), Ok(4)]
+
+ let stream = stream::iter(1..=4).then(maybe_delay).timeout(ms(100));
+ let mut stream = task::spawn(stream);
+
+ // First item completes immediately
+ assert_ready_eq!(stream.poll_next(), Some(Ok(1)));
+
+ // Second item is delayed 200ms, times out after 100ms
+ assert_pending!(stream.poll_next());
+
+ time::advance(ms(150)).await;
+ let v = assert_ready!(stream.poll_next());
+ assert!(v.unwrap().is_err());
+
+ assert_pending!(stream.poll_next());
+
+ time::advance(ms(100)).await;
+ assert_ready_eq!(stream.poll_next(), Some(Ok(2)));
+
+ // Third item is ready immediately
+ assert_ready_eq!(stream.poll_next(), Some(Ok(3)));
+
+ // Fourth item is delayed 200ms, times out after 100ms
+ assert_pending!(stream.poll_next());
+
+ time::advance(ms(60)).await;
+ assert_pending!(stream.poll_next()); // nothing ready yet
+
+ time::advance(ms(60)).await;
+ let v = assert_ready!(stream.poll_next());
+ assert!(v.unwrap().is_err()); // timeout!
+
+ time::advance(ms(120)).await;
+ assert_ready_eq!(stream.poll_next(), Some(Ok(4)));
+
+ // Done.
+ assert_ready_eq!(stream.poll_next(), None);
+}
+
+#[tokio::test]
+async fn return_elapsed_errors_only_once() {
+ time::pause();
+
+ let stream = stream::iter(1..=3).then(maybe_delay).timeout(ms(50));
+ let mut stream = task::spawn(stream);
+
+ // First item completes immediately
+ assert_ready_eq!(stream.poll_next(), Some(Ok(1)));
+
+ // Second item is delayed 200ms, times out after 50ms. Only one `Elapsed`
+ // error is returned.
+ assert_pending!(stream.poll_next());
+ time::advance(ms(50)).await;
+ let v = assert_ready!(stream.poll_next());
+ assert!(v.unwrap().is_err()); // timeout!
+
+ // deadline elapses again, but no error is returned
+ time::advance(ms(50)).await;
+ assert_pending!(stream.poll_next());
+
+ time::advance(ms(100)).await;
+ assert_ready_eq!(stream.poll_next(), Some(Ok(2)));
+ assert_ready_eq!(stream.poll_next(), Some(Ok(3)));
+
+ // Done
+ assert_ready_eq!(stream.poll_next(), None);
+}
+
+#[tokio::test]
+async fn no_timeouts() {
+ let stream = stream::iter(vec![1, 3, 5])
+ .then(maybe_delay)
+ .timeout(ms(100));
+
+ let mut stream = task::spawn(stream);
+
+ assert_ready_eq!(stream.poll_next(), Some(Ok(1)));
+ assert_ready_eq!(stream.poll_next(), Some(Ok(3)));
+ assert_ready_eq!(stream.poll_next(), Some(Ok(5)));
+ assert_ready_eq!(stream.poll_next(), None);
+}
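
The `timeout` combinator wraps each item in a `Result`: `Ok(item)` when it
arrives within the per-item deadline, `Err(Elapsed)` when the deadline
fires first, with at most one error per late item and the item itself
still delivered on a later poll. A minimal sketch using real time instead
of the paused clock above (illustrative, not part of the patch):

    use tokio::stream::StreamExt;
    use tokio::sync::mpsc;
    use tokio::time::{delay_for, Duration};

    #[tokio::main]
    async fn main() {
        let (mut tx, rx) = mpsc::channel(1);

        tokio::spawn(async move {
            tx.send(1).await.unwrap();
            delay_for(Duration::from_millis(50)).await;
            tx.send(2).await.unwrap();
        });

        let mut s = rx.timeout(Duration::from_millis(10));

        // The first item arrives within the deadline...
        assert_eq!(s.next().await.unwrap().ok(), Some(1));
        // ...the second does not: one Elapsed error, then the item.
        assert!(s.next().await.unwrap().is_err());
        assert_eq!(s.next().await.unwrap().ok(), Some(2));
        assert!(s.next().await.is_none()); // sender dropped
    }
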
diff --git a/third_party/rust/tokio/tests/support/mock_file.rs b/third_party/rust/tokio/tests/support/mock_file.rs
new file mode 100644
index 0000000000..9895f835e6
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/mock_file.rs
@@ -0,0 +1,281 @@
+use std::collections::VecDeque;
+use std::fmt;
+use std::fs::{Metadata, Permissions};
+use std::io;
+use std::io::prelude::*;
+use std::io::SeekFrom;
+use std::path::PathBuf;
+use std::sync::{Arc, Mutex};
+
+pub struct File {
+ shared: Arc<Mutex<Shared>>,
+}
+
+pub struct Handle {
+ shared: Arc<Mutex<Shared>>,
+}
+
+struct Shared {
+ calls: VecDeque<Call>,
+}
+
+#[derive(Debug)]
+enum Call {
+ Read(io::Result<Vec<u8>>),
+ Write(io::Result<Vec<u8>>),
+ Seek(SeekFrom, io::Result<u64>),
+ SyncAll(io::Result<()>),
+ SyncData(io::Result<()>),
+ SetLen(u64, io::Result<()>),
+}
+
+impl Handle {
+ pub fn read(&self, data: &[u8]) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls.push_back(Call::Read(Ok(data.to_owned())));
+ self
+ }
+
+ pub fn read_err(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::Read(Err(io::ErrorKind::Other.into())));
+ self
+ }
+
+ pub fn write(&self, data: &[u8]) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls.push_back(Call::Write(Ok(data.to_owned())));
+ self
+ }
+
+ pub fn write_err(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::Write(Err(io::ErrorKind::Other.into())));
+ self
+ }
+
+ pub fn seek_start_ok(&self, offset: u64) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::Seek(SeekFrom::Start(offset), Ok(offset)));
+ self
+ }
+
+ pub fn seek_current_ok(&self, offset: i64, ret: u64) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::Seek(SeekFrom::Current(offset), Ok(ret)));
+ self
+ }
+
+ pub fn sync_all(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls.push_back(Call::SyncAll(Ok(())));
+ self
+ }
+
+ pub fn sync_all_err(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::SyncAll(Err(io::ErrorKind::Other.into())));
+ self
+ }
+
+ pub fn sync_data(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls.push_back(Call::SyncData(Ok(())));
+ self
+ }
+
+ pub fn sync_data_err(&self) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::SyncData(Err(io::ErrorKind::Other.into())));
+ self
+ }
+
+ pub fn set_len(&self, size: u64) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls.push_back(Call::SetLen(size, Ok(())));
+ self
+ }
+
+ pub fn set_len_err(&self, size: u64) -> &Self {
+ let mut s = self.shared.lock().unwrap();
+ s.calls
+ .push_back(Call::SetLen(size, Err(io::ErrorKind::Other.into())));
+ self
+ }
+
+ pub fn remaining(&self) -> usize {
+ let s = self.shared.lock().unwrap();
+ s.calls.len()
+ }
+}
+
+impl Drop for Handle {
+ fn drop(&mut self) {
+ if !std::thread::panicking() {
+ let s = self.shared.lock().unwrap();
+ assert_eq!(0, s.calls.len());
+ }
+ }
+}
+
+impl File {
+ pub fn open(_: PathBuf) -> io::Result<File> {
+ unimplemented!();
+ }
+
+ pub fn create(_: PathBuf) -> io::Result<File> {
+ unimplemented!();
+ }
+
+ pub fn mock() -> (Handle, File) {
+ let shared = Arc::new(Mutex::new(Shared {
+ calls: VecDeque::new(),
+ }));
+
+ let handle = Handle {
+ shared: shared.clone(),
+ };
+ let file = File { shared };
+
+ (handle, file)
+ }
+
+ pub fn sync_all(&self) -> io::Result<()> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(SyncAll(ret)) => ret,
+ Some(op) => panic!("expected next call to be {:?}; was sync_all", op),
+ None => panic!("did not expect call"),
+ }
+ }
+
+ pub fn sync_data(&self) -> io::Result<()> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(SyncData(ret)) => ret,
+            Some(op) => panic!("expected next call to be {:?}; was sync_data", op),
+ None => panic!("did not expect call"),
+ }
+ }
+
+ pub fn set_len(&self, size: u64) -> io::Result<()> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(SetLen(arg, ret)) => {
+ assert_eq!(arg, size);
+ ret
+ }
+            Some(op) => panic!("expected next call to be {:?}; was set_len", op),
+ None => panic!("did not expect call"),
+ }
+ }
+
+ pub fn metadata(&self) -> io::Result<Metadata> {
+ unimplemented!();
+ }
+
+ pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()> {
+ unimplemented!();
+ }
+
+ pub fn try_clone(&self) -> io::Result<Self> {
+ unimplemented!();
+ }
+}
+
+impl Read for &'_ File {
+ fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(Read(Ok(data))) => {
+ assert!(dst.len() >= data.len());
+ assert!(dst.len() <= 16 * 1024, "actual = {}", dst.len()); // max buffer
+
+                dst[..data.len()].copy_from_slice(&data);
+ Ok(data.len())
+ }
+ Some(Read(Err(e))) => Err(e),
+ Some(op) => panic!("expected next call to be {:?}; was a read", op),
+ None => panic!("did not expect call"),
+ }
+ }
+}
+
+impl Write for &'_ File {
+ fn write(&mut self, src: &[u8]) -> io::Result<usize> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(Write(Ok(data))) => {
+ assert_eq!(src, &data[..]);
+ Ok(src.len())
+ }
+ Some(Write(Err(e))) => Err(e),
+ Some(op) => panic!("expected next call to be {:?}; was write", op),
+ None => panic!("did not expect call"),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+impl Seek for &'_ File {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ use self::Call::*;
+
+ let mut s = self.shared.lock().unwrap();
+
+ match s.calls.pop_front() {
+ Some(Seek(expect, res)) => {
+ assert_eq!(expect, pos);
+ res
+ }
+ Some(op) => panic!("expected call {:?}; was `seek`", op),
+ None => panic!("did not expect call; was `seek`"),
+ }
+ }
+}
+
+impl fmt::Debug for File {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("mock::File").finish()
+ }
+}
+
+#[cfg(unix)]
+impl std::os::unix::io::AsRawFd for File {
+ fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
+ unimplemented!();
+ }
+}
+
+#[cfg(windows)]
+impl std::os::windows::io::AsRawHandle for File {
+ fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
+ unimplemented!();
+ }
+}
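
This mock replays a scripted queue of expected calls: the test enqueues
them through `Handle`, the code under test consumes them through `File`,
and dropping the `Handle` asserts the queue was drained. A hypothetical
driver, shown only to illustrate the intended call pattern:

    use std::io::Read;

    fn demo() -> std::io::Result<()> {
        // Script the exact sequence of calls the code under test must make.
        let (handle, file) = File::mock();
        handle.read(b"abc").sync_all();

        // The mock pops the queue, panicking on any out-of-order call.
        let mut buf = [0u8; 16];
        let n = (&file).read(&mut buf)?;
        assert_eq!(&buf[..n], b"abc");
        file.sync_all()?;

        assert_eq!(handle.remaining(), 0);
        Ok(())
    }
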
diff --git a/third_party/rust/tokio/tests/support/mock_pool.rs b/third_party/rust/tokio/tests/support/mock_pool.rs
new file mode 100644
index 0000000000..e1fdb42641
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/mock_pool.rs
@@ -0,0 +1,66 @@
+use tokio::sync::oneshot;
+
+use std::cell::RefCell;
+use std::collections::VecDeque;
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+thread_local! {
+ static QUEUE: RefCell<VecDeque<Box<dyn FnOnce() + Send>>> = RefCell::new(VecDeque::new())
+}
+
+#[derive(Debug)]
+pub(crate) struct Blocking<T> {
+ rx: oneshot::Receiver<T>,
+}
+
+pub(crate) fn run<F, R>(f: F) -> Blocking<R>
+where
+ F: FnOnce() -> R + Send + 'static,
+ R: Send + 'static,
+{
+ let (tx, rx) = oneshot::channel();
+ let task = Box::new(move || {
+ let _ = tx.send(f());
+ });
+
+ QUEUE.with(|cell| cell.borrow_mut().push_back(task));
+
+ Blocking { rx }
+}
+
+impl<T> Future for Blocking<T> {
+ type Output = Result<T, io::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ use std::task::Poll::*;
+
+ match Pin::new(&mut self.rx).poll(cx) {
+ Ready(Ok(v)) => Ready(Ok(v)),
+ Ready(Err(e)) => panic!("error = {:?}", e),
+ Pending => Pending,
+ }
+ }
+}
+
+pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T>
+where
+ F: FnOnce() -> io::Result<T> + Send + 'static,
+ T: Send + 'static,
+{
+ run(f).await?
+}
+
+pub(crate) fn len() -> usize {
+ QUEUE.with(|cell| cell.borrow().len())
+}
+
+pub(crate) fn run_one() {
+ let task = QUEUE
+ .with(|cell| cell.borrow_mut().pop_front())
+ .expect("expected task to run, but none ready");
+
+ task();
+}
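
This pool makes "blocking" work deterministic: `run` only queues the
closure, and nothing executes until the test pumps the queue with
`run_one`. A hypothetical driver, shown only to illustrate the intended
call pattern:

    async fn demo() {
        // Queued, not yet executed.
        let fut = run(|| 21 * 2);
        assert_eq!(len(), 1);

        // Pumping the queue executes the closure and sends its result.
        run_one();
        assert_eq!(fut.await.unwrap(), 42);
    }
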
diff --git a/third_party/rust/tokio/tests/support/signal.rs b/third_party/rust/tokio/tests/support/signal.rs
new file mode 100644
index 0000000000..ea06058764
--- /dev/null
+++ b/third_party/rust/tokio/tests/support/signal.rs
@@ -0,0 +1,7 @@
+pub fn send_signal(signal: libc::c_int) {
+ use libc::{getpid, kill};
+
+ unsafe {
+ assert_eq!(kill(getpid(), signal), 0);
+ }
+}
diff --git a/third_party/rust/tokio/tests/sync_barrier.rs b/third_party/rust/tokio/tests/sync_barrier.rs
new file mode 100644
index 0000000000..f280fe8600
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_barrier.rs
@@ -0,0 +1,96 @@
+#![allow(clippy::unnecessary_operation)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::Barrier;
+
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+struct IsSend<T: Send>(T);
+#[test]
+fn barrier_future_is_send() {
+ let b = Barrier::new(0);
+ IsSend(b.wait());
+}
+
+#[test]
+fn zero_does_not_block() {
+ let b = Barrier::new(0);
+
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+}
+
+#[test]
+fn single() {
+ let b = Barrier::new(1);
+
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+ {
+ let mut w = spawn(b.wait());
+ let wr = assert_ready!(w.poll());
+ assert!(wr.is_leader());
+ }
+}
+
+#[test]
+fn tango() {
+ let b = Barrier::new(2);
+
+ let mut w1 = spawn(b.wait());
+ assert_pending!(w1.poll());
+
+ let mut w2 = spawn(b.wait());
+ let wr2 = assert_ready!(w2.poll());
+ let wr1 = assert_ready!(w1.poll());
+
+ assert!(wr1.is_leader() || wr2.is_leader());
+ assert!(!(wr1.is_leader() && wr2.is_leader()));
+}
+
+#[test]
+fn lots() {
+ let b = Barrier::new(100);
+
+ for _ in 0..10 {
+ let mut wait = Vec::new();
+ for _ in 0..99 {
+ let mut w = spawn(b.wait());
+ assert_pending!(w.poll());
+ wait.push(w);
+ }
+ for w in &mut wait {
+ assert_pending!(w.poll());
+ }
+
+ // pass the barrier
+ let mut w = spawn(b.wait());
+ let mut found_leader = assert_ready!(w.poll()).is_leader();
+ for mut w in wait {
+ let wr = assert_ready!(w.poll());
+ if wr.is_leader() {
+ assert!(!found_leader);
+ found_leader = true;
+ }
+ }
+ assert!(found_leader);
+ }
+}
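
The invariant these tests enforce: all `wait` calls block until the N-th
arrival, and exactly one released waiter per generation observes
`is_leader() == true`. A minimal sketch against the tokio 0.2 API
(illustrative, not part of the patch):

    use std::sync::Arc;
    use tokio::sync::Barrier;

    #[tokio::main]
    async fn main() {
        let barrier = Arc::new(Barrier::new(3));

        let mut handles = Vec::new();
        for _ in 0..3 {
            let b = barrier.clone();
            handles.push(tokio::spawn(async move { b.wait().await.is_leader() }));
        }

        // Exactly one of the three waiters is the leader.
        let mut leaders = 0;
        for h in handles {
            if h.await.unwrap() {
                leaders += 1;
            }
        }
        assert_eq!(leaders, 1);
    }
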
diff --git a/third_party/rust/tokio/tests/sync_broadcast.rs b/third_party/rust/tokio/tests/sync_broadcast.rs
new file mode 100644
index 0000000000..e9e7b36610
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_broadcast.rs
@@ -0,0 +1,357 @@
+#![allow(clippy::cognitive_complexity)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "sync")]
+
+use tokio::sync::broadcast;
+use tokio_test::task;
+use tokio_test::{
+ assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok,
+};
+
+use std::sync::Arc;
+
+macro_rules! assert_recv {
+ ($e:expr) => {
+ match $e.try_recv() {
+ Ok(value) => value,
+ Err(e) => panic!("expected recv; got = {:?}", e),
+ }
+ };
+}
+
+macro_rules! assert_empty {
+ ($e:expr) => {
+ match $e.try_recv() {
+ Ok(value) => panic!("expected empty; got = {:?}", value),
+ Err(broadcast::TryRecvError::Empty) => {}
+ Err(e) => panic!("expected empty; got = {:?}", e),
+ }
+ };
+}
+
+macro_rules! assert_lagged {
+ ($e:expr, $n:expr) => {
+ match assert_err!($e) {
+ broadcast::TryRecvError::Lagged(n) => {
+ assert_eq!(n, $n);
+ }
+ _ => panic!("did not lag"),
+ }
+ };
+}
+
+trait AssertSend: Send {}
+impl AssertSend for broadcast::Sender<i32> {}
+impl AssertSend for broadcast::Receiver<i32> {}
+
+#[test]
+fn send_try_recv_bounded() {
+ let (tx, mut rx) = broadcast::channel(16);
+
+ assert_empty!(rx);
+
+ let n = assert_ok!(tx.send("hello"));
+ assert_eq!(n, 1);
+
+ let val = assert_recv!(rx);
+ assert_eq!(val, "hello");
+
+ assert_empty!(rx);
+}
+
+#[test]
+fn send_two_recv() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ assert_empty!(rx1);
+ assert_empty!(rx2);
+
+ let n = assert_ok!(tx.send("hello"));
+ assert_eq!(n, 2);
+
+ let val = assert_recv!(rx1);
+ assert_eq!(val, "hello");
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "hello");
+
+ assert_empty!(rx1);
+ assert_empty!(rx2);
+}
+
+#[tokio::test]
+async fn send_recv_stream() {
+ use tokio::stream::StreamExt;
+
+ let (tx, mut rx) = broadcast::channel::<i32>(8);
+
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+
+ assert_eq!(Some(Ok(1)), rx.next().await);
+ assert_eq!(Some(Ok(2)), rx.next().await);
+
+ drop(tx);
+
+ assert_eq!(None, rx.next().await);
+}
+
+#[test]
+fn send_recv_bounded() {
+ let (tx, mut rx) = broadcast::channel(16);
+
+ let mut recv = task::spawn(rx.recv());
+
+ assert_pending!(recv.poll());
+
+ assert_ok!(tx.send("hello"));
+
+ assert!(recv.is_woken());
+ let val = assert_ready_ok!(recv.poll());
+ assert_eq!(val, "hello");
+}
+
+#[test]
+fn send_two_recv_bounded() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ assert_ok!(tx.send("hello"));
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ let val1 = assert_ready_ok!(recv1.poll());
+ let val2 = assert_ready_ok!(recv2.poll());
+ assert_eq!(val1, "hello");
+ assert_eq!(val2, "hello");
+
+ drop((recv1, recv2));
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+
+ assert_ok!(tx.send("world"));
+
+ assert!(recv1.is_woken());
+ assert!(!recv2.is_woken());
+
+ let val1 = assert_ready_ok!(recv1.poll());
+ let val2 = assert_ready_ok!(recv2.poll());
+ assert_eq!(val1, "world");
+ assert_eq!(val2, "world");
+}
+
+#[test]
+fn send_slow_rx() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ {
+ let mut recv2 = task::spawn(rx2.recv());
+
+ {
+ let mut recv1 = task::spawn(rx1.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ assert_ok!(tx.send("one"));
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ assert_ok!(tx.send("two"));
+
+ let val = assert_ready_ok!(recv1.poll());
+ assert_eq!(val, "one");
+ }
+
+ let val = assert_ready_ok!(task::spawn(rx1.recv()).poll());
+ assert_eq!(val, "two");
+
+ let mut recv1 = task::spawn(rx1.recv());
+
+ assert_pending!(recv1.poll());
+
+ assert_ok!(tx.send("three"));
+
+ assert!(recv1.is_woken());
+
+ let val = assert_ready_ok!(recv1.poll());
+ assert_eq!(val, "three");
+
+ let val = assert_ready_ok!(recv2.poll());
+ assert_eq!(val, "one");
+ }
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "two");
+
+ let val = assert_recv!(rx2);
+ assert_eq!(val, "three");
+}
+
+#[test]
+fn drop_rx_while_values_remain() {
+ let (tx, mut rx1) = broadcast::channel(16);
+ let mut rx2 = tx.subscribe();
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+
+ assert_recv!(rx1);
+ assert_recv!(rx2);
+
+ drop(rx2);
+ drop(rx1);
+}
+
+#[test]
+fn lagging_rx() {
+ let (tx, mut rx1) = broadcast::channel(2);
+ let mut rx2 = tx.subscribe();
+
+ assert_ok!(tx.send("one"));
+ assert_ok!(tx.send("two"));
+
+ assert_eq!("one", assert_recv!(rx1));
+
+ assert_ok!(tx.send("three"));
+
+ // Lagged too far
+ assert_lagged!(rx2.try_recv(), 1);
+
+ // Calling again gets the next value
+ assert_eq!("two", assert_recv!(rx2));
+
+ assert_eq!("two", assert_recv!(rx1));
+ assert_eq!("three", assert_recv!(rx1));
+
+ assert_ok!(tx.send("four"));
+ assert_ok!(tx.send("five"));
+
+ assert_lagged!(rx2.try_recv(), 1);
+
+ assert_ok!(tx.send("six"));
+
+ assert_lagged!(rx2.try_recv(), 1);
+}
+
+#[test]
+fn send_no_rx() {
+ let (tx, _) = broadcast::channel(16);
+
+ assert_err!(tx.send("hello"));
+
+ let mut rx = tx.subscribe();
+
+ assert_ok!(tx.send("world"));
+
+ let val = assert_recv!(rx);
+ assert_eq!("world", val);
+}
+
+#[test]
+#[should_panic]
+fn zero_capacity() {
+ broadcast::channel::<()>(0);
+}
+
+#[test]
+#[should_panic]
+fn capacity_too_big() {
+ use std::usize;
+
+ broadcast::channel::<()>(1 + (usize::MAX >> 1));
+}
+
+#[test]
+fn panic_in_clone() {
+ use std::panic::{self, AssertUnwindSafe};
+
+ #[derive(Eq, PartialEq, Debug)]
+ struct MyVal(usize);
+
+ impl Clone for MyVal {
+ fn clone(&self) -> MyVal {
+ assert_ne!(0, self.0);
+ MyVal(self.0)
+ }
+ }
+
+ let (tx, mut rx) = broadcast::channel(16);
+
+ assert_ok!(tx.send(MyVal(0)));
+ assert_ok!(tx.send(MyVal(1)));
+
+ let res = panic::catch_unwind(AssertUnwindSafe(|| {
+ let _ = rx.try_recv();
+ }));
+
+ assert_err!(res);
+
+ let val = assert_recv!(rx);
+ assert_eq!(val, MyVal(1));
+}
+
+#[test]
+fn dropping_tx_notifies_rx() {
+ let (tx, mut rx1) = broadcast::channel::<()>(16);
+ let mut rx2 = tx.subscribe();
+
+ let tx2 = tx.clone();
+
+ let mut recv1 = task::spawn(rx1.recv());
+ let mut recv2 = task::spawn(rx2.recv());
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ drop(tx);
+
+ assert_pending!(recv1.poll());
+ assert_pending!(recv2.poll());
+
+ drop(tx2);
+
+ assert!(recv1.is_woken());
+ assert!(recv2.is_woken());
+
+ let err = assert_ready_err!(recv1.poll());
+ assert!(is_closed(err));
+
+ let err = assert_ready_err!(recv2.poll());
+ assert!(is_closed(err));
+}
+
+#[test]
+fn unconsumed_messages_are_dropped() {
+ let (tx, rx) = broadcast::channel(16);
+
+ let msg = Arc::new(());
+
+ assert_ok!(tx.send(msg.clone()));
+
+ assert_eq!(2, Arc::strong_count(&msg));
+
+ drop(rx);
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+fn is_closed(err: broadcast::RecvError) -> bool {
+ match err {
+ broadcast::RecvError::Closed => true,
+ _ => false,
+ }
+}
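
The broadcast channel fans every value out to all receivers subscribed at
send time; a receiver that falls more than `capacity` values behind gets
`Lagged(n)` and is fast-forwarded, as `lagging_rx` shows. A minimal
fan-out sketch (illustrative, not part of the patch):

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx1) = broadcast::channel(16);
        let mut rx2 = tx.subscribe();

        // `send` returns how many receivers will observe the value.
        assert_eq!(tx.send("hi").unwrap(), 2);

        assert_eq!(rx1.recv().await.unwrap(), "hi");
        assert_eq!(rx2.recv().await.unwrap(), "hi");
    }
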
diff --git a/third_party/rust/tokio/tests/sync_errors.rs b/third_party/rust/tokio/tests/sync_errors.rs
new file mode 100644
index 0000000000..66e8f0c098
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_errors.rs
@@ -0,0 +1,27 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+fn is_error<T: std::error::Error + Send + Sync>() {}
+
+#[test]
+fn mpsc_error_bound() {
+ use tokio::sync::mpsc::error;
+
+ is_error::<error::SendError<()>>();
+ is_error::<error::TrySendError<()>>();
+}
+
+#[test]
+fn oneshot_error_bound() {
+ use tokio::sync::oneshot::error;
+
+ is_error::<error::RecvError>();
+ is_error::<error::TryRecvError>();
+}
+
+#[test]
+fn watch_error_bound() {
+ use tokio::sync::watch::error;
+
+ is_error::<error::SendError<()>>();
+}
diff --git a/third_party/rust/tokio/tests/sync_mpsc.rs b/third_party/rust/tokio/tests/sync_mpsc.rs
new file mode 100644
index 0000000000..f02d90aa56
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_mpsc.rs
@@ -0,0 +1,492 @@
+#![allow(clippy::redundant_clone)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::mpsc;
+use tokio::sync::mpsc::error::{TryRecvError, TrySendError};
+use tokio_test::task;
+use tokio_test::{
+ assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok,
+};
+
+use std::sync::Arc;
+
+trait AssertSend: Send {}
+impl AssertSend for mpsc::Sender<i32> {}
+impl AssertSend for mpsc::Receiver<i32> {}
+
+#[test]
+fn send_recv_with_buffer() {
+ let (tx, rx) = mpsc::channel::<i32>(16);
+ let mut tx = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ // Using poll_ready / try_send
+ assert_ready_ok!(tx.enter(|cx, mut tx| tx.poll_ready(cx)));
+ tx.try_send(1).unwrap();
+
+ // Without poll_ready
+ tx.try_send(2).unwrap();
+
+ drop(tx);
+
+ let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx)));
+ assert_eq!(val, Some(1));
+
+ let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx)));
+ assert_eq!(val, Some(2));
+
+ let val = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx)));
+ assert!(val.is_none());
+}
+
+#[test]
+fn disarm() {
+ let (tx, rx) = mpsc::channel::<i32>(2);
+ let mut tx1 = task::spawn(tx.clone());
+ let mut tx2 = task::spawn(tx.clone());
+ let mut tx3 = task::spawn(tx.clone());
+ let mut tx4 = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ // We should be able to `poll_ready` two handles without problem
+ assert_ready_ok!(tx1.enter(|cx, mut tx| tx.poll_ready(cx)));
+ assert_ready_ok!(tx2.enter(|cx, mut tx| tx.poll_ready(cx)));
+
+ // But a third should not be ready
+ assert_pending!(tx3.enter(|cx, mut tx| tx.poll_ready(cx)));
+
+ // Using one of the reserved slots should allow a new handle to become ready
+ tx1.try_send(1).unwrap();
+ // We also need to receive for the slot to be free
+ let _ = assert_ready!(rx.enter(|cx, mut rx| rx.poll_recv(cx))).unwrap();
+ // Now there's a free slot!
+ assert_ready_ok!(tx3.enter(|cx, mut tx| tx.poll_ready(cx)));
+ assert_pending!(tx4.enter(|cx, mut tx| tx.poll_ready(cx)));
+
+ // Dropping a ready handle should also open up a slot
+ drop(tx2);
+ assert_ready_ok!(tx4.enter(|cx, mut tx| tx.poll_ready(cx)));
+ assert_pending!(tx1.enter(|cx, mut tx| tx.poll_ready(cx)));
+
+ // Explicitly disarming a handle should also open a slot
+ assert!(tx3.disarm());
+ assert_ready_ok!(tx1.enter(|cx, mut tx| tx.poll_ready(cx)));
+
+ // Disarming a non-armed sender does not free up a slot
+ assert!(!tx3.disarm());
+ assert_pending!(tx3.enter(|cx, mut tx| tx.poll_ready(cx)));
+}
+
+#[tokio::test]
+async fn send_recv_stream_with_buffer() {
+ use tokio::stream::StreamExt;
+
+ let (mut tx, mut rx) = mpsc::channel::<i32>(16);
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1).await);
+ assert_ok!(tx.send(2).await);
+ });
+
+ assert_eq!(Some(1), rx.next().await);
+ assert_eq!(Some(2), rx.next().await);
+ assert_eq!(None, rx.next().await);
+}
+
+#[tokio::test]
+async fn async_send_recv_with_buffer() {
+ let (mut tx, mut rx) = mpsc::channel(16);
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1).await);
+ assert_ok!(tx.send(2).await);
+ });
+
+ assert_eq!(Some(1), rx.recv().await);
+ assert_eq!(Some(2), rx.recv().await);
+ assert_eq!(None, rx.recv().await);
+}
+
+#[test]
+fn start_send_past_cap() {
+ let mut t1 = task::spawn(());
+ let mut t2 = task::spawn(());
+ let mut t3 = task::spawn(());
+
+ let (mut tx1, mut rx) = mpsc::channel(1);
+ let mut tx2 = tx1.clone();
+
+ assert_ok!(tx1.try_send(()));
+
+ t1.enter(|cx, _| {
+ assert_pending!(tx1.poll_ready(cx));
+ });
+
+ t2.enter(|cx, _| {
+ assert_pending!(tx2.poll_ready(cx));
+ });
+
+ drop(tx1);
+
+ let val = t3.enter(|cx, _| assert_ready!(rx.poll_recv(cx)));
+ assert!(val.is_some());
+
+ assert!(t2.is_woken());
+ assert!(!t1.is_woken());
+
+ drop(tx2);
+
+ let val = t3.enter(|cx, _| assert_ready!(rx.poll_recv(cx)));
+ assert!(val.is_none());
+}
+
+#[test]
+#[should_panic]
+fn buffer_gteq_one() {
+ mpsc::channel::<i32>(0);
+}
+
+#[test]
+fn send_recv_unbounded() {
+ let mut t1 = task::spawn(());
+
+ let (tx, mut rx) = mpsc::unbounded_channel::<i32>();
+
+    // Using `send`, which never blocks on an unbounded channel
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert_eq!(val, Some(1));
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert_eq!(val, Some(2));
+
+ drop(tx);
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert!(val.is_none());
+}
+
+#[tokio::test]
+async fn async_send_recv_unbounded() {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ });
+
+ assert_eq!(Some(1), rx.recv().await);
+ assert_eq!(Some(2), rx.recv().await);
+ assert_eq!(None, rx.recv().await);
+}
+
+#[tokio::test]
+async fn send_recv_stream_unbounded() {
+ use tokio::stream::StreamExt;
+
+ let (tx, mut rx) = mpsc::unbounded_channel::<i32>();
+
+ tokio::spawn(async move {
+ assert_ok!(tx.send(1));
+ assert_ok!(tx.send(2));
+ });
+
+ assert_eq!(Some(1), rx.next().await);
+ assert_eq!(Some(2), rx.next().await);
+ assert_eq!(None, rx.next().await);
+}
+
+#[test]
+fn no_t_bounds_buffer() {
+ struct NoImpls;
+
+ let mut t1 = task::spawn(());
+ let (tx, mut rx) = mpsc::channel(100);
+
+ // sender should be Debug even though T isn't Debug
+ println!("{:?}", tx);
+ // same with Receiver
+ println!("{:?}", rx);
+ // and sender should be Clone even though T isn't Clone
+ assert!(tx.clone().try_send(NoImpls).is_ok());
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert!(val.is_some());
+}
+
+#[test]
+fn no_t_bounds_unbounded() {
+ struct NoImpls;
+
+ let mut t1 = task::spawn(());
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ // sender should be Debug even though T isn't Debug
+ println!("{:?}", tx);
+ // same with Receiver
+ println!("{:?}", rx);
+ // and sender should be Clone even though T isn't Clone
+ assert!(tx.clone().send(NoImpls).is_ok());
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert!(val.is_some());
+}
+
+#[test]
+fn send_recv_buffer_limited() {
+ let mut t1 = task::spawn(());
+ let mut t2 = task::spawn(());
+
+ let (mut tx, mut rx) = mpsc::channel::<i32>(1);
+
+ // Run on a task context
+ t1.enter(|cx, _| {
+ assert_ready_ok!(tx.poll_ready(cx));
+
+ // Send first message
+ assert_ok!(tx.try_send(1));
+
+ // Not ready
+ assert_pending!(tx.poll_ready(cx));
+
+        // A second message does not fit; `try_send` fails while full
+ assert_err!(tx.try_send(1337));
+ });
+
+ t2.enter(|cx, _| {
+ // Take the value
+ let val = assert_ready!(rx.poll_recv(cx));
+ assert_eq!(Some(1), val);
+ });
+
+ assert!(t1.is_woken());
+
+ t1.enter(|cx, _| {
+ assert_ready_ok!(tx.poll_ready(cx));
+
+ assert_ok!(tx.try_send(2));
+
+ // Not ready
+ assert_pending!(tx.poll_ready(cx));
+ });
+
+ t2.enter(|cx, _| {
+ // Take the value
+ let val = assert_ready!(rx.poll_recv(cx));
+ assert_eq!(Some(2), val);
+ });
+
+ t1.enter(|cx, _| {
+ assert_ready_ok!(tx.poll_ready(cx));
+ });
+}
+
+#[test]
+fn recv_close_gets_none_idle() {
+ let mut t1 = task::spawn(());
+
+ let (mut tx, mut rx) = mpsc::channel::<i32>(10);
+
+ rx.close();
+
+ t1.enter(|cx, _| {
+ let val = assert_ready!(rx.poll_recv(cx));
+ assert!(val.is_none());
+ assert_ready_err!(tx.poll_ready(cx));
+ });
+}
+
+#[test]
+fn recv_close_gets_none_reserved() {
+ let mut t1 = task::spawn(());
+ let mut t2 = task::spawn(());
+ let mut t3 = task::spawn(());
+
+ let (mut tx1, mut rx) = mpsc::channel::<i32>(1);
+ let mut tx2 = tx1.clone();
+
+ assert_ready_ok!(t1.enter(|cx, _| tx1.poll_ready(cx)));
+
+ t2.enter(|cx, _| {
+ assert_pending!(tx2.poll_ready(cx));
+ });
+
+ rx.close();
+
+ assert!(t2.is_woken());
+
+ t2.enter(|cx, _| {
+ assert_ready_err!(tx2.poll_ready(cx));
+ });
+
+ t3.enter(|cx, _| assert_pending!(rx.poll_recv(cx)));
+
+ assert!(!t1.is_woken());
+ assert!(!t2.is_woken());
+
+ assert_ok!(tx1.try_send(123));
+
+ assert!(t3.is_woken());
+
+ t3.enter(|cx, _| {
+ let v = assert_ready!(rx.poll_recv(cx));
+ assert_eq!(v, Some(123));
+
+ let v = assert_ready!(rx.poll_recv(cx));
+ assert!(v.is_none());
+ });
+}
+
+#[test]
+fn tx_close_gets_none() {
+ let mut t1 = task::spawn(());
+
+ let (_, mut rx) = mpsc::channel::<i32>(10);
+
+ // Run on a task context
+ t1.enter(|cx, _| {
+ let v = assert_ready!(rx.poll_recv(cx));
+ assert!(v.is_none());
+ });
+}
+
+#[test]
+fn try_send_fail() {
+ let mut t1 = task::spawn(());
+
+ let (mut tx, mut rx) = mpsc::channel(1);
+
+ tx.try_send("hello").unwrap();
+
+ // This should fail
+ match assert_err!(tx.try_send("fail")) {
+ TrySendError::Full(..) => {}
+ _ => panic!(),
+ }
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert_eq!(val, Some("hello"));
+
+ assert_ok!(tx.try_send("goodbye"));
+ drop(tx);
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert_eq!(val, Some("goodbye"));
+
+ let val = assert_ready!(t1.enter(|cx, _| rx.poll_recv(cx)));
+ assert!(val.is_none());
+}
+
+#[test]
+fn drop_tx_with_permit_releases_permit() {
+ let mut t1 = task::spawn(());
+ let mut t2 = task::spawn(());
+
+    // poll_ready reserves capacity; ensure that the capacity is released if tx
+    // is dropped without sending a value.
+ let (mut tx1, _rx) = mpsc::channel::<i32>(1);
+ let mut tx2 = tx1.clone();
+
+ assert_ready_ok!(t1.enter(|cx, _| tx1.poll_ready(cx)));
+
+ t2.enter(|cx, _| {
+ assert_pending!(tx2.poll_ready(cx));
+ });
+
+ drop(tx1);
+
+ assert!(t2.is_woken());
+
+ assert_ready_ok!(t2.enter(|cx, _| tx2.poll_ready(cx)));
+}
+
+#[test]
+fn dropping_rx_closes_channel() {
+ let mut t1 = task::spawn(());
+
+ let (mut tx, rx) = mpsc::channel(100);
+
+ let msg = Arc::new(());
+ assert_ok!(tx.try_send(msg.clone()));
+
+ drop(rx);
+ assert_ready_err!(t1.enter(|cx, _| tx.poll_ready(cx)));
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn dropping_rx_closes_channel_for_try() {
+ let (mut tx, rx) = mpsc::channel(100);
+
+ let msg = Arc::new(());
+ tx.try_send(msg.clone()).unwrap();
+
+ drop(rx);
+
+ {
+ let err = assert_err!(tx.try_send(msg.clone()));
+ match err {
+ TrySendError::Closed(..) => {}
+ _ => panic!(),
+ }
+ }
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn unconsumed_messages_are_dropped() {
+ let msg = Arc::new(());
+
+ let (mut tx, rx) = mpsc::channel(100);
+
+ tx.try_send(msg.clone()).unwrap();
+
+ assert_eq!(2, Arc::strong_count(&msg));
+
+ drop((tx, rx));
+
+ assert_eq!(1, Arc::strong_count(&msg));
+}
+
+#[test]
+fn try_recv() {
+ let (mut tx, mut rx) = mpsc::channel(1);
+ match rx.try_recv() {
+ Err(TryRecvError::Empty) => {}
+ _ => panic!(),
+ }
+ tx.try_send(42).unwrap();
+ match rx.try_recv() {
+ Ok(42) => {}
+ _ => panic!(),
+ }
+ drop(tx);
+ match rx.try_recv() {
+ Err(TryRecvError::Closed) => {}
+ _ => panic!(),
+ }
+}
+
+#[test]
+fn try_recv_unbounded() {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+ match rx.try_recv() {
+ Err(TryRecvError::Empty) => {}
+ _ => panic!(),
+ }
+ tx.send(42).unwrap();
+ match rx.try_recv() {
+ Ok(42) => {}
+ _ => panic!(),
+ }
+ drop(tx);
+ match rx.try_recv() {
+ Err(TryRecvError::Closed) => {}
+ _ => panic!(),
+ }
+}
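
The bounded channel applies backpressure (`send` waits for a slot, and
`poll_ready`/`try_send` report fullness), while the unbounded channel
never blocks the sender. A minimal backpressure sketch (illustrative, not
part of the patch):

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (mut tx, mut rx) = mpsc::channel(1);

        tokio::spawn(async move {
            for i in 0..3 {
                // Waits here whenever the single buffer slot is occupied.
                tx.send(i).await.unwrap();
            }
        });

        assert_eq!(rx.recv().await, Some(0));
        assert_eq!(rx.recv().await, Some(1));
        assert_eq!(rx.recv().await, Some(2));
        assert_eq!(rx.recv().await, None); // all senders dropped
    }
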
diff --git a/third_party/rust/tokio/tests/sync_mutex.rs b/third_party/rust/tokio/tests/sync_mutex.rs
new file mode 100644
index 0000000000..444ebd6a22
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_mutex.rs
@@ -0,0 +1,154 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::Mutex;
+use tokio::time::{interval, timeout};
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+use std::sync::Arc;
+use std::time::Duration;
+
+#[test]
+fn straight_execution() {
+ let l = Mutex::new(100);
+
+ {
+ let mut t = spawn(l.lock());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &100);
+ *g = 99;
+ }
+ {
+ let mut t = spawn(l.lock());
+ let mut g = assert_ready!(t.poll());
+ assert_eq!(&*g, &99);
+ *g = 98;
+ }
+ {
+ let mut t = spawn(l.lock());
+ let g = assert_ready!(t.poll());
+ assert_eq!(&*g, &98);
+ }
+}
+
+#[test]
+fn readiness() {
+ let l1 = Arc::new(Mutex::new(100));
+ let l2 = Arc::clone(&l1);
+ let mut t1 = spawn(l1.lock());
+ let mut t2 = spawn(l2.lock());
+
+ let g = assert_ready!(t1.poll());
+
+    // We can't acquire the lock now since it's already held by g
+ assert_pending!(t2.poll());
+
+ // But once g unlocks, we can acquire it
+ drop(g);
+ assert!(t2.is_woken());
+ assert_ready!(t2.poll());
+}
+
+/*
+#[test]
+#[ignore]
+fn lock() {
+ let mut lock = Mutex::new(false);
+
+ let mut lock2 = lock.clone();
+ std::thread::spawn(move || {
+ let l = lock2.lock();
+ pin_mut!(l);
+
+ let mut task = MockTask::new();
+ let mut g = assert_ready!(task.poll(&mut l));
+ std::thread::sleep(std::time::Duration::from_millis(500));
+ *g = true;
+ drop(g);
+ });
+
+ std::thread::sleep(std::time::Duration::from_millis(50));
+ let mut task = MockTask::new();
+ let l = lock.lock();
+ pin_mut!(l);
+
+ assert_pending!(task.poll(&mut l));
+
+ std::thread::sleep(std::time::Duration::from_millis(500));
+ assert!(task.is_woken());
+ let result = assert_ready!(task.poll(&mut l));
+ assert!(*result);
+}
+*/
+
+#[tokio::test]
+/// Ensure a mutex is unlocked if a future holding the lock
+/// is aborted prematurely.
+async fn aborted_future_1() {
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ let mut iv = interval(Duration::from_millis(1000));
+ m2.lock().await;
+ iv.tick().await;
+ iv.tick().await;
+ })
+ .await
+ .unwrap_err();
+ }
+    // This should succeed, as the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+#[tokio::test]
+/// This test is similar to `aborted_future_1` but this time the
+/// aborted future is waiting for the lock.
+async fn aborted_future_2() {
+ let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
+ {
+ // Lock mutex
+ let _lock = m1.lock().await;
+ {
+ let m2 = m1.clone();
+ // Try to lock mutex in a future that is aborted prematurely
+ timeout(Duration::from_millis(1u64), async move {
+ m2.lock().await;
+ })
+ .await
+ .unwrap_err();
+ }
+ }
+    // This should succeed, as the mutex is no longer locked.
+ timeout(Duration::from_millis(1u64), async move {
+ m1.lock().await;
+ })
+ .await
+ .expect("Mutex is locked");
+}
+
+#[test]
+fn try_lock() {
+ let m: Mutex<usize> = Mutex::new(0);
+ {
+ let g1 = m.try_lock();
+        assert!(g1.is_ok());
+        let g2 = m.try_lock();
+        assert!(g2.is_err());
+    }
+    let g3 = m.try_lock();
+    assert!(g3.is_ok());
+}
+
+#[tokio::test]
+async fn debug_format() {
+ let s = "debug";
+ let m = Mutex::new(s.to_string());
+ assert_eq!(format!("{:?}", s), format!("{:?}", m.lock().await));
+}
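
Unlike `std::sync::Mutex`, this guard may be held across `.await` points,
and a dropped (or timed-out) waiter releases its place in the queue, which
is what `aborted_future_1`/`_2` verify. A minimal sketch (illustrative,
not part of the patch):

    use std::sync::Arc;
    use tokio::sync::Mutex;

    #[tokio::main]
    async fn main() {
        let data = Arc::new(Mutex::new(0u32));

        let mut handles = Vec::new();
        for _ in 0..10 {
            let data = data.clone();
            handles.push(tokio::spawn(async move {
                // Waiting here parks the task, not the thread.
                let mut guard = data.lock().await;
                *guard += 1;
            }));
        }
        for h in handles {
            h.await.unwrap();
        }

        assert_eq!(*data.lock().await, 10);
    }
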
diff --git a/third_party/rust/tokio/tests/sync_notify.rs b/third_party/rust/tokio/tests/sync_notify.rs
new file mode 100644
index 0000000000..be39ce32df
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_notify.rs
@@ -0,0 +1,102 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::Notify;
+use tokio_test::task::spawn;
+use tokio_test::*;
+
+trait AssertSend: Send + Sync {}
+impl AssertSend for Notify {}
+
+#[test]
+fn notify_notified_one() {
+ let notify = Notify::new();
+ let mut notified = spawn(async { notify.notified().await });
+
+ notify.notify();
+ assert_ready!(notified.poll());
+}
+
+#[test]
+fn notified_one_notify() {
+ let notify = Notify::new();
+ let mut notified = spawn(async { notify.notified().await });
+
+ assert_pending!(notified.poll());
+
+ notify.notify();
+ assert!(notified.is_woken());
+ assert_ready!(notified.poll());
+}
+
+#[test]
+fn notified_multi_notify() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify();
+ assert!(notified1.is_woken());
+ assert!(!notified2.is_woken());
+
+ assert_ready!(notified1.poll());
+ assert_pending!(notified2.poll());
+}
+
+#[test]
+fn notify_notified_multi() {
+ let notify = Notify::new();
+
+ notify.notify();
+
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_ready!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify();
+
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
+
+#[test]
+fn notified_drop_notified_notify() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+
+ drop(notified1);
+
+ assert_pending!(notified2.poll());
+
+ notify.notify();
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
+
+#[test]
+fn notified_multi_notify_drop_one() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+ assert_pending!(notified2.poll());
+
+ notify.notify();
+
+ assert!(notified1.is_woken());
+ assert!(!notified2.is_woken());
+
+ drop(notified1);
+
+ assert!(notified2.is_woken());
+ assert_ready!(notified2.poll());
+}
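
`notify` wakes one pending `notified()` waiter; if no one is waiting, a
single permit is stored so the next `notified()` completes immediately,
which is why `notify_notified_one` above succeeds even though `notify`
runs first. A minimal sketch (illustrative, not part of the patch):

    use std::sync::Arc;
    use tokio::sync::Notify;

    #[tokio::main]
    async fn main() {
        let notify = Arc::new(Notify::new());
        let notify2 = notify.clone();

        let waiter = tokio::spawn(async move {
            notify2.notified().await;
        });

        // Correct regardless of whether the waiter is parked yet: the
        // wakeup is stored as a permit if it arrives first.
        notify.notify();
        waiter.await.unwrap();
    }
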
diff --git a/third_party/rust/tokio/tests/sync_oneshot.rs b/third_party/rust/tokio/tests/sync_oneshot.rs
new file mode 100644
index 0000000000..13e526d48e
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_oneshot.rs
@@ -0,0 +1,234 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::oneshot;
+use tokio_test::*;
+
+use std::future::Future;
+use std::pin::Pin;
+
+trait AssertSend: Send {}
+impl AssertSend for oneshot::Sender<i32> {}
+impl AssertSend for oneshot::Receiver<i32> {}
+
+#[test]
+fn send_recv() {
+ let (tx, rx) = oneshot::channel();
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(rx.poll());
+
+ assert_ok!(tx.send(1));
+
+ assert!(rx.is_woken());
+
+ let val = assert_ready_ok!(rx.poll());
+ assert_eq!(val, 1);
+}
+
+#[tokio::test]
+async fn async_send_recv() {
+ let (tx, rx) = oneshot::channel();
+
+ assert_ok!(tx.send(1));
+ assert_eq!(1, assert_ok!(rx.await));
+}
+
+#[test]
+fn close_tx() {
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(rx.poll());
+
+ drop(tx);
+
+ assert!(rx.is_woken());
+ assert_ready_err!(rx.poll());
+}
+
+#[test]
+fn close_rx() {
+    // First, without checking poll_closed()
+ let (tx, _) = oneshot::channel();
+
+ assert_err!(tx.send(1));
+
+    // Second, via poll_closed()
+
+ let (tx, rx) = oneshot::channel();
+ let mut tx = task::spawn(tx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ drop(rx);
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(tx.into_inner().send(1));
+}
+
+#[tokio::test]
+async fn async_rx_closed() {
+ let (mut tx, rx) = oneshot::channel::<()>();
+
+ tokio::spawn(async move {
+ drop(rx);
+ });
+
+ tx.closed().await;
+}
+
+#[test]
+fn explicit_close_poll() {
+ // First, with message sent
+ let (tx, rx) = oneshot::channel();
+ let mut rx = task::spawn(rx);
+
+ assert_ok!(tx.send(1));
+
+ rx.close();
+
+ let value = assert_ready_ok!(rx.poll());
+ assert_eq!(value, 1);
+
+ // Second, without the message sent
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(tx.into_inner().send(1));
+ assert_ready_err!(rx.poll());
+
+ // Again, but without sending the value this time
+ let (tx, rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+ let mut rx = task::spawn(rx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_ready_err!(rx.poll());
+}
+
+#[test]
+fn explicit_close_try_recv() {
+ // First, with message sent
+ let (tx, mut rx) = oneshot::channel();
+
+ assert_ok!(tx.send(1));
+
+ rx.close();
+
+ let val = assert_ok!(rx.try_recv());
+ assert_eq!(1, val);
+
+ // Second, without the message sent
+ let (tx, mut rx) = oneshot::channel::<i32>();
+ let mut tx = task::spawn(tx);
+
+ assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ rx.close();
+
+ assert!(tx.is_woken());
+ assert!(tx.is_closed());
+ assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
+
+ assert_err!(rx.try_recv());
+}
+
+#[test]
+#[should_panic]
+fn close_try_recv_poll() {
+ let (_tx, rx) = oneshot::channel::<i32>();
+ let mut rx = task::spawn(rx);
+
+ rx.close();
+
+ assert_err!(rx.try_recv());
+
+ let _ = rx.poll();
+}
+
+#[test]
+fn drops_tasks() {
+ let (mut tx, mut rx) = oneshot::channel::<i32>();
+ let mut tx_task = task::spawn(());
+ let mut rx_task = task::spawn(());
+
+ assert_pending!(tx_task.enter(|cx, _| tx.poll_closed(cx)));
+ assert_pending!(rx_task.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ drop(tx);
+ drop(rx);
+
+ assert_eq!(1, tx_task.waker_ref_count());
+ assert_eq!(1, rx_task.waker_ref_count());
+}
+
+#[test]
+fn receiver_changes_task() {
+ let (tx, mut rx) = oneshot::channel();
+
+ let mut task1 = task::spawn(());
+ let mut task2 = task::spawn(());
+
+ assert_pending!(task1.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ assert_eq!(2, task1.waker_ref_count());
+ assert_eq!(1, task2.waker_ref_count());
+
+ assert_pending!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+
+ assert_eq!(1, task1.waker_ref_count());
+ assert_eq!(2, task2.waker_ref_count());
+
+ assert_ok!(tx.send(1));
+
+ assert!(!task1.is_woken());
+ assert!(task2.is_woken());
+
+ assert_ready_ok!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
+}
+
+#[test]
+fn sender_changes_task() {
+ let (mut tx, rx) = oneshot::channel::<i32>();
+
+ let mut task1 = task::spawn(());
+ let mut task2 = task::spawn(());
+
+ assert_pending!(task1.enter(|cx, _| tx.poll_closed(cx)));
+
+ assert_eq!(2, task1.waker_ref_count());
+ assert_eq!(1, task2.waker_ref_count());
+
+ assert_pending!(task2.enter(|cx, _| tx.poll_closed(cx)));
+
+ assert_eq!(1, task1.waker_ref_count());
+ assert_eq!(2, task2.waker_ref_count());
+
+ drop(rx);
+
+ assert!(!task1.is_woken());
+ assert!(task2.is_woken());
+
+ assert_ready!(task2.enter(|cx, _| tx.poll_closed(cx)));
+}
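
A oneshot channel carries exactly one value: the receiver future resolves
to `Ok(value)`, or to `Err(RecvError)` once the sender is dropped without
sending. A minimal sketch (illustrative, not part of the patch):

    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        let (tx, rx) = oneshot::channel();

        tokio::spawn(async move {
            // `send` is synchronous and consumes the sender.
            let _ = tx.send(42);
        });

        assert_eq!(rx.await.unwrap(), 42);
    }
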
diff --git a/third_party/rust/tokio/tests/sync_rwlock.rs b/third_party/rust/tokio/tests/sync_rwlock.rs
new file mode 100644
index 0000000000..87010b658e
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_rwlock.rs
@@ -0,0 +1,237 @@
+#![warn(rust_2018_idioms)]
+
+use std::sync::Arc;
+use std::task::Poll;
+
+use futures::future::FutureExt;
+use futures::stream;
+use futures::stream::StreamExt;
+
+use tokio::sync::{Barrier, RwLock};
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+#[test]
+fn into_inner() {
+ let rwlock = RwLock::new(42);
+ assert_eq!(rwlock.into_inner(), 42);
+}
+
+// Multiple reads should be Ready
+#[test]
+fn read_shared() {
+ let rwlock = RwLock::new(100);
+
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.read());
+ assert_ready!(t2.poll());
+}
+
+// When there is an active shared owner, exclusive access should not be possible
+#[test]
+fn write_shared_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+}
+
+// When there is an active exclusive owner, subsequent shared access should not be possible
+#[test]
+fn read_exclusive_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.write());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.read());
+ assert_pending!(t2.poll());
+}
+
+// If the maximum number of shared accesses has been reached, a subsequent
+// shared access should become available once one of the existing shared accesses is dropped
+#[test]
+fn exhaust_reading() {
+ let rwlock = RwLock::new(100);
+ let mut reads = Vec::new();
+ loop {
+ let mut t = spawn(rwlock.read());
+ match t.poll() {
+ Poll::Ready(guard) => reads.push(guard),
+ Poll::Pending => break,
+ }
+ }
+
+ let mut t1 = spawn(rwlock.read());
+ assert_pending!(t1.poll());
+ let g2 = reads.pop().unwrap();
+ drop(g2);
+ assert!(t1.is_woken());
+ assert_ready!(t1.poll());
+}
+
+// When there is an active exclusive owner, subsequent exclusive access should not be possible
+#[test]
+fn write_exclusive_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.write());
+
+ let _g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+}
+
+// When there is an active shared owner, exclusive access should be possible after shared is dropped
+#[test]
+fn write_shared_drop() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+
+ let g1 = assert_ready!(t1.poll());
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+ drop(g1);
+ assert!(t2.is_woken());
+ assert_ready!(t2.poll());
+}
+
+// When there is an active shared owner and exclusive access is triggered,
+// subsequent shared access should not be possible, as the write acquires all the available semaphore permits
+#[test]
+fn write_read_shared_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+
+ let mut t2 = spawn(rwlock.read());
+ assert_ready!(t2.poll());
+
+ let mut t3 = spawn(rwlock.write());
+ assert_pending!(t3.poll());
+
+ let mut t4 = spawn(rwlock.read());
+ assert_pending!(t4.poll());
+}
+
+// When there is an active shared owner and exclusive access is triggered,
+// reading should be possible again once the pending exclusive access is dropped
+#[test]
+fn write_read_shared_drop_pending() {
+ let rwlock = RwLock::new(100);
+ let mut t1 = spawn(rwlock.read());
+ let _g1 = assert_ready!(t1.poll());
+
+ let mut t2 = spawn(rwlock.write());
+ assert_pending!(t2.poll());
+
+ let mut t3 = spawn(rwlock.read());
+ assert_pending!(t3.poll());
+ drop(t2);
+
+ assert!(t3.is_woken());
+ assert_ready!(t3.poll());
+}
+
+// Acquire an RwLock nonexclusively by a single task
+#[tokio::test]
+async fn read_uncontested() {
+ let rwlock = RwLock::new(100);
+ let result = *rwlock.read().await;
+
+ assert_eq!(result, 100);
+}
+
+// Acquire an uncontested RwLock in exclusive mode
+#[tokio::test]
+async fn write_uncontested() {
+ let rwlock = RwLock::new(100);
+ let mut result = rwlock.write().await;
+ *result += 50;
+ assert_eq!(*result, 150);
+}
+
+// RwLocks should be acquired in the order that their Futures are waited upon.
+#[tokio::test]
+async fn write_order() {
+ let rwlock = RwLock::<Vec<u32>>::new(vec![]);
+ let fut2 = rwlock.write().map(|mut guard| guard.push(2));
+ let fut1 = rwlock.write().map(|mut guard| guard.push(1));
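+    // fut2 is created before fut1, but fut1 is awaited first, so it acquires
+    // the write lock first and pushes 1 before 2.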
+ fut1.await;
+ fut2.await;
+
+ let g = rwlock.read().await;
+ assert_eq!(*g, vec![1, 2]);
+}
+
+// A single RwLock is contested by tasks in multiple threads
+#[tokio::test(threaded_scheduler)]
+async fn multithreaded() {
+ let barrier = Arc::new(Barrier::new(5));
+ let rwlock = Arc::new(RwLock::<u32>::new(0));
+ let rwclone1 = rwlock.clone();
+ let rwclone2 = rwlock.clone();
+ let rwclone3 = rwlock.clone();
+ let rwclone4 = rwlock.clone();
+
+ let b1 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone1.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 2;
+ }
+ })
+ .await;
+ b1.wait().await;
+ });
+
+ let b2 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone2.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 3;
+ }
+ })
+ .await;
+ b2.wait().await;
+ });
+
+ let b3 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone3.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 5;
+ }
+ })
+ .await;
+ b3.wait().await;
+ });
+
+ let b4 = barrier.clone();
+ tokio::spawn(async move {
+ stream::iter(0..1000)
+ .for_each(move |_| {
+ let rwlock = rwclone4.clone();
+ async move {
+ let mut guard = rwlock.write().await;
+ *guard += 7;
+ }
+ })
+ .await;
+ b4.wait().await;
+ });
+
+ barrier.wait().await;
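+    // Each of the four writer tasks performs 1000 additions of 2, 3, 5, and 7
+    // respectively, so the final value is 1000 * (2 + 3 + 5 + 7) = 17_000.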
+ let g = rwlock.read().await;
+ assert_eq!(*g, 17_000);
+}
diff --git a/third_party/rust/tokio/tests/sync_semaphore.rs b/third_party/rust/tokio/tests/sync_semaphore.rs
new file mode 100644
index 0000000000..1cb0c749db
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_semaphore.rs
@@ -0,0 +1,81 @@
+#![cfg(feature = "full")]
+
+use std::sync::Arc;
+use tokio::sync::Semaphore;
+
+#[test]
+fn no_permits() {
+ // this should not panic
+ Semaphore::new(0);
+}
+
+#[test]
+fn try_acquire() {
+ let sem = Semaphore::new(1);
+ {
+ let p1 = sem.try_acquire();
+ assert!(p1.is_ok());
+ let p2 = sem.try_acquire();
+ assert!(p2.is_err());
+ }
+ let p3 = sem.try_acquire();
+ assert!(p3.is_ok());
+}
+
+#[tokio::test]
+async fn acquire() {
+ let sem = Arc::new(Semaphore::new(1));
+ let p1 = sem.try_acquire().unwrap();
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire().await;
+ });
+ drop(p1);
+ j.await.unwrap();
+}
+
+#[tokio::test]
+async fn add_permits() {
+ let sem = Arc::new(Semaphore::new(0));
+ let sem_clone = sem.clone();
+ let j = tokio::spawn(async move {
+ let _p2 = sem_clone.acquire().await;
+ });
+ sem.add_permits(1);
+ j.await.unwrap();
+}
+
+#[test]
+fn forget() {
+ let sem = Arc::new(Semaphore::new(1));
+ {
+ let p = sem.try_acquire().unwrap();
+ assert_eq!(sem.available_permits(), 0);
+ p.forget();
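+        // `forget` consumes the permit without returning it to the semaphore,
+        // so the available count stays at zero even after this scope ends.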
+ assert_eq!(sem.available_permits(), 0);
+ }
+ assert_eq!(sem.available_permits(), 0);
+ assert!(sem.try_acquire().is_err());
+}
+
+#[tokio::test]
+async fn stresstest() {
+ let sem = Arc::new(Semaphore::new(5));
+ let mut join_handles = Vec::new();
+ for _ in 0..1000 {
+ let sem_clone = sem.clone();
+ join_handles.push(tokio::spawn(async move {
+ let _p = sem_clone.acquire().await;
+ }));
+ }
+ for j in join_handles {
+ j.await.unwrap();
+ }
+    // there should be exactly 5 permits available now
+ let _p1 = sem.try_acquire().unwrap();
+ let _p2 = sem.try_acquire().unwrap();
+ let _p3 = sem.try_acquire().unwrap();
+ let _p4 = sem.try_acquire().unwrap();
+ let _p5 = sem.try_acquire().unwrap();
+ assert!(sem.try_acquire().is_err());
+}
diff --git a/third_party/rust/tokio/tests/sync_watch.rs b/third_party/rust/tokio/tests/sync_watch.rs
new file mode 100644
index 0000000000..2bc5bb2a85
--- /dev/null
+++ b/third_party/rust/tokio/tests/sync_watch.rs
@@ -0,0 +1,231 @@
+#![allow(clippy::cognitive_complexity)]
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::watch;
+use tokio_test::task::spawn;
+use tokio_test::{assert_pending, assert_ready};
+
+#[test]
+fn single_rx_recv() {
+ let (tx, mut rx) = watch::channel("one");
+
+ {
+ let mut t = spawn(rx.recv());
+ let v = assert_ready!(t.poll()).unwrap();
+ assert_eq!(v, "one");
+ }
+
+ {
+ let mut t = spawn(rx.recv());
+
+ assert_pending!(t.poll());
+
+ tx.broadcast("two").unwrap();
+
+ assert!(t.is_woken());
+
+ let v = assert_ready!(t.poll()).unwrap();
+ assert_eq!(v, "two");
+ }
+
+ {
+ let mut t = spawn(rx.recv());
+
+ assert_pending!(t.poll());
+
+ drop(tx);
+
+ let res = assert_ready!(t.poll());
+ assert!(res.is_none());
+ }
+}
+
+#[test]
+fn multi_rx() {
+ let (tx, mut rx1) = watch::channel("one");
+ let mut rx2 = rx1.clone();
+
+ {
+ let mut t1 = spawn(rx1.recv());
+ let mut t2 = spawn(rx2.recv());
+
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "one");
+
+ let res = assert_ready!(t2.poll());
+ assert_eq!(res.unwrap(), "one");
+ }
+
+ let mut t2 = spawn(rx2.recv());
+
+ {
+ let mut t1 = spawn(rx1.recv());
+
+ assert_pending!(t1.poll());
+ assert_pending!(t2.poll());
+
+ tx.broadcast("two").unwrap();
+
+ assert!(t1.is_woken());
+ assert!(t2.is_woken());
+
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "two");
+ }
+
+ {
+ let mut t1 = spawn(rx1.recv());
+
+ assert_pending!(t1.poll());
+
+ tx.broadcast("three").unwrap();
+
+ assert!(t1.is_woken());
+ assert!(t2.is_woken());
+
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "three");
+
+ let res = assert_ready!(t2.poll());
+ assert_eq!(res.unwrap(), "three");
+ }
+
+ drop(t2);
+
+ {
+ let mut t1 = spawn(rx1.recv());
+ let mut t2 = spawn(rx2.recv());
+
+ assert_pending!(t1.poll());
+ assert_pending!(t2.poll());
+
+ tx.broadcast("four").unwrap();
+
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "four");
+ drop(t1);
+
+ let mut t1 = spawn(rx1.recv());
+ assert_pending!(t1.poll());
+
+ drop(tx);
+
+ assert!(t1.is_woken());
+ let res = assert_ready!(t1.poll());
+ assert!(res.is_none());
+
+ let res = assert_ready!(t2.poll());
+ assert_eq!(res.unwrap(), "four");
+
+ drop(t2);
+ let mut t2 = spawn(rx2.recv());
+ let res = assert_ready!(t2.poll());
+ assert!(res.is_none());
+ }
+}
+
+#[test]
+fn rx_observes_final_value() {
+ // Initial value
+
+ let (tx, mut rx) = watch::channel("one");
+ drop(tx);
+
+ {
+ let mut t1 = spawn(rx.recv());
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "one");
+ }
+
+ {
+ let mut t1 = spawn(rx.recv());
+ let res = assert_ready!(t1.poll());
+ assert!(res.is_none());
+ }
+
+ // Sending a value
+
+ let (tx, mut rx) = watch::channel("one");
+
+ tx.broadcast("two").unwrap();
+
+ {
+ let mut t1 = spawn(rx.recv());
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "two");
+ }
+
+ {
+ let mut t1 = spawn(rx.recv());
+ assert_pending!(t1.poll());
+
+ tx.broadcast("three").unwrap();
+ drop(tx);
+
+ assert!(t1.is_woken());
+
+ let res = assert_ready!(t1.poll());
+ assert_eq!(res.unwrap(), "three");
+ }
+
+ {
+ let mut t1 = spawn(rx.recv());
+ let res = assert_ready!(t1.poll());
+ assert!(res.is_none());
+ }
+}
+
+#[test]
+fn poll_close() {
+ let (mut tx, rx) = watch::channel("one");
+
+ {
+ let mut t = spawn(tx.closed());
+ assert_pending!(t.poll());
+
+ drop(rx);
+
+ assert!(t.is_woken());
+ assert_ready!(t.poll());
+ }
+
+ assert!(tx.broadcast("two").is_err());
+}
+
+#[test]
+fn stream_impl() {
+ use tokio::stream::StreamExt;
+
+ let (tx, mut rx) = watch::channel("one");
+
+ {
+ let mut t = spawn(rx.next());
+ let v = assert_ready!(t.poll()).unwrap();
+ assert_eq!(v, "one");
+ }
+
+ {
+ let mut t = spawn(rx.next());
+
+ assert_pending!(t.poll());
+
+ tx.broadcast("two").unwrap();
+
+ assert!(t.is_woken());
+
+ let v = assert_ready!(t.poll()).unwrap();
+ assert_eq!(v, "two");
+ }
+
+ {
+ let mut t = spawn(rx.next());
+
+ assert_pending!(t.poll());
+
+ drop(tx);
+
+ let res = assert_ready!(t.poll());
+ assert!(res.is_none());
+ }
+}
diff --git a/third_party/rust/tokio/tests/task_blocking.rs b/third_party/rust/tokio/tests/task_blocking.rs
new file mode 100644
index 0000000000..4cd83d8a0d
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_blocking.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::task;
+use tokio_test::assert_ok;
+
+use std::thread;
+use std::time::Duration;
+
+#[tokio::test]
+async fn basic_blocking() {
+ // Run a few times
+ for _ in 0..100 {
+ let out = assert_ok!(
+ tokio::spawn(async {
+ assert_ok!(
+ task::spawn_blocking(|| {
+ thread::sleep(Duration::from_millis(5));
+ "hello"
+ })
+ .await
+ )
+ })
+ .await
+ );
+
+ assert_eq!(out, "hello");
+ }
+}
diff --git a/third_party/rust/tokio/tests/task_local.rs b/third_party/rust/tokio/tests/task_local.rs
new file mode 100644
index 0000000000..7f508997f2
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_local.rs
@@ -0,0 +1,31 @@
+tokio::task_local! {
+ static REQ_ID: u32;
+ pub static FOO: bool;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn local() {
+ let j1 = tokio::spawn(REQ_ID.scope(1, async move {
+ assert_eq!(REQ_ID.get(), 1);
+ assert_eq!(REQ_ID.get(), 1);
+ }));
+
+ let j2 = tokio::spawn(REQ_ID.scope(2, async move {
+ REQ_ID.with(|v| {
+ assert_eq!(REQ_ID.get(), 2);
+ assert_eq!(*v, 2);
+ });
+
+ tokio::time::delay_for(std::time::Duration::from_millis(10)).await;
+
+ assert_eq!(REQ_ID.get(), 2);
+ }));
+
+ let j3 = tokio::spawn(FOO.scope(true, async move {
+ assert!(FOO.get());
+ }));
+
+ j1.await.unwrap();
+ j2.await.unwrap();
+ j3.await.unwrap();
+}
diff --git a/third_party/rust/tokio/tests/task_local_set.rs b/third_party/rust/tokio/tests/task_local_set.rs
new file mode 100644
index 0000000000..1a10fefa68
--- /dev/null
+++ b/third_party/rust/tokio/tests/task_local_set.rs
@@ -0,0 +1,466 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::runtime::{self, Runtime};
+use tokio::sync::{mpsc, oneshot};
+use tokio::task::{self, LocalSet};
+use tokio::time;
+
+use std::cell::Cell;
+use std::sync::atomic::Ordering::{self, SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize};
+use std::time::Duration;
+
+#[tokio::test(basic_scheduler)]
+async fn local_basic_scheduler() {
+ LocalSet::new()
+ .run_until(async {
+ task::spawn_local(async {}).await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn local_threadpool() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ .await
+ .unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn localset_future_threadpool() {
+ thread_local! {
+ static ON_LOCAL_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_LOCAL_THREAD.with(|cell| cell.set(true));
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ assert!(ON_LOCAL_THREAD.with(|cell| cell.get()));
+ });
+ local.await;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn localset_future_timers() {
+ static RAN1: AtomicBool = AtomicBool::new(false);
+ static RAN2: AtomicBool = AtomicBool::new(false);
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ time::delay_for(Duration::from_millis(10)).await;
+ RAN1.store(true, Ordering::SeqCst);
+ });
+ local.spawn_local(async move {
+ time::delay_for(Duration::from_millis(20)).await;
+ RAN2.store(true, Ordering::SeqCst);
+ });
+ local.await;
+ assert!(RAN1.load(Ordering::SeqCst));
+ assert!(RAN2.load(Ordering::SeqCst));
+}
+
+#[tokio::test]
+async fn localset_future_drives_all_local_futs() {
+ static RAN1: AtomicBool = AtomicBool::new(false);
+ static RAN2: AtomicBool = AtomicBool::new(false);
+ static RAN3: AtomicBool = AtomicBool::new(false);
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ task::spawn_local(async {
+ task::yield_now().await;
+ RAN3.store(true, Ordering::SeqCst);
+ });
+ task::yield_now().await;
+ RAN1.store(true, Ordering::SeqCst);
+ });
+ local.spawn_local(async move {
+ task::yield_now().await;
+ RAN2.store(true, Ordering::SeqCst);
+ });
+ local.await;
+ assert!(RAN1.load(Ordering::SeqCst));
+ assert!(RAN2.load(Ordering::SeqCst));
+ assert!(RAN3.load(Ordering::SeqCst));
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn local_threadpool_timer() {
+ // This test ensures that runtime services like the timer are properly
+ // set for the local task set.
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ time::delay_for(Duration::from_millis(10)).await;
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ })
+ .await;
+}
+
+#[test]
+// This will panic, since the thread that calls `block_on` cannot use
+// in-place blocking inside `block_on`.
+#[should_panic]
+fn local_threadpool_blocking_in_place() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ let mut rt = runtime::Builder::new()
+ .threaded_scheduler()
+ .enable_all()
+ .build()
+ .unwrap();
+ LocalSet::new().block_on(&mut rt, async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::block_in_place(|| {});
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ });
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn local_threadpool_blocking_run() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let join = task::spawn_local(async move {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_blocking(|| {
+ assert!(
+ !ON_RT_THREAD.with(|cell| cell.get()),
+ "blocking must not run on the local task set's thread"
+ );
+ })
+ .await
+ .unwrap();
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ });
+ join.await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn all_spawns_are_local() {
+ use futures::future;
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ let handles = (0..128)
+ .map(|_| {
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ })
+ .collect::<Vec<_>>();
+ for joined in future::join_all(handles).await {
+ joined.unwrap();
+ }
+ })
+ .await;
+}
+
+#[tokio::test(threaded_scheduler)]
+async fn nested_spawn_is_local() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ LocalSet::new()
+ .run_until(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ task::spawn_local(async {
+ assert!(ON_RT_THREAD.with(|cell| cell.get()));
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await
+ .unwrap();
+ })
+ .await;
+}
+
+#[test]
+fn join_local_future_elsewhere() {
+ thread_local! {
+ static ON_RT_THREAD: Cell<bool> = Cell::new(false);
+ }
+
+ ON_RT_THREAD.with(|cell| cell.set(true));
+
+ let mut rt = runtime::Builder::new()
+ .threaded_scheduler()
+ .build()
+ .unwrap();
+ let local = LocalSet::new();
+ local.block_on(&mut rt, async move {
+ let (tx, rx) = oneshot::channel();
+ let join = task::spawn_local(async move {
+ println!("hello world running...");
+ assert!(
+ ON_RT_THREAD.with(|cell| cell.get()),
+ "local task must run on local thread, no matter where it is awaited"
+ );
+ rx.await.unwrap();
+
+ println!("hello world task done");
+ "hello world"
+ });
+ let join2 = task::spawn(async move {
+ assert!(
+ !ON_RT_THREAD.with(|cell| cell.get()),
+ "spawned task should be on a worker"
+ );
+
+ tx.send(()).expect("task shouldn't have ended yet");
+ println!("waking up hello world...");
+
+ join.await.expect("task should complete successfully");
+
+ println!("hello world task joined");
+ });
+ join2.await.unwrap()
+ });
+}
+
+#[test]
+fn drop_cancels_tasks() {
+ use std::rc::Rc;
+
+ // This test reproduces issue #1842
+ let mut rt = rt();
+ let rc1 = Rc::new(());
+ let rc2 = rc1.clone();
+
+ let (started_tx, started_rx) = oneshot::channel();
+
+ let local = LocalSet::new();
+ local.spawn_local(async move {
+ // Move this in
+ let _rc2 = rc2;
+
+ started_tx.send(()).unwrap();
+ loop {
+ time::delay_for(Duration::from_secs(3600)).await;
+ }
+ });
+
+ local.block_on(&mut rt, async {
+ started_rx.await.unwrap();
+ });
+ drop(local);
+ drop(rt);
+
+ assert_eq!(1, Rc::strong_count(&rc1));
+}
+
+#[test]
+fn drop_cancels_remote_tasks() {
+ // This test reproduces issue #1885.
+ use std::sync::mpsc::RecvTimeoutError;
+
+ let (done_tx, done_rx) = std::sync::mpsc::channel();
+ let thread = std::thread::spawn(move || {
+ let (tx, mut rx) = mpsc::channel::<()>(1024);
+
+ let mut rt = rt();
+
+ let local = LocalSet::new();
+ local.spawn_local(async move { while let Some(_) = rx.recv().await {} });
+ local.block_on(&mut rt, async {
+ time::delay_for(Duration::from_millis(1)).await;
+ });
+
+ drop(tx);
+
+ // This enters an infinite loop if the remote notified tasks are not
+ // properly cancelled.
+ drop(local);
+
+ // Send a message on the channel so that the test thread can
+ // determine if we have entered an infinite loop:
+ done_tx.send(()).unwrap();
+ });
+
+ // Since the failure mode of this test is an infinite loop, rather than
+ // something we can easily make assertions about, we'll run it in a
+ // thread. When the test thread finishes, it will send a message on a
+ // channel to this thread. We'll wait for that message with a fairly
+    // generous timeout, and if we don't receive it, we assume the test
+ // thread has hung.
+ //
+ // Note that it should definitely complete in under a minute, but just
+ // in case CI is slow, we'll give it a long timeout.
+ match done_rx.recv_timeout(Duration::from_secs(60)) {
+ Err(RecvTimeoutError::Timeout) => panic!(
+ "test did not complete within 60 seconds, \
+ we have (probably) entered an infinite loop!"
+ ),
+ // Did the test thread panic? We'll find out for sure when we `join`
+ // with it.
+ Err(RecvTimeoutError::Disconnected) => {
+ println!("done_rx dropped, did the test thread panic?");
+ }
+ // Test completed successfully!
+ Ok(()) => {}
+ }
+
+ thread.join().expect("test thread should not panic!")
+}
+
+#[tokio::test]
+async fn local_tasks_are_polled_after_tick() {
+ // Reproduces issues #1899 and #1900
+
+ static RX1: AtomicUsize = AtomicUsize::new(0);
+ static RX2: AtomicUsize = AtomicUsize::new(0);
+ static EXPECTED: usize = 500;
+
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ let local = LocalSet::new();
+
+ local
+ .run_until(async {
+ let task2 = task::spawn(async move {
+ // Wait a bit
+ time::delay_for(Duration::from_millis(100)).await;
+
+ let mut oneshots = Vec::with_capacity(EXPECTED);
+
+ // Send values
+ for _ in 0..EXPECTED {
+ let (oneshot_tx, oneshot_rx) = oneshot::channel();
+ oneshots.push(oneshot_tx);
+ tx.send(oneshot_rx).unwrap();
+ }
+
+ time::delay_for(Duration::from_millis(100)).await;
+
+ for tx in oneshots.drain(..) {
+ tx.send(()).unwrap();
+ }
+
+ time::delay_for(Duration::from_millis(300)).await;
+ let rx1 = RX1.load(SeqCst);
+ let rx2 = RX2.load(SeqCst);
+ println!("EXPECT = {}; RX1 = {}; RX2 = {}", EXPECTED, rx1, rx2);
+ assert_eq!(EXPECTED, rx1);
+ assert_eq!(EXPECTED, rx2);
+ });
+
+ while let Some(oneshot) = rx.recv().await {
+ RX1.fetch_add(1, SeqCst);
+
+ task::spawn_local(async move {
+ oneshot.await.unwrap();
+ RX2.fetch_add(1, SeqCst);
+ });
+ }
+
+ task2.await.unwrap();
+ })
+ .await;
+}
+
+#[tokio::test]
+async fn acquire_mutex_in_drop() {
+ use futures::future::pending;
+
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let local = LocalSet::new();
+
+ local.spawn_local(async move {
+ let _ = rx2.await;
+ unreachable!();
+ });
+
+ local.spawn_local(async move {
+ let _ = rx1.await;
+ tx2.send(()).unwrap();
+ unreachable!();
+ });
+
+ // Spawn a task that will never notify
+ local.spawn_local(async move {
+ pending::<()>().await;
+ tx1.send(()).unwrap();
+ });
+
+ // Tick the loop
+ local
+ .run_until(async {
+ task::yield_now().await;
+ })
+ .await;
+
+ // Drop the LocalSet
+ drop(local);
+}
+
+fn rt() -> Runtime {
+ tokio::runtime::Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap()
+}
diff --git a/third_party/rust/tokio/tests/tcp_accept.rs b/third_party/rust/tokio/tests/tcp_accept.rs
new file mode 100644
index 0000000000..ff62fb96a2
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_accept.rs
@@ -0,0 +1,99 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::{mpsc, oneshot};
+use tokio_test::assert_ok;
+
+use std::net::{IpAddr, SocketAddr};
+
+macro_rules! test_accept {
+ ($(($ident:ident, $target:expr),)*) => {
+ $(
+ #[tokio::test]
+ async fn $ident() {
+ let mut listener = assert_ok!(TcpListener::bind($target).await);
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, _) = assert_ok!(listener.accept().await);
+ assert_ok!(tx.send(socket));
+ });
+
+ let cli = assert_ok!(TcpStream::connect(&addr).await);
+ let srv = assert_ok!(rx.await);
+
+ assert_eq!(cli.local_addr().unwrap(), srv.peer_addr().unwrap());
+ }
+ )*
+ }
+}
+
+test_accept! {
+ (ip_str, "127.0.0.1:0"),
+ (host_str, "localhost:0"),
+ (socket_addr, "127.0.0.1:0".parse::<SocketAddr>().unwrap()),
+ (str_port_tuple, ("127.0.0.1", 0)),
+ (ip_port_tuple, ("127.0.0.1".parse::<IpAddr>().unwrap(), 0)),
+}
+
+use std::pin::Pin;
+use std::sync::{
+ atomic::{AtomicUsize, Ordering::SeqCst},
+ Arc,
+};
+use std::task::{Context, Poll};
+use tokio::stream::{Stream, StreamExt};
+
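+// A `Stream` adapter that counts how many times `poll_next` is called; the
+// `no_extra_poll` test below uses it to check that the accept stream is only
+// polled when it can make progress.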
+struct TrackPolls<S> {
+ npolls: Arc<AtomicUsize>,
+ s: S,
+}
+
+impl<S> Stream for TrackPolls<S>
+where
+ S: Stream,
+{
+ type Item = S::Item;
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ // safety: we do not move s
+ let this = unsafe { self.get_unchecked_mut() };
+ this.npolls.fetch_add(1, SeqCst);
+ // safety: we are pinned, and so is s
+ unsafe { Pin::new_unchecked(&mut this.s) }.poll_next(cx)
+ }
+}
+
+#[tokio::test]
+async fn no_extra_poll() {
+ let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = oneshot::channel();
+ let (accepted_tx, mut accepted_rx) = mpsc::unbounded_channel();
+
+ tokio::spawn(async move {
+ let mut incoming = TrackPolls {
+ npolls: Arc::new(AtomicUsize::new(0)),
+ s: listener.incoming(),
+ };
+ assert_ok!(tx.send(Arc::clone(&incoming.npolls)));
+ while let Some(_) = incoming.next().await {
+ accepted_tx.send(()).unwrap();
+ }
+ });
+
+ let npolls = assert_ok!(rx.await);
+ tokio::task::yield_now().await;
+
+ // should have been polled exactly once: the initial poll
+ assert_eq!(npolls.load(SeqCst), 1);
+
+ let _ = assert_ok!(TcpStream::connect(&addr).await);
+ accepted_rx.next().await.unwrap();
+
+ // should have been polled twice more: once to yield Some(), then once to yield Pending
+ assert_eq!(npolls.load(SeqCst), 1 + 2);
+}
diff --git a/third_party/rust/tokio/tests/tcp_connect.rs b/third_party/rust/tokio/tests/tcp_connect.rs
new file mode 100644
index 0000000000..de1cead829
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_connect.rs
@@ -0,0 +1,229 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+use futures::join;
+
+#[tokio::test]
+async fn connect_v4() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ assert!(addr.is_ipv4());
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, addr) = assert_ok!(srv.accept().await);
+ assert_eq!(addr, assert_ok!(socket.peer_addr()));
+ assert_ok!(tx.send(socket));
+ });
+
+ let mine = assert_ok!(TcpStream::connect(&addr).await);
+ let theirs = assert_ok!(rx.await);
+
+ assert_eq!(
+ assert_ok!(mine.local_addr()),
+ assert_ok!(theirs.peer_addr())
+ );
+ assert_eq!(
+ assert_ok!(theirs.local_addr()),
+ assert_ok!(mine.peer_addr())
+ );
+}
+
+#[tokio::test]
+async fn connect_v6() {
+ let mut srv = assert_ok!(TcpListener::bind("[::1]:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ assert!(addr.is_ipv6());
+
+ let (tx, rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let (socket, addr) = assert_ok!(srv.accept().await);
+ assert_eq!(addr, assert_ok!(socket.peer_addr()));
+ assert_ok!(tx.send(socket));
+ });
+
+ let mine = assert_ok!(TcpStream::connect(&addr).await);
+ let theirs = assert_ok!(rx.await);
+
+ assert_eq!(
+ assert_ok!(mine.local_addr()),
+ assert_ok!(theirs.peer_addr())
+ );
+ assert_eq!(
+ assert_ok!(theirs.local_addr()),
+ assert_ok!(mine.peer_addr())
+ );
+}
+
+#[tokio::test]
+async fn connect_addr_ip_string() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("127.0.0.1:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_str_slice() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("127.0.0.1:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr[..]).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_host_string() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = format!("localhost:{}", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_port_tuple() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = (addr.ip(), addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_ip_str_port_tuple() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = ("127.0.0.1", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+#[tokio::test]
+async fn connect_addr_host_str_port_tuple() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+ let addr = ("localhost", addr.port());
+
+ let server = async {
+ assert_ok!(srv.accept().await);
+ };
+
+ let client = async {
+ assert_ok!(TcpStream::connect(&addr).await);
+ };
+
+ join!(server, client);
+}
+
+/*
+ * TODO: bring this back once TCP exposes HUP again
+ *
+#[cfg(target_os = "linux")]
+mod linux {
+ use tokio::net::{TcpListener, TcpStream};
+ use tokio::prelude::*;
+ use tokio_test::assert_ok;
+
+ use mio::unix::UnixReady;
+
+ use futures_util::future::poll_fn;
+ use std::io::Write;
+ use std::time::Duration;
+ use std::{net, thread};
+
+ #[tokio::test]
+ fn poll_hup() {
+ let addr = assert_ok!("127.0.0.1:0".parse());
+ let mut srv = assert_ok!(TcpListener::bind(&addr));
+ let addr = assert_ok!(srv.local_addr());
+
+ tokio::spawn(async move {
+ let (mut client, _) = assert_ok!(srv.accept().await);
+ assert_ok!(client.set_linger(Some(Duration::from_millis(0))));
+ assert_ok!(client.write_all(b"hello world").await);
+
+ // TODO: Drop?
+ });
+
+ /*
+ let t = thread::spawn(move || {
+ let mut client = assert_ok!(srv.accept()).0;
+ client.set_linger(Some(Duration::from_millis(0))).unwrap();
+ client.write(b"hello world").unwrap();
+ thread::sleep(Duration::from_millis(200));
+ });
+ */
+
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ // Poll for HUP before reading.
+ future::poll_fn(|| stream.poll_read_ready(UnixReady::hup().into()))
+ .wait()
+ .unwrap();
+
+ // Same for write half
+ future::poll_fn(|| stream.poll_write_ready())
+ .wait()
+ .unwrap();
+
+ let mut buf = vec![0; 11];
+
+ // Read the data
+ future::poll_fn(|| stream.poll_read(&mut buf))
+ .wait()
+ .unwrap();
+
+ assert_eq!(b"hello world", &buf[..]);
+
+ t.join().unwrap();
+ }
+}
+*/
diff --git a/third_party/rust/tokio/tests/tcp_echo.rs b/third_party/rust/tokio/tests/tcp_echo.rs
new file mode 100644
index 0000000000..1feba63ee7
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_echo.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::{TcpListener, TcpStream};
+use tokio::prelude::*;
+use tokio::sync::oneshot;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn echo_server() {
+ const ITER: usize = 1024;
+
+ let (tx, rx) = oneshot::channel();
+
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ let msg = "foo bar baz";
+ tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ for _ in 0..ITER {
+ // write
+ assert_ok!(stream.write_all(msg.as_bytes()).await);
+
+ // read
+ let mut buf = [0; 11];
+ assert_ok!(stream.read_exact(&mut buf).await);
+ assert_eq!(&buf[..], msg.as_bytes());
+ }
+
+ assert_ok!(tx.send(()));
+ });
+
+ let (mut stream, _) = assert_ok!(srv.accept().await);
+ let (mut rd, mut wr) = stream.split();
+
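+    // Echo every byte back to the client; `io::copy` resolves with the total
+    // byte count once the client closes the connection.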
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, (ITER * msg.len()) as u64);
+
+ assert_ok!(rx.await);
+}
diff --git a/third_party/rust/tokio/tests/tcp_peek.rs b/third_party/rust/tokio/tests/tcp_peek.rs
new file mode 100644
index 0000000000..aecc0ac19c
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_peek.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::AsyncReadExt;
+use tokio::net::TcpStream;
+
+use tokio_test::assert_ok;
+
+use std::thread;
+use std::{convert::TryInto, io::Write, net};
+
+#[tokio::test]
+async fn peek() {
+ let listener = net::TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = listener.local_addr().unwrap();
+ let t = thread::spawn(move || assert_ok!(listener.accept()).0);
+
+ let left = net::TcpStream::connect(&addr).unwrap();
+ let mut right = t.join().unwrap();
+ let _ = right.write(&[1, 2, 3, 4]).unwrap();
+
+ let mut left: TcpStream = left.try_into().unwrap();
+ let mut buf = [0u8; 16];
+ let n = assert_ok!(left.peek(&mut buf).await);
+ assert_eq!([1, 2, 3, 4], buf[..n]);
+
+ let n = assert_ok!(left.read(&mut buf).await);
+ assert_eq!([1, 2, 3, 4], buf[..n]);
+}
diff --git a/third_party/rust/tokio/tests/tcp_shutdown.rs b/third_party/rust/tokio/tests/tcp_shutdown.rs
new file mode 100644
index 0000000000..bd43e143b8
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_shutdown.rs
@@ -0,0 +1,29 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::io::{self, AsyncWriteExt};
+use tokio::net::{TcpListener, TcpStream};
+use tokio::prelude::*;
+use tokio_test::assert_ok;
+
+#[tokio::test]
+async fn shutdown() {
+ let mut srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
+ let addr = assert_ok!(srv.local_addr());
+
+ tokio::spawn(async move {
+ let mut stream = assert_ok!(TcpStream::connect(&addr).await);
+
+ assert_ok!(AsyncWriteExt::shutdown(&mut stream).await);
+
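+        // Shutdown sends FIN, so the server's echo copy observes EOF without
+        // copying anything, and our read in turn returns 0 bytes.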
+ let mut buf = [0; 1];
+ let n = assert_ok!(stream.read(&mut buf).await);
+ assert_eq!(n, 0);
+ });
+
+ let (mut stream, _) = assert_ok!(srv.accept().await);
+ let (mut rd, mut wr) = stream.split();
+
+ let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
+ assert_eq!(n, 0);
+}
diff --git a/third_party/rust/tokio/tests/tcp_split.rs b/third_party/rust/tokio/tests/tcp_split.rs
new file mode 100644
index 0000000000..42f797708c
--- /dev/null
+++ b/third_party/rust/tokio/tests/tcp_split.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::io::Result;
+use std::io::{Read, Write};
+use std::{net, thread};
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::TcpStream;
+
+#[tokio::test]
+async fn split() -> Result<()> {
+ const MSG: &[u8] = b"split";
+
+ let listener = net::TcpListener::bind("127.0.0.1:0")?;
+ let addr = listener.local_addr()?;
+
+ let handle = thread::spawn(move || {
+ let (mut stream, _) = listener.accept().unwrap();
+ stream.write(MSG).unwrap();
+
+ let mut read_buf = [0u8; 32];
+ let read_len = stream.read(&mut read_buf).unwrap();
+ assert_eq!(&read_buf[..read_len], MSG);
+ });
+
+ let mut stream = TcpStream::connect(&addr).await?;
+ let (mut read_half, mut write_half) = stream.split();
+
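+    // `peek` fills the buffer without consuming the data, so both peeks and
+    // the following read all observe the same MSG bytes.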
+ let mut read_buf = [0u8; 32];
+ let peek_len1 = read_half.peek(&mut read_buf[..]).await?;
+ let peek_len2 = read_half.peek(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, peek_len2);
+
+ let read_len = read_half.read(&mut read_buf[..]).await?;
+ assert_eq!(peek_len1, read_len);
+ assert_eq!(&read_buf[..read_len], MSG);
+
+ write_half.write(MSG).await?;
+ handle.join().unwrap();
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/test_clock.rs b/third_party/rust/tokio/tests/test_clock.rs
new file mode 100644
index 0000000000..891636fdb2
--- /dev/null
+++ b/third_party/rust/tokio/tests/test_clock.rs
@@ -0,0 +1,50 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, Duration, Instant};
+
+#[tokio::test]
+async fn resume_lets_time_move_forward_instead_of_resetting_it() {
+ let start = Instant::now();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ let advanced_by_ten_secs = Instant::now();
+ assert!(advanced_by_ten_secs - start > Duration::from_secs(10));
+ assert!(advanced_by_ten_secs - start < Duration::from_secs(11));
+ time::resume();
+ assert!(advanced_by_ten_secs < Instant::now());
+ assert!(Instant::now() - advanced_by_ten_secs < Duration::from_secs(1));
+}
+
+#[tokio::test]
+async fn can_pause_after_resume() {
+ let start = Instant::now();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ time::resume();
+ time::pause();
+ time::advance(Duration::from_secs(10)).await;
+ assert!(Instant::now() - start > Duration::from_secs(20));
+ assert!(Instant::now() - start < Duration::from_secs(21));
+}
+
+#[tokio::test]
+#[should_panic]
+async fn freezing_time_while_frozen_panics() {
+ time::pause();
+ time::pause();
+}
+
+#[tokio::test]
+#[should_panic]
+async fn advancing_time_when_time_is_not_frozen_panics() {
+ time::advance(Duration::from_secs(1)).await;
+}
+
+#[tokio::test]
+#[should_panic]
+async fn resuming_time_when_not_frozen_panics() {
+ time::pause();
+ time::resume();
+ time::resume();
+}
diff --git a/third_party/rust/tokio/tests/time_delay.rs b/third_party/rust/tokio/tests/time_delay.rs
new file mode 100644
index 0000000000..e763ae03be
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_delay.rs
@@ -0,0 +1,176 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, Duration, Instant};
+use tokio_test::{assert_pending, task};
+
+macro_rules! assert_elapsed {
+ ($now:expr, $ms:expr) => {{
+ let elapsed = $now.elapsed();
+ let lower = ms($ms);
+
+ // Handles ms rounding
+ assert!(
+ elapsed >= lower && elapsed <= lower + ms(1),
+ "actual = {:?}, expected = {:?}",
+ elapsed,
+ lower
+ );
+ }};
+}
+
+#[tokio::test]
+async fn immediate_delay() {
+ time::pause();
+
+ let now = Instant::now();
+
+ // Ready!
+ time::delay_until(now).await;
+ assert_elapsed!(now, 0);
+}
+
+#[tokio::test]
+async fn delayed_delay_level_0() {
+ time::pause();
+
+ for &i in &[1, 10, 60] {
+ let now = Instant::now();
+
+ time::delay_until(now + ms(i)).await;
+
+ assert_elapsed!(now, i);
+ }
+}
+
+#[tokio::test]
+async fn sub_ms_delayed_delay() {
+ time::pause();
+
+ for _ in 0..5 {
+ let now = Instant::now();
+ let deadline = now + ms(1) + Duration::new(0, 1);
+
+ time::delay_until(deadline).await;
+
+ assert_elapsed!(now, 1);
+ }
+}
+
+#[tokio::test]
+async fn delayed_delay_wrapping_level_0() {
+ time::pause();
+
+ time::delay_for(ms(5)).await;
+
+ let now = Instant::now();
+ time::delay_until(now + ms(60)).await;
+
+ assert_elapsed!(now, 60);
+}
+
+#[tokio::test]
+async fn reset_future_delay_before_fire() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut delay = task::spawn(time::delay_until(now + ms(100)));
+ assert_pending!(delay.poll());
+
+ let mut delay = delay.into_inner();
+
+ delay.reset(Instant::now() + ms(200));
+ delay.await;
+
+ assert_elapsed!(now, 200);
+}
+
+#[tokio::test]
+async fn reset_past_delay_before_turn() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut delay = task::spawn(time::delay_until(now + ms(100)));
+ assert_pending!(delay.poll());
+
+ let mut delay = delay.into_inner();
+
+ delay.reset(now + ms(80));
+ delay.await;
+
+ assert_elapsed!(now, 80);
+}
+
+#[tokio::test]
+async fn reset_past_delay_before_fire() {
+ time::pause();
+
+ let now = Instant::now();
+
+ let mut delay = task::spawn(time::delay_until(now + ms(100)));
+ assert_pending!(delay.poll());
+
+ let mut delay = delay.into_inner();
+
+ time::delay_for(ms(10)).await;
+
+ delay.reset(now + ms(80));
+ delay.await;
+
+ assert_elapsed!(now, 80);
+}
+
+#[tokio::test]
+async fn reset_future_delay_after_fire() {
+ time::pause();
+
+ let now = Instant::now();
+ let mut delay = time::delay_until(now + ms(100));
+
+ (&mut delay).await;
+ assert_elapsed!(now, 100);
+
+ delay.reset(now + ms(110));
+ delay.await;
+ assert_elapsed!(now, 110);
+}
+
+#[test]
+#[should_panic]
+fn creating_delay_outside_of_context() {
+ let now = Instant::now();
+
+ // This creates a delay outside of the context of a mock timer. This tests
+ // that it will panic.
+ let _fut = time::delay_until(now + ms(500));
+}
+
+#[should_panic]
+#[tokio::test]
+async fn greater_than_max() {
+ const YR_5: u64 = 5 * 365 * 24 * 60 * 60 * 1000;
+
+ time::delay_until(Instant::now() + ms(YR_5)).await;
+}
+
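+// tokio's timer wheel has NUM_LEVELS levels of 64 (2^6) slots each, so the
+// largest representable delay is 2^(6 * NUM_LEVELS) - 1 milliseconds.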
+const NUM_LEVELS: usize = 6;
+const MAX_DURATION: u64 = (1 << (6 * NUM_LEVELS)) - 1;
+
+#[should_panic]
+#[tokio::test]
+async fn exactly_max() {
+ // TODO: this should not panic but `time::ms()` is acting up
+ time::delay_for(ms(MAX_DURATION)).await;
+}
+
+#[tokio::test]
+async fn no_out_of_bounds_close_to_max() {
+ time::pause();
+ time::delay_for(ms(MAX_DURATION - 1)).await;
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/time_delay_queue.rs b/third_party/rust/tokio/tests/time_delay_queue.rs
new file mode 100644
index 0000000000..214b9ebee6
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_delay_queue.rs
@@ -0,0 +1,448 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, delay_for, DelayQueue, Duration, Instant};
+use tokio_test::{assert_ok, assert_pending, assert_ready, task};
+
+macro_rules! poll {
+ ($queue:ident) => {
+ $queue.enter(|cx, mut queue| queue.poll_expired(cx))
+ };
+}
+
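+// Local variant of `assert_ready_ok!` that also unwraps the `Option` yielded
+// by `poll_expired`, panicking on `None`.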
+macro_rules! assert_ready_ok {
+ ($e:expr) => {{
+ assert_ok!(match assert_ready!($e) {
+ Some(v) => v,
+ None => panic!("None"),
+ })
+ }};
+}
+
+#[tokio::test]
+async fn single_immediate_delay() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+ let _key = queue.insert_at("foo", Instant::now());
+
+    // Advance time by 1ms to handle the rounding
+ delay_for(ms(1)).await;
+
+ assert_ready_ok!(poll!(queue));
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none())
+}
+
+#[tokio::test]
+async fn multi_immediate_delays() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let _k = queue.insert_at("1", Instant::now());
+ let _k = queue.insert_at("2", Instant::now());
+ let _k = queue.insert_at("3", Instant::now());
+
+ delay_for(ms(1)).await;
+
+ let mut res = vec![];
+
+ while res.len() < 3 {
+ let entry = assert_ready_ok!(poll!(queue));
+ res.push(entry.into_inner());
+ }
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none());
+
+ res.sort();
+
+ assert_eq!("1", res[0]);
+ assert_eq!("2", res[1]);
+ assert_eq!("3", res[2]);
+}
+
+#[tokio::test]
+async fn single_short_delay() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+ let _key = queue.insert_at("foo", Instant::now() + ms(5));
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(1)).await;
+
+ assert!(!queue.is_woken());
+
+ delay_for(ms(5)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue));
+ assert_eq!(*entry.get_ref(), "foo");
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none());
+}
+
+#[tokio::test]
+async fn multi_delay_at_start() {
+ time::pause();
+
+ let long = 262_144 + 9 * 4096;
+ let delays = &[1000, 2, 234, long, 60, 10];
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+    // Set up the delays
+ for &i in delays {
+ let _key = queue.insert_at(i, Instant::now() + ms(i));
+ }
+
+ assert_pending!(poll!(queue));
+ assert!(!queue.is_woken());
+
+ for elapsed in 0..1200 {
+ delay_for(ms(1)).await;
+ let elapsed = elapsed + 1;
+
+ if delays.contains(&elapsed) {
+ assert!(queue.is_woken());
+ assert_ready!(poll!(queue));
+ assert_pending!(poll!(queue));
+ } else if queue.is_woken() {
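+            // These wakeups are timer-wheel cascades: entries migrate to a
+            // lower wheel level at these instants without anything expiring.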
+ let cascade = &[192, 960];
+ assert!(cascade.contains(&elapsed), "elapsed={}", elapsed);
+
+ assert_pending!(poll!(queue));
+ }
+ }
+}
+
+#[tokio::test]
+async fn insert_in_past_fires_immediately() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+ let now = Instant::now();
+
+ delay_for(ms(10)).await;
+
+ queue.insert_at("foo", now);
+
+ assert_ready!(poll!(queue));
+}
+
+#[tokio::test]
+async fn remove_entry() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let key = queue.insert_at("foo", Instant::now() + ms(5));
+
+ assert_pending!(poll!(queue));
+
+ let entry = queue.remove(&key);
+ assert_eq!(entry.into_inner(), "foo");
+
+ delay_for(ms(10)).await;
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none());
+}
+
+#[tokio::test]
+async fn reset_entry() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+ let key = queue.insert_at("foo", now + ms(5));
+
+ assert_pending!(poll!(queue));
+ delay_for(ms(1)).await;
+
+ queue.reset_at(&key, now + ms(10));
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(7)).await;
+
+ assert!(!queue.is_woken());
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(3)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue));
+ assert_eq!(*entry.get_ref(), "foo");
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none())
+}
+
+// Reproduces tokio-rs/tokio#849.
+#[tokio::test]
+async fn reset_much_later() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+ delay_for(ms(1)).await;
+
+ let key = queue.insert_at("foo", now + ms(200));
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(3)).await;
+
+ queue.reset_at(&key, now + ms(5));
+
+ delay_for(ms(20)).await;
+
+ assert!(queue.is_woken());
+}
+
+// Reproduces tokio-rs/tokio#849.
+#[tokio::test]
+async fn reset_twice() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+ let now = Instant::now();
+
+ delay_for(ms(1)).await;
+
+ let key = queue.insert_at("foo", now + ms(200));
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(3)).await;
+
+ queue.reset_at(&key, now + ms(50));
+
+ delay_for(ms(20)).await;
+
+ queue.reset_at(&key, now + ms(40));
+
+ delay_for(ms(20)).await;
+
+ assert!(queue.is_woken());
+}
+
+#[tokio::test]
+async fn remove_expired_item() {
+ time::pause();
+
+ let mut queue = DelayQueue::new();
+
+ let now = Instant::now();
+
+ delay_for(ms(10)).await;
+
+ let key = queue.insert_at("foo", now);
+
+ let entry = queue.remove(&key);
+ assert_eq!(entry.into_inner(), "foo");
+}
+
+#[tokio::test]
+async fn expires_before_last_insert() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ queue.insert_at("foo", now + ms(10_000));
+
+ // Delay should be set to 8.192s here.
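+    // (8.192s is 2 * 4_096ms, a level-2 slot boundary of the timer wheel and
+    // the nearest boundary below the 10_000ms deadline.)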
+ assert_pending!(poll!(queue));
+
+ // Delay should be set to the delay of the new item here
+ queue.insert_at("bar", now + ms(600));
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(600)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue)).into_inner();
+ assert_eq!(entry, "bar");
+}
+
+#[tokio::test]
+async fn multi_reset() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ let one = queue.insert_at("one", now + ms(200));
+ let two = queue.insert_at("two", now + ms(250));
+
+ assert_pending!(poll!(queue));
+
+ queue.reset_at(&one, now + ms(300));
+ queue.reset_at(&two, now + ms(350));
+ queue.reset_at(&one, now + ms(400));
+
+ delay_for(ms(310)).await;
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(50)).await;
+
+ let entry = assert_ready_ok!(poll!(queue));
+ assert_eq!(*entry.get_ref(), "two");
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(50)).await;
+
+ let entry = assert_ready_ok!(poll!(queue));
+ assert_eq!(*entry.get_ref(), "one");
+
+ let entry = assert_ready!(poll!(queue));
+ assert!(entry.is_none())
+}
+
+#[tokio::test]
+async fn expire_first_key_when_reset_to_expire_earlier() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ let one = queue.insert_at("one", now + ms(200));
+ queue.insert_at("two", now + ms(250));
+
+ assert_pending!(poll!(queue));
+
+ queue.reset_at(&one, now + ms(100));
+
+ delay_for(ms(100)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue)).into_inner();
+ assert_eq!(entry, "one");
+}
+
+#[tokio::test]
+async fn expire_second_key_when_reset_to_expire_earlier() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ queue.insert_at("one", now + ms(200));
+ let two = queue.insert_at("two", now + ms(250));
+
+ assert_pending!(poll!(queue));
+
+ queue.reset_at(&two, now + ms(100));
+
+ delay_for(ms(100)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue)).into_inner();
+ assert_eq!(entry, "two");
+}
+
+#[tokio::test]
+async fn reset_first_expiring_item_to_expire_later() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ let one = queue.insert_at("one", now + ms(200));
+ let _two = queue.insert_at("two", now + ms(250));
+
+ assert_pending!(poll!(queue));
+
+ queue.reset_at(&one, now + ms(300));
+ delay_for(ms(250)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue)).into_inner();
+ assert_eq!(entry, "two");
+}
+
+#[tokio::test]
+async fn insert_before_first_after_poll() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ let _one = queue.insert_at("one", now + ms(200));
+
+ assert_pending!(poll!(queue));
+
+ let _two = queue.insert_at("two", now + ms(100));
+
+ delay_for(ms(99)).await;
+
+ assert!(!queue.is_woken());
+
+ delay_for(ms(1)).await;
+
+ assert!(queue.is_woken());
+
+ let entry = assert_ready_ok!(poll!(queue)).into_inner();
+ assert_eq!(entry, "two");
+}
+
+#[tokio::test]
+async fn insert_after_ready_poll() {
+ time::pause();
+
+ let mut queue = task::spawn(DelayQueue::new());
+
+ let now = Instant::now();
+
+ queue.insert_at("1", now + ms(100));
+ queue.insert_at("2", now + ms(100));
+ queue.insert_at("3", now + ms(100));
+
+ assert_pending!(poll!(queue));
+
+ delay_for(ms(100)).await;
+
+ assert!(queue.is_woken());
+
+ let mut res = vec![];
+
+ while res.len() < 3 {
+ let entry = assert_ready_ok!(poll!(queue));
+ res.push(entry.into_inner());
+ queue.insert_at("foo", now + ms(500));
+ }
+
+ res.sort();
+
+ assert_eq!("1", res[0]);
+ assert_eq!("2", res[1]);
+ assert_eq!("3", res[2]);
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/time_interval.rs b/third_party/rust/tokio/tests/time_interval.rs
new file mode 100644
index 0000000000..1123681f49
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_interval.rs
@@ -0,0 +1,66 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, Duration, Instant};
+use tokio_test::{assert_pending, assert_ready_eq, task};
+
+use std::task::Poll;
+
+#[tokio::test]
+#[should_panic]
+async fn interval_zero_duration() {
+ let _ = time::interval_at(Instant::now(), ms(0));
+}
+
+#[tokio::test]
+async fn usage() {
+ time::pause();
+
+ let start = Instant::now();
+
+ // TODO: Skip this
+ time::advance(ms(1)).await;
+
+ let mut i = task::spawn(time::interval_at(start, ms(300)));
+
+ assert_ready_eq!(poll_next(&mut i), start);
+ assert_pending!(poll_next(&mut i));
+
+ time::advance(ms(100)).await;
+ assert_pending!(poll_next(&mut i));
+
+ time::advance(ms(200)).await;
+ assert_ready_eq!(poll_next(&mut i), start + ms(300));
+ assert_pending!(poll_next(&mut i));
+
+ time::advance(ms(400)).await;
+ assert_ready_eq!(poll_next(&mut i), start + ms(600));
+ assert_pending!(poll_next(&mut i));
+
+ time::advance(ms(500)).await;
+ assert_ready_eq!(poll_next(&mut i), start + ms(900));
+ assert_ready_eq!(poll_next(&mut i), start + ms(1200));
+ assert_pending!(poll_next(&mut i));
+}
+
+#[tokio::test]
+async fn usage_stream() {
+ use tokio::stream::StreamExt;
+
+ let start = Instant::now();
+ let mut interval = time::interval(ms(10));
+
+ for _ in 0..3 {
+ interval.next().await.unwrap();
+ }
+
+ assert!(start.elapsed() > ms(20));
+}
+
+fn poll_next(interval: &mut task::Spawn<time::Interval>) -> Poll<Instant> {
+ interval.enter(|cx, mut interval| interval.poll_tick(cx))
+}
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/time_rt.rs b/third_party/rust/tokio/tests/time_rt.rs
new file mode 100644
index 0000000000..b739f1b2f6
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_rt.rs
@@ -0,0 +1,93 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::*;
+
+use std::sync::mpsc;
+
+#[test]
+fn timer_with_threaded_runtime() {
+ use tokio::runtime::Runtime;
+
+ let rt = Runtime::new().unwrap();
+ let (tx, rx) = mpsc::channel();
+
+ rt.spawn(async move {
+ let when = Instant::now() + Duration::from_millis(100);
+
+ delay_until(when).await;
+ assert!(Instant::now() >= when);
+
+ tx.send(()).unwrap();
+ });
+
+ rx.recv().unwrap();
+}
+
+#[test]
+fn timer_with_basic_scheduler() {
+ use tokio::runtime::Builder;
+
+ let mut rt = Builder::new()
+ .basic_scheduler()
+ .enable_all()
+ .build()
+ .unwrap();
+ let (tx, rx) = mpsc::channel();
+
+ rt.block_on(async move {
+ let when = Instant::now() + Duration::from_millis(100);
+
+ delay_until(when).await;
+ assert!(Instant::now() >= when);
+
+ tx.send(()).unwrap();
+ });
+
+ rx.recv().unwrap();
+}
+
+#[tokio::test]
+async fn starving() {
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
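+    // A future that wakes itself on every poll, so the runtime never goes
+    // idle; the test checks that the timer still fires despite the constant
+    // polling, counting the wasted polls along the way.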
+ struct Starve<T: Future<Output = ()> + Unpin>(T, u64);
+
+ impl<T: Future<Output = ()> + Unpin> Future for Starve<T> {
+ type Output = u64;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u64> {
+ if Pin::new(&mut self.0).poll(cx).is_ready() {
+ return Poll::Ready(self.1);
+ }
+
+ self.1 += 1;
+
+ cx.waker().wake_by_ref();
+
+ Poll::Pending
+ }
+ }
+
+ let when = Instant::now() + Duration::from_millis(20);
+ let starve = Starve(delay_until(when), 0);
+
+ starve.await;
+ assert!(Instant::now() >= when);
+}
+
+#[tokio::test]
+async fn timeout_value() {
+ use tokio::sync::oneshot;
+
+ let (_tx, rx) = oneshot::channel::<()>();
+
+ let now = Instant::now();
+ let dur = Duration::from_millis(20);
+
+ let res = timeout(dur, rx).await;
+ assert!(res.is_err());
+ assert!(Instant::now() >= now + dur);
+}
diff --git a/third_party/rust/tokio/tests/time_throttle.rs b/third_party/rust/tokio/tests/time_throttle.rs
new file mode 100644
index 0000000000..7102d17343
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_throttle.rs
@@ -0,0 +1,33 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::time::{self, throttle};
+use tokio_test::*;
+
+use std::time::Duration;
+
+#[tokio::test]
+async fn usage() {
+ time::pause();
+
+ let mut stream = task::spawn(throttle(
+ Duration::from_millis(100),
+ futures::stream::repeat(()),
+ ));
+
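+
+    // The first item is yielded immediately; each subsequent item is held
+    // back until the full 100ms period has elapsed.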
+ assert_ready!(stream.poll_next());
+ assert_pending!(stream.poll_next());
+
+ time::advance(Duration::from_millis(90)).await;
+
+ assert_pending!(stream.poll_next());
+
+ time::advance(Duration::from_millis(101)).await;
+
+ assert!(stream.is_woken());
+
+ assert_ready!(stream.poll_next());
+}
diff --git a/third_party/rust/tokio/tests/time_timeout.rs b/third_party/rust/tokio/tests/time_timeout.rs
new file mode 100644
index 0000000000..4efcd8ca82
--- /dev/null
+++ b/third_party/rust/tokio/tests/time_timeout.rs
@@ -0,0 +1,117 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::sync::oneshot;
+use tokio::time::{self, timeout, timeout_at, Instant};
+use tokio_test::*;
+
+use futures::future::pending;
+use std::time::Duration;
+
+#[tokio::test]
+async fn simultaneous_deadline_future_completion() {
+ // Create a future that is immediately ready
+ let mut fut = task::spawn(timeout_at(Instant::now(), async {}));
+
+ // Ready!
+ assert_ready_ok!(fut.poll());
+}
+
+#[tokio::test]
+async fn completed_future_past_deadline() {
+    // Wrap an already-completed future with a deadline that has long passed
+ let mut fut = task::spawn(timeout_at(Instant::now() - ms(1000), async {}));
+
+ // Ready!
+ assert_ready_ok!(fut.poll());
+}
+
+#[tokio::test]
+async fn future_and_deadline_in_future() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout_at(Instant::now() + ms(100), rx));
+
+ assert_pending!(fut.poll());
+
+    // Advance the paused timer; the deadline has not elapsed yet
+ time::advance(ms(90)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+ assert!(fut.is_woken());
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
+async fn future_and_timeout_in_future() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout(ms(100), rx));
+
+    // Not ready yet; the timeout has not elapsed
+ assert_pending!(fut.poll());
+
+    // Advance the paused timer; the deadline has not elapsed yet
+ time::advance(ms(90)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
+async fn deadline_now_elapses() {
+ time::pause();
+
+    // Wrap a future that never completes with a deadline of now
+ let mut fut = task::spawn(timeout_at(Instant::now(), pending::<()>()));
+
+ // Factor in jitter
+ // TODO: don't require this
+ time::advance(ms(1)).await;
+
+ assert_ready_err!(fut.poll());
+}
+
+#[tokio::test]
+async fn deadline_future_elapses() {
+ time::pause();
+
+    // Wrap a never-ready future with a deadline 300ms away
+ let mut fut = task::spawn(timeout_at(Instant::now() + ms(300), pending::<()>()));
+
+ assert_pending!(fut.poll());
+
+ time::advance(ms(301)).await;
+
+ assert!(fut.is_woken());
+ assert_ready_err!(fut.poll());
+}
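+
+// A sketch of the success path (this test is ours, not part of the original
+// suite): when the wrapped future completes before the deadline, `timeout`
+// yields `Ok` carrying the future's output.
+#[tokio::test]
+async fn timeout_ok_value_sketch() {
+    let res = timeout(ms(100), async { 42u8 }).await;
+    assert_eq!(res.unwrap(), 42);
+}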
+
+fn ms(n: u64) -> Duration {
+ Duration::from_millis(n)
+}
diff --git a/third_party/rust/tokio/tests/udp.rs b/third_party/rust/tokio/tests/udp.rs
new file mode 100644
index 0000000000..71c282a5cd
--- /dev/null
+++ b/third_party/rust/tokio/tests/udp.rs
@@ -0,0 +1,77 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use tokio::net::UdpSocket;
+
+#[tokio::test]
+async fn send_recv() -> std::io::Result<()> {
+ let mut sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let mut receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
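+    // Binding to port 0 lets the OS pick free ports; connecting each socket
+    // to the other's address enables the connection-style `send`/`recv`
+    // calls used below.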
+ sender.connect(receiver.local_addr()?).await?;
+ receiver.connect(sender.local_addr()?).await?;
+
+ let message = b"hello!";
+ sender.send(message).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let len = receiver.recv(&mut recv_buf[..]).await?;
+
+ assert_eq!(&recv_buf[..len], message);
+ Ok(())
+}
+
+#[tokio::test]
+async fn send_to_recv_from() -> std::io::Result<()> {
+ let mut sender = UdpSocket::bind("127.0.0.1:0").await?;
+ let mut receiver = UdpSocket::bind("127.0.0.1:0").await?;
+
+ let message = b"hello!";
+ let receiver_addr = receiver.local_addr()?;
+ sender.send_to(message, &receiver_addr).await?;
+
+ let mut recv_buf = [0u8; 32];
+ let (len, addr) = receiver.recv_from(&mut recv_buf[..]).await?;
+
+ assert_eq!(&recv_buf[..len], message);
+ assert_eq!(addr, sender.local_addr()?);
+ Ok(())
+}
+
+#[tokio::test]
+async fn split() -> std::io::Result<()> {
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let (mut r, mut s) = socket.split();
+
+ let msg = b"hello";
+ let addr = s.as_ref().local_addr()?;
+ tokio::spawn(async move {
+ s.send_to(msg, &addr).await.unwrap();
+ });
+ let mut recv_buf = [0u8; 32];
+ let (len, _) = r.recv_from(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], msg);
+ Ok(())
+}
+
+#[tokio::test]
+async fn reunite() -> std::io::Result<()> {
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let (s, r) = socket.split();
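+    // `reunite` succeeds only for halves split from the same socket.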
+ assert!(s.reunite(r).is_ok());
+ Ok(())
+}
+
+#[tokio::test]
+async fn reunite_error() -> std::io::Result<()> {
+ let socket = UdpSocket::bind("127.0.0.1:0").await?;
+ let socket1 = UdpSocket::bind("127.0.0.1:0").await?;
+ let (s, _) = socket.split();
+ let (_, r1) = socket1.split();
+ assert!(s.reunite(r1).is_err());
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/uds_cred.rs b/third_party/rust/tokio/tests/uds_cred.rs
new file mode 100644
index 0000000000..c02b2aee4a
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_cred.rs
@@ -0,0 +1,32 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(all(unix, not(target_os = "dragonfly")))]
+
+use tokio::net::UnixStream;
+
+use libc::getegid;
+use libc::geteuid;
+
+#[tokio::test]
+#[cfg_attr(
+ target_os = "freebsd",
+ ignore = "Requires FreeBSD 12.0 or later. https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=176419"
+)]
+#[cfg_attr(
+ target_os = "netbsd",
+ ignore = "NetBSD does not support getpeereid() for sockets created by socketpair()"
+)]
+async fn test_socket_pair() {
+ let (a, b) = UnixStream::pair().unwrap();
+ let cred_a = a.peer_cred().unwrap();
+ let cred_b = b.peer_cred().unwrap();
+ assert_eq!(cred_a, cred_b);
+
+ let uid = unsafe { geteuid() };
+ let gid = unsafe { getegid() };
+
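+    // Both ends of the pair live in this process, so the peer credentials
+    // must match our own effective IDs.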
+ assert_eq!(cred_a.uid, uid);
+ assert_eq!(cred_a.gid, gid);
+}
diff --git a/third_party/rust/tokio/tests/uds_datagram.rs b/third_party/rust/tokio/tests/uds_datagram.rs
new file mode 100644
index 0000000000..dd9952378f
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_datagram.rs
@@ -0,0 +1,45 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use tokio::net::UnixDatagram;
+
+use std::io;
+
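+// Echo every datagram back to its sender. Peers bound to an unnamed address
+// have no path to reply to, so their datagrams are silently dropped.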
+async fn echo_server(mut socket: UnixDatagram) -> io::Result<()> {
+ let mut recv_buf = vec![0u8; 1024];
+ loop {
+ let (len, peer_addr) = socket.recv_from(&mut recv_buf[..]).await?;
+ if let Some(path) = peer_addr.as_pathname() {
+ socket.send_to(&recv_buf[..len], path).await?;
+ }
+ }
+}
+
+#[tokio::test]
+async fn echo() -> io::Result<()> {
+ let dir = tempfile::tempdir().unwrap();
+ let server_path = dir.path().join("server.sock");
+ let client_path = dir.path().join("client.sock");
+
+ let server_socket = UnixDatagram::bind(server_path.clone())?;
+
+ tokio::spawn(async move {
+ if let Err(e) = echo_server(server_socket).await {
+ eprintln!("Error in echo server: {}", e);
+ }
+ });
+
+ {
+        let mut socket = UnixDatagram::bind(&client_path)?;
+ socket.connect(server_path)?;
+ socket.send(b"ECHO").await?;
+ let mut recv_buf = [0u8; 16];
+ let len = socket.recv(&mut recv_buf[..]).await?;
+ assert_eq!(&recv_buf[..len], b"ECHO");
+ }
+
+ Ok(())
+}
diff --git a/third_party/rust/tokio/tests/uds_split.rs b/third_party/rust/tokio/tests/uds_split.rs
new file mode 100644
index 0000000000..76ff4613cd
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_split.rs
@@ -0,0 +1,45 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+#![cfg(unix)]
+
+use tokio::net::UnixStream;
+use tokio::prelude::*;
+
+/// Checks that `UnixStream` can be split into a read half and a write half using
+/// `UnixStream::split`.
+///
+/// Verifies that the implementation of `AsyncWrite::poll_shutdown` shuts the stream down
+/// for writing, by reading to end-of-stream on the other side of the connection.
+#[tokio::test]
+async fn split() -> std::io::Result<()> {
+ let (mut a, mut b) = UnixStream::pair()?;
+
+ let (mut a_read, mut a_write) = a.split();
+ let (mut b_read, mut b_write) = b.split();
+
+ let (a_response, b_response) = futures::future::try_join(
+ send_recv_all(&mut a_read, &mut a_write, b"A"),
+ send_recv_all(&mut b_read, &mut b_write, b"B"),
+ )
+ .await?;
+
+ assert_eq!(a_response, b"B");
+ assert_eq!(b_response, b"A");
+
+ Ok(())
+}
+
+async fn send_recv_all(
+ read: &mut (dyn AsyncRead + Unpin),
+ write: &mut (dyn AsyncWrite + Unpin),
+ input: &[u8],
+) -> std::io::Result<Vec<u8>> {
+ write.write_all(input).await?;
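+    // Shutting down the write half flushes and sends EOF, which lets the
+    // peer's `read_to_end` below run to completion.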
+ write.shutdown().await?;
+
+ let mut output = Vec::new();
+ read.read_to_end(&mut output).await?;
+ Ok(output)
+}
diff --git a/third_party/rust/tokio/tests/uds_stream.rs b/third_party/rust/tokio/tests/uds_stream.rs
new file mode 100644
index 0000000000..29f118a2d4
--- /dev/null
+++ b/third_party/rust/tokio/tests/uds_stream.rs
@@ -0,0 +1,73 @@
+#![cfg(feature = "full")]
+#![warn(rust_2018_idioms)]
+#![cfg(unix)]
+
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::{UnixListener, UnixStream};
+
+use futures::future::try_join;
+
+#[tokio::test]
+async fn accept_read_write() -> std::io::Result<()> {
+ let dir = tempfile::Builder::new()
+ .prefix("tokio-uds-tests")
+ .tempdir()
+ .unwrap();
+ let sock_path = dir.path().join("connect.sock");
+
+ let mut listener = UnixListener::bind(&sock_path)?;
+
+ let accept = listener.accept();
+ let connect = UnixStream::connect(&sock_path);
+ let ((mut server, _), mut client) = try_join(accept, connect).await?;
+
+    // Write from the client. TODO: Switch to write_all.
+ let write_len = client.write(b"hello").await?;
+ assert_eq!(write_len, 5);
+ drop(client);
+    // Read on the server side. TODO: Switch to read_to_end.
+ let mut buf = [0u8; 5];
+ server.read_exact(&mut buf).await?;
+ assert_eq!(&buf, b"hello");
+ let len = server.read(&mut buf).await?;
+ assert_eq!(len, 0);
+ Ok(())
+}
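+
+// A sketch of the variant the TODOs above point at (this helper is ours,
+// not part of the original suite), using `write_all` and `read_to_end`:
+#[allow(dead_code)]
+async fn write_all_read_to_end(
+    mut client: UnixStream,
+    mut server: UnixStream,
+) -> std::io::Result<()> {
+    client.write_all(b"hello").await?;
+    drop(client);
+    let mut buf = Vec::new();
+    server.read_to_end(&mut buf).await?;
+    assert_eq!(&buf, b"hello");
+    Ok(())
+}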
+
+#[tokio::test]
+async fn shutdown() -> std::io::Result<()> {
+ let dir = tempfile::Builder::new()
+ .prefix("tokio-uds-tests")
+ .tempdir()
+ .unwrap();
+ let sock_path = dir.path().join("connect.sock");
+
+ let mut listener = UnixListener::bind(&sock_path)?;
+
+ let accept = listener.accept();
+ let connect = UnixStream::connect(&sock_path);
+ let ((mut server, _), mut client) = try_join(accept, connect).await?;
+
+ // Shut down the client
+ AsyncWriteExt::shutdown(&mut client).await?;
+    // A read on the server side should return 0, indicating the stream has been closed.
+ let mut buf = [0u8; 1];
+ let n = server.read(&mut buf).await?;
+ assert_eq!(n, 0);
+ Ok(())
+}