author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit      698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree        173a775858bd501c378080a10dca74132f05bc50 /vendor/futures
parent      Initial commit. (diff)
download    rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
            rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/futures')
-rw-r--r--vendor/futures-channel/.cargo-checksum.json1
-rw-r--r--vendor/futures-channel/Cargo.toml41
-rw-r--r--vendor/futures-channel/LICENSE-APACHE202
-rw-r--r--vendor/futures-channel/LICENSE-MIT26
-rw-r--r--vendor/futures-channel/README.md23
-rw-r--r--vendor/futures-channel/benches/sync_mpsc.rs135
-rw-r--r--vendor/futures-channel/build.rs42
-rw-r--r--vendor/futures-channel/no_atomic_cas.rs13
-rw-r--r--vendor/futures-channel/src/lib.rs42
-rw-r--r--vendor/futures-channel/src/lock.rs102
-rw-r--r--vendor/futures-channel/src/mpsc/mod.rs1308
-rw-r--r--vendor/futures-channel/src/mpsc/queue.rs176
-rw-r--r--vendor/futures-channel/src/mpsc/sink_impl.rs73
-rw-r--r--vendor/futures-channel/src/oneshot.rs488
-rw-r--r--vendor/futures-channel/tests/channel.rs66
-rw-r--r--vendor/futures-channel/tests/mpsc-close.rs298
-rw-r--r--vendor/futures-channel/tests/mpsc.rs630
-rw-r--r--vendor/futures-channel/tests/oneshot.rs252
-rw-r--r--vendor/futures-core/.cargo-checksum.json1
-rw-r--r--vendor/futures-core/Cargo.toml34
-rw-r--r--vendor/futures-core/LICENSE-APACHE202
-rw-r--r--vendor/futures-core/LICENSE-MIT26
-rw-r--r--vendor/futures-core/README.md23
-rw-r--r--vendor/futures-core/build.rs42
-rw-r--r--vendor/futures-core/no_atomic_cas.rs13
-rw-r--r--vendor/futures-core/src/future.rs103
-rw-r--r--vendor/futures-core/src/lib.rs27
-rw-r--r--vendor/futures-core/src/stream.rs235
-rw-r--r--vendor/futures-core/src/task/__internal/atomic_waker.rs409
-rw-r--r--vendor/futures-core/src/task/__internal/mod.rs4
-rw-r--r--vendor/futures-core/src/task/mod.rs10
-rw-r--r--vendor/futures-core/src/task/poll.rs12
-rw-r--r--vendor/futures-executor/.cargo-checksum.json1
-rw-r--r--vendor/futures-executor/Cargo.toml45
-rw-r--r--vendor/futures-executor/LICENSE-APACHE202
-rw-r--r--vendor/futures-executor/LICENSE-MIT26
-rw-r--r--vendor/futures-executor/README.md23
-rw-r--r--vendor/futures-executor/benches/thread_notify.rs109
-rw-r--r--vendor/futures-executor/src/enter.rs80
-rw-r--r--vendor/futures-executor/src/lib.rs76
-rw-r--r--vendor/futures-executor/src/local_pool.rs400
-rw-r--r--vendor/futures-executor/src/thread_pool.rs375
-rw-r--r--vendor/futures-executor/src/unpark_mutex.rs137
-rw-r--r--vendor/futures-executor/tests/local_pool.rs434
-rw-r--r--vendor/futures-io/.cargo-checksum.json1
-rw-r--r--vendor/futures-io/Cargo.toml30
-rw-r--r--vendor/futures-io/LICENSE-APACHE202
-rw-r--r--vendor/futures-io/LICENSE-MIT26
-rw-r--r--vendor/futures-io/README.md23
-rw-r--r--vendor/futures-io/src/lib.rs558
-rw-r--r--vendor/futures-macro/.cargo-checksum.json1
-rw-r--r--vendor/futures-macro/Cargo.toml34
-rw-r--r--vendor/futures-macro/LICENSE-APACHE202
-rw-r--r--vendor/futures-macro/LICENSE-MIT26
-rw-r--r--vendor/futures-macro/src/executor.rs55
-rw-r--r--vendor/futures-macro/src/join.rs143
-rw-r--r--vendor/futures-macro/src/lib.rs61
-rw-r--r--vendor/futures-macro/src/select.rs330
-rw-r--r--vendor/futures-macro/src/stream_select.rs113
-rw-r--r--vendor/futures-sink/.cargo-checksum.json1
-rw-r--r--vendor/futures-sink/Cargo.toml29
-rw-r--r--vendor/futures-sink/LICENSE-APACHE202
-rw-r--r--vendor/futures-sink/LICENSE-MIT26
-rw-r--r--vendor/futures-sink/README.md23
-rw-r--r--vendor/futures-sink/src/lib.rs240
-rw-r--r--vendor/futures-task/.cargo-checksum.json1
-rw-r--r--vendor/futures-task/Cargo.toml33
-rw-r--r--vendor/futures-task/LICENSE-APACHE202
-rw-r--r--vendor/futures-task/LICENSE-MIT26
-rw-r--r--vendor/futures-task/README.md23
-rw-r--r--vendor/futures-task/build.rs42
-rw-r--r--vendor/futures-task/no_atomic_cas.rs13
-rw-r--r--vendor/futures-task/src/arc_wake.rs49
-rw-r--r--vendor/futures-task/src/future_obj.rs337
-rw-r--r--vendor/futures-task/src/lib.rs50
-rw-r--r--vendor/futures-task/src/noop_waker.rs63
-rw-r--r--vendor/futures-task/src/spawn.rs192
-rw-r--r--vendor/futures-task/src/waker.rs59
-rw-r--r--vendor/futures-task/src/waker_ref.rs63
-rw-r--r--vendor/futures-util/.cargo-checksum.json1
-rw-r--r--vendor/futures-util/Cargo.toml93
-rw-r--r--vendor/futures-util/LICENSE-APACHE202
-rw-r--r--vendor/futures-util/LICENSE-MIT26
-rw-r--r--vendor/futures-util/README.md23
-rw-r--r--vendor/futures-util/benches/futures_unordered.rs43
-rw-r--r--vendor/futures-util/benches_disabled/bilock.rs122
-rw-r--r--vendor/futures-util/build.rs42
-rw-r--r--vendor/futures-util/no_atomic_cas.rs13
-rw-r--r--vendor/futures-util/src/abortable.rs185
-rw-r--r--vendor/futures-util/src/async_await/join_mod.rs110
-rw-r--r--vendor/futures-util/src/async_await/mod.rs58
-rw-r--r--vendor/futures-util/src/async_await/pending.rs43
-rw-r--r--vendor/futures-util/src/async_await/poll.rs39
-rw-r--r--vendor/futures-util/src/async_await/random.rs54
-rw-r--r--vendor/futures-util/src/async_await/select_mod.rs336
-rw-r--r--vendor/futures-util/src/async_await/stream_select_mod.rs40
-rw-r--r--vendor/futures-util/src/compat/compat01as03.rs449
-rw-r--r--vendor/futures-util/src/compat/compat03as01.rs265
-rw-r--r--vendor/futures-util/src/compat/executor.rs85
-rw-r--r--vendor/futures-util/src/compat/mod.rs22
-rw-r--r--vendor/futures-util/src/fns.rs372
-rw-r--r--vendor/futures-util/src/future/abortable.rs19
-rw-r--r--vendor/futures-util/src/future/either.rs297
-rw-r--r--vendor/futures-util/src/future/future/catch_unwind.rs38
-rw-r--r--vendor/futures-util/src/future/future/flatten.rs153
-rw-r--r--vendor/futures-util/src/future/future/fuse.rs93
-rw-r--r--vendor/futures-util/src/future/future/map.rs66
-rw-r--r--vendor/futures-util/src/future/future/mod.rs610
-rw-r--r--vendor/futures-util/src/future/future/remote_handle.rs126
-rw-r--r--vendor/futures-util/src/future/future/shared.rs371
-rw-r--r--vendor/futures-util/src/future/join.rs217
-rw-r--r--vendor/futures-util/src/future/join_all.rs167
-rw-r--r--vendor/futures-util/src/future/lazy.rs60
-rw-r--r--vendor/futures-util/src/future/maybe_done.rs104
-rw-r--r--vendor/futures-util/src/future/mod.rs131
-rw-r--r--vendor/futures-util/src/future/option.rs64
-rw-r--r--vendor/futures-util/src/future/pending.rs54
-rw-r--r--vendor/futures-util/src/future/poll_fn.rs58
-rw-r--r--vendor/futures-util/src/future/poll_immediate.rs126
-rw-r--r--vendor/futures-util/src/future/ready.rs82
-rw-r--r--vendor/futures-util/src/future/select.rs124
-rw-r--r--vendor/futures-util/src/future/select_all.rs74
-rw-r--r--vendor/futures-util/src/future/select_ok.rs85
-rw-r--r--vendor/futures-util/src/future/try_future/into_future.rs36
-rw-r--r--vendor/futures-util/src/future/try_future/mod.rs619
-rw-r--r--vendor/futures-util/src/future/try_future/try_flatten.rs162
-rw-r--r--vendor/futures-util/src/future/try_future/try_flatten_err.rs62
-rw-r--r--vendor/futures-util/src/future/try_join.rs256
-rw-r--r--vendor/futures-util/src/future/try_join_all.rs137
-rw-r--r--vendor/futures-util/src/future/try_maybe_done.rs92
-rw-r--r--vendor/futures-util/src/future/try_select.rs84
-rw-r--r--vendor/futures-util/src/io/allow_std.rs200
-rw-r--r--vendor/futures-util/src/io/buf_reader.rs263
-rw-r--r--vendor/futures-util/src/io/buf_writer.rs224
-rw-r--r--vendor/futures-util/src/io/chain.rs142
-rw-r--r--vendor/futures-util/src/io/close.rs28
-rw-r--r--vendor/futures-util/src/io/copy.rs58
-rw-r--r--vendor/futures-util/src/io/copy_buf.rs78
-rw-r--r--vendor/futures-util/src/io/cursor.rs240
-rw-r--r--vendor/futures-util/src/io/empty.rs59
-rw-r--r--vendor/futures-util/src/io/fill_buf.rs51
-rw-r--r--vendor/futures-util/src/io/flush.rs31
-rw-r--r--vendor/futures-util/src/io/into_sink.rs82
-rw-r--r--vendor/futures-util/src/io/line_writer.rs155
-rw-r--r--vendor/futures-util/src/io/lines.rs47
-rw-r--r--vendor/futures-util/src/io/mod.rs838
-rw-r--r--vendor/futures-util/src/io/read.rs30
-rw-r--r--vendor/futures-util/src/io/read_exact.rs42
-rw-r--r--vendor/futures-util/src/io/read_line.rs57
-rw-r--r--vendor/futures-util/src/io/read_to_end.rs91
-rw-r--r--vendor/futures-util/src/io/read_to_string.rs59
-rw-r--r--vendor/futures-util/src/io/read_until.rs60
-rw-r--r--vendor/futures-util/src/io/read_vectored.rs30
-rw-r--r--vendor/futures-util/src/io/repeat.rs66
-rw-r--r--vendor/futures-util/src/io/seek.rs30
-rw-r--r--vendor/futures-util/src/io/sink.rs67
-rw-r--r--vendor/futures-util/src/io/split.rs115
-rw-r--r--vendor/futures-util/src/io/take.rs125
-rw-r--r--vendor/futures-util/src/io/window.rs104
-rw-r--r--vendor/futures-util/src/io/write.rs30
-rw-r--r--vendor/futures-util/src/io/write_all.rs43
-rw-r--r--vendor/futures-util/src/io/write_all_vectored.rs193
-rw-r--r--vendor/futures-util/src/io/write_vectored.rs30
-rw-r--r--vendor/futures-util/src/lib.rs337
-rw-r--r--vendor/futures-util/src/lock/bilock.rs276
-rw-r--r--vendor/futures-util/src/lock/mod.rs25
-rw-r--r--vendor/futures-util/src/lock/mutex.rs406
-rw-r--r--vendor/futures-util/src/never.rs18
-rw-r--r--vendor/futures-util/src/sink/buffer.rs105
-rw-r--r--vendor/futures-util/src/sink/close.rs32
-rw-r--r--vendor/futures-util/src/sink/drain.rs53
-rw-r--r--vendor/futures-util/src/sink/err_into.rs57
-rw-r--r--vendor/futures-util/src/sink/fanout.rs111
-rw-r--r--vendor/futures-util/src/sink/feed.rs43
-rw-r--r--vendor/futures-util/src/sink/flush.rs36
-rw-r--r--vendor/futures-util/src/sink/map_err.rs65
-rw-r--r--vendor/futures-util/src/sink/mod.rs344
-rw-r--r--vendor/futures-util/src/sink/send.rs41
-rw-r--r--vendor/futures-util/src/sink/send_all.rs100
-rw-r--r--vendor/futures-util/src/sink/unfold.rs86
-rw-r--r--vendor/futures-util/src/sink/with.rs134
-rw-r--r--vendor/futures-util/src/sink/with_flat_map.rs127
-rw-r--r--vendor/futures-util/src/stream/abortable.rs19
-rw-r--r--vendor/futures-util/src/stream/empty.rs45
-rw-r--r--vendor/futures-util/src/stream/futures_ordered.rs220
-rw-r--r--vendor/futures-util/src/stream/futures_unordered/abort.rs12
-rw-r--r--vendor/futures-util/src/stream/futures_unordered/iter.rs168
-rw-r--r--vendor/futures-util/src/stream/futures_unordered/mod.rs674
-rw-r--r--vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs122
-rw-r--r--vendor/futures-util/src/stream/futures_unordered/task.rs118
-rw-r--r--vendor/futures-util/src/stream/iter.rs49
-rw-r--r--vendor/futures-util/src/stream/mod.rs143
-rw-r--r--vendor/futures-util/src/stream/once.rs67
-rw-r--r--vendor/futures-util/src/stream/pending.rs45
-rw-r--r--vendor/futures-util/src/stream/poll_fn.rs57
-rw-r--r--vendor/futures-util/src/stream/poll_immediate.rs80
-rw-r--r--vendor/futures-util/src/stream/repeat.rs58
-rw-r--r--vendor/futures-util/src/stream/repeat_with.rs93
-rw-r--r--vendor/futures-util/src/stream/select.rs117
-rw-r--r--vendor/futures-util/src/stream/select_all.rs254
-rw-r--r--vendor/futures-util/src/stream/select_with_strategy.rs229
-rw-r--r--vendor/futures-util/src/stream/stream/all.rs92
-rw-r--r--vendor/futures-util/src/stream/stream/any.rs92
-rw-r--r--vendor/futures-util/src/stream/stream/buffer_unordered.rs124
-rw-r--r--vendor/futures-util/src/stream/stream/buffered.rs108
-rw-r--r--vendor/futures-util/src/stream/stream/catch_unwind.rs61
-rw-r--r--vendor/futures-util/src/stream/stream/chain.rs75
-rw-r--r--vendor/futures-util/src/stream/stream/chunks.rs106
-rw-r--r--vendor/futures-util/src/stream/stream/collect.rs56
-rw-r--r--vendor/futures-util/src/stream/stream/concat.rs62
-rw-r--r--vendor/futures-util/src/stream/stream/count.rs53
-rw-r--r--vendor/futures-util/src/stream/stream/cycle.rs68
-rw-r--r--vendor/futures-util/src/stream/stream/enumerate.rs64
-rw-r--r--vendor/futures-util/src/stream/stream/filter.rs117
-rw-r--r--vendor/futures-util/src/stream/stream/filter_map.rs111
-rw-r--r--vendor/futures-util/src/stream/stream/flatten.rs73
-rw-r--r--vendor/futures-util/src/stream/stream/fold.rs88
-rw-r--r--vendor/futures-util/src/stream/stream/for_each.rs78
-rw-r--r--vendor/futures-util/src/stream/stream/for_each_concurrent.rs119
-rw-r--r--vendor/futures-util/src/stream/stream/forward.rs75
-rw-r--r--vendor/futures-util/src/stream/stream/fuse.rs75
-rw-r--r--vendor/futures-util/src/stream/stream/into_future.rs90
-rw-r--r--vendor/futures-util/src/stream/stream/map.rs77
-rw-r--r--vendor/futures-util/src/stream/stream/mod.rs1567
-rw-r--r--vendor/futures-util/src/stream/stream/next.rs34
-rw-r--r--vendor/futures-util/src/stream/stream/peek.rs433
-rw-r--r--vendor/futures-util/src/stream/stream/ready_chunks.rs114
-rw-r--r--vendor/futures-util/src/stream/stream/scan.rs128
-rw-r--r--vendor/futures-util/src/stream/stream/select_next_some.rs42
-rw-r--r--vendor/futures-util/src/stream/stream/skip.rs70
-rw-r--r--vendor/futures-util/src/stream/stream/skip_while.rs124
-rw-r--r--vendor/futures-util/src/stream/stream/split.rs144
-rw-r--r--vendor/futures-util/src/stream/stream/take.rs86
-rw-r--r--vendor/futures-util/src/stream/stream/take_until.rs170
-rw-r--r--vendor/futures-util/src/stream/stream/take_while.rs124
-rw-r--r--vendor/futures-util/src/stream/stream/then.rs101
-rw-r--r--vendor/futures-util/src/stream/stream/unzip.rs63
-rw-r--r--vendor/futures-util/src/stream/stream/zip.rs128
-rw-r--r--vendor/futures-util/src/stream/try_stream/and_then.rs105
-rw-r--r--vendor/futures-util/src/stream/try_stream/into_async_read.rs165
-rw-r--r--vendor/futures-util/src/stream/try_stream/into_stream.rs52
-rw-r--r--vendor/futures-util/src/stream/try_stream/mod.rs1064
-rw-r--r--vendor/futures-util/src/stream/try_stream/or_else.rs109
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs86
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_buffered.rs87
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_chunks.rs131
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_collect.rs52
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_concat.rs51
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_filter.rs112
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_filter_map.rs106
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_flatten.rs84
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_fold.rs93
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_for_each.rs68
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs133
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_next.rs34
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_skip_while.rs120
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_take_while.rs129
-rw-r--r--vendor/futures-util/src/stream/try_stream/try_unfold.rs122
-rw-r--r--vendor/futures-util/src/stream/unfold.rs119
-rw-r--r--vendor/futures-util/src/task/mod.rs37
-rw-r--r--vendor/futures-util/src/task/spawn.rs163
-rw-r--r--vendor/futures-util/src/unfold_state.rs39
-rw-r--r--vendor/futures/.cargo-checksum.json1
-rw-r--r--vendor/futures/Cargo.toml87
-rw-r--r--vendor/futures/LICENSE-APACHE202
-rw-r--r--vendor/futures/LICENSE-MIT26
-rw-r--r--vendor/futures/src/lib.rs194
-rw-r--r--vendor/futures/tests/_require_features.rs13
-rw-r--r--vendor/futures/tests/async_await_macros.rs389
-rw-r--r--vendor/futures/tests/auto_traits.rs1891
-rw-r--r--vendor/futures/tests/compat.rs15
-rw-r--r--vendor/futures/tests/eager_drop.rs121
-rw-r--r--vendor/futures/tests/eventual.rs159
-rw-r--r--vendor/futures/tests/future_abortable.rs44
-rw-r--r--vendor/futures/tests/future_basic_combinators.rs104
-rw-r--r--vendor/futures/tests/future_fuse.rs12
-rw-r--r--vendor/futures/tests/future_inspect.rs16
-rw-r--r--vendor/futures/tests/future_join_all.rs42
-rw-r--r--vendor/futures/tests/future_obj.rs33
-rw-r--r--vendor/futures/tests/future_select_all.rs25
-rw-r--r--vendor/futures/tests/future_select_ok.rs30
-rw-r--r--vendor/futures/tests/future_shared.rs195
-rw-r--r--vendor/futures/tests/future_try_flatten_stream.rs83
-rw-r--r--vendor/futures/tests/future_try_join_all.rs44
-rw-r--r--vendor/futures/tests/io_buf_reader.rs432
-rw-r--r--vendor/futures/tests/io_buf_writer.rs239
-rw-r--r--vendor/futures/tests/io_cursor.rs30
-rw-r--r--vendor/futures/tests/io_line_writer.rs73
-rw-r--r--vendor/futures/tests/io_lines.rs60
-rw-r--r--vendor/futures/tests/io_read.rs64
-rw-r--r--vendor/futures/tests/io_read_exact.rs17
-rw-r--r--vendor/futures/tests/io_read_line.rs58
-rw-r--r--vendor/futures/tests/io_read_to_end.rs65
-rw-r--r--vendor/futures/tests/io_read_to_string.rs44
-rw-r--r--vendor/futures/tests/io_read_until.rs60
-rw-r--r--vendor/futures/tests/io_window.rs30
-rw-r--r--vendor/futures/tests/io_write.rs65
-rw-r--r--vendor/futures/tests/lock_mutex.rs66
-rw-r--r--vendor/futures/tests/macro_comma_support.rs43
-rw-r--r--vendor/futures/tests/object_safety.rs49
-rw-r--r--vendor/futures/tests/oneshot.rs78
-rw-r--r--vendor/futures/tests/ready_queue.rs148
-rw-r--r--vendor/futures/tests/recurse.rs25
-rw-r--r--vendor/futures/tests/sink.rs554
-rw-r--r--vendor/futures/tests/sink_fanout.rs24
-rw-r--r--vendor/futures/tests/stream.rs151
-rw-r--r--vendor/futures/tests/stream_abortable.rs46
-rw-r--r--vendor/futures/tests/stream_buffer_unordered.rs73
-rw-r--r--vendor/futures/tests/stream_catch_unwind.rs27
-rw-r--r--vendor/futures/tests/stream_futures_ordered.rs84
-rw-r--r--vendor/futures/tests/stream_futures_unordered.rs369
-rw-r--r--vendor/futures/tests/stream_into_async_read.rs94
-rw-r--r--vendor/futures/tests/stream_peekable.rs58
-rw-r--r--vendor/futures/tests/stream_select_all.rs197
-rw-r--r--vendor/futures/tests/stream_select_next_some.rs86
-rw-r--r--vendor/futures/tests/stream_split.rs57
-rw-r--r--vendor/futures/tests/stream_try_stream.rs38
-rw-r--r--vendor/futures/tests/stream_unfold.rs32
-rw-r--r--vendor/futures/tests/task_arc_wake.rs79
-rw-r--r--vendor/futures/tests/task_atomic_waker.rs48
-rw-r--r--vendor/futures/tests/test_macro.rs20
-rw-r--r--vendor/futures/tests/try_join.rs35
-rw-r--r--vendor/futures/tests_disabled/all.rs400
-rw-r--r--vendor/futures/tests_disabled/bilock.rs102
-rw-r--r--vendor/futures/tests_disabled/stream.rs369
325 files changed, 43038 insertions, 0 deletions
diff --git a/vendor/futures-channel/.cargo-checksum.json b/vendor/futures-channel/.cargo-checksum.json
new file mode 100644
index 000000000..4b321d3c3
--- /dev/null
+++ b/vendor/futures-channel/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"2843b3fc245065891decdfce5244144f4b8a3e35d0d9499db431073930e9b550","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"fb9330147e41a15b5e569b8bad7692628be89b5fc219a5323a57fa63024c1684","benches/sync_mpsc.rs":"1019dd027f104f58883f396ff70efc3dd69b3a7d62df17af090e07b2b05eaf66","build.rs":"f6e21c09f18cc405bd7048cb7a2958f92d5414b9ca6b301d137e120a84fa020a","no_atomic_cas.rs":"ff8be002b49a5cd9e4ca0db17b1c9e6b98e55f556319eb6b953dd6ff52c397a6","src/lib.rs":"2955e70d292208747fbb29810ef88f390f0f1b22b112fa59d60f95480d470e75","src/lock.rs":"38655a797456ea4f67d132c42055cf74f18195e875c3b337fc81a12901f79292","src/mpsc/mod.rs":"71c8fb3ac645bc587684a9e115b8859044acbade540299a1f9dd952aa27d6ba5","src/mpsc/queue.rs":"8822f466e7fe5a8d25ba994b7022ad7c14bcfd473d354a6cd0490240d3e170e7","src/mpsc/sink_impl.rs":"c9977b530187e82c912fcd46e08316e48ed246e77bb2419d53020e69e403d086","src/oneshot.rs":"d1170289b39656ea5f0d5f42b905ddbd5fa9c1202aa3297c9f25280a48229910","tests/channel.rs":"88f4a41d82b5c1b01e153d071a2bf48e0697355908c55ca42342ed45e63fdec8","tests/mpsc-close.rs":"456e43d3b4aad317c84da81297b05743609af57b26d10470e478f1677e4bf731","tests/mpsc.rs":"c929860c11be704692e709c10a3f5e046d6c01df2cacf568983419cdf82aab97","tests/oneshot.rs":"c44b90681c577f8d0c88e810e883328eefec1d4346b9aa615fa47cc3a7c25c01"},"package":"ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b"} \ No newline at end of file
diff --git a/vendor/futures-channel/Cargo.toml b/vendor/futures-channel/Cargo.toml
new file mode 100644
index 000000000..99454ccaa
--- /dev/null
+++ b/vendor/futures-channel/Cargo.toml
@@ -0,0 +1,41 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.45"
+name = "futures-channel"
+version = "0.3.19"
+description = "Channels for asynchronous communication using futures-rs.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+[dependencies.futures-core]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-sink]
+version = "0.3.19"
+optional = true
+default-features = false
+
+[dev-dependencies]
+
+[features]
+alloc = ["futures-core/alloc"]
+cfg-target-has-atomic = []
+default = ["std"]
+sink = ["futures-sink"]
+std = ["alloc", "futures-core/std"]
+unstable = []
diff --git a/vendor/futures-channel/LICENSE-APACHE b/vendor/futures-channel/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-channel/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-channel/LICENSE-MIT b/vendor/futures-channel/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-channel/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-channel/README.md b/vendor/futures-channel/README.md
new file mode 100644
index 000000000..3287be924
--- /dev/null
+++ b/vendor/futures-channel/README.md
@@ -0,0 +1,23 @@
+# futures-channel
+
+Channels for asynchronous communication using futures-rs.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-channel = "0.3"
+```
+
+The current `futures-channel` requires Rust 1.45 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
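For orientation, a minimal sketch of the crate in use (illustrative only, not part of the vendored sources), assuming the `futures` facade and `futures-executor` crates that this commit vendors alongside `futures-channel`:

```rust
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::stream::StreamExt;

fn main() {
    // An unbounded channel applies no backpressure: sends always succeed
    // while the receiving half is still alive.
    let (tx, mut rx) = mpsc::unbounded::<u32>();

    tx.unbounded_send(42).expect("receiver is still alive");
    drop(tx); // dropping every sender terminates the stream

    assert_eq!(block_on(rx.next()), Some(42));
    assert_eq!(block_on(rx.next()), None); // channel is now closed
}
```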
diff --git a/vendor/futures-channel/benches/sync_mpsc.rs b/vendor/futures-channel/benches/sync_mpsc.rs
new file mode 100644
index 000000000..7c3c3d3a8
--- /dev/null
+++ b/vendor/futures-channel/benches/sync_mpsc.rs
@@ -0,0 +1,135 @@
+#![feature(test)]
+
+extern crate test;
+use crate::test::Bencher;
+
+use {
+ futures::{
+ channel::mpsc::{self, Sender, UnboundedSender},
+ ready,
+ sink::Sink,
+ stream::{Stream, StreamExt},
+ task::{Context, Poll},
+ },
+ futures_test::task::noop_context,
+ std::pin::Pin,
+};
+
+/// Single producer, single consumer
+#[bench]
+fn unbounded_1_tx(b: &mut Bencher) {
+ let mut cx = noop_context();
+ b.iter(|| {
+ let (tx, mut rx) = mpsc::unbounded();
+
+ // 1000 iterations to avoid measuring overhead of initialization
+ // Result should be divided by 1000
+ for i in 0..1000 {
+ // Poll, not ready, park
+ assert_eq!(Poll::Pending, rx.poll_next_unpin(&mut cx));
+
+ UnboundedSender::unbounded_send(&tx, i).unwrap();
+
+ // Now poll ready
+ assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx));
+ }
+ })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn unbounded_100_tx(b: &mut Bencher) {
+ let mut cx = noop_context();
+ b.iter(|| {
+ let (tx, mut rx) = mpsc::unbounded();
+
+ let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect();
+
+ // 1000 send/recv operations total, result should be divided by 1000
+ for _ in 0..10 {
+ for (i, x) in tx.iter().enumerate() {
+ assert_eq!(Poll::Pending, rx.poll_next_unpin(&mut cx));
+
+ UnboundedSender::unbounded_send(x, i).unwrap();
+
+ assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx));
+ }
+ }
+ })
+}
+
+#[bench]
+fn unbounded_uncontended(b: &mut Bencher) {
+ let mut cx = noop_context();
+ b.iter(|| {
+ let (tx, mut rx) = mpsc::unbounded();
+
+ for i in 0..1000 {
+ UnboundedSender::unbounded_send(&tx, i).expect("send");
+ // No need to create a task, because poll is not going to park.
+ assert_eq!(Poll::Ready(Some(i)), rx.poll_next_unpin(&mut cx));
+ }
+ })
+}
+
+/// A Stream that continuously sends incrementing number of the queue
+struct TestSender {
+ tx: Sender<u32>,
+ last: u32, // Last number sent
+}
+
+// Could be a Future, it doesn't matter
+impl Stream for TestSender {
+ type Item = u32;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = &mut *self;
+ let mut tx = Pin::new(&mut this.tx);
+
+ ready!(tx.as_mut().poll_ready(cx)).unwrap();
+ tx.as_mut().start_send(this.last + 1).unwrap();
+ this.last += 1;
+ assert_eq!(Poll::Pending, tx.as_mut().poll_flush(cx));
+ Poll::Ready(Some(this.last))
+ }
+}
+
+/// Single producers, single consumer
+#[bench]
+fn bounded_1_tx(b: &mut Bencher) {
+ let mut cx = noop_context();
+ b.iter(|| {
+ let (tx, mut rx) = mpsc::channel(0);
+
+ let mut tx = TestSender { tx, last: 0 };
+
+ for i in 0..1000 {
+ assert_eq!(Poll::Ready(Some(i + 1)), tx.poll_next_unpin(&mut cx));
+ assert_eq!(Poll::Pending, tx.poll_next_unpin(&mut cx));
+ assert_eq!(Poll::Ready(Some(i + 1)), rx.poll_next_unpin(&mut cx));
+ }
+ })
+}
+
+/// 100 producers, single consumer
+#[bench]
+fn bounded_100_tx(b: &mut Bencher) {
+ let mut cx = noop_context();
+ b.iter(|| {
+ // Each sender can send one item after specified capacity
+ let (tx, mut rx) = mpsc::channel(0);
+
+ let mut tx: Vec<_> = (0..100).map(|_| TestSender { tx: tx.clone(), last: 0 }).collect();
+
+ for i in 0..10 {
+ for x in &mut tx {
+ // Send an item
+ assert_eq!(Poll::Ready(Some(i + 1)), x.poll_next_unpin(&mut cx));
+ // Then block
+ assert_eq!(Poll::Pending, x.poll_next_unpin(&mut cx));
+ // Recv the item
+ assert_eq!(Poll::Ready(Some(i + 1)), rx.poll_next_unpin(&mut cx));
+ }
+ }
+ })
+}
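The benchmarks above drive the channel by polling it directly with a context whose waker does nothing, rather than through an executor. A rough stable-Rust equivalent of that pattern (illustrative only, not part of the commit) replaces the nightly-only `#![feature(test)]` harness and `futures_test::task::noop_context` with `futures::task::noop_waker_ref`, which the vendored `futures-task` crate provides:

```rust
use futures::channel::mpsc;
use futures::stream::StreamExt;
use futures::task::{noop_waker_ref, Context, Poll};

fn main() {
    // A context whose waker is a no-op; wake-ups are unnecessary because we
    // simply poll again by hand, exactly as the benchmarks do.
    let mut cx = Context::from_waker(noop_waker_ref());

    let (tx, mut rx) = mpsc::unbounded::<u32>();

    // Nothing has been sent yet, so the receiver reports Pending.
    assert_eq!(rx.poll_next_unpin(&mut cx), Poll::Pending);

    tx.unbounded_send(7).unwrap();

    // The message is observable on the very next poll.
    assert_eq!(rx.poll_next_unpin(&mut cx), Poll::Ready(Some(7)));
}
```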
diff --git a/vendor/futures-channel/build.rs b/vendor/futures-channel/build.rs
new file mode 100644
index 000000000..07b50bd55
--- /dev/null
+++ b/vendor/futures-channel/build.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms, single_use_lifetimes)]
+
+use std::env;
+
+include!("no_atomic_cas.rs");
+
+// The rustc-cfg listed below are considered public API, but it is *unstable*
+// and outside of the normal semver guarantees:
+//
+// - `futures_no_atomic_cas`
+// Assume the target does *not* support atomic CAS operations.
+// This is usually detected automatically by the build script, but you may
+// need to enable it manually when building for custom targets or using
+// non-cargo build systems that don't run the build script.
+//
+// With the exceptions mentioned above, the rustc-cfg strings below are
+// *not* public API. Please let us know by opening a GitHub issue if your build
+// environment requires some way to enable these cfgs other than by executing
+// our build script.
+fn main() {
+ let target = match env::var("TARGET") {
+ Ok(target) => target,
+ Err(e) => {
+ println!(
+ "cargo:warning={}: unable to get TARGET environment variable: {}",
+ env!("CARGO_PKG_NAME"),
+ e
+ );
+ return;
+ }
+ };
+
+ // Note that this is `no_*`, not `has_*`. This allows treating
+ // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't
+ // run. This is needed for compatibility with non-cargo build systems that
+ // don't run the build script.
+ if NO_ATOMIC_CAS_TARGETS.contains(&&*target) {
+ println!("cargo:rustc-cfg=futures_no_atomic_cas");
+ }
+
+ println!("cargo:rerun-if-changed=no_atomic_cas.rs");
+}
diff --git a/vendor/futures-channel/no_atomic_cas.rs b/vendor/futures-channel/no_atomic_cas.rs
new file mode 100644
index 000000000..4708bf853
--- /dev/null
+++ b/vendor/futures-channel/no_atomic_cas.rs
@@ -0,0 +1,13 @@
+// This file is @generated by no_atomic_cas.sh.
+// It is not intended for manual editing.
+
+const NO_ATOMIC_CAS_TARGETS: &[&str] = &[
+ "avr-unknown-gnu-atmega328",
+ "bpfeb-unknown-none",
+ "bpfel-unknown-none",
+ "msp430-none-elf",
+ "riscv32i-unknown-none-elf",
+ "riscv32imc-unknown-none-elf",
+ "thumbv4t-none-eabi",
+ "thumbv6m-none-eabi",
+];
diff --git a/vendor/futures-channel/src/lib.rs b/vendor/futures-channel/src/lib.rs
new file mode 100644
index 000000000..4cd936d55
--- /dev/null
+++ b/vendor/futures-channel/src/lib.rs
@@ -0,0 +1,42 @@
+//! Asynchronous channels.
+//!
+//! Like threads, concurrent tasks sometimes need to communicate with each
+//! other. This module contains two basic abstractions for doing so:
+//!
+//! - [oneshot], a way of sending a single value from one task to another.
+//! - [mpsc], a multi-producer, single-consumer channel for sending values
+//! between tasks, analogous to the similarly-named structure in the standard
+//! library.
+//!
+//! All items are only available when the `std` or `alloc` feature of this
+//! library is activated, and it is activated by default.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+ missing_debug_implementations,
+ missing_docs,
+ rust_2018_idioms,
+ single_use_lifetimes,
+ unreachable_pub
+)]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod lock;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "std")]
+pub mod mpsc;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub mod oneshot;
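The module documentation above lists `oneshot` as the second abstraction: a way of sending a single value from one task to another. A minimal sketch (illustrative only, not part of the vendored sources), again using the executor vendored in this commit:

```rust
use futures::channel::oneshot;
use futures::executor::block_on;
use std::thread;

fn main() {
    let (tx, rx) = oneshot::channel::<String>();

    // The sending half moves to another task (a plain thread here) and
    // completes the channel exactly once.
    thread::spawn(move || {
        let _ = tx.send(String::from("done"));
    });

    // The receiving half is itself a Future: awaiting it yields the value,
    // or Err(Canceled) if the sender is dropped without sending.
    assert_eq!(block_on(rx), Ok(String::from("done")));
}
```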
diff --git a/vendor/futures-channel/src/lock.rs b/vendor/futures-channel/src/lock.rs
new file mode 100644
index 000000000..b328d0f7d
--- /dev/null
+++ b/vendor/futures-channel/src/lock.rs
@@ -0,0 +1,102 @@
+//! A "mutex" which only supports `try_lock`
+//!
+//! As a futures library the eventual call to an event loop should be the only
+//! thing that ever blocks, so this is assisted with a fast user-space
+//! implementation of a lock that can only have a `try_lock` operation.
+
+use core::cell::UnsafeCell;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::AtomicBool;
+use core::sync::atomic::Ordering::SeqCst;
+
+/// A "mutex" around a value, similar to `std::sync::Mutex<T>`.
+///
+/// This lock only supports the `try_lock` operation, however, and does not
+/// implement poisoning.
+#[derive(Debug)]
+pub(crate) struct Lock<T> {
+ locked: AtomicBool,
+ data: UnsafeCell<T>,
+}
+
+/// Sentinel representing an acquired lock through which the data can be
+/// accessed.
+pub(crate) struct TryLock<'a, T> {
+ __ptr: &'a Lock<T>,
+}
+
+// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are
+// intended to mirror the standard library's corresponding impls for `Mutex<T>`.
+//
+// If a `T` is sendable across threads, so is the lock, and `T` must be sendable
+// across threads to be `Sync` because it allows mutable access from multiple
+// threads.
+unsafe impl<T: Send> Send for Lock<T> {}
+unsafe impl<T: Send> Sync for Lock<T> {}
+
+impl<T> Lock<T> {
+ /// Creates a new lock around the given value.
+ pub(crate) fn new(t: T) -> Self {
+ Self { locked: AtomicBool::new(false), data: UnsafeCell::new(t) }
+ }
+
+ /// Attempts to acquire this lock, returning whether the lock was acquired or
+ /// not.
+ ///
+ /// If `Some` is returned then the data this lock protects can be accessed
+ /// through the sentinel. This sentinel allows both mutable and immutable
+ /// access.
+ ///
+ /// If `None` is returned then the lock is already locked, either elsewhere
+ /// on this thread or on another thread.
+ pub(crate) fn try_lock(&self) -> Option<TryLock<'_, T>> {
+ if !self.locked.swap(true, SeqCst) {
+ Some(TryLock { __ptr: self })
+ } else {
+ None
+ }
+ }
+}
+
+impl<T> Deref for TryLock<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ // The existence of `TryLock` represents that we own the lock, so we
+ // can safely access the data here.
+ unsafe { &*self.__ptr.data.get() }
+ }
+}
+
+impl<T> DerefMut for TryLock<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // The existence of `TryLock` represents that we own the lock, so we
+ // can safely access the data here.
+ //
+ // Additionally, we're the *only* `TryLock` in existence so mutable
+ // access should be ok.
+ unsafe { &mut *self.__ptr.data.get() }
+ }
+}
+
+impl<T> Drop for TryLock<'_, T> {
+ fn drop(&mut self) {
+ self.__ptr.locked.store(false, SeqCst);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::Lock;
+
+ #[test]
+ fn smoke() {
+ let a = Lock::new(1);
+ let mut a1 = a.try_lock().unwrap();
+ assert!(a.try_lock().is_none());
+ assert_eq!(*a1, 1);
+ *a1 = 2;
+ drop(a1);
+ assert_eq!(*a.try_lock().unwrap(), 2);
+ assert_eq!(*a.try_lock().unwrap(), 2);
+ }
+}
diff --git a/vendor/futures-channel/src/mpsc/mod.rs b/vendor/futures-channel/src/mpsc/mod.rs
new file mode 100644
index 000000000..44834b7c9
--- /dev/null
+++ b/vendor/futures-channel/src/mpsc/mod.rs
@@ -0,0 +1,1308 @@
+//! A multi-producer, single-consumer queue for sending values across
+//! asynchronous tasks.
+//!
+//! Similarly to the `std`, channel creation provides [`Receiver`] and
+//! [`Sender`] handles. [`Receiver`] implements [`Stream`] and allows a task to
+//! read values out of the channel. If there is no message to read from the
+//! channel, the current task will be notified when a new value is sent.
+//! [`Sender`] implements the `Sink` trait and allows a task to send messages into
+//! the channel. If the channel is at capacity, the send will be rejected and
+//! the task will be notified when additional capacity is available. In other
+//! words, the channel provides backpressure.
+//!
+//! Unbounded channels are also available using the `unbounded` constructor.
+//!
+//! # Disconnection
+//!
+//! When all [`Sender`] handles have been dropped, it is no longer
+//! possible to send values into the channel. This is considered the termination
+//! event of the stream. As such, [`Receiver::poll_next`]
+//! will return `Ok(Ready(None))`.
+//!
+//! If the [`Receiver`] handle is dropped, then messages can no longer
+//! be read out of the channel. In this case, all further attempts to send will
+//! result in an error.
+//!
+//! # Clean Shutdown
+//!
+//! If the [`Receiver`] is simply dropped, then it is possible for
+//! there to be messages still in the channel that will not be processed. As
+//! such, it is usually desirable to perform a "clean" shutdown. To do this, the
+//! receiver will first call `close`, which will prevent any further messages to
+//! be sent into the channel. Then, the receiver consumes the channel to
+//! completion, at which point the receiver can be dropped.
+//!
+//! [`Sender`]: struct.Sender.html
+//! [`Receiver`]: struct.Receiver.html
+//! [`Stream`]: ../../futures_core/stream/trait.Stream.html
+//! [`Receiver::poll_next`]:
+//! ../../futures_core/stream/trait.Stream.html#tymethod.poll_next
+
+// At the core, the channel uses an atomic FIFO queue for message passing. This
+// queue is used as the primary coordination primitive. In order to enforce
+// capacity limits and handle back pressure, a secondary FIFO queue is used to
+// send parked task handles.
+//
+// The general idea is that the channel is created with a `buffer` size of `n`.
+// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed"
+// slot to hold a message. This allows `Sender` to know for a fact that a send
+// will succeed *before* starting to do the actual work of sending the value.
+// Since most of this work is lock-free, once the work starts, it is impossible
+// to safely revert.
+//
+// If the sender is unable to process a send operation, then the current
+// task is parked and the handle is sent on the parked task queue.
+//
+// Note that the implementation guarantees that the channel capacity will never
+// exceed the configured limit, however there is no *strict* guarantee that the
+// receiver will wake up a parked task *immediately* when a slot becomes
+// available. However, it will almost always unpark a task when a slot becomes
+// available and it is *guaranteed* that a sender will be unparked when the
+// message that caused the sender to become parked is read out of the channel.
+//
+// The steps for sending a message are roughly:
+//
+// 1) Increment the channel message count
+// 2) If the channel is at capacity, push the task handle onto the wait queue
+// 3) Push the message onto the message queue.
+//
+// The steps for receiving a message are roughly:
+//
+// 1) Pop a message from the message queue
+// 2) Pop a task handle from the wait queue
+// 3) Decrement the channel message count.
+//
+// It's important for the order of operations on lock-free structures to happen
+// in reverse order between the sender and receiver. This makes the message
+// queue the primary coordination structure and establishes the necessary
+// happens-before semantics required for the acquire / release semantics used
+// by the queue structure.
+
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::__internal::AtomicWaker;
+use futures_core::task::{Context, Poll, Waker};
+use std::fmt;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+use crate::mpsc::queue::Queue;
+
+mod queue;
+#[cfg(feature = "sink")]
+mod sink_impl;
+
+#[derive(Debug)]
+struct UnboundedSenderInner<T> {
+ // Channel state shared between the sender and receiver.
+ inner: Arc<UnboundedInner<T>>,
+}
+
+#[derive(Debug)]
+struct BoundedSenderInner<T> {
+ // Channel state shared between the sender and receiver.
+ inner: Arc<BoundedInner<T>>,
+
+ // Handle to the task that is blocked on this sender. This handle is sent
+ // to the receiver half in order to be notified when the sender becomes
+ // unblocked.
+ sender_task: Arc<Mutex<SenderTask>>,
+
+ // `true` if the sender might be blocked. This is an optimization to avoid
+ // having to lock the mutex most of the time.
+ maybe_parked: bool,
+}
+
+// We never project Pin<&mut SenderInner> to `Pin<&mut T>`
+impl<T> Unpin for UnboundedSenderInner<T> {}
+impl<T> Unpin for BoundedSenderInner<T> {}
+
+/// The transmission end of a bounded mpsc channel.
+///
+/// This value is created by the [`channel`](channel) function.
+#[derive(Debug)]
+pub struct Sender<T>(Option<BoundedSenderInner<T>>);
+
+/// The transmission end of an unbounded mpsc channel.
+///
+/// This value is created by the [`unbounded`](unbounded) function.
+#[derive(Debug)]
+pub struct UnboundedSender<T>(Option<UnboundedSenderInner<T>>);
+
+trait AssertKinds: Send + Sync + Clone {}
+impl AssertKinds for UnboundedSender<u32> {}
+
+/// The receiving end of a bounded mpsc channel.
+///
+/// This value is created by the [`channel`](channel) function.
+#[derive(Debug)]
+pub struct Receiver<T> {
+ inner: Option<Arc<BoundedInner<T>>>,
+}
+
+/// The receiving end of an unbounded mpsc channel.
+///
+/// This value is created by the [`unbounded`](unbounded) function.
+#[derive(Debug)]
+pub struct UnboundedReceiver<T> {
+ inner: Option<Arc<UnboundedInner<T>>>,
+}
+
+// `Pin<&mut UnboundedReceiver<T>>` is never projected to `Pin<&mut T>`
+impl<T> Unpin for UnboundedReceiver<T> {}
+
+/// The error type for [`Sender`s](Sender) used as `Sink`s.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct SendError {
+ kind: SendErrorKind,
+}
+
+/// The error type returned from [`try_send`](Sender::try_send).
+#[derive(Clone, PartialEq, Eq)]
+pub struct TrySendError<T> {
+ err: SendError,
+ val: T,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum SendErrorKind {
+ Full,
+ Disconnected,
+}
+
+/// The error type returned from [`try_next`](Receiver::try_next).
+pub struct TryRecvError {
+ _priv: (),
+}
+
+impl fmt::Display for SendError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.is_full() {
+ write!(f, "send failed because channel is full")
+ } else {
+ write!(f, "send failed because receiver is gone")
+ }
+ }
+}
+
+impl std::error::Error for SendError {}
+
+impl SendError {
+ /// Returns `true` if this error is a result of the channel being full.
+ pub fn is_full(&self) -> bool {
+ match self.kind {
+ SendErrorKind::Full => true,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if this error is a result of the receiver being dropped.
+ pub fn is_disconnected(&self) -> bool {
+ match self.kind {
+ SendErrorKind::Disconnected => true,
+ _ => false,
+ }
+ }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TrySendError").field("kind", &self.err.kind).finish()
+ }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.is_full() {
+ write!(f, "send failed because channel is full")
+ } else {
+ write!(f, "send failed because receiver is gone")
+ }
+ }
+}
+
+impl<T: core::any::Any> std::error::Error for TrySendError<T> {}
+
+impl<T> TrySendError<T> {
+ /// Returns `true` if this error is a result of the channel being full.
+ pub fn is_full(&self) -> bool {
+ self.err.is_full()
+ }
+
+ /// Returns `true` if this error is a result of the receiver being dropped.
+ pub fn is_disconnected(&self) -> bool {
+ self.err.is_disconnected()
+ }
+
+ /// Returns the message that was attempted to be sent but failed.
+ pub fn into_inner(self) -> T {
+ self.val
+ }
+
+ /// Drops the message and converts into a `SendError`.
+ pub fn into_send_error(self) -> SendError {
+ self.err
+ }
+}
+
+impl fmt::Debug for TryRecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("TryRecvError").finish()
+ }
+}
+
+impl fmt::Display for TryRecvError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "receiver channel is empty")
+ }
+}
+
+impl std::error::Error for TryRecvError {}
+
+#[derive(Debug)]
+struct UnboundedInner<T> {
+ // Internal channel state. Consists of the number of messages stored in the
+ // channel as well as a flag signalling that the channel is closed.
+ state: AtomicUsize,
+
+ // Atomic, FIFO queue used to send messages to the receiver
+ message_queue: Queue<T>,
+
+ // Number of senders in existence
+ num_senders: AtomicUsize,
+
+ // Handle to the receiver's task.
+ recv_task: AtomicWaker,
+}
+
+#[derive(Debug)]
+struct BoundedInner<T> {
+ // Max buffer size of the channel. If `None` then the channel is unbounded.
+ buffer: usize,
+
+ // Internal channel state. Consists of the number of messages stored in the
+ // channel as well as a flag signalling that the channel is closed.
+ state: AtomicUsize,
+
+ // Atomic, FIFO queue used to send messages to the receiver
+ message_queue: Queue<T>,
+
+ // Atomic, FIFO queue used to send parked task handles to the receiver.
+ parked_queue: Queue<Arc<Mutex<SenderTask>>>,
+
+ // Number of senders in existence
+ num_senders: AtomicUsize,
+
+ // Handle to the receiver's task.
+ recv_task: AtomicWaker,
+}
+
+// Struct representation of `Inner::state`.
+#[derive(Debug, Clone, Copy)]
+struct State {
+ // `true` when the channel is open
+ is_open: bool,
+
+ // Number of messages in the channel
+ num_messages: usize,
+}
+
+// The `is_open` flag is stored in the left-most bit of `Inner::state`
+const OPEN_MASK: usize = usize::max_value() - (usize::max_value() >> 1);
+
+// When a new channel is created, it is created in the open state with no
+// pending messages.
+const INIT_STATE: usize = OPEN_MASK;
+
+// The maximum number of messages that a channel can track is `usize::max_value() >> 1`
+const MAX_CAPACITY: usize = !(OPEN_MASK);
+
+// The maximum requested buffer size must be less than the maximum capacity of
+// a channel. This is because each sender gets a guaranteed slot.
+const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
+
+// Sent to the consumer to wake up blocked producers
+#[derive(Debug)]
+struct SenderTask {
+ task: Option<Waker>,
+ is_parked: bool,
+}
+
+impl SenderTask {
+ fn new() -> Self {
+ Self { task: None, is_parked: false }
+ }
+
+ fn notify(&mut self) {
+ self.is_parked = false;
+
+ if let Some(task) = self.task.take() {
+ task.wake();
+ }
+ }
+}
+
+/// Creates a bounded mpsc channel for communicating between asynchronous tasks.
+///
+/// Being bounded, this channel provides backpressure to ensure that the sender
+/// outpaces the receiver by only a limited amount. The channel's capacity is
+/// equal to `buffer + num-senders`. In other words, each sender gets a
+/// guaranteed slot in the channel capacity, and on top of that there are
+/// `buffer` "first come, first serve" slots available to all senders.
+///
+/// The [`Receiver`](Receiver) returned implements the
+/// [`Stream`](futures_core::stream::Stream) trait, while [`Sender`](Sender) implements
+/// `Sink`.
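+///
+/// # Examples
+///
+/// A minimal sketch of bounded usage (this assumes the `futures` facade crate
+/// and its `block_on` executor, as used in this crate's tests):
+///
+/// ```
+/// use futures::channel::mpsc;
+/// use futures::executor::block_on;
+/// use futures::sink::SinkExt;
+/// use futures::stream::StreamExt;
+///
+/// let (mut tx, mut rx) = mpsc::channel::<u32>(1);
+///
+/// block_on(async {
+/// // `send` waits for capacity first, which is how backpressure is applied.
+/// tx.send(1).await.unwrap();
+/// assert_eq!(rx.next().await, Some(1));
+/// });
+/// ```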
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
+ // Check that the requested buffer size does not exceed the maximum buffer
+ // size permitted by the system.
+ assert!(buffer < MAX_BUFFER, "requested buffer size too large");
+
+ let inner = Arc::new(BoundedInner {
+ buffer,
+ state: AtomicUsize::new(INIT_STATE),
+ message_queue: Queue::new(),
+ parked_queue: Queue::new(),
+ num_senders: AtomicUsize::new(1),
+ recv_task: AtomicWaker::new(),
+ });
+
+ let tx = BoundedSenderInner {
+ inner: inner.clone(),
+ sender_task: Arc::new(Mutex::new(SenderTask::new())),
+ maybe_parked: false,
+ };
+
+ let rx = Receiver { inner: Some(inner) };
+
+ (Sender(Some(tx)), rx)
+}
+
+/// Creates an unbounded mpsc channel for communicating between asynchronous
+/// tasks.
+///
+/// A `send` on this channel will always succeed as long as the receive half has
+/// not been closed. If the receiver falls behind, messages will be arbitrarily
+/// buffered.
+///
+/// **Note** that the amount of available system memory is an implicit bound to
+/// the channel. Using an `unbounded` channel can cause the process to run out
+/// of memory. In this case, the process will be aborted.
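+///
+/// # Examples
+///
+/// A minimal sketch of unbounded usage (this assumes the `futures` facade
+/// crate, as used in this crate's tests):
+///
+/// ```
+/// use futures::channel::mpsc;
+///
+/// let (tx, mut rx) = mpsc::unbounded::<u32>();
+///
+/// // `unbounded_send` never waits; it only fails if the receiver is gone.
+/// tx.unbounded_send(1).expect("receiver still alive");
+/// assert_eq!(rx.try_next().unwrap(), Some(1));
+/// ```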
+pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+ let inner = Arc::new(UnboundedInner {
+ state: AtomicUsize::new(INIT_STATE),
+ message_queue: Queue::new(),
+ num_senders: AtomicUsize::new(1),
+ recv_task: AtomicWaker::new(),
+ });
+
+ let tx = UnboundedSenderInner { inner: inner.clone() };
+
+ let rx = UnboundedReceiver { inner: Some(inner) };
+
+ (UnboundedSender(Some(tx)), rx)
+}
+
+/*
+ *
+ * ===== impl Sender =====
+ *
+ */
+
+impl<T> UnboundedSenderInner<T> {
+ fn poll_ready_nb(&self) -> Poll<Result<(), SendError>> {
+ let state = decode_state(self.inner.state.load(SeqCst));
+ if state.is_open {
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Ready(Err(SendError { kind: SendErrorKind::Disconnected }))
+ }
+ }
+
+ // Push message to the queue and signal to the receiver
+ fn queue_push_and_signal(&self, msg: T) {
+ // Push the message onto the message queue
+ self.inner.message_queue.push(msg);
+
+ // Signal to the receiver that a message has been enqueued. If the
+ // receiver is parked, this will unpark the task.
+ self.inner.recv_task.wake();
+ }
+
+ // Increment the number of queued messages. Returns the resulting number.
+ fn inc_num_messages(&self) -> Option<usize> {
+ let mut curr = self.inner.state.load(SeqCst);
+
+ loop {
+ let mut state = decode_state(curr);
+
+ // The receiver end closed the channel.
+ if !state.is_open {
+ return None;
+ }
+
+ // This is probably never hit in practice; odds are the process will run out
+ // of memory first. It may be worth returning something else in this case.
+ assert!(
+ state.num_messages < MAX_CAPACITY,
+ "buffer space \
+ exhausted; sending this message would overflow the state"
+ );
+
+ state.num_messages += 1;
+
+ let next = encode_state(&state);
+ match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => return Some(state.num_messages),
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+
+ /// Returns whether the senders send to the same receiver.
+ fn same_receiver(&self, other: &Self) -> bool {
+ Arc::ptr_eq(&self.inner, &other.inner)
+ }
+
+ /// Returns whether the sender sends to this receiver.
+ fn is_connected_to(&self, inner: &Arc<UnboundedInner<T>>) -> bool {
+ Arc::ptr_eq(&self.inner, inner)
+ }
+
+ /// Returns a pointer to the `Arc` containing the channel's shared state.
+ ///
+ /// The returned pointer is never dereferenced and should only be used for hashing!
+ fn ptr(&self) -> *const UnboundedInner<T> {
+ &*self.inner
+ }
+
+ /// Returns whether this channel is closed without needing a context.
+ fn is_closed(&self) -> bool {
+ !decode_state(self.inner.state.load(SeqCst)).is_open
+ }
+
+ /// Closes this channel from the sender side, preventing any new messages.
+ fn close_channel(&self) {
+ // There's no need to park this sender, it's dropping,
+ // and we don't want to check for capacity, so skip
+ // that stuff from `do_send`.
+
+ self.inner.set_closed();
+ self.inner.recv_task.wake();
+ }
+}
+
+impl<T> BoundedSenderInner<T> {
+ /// Attempts to send a message on this `Sender`, returning the message
+ /// if there was an error.
+ fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+ // If the sender is currently blocked, reject the message
+ if !self.poll_unparked(None).is_ready() {
+ return Err(TrySendError { err: SendError { kind: SendErrorKind::Full }, val: msg });
+ }
+
+ // The channel has capacity to accept the message, so send it
+ self.do_send_b(msg)
+ }
+
+ // Do the send without failing.
+ // Can only be called by a bounded sender.
+ fn do_send_b(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+ // Anyone calling do_send *should* make sure there is room first,
+ // but assert here for tests as a sanity check.
+ debug_assert!(self.poll_unparked(None).is_ready());
+
+ // First, increment the number of messages contained by the channel.
+ // This operation will also atomically determine if the sender task
+ // should be parked.
+ //
+ // `None` is returned in the case that the channel has been closed by the
+ // receiver. This happens when `Receiver::close` is called or the
+ // receiver is dropped.
+ let park_self = match self.inc_num_messages() {
+ Some(num_messages) => {
+ // Block if the current number of pending messages has exceeded
+ // the configured buffer size
+ num_messages > self.inner.buffer
+ }
+ None => {
+ return Err(TrySendError {
+ err: SendError { kind: SendErrorKind::Disconnected },
+ val: msg,
+ })
+ }
+ };
+
+ // If the channel has reached capacity, then the sender task needs to
+ // be parked. This will send the task handle on the parked task queue.
+ //
+ // However, when `do_send` is called while dropping the `Sender`,
+ // `task::current()` can't be called safely. In this case, in order to
+ // maintain internal consistency, a blank message is pushed onto the
+ // parked task queue.
+ if park_self {
+ self.park();
+ }
+
+ self.queue_push_and_signal(msg);
+
+ Ok(())
+ }
+
+ // Push message to the queue and signal to the receiver
+ fn queue_push_and_signal(&self, msg: T) {
+ // Push the message onto the message queue
+ self.inner.message_queue.push(msg);
+
+ // Signal to the receiver that a message has been enqueued. If the
+ // receiver is parked, this will unpark the task.
+ self.inner.recv_task.wake();
+ }
+
+ // Increment the number of queued messages. Returns the resulting number.
+ fn inc_num_messages(&self) -> Option<usize> {
+ let mut curr = self.inner.state.load(SeqCst);
+
+ loop {
+ let mut state = decode_state(curr);
+
+ // The receiver end closed the channel.
+ if !state.is_open {
+ return None;
+ }
+
+ // This is probably never hit in practice; odds are the process will run out
+ // of memory first. It may be worth returning something else in this case.
+ assert!(
+ state.num_messages < MAX_CAPACITY,
+ "buffer space \
+ exhausted; sending this message would overflow the state"
+ );
+
+ state.num_messages += 1;
+
+ let next = encode_state(&state);
+ match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => return Some(state.num_messages),
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+
+ fn park(&mut self) {
+ {
+ let mut sender = self.sender_task.lock().unwrap();
+ sender.task = None;
+ sender.is_parked = true;
+ }
+
+ // Send handle over queue
+ let t = self.sender_task.clone();
+ self.inner.parked_queue.push(t);
+
+ // Check to make sure we weren't closed after we sent our task on the
+ // queue
+ let state = decode_state(self.inner.state.load(SeqCst));
+ self.maybe_parked = state.is_open;
+ }
+
+ /// Polls the channel to determine if there is guaranteed capacity to send
+ /// at least one item without waiting.
+ ///
+ /// # Return value
+ ///
+ /// This method returns:
+ ///
+ /// - `Poll::Ready(Ok(_))` if there is sufficient capacity;
+ /// - `Poll::Pending` if the channel may not have
+ /// capacity, in which case the current task is queued to be notified once
+ /// capacity is available;
+ /// - `Poll::Ready(Err(SendError))` if the receiver has been dropped.
+ fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), SendError>> {
+ let state = decode_state(self.inner.state.load(SeqCst));
+ if !state.is_open {
+ return Poll::Ready(Err(SendError { kind: SendErrorKind::Disconnected }));
+ }
+
+ self.poll_unparked(Some(cx)).map(Ok)
+ }
+
+ /// Returns whether the senders send to the same receiver.
+ fn same_receiver(&self, other: &Self) -> bool {
+ Arc::ptr_eq(&self.inner, &other.inner)
+ }
+
+ /// Returns whether the sender sends to this receiver.
+ fn is_connected_to(&self, receiver: &Arc<BoundedInner<T>>) -> bool {
+ Arc::ptr_eq(&self.inner, receiver)
+ }
+
+ /// Returns a pointer to the `Arc` containing the channel's shared state.
+ ///
+ /// The returned pointer is never dereferenced and should only be used for hashing!
+ fn ptr(&self) -> *const BoundedInner<T> {
+ &*self.inner
+ }
+
+ /// Returns whether this channel is closed without needing a context.
+ fn is_closed(&self) -> bool {
+ !decode_state(self.inner.state.load(SeqCst)).is_open
+ }
+
+ /// Closes this channel from the sender side, preventing any new messages.
+ fn close_channel(&self) {
+ // There's no need to park this sender, it's dropping,
+ // and we don't want to check for capacity, so skip
+ // that stuff from `do_send`.
+
+ self.inner.set_closed();
+ self.inner.recv_task.wake();
+ }
+
+ fn poll_unparked(&mut self, cx: Option<&mut Context<'_>>) -> Poll<()> {
+ // First check the `maybe_parked` variable. This avoids acquiring the
+ // lock in most cases
+ if self.maybe_parked {
+ // Get a lock on the task handle
+ let mut task = self.sender_task.lock().unwrap();
+
+ if !task.is_parked {
+ self.maybe_parked = false;
+ return Poll::Ready(());
+ }
+
+ // At this point, an unpark request is pending, so there will be an
+ // unpark sometime in the future. We just need to make sure that
+ // the correct task will be notified.
+ //
+ // Update the task in case the `Sender` has been moved to another
+ // task
+ task.task = cx.map(|cx| cx.waker().clone());
+
+ Poll::Pending
+ } else {
+ Poll::Ready(())
+ }
+ }
+}
+
+impl<T> Sender<T> {
+ /// Attempts to send a message on this `Sender`, returning the message
+ /// if there was an error.
+ pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+ if let Some(inner) = &mut self.0 {
+ inner.try_send(msg)
+ } else {
+ Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected }, val: msg })
+ }
+ }
+
+ /// Send a message on the channel.
+ ///
+ /// This function should only be called after
+ /// [`poll_ready`](Sender::poll_ready) has reported that the channel is
+ /// ready to receive a message.
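+ ///
+ /// A minimal sketch of the `poll_ready`/`start_send` protocol (this assumes
+ /// the `futures` facade crate, as used in this crate's tests):
+ ///
+ /// ```
+ /// use futures::channel::mpsc;
+ /// use futures::executor::block_on;
+ /// use futures::future::poll_fn;
+ ///
+ /// let (mut tx, _rx) = mpsc::channel::<u32>(1);
+ ///
+ /// block_on(async {
+ /// // Wait until the channel guarantees capacity, then hand over the message.
+ /// poll_fn(|cx| tx.poll_ready(cx)).await.unwrap();
+ /// tx.start_send(5).unwrap();
+ /// });
+ /// ```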
+ pub fn start_send(&mut self, msg: T) -> Result<(), SendError> {
+ self.try_send(msg).map_err(|e| e.err)
+ }
+
+ /// Polls the channel to determine if there is guaranteed capacity to send
+ /// at least one item without waiting.
+ ///
+ /// # Return value
+ ///
+ /// This method returns:
+ ///
+ /// - `Poll::Ready(Ok(_))` if there is sufficient capacity;
+ /// - `Poll::Pending` if the channel may not have
+ /// capacity, in which case the current task is queued to be notified once
+ /// capacity is available;
+ /// - `Poll::Ready(Err(SendError))` if the receiver has been dropped.
+ pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), SendError>> {
+ let inner = self.0.as_mut().ok_or(SendError { kind: SendErrorKind::Disconnected })?;
+ inner.poll_ready(cx)
+ }
+
+ /// Returns whether this channel is closed without needing a context.
+ pub fn is_closed(&self) -> bool {
+ self.0.as_ref().map(BoundedSenderInner::is_closed).unwrap_or(true)
+ }
+
+ /// Closes this channel from the sender side, preventing any new messages.
+ pub fn close_channel(&mut self) {
+ if let Some(inner) = &mut self.0 {
+ inner.close_channel();
+ }
+ }
+
+ /// Disconnects this sender from the channel, closing it if there are no more senders left.
+ pub fn disconnect(&mut self) {
+ self.0 = None;
+ }
+
+ /// Returns whether the senders send to the same receiver.
+ pub fn same_receiver(&self, other: &Self) -> bool {
+ match (&self.0, &other.0) {
+ (Some(inner), Some(other)) => inner.same_receiver(other),
+ _ => false,
+ }
+ }
+
+ /// Returns whether the sender sends to this receiver.
+ pub fn is_connected_to(&self, receiver: &Receiver<T>) -> bool {
+ match (&self.0, &receiver.inner) {
+ (Some(inner), Some(receiver)) => inner.is_connected_to(receiver),
+ _ => false,
+ }
+ }
+
+ /// Hashes the receiver into the provided hasher
+ pub fn hash_receiver<H>(&self, hasher: &mut H)
+ where
+ H: std::hash::Hasher,
+ {
+ use std::hash::Hash;
+
+ let ptr = self.0.as_ref().map(|inner| inner.ptr());
+ ptr.hash(hasher);
+ }
+}
+
+impl<T> UnboundedSender<T> {
+ /// Check if the channel is ready to receive a message.
+ pub fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), SendError>> {
+ let inner = self.0.as_ref().ok_or(SendError { kind: SendErrorKind::Disconnected })?;
+ inner.poll_ready_nb()
+ }
+
+ /// Returns whether this channel is closed without needing a context.
+ pub fn is_closed(&self) -> bool {
+ self.0.as_ref().map(UnboundedSenderInner::is_closed).unwrap_or(true)
+ }
+
+ /// Closes this channel from the sender side, preventing any new messages.
+ pub fn close_channel(&self) {
+ if let Some(inner) = &self.0 {
+ inner.close_channel();
+ }
+ }
+
+ /// Disconnects this sender from the channel, closing it if there are no more senders left.
+ pub fn disconnect(&mut self) {
+ self.0 = None;
+ }
+
+ // Do the send without parking the current task.
+ fn do_send_nb(&self, msg: T) -> Result<(), TrySendError<T>> {
+ if let Some(inner) = &self.0 {
+ if inner.inc_num_messages().is_some() {
+ inner.queue_push_and_signal(msg);
+ return Ok(());
+ }
+ }
+
+ Err(TrySendError { err: SendError { kind: SendErrorKind::Disconnected }, val: msg })
+ }
+
+ /// Send a message on the channel.
+ ///
+ /// This method should only be called after `poll_ready` has been used to
+ /// verify that the channel is ready to receive a message.
+ pub fn start_send(&mut self, msg: T) -> Result<(), SendError> {
+ self.do_send_nb(msg).map_err(|e| e.err)
+ }
+
+ /// Sends a message along this channel.
+ ///
+ /// This is an unbounded sender, so this function differs from `Sink::send`
+ /// by ensuring the return type reflects that the channel is always ready to
+ /// receive messages.
+ pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError<T>> {
+ self.do_send_nb(msg)
+ }
+
+ /// Returns whether the senders send to the same receiver.
+ pub fn same_receiver(&self, other: &Self) -> bool {
+ match (&self.0, &other.0) {
+ (Some(inner), Some(other)) => inner.same_receiver(other),
+ _ => false,
+ }
+ }
+
+ /// Returns whether the sender sends to this receiver.
+ pub fn is_connected_to(&self, receiver: &UnboundedReceiver<T>) -> bool {
+ match (&self.0, &receiver.inner) {
+ (Some(inner), Some(receiver)) => inner.is_connected_to(receiver),
+ _ => false,
+ }
+ }
+
+ /// Hashes the receiver into the provided hasher
+ pub fn hash_receiver<H>(&self, hasher: &mut H)
+ where
+ H: std::hash::Hasher,
+ {
+ use std::hash::Hash;
+
+ let ptr = self.0.as_ref().map(|inner| inner.ptr());
+ ptr.hash(hasher);
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Self {
+ Self(self.0.clone())
+ }
+}
+
+impl<T> Clone for UnboundedSender<T> {
+ fn clone(&self) -> Self {
+ Self(self.0.clone())
+ }
+}
+
+impl<T> Clone for UnboundedSenderInner<T> {
+ fn clone(&self) -> Self {
+ // Since this atomic op isn't actually guarding any memory and we don't
+ // care about any orderings besides the ordering on the single atomic
+ // variable, a relaxed ordering is acceptable.
+ let mut curr = self.inner.num_senders.load(SeqCst);
+
+ loop {
+ // If the maximum number of senders has been reached, then fail
+ if curr == MAX_BUFFER {
+ panic!("cannot clone `Sender` -- too many outstanding senders");
+ }
+
+ debug_assert!(curr < MAX_BUFFER);
+
+ let next = curr + 1;
+ match self.inner.num_senders.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => {
+ // The ABA problem doesn't matter here. We only care that the
+ // number of senders never exceeds the maximum.
+ return Self { inner: self.inner.clone() };
+ }
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+}
+
+impl<T> Clone for BoundedSenderInner<T> {
+ fn clone(&self) -> Self {
+ // Since this atomic op isn't actually guarding any memory and we don't
+ // care about any orderings besides the ordering on the single atomic
+ // variable, a relaxed ordering is acceptable.
+ let mut curr = self.inner.num_senders.load(SeqCst);
+
+ loop {
+ // If the maximum number of senders has been reached, then fail
+ if curr == self.inner.max_senders() {
+ panic!("cannot clone `Sender` -- too many outstanding senders");
+ }
+
+ debug_assert!(curr < self.inner.max_senders());
+
+ let next = curr + 1;
+ match self.inner.num_senders.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => {
+ // The ABA problem doesn't matter here. We only care that the
+ // number of senders never exceeds the maximum.
+ return Self {
+ inner: self.inner.clone(),
+ sender_task: Arc::new(Mutex::new(SenderTask::new())),
+ maybe_parked: false,
+ };
+ }
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+}
+
+impl<T> Drop for UnboundedSenderInner<T> {
+ fn drop(&mut self) {
+ // Ordering between variables doesn't matter here
+ let prev = self.inner.num_senders.fetch_sub(1, SeqCst);
+
+ if prev == 1 {
+ self.close_channel();
+ }
+ }
+}
+
+impl<T> Drop for BoundedSenderInner<T> {
+ fn drop(&mut self) {
+ // Ordering between variables doesn't matter here
+ let prev = self.inner.num_senders.fetch_sub(1, SeqCst);
+
+ if prev == 1 {
+ self.close_channel();
+ }
+ }
+}
+
+/*
+ *
+ * ===== impl Receiver =====
+ *
+ */
+
+impl<T> Receiver<T> {
+ /// Closes the receiving half of a channel, without dropping it.
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
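+ ///
+ /// A short sketch (hypothetical values; assumes the `futures` facade crate as
+ /// used in this crate's tests):
+ ///
+ /// ```
+ /// use futures::channel::mpsc;
+ ///
+ /// let (mut tx, mut rx) = mpsc::channel::<u32>(2);
+ /// tx.try_send(1).unwrap();
+ ///
+ /// rx.close();
+ ///
+ /// // New sends are rejected...
+ /// assert!(tx.try_send(2).is_err());
+ /// // ...but the already buffered message can still be drained.
+ /// assert_eq!(rx.try_next().unwrap(), Some(1));
+ /// assert_eq!(rx.try_next().unwrap(), None);
+ /// ```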
+ pub fn close(&mut self) {
+ if let Some(inner) = &mut self.inner {
+ inner.set_closed();
+
+ // Wake up any threads waiting as they'll see that we've closed the
+ // channel and will continue on their merry way.
+ while let Some(task) = unsafe { inner.parked_queue.pop_spin() } {
+ task.lock().unwrap().notify();
+ }
+ }
+ }
+
+ /// Tries to receive the next message without notifying a context if empty.
+ ///
+ /// It is not recommended to call this function from inside of a future,
+ /// only when you've otherwise arranged to be notified when the channel is
+ /// no longer empty.
+ ///
+ /// This function returns:
+ /// * `Ok(Some(t))` when a message is fetched
+ /// * `Ok(None)` when the channel is closed and no messages are left in the queue
+ /// * `Err(e)` when there are no messages available, but the channel is not yet closed
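+ ///
+ /// A short sketch of the three outcomes (hypothetical values; assumes the
+ /// `futures` facade crate as used in this crate's tests):
+ ///
+ /// ```
+ /// use futures::channel::mpsc;
+ ///
+ /// let (mut tx, mut rx) = mpsc::channel::<u32>(1);
+ ///
+ /// // Nothing sent yet and the channel is still open: `Err(_)`.
+ /// assert!(rx.try_next().is_err());
+ ///
+ /// tx.try_send(7).unwrap();
+ /// // A message is available: `Ok(Some(7))`.
+ /// assert_eq!(rx.try_next().unwrap(), Some(7));
+ ///
+ /// drop(tx);
+ /// // All senders are gone and the queue is drained: `Ok(None)`.
+ /// assert_eq!(rx.try_next().unwrap(), None);
+ /// ```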
+ pub fn try_next(&mut self) -> Result<Option<T>, TryRecvError> {
+ match self.next_message() {
+ Poll::Ready(msg) => Ok(msg),
+ Poll::Pending => Err(TryRecvError { _priv: () }),
+ }
+ }
+
+ fn next_message(&mut self) -> Poll<Option<T>> {
+ let inner = match self.inner.as_mut() {
+ None => return Poll::Ready(None),
+ Some(inner) => inner,
+ };
+ // Pop off a message
+ match unsafe { inner.message_queue.pop_spin() } {
+ Some(msg) => {
+ // If there are any parked task handles in the parked queue,
+ // pop one and unpark it.
+ self.unpark_one();
+
+ // Decrement number of messages
+ self.dec_num_messages();
+
+ Poll::Ready(Some(msg))
+ }
+ None => {
+ let state = decode_state(inner.state.load(SeqCst));
+ if state.is_closed() {
+ // If closed flag is set AND there are no pending messages
+ // it means end of stream
+ self.inner = None;
+ Poll::Ready(None)
+ } else {
+ // If the queue is open, we need to return Pending
+ // to be woken up when new messages arrive.
+ // If the queue is closed but num_messages is non-zero,
+ // it means that senders updated the state,
+ // but haven't put the messages on the queue yet,
+ // so we need to park until a sender unparks the task
+ // after queueing the message.
+ Poll::Pending
+ }
+ }
+ }
+ }
+
+ // Unpark a single task handle if there is one pending in the parked queue
+ fn unpark_one(&mut self) {
+ if let Some(inner) = &mut self.inner {
+ if let Some(task) = unsafe { inner.parked_queue.pop_spin() } {
+ task.lock().unwrap().notify();
+ }
+ }
+ }
+
+ fn dec_num_messages(&self) {
+ if let Some(inner) = &self.inner {
+ // OPEN_MASK is the highest bit, so it's unaffected by subtraction
+ // unless there's underflow, and we know there's no underflow
+ // because the number of messages at this point is always > 0.
+ inner.state.fetch_sub(1, SeqCst);
+ }
+ }
+}
+
+// The receiver does not ever take a Pin to the inner T
+impl<T> Unpin for Receiver<T> {}
+
+impl<T> FusedStream for Receiver<T> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<T> Stream for Receiver<T> {
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ // Try to read a message off of the message queue.
+ match self.next_message() {
+ Poll::Ready(msg) => {
+ if msg.is_none() {
+ self.inner = None;
+ }
+ Poll::Ready(msg)
+ }
+ Poll::Pending => {
+ // There are no messages to read, in this case, park.
+ self.inner.as_ref().unwrap().recv_task.register(cx.waker());
+ // Check the queue again after parking to prevent a race condition:
+ // a message could have been added to the queue after the previous
+ // `next_message` call but before the `register` call.
+ self.next_message()
+ }
+ }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ // Drain the channel of all pending messages
+ self.close();
+ if self.inner.is_some() {
+ loop {
+ match self.next_message() {
+ Poll::Ready(Some(_)) => {}
+ Poll::Ready(None) => break,
+ Poll::Pending => {
+ let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst));
+
+ // If the channel is closed, then there is no need to park.
+ if state.is_closed() {
+ break;
+ }
+
+ // TODO: Spinning isn't ideal, it might be worth
+ // investigating using a condvar or some other strategy
+ // here. That said, if this case is hit, then another thread
+ // is about to push the value into the queue and this isn't
+ // the only spinlock in the impl right now.
+ thread::yield_now();
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<T> UnboundedReceiver<T> {
+ /// Closes the receiving half of a channel, without dropping it.
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ if let Some(inner) = &mut self.inner {
+ inner.set_closed();
+ }
+ }
+
+ /// Tries to receive the next message without notifying a context if empty.
+ ///
+ /// It is not recommended to call this function from inside of a future,
+ /// only when you've otherwise arranged to be notified when the channel is
+ /// no longer empty.
+ ///
+ /// This function returns:
+ /// * `Ok(Some(t))` when a message is fetched
+ /// * `Ok(None)` when the channel is closed and no messages are left in the queue
+ /// * `Err(e)` when there are no messages available, but the channel is not yet closed
+ pub fn try_next(&mut self) -> Result<Option<T>, TryRecvError> {
+ match self.next_message() {
+ Poll::Ready(msg) => Ok(msg),
+ Poll::Pending => Err(TryRecvError { _priv: () }),
+ }
+ }
+
+ fn next_message(&mut self) -> Poll<Option<T>> {
+ let inner = match self.inner.as_mut() {
+ None => return Poll::Ready(None),
+ Some(inner) => inner,
+ };
+ // Pop off a message
+ match unsafe { inner.message_queue.pop_spin() } {
+ Some(msg) => {
+ // Decrement number of messages
+ self.dec_num_messages();
+
+ Poll::Ready(Some(msg))
+ }
+ None => {
+ let state = decode_state(inner.state.load(SeqCst));
+ if state.is_closed() {
+ // If closed flag is set AND there are no pending messages
+ // it means end of stream
+ self.inner = None;
+ Poll::Ready(None)
+ } else {
+ // If the queue is open, we need to return Pending
+ // to be woken up when new messages arrive.
+ // If the queue is closed but num_messages is non-zero,
+ // it means that senders updated the state,
+ // but haven't put the messages on the queue yet,
+ // so we need to park until a sender unparks the task
+ // after queueing the message.
+ Poll::Pending
+ }
+ }
+ }
+ }
+
+ fn dec_num_messages(&self) {
+ if let Some(inner) = &self.inner {
+ // OPEN_MASK is the highest bit, so it's unaffected by subtraction
+ // unless there's underflow, and we know there's no underflow
+ // because the number of messages at this point is always > 0.
+ inner.state.fetch_sub(1, SeqCst);
+ }
+ }
+}
+
+impl<T> FusedStream for UnboundedReceiver<T> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<T> Stream for UnboundedReceiver<T> {
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ // Try to read a message off of the message queue.
+ match self.next_message() {
+ Poll::Ready(msg) => {
+ if msg.is_none() {
+ self.inner = None;
+ }
+ Poll::Ready(msg)
+ }
+ Poll::Pending => {
+ // There are no messages to read, in this case, park.
+ self.inner.as_ref().unwrap().recv_task.register(cx.waker());
+ // Check the queue again after parking to prevent a race condition:
+ // a message could have been added to the queue after the previous
+ // `next_message` call but before the `register` call.
+ self.next_message()
+ }
+ }
+ }
+}
+
+impl<T> Drop for UnboundedReceiver<T> {
+ fn drop(&mut self) {
+ // Drain the channel of all pending messages
+ self.close();
+ if self.inner.is_some() {
+ loop {
+ match self.next_message() {
+ Poll::Ready(Some(_)) => {}
+ Poll::Ready(None) => break,
+ Poll::Pending => {
+ let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst));
+
+ // If the channel is closed, then there is no need to park.
+ if state.is_closed() {
+ break;
+ }
+
+ // TODO: Spinning isn't ideal, it might be worth
+ // investigating using a condvar or some other strategy
+ // here. That said, if this case is hit, then another thread
+ // is about to push the value into the queue and this isn't
+ // the only spinlock in the impl right now.
+ thread::yield_now();
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ *
+ * ===== impl Inner =====
+ *
+ */
+
+impl<T> UnboundedInner<T> {
+ // Clear `open` flag in the state, keep `num_messages` intact.
+ fn set_closed(&self) {
+ let curr = self.state.load(SeqCst);
+ if !decode_state(curr).is_open {
+ return;
+ }
+
+ self.state.fetch_and(!OPEN_MASK, SeqCst);
+ }
+}
+
+impl<T> BoundedInner<T> {
+ // The return value is such that the total number of messages that can be
+ // enqueued into the channel will never exceed MAX_CAPACITY
+ fn max_senders(&self) -> usize {
+ MAX_CAPACITY - self.buffer
+ }
+
+ // Clear `open` flag in the state, keep `num_messages` intact.
+ fn set_closed(&self) {
+ let curr = self.state.load(SeqCst);
+ if !decode_state(curr).is_open {
+ return;
+ }
+
+ self.state.fetch_and(!OPEN_MASK, SeqCst);
+ }
+}
+
+unsafe impl<T: Send> Send for UnboundedInner<T> {}
+unsafe impl<T: Send> Sync for UnboundedInner<T> {}
+
+unsafe impl<T: Send> Send for BoundedInner<T> {}
+unsafe impl<T: Send> Sync for BoundedInner<T> {}
+
+impl State {
+ fn is_closed(&self) -> bool {
+ !self.is_open && self.num_messages == 0
+ }
+}
+
+/*
+ *
+ * ===== Helpers =====
+ *
+ */
+
+fn decode_state(num: usize) -> State {
+ State { is_open: num & OPEN_MASK == OPEN_MASK, num_messages: num & MAX_CAPACITY }
+}
+
+fn encode_state(state: &State) -> usize {
+ let mut num = state.num_messages;
+
+ if state.is_open {
+ num |= OPEN_MASK;
+ }
+
+ num
+}
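+
+// As a worked example of the encoding above: an open channel currently holding
+// 3 messages is encoded as `OPEN_MASK | 3`; the most significant bit carries
+// `is_open` and the low bits carry `num_messages`, which `decode_state` splits
+// back apart.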
diff --git a/vendor/futures-channel/src/mpsc/queue.rs b/vendor/futures-channel/src/mpsc/queue.rs
new file mode 100644
index 000000000..57dc7f565
--- /dev/null
+++ b/vendor/futures-channel/src/mpsc/queue.rs
@@ -0,0 +1,176 @@
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+//! A mostly lock-free multi-producer, single consumer queue for sending
+//! messages between asynchronous tasks.
+//!
+//! The queue implementation is essentially the same one used for mpsc channels
+//! in the standard library.
+//!
+//! Note that the current implementation of this queue has a caveat with the `pop`
+//! method; see that method's documentation for more information. Due to this
+//! caveat, this queue may not be appropriate for all use-cases.
+
+// http://www.1024cores.net/home/lock-free-algorithms
+// /queues/non-intrusive-mpsc-node-based-queue
+
+// NOTE: this implementation is lifted from the standard library and only
+// slightly modified
+
+pub(super) use self::PopResult::*;
+
+use std::cell::UnsafeCell;
+use std::ptr;
+use std::sync::atomic::{AtomicPtr, Ordering};
+use std::thread;
+
+/// A result of the `pop` function.
+pub(super) enum PopResult<T> {
+ /// Some data has been popped
+ Data(T),
+ /// The queue is empty
+ Empty,
+ /// The queue is in an inconsistent state. Popping data should succeed, but
+ /// some pushers have yet to make enough progress in order to allow a pop to
+ /// succeed. It is recommended that a pop() occur "in the near future" in
+ /// order to see if the sender has made progress or not.
+ Inconsistent,
+}
+
+#[derive(Debug)]
+struct Node<T> {
+ next: AtomicPtr<Self>,
+ value: Option<T>,
+}
+
+/// The multi-producer single-consumer structure. This is not cloneable, but it
+/// may be safely shared so long as it is guaranteed that there is only one
+/// popper at a time (many pushers are allowed).
+#[derive(Debug)]
+pub(super) struct Queue<T> {
+ head: AtomicPtr<Node<T>>,
+ tail: UnsafeCell<*mut Node<T>>,
+}
+
+unsafe impl<T: Send> Send for Queue<T> {}
+unsafe impl<T: Send> Sync for Queue<T> {}
+
+impl<T> Node<T> {
+ unsafe fn new(v: Option<T>) -> *mut Self {
+ Box::into_raw(Box::new(Self { next: AtomicPtr::new(ptr::null_mut()), value: v }))
+ }
+}
+
+impl<T> Queue<T> {
+ /// Creates a new queue that is safe to share among multiple producers and
+ /// one consumer.
+ pub(super) fn new() -> Self {
+ let stub = unsafe { Node::new(None) };
+ Self { head: AtomicPtr::new(stub), tail: UnsafeCell::new(stub) }
+ }
+
+ /// Pushes a new value onto this queue.
+ pub(super) fn push(&self, t: T) {
+ unsafe {
+ let n = Node::new(Some(t));
+ let prev = self.head.swap(n, Ordering::AcqRel);
+ (*prev).next.store(n, Ordering::Release);
+ }
+ }
+
+ /// Pops some data from this queue.
+ ///
+ /// Note that the current implementation means that this function cannot
+ /// return `Option<T>`. It is possible for this queue to be in an
+ /// inconsistent state where many pushes have succeeded and completely
+ /// finished, but pops cannot return `Some(t)`. This inconsistent state
+ /// happens when a pusher is preempted at an inopportune moment.
+ ///
+ /// This inconsistent state means that this queue does indeed have data, but
+ /// it does not currently have access to it.
+ ///
+ /// This function is unsafe because only one thread can call it at a time.
+ pub(super) unsafe fn pop(&self) -> PopResult<T> {
+ let tail = *self.tail.get();
+ let next = (*tail).next.load(Ordering::Acquire);
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ assert!((*tail).value.is_none());
+ assert!((*next).value.is_some());
+ let ret = (*next).value.take().unwrap();
+ drop(Box::from_raw(tail));
+ return Data(ret);
+ }
+
+ if self.head.load(Ordering::Acquire) == tail {
+ Empty
+ } else {
+ Inconsistent
+ }
+ }
+
+ /// Pops an element similarly to the `pop` function, but spin-waits on an
+ /// inconsistent queue state instead of returning `Inconsistent`.
+ ///
+ /// This function is unsafe because only one thread can call it at a time.
+ pub(super) unsafe fn pop_spin(&self) -> Option<T> {
+ loop {
+ match self.pop() {
+ Empty => return None,
+ Data(t) => return Some(t),
+ // Inconsistent means that there will be a message to pop
+ // in a short time. This branch can only be reached if
+ // values are being produced from another thread, so there
+ // are a few ways that we can deal with this:
+ //
+ // 1) Spin
+ // 2) thread::yield_now()
+ // 3) task::current().unwrap() & return Pending
+ //
+ // For now, thread::yield_now() is used, but it would
+ // probably be better to spin a few times then yield.
+ Inconsistent => {
+ thread::yield_now();
+ }
+ }
+ }
+ }
+}
+
+impl<T> Drop for Queue<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let mut cur = *self.tail.get();
+ while !cur.is_null() {
+ let next = (*cur).next.load(Ordering::Relaxed);
+ drop(Box::from_raw(cur));
+ cur = next;
+ }
+ }
+ }
+}
diff --git a/vendor/futures-channel/src/mpsc/sink_impl.rs b/vendor/futures-channel/src/mpsc/sink_impl.rs
new file mode 100644
index 000000000..1be20162c
--- /dev/null
+++ b/vendor/futures-channel/src/mpsc/sink_impl.rs
@@ -0,0 +1,73 @@
+use super::{SendError, Sender, TrySendError, UnboundedSender};
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use std::pin::Pin;
+
+impl<T> Sink<T> for Sender<T> {
+ type Error = SendError;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ (*self).poll_ready(cx)
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> {
+ (*self).start_send(msg)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match (*self).poll_ready(cx) {
+ Poll::Ready(Err(ref e)) if e.is_disconnected() => {
+ // If the receiver disconnected, we consider the sink to be flushed.
+ Poll::Ready(Ok(()))
+ }
+ x => x,
+ }
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.disconnect();
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> Sink<T> for UnboundedSender<T> {
+ type Error = SendError;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Self::poll_ready(&*self, cx)
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> {
+ Self::start_send(&mut *self, msg)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.disconnect();
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> Sink<T> for &UnboundedSender<T> {
+ type Error = SendError;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ UnboundedSender::poll_ready(*self, cx)
+ }
+
+ fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> {
+ self.unbounded_send(msg).map_err(TrySendError::into_send_error)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.close_channel();
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-channel/src/oneshot.rs b/vendor/futures-channel/src/oneshot.rs
new file mode 100644
index 000000000..5af651b91
--- /dev/null
+++ b/vendor/futures-channel/src/oneshot.rs
@@ -0,0 +1,488 @@
+//! A channel for sending a single message between asynchronous tasks.
+//!
+//! This is a single-producer, single-consumer channel.
+
+use alloc::sync::Arc;
+use core::fmt;
+use core::pin::Pin;
+use core::sync::atomic::AtomicBool;
+use core::sync::atomic::Ordering::SeqCst;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll, Waker};
+
+use crate::lock::Lock;
+
+/// A future for a value that will be provided by another asynchronous task.
+///
+/// This is created by the [`channel`](channel) function.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Receiver<T> {
+ inner: Arc<Inner<T>>,
+}
+
+/// A means of transmitting a single value to another task.
+///
+/// This is created by the [`channel`](channel) function.
+pub struct Sender<T> {
+ inner: Arc<Inner<T>>,
+}
+
+// The channels do not ever project Pin to the inner T
+impl<T> Unpin for Receiver<T> {}
+impl<T> Unpin for Sender<T> {}
+
+/// Internal state of the `Receiver`/`Sender` pair above. This is all used as
+/// the internal synchronization between the two for send/recv operations.
+struct Inner<T> {
+ /// Indicates whether this oneshot is complete yet. This is filled in both
+ /// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it
+ /// appropriately.
+ ///
+ /// For `Receiver`, if this is `true`, then it's guaranteed that `data` is
+ /// unlocked and ready to be inspected.
+ ///
+ /// For `Sender`, if this is `true`, then the oneshot has gone away and it
+ /// can return ready from `poll_canceled`.
+ complete: AtomicBool,
+
+ /// The actual data being transferred as part of this `Receiver`. This is
+ /// filled in by `Sender::send` and read by `Receiver::poll`.
+ ///
+ /// Note that this is protected by `Lock`, but it is in theory safe to
+ /// replace with an `UnsafeCell` as it's actually protected by `complete`
+ /// above. I wouldn't recommend doing this, however, unless someone is
+ /// supremely confident in the various atomic orderings here and there.
+ data: Lock<Option<T>>,
+
+ /// Field to store the task which is blocked in `Receiver::poll`.
+ ///
+ /// This is filled in when a oneshot is polled but not ready yet. Note that
+ /// the `Lock` here, unlike in `data` above, is important to resolve races.
+ /// Both the `Receiver` and the `Sender` halves understand that if they
+ /// can't acquire the lock then some important interference is happening.
+ rx_task: Lock<Option<Waker>>,
+
+ /// Like `rx_task` above, except for the task blocked in
+ /// `Sender::poll_canceled`. Additionally, `Lock` cannot be `UnsafeCell`.
+ tx_task: Lock<Option<Waker>>,
+}
+
+/// Creates a new one-shot channel for sending a single value across asynchronous tasks.
+///
+/// The channel works with an SPSC (single-producer, single-consumer) scheme.
+///
+/// This function is similar to Rust's channel constructor found in the standard
+/// library. Two halves are returned, the first of which is a `Sender` handle,
+/// used to signal the end of a computation and provide its value. The second
+/// half is a `Receiver` which implements the `Future` trait, resolving to the
+/// value that was given to the `Sender` handle.
+///
+/// Each half can be separately owned and sent across tasks.
+///
+/// # Examples
+///
+/// ```
+/// use futures::channel::oneshot;
+/// use std::{thread, time::Duration};
+///
+/// let (sender, receiver) = oneshot::channel::<i32>();
+///
+/// thread::spawn(|| {
+/// println!("THREAD: sleeping zzz...");
+/// thread::sleep(Duration::from_millis(1000));
+/// println!("THREAD: i'm awake! sending.");
+/// sender.send(3).unwrap();
+/// });
+///
+/// println!("MAIN: doing some useful stuff");
+///
+/// futures::executor::block_on(async {
+/// println!("MAIN: waiting for msg...");
+/// println!("MAIN: got: {:?}", receiver.await)
+/// });
+/// ```
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+ let inner = Arc::new(Inner::new());
+ let receiver = Receiver { inner: inner.clone() };
+ let sender = Sender { inner };
+ (sender, receiver)
+}
+
+impl<T> Inner<T> {
+ fn new() -> Self {
+ Self {
+ complete: AtomicBool::new(false),
+ data: Lock::new(None),
+ rx_task: Lock::new(None),
+ tx_task: Lock::new(None),
+ }
+ }
+
+ fn send(&self, t: T) -> Result<(), T> {
+ if self.complete.load(SeqCst) {
+ return Err(t);
+ }
+
+ // Note that this lock acquisition may fail if the receiver
+ // is closed and sets the `complete` flag to `true`, whereupon
+ // the receiver may call `poll()`.
+ if let Some(mut slot) = self.data.try_lock() {
+ assert!(slot.is_none());
+ *slot = Some(t);
+ drop(slot);
+
+ // If the receiver called `close()` between the check at the
+ // start of the function and the lock being released, then
+ // the receiver may not be around to receive it, so try to
+ // pull it back out.
+ if self.complete.load(SeqCst) {
+ // If lock acquisition fails, then receiver is actually
+ // receiving it, so we're good.
+ if let Some(mut slot) = self.data.try_lock() {
+ if let Some(t) = slot.take() {
+ return Err(t);
+ }
+ }
+ }
+ Ok(())
+ } else {
+ // Must have been closed
+ Err(t)
+ }
+ }
+
+ fn poll_canceled(&self, cx: &mut Context<'_>) -> Poll<()> {
+ // Fast path up first, just read the flag and see if our other half is
+ // gone. This flag is set both in our destructor and the oneshot
+ // destructor, but our destructor hasn't run yet so if it's set then the
+ // oneshot is gone.
+ if self.complete.load(SeqCst) {
+ return Poll::Ready(());
+ }
+
+ // If our other half is not gone then we need to park our current task
+ // and move it into the `tx_task` slot to get notified when it's
+ // actually gone.
+ //
+ // If `try_lock` fails, then the `Receiver` is in the process of using
+ // it, so we can deduce that it's now in the process of going away and
+ // hence we're canceled. If it succeeds then we just store our handle.
+ //
+ // Crucially we then check `complete` *again* before we return.
+ // While we were storing our handle inside `tx_task` the
+ // `Receiver` may have been dropped. The first thing it does is set the
+ // flag, and if it fails to acquire the lock it assumes that we'll see
+ // the flag later on. So... we then try to see the flag later on!
+ let handle = cx.waker().clone();
+ match self.tx_task.try_lock() {
+ Some(mut p) => *p = Some(handle),
+ None => return Poll::Ready(()),
+ }
+ if self.complete.load(SeqCst) {
+ Poll::Ready(())
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn is_canceled(&self) -> bool {
+ self.complete.load(SeqCst)
+ }
+
+ fn drop_tx(&self) {
+ // Flag that we're a completed `Sender` and try to wake up a receiver.
+ // Whether or not we actually stored any data will get picked up and
+ // translated to either an item or cancellation.
+ //
+ // Note that if we fail to acquire the `rx_task` lock then that means
+ // we're in one of two situations:
+ //
+ // 1. The receiver is trying to block in `poll`
+ // 2. The receiver is being dropped
+ //
+ // In the first case it'll check the `complete` flag after it's done
+ // blocking to see if it succeeded. In the latter case we don't need to
+ // wake up anyone anyway. So in both cases it's ok to ignore the `None`
+ // case of `try_lock` and bail out.
+ //
+ // The first case crucially depends on `Lock` using `SeqCst` ordering
+ // under the hood. If it instead used `Release` / `Acquire` ordering,
+ // then it would not necessarily synchronize with `inner.complete`
+ // and deadlock might be possible, as was observed in
+ // https://github.com/rust-lang/futures-rs/pull/219.
+ self.complete.store(true, SeqCst);
+
+ if let Some(mut slot) = self.rx_task.try_lock() {
+ if let Some(task) = slot.take() {
+ drop(slot);
+ task.wake();
+ }
+ }
+
+ // If we registered a task for cancel notification drop it to reduce
+ // spurious wakeups
+ if let Some(mut slot) = self.tx_task.try_lock() {
+ drop(slot.take());
+ }
+ }
+
+ fn close_rx(&self) {
+ // Flag our completion and then attempt to wake up the sender if it's
+ // blocked. See comments in `drop` below for more info
+ self.complete.store(true, SeqCst);
+ if let Some(mut handle) = self.tx_task.try_lock() {
+ if let Some(task) = handle.take() {
+ drop(handle);
+ task.wake()
+ }
+ }
+ }
+
+ fn try_recv(&self) -> Result<Option<T>, Canceled> {
+ // If we're complete, either `::close_rx` or `::drop_tx` was called.
+ // We can assume a successful send if data is present.
+ if self.complete.load(SeqCst) {
+ if let Some(mut slot) = self.data.try_lock() {
+ if let Some(data) = slot.take() {
+ return Ok(Some(data));
+ }
+ }
+ Err(Canceled)
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn recv(&self, cx: &mut Context<'_>) -> Poll<Result<T, Canceled>> {
+ // Check to see if some data has arrived. If it hasn't then we need to
+ // block our task.
+ //
+ // Note that the acquisition of the `rx_task` lock might fail below, but
+ // the only situation where this can happen is during `Sender::drop`
+ // when we are indeed completed already. If that's happening then we
+ // know we're completed so keep going.
+ let done = if self.complete.load(SeqCst) {
+ true
+ } else {
+ let task = cx.waker().clone();
+ match self.rx_task.try_lock() {
+ Some(mut slot) => {
+ *slot = Some(task);
+ false
+ }
+ None => true,
+ }
+ };
+
+ // If we're `done` via one of the paths above, then look at the data and
+ // figure out what the answer is. If, however, we stored `rx_task`
+ // successfully above we need to check again if we're completed in case
+ // a message was sent while `rx_task` was locked and couldn't notify us
+ // otherwise.
+ //
+ // If we're not done, and we're not complete, though, then we've
+ // successfully blocked our task and we return `Pending`.
+ if done || self.complete.load(SeqCst) {
+ // If taking the lock fails, the sender will realise that we're
+ // `done` when it checks the `complete` flag on the way out, and
+ // will treat the send as a failure.
+ if let Some(mut slot) = self.data.try_lock() {
+ if let Some(data) = slot.take() {
+ return Poll::Ready(Ok(data));
+ }
+ }
+ Poll::Ready(Err(Canceled))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn drop_rx(&self) {
+ // Indicate to the `Sender` that we're done, so any future calls to
+ // `poll_canceled` are weeded out.
+ self.complete.store(true, SeqCst);
+
+ // If we've blocked a task then there's no need for it to stick around,
+ // so we need to drop it. If this lock acquisition fails, though, then
+ // it's just because our `Sender` is trying to take the task, so we
+ // let them take care of that.
+ if let Some(mut slot) = self.rx_task.try_lock() {
+ let task = slot.take();
+ drop(slot);
+ drop(task);
+ }
+
+ // Finally, if our `Sender` wants to get notified of us going away, it
+ // would have stored something in `tx_task`. Here we try to peel that
+ // out and unpark it.
+ //
+ // Note that the `try_lock` here may fail, but only if the `Sender` is
+ // in the process of filling in the task. If that happens then we
+ // already flagged `complete` and they'll pick that up above.
+ if let Some(mut handle) = self.tx_task.try_lock() {
+ if let Some(task) = handle.take() {
+ drop(handle);
+ task.wake()
+ }
+ }
+ }
+}
+
+impl<T> Sender<T> {
+ /// Completes this oneshot with a successful result.
+ ///
+ /// This function will consume `self` and indicate to the other end, the
+ /// [`Receiver`](Receiver), that the value provided is the result of the
+ /// computation this represents.
+ ///
+ /// If the value is successfully enqueued for the remote end to receive,
+ /// then `Ok(())` is returned. If the receiving end was dropped before
+ /// this function was called, however, then `Err(t)` is returned.
+ pub fn send(self, t: T) -> Result<(), T> {
+ self.inner.send(t)
+ }
+
+ /// Polls this `Sender` half to detect whether its associated
+ /// [`Receiver`](Receiver) has been dropped.
+ ///
+ /// # Return values
+ ///
+ /// If `Ready(())` is returned then the associated `Receiver` has been
+ /// dropped, which means any work required for sending should be canceled.
+ ///
+ /// If `Pending` is returned then the associated `Receiver` is still
+ /// alive and may be able to receive a message if sent. The current task,
+ /// however, is scheduled to receive a notification if the corresponding
+ /// `Receiver` goes away.
+ pub fn poll_canceled(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ self.inner.poll_canceled(cx)
+ }
+
+ /// Creates a future that resolves when this `Sender`'s corresponding
+ /// [`Receiver`](Receiver) half has hung up.
+ ///
+ /// This is a utility wrapping [`poll_canceled`](Sender::poll_canceled)
+ /// to expose a [`Future`](core::future::Future).
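+ ///
+ /// A minimal sketch (this assumes `futures::executor::block_on`, as in the
+ /// `channel` example above):
+ ///
+ /// ```
+ /// use futures::channel::oneshot;
+ /// use futures::executor::block_on;
+ ///
+ /// let (mut tx, rx) = oneshot::channel::<u32>();
+ ///
+ /// // Dropping the receiver hangs up the channel...
+ /// drop(rx);
+ ///
+ /// // ...so the cancellation future resolves immediately.
+ /// block_on(tx.cancellation());
+ /// assert!(tx.is_canceled());
+ /// ```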
+ pub fn cancellation(&mut self) -> Cancellation<'_, T> {
+ Cancellation { inner: self }
+ }
+
+ /// Tests to see whether this `Sender`'s corresponding `Receiver`
+ /// has been dropped.
+ ///
+ /// Unlike [`poll_canceled`](Sender::poll_canceled), this function does not
+ /// enqueue a task for wakeup upon cancellation, but merely reports the
+ /// current state, which may be subject to concurrent modification.
+ pub fn is_canceled(&self) -> bool {
+ self.inner.is_canceled()
+ }
+
+ /// Tests to see whether this `Sender` is connected to the given `Receiver`. That is, whether
+ /// they were created by the same call to `channel`.
+ pub fn is_connected_to(&self, receiver: &Receiver<T>) -> bool {
+ Arc::ptr_eq(&self.inner, &receiver.inner)
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ self.inner.drop_tx()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Sender<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Sender").field("complete", &self.inner.complete).finish()
+ }
+}
+
+/// A future that resolves when the receiving end of a channel has hung up.
+///
+/// This is an `.await`-friendly interface around [`poll_canceled`](Sender::poll_canceled).
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct Cancellation<'a, T> {
+ inner: &'a mut Sender<T>,
+}
+
+impl<T> Future for Cancellation<'_, T> {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ self.inner.poll_canceled(cx)
+ }
+}
+
+/// Error returned from a [`Receiver`](Receiver) when the corresponding
+/// [`Sender`](Sender) is dropped.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Canceled;
+
+impl fmt::Display for Canceled {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "oneshot canceled")
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for Canceled {}
+
+impl<T> Receiver<T> {
+ /// Gracefully close this receiver, preventing any subsequent attempts to
+ /// send to it.
+ ///
+ /// Any `send` operation which happens after this method returns is
+ /// guaranteed to fail. After calling this method, you can use
+ /// [`Receiver::poll`](core::future::Future::poll) to determine whether a
+ /// message had previously been sent.
+ pub fn close(&mut self) {
+ self.inner.close_rx()
+ }
+
+ /// Attempts to receive a message outside of the context of a task.
+ ///
+ /// Does not schedule a task wakeup or have any other side effects.
+ ///
+ /// A return value of `None` must be considered immediately stale (out of
+ /// date) unless [`close`](Receiver::close) has been called first.
+ ///
+ /// Returns an error if the sender was dropped.
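+ ///
+ /// A short sketch of both outcomes (hypothetical values):
+ ///
+ /// ```
+ /// use futures::channel::oneshot;
+ ///
+ /// // A value that was sent can be picked up without a task context.
+ /// let (tx, mut rx) = oneshot::channel::<u32>();
+ /// tx.send(3).unwrap();
+ /// assert_eq!(rx.try_recv(), Ok(Some(3)));
+ ///
+ /// // If the sender is dropped without sending, the cancellation is reported.
+ /// let (tx, mut rx) = oneshot::channel::<u32>();
+ /// drop(tx);
+ /// assert_eq!(rx.try_recv(), Err(oneshot::Canceled));
+ /// ```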
+ pub fn try_recv(&mut self) -> Result<Option<T>, Canceled> {
+ self.inner.try_recv()
+ }
+}
+
+impl<T> Future for Receiver<T> {
+ type Output = Result<T, Canceled>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<T, Canceled>> {
+ self.inner.recv(cx)
+ }
+}
+
+impl<T> FusedFuture for Receiver<T> {
+ fn is_terminated(&self) -> bool {
+ if self.inner.complete.load(SeqCst) {
+ if let Some(slot) = self.inner.data.try_lock() {
+ if slot.is_some() {
+ return false;
+ }
+ }
+ true
+ } else {
+ false
+ }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ self.inner.drop_rx()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Receiver<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Receiver").field("complete", &self.inner.complete).finish()
+ }
+}
diff --git a/vendor/futures-channel/tests/channel.rs b/vendor/futures-channel/tests/channel.rs
new file mode 100644
index 000000000..5f01a8ef4
--- /dev/null
+++ b/vendor/futures-channel/tests/channel.rs
@@ -0,0 +1,66 @@
+use futures::channel::mpsc;
+use futures::executor::block_on;
+use futures::future::poll_fn;
+use futures::sink::SinkExt;
+use futures::stream::StreamExt;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::thread;
+
+#[test]
+fn sequence() {
+ let (tx, rx) = mpsc::channel(1);
+
+ let amt = 20;
+ let t = thread::spawn(move || block_on(send_sequence(amt, tx)));
+ let list: Vec<_> = block_on(rx.collect());
+ let mut list = list.into_iter();
+ for i in (1..=amt).rev() {
+ assert_eq!(list.next(), Some(i));
+ }
+ assert_eq!(list.next(), None);
+
+ t.join().unwrap();
+}
+
+async fn send_sequence(n: u32, mut sender: mpsc::Sender<u32>) {
+ for x in 0..n {
+ sender.send(n - x).await.unwrap();
+ }
+}
+
+#[test]
+fn drop_sender() {
+ let (tx, mut rx) = mpsc::channel::<u32>(1);
+ drop(tx);
+ let f = poll_fn(|cx| rx.poll_next_unpin(cx));
+ assert_eq!(block_on(f), None)
+}
+
+#[test]
+fn drop_rx() {
+ let (mut tx, rx) = mpsc::channel::<u32>(1);
+ block_on(tx.send(1)).unwrap();
+ drop(rx);
+ assert!(block_on(tx.send(1)).is_err());
+}
+
+#[test]
+fn drop_order() {
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+ let (mut tx, rx) = mpsc::channel(1);
+
+ struct A;
+
+ impl Drop for A {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ block_on(tx.send(A)).unwrap();
+ assert_eq!(DROPS.load(Ordering::SeqCst), 0);
+ drop(rx);
+ assert_eq!(DROPS.load(Ordering::SeqCst), 1);
+ assert!(block_on(tx.send(A)).is_err());
+ assert_eq!(DROPS.load(Ordering::SeqCst), 2);
+}
diff --git a/vendor/futures-channel/tests/mpsc-close.rs b/vendor/futures-channel/tests/mpsc-close.rs
new file mode 100644
index 000000000..81203d334
--- /dev/null
+++ b/vendor/futures-channel/tests/mpsc-close.rs
@@ -0,0 +1,298 @@
+use futures::channel::mpsc;
+use futures::executor::block_on;
+use futures::future::Future;
+use futures::sink::SinkExt;
+use futures::stream::StreamExt;
+use futures::task::{Context, Poll};
+use std::pin::Pin;
+use std::sync::{Arc, Weak};
+use std::thread;
+use std::time::{Duration, Instant};
+
+#[test]
+fn smoke() {
+ let (mut sender, receiver) = mpsc::channel(1);
+
+ let t = thread::spawn(move || while let Ok(()) = block_on(sender.send(42)) {});
+
+ // `receiver` needs to be dropped for `sender` to stop sending, and therefore must be dropped before the join.
+ block_on(receiver.take(3).for_each(|_| futures::future::ready(())));
+
+ t.join().unwrap()
+}
+
+#[test]
+fn multiple_senders_disconnect() {
+ {
+ let (mut tx1, mut rx) = mpsc::channel(1);
+ let (tx2, mut tx3, mut tx4) = (tx1.clone(), tx1.clone(), tx1.clone());
+
+ // disconnect, dropping and Sink::poll_close should all close this sender but leave the
+ // channel open for other senders
+ tx1.disconnect();
+ drop(tx2);
+ block_on(tx3.close()).unwrap();
+
+ assert!(tx1.is_closed());
+ assert!(tx3.is_closed());
+ assert!(!tx4.is_closed());
+
+ block_on(tx4.send(5)).unwrap();
+ assert_eq!(block_on(rx.next()), Some(5));
+
+ // dropping the final sender will close the channel
+ drop(tx4);
+ assert_eq!(block_on(rx.next()), None);
+ }
+
+ {
+ let (mut tx1, mut rx) = mpsc::unbounded();
+ let (tx2, mut tx3, mut tx4) = (tx1.clone(), tx1.clone(), tx1.clone());
+
+ // disconnect, dropping and Sink::poll_close should all close this sender but leave the
+ // channel open for other senders
+ tx1.disconnect();
+ drop(tx2);
+ block_on(tx3.close()).unwrap();
+
+ assert!(tx1.is_closed());
+ assert!(tx3.is_closed());
+ assert!(!tx4.is_closed());
+
+ block_on(tx4.send(5)).unwrap();
+ assert_eq!(block_on(rx.next()), Some(5));
+
+ // dropping the final sender will close the channel
+ drop(tx4);
+ assert_eq!(block_on(rx.next()), None);
+ }
+}
+
+#[test]
+fn multiple_senders_close_channel() {
+ {
+ let (mut tx1, mut rx) = mpsc::channel(1);
+ let mut tx2 = tx1.clone();
+
+ // close_channel should shut down the whole channel
+ tx1.close_channel();
+
+ assert!(tx1.is_closed());
+ assert!(tx2.is_closed());
+
+ let err = block_on(tx2.send(5)).unwrap_err();
+ assert!(err.is_disconnected());
+
+ assert_eq!(block_on(rx.next()), None);
+ }
+
+ {
+ let (tx1, mut rx) = mpsc::unbounded();
+ let mut tx2 = tx1.clone();
+
+ // close_channel should shut down the whole channel
+ tx1.close_channel();
+
+ assert!(tx1.is_closed());
+ assert!(tx2.is_closed());
+
+ let err = block_on(tx2.send(5)).unwrap_err();
+ assert!(err.is_disconnected());
+
+ assert_eq!(block_on(rx.next()), None);
+ }
+}
+
+#[test]
+fn single_receiver_drop_closes_channel_and_drains() {
+ {
+ let ref_count = Arc::new(0);
+ let weak_ref = Arc::downgrade(&ref_count);
+
+ let (sender, receiver) = mpsc::unbounded();
+ sender.unbounded_send(ref_count).expect("failed to send");
+
+ // Verify that the sent message is still live.
+ assert!(weak_ref.upgrade().is_some());
+
+ drop(receiver);
+
+ // The sender should know the channel is closed.
+ assert!(sender.is_closed());
+
+ // Verify that the sent message has been dropped.
+ assert!(weak_ref.upgrade().is_none());
+ }
+
+ {
+ let ref_count = Arc::new(0);
+ let weak_ref = Arc::downgrade(&ref_count);
+
+ let (mut sender, receiver) = mpsc::channel(1);
+ sender.try_send(ref_count).expect("failed to send");
+
+ // Verify that the sent message is still live.
+ assert!(weak_ref.upgrade().is_some());
+
+ drop(receiver);
+
+ // The sender should know the channel is closed.
+ assert!(sender.is_closed());
+
+ // Verify that the sent message has been dropped.
+ assert!(weak_ref.upgrade().is_none());
+ assert!(sender.is_closed());
+ }
+}
+
+// Stress test that `try_send()`s occurring concurrently with receiver
+// close/drops don't appear as successful sends.
+#[test]
+fn stress_try_send_as_receiver_closes() {
+ const AMT: usize = 10000;
+ // To provide variable timing characteristics (in the hopes of
+ // reproducing the collision that leads to a race), we busy-re-poll
+ // the test MPSC receiver a variable number of times before actually
+ // stopping. We vary this countdown between 1 and the following
+ // value.
+ const MAX_COUNTDOWN: usize = 20;
+ // When we detect that a successfully sent item is still in the
+ // queue after a disconnect, we spin for up to `SPIN_TIMEOUT_S` seconds
+ // to confirm that it is a persistent condition and not a concurrency
+ // illusion.
+ const SPIN_TIMEOUT_S: u64 = 10;
+ const SPIN_SLEEP_MS: u64 = 10;
+ struct TestRx {
+ rx: mpsc::Receiver<Arc<()>>,
+ // The number of times to query `rx` before dropping it.
+ poll_count: usize,
+ }
+ struct TestTask {
+ command_rx: mpsc::Receiver<TestRx>,
+ test_rx: Option<mpsc::Receiver<Arc<()>>>,
+ countdown: usize,
+ }
+ impl TestTask {
+ /// Create a new TestTask
+ fn new() -> (TestTask, mpsc::Sender<TestRx>) {
+ let (command_tx, command_rx) = mpsc::channel::<TestRx>(0);
+ (
+ TestTask {
+ command_rx,
+ test_rx: None,
+ countdown: 0, // 0 means no countdown is in progress.
+ },
+ command_tx,
+ )
+ }
+ }
+ impl Future for TestTask {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // Poll the test channel, if one is present.
+ if let Some(rx) = &mut self.test_rx {
+ if let Poll::Ready(v) = rx.poll_next_unpin(cx) {
+ let _ = v.expect("test finished unexpectedly!");
+ }
+ self.countdown -= 1;
+ // Busy-poll until the countdown is finished.
+ cx.waker().wake_by_ref();
+ }
+ // Accept any newly submitted MPSC channels for testing.
+ match self.command_rx.poll_next_unpin(cx) {
+ Poll::Ready(Some(TestRx { rx, poll_count })) => {
+ self.test_rx = Some(rx);
+ self.countdown = poll_count;
+ cx.waker().wake_by_ref();
+ }
+ Poll::Ready(None) => return Poll::Ready(()),
+ Poll::Pending => {}
+ }
+ if self.countdown == 0 {
+ // Countdown complete -- drop the Receiver.
+ self.test_rx = None;
+ }
+ Poll::Pending
+ }
+ }
+ let (f, mut cmd_tx) = TestTask::new();
+ let bg = thread::spawn(move || block_on(f));
+ for i in 0..AMT {
+ let (mut test_tx, rx) = mpsc::channel(0);
+ let poll_count = i % MAX_COUNTDOWN;
+ cmd_tx.try_send(TestRx { rx, poll_count }).unwrap();
+ let mut prev_weak: Option<Weak<()>> = None;
+ let mut attempted_sends = 0;
+ let mut successful_sends = 0;
+ loop {
+ // Create a test item.
+ let item = Arc::new(());
+ let weak = Arc::downgrade(&item);
+ match test_tx.try_send(item) {
+ Ok(_) => {
+ prev_weak = Some(weak);
+ successful_sends += 1;
+ }
+ Err(ref e) if e.is_full() => {}
+ Err(ref e) if e.is_disconnected() => {
+ // Test for evidence of the race condition.
+ if let Some(prev_weak) = prev_weak {
+ if prev_weak.upgrade().is_some() {
+ // The previously sent item is still allocated.
+ // However, there appears to be some aspect of the
+ // concurrency that can legitimately cause the Arc
+ // to be momentarily valid. Spin for up to
+ // `SPIN_TIMEOUT_S` seconds waiting for the
+ // previously sent item to be dropped.
+ let t0 = Instant::now();
+ let mut spins = 0;
+ loop {
+ if prev_weak.upgrade().is_none() {
+ break;
+ }
+ assert!(
+ t0.elapsed() < Duration::from_secs(SPIN_TIMEOUT_S),
+ "item not dropped on iteration {} after \
+ {} sends ({} successful). spin=({})",
+ i,
+ attempted_sends,
+ successful_sends,
+ spins
+ );
+ spins += 1;
+ thread::sleep(Duration::from_millis(SPIN_SLEEP_MS));
+ }
+ }
+ }
+ break;
+ }
+ Err(ref e) => panic!("unexpected error: {}", e),
+ }
+ attempted_sends += 1;
+ }
+ }
+ drop(cmd_tx);
+ bg.join().expect("background thread join");
+}
+
+#[test]
+fn unbounded_try_next_after_none() {
+ let (tx, mut rx) = mpsc::unbounded::<String>();
+ // Drop the sender, close the channel.
+ drop(tx);
+ // Receive the end of channel.
+ assert_eq!(Ok(None), rx.try_next().map_err(|_| ()));
+ // None received, check we can call `try_next` again.
+ assert_eq!(Ok(None), rx.try_next().map_err(|_| ()));
+}
+
+#[test]
+fn bounded_try_next_after_none() {
+ let (tx, mut rx) = mpsc::channel::<String>(17);
+ // Drop the sender, close the channel.
+ drop(tx);
+ // Receive the end of channel.
+ assert_eq!(Ok(None), rx.try_next().map_err(|_| ()));
+ // None received, check we can call `try_next` again.
+ assert_eq!(Ok(None), rx.try_next().map_err(|_| ()));
+}
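The two `try_next`-after-`None` tests above boil down to a small contract that can be sketched on its own (illustrative only, assuming the `futures` facade crate): `try_next` returns `Ok(Some(item))` when an item is queued, `Err(_)` when the channel is open but empty, and `Ok(None)` — repeatedly — once every sender is gone.

    use futures::channel::mpsc;

    fn main() {
        let (tx, mut rx) = mpsc::unbounded::<u32>();
        tx.unbounded_send(7).unwrap();
        assert_eq!(rx.try_next().unwrap(), Some(7)); // item available
        assert!(rx.try_next().is_err());             // open but empty
        drop(tx);
        assert_eq!(rx.try_next().unwrap(), None);    // closed
        assert_eq!(rx.try_next().unwrap(), None);    // still closed on repeat calls
    }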
diff --git a/vendor/futures-channel/tests/mpsc.rs b/vendor/futures-channel/tests/mpsc.rs
new file mode 100644
index 000000000..88cdef13d
--- /dev/null
+++ b/vendor/futures-channel/tests/mpsc.rs
@@ -0,0 +1,630 @@
+use futures::channel::{mpsc, oneshot};
+use futures::executor::{block_on, block_on_stream};
+use futures::future::{poll_fn, FutureExt};
+use futures::pin_mut;
+use futures::sink::{Sink, SinkExt};
+use futures::stream::{Stream, StreamExt};
+use futures::task::{Context, Poll};
+use futures_test::task::{new_count_waker, noop_context};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+trait AssertSend: Send {}
+impl AssertSend for mpsc::Sender<i32> {}
+impl AssertSend for mpsc::Receiver<i32> {}
+
+#[test]
+fn send_recv() {
+ let (mut tx, rx) = mpsc::channel::<i32>(16);
+
+ block_on(tx.send(1)).unwrap();
+ drop(tx);
+ let v: Vec<_> = block_on(rx.collect());
+ assert_eq!(v, vec![1]);
+}
+
+#[test]
+fn send_recv_no_buffer() {
+ // Run on a task context
+ block_on(poll_fn(move |cx| {
+ let (tx, rx) = mpsc::channel::<i32>(0);
+ pin_mut!(tx, rx);
+
+ assert!(tx.as_mut().poll_flush(cx).is_ready());
+ assert!(tx.as_mut().poll_ready(cx).is_ready());
+
+ // Send first message
+ assert!(tx.as_mut().start_send(1).is_ok());
+ assert!(tx.as_mut().poll_ready(cx).is_pending());
+
+ // poll_ready said Pending, so no room in buffer, therefore new sends
+ // should get rejected with is_full.
+ assert!(tx.as_mut().start_send(0).unwrap_err().is_full());
+ assert!(tx.as_mut().poll_ready(cx).is_pending());
+
+ // Take the value
+ assert_eq!(rx.as_mut().poll_next(cx), Poll::Ready(Some(1)));
+ assert!(tx.as_mut().poll_ready(cx).is_ready());
+
+ // Send second message
+ assert!(tx.as_mut().poll_ready(cx).is_ready());
+ assert!(tx.as_mut().start_send(2).is_ok());
+ assert!(tx.as_mut().poll_ready(cx).is_pending());
+
+ // Take the value
+ assert_eq!(rx.as_mut().poll_next(cx), Poll::Ready(Some(2)));
+ assert!(tx.as_mut().poll_ready(cx).is_ready());
+
+ Poll::Ready(())
+ }));
+}
+
+#[test]
+fn send_shared_recv() {
+ let (mut tx1, rx) = mpsc::channel::<i32>(16);
+ let mut rx = block_on_stream(rx);
+ let mut tx2 = tx1.clone();
+
+ block_on(tx1.send(1)).unwrap();
+ assert_eq!(rx.next(), Some(1));
+
+ block_on(tx2.send(2)).unwrap();
+ assert_eq!(rx.next(), Some(2));
+}
+
+#[test]
+fn send_recv_threads() {
+ let (mut tx, rx) = mpsc::channel::<i32>(16);
+
+ let t = thread::spawn(move || {
+ block_on(tx.send(1)).unwrap();
+ });
+
+ let v: Vec<_> = block_on(rx.take(1).collect());
+ assert_eq!(v, vec![1]);
+
+ t.join().unwrap();
+}
+
+#[test]
+fn send_recv_threads_no_capacity() {
+ let (mut tx, rx) = mpsc::channel::<i32>(0);
+
+ let t = thread::spawn(move || {
+ block_on(tx.send(1)).unwrap();
+ block_on(tx.send(2)).unwrap();
+ });
+
+ let v: Vec<_> = block_on(rx.collect());
+ assert_eq!(v, vec![1, 2]);
+
+ t.join().unwrap();
+}
+
+#[test]
+fn recv_close_gets_none() {
+ let (mut tx, mut rx) = mpsc::channel::<i32>(10);
+
+ // Run on a task context
+ block_on(poll_fn(move |cx| {
+ rx.close();
+
+ assert_eq!(rx.poll_next_unpin(cx), Poll::Ready(None));
+ match tx.poll_ready(cx) {
+ Poll::Pending | Poll::Ready(Ok(_)) => panic!(),
+ Poll::Ready(Err(e)) => assert!(e.is_disconnected()),
+ };
+
+ Poll::Ready(())
+ }));
+}
+
+#[test]
+fn tx_close_gets_none() {
+ let (_, mut rx) = mpsc::channel::<i32>(10);
+
+ // Run on a task context
+ block_on(poll_fn(move |cx| {
+ assert_eq!(rx.poll_next_unpin(cx), Poll::Ready(None));
+ Poll::Ready(())
+ }));
+}
+
+// #[test]
+// fn spawn_sends_items() {
+// let core = local_executor::Core::new();
+// let stream = unfold(0, |i| Some(ok::<_,u8>((i, i + 1))));
+// let rx = mpsc::spawn(stream, &core, 1);
+// assert_eq!(core.run(rx.take(4).collect()).unwrap(),
+// [0, 1, 2, 3]);
+// }
+
+// #[test]
+// fn spawn_kill_dead_stream() {
+// use std::thread;
+// use std::time::Duration;
+// use futures::future::Either;
+// use futures::sync::oneshot;
+//
+// // a stream which never returns anything (maybe a remote end isn't
+// // responding), but dropping it leads to observable side effects
+// // (like closing connections, releasing limited resources, ...)
+// #[derive(Debug)]
+// struct Dead {
+// // when dropped you should get Err(oneshot::Canceled) on the
+// // receiving end
+// done: oneshot::Sender<()>,
+// }
+// impl Stream for Dead {
+// type Item = ();
+// type Error = ();
+//
+// fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+// Ok(Poll::Pending)
+// }
+// }
+//
+// // need to implement a timeout for the test, as it would hang
+// // forever right now
+// let (timeout_tx, timeout_rx) = oneshot::channel();
+// thread::spawn(move || {
+// thread::sleep(Duration::from_millis(1000));
+// let _ = timeout_tx.send(());
+// });
+//
+// let core = local_executor::Core::new();
+// let (done_tx, done_rx) = oneshot::channel();
+// let stream = Dead{done: done_tx};
+// let rx = mpsc::spawn(stream, &core, 1);
+// let res = core.run(
+// Ok::<_, ()>(())
+// .into_future()
+// .then(move |_| {
+// // now drop the spawned stream: maybe some timeout exceeded,
+// // or some connection on this end was closed by the remote
+// // end.
+// drop(rx);
+// // and wait for the spawned stream to release its resources
+// done_rx
+// })
+// .select2(timeout_rx)
+// );
+// match res {
+// Err(Either::A((oneshot::Canceled, _))) => (),
+// _ => {
+// panic!("dead stream wasn't canceled");
+// },
+// }
+// }
+
+#[test]
+fn stress_shared_unbounded() {
+ const AMT: u32 = 10000;
+ const NTHREADS: u32 = 8;
+ let (tx, rx) = mpsc::unbounded::<i32>();
+
+ let t = thread::spawn(move || {
+ let result: Vec<_> = block_on(rx.collect());
+ assert_eq!(result.len(), (AMT * NTHREADS) as usize);
+ for item in result {
+ assert_eq!(item, 1);
+ }
+ });
+
+ for _ in 0..NTHREADS {
+ let tx = tx.clone();
+
+ thread::spawn(move || {
+ for _ in 0..AMT {
+ tx.unbounded_send(1).unwrap();
+ }
+ });
+ }
+
+ drop(tx);
+
+ t.join().ok().unwrap();
+}
+
+#[test]
+fn stress_shared_bounded_hard() {
+ const AMT: u32 = 10000;
+ const NTHREADS: u32 = 8;
+ let (tx, rx) = mpsc::channel::<i32>(0);
+
+ let t = thread::spawn(move || {
+ let result: Vec<_> = block_on(rx.collect());
+ assert_eq!(result.len(), (AMT * NTHREADS) as usize);
+ for item in result {
+ assert_eq!(item, 1);
+ }
+ });
+
+ for _ in 0..NTHREADS {
+ let mut tx = tx.clone();
+
+ thread::spawn(move || {
+ for _ in 0..AMT {
+ block_on(tx.send(1)).unwrap();
+ }
+ });
+ }
+
+ drop(tx);
+
+ t.join().unwrap();
+}
+
+#[allow(clippy::same_item_push)]
+#[test]
+fn stress_receiver_multi_task_bounded_hard() {
+ const AMT: usize = 10_000;
+ const NTHREADS: u32 = 2;
+
+ let (mut tx, rx) = mpsc::channel::<usize>(0);
+ let rx = Arc::new(Mutex::new(Some(rx)));
+ let n = Arc::new(AtomicUsize::new(0));
+
+ let mut th = vec![];
+
+ for _ in 0..NTHREADS {
+ let rx = rx.clone();
+ let n = n.clone();
+
+ let t = thread::spawn(move || {
+ let mut i = 0;
+
+ loop {
+ i += 1;
+ let mut rx_opt = rx.lock().unwrap();
+ if let Some(rx) = &mut *rx_opt {
+ if i % 5 == 0 {
+ let item = block_on(rx.next());
+
+ if item.is_none() {
+ *rx_opt = None;
+ break;
+ }
+
+ n.fetch_add(1, Ordering::Relaxed);
+ } else {
+ // Just poll
+ let n = n.clone();
+ match rx.poll_next_unpin(&mut noop_context()) {
+ Poll::Ready(Some(_)) => {
+ n.fetch_add(1, Ordering::Relaxed);
+ }
+ Poll::Ready(None) => {
+ *rx_opt = None;
+ break;
+ }
+ Poll::Pending => {}
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ });
+
+ th.push(t);
+ }
+
+ for i in 0..AMT {
+ block_on(tx.send(i)).unwrap();
+ }
+ drop(tx);
+
+ for t in th {
+ t.join().unwrap();
+ }
+
+ assert_eq!(AMT, n.load(Ordering::Relaxed));
+}
+
+/// Stress test that the receiver properly receives all the messages
+/// after the sender is dropped.
+#[test]
+fn stress_drop_sender() {
+ fn list() -> impl Stream<Item = i32> {
+ let (tx, rx) = mpsc::channel(1);
+ thread::spawn(move || {
+ block_on(send_one_two_three(tx));
+ });
+ rx
+ }
+
+ for _ in 0..10000 {
+ let v: Vec<_> = block_on(list().collect());
+ assert_eq!(v, vec![1, 2, 3]);
+ }
+}
+
+async fn send_one_two_three(mut tx: mpsc::Sender<i32>) {
+ for i in 1..=3 {
+ tx.send(i).await.unwrap();
+ }
+}
+
+/// Stress test that no messages are lost
+/// after the receiver is dropped.
+fn stress_close_receiver_iter() {
+ let (tx, rx) = mpsc::unbounded();
+ let mut rx = block_on_stream(rx);
+ let (unwritten_tx, unwritten_rx) = std::sync::mpsc::channel();
+ let th = thread::spawn(move || {
+ for i in 1.. {
+ if tx.unbounded_send(i).is_err() {
+ unwritten_tx.send(i).expect("unwritten_tx");
+ return;
+ }
+ }
+ });
+
+ // Read one message to make sure thread effectively started
+ assert_eq!(Some(1), rx.next());
+
+ rx.close();
+
+ for i in 2.. {
+ match rx.next() {
+ Some(r) => assert!(i == r),
+ None => {
+ let unwritten = unwritten_rx.recv().expect("unwritten_rx");
+ assert_eq!(unwritten, i);
+ th.join().unwrap();
+ return;
+ }
+ }
+ }
+}
+
+#[test]
+fn stress_close_receiver() {
+ for _ in 0..10000 {
+ stress_close_receiver_iter();
+ }
+}
+
+async fn stress_poll_ready_sender(mut sender: mpsc::Sender<u32>, count: u32) {
+ for i in (1..=count).rev() {
+ sender.send(i).await.unwrap();
+ }
+}
+
+/// Tests that, after `poll_ready` indicates capacity, a channel can always send without waiting.
+#[allow(clippy::same_item_push)]
+#[test]
+fn stress_poll_ready() {
+ const AMT: u32 = 1000;
+ const NTHREADS: u32 = 8;
+
+ /// Run a stress test using the specified channel capacity.
+ fn stress(capacity: usize) {
+ let (tx, rx) = mpsc::channel(capacity);
+ let mut threads = Vec::new();
+ for _ in 0..NTHREADS {
+ let sender = tx.clone();
+ threads.push(thread::spawn(move || block_on(stress_poll_ready_sender(sender, AMT))));
+ }
+ drop(tx);
+
+ let result: Vec<_> = block_on(rx.collect());
+ assert_eq!(result.len() as u32, AMT * NTHREADS);
+
+ for thread in threads {
+ thread.join().unwrap();
+ }
+ }
+
+ stress(0);
+ stress(1);
+ stress(8);
+ stress(16);
+}
+
+#[test]
+fn try_send_1() {
+ const N: usize = 3000;
+ let (mut tx, rx) = mpsc::channel(0);
+
+ let t = thread::spawn(move || {
+ for i in 0..N {
+ loop {
+ if tx.try_send(i).is_ok() {
+ break;
+ }
+ }
+ }
+ });
+
+ let result: Vec<_> = block_on(rx.collect());
+ for (i, j) in result.into_iter().enumerate() {
+ assert_eq!(i, j);
+ }
+
+ t.join().unwrap();
+}
+
+#[test]
+fn try_send_2() {
+ let (mut tx, rx) = mpsc::channel(0);
+ let mut rx = block_on_stream(rx);
+
+ tx.try_send("hello").unwrap();
+
+ let (readytx, readyrx) = oneshot::channel::<()>();
+
+ let th = thread::spawn(move || {
+ block_on(poll_fn(|cx| {
+ assert!(tx.poll_ready(cx).is_pending());
+ Poll::Ready(())
+ }));
+
+ drop(readytx);
+ block_on(tx.send("goodbye")).unwrap();
+ });
+
+ let _ = block_on(readyrx);
+ assert_eq!(rx.next(), Some("hello"));
+ assert_eq!(rx.next(), Some("goodbye"));
+ assert_eq!(rx.next(), None);
+
+ th.join().unwrap();
+}
+
+#[test]
+fn try_send_fail() {
+ let (mut tx, rx) = mpsc::channel(0);
+ let mut rx = block_on_stream(rx);
+
+ tx.try_send("hello").unwrap();
+
+ // This should fail
+ assert!(tx.try_send("fail").is_err());
+
+ assert_eq!(rx.next(), Some("hello"));
+
+ tx.try_send("goodbye").unwrap();
+ drop(tx);
+
+ assert_eq!(rx.next(), Some("goodbye"));
+ assert_eq!(rx.next(), None);
+}
+
+#[test]
+fn try_send_recv() {
+ let (mut tx, mut rx) = mpsc::channel(1);
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap();
+ tx.try_send("hello").unwrap_err(); // should be full
+ rx.try_next().unwrap();
+ rx.try_next().unwrap();
+ rx.try_next().unwrap_err(); // should be empty
+ tx.try_send("hello").unwrap();
+ rx.try_next().unwrap();
+ rx.try_next().unwrap_err(); // should be empty
+}
+
+#[test]
+fn same_receiver() {
+ let (mut txa1, _) = mpsc::channel::<i32>(1);
+ let txa2 = txa1.clone();
+
+ let (mut txb1, _) = mpsc::channel::<i32>(1);
+ let txb2 = txb1.clone();
+
+ assert!(txa1.same_receiver(&txa2));
+ assert!(txb1.same_receiver(&txb2));
+ assert!(!txa1.same_receiver(&txb1));
+
+ txa1.disconnect();
+ txb1.close_channel();
+
+ assert!(!txa1.same_receiver(&txa2));
+ assert!(txb1.same_receiver(&txb2));
+}
+
+#[test]
+fn is_connected_to() {
+ let (txa, rxa) = mpsc::channel::<i32>(1);
+ let (txb, rxb) = mpsc::channel::<i32>(1);
+
+ assert!(txa.is_connected_to(&rxa));
+ assert!(txb.is_connected_to(&rxb));
+ assert!(!txa.is_connected_to(&rxb));
+ assert!(!txb.is_connected_to(&rxa));
+}
+
+#[test]
+fn hash_receiver() {
+ use std::collections::hash_map::DefaultHasher;
+ use std::hash::Hasher;
+
+ let mut hasher_a1 = DefaultHasher::new();
+ let mut hasher_a2 = DefaultHasher::new();
+ let mut hasher_b1 = DefaultHasher::new();
+ let mut hasher_b2 = DefaultHasher::new();
+ let (mut txa1, _) = mpsc::channel::<i32>(1);
+ let txa2 = txa1.clone();
+
+ let (mut txb1, _) = mpsc::channel::<i32>(1);
+ let txb2 = txb1.clone();
+
+ txa1.hash_receiver(&mut hasher_a1);
+ let hash_a1 = hasher_a1.finish();
+ txa2.hash_receiver(&mut hasher_a2);
+ let hash_a2 = hasher_a2.finish();
+ txb1.hash_receiver(&mut hasher_b1);
+ let hash_b1 = hasher_b1.finish();
+ txb2.hash_receiver(&mut hasher_b2);
+ let hash_b2 = hasher_b2.finish();
+
+ assert_eq!(hash_a1, hash_a2);
+ assert_eq!(hash_b1, hash_b2);
+ assert!(hash_a1 != hash_b1);
+
+ txa1.disconnect();
+ txb1.close_channel();
+
+ let mut hasher_a1 = DefaultHasher::new();
+ let mut hasher_a2 = DefaultHasher::new();
+ let mut hasher_b1 = DefaultHasher::new();
+ let mut hasher_b2 = DefaultHasher::new();
+
+ txa1.hash_receiver(&mut hasher_a1);
+ let hash_a1 = hasher_a1.finish();
+ txa2.hash_receiver(&mut hasher_a2);
+ let hash_a2 = hasher_a2.finish();
+ txb1.hash_receiver(&mut hasher_b1);
+ let hash_b1 = hasher_b1.finish();
+ txb2.hash_receiver(&mut hasher_b2);
+ let hash_b2 = hasher_b2.finish();
+
+ assert!(hash_a1 != hash_a2);
+ assert_eq!(hash_b1, hash_b2);
+}
+
+#[test]
+fn send_backpressure() {
+ let (waker, counter) = new_count_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ let (mut tx, mut rx) = mpsc::channel(1);
+ block_on(tx.send(1)).unwrap();
+
+ let mut task = tx.send(2);
+ assert_eq!(task.poll_unpin(&mut cx), Poll::Pending);
+ assert_eq!(counter, 0);
+
+ let item = block_on(rx.next()).unwrap();
+ assert_eq!(item, 1);
+ assert_eq!(counter, 1);
+ assert_eq!(task.poll_unpin(&mut cx), Poll::Ready(Ok(())));
+
+ let item = block_on(rx.next()).unwrap();
+ assert_eq!(item, 2);
+}
+
+#[test]
+fn send_backpressure_multi_senders() {
+ let (waker, counter) = new_count_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ let (mut tx1, mut rx) = mpsc::channel(1);
+ let mut tx2 = tx1.clone();
+ block_on(tx1.send(1)).unwrap();
+
+ let mut task = tx2.send(2);
+ assert_eq!(task.poll_unpin(&mut cx), Poll::Pending);
+ assert_eq!(counter, 0);
+
+ let item = block_on(rx.next()).unwrap();
+ assert_eq!(item, 1);
+ assert_eq!(counter, 1);
+ assert_eq!(task.poll_unpin(&mut cx), Poll::Ready(Ok(())));
+
+ let item = block_on(rx.next()).unwrap();
+ assert_eq!(item, 2);
+}
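The backpressure and `try_send` tests above depend on the capacity rule of bounded channels: a channel created with `mpsc::channel(n)` holds `n` buffered items plus one reserved slot per sender before `try_send` reports full. A minimal sketch of that rule (illustrative only, assuming the `futures` facade crate):

    use futures::channel::mpsc;

    fn main() {
        // capacity 1, plus one guaranteed slot for the single sender
        let (mut tx, mut rx) = mpsc::channel::<&str>(1);
        tx.try_send("a").unwrap();
        tx.try_send("b").unwrap();
        assert!(tx.try_send("c").unwrap_err().is_full()); // no room left
        assert_eq!(rx.try_next().unwrap(), Some("a"));
        tx.try_send("c").unwrap(); // room again after a receive
    }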
diff --git a/vendor/futures-channel/tests/oneshot.rs b/vendor/futures-channel/tests/oneshot.rs
new file mode 100644
index 000000000..979cd8a15
--- /dev/null
+++ b/vendor/futures-channel/tests/oneshot.rs
@@ -0,0 +1,252 @@
+use futures::channel::oneshot::{self, Sender};
+use futures::executor::block_on;
+use futures::future::{poll_fn, FutureExt};
+use futures::task::{Context, Poll};
+use futures_test::task::panic_waker_ref;
+use std::sync::mpsc;
+use std::thread;
+
+#[test]
+fn smoke_poll() {
+ let (mut tx, rx) = oneshot::channel::<u32>();
+ let mut rx = Some(rx);
+ let f = poll_fn(|cx| {
+ assert!(tx.poll_canceled(cx).is_pending());
+ assert!(tx.poll_canceled(cx).is_pending());
+ drop(rx.take());
+ assert!(tx.poll_canceled(cx).is_ready());
+ assert!(tx.poll_canceled(cx).is_ready());
+ Poll::Ready(())
+ });
+
+ block_on(f);
+}
+
+#[test]
+fn cancel_notifies() {
+ let (mut tx, rx) = oneshot::channel::<u32>();
+
+ let t = thread::spawn(move || {
+ block_on(tx.cancellation());
+ });
+ drop(rx);
+ t.join().unwrap();
+}
+
+#[test]
+fn cancel_lots() {
+ let (tx, rx) = mpsc::channel::<(Sender<_>, mpsc::Sender<_>)>();
+ let t = thread::spawn(move || {
+ for (mut tx, tx2) in rx {
+ block_on(tx.cancellation());
+ tx2.send(()).unwrap();
+ }
+ });
+
+ for _ in 0..20000 {
+ let (otx, orx) = oneshot::channel::<u32>();
+ let (tx2, rx2) = mpsc::channel();
+ tx.send((otx, tx2)).unwrap();
+ drop(orx);
+ rx2.recv().unwrap();
+ }
+ drop(tx);
+
+ t.join().unwrap();
+}
+
+#[test]
+fn cancel_after_sender_drop_doesnt_notify() {
+ let (mut tx, rx) = oneshot::channel::<u32>();
+ let mut cx = Context::from_waker(panic_waker_ref());
+ assert_eq!(tx.poll_canceled(&mut cx), Poll::Pending);
+ drop(tx);
+ drop(rx);
+}
+
+#[test]
+fn close() {
+ let (mut tx, mut rx) = oneshot::channel::<u32>();
+ rx.close();
+ block_on(poll_fn(|cx| {
+ match rx.poll_unpin(cx) {
+ Poll::Ready(Err(_)) => {}
+ _ => panic!(),
+ };
+ assert!(tx.poll_canceled(cx).is_ready());
+ Poll::Ready(())
+ }));
+}
+
+#[test]
+fn close_wakes() {
+ let (mut tx, mut rx) = oneshot::channel::<u32>();
+ let (tx2, rx2) = mpsc::channel();
+ let t = thread::spawn(move || {
+ rx.close();
+ rx2.recv().unwrap();
+ });
+ block_on(tx.cancellation());
+ tx2.send(()).unwrap();
+ t.join().unwrap();
+}
+
+#[test]
+fn is_canceled() {
+ let (tx, rx) = oneshot::channel::<u32>();
+ assert!(!tx.is_canceled());
+ drop(rx);
+ assert!(tx.is_canceled());
+}
+
+#[test]
+fn cancel_sends() {
+ let (tx, rx) = mpsc::channel::<Sender<_>>();
+ let t = thread::spawn(move || {
+ for otx in rx {
+ let _ = otx.send(42);
+ }
+ });
+
+ for _ in 0..20000 {
+ let (otx, mut orx) = oneshot::channel::<u32>();
+ tx.send(otx).unwrap();
+
+ orx.close();
+ let _ = block_on(orx);
+ }
+
+ drop(tx);
+ t.join().unwrap();
+}
+
+// #[test]
+// fn spawn_sends_items() {
+// let core = local_executor::Core::new();
+// let future = ok::<_, ()>(1);
+// let rx = spawn(future, &core);
+// assert_eq!(core.run(rx).unwrap(), 1);
+// }
+//
+// #[test]
+// fn spawn_kill_dead_stream() {
+// use std::thread;
+// use std::time::Duration;
+// use futures::future::Either;
+// use futures::sync::oneshot;
+//
+// // a future which never returns anything (forever accepting incoming
+// // connections), but dropping it leads to observable side effects
+// // (like closing listening sockets, releasing limited resources,
+// // ...)
+// #[derive(Debug)]
+// struct Dead {
+// // when dropped you should get Err(oneshot::Canceled) on the
+// // receiving end
+// done: oneshot::Sender<()>,
+// }
+// impl Future for Dead {
+// type Item = ();
+// type Error = ();
+//
+// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+// Ok(Poll::Pending)
+// }
+// }
+//
+// // need to implement a timeout for the test, as it would hang
+// // forever right now
+// let (timeout_tx, timeout_rx) = oneshot::channel();
+// thread::spawn(move || {
+// thread::sleep(Duration::from_millis(1000));
+// let _ = timeout_tx.send(());
+// });
+//
+// let core = local_executor::Core::new();
+// let (done_tx, done_rx) = oneshot::channel();
+// let future = Dead{done: done_tx};
+// let rx = spawn(future, &core);
+// let res = core.run(
+// Ok::<_, ()>(())
+// .into_future()
+// .then(move |_| {
+// // now drop the spawned future: maybe some timeout exceeded,
+// // or some connection on this end was closed by the remote
+// // end.
+// drop(rx);
+// // and wait for the spawned future to release its resources
+// done_rx
+// })
+// .select2(timeout_rx)
+// );
+// match res {
+// Err(Either::A((oneshot::Canceled, _))) => (),
+// Ok(Either::B(((), _))) => {
+// panic!("dead future wasn't canceled (timeout)");
+// },
+// _ => {
+// panic!("dead future wasn't canceled (unexpected result)");
+// },
+// }
+// }
+//
+// #[test]
+// fn spawn_dont_kill_forgot_dead_stream() {
+// use std::thread;
+// use std::time::Duration;
+// use futures::future::Either;
+// use futures::sync::oneshot;
+//
+// // a future which never returns anything (forever accepting incoming
+// // connections), but dropping it leads to observable side effects
+// // (like closing listening sockets, releasing limited resources,
+// // ...)
+// #[derive(Debug)]
+// struct Dead {
+// // when dropped you should get Err(oneshot::Canceled) on the
+// // receiving end
+// done: oneshot::Sender<()>,
+// }
+// impl Future for Dead {
+// type Item = ();
+// type Error = ();
+//
+// fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+// Ok(Poll::Pending)
+// }
+// }
+//
+// // need to implement a timeout for the test, as it would hang
+// // forever right now
+// let (timeout_tx, timeout_rx) = oneshot::channel();
+// thread::spawn(move || {
+// thread::sleep(Duration::from_millis(1000));
+// let _ = timeout_tx.send(());
+// });
+//
+// let core = local_executor::Core::new();
+// let (done_tx, done_rx) = oneshot::channel();
+// let future = Dead{done: done_tx};
+// let rx = spawn(future, &core);
+// let res = core.run(
+// Ok::<_, ()>(())
+// .into_future()
+// .then(move |_| {
+// // forget the spawned future: should keep running, i.e. hit
+// // the timeout below.
+// rx.forget();
+// // and wait for the spawned future to release its resources
+// done_rx
+// })
+// .select2(timeout_rx)
+// );
+// match res {
+// Err(Either::A((oneshot::Canceled, _))) => {
+// panic!("forgotten dead future was canceled");
+// },
+// Ok(Either::B(((), _))) => (), // reached timeout
+// _ => {
+// panic!("forgotten dead future was canceled (unexpected result)");
+// },
+// }
+// }
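For context on the cancellation tests above, a small sketch (illustrative only, using the `futures` facade crate) of the sender-side cancellation API they exercise: dropping the receiver marks the sender as canceled, and `cancellation()` resolves as soon as that happens.

    use futures::channel::oneshot;
    use futures::executor::block_on;

    fn main() {
        let (mut tx, rx) = oneshot::channel::<u32>();
        assert!(!tx.is_canceled());  // receiver still alive
        drop(rx);
        assert!(tx.is_canceled());   // receiver gone
        block_on(tx.cancellation()); // resolves immediately once canceled
    }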
diff --git a/vendor/futures-core/.cargo-checksum.json b/vendor/futures-core/.cargo-checksum.json
new file mode 100644
index 000000000..77e16eb62
--- /dev/null
+++ b/vendor/futures-core/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"a167cc2eb28add765dbe69220643c977744f206230321ac071e4bbb39981c8b9","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"e8258273fed6f1796485777655118f2369fd3f000191e9d8cdbd10bf052946a9","build.rs":"f6e21c09f18cc405bd7048cb7a2958f92d5414b9ca6b301d137e120a84fa020a","no_atomic_cas.rs":"ff8be002b49a5cd9e4ca0db17b1c9e6b98e55f556319eb6b953dd6ff52c397a6","src/future.rs":"0cb559fad0d43566dab959e929c4631c25cf749e2e29a5444fbcad464c9262ae","src/lib.rs":"eacd5816fbb914ca061d49ff6203723ebbe639eb7c45ebfa8a0613069d174111","src/stream.rs":"f1c7ab84161c5d5b424655b257fc3183eb6f2ed5324ba4006a70f9a4b0dc8872","src/task/__internal/atomic_waker.rs":"4ca94b25d3bcf4db863f008224cc4797dbbe7c93495a1abb232048846694a716","src/task/__internal/mod.rs":"7d0d297f58987b05ffa152605feb78ddc9b6e5168e7d621ec36dfbee558e4bec","src/task/mod.rs":"e213602a2fe5ae78ad5f1ca20e6d32dcbab17aba5b6b072fb927a72da99b4a11","src/task/poll.rs":"74c2717c1f9a37587a367da1b690d1cd2312e95dbaffca42be4755f1cd164bb8"},"package":"d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7"} \ No newline at end of file
diff --git a/vendor/futures-core/Cargo.toml b/vendor/futures-core/Cargo.toml
new file mode 100644
index 000000000..69deea9fe
--- /dev/null
+++ b/vendor/futures-core/Cargo.toml
@@ -0,0 +1,34 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.36"
+name = "futures-core"
+version = "0.3.19"
+description = "The core traits and types for the `futures` library.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+
+[dev-dependencies]
+
+[features]
+alloc = []
+cfg-target-has-atomic = []
+default = ["std"]
+std = ["alloc"]
+unstable = []
diff --git a/vendor/futures-core/LICENSE-APACHE b/vendor/futures-core/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-core/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-core/LICENSE-MIT b/vendor/futures-core/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-core/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-core/README.md b/vendor/futures-core/README.md
new file mode 100644
index 000000000..96e0e064b
--- /dev/null
+++ b/vendor/futures-core/README.md
@@ -0,0 +1,23 @@
+# futures-core
+
+The core traits and types for the `futures` library.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-core = "0.3"
+```
+
+The current `futures-core` requires Rust 1.36 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/vendor/futures-core/build.rs b/vendor/futures-core/build.rs
new file mode 100644
index 000000000..07b50bd55
--- /dev/null
+++ b/vendor/futures-core/build.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms, single_use_lifetimes)]
+
+use std::env;
+
+include!("no_atomic_cas.rs");
+
+// The rustc-cfg listed below are considered public API, but it is *unstable*
+// and outside of the normal semver guarantees:
+//
+// - `futures_no_atomic_cas`
+// Assume the target does *not* support atomic CAS operations.
+// This is usually detected automatically by the build script, but you may
+// need to enable it manually when building for custom targets or using
+// non-cargo build systems that don't run the build script.
+//
+// With the exceptions mentioned above, the rustc-cfg strings below are
+// *not* public API. Please let us know by opening a GitHub issue if your build
+// environment requires some way to enable these cfgs other than by executing
+// our build script.
+fn main() {
+ let target = match env::var("TARGET") {
+ Ok(target) => target,
+ Err(e) => {
+ println!(
+ "cargo:warning={}: unable to get TARGET environment variable: {}",
+ env!("CARGO_PKG_NAME"),
+ e
+ );
+ return;
+ }
+ };
+
+ // Note that this is `no_*`, not `has_*`. This allows treating
+ // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't
+ // run. This is needed for compatibility with non-cargo build systems that
+ // don't run the build script.
+ if NO_ATOMIC_CAS_TARGETS.contains(&&*target) {
+ println!("cargo:rustc-cfg=futures_no_atomic_cas");
+ }
+
+ println!("cargo:rerun-if-changed=no_atomic_cas.rs");
+}
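A hedged illustration of how a cfg emitted this way is typically consumed elsewhere in a crate (the module and function below are hypothetical and not taken from the vendored sources): code that needs atomic compare-and-swap is gated on the cfg so that the listed targets still build.

    // Hypothetical consumer of the `futures_no_atomic_cas` cfg emitted above.
    #[cfg(not(futures_no_atomic_cas))]
    mod needs_cas {
        use core::sync::atomic::{AtomicUsize, Ordering};

        /// Increment `counter` via compare-and-swap and return the new value.
        pub fn bump(counter: &AtomicUsize) -> usize {
            loop {
                let cur = counter.load(Ordering::Relaxed);
                // `compare_exchange` requires atomic CAS support on the target.
                if counter
                    .compare_exchange(cur, cur + 1, Ordering::AcqRel, Ordering::Relaxed)
                    .is_ok()
                {
                    return cur + 1;
                }
            }
        }
    }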
diff --git a/vendor/futures-core/no_atomic_cas.rs b/vendor/futures-core/no_atomic_cas.rs
new file mode 100644
index 000000000..4708bf853
--- /dev/null
+++ b/vendor/futures-core/no_atomic_cas.rs
@@ -0,0 +1,13 @@
+// This file is @generated by no_atomic_cas.sh.
+// It is not intended for manual editing.
+
+const NO_ATOMIC_CAS_TARGETS: &[&str] = &[
+ "avr-unknown-gnu-atmega328",
+ "bpfeb-unknown-none",
+ "bpfel-unknown-none",
+ "msp430-none-elf",
+ "riscv32i-unknown-none-elf",
+ "riscv32imc-unknown-none-elf",
+ "thumbv4t-none-eabi",
+ "thumbv6m-none-eabi",
+];
diff --git a/vendor/futures-core/src/future.rs b/vendor/futures-core/src/future.rs
new file mode 100644
index 000000000..7540cd027
--- /dev/null
+++ b/vendor/futures-core/src/future.rs
@@ -0,0 +1,103 @@
+//! Futures.
+
+use core::ops::DerefMut;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+#[doc(no_inline)]
+pub use core::future::Future;
+
+/// An owned dynamically typed [`Future`] for use in cases where you can't
+/// statically type your result or need to add some indirection.
+#[cfg(feature = "alloc")]
+pub type BoxFuture<'a, T> = Pin<alloc::boxed::Box<dyn Future<Output = T> + Send + 'a>>;
+
+/// `BoxFuture`, but without the `Send` requirement.
+#[cfg(feature = "alloc")]
+pub type LocalBoxFuture<'a, T> = Pin<alloc::boxed::Box<dyn Future<Output = T> + 'a>>;
+
+/// A future which tracks whether or not the underlying future
+/// should no longer be polled.
+///
+/// `is_terminated` will return `true` if a future should no longer be polled.
+/// Usually, this state occurs after `poll` (or `try_poll`) returned
+/// `Poll::Ready`. However, `is_terminated` may also return `true` if a future
+/// has become inactive and can no longer make progress and should be ignored
+/// or dropped rather than being `poll`ed again.
+pub trait FusedFuture: Future {
+ /// Returns `true` if the underlying future should no longer be polled.
+ fn is_terminated(&self) -> bool;
+}
+
+impl<F: FusedFuture + ?Sized + Unpin> FusedFuture for &mut F {
+ fn is_terminated(&self) -> bool {
+ <F as FusedFuture>::is_terminated(&**self)
+ }
+}
+
+impl<P> FusedFuture for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: FusedFuture,
+{
+ fn is_terminated(&self) -> bool {
+ <P::Target as FusedFuture>::is_terminated(&**self)
+ }
+}
+
+mod private_try_future {
+ use super::Future;
+
+ pub trait Sealed {}
+
+ impl<F, T, E> Sealed for F where F: ?Sized + Future<Output = Result<T, E>> {}
+}
+
+/// A convenience for futures that return `Result` values that includes
+/// a variety of adapters tailored to such futures.
+pub trait TryFuture: Future + private_try_future::Sealed {
+ /// The type of successful values yielded by this future
+ type Ok;
+
+ /// The type of failures yielded by this future
+ type Error;
+
+ /// Poll this `TryFuture` as if it were a `Future`.
+ ///
+ /// This method is a stopgap for a compiler limitation that prevents us from
+ /// directly inheriting from the `Future` trait; in the future it won't be
+ /// needed.
+ fn try_poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<Self::Ok, Self::Error>>;
+}
+
+impl<F, T, E> TryFuture for F
+where
+ F: ?Sized + Future<Output = Result<T, E>>,
+{
+ type Ok = T;
+ type Error = E;
+
+ #[inline]
+ fn try_poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.poll(cx)
+ }
+}
+
+#[cfg(feature = "alloc")]
+mod if_alloc {
+ use super::*;
+ use alloc::boxed::Box;
+
+ impl<F: FusedFuture + ?Sized + Unpin> FusedFuture for Box<F> {
+ fn is_terminated(&self) -> bool {
+ <F as FusedFuture>::is_terminated(&**self)
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<F: FusedFuture> FusedFuture for std::panic::AssertUnwindSafe<F> {
+ fn is_terminated(&self) -> bool {
+ <F as FusedFuture>::is_terminated(&**self)
+ }
+ }
+}
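A short usage sketch for the items defined in this file (illustrative only, written against the `futures` facade crate, which re-exports `BoxFuture` and the `TryFuture` adapters): boxing erases the concrete future type, and `Result`-returning futures pick up the `TryFutureExt` combinators.

    use futures::executor::block_on;
    use futures::future::{BoxFuture, FutureExt, TryFutureExt};

    // Type-erase an async block behind a BoxFuture.
    fn boxed_parse(input: &'static str) -> BoxFuture<'static, Result<i32, String>> {
        async move { input.trim().parse::<i32>().map_err(|e| e.to_string()) }.boxed()
    }

    fn main() {
        // `map_ok` is available because the boxed future implements TryFuture.
        let fut = boxed_parse(" 42 ").map_ok(|n| n * 2);
        assert_eq!(block_on(fut), Ok(84));
    }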
diff --git a/vendor/futures-core/src/lib.rs b/vendor/futures-core/src/lib.rs
new file mode 100644
index 000000000..9c31d8d90
--- /dev/null
+++ b/vendor/futures-core/src/lib.rs
@@ -0,0 +1,27 @@
+//! Core traits and types for asynchronous operations in Rust.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)]
+// It cannot be included in the published code because this lint has false positives in the minimum required version.
+#![cfg_attr(test, warn(single_use_lifetimes))]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+pub mod future;
+#[doc(no_inline)]
+pub use self::future::{FusedFuture, Future, TryFuture};
+
+pub mod stream;
+#[doc(no_inline)]
+pub use self::stream::{FusedStream, Stream, TryStream};
+
+#[macro_use]
+pub mod task;
diff --git a/vendor/futures-core/src/stream.rs b/vendor/futures-core/src/stream.rs
new file mode 100644
index 000000000..ad5350b79
--- /dev/null
+++ b/vendor/futures-core/src/stream.rs
@@ -0,0 +1,235 @@
+//! Asynchronous streams.
+
+use core::ops::DerefMut;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// An owned dynamically typed [`Stream`] for use in cases where you can't
+/// statically type your result or need to add some indirection.
+#[cfg(feature = "alloc")]
+pub type BoxStream<'a, T> = Pin<alloc::boxed::Box<dyn Stream<Item = T> + Send + 'a>>;
+
+/// `BoxStream`, but without the `Send` requirement.
+#[cfg(feature = "alloc")]
+pub type LocalBoxStream<'a, T> = Pin<alloc::boxed::Box<dyn Stream<Item = T> + 'a>>;
+
+/// A stream of values produced asynchronously.
+///
+/// If `Future<Output = T>` is an asynchronous version of `T`, then `Stream<Item
+/// = T>` is an asynchronous version of `Iterator<Item = T>`. A stream
+/// represents a sequence of value-producing events that occur asynchronously to
+/// the caller.
+///
+/// The trait is modeled after `Future`, but allows `poll_next` to be called
+/// even after a value has been produced, yielding `None` once the stream has
+/// been fully exhausted.
+#[must_use = "streams do nothing unless polled"]
+pub trait Stream {
+ /// Values yielded by the stream.
+ type Item;
+
+ /// Attempt to pull out the next value of this stream, registering the
+ /// current task for wakeup if the value is not yet available, and returning
+ /// `None` if the stream is exhausted.
+ ///
+ /// # Return value
+ ///
+ /// There are several possible return values, each indicating a distinct
+ /// stream state:
+ ///
+ /// - `Poll::Pending` means that this stream's next value is not ready
+ /// yet. Implementations will ensure that the current task will be notified
+ /// when the next value may be ready.
+ ///
+ /// - `Poll::Ready(Some(val))` means that the stream has successfully
+ /// produced a value, `val`, and may produce further values on subsequent
+ /// `poll_next` calls.
+ ///
+ /// - `Poll::Ready(None)` means that the stream has terminated, and
+ /// `poll_next` should not be invoked again.
+ ///
+ /// # Panics
+ ///
+ /// Once a stream has finished (returned `Ready(None)` from `poll_next`), calling its
+ /// `poll_next` method again may panic, block forever, or cause other kinds of
+ /// problems; the `Stream` trait places no requirements on the effects of
+ /// such a call. However, as the `poll_next` method is not marked `unsafe`,
+ /// Rust's usual rules apply: calls must never cause undefined behavior
+ /// (memory corruption, incorrect use of `unsafe` functions, or the like),
+ /// regardless of the stream's state.
+ ///
+ /// If this is difficult to guard against then the [`fuse`] adapter can be used
+ /// to ensure that `poll_next` always returns `Ready(None)` in subsequent
+ /// calls.
+ ///
+ /// [`fuse`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.fuse
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
+
+ /// Returns the bounds on the remaining length of the stream.
+ ///
+ /// Specifically, `size_hint()` returns a tuple where the first element
+ /// is the lower bound, and the second element is the upper bound.
+ ///
+ /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`.
+ /// A [`None`] here means that either there is no known upper bound, or the
+ /// upper bound is larger than [`usize`].
+ ///
+ /// # Implementation notes
+ ///
+ /// It is not enforced that a stream implementation yields the declared
+ /// number of elements. A buggy stream may yield less than the lower bound
+ /// or more than the upper bound of elements.
+ ///
+ /// `size_hint()` is primarily intended to be used for optimizations such as
+ /// reserving space for the elements of the stream, but must not be
+ /// trusted to e.g., omit bounds checks in unsafe code. An incorrect
+ /// implementation of `size_hint()` should not lead to memory safety
+ /// violations.
+ ///
+ /// That said, the implementation should provide a correct estimation,
+ /// because otherwise it would be a violation of the trait's protocol.
+ ///
+ /// The default implementation returns `(0, `[`None`]`)` which is correct for any
+ /// stream.
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, None)
+ }
+}
+
+impl<S: ?Sized + Stream + Unpin> Stream for &mut S {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ S::poll_next(Pin::new(&mut **self), cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
+
+impl<P> Stream for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: Stream,
+{
+ type Item = <P::Target as Stream>::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.get_mut().as_mut().poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
+
+/// A stream which tracks whether or not the underlying stream
+/// should no longer be polled.
+///
+/// `is_terminated` will return `true` if a stream should no longer be polled.
+/// Usually, this state occurs after `poll_next` (or `try_poll_next`) returned
+/// `Poll::Ready(None)`. However, `is_terminated` may also return `true` if a
+/// stream has become inactive and can no longer make progress and should be
+/// ignored or dropped rather than being polled again.
+pub trait FusedStream: Stream {
+ /// Returns `true` if the stream should no longer be polled.
+ fn is_terminated(&self) -> bool;
+}
+
+impl<F: ?Sized + FusedStream + Unpin> FusedStream for &mut F {
+ fn is_terminated(&self) -> bool {
+ <F as FusedStream>::is_terminated(&**self)
+ }
+}
+
+impl<P> FusedStream for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: FusedStream,
+{
+ fn is_terminated(&self) -> bool {
+ <P::Target as FusedStream>::is_terminated(&**self)
+ }
+}
+
+mod private_try_stream {
+ use super::Stream;
+
+ pub trait Sealed {}
+
+ impl<S, T, E> Sealed for S where S: ?Sized + Stream<Item = Result<T, E>> {}
+}
+
+/// A convenience for streams that return `Result` values that includes
+/// a variety of adapters tailored to such streams.
+pub trait TryStream: Stream + private_try_stream::Sealed {
+ /// The type of successful values yielded by this stream
+ type Ok;
+
+ /// The type of failures yielded by this stream
+ type Error;
+
+ /// Poll this `TryStream` as if it were a `Stream`.
+ ///
+ /// This method is a stopgap for a compiler limitation that prevents us from
+ /// directly inheriting from the `Stream` trait; in the future it won't be
+ /// needed.
+ fn try_poll_next(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<Self::Ok, Self::Error>>>;
+}
+
+impl<S, T, E> TryStream for S
+where
+ S: ?Sized + Stream<Item = Result<T, E>>,
+{
+ type Ok = T;
+ type Error = E;
+
+ fn try_poll_next(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<Self::Ok, Self::Error>>> {
+ self.poll_next(cx)
+ }
+}
+
+#[cfg(feature = "alloc")]
+mod if_alloc {
+ use super::*;
+ use alloc::boxed::Box;
+
+ impl<S: ?Sized + Stream + Unpin> Stream for Box<S> {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Pin::new(&mut **self).poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<S: Stream> Stream for std::panic::AssertUnwindSafe<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
+ unsafe { self.map_unchecked_mut(|x| &mut x.0) }.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+ }
+
+ impl<S: ?Sized + FusedStream + Unpin> FusedStream for Box<S> {
+ fn is_terminated(&self) -> bool {
+ <S as FusedStream>::is_terminated(&**self)
+ }
+ }
+}
diff --git a/vendor/futures-core/src/task/__internal/atomic_waker.rs b/vendor/futures-core/src/task/__internal/atomic_waker.rs
new file mode 100644
index 000000000..d49d04361
--- /dev/null
+++ b/vendor/futures-core/src/task/__internal/atomic_waker.rs
@@ -0,0 +1,409 @@
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering::{AcqRel, Acquire, Release};
+use core::task::Waker;
+
+/// A synchronization primitive for task wakeup.
+///
+/// Sometimes the task interested in a given event will change over time.
+/// An `AtomicWaker` can coordinate concurrent notifications with the consumer
+/// potentially "updating" the underlying task to wake up. This is useful in
+/// scenarios where a computation completes in another thread and wants to
+/// notify the consumer, but the consumer is in the process of being migrated to
+/// a new logical task.
+///
+/// Consumers should call `register` before checking the result of a computation
+/// and producers should call `wake` after producing the computation (this
+/// differs from the usual `thread::park` pattern). It is also permitted for
+/// `wake` to be called **before** `register`. This results in a no-op.
+///
+/// A single `AtomicWaker` may be reused for any number of calls to `register` or
+/// `wake`.
+///
+/// # Memory ordering
+///
+/// Calling `register` "acquires" all memory "released" by calls to `wake`
+/// before the call to `register`. Later calls to `wake` will wake the
+/// registered waker (on contention this wake might be triggered in `register`).
+///
+/// For concurrent calls to `register` (should be avoided) the ordering is only
+/// guaranteed for the winning call.
+///
+/// # Examples
+///
+/// Here is a simple example providing a `Flag` that can be signalled manually
+/// when it is ready.
+///
+/// ```
+/// use futures::future::Future;
+/// use futures::task::{Context, Poll, AtomicWaker};
+/// use std::sync::Arc;
+/// use std::sync::atomic::AtomicBool;
+/// use std::sync::atomic::Ordering::Relaxed;
+/// use std::pin::Pin;
+///
+/// struct Inner {
+/// waker: AtomicWaker,
+/// set: AtomicBool,
+/// }
+///
+/// #[derive(Clone)]
+/// struct Flag(Arc<Inner>);
+///
+/// impl Flag {
+/// pub fn new() -> Self {
+/// Self(Arc::new(Inner {
+/// waker: AtomicWaker::new(),
+/// set: AtomicBool::new(false),
+/// }))
+/// }
+///
+/// pub fn signal(&self) {
+/// self.0.set.store(true, Relaxed);
+/// self.0.waker.wake();
+/// }
+/// }
+///
+/// impl Future for Flag {
+/// type Output = ();
+///
+/// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+/// // quick check to avoid registration if already done.
+/// if self.0.set.load(Relaxed) {
+/// return Poll::Ready(());
+/// }
+///
+/// self.0.waker.register(cx.waker());
+///
+/// // Need to check condition **after** `register` to avoid a race
+/// // condition that would result in lost notifications.
+/// if self.0.set.load(Relaxed) {
+/// Poll::Ready(())
+/// } else {
+/// Poll::Pending
+/// }
+/// }
+/// }
+/// ```
+pub struct AtomicWaker {
+ state: AtomicUsize,
+ waker: UnsafeCell<Option<Waker>>,
+}
+
+// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell
+// stores a `Waker` value produced by calls to `register` and many threads can
+// race to take the waker (to wake it) by calling `wake`.
+//
+// If a new `Waker` instance is produced by calling `register` before an
+// existing one is consumed, then the existing one is overwritten.
+//
+// While `AtomicWaker` is single-producer, the implementation ensures memory
+// safety. In the event of concurrent calls to `register`, there will be a
+// single winner whose waker will get stored in the cell. The losers will not
+// have their tasks woken. As such, callers should ensure to add synchronization
+// to calls to `register`.
+//
+// The implementation uses a single `AtomicUsize` value to coordinate access to
+// the `Waker` cell. There are two bits that are operated on independently.
+// These are represented by `REGISTERING` and `WAKING`.
+//
+// The `REGISTERING` bit is set when a producer enters the critical section. The
+// `WAKING` bit is set when a consumer enters the critical section. Neither bit
+// being set is represented by `WAITING`.
+//
+// A thread obtains an exclusive lock on the waker cell by transitioning the
+// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the operation
+// the thread wishes to perform. When this transition is made, it is guaranteed
+// that no other thread will access the waker cell.
+//
+// # Registering
+//
+// On a call to `register`, an attempt to transition the state from WAITING to
+// REGISTERING is made. On success, the caller obtains a lock on the waker cell.
+//
+// If the lock is obtained, then the thread sets the waker cell to the waker
+// provided as an argument. Then it attempts to transition the state back from
+// `REGISTERING` -> `WAITING`.
+//
+// If this transition is successful, then the registering process is complete
+// and the next call to `wake` will observe the waker.
+//
+// If the transition fails, then there was a concurrent call to `wake` that was
+// unable to access the waker cell (due to the registering thread holding the
+// lock). To handle this, the registering thread removes the waker it just set
+// from the cell and calls `wake` on it. This call to wake represents the
+// attempt to wake by the other thread (that set the `WAKING` bit). The state is
+// then transitioned from `REGISTERING | WAKING` back to `WAITING`. This
+// transition must succeed because, at this point, the state cannot be
+// transitioned by another thread.
+//
+// # Waking
+//
+// On a call to `wake`, an attempt to transition the state from `WAITING` to
+// `WAKING` is made. On success, the caller obtains a lock on the waker cell.
+//
+// If the lock is obtained, then the thread takes ownership of the current value
+// in the waker cell, and calls `wake` on it. The state is then transitioned
+// back to `WAITING`. This transition must succeed as, at this point, the state
+// cannot be transitioned by another thread.
+//
+// If the thread is unable to obtain the lock, then the `WAKING` bit is still set. This
+// is because it has either been set by the current thread but the previous
+// value included the `REGISTERING` bit **or** a concurrent thread is in the
+// `WAKING` critical section. Either way, no action must be taken.
+//
+// If the current thread is the only concurrent call to `wake` and another
+// thread is in the `register` critical section, when the other thread **exits**
+// the `register` critical section, it will observe the `WAKING` bit and handle
+// the wake itself.
+//
+// If another thread is in the `wake` critical section, then it will handle
+// waking the task.
+//
+// # A potential race (is safely handled).
+//
+// Imagine the following situation:
+//
+// * Thread A obtains the `wake` lock and wakes a task.
+//
+// * Before thread A releases the `wake` lock, the woken task is scheduled.
+//
+// * Thread B attempts to wake the task. In theory this should result in the
+// task being woken, but it cannot because thread A still holds the wake lock.
+//
+// This case is handled by requiring users of `AtomicWaker` to call `register`
+// **before** attempting to observe the application state change that resulted
+// in the task being awoken. The wakers also change the application state before
+// calling wake.
+//
+// Because of this, the waker will do one of two things.
+//
+// 1) Observe the application state change that Thread B is woken for. In this
+// case, it is OK for Thread B's wake to be lost.
+//
+// 2) Call register before attempting to observe the application state. Since
+// Thread A still holds the `wake` lock, the call to `register` will result
+// in the task waking itself and get scheduled again.
+
+/// Idle state
+const WAITING: usize = 0;
+
+/// A new waker value is being registered with the `AtomicWaker` cell.
+const REGISTERING: usize = 0b01;
+
+/// The waker currently registered with the `AtomicWaker` cell is being woken.
+const WAKING: usize = 0b10;
+
+impl AtomicWaker {
+ /// Create an `AtomicWaker`.
+ pub const fn new() -> Self {
+ // Make sure that task is Sync
+ trait AssertSync: Sync {}
+ impl AssertSync for Waker {}
+
+ Self { state: AtomicUsize::new(WAITING), waker: UnsafeCell::new(None) }
+ }
+
+ /// Registers the waker to be notified on calls to `wake`.
+ ///
+ /// The new task will take the place of any previous tasks that were registered
+ /// by previous calls to `register`. Any calls to `wake` that happen after
+ /// a call to `register` (as defined by the memory ordering rules), will
+ /// notify the `register` caller's task and deregister the waker from future
+ /// notifications. Because of this, callers should ensure `register` gets
+ /// invoked with a new `Waker` **each** time they require a wakeup.
+ ///
+ /// It is safe to call `register` with multiple other threads concurrently
+ /// calling `wake`. This will result in the `register` caller's current
+ /// task being notified once.
+ ///
+ /// This function is safe to call concurrently, but this is generally a bad
+ /// idea. Concurrent calls to `register` will attempt to register different
+ /// tasks to be notified. One of the callers will win and have its task set,
+ /// but there is no guarantee as to which caller will succeed.
+ ///
+ /// # Examples
+ ///
+ /// Here is how `register` is used when implementing a flag.
+ ///
+ /// ```
+ /// use futures::future::Future;
+ /// use futures::task::{Context, Poll, AtomicWaker};
+ /// use std::sync::atomic::AtomicBool;
+ /// use std::sync::atomic::Ordering::Relaxed;
+ /// use std::pin::Pin;
+ ///
+ /// struct Flag {
+ /// waker: AtomicWaker,
+ /// set: AtomicBool,
+ /// }
+ ///
+ /// impl Future for Flag {
+ /// type Output = ();
+ ///
+ /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ /// // Register **before** checking `set` to avoid a race condition
+ /// // that would result in lost notifications.
+ /// self.waker.register(cx.waker());
+ ///
+ /// if self.set.load(Relaxed) {
+ /// Poll::Ready(())
+ /// } else {
+ /// Poll::Pending
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub fn register(&self, waker: &Waker) {
+ match self
+ .state
+ .compare_exchange(WAITING, REGISTERING, Acquire, Acquire)
+ .unwrap_or_else(|x| x)
+ {
+ WAITING => {
+ unsafe {
+ // Lock acquired, update the waker cell
+ *self.waker.get() = Some(waker.clone());
+
+ // Release the lock. If the state transitioned to include
+ // the `WAKING` bit, this means that at least one wake has
+ // been called concurrently.
+ //
+ // Start by assuming that the state is `REGISTERING` as this
+ // is what we just set it to. If this holds, we know that no
+ // other writes were performed in the meantime, so there is
+ // nothing to acquire, only release. In case of concurrent
+ // wakers, we need to acquire their releases, so success needs
+ // to do both.
+ let res = self.state.compare_exchange(REGISTERING, WAITING, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => {
+ // memory ordering: acquired self.state during CAS
+ // - if previous wakes went through it syncs with
+ // their final release (`fetch_and`)
+ // - if there was no previous wake the next wake
+ // will wake us, no sync needed.
+ }
+ Err(actual) => {
+ // This branch can only be reached if at least one
+ // concurrent thread called `wake`. In this
+ // case, `actual` **must** be `REGISTERING | WAKING`.
+ debug_assert_eq!(actual, REGISTERING | WAKING);
+
+ // Take the waker to wake once the atomic operation has
+ // completed.
+ let waker = (*self.waker.get()).take().unwrap();
+
+ // We need to return to WAITING state (clear our lock and
+ // concurrent WAKING flag). This needs to acquire all
+ // WAKING fetch_or releases and it needs to release our
+ // update to self.waker, so we need a `swap` operation.
+ self.state.swap(WAITING, AcqRel);
+
+ // memory ordering: we acquired the state for all
+ // concurrent wakes, but future wakes might still
+ // need to wake us in case we can't make progress
+ // from the pending wakes.
+ //
+ // So we simply schedule to come back later (we could
+ // also simply leave the registration in place above).
+ waker.wake();
+ }
+ }
+ }
+ }
+ WAKING => {
+ // Currently in the process of waking the task, i.e.,
+ // `wake` is currently being called on the old task handle.
+ //
+ // memory ordering: we acquired the state for all
+ // concurrent wakes, but future wakes might still
+ // need to wake us in case we can't make progress
+ // from the pending wakes.
+ //
+ // So we simply schedule to come back later (we
+ // could also spin here trying to acquire the lock
+ // to register).
+ waker.wake_by_ref();
+ }
+ state => {
+ // In this case, a concurrent thread is holding the
+ // "registering" lock. This probably indicates a bug in the
+ // caller's code as racing to call `register` doesn't make much
+ // sense.
+ //
+ // memory ordering: don't care. a concurrent register() is going
+ // to succeed and provide proper memory ordering.
+ //
+ // We just want to maintain memory safety. It is ok to drop the
+ // call to `register`.
+ debug_assert!(state == REGISTERING || state == REGISTERING | WAKING);
+ }
+ }
+ }
+
+ /// Calls `wake` on the last `Waker` passed to `register`.
+ ///
+ /// If `register` has not been called yet, then this does nothing.
+ pub fn wake(&self) {
+ if let Some(waker) = self.take() {
+ waker.wake();
+ }
+ }
+
+ /// Returns the last `Waker` passed to `register`, so that the user can wake it.
+ ///
+ /// Sometimes, just waking the AtomicWaker is not fine-grained enough. This allows the user
+ /// to take the waker and then wake it separately, rather than performing both steps in one
+ /// atomic action.
+ ///
+ /// If a waker has not been registered, this returns `None`.
+ pub fn take(&self) -> Option<Waker> {
+ // AcqRel ordering is used in order to acquire the value of the `task`
+ // cell as well as to establish a `release` ordering with whatever
+ // memory the `AtomicWaker` is associated with.
+ match self.state.fetch_or(WAKING, AcqRel) {
+ WAITING => {
+ // The waking lock has been acquired.
+ let waker = unsafe { (*self.waker.get()).take() };
+
+ // Release the lock
+ self.state.fetch_and(!WAKING, Release);
+
+ waker
+ }
+ state => {
+ // There is a concurrent thread currently updating the
+ // associated task.
+ //
+ // Nothing more to do as the `WAKING` bit has been set. It
+ // doesn't matter if there are concurrent registering threads or
+ // not.
+ //
+ debug_assert!(
+ state == REGISTERING || state == REGISTERING | WAKING || state == WAKING
+ );
+ None
+ }
+ }
+ }
+}
+
+impl Default for AtomicWaker {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl fmt::Debug for AtomicWaker {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "AtomicWaker")
+ }
+}
+
+unsafe impl Send for AtomicWaker {}
+unsafe impl Sync for AtomicWaker {}
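
Editor's sketch of the take-and-wake pattern described in the `take` documentation, assuming the umbrella `futures` crate (which re-exports `AtomicWaker` and the `noop_waker` helper used here as a stand-in for a real task's waker).

use futures::task::{noop_waker, AtomicWaker};

fn main() {
    let cell = AtomicWaker::new();

    // Consumer side: register the waker it wants to be woken with.
    let waker = noop_waker();
    cell.register(&waker);

    // Producer side: take the registered waker (if any) and wake it after
    // publishing the state change it corresponds to.
    if let Some(w) = cell.take() {
        w.wake();
    }

    // The registration was consumed, so a second `take` yields nothing.
    assert!(cell.take().is_none());
}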
diff --git a/vendor/futures-core/src/task/__internal/mod.rs b/vendor/futures-core/src/task/__internal/mod.rs
new file mode 100644
index 000000000..c902eb4bf
--- /dev/null
+++ b/vendor/futures-core/src/task/__internal/mod.rs
@@ -0,0 +1,4 @@
+#[cfg(not(futures_no_atomic_cas))]
+mod atomic_waker;
+#[cfg(not(futures_no_atomic_cas))]
+pub use self::atomic_waker::AtomicWaker;
diff --git a/vendor/futures-core/src/task/mod.rs b/vendor/futures-core/src/task/mod.rs
new file mode 100644
index 000000000..19e4eaecd
--- /dev/null
+++ b/vendor/futures-core/src/task/mod.rs
@@ -0,0 +1,10 @@
+//! Task notification.
+
+#[macro_use]
+mod poll;
+
+#[doc(hidden)]
+pub mod __internal;
+
+#[doc(no_inline)]
+pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
diff --git a/vendor/futures-core/src/task/poll.rs b/vendor/futures-core/src/task/poll.rs
new file mode 100644
index 000000000..607e78e06
--- /dev/null
+++ b/vendor/futures-core/src/task/poll.rs
@@ -0,0 +1,12 @@
+/// Extracts the successful type of a `Poll<T>`.
+///
+/// This macro bakes in propagation of `Pending` signals by returning early.
+#[macro_export]
+macro_rules! ready {
+ ($e:expr $(,)?) => {
+ match $e {
+ $crate::task::Poll::Ready(t) => t,
+ $crate::task::Poll::Pending => return $crate::task::Poll::Pending,
+ }
+ };
+}
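
A minimal usage sketch (editor's addition; `poll_first` is a hypothetical helper): `ready!` forwards `Poll::Pending` from an inner poll so the surrounding code stays linear.

use core::pin::Pin;
use core::task::{Context, Poll};
use futures_core::ready;
use futures_core::stream::Stream;

// Polls a stream once and yields its next item, returning early with
// `Poll::Pending` if the stream is not ready yet.
fn poll_first<S: Stream + Unpin>(
    stream: &mut S,
    cx: &mut Context<'_>,
) -> Poll<Option<S::Item>> {
    let item = ready!(Pin::new(stream).poll_next(cx));
    Poll::Ready(item)
}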
diff --git a/vendor/futures-executor/.cargo-checksum.json b/vendor/futures-executor/.cargo-checksum.json
new file mode 100644
index 000000000..a8ad6b92b
--- /dev/null
+++ b/vendor/futures-executor/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"c6d60a83b1a88b21d0173ac269f6811d42618d8a216e14bfb32d56347871747a","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"151d3753b1ae87a1e1b1604c001ab8b2a5041b0e90ed09ea18d792081c424370","benches/thread_notify.rs":"e601968527bee85766f32d2d11de5ed8f6b4bd5a29989b5c369a52bd3cd3d024","src/enter.rs":"c1a771f373b469d98e2599d8e37da7d7a7083c30332d643f37867f86406ab1e2","src/lib.rs":"08a25594c789cb4ce1c8929a9ddd745e67fee1db373e011a7ebe135933522614","src/local_pool.rs":"1661a58468491d714a358b6382df88bbd7557e19506009763f841cbcf85781f5","src/thread_pool.rs":"206d5c9d16857d6b2cc9aecb63cd1c9859177b2eaea9b1d7055f5c42bd1ce33f","src/unpark_mutex.rs":"e186464d9bdec22a6d1e1d900ed03a1154e6b0d422ede9bd3b768657cdbb6113","tests/local_pool.rs":"c7f870582a29cdb6ebbb3a325ddb8485c61efac80fb96656003162294f4ec923"},"package":"29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a"} \ No newline at end of file
diff --git a/vendor/futures-executor/Cargo.toml b/vendor/futures-executor/Cargo.toml
new file mode 100644
index 000000000..50632b1e4
--- /dev/null
+++ b/vendor/futures-executor/Cargo.toml
@@ -0,0 +1,45 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.45"
+name = "futures-executor"
+version = "0.3.19"
+description = "Executors for asynchronous tasks based on the futures-rs library.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+[dependencies.futures-core]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-task]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-util]
+version = "0.3.19"
+default-features = false
+
+[dependencies.num_cpus]
+version = "1.8.0"
+optional = true
+
+[dev-dependencies]
+
+[features]
+default = ["std"]
+std = ["futures-core/std", "futures-task/std", "futures-util/std"]
+thread-pool = ["std", "num_cpus"]
diff --git a/vendor/futures-executor/LICENSE-APACHE b/vendor/futures-executor/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-executor/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-executor/LICENSE-MIT b/vendor/futures-executor/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-executor/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-executor/README.md b/vendor/futures-executor/README.md
new file mode 100644
index 000000000..67086851e
--- /dev/null
+++ b/vendor/futures-executor/README.md
@@ -0,0 +1,23 @@
+# futures-executor
+
+Executors for asynchronous tasks based on the futures-rs library.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-executor = "0.3"
+```
+
+The current `futures-executor` requires Rust 1.45 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/vendor/futures-executor/benches/thread_notify.rs b/vendor/futures-executor/benches/thread_notify.rs
new file mode 100644
index 000000000..88d0447cf
--- /dev/null
+++ b/vendor/futures-executor/benches/thread_notify.rs
@@ -0,0 +1,109 @@
+#![feature(test)]
+
+extern crate test;
+use crate::test::Bencher;
+
+use futures::executor::block_on;
+use futures::future::Future;
+use futures::task::{Context, Poll, Waker};
+use std::pin::Pin;
+
+#[bench]
+fn thread_yield_single_thread_one_wait(b: &mut Bencher) {
+ const NUM: usize = 10_000;
+
+ struct Yield {
+ rem: usize,
+ }
+
+ impl Future for Yield {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ if self.rem == 0 {
+ Poll::Ready(())
+ } else {
+ self.rem -= 1;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ }
+
+ b.iter(|| {
+ let y = Yield { rem: NUM };
+ block_on(y);
+ });
+}
+
+#[bench]
+fn thread_yield_single_thread_many_wait(b: &mut Bencher) {
+ const NUM: usize = 10_000;
+
+ struct Yield {
+ rem: usize,
+ }
+
+ impl Future for Yield {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ if self.rem == 0 {
+ Poll::Ready(())
+ } else {
+ self.rem -= 1;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ }
+
+ b.iter(|| {
+ for _ in 0..NUM {
+ let y = Yield { rem: 1 };
+ block_on(y);
+ }
+ });
+}
+
+#[bench]
+fn thread_yield_multi_thread(b: &mut Bencher) {
+ use std::sync::mpsc;
+ use std::thread;
+
+ const NUM: usize = 1_000;
+
+ let (tx, rx) = mpsc::sync_channel::<Waker>(10_000);
+
+ struct Yield {
+ rem: usize,
+ tx: mpsc::SyncSender<Waker>,
+ }
+ impl Unpin for Yield {}
+
+ impl Future for Yield {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ if self.rem == 0 {
+ Poll::Ready(())
+ } else {
+ self.rem -= 1;
+ self.tx.send(cx.waker().clone()).unwrap();
+ Poll::Pending
+ }
+ }
+ }
+
+ thread::spawn(move || {
+ while let Ok(task) = rx.recv() {
+ task.wake();
+ }
+ });
+
+ b.iter(move || {
+ let y = Yield { rem: NUM, tx: tx.clone() };
+
+ block_on(y);
+ });
+}
diff --git a/vendor/futures-executor/src/enter.rs b/vendor/futures-executor/src/enter.rs
new file mode 100644
index 000000000..5895a9efb
--- /dev/null
+++ b/vendor/futures-executor/src/enter.rs
@@ -0,0 +1,80 @@
+use std::cell::Cell;
+use std::fmt;
+
+thread_local!(static ENTERED: Cell<bool> = Cell::new(false));
+
+/// Represents an executor context.
+///
+/// For more details, see [`enter` documentation](enter()).
+pub struct Enter {
+ _priv: (),
+}
+
+/// An error returned by `enter` if an execution scope has already been
+/// entered.
+pub struct EnterError {
+ _priv: (),
+}
+
+impl fmt::Debug for EnterError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("EnterError").finish()
+ }
+}
+
+impl fmt::Display for EnterError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "an execution scope has already been entered")
+ }
+}
+
+impl std::error::Error for EnterError {}
+
+/// Marks the current thread as being within the dynamic extent of an
+/// executor.
+///
+/// Executor implementations should call this function before beginning to
+/// execute a task, and drop the returned [`Enter`](Enter) value after
+/// completing task execution:
+///
+/// ```
+/// use futures::executor::enter;
+///
+/// let enter = enter().expect("...");
+/// /* run task */
+/// drop(enter);
+/// ```
+///
+/// Doing so ensures that executors aren't
+/// accidentally invoked in a nested fashion.
+///
+/// # Error
+///
+/// Returns an error if the current thread is already marked, in which case the
+/// caller should panic with a tailored error message.
+pub fn enter() -> Result<Enter, EnterError> {
+ ENTERED.with(|c| {
+ if c.get() {
+ Err(EnterError { _priv: () })
+ } else {
+ c.set(true);
+
+ Ok(Enter { _priv: () })
+ }
+ })
+}
+
+impl fmt::Debug for Enter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Enter").finish()
+ }
+}
+
+impl Drop for Enter {
+ fn drop(&mut self) {
+ ENTERED.with(|c| {
+ assert!(c.get());
+ c.set(false);
+ });
+ }
+}
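
Editor's sketch, assuming the `futures::executor::enter` re-export: a nested call fails while the first guard is alive and succeeds again once the guard is dropped.

use futures::executor::enter;

fn main() {
    let guard = enter().expect("no executor is running yet");
    // The thread is already marked, so a second `enter` reports an error.
    assert!(enter().is_err());
    drop(guard);
    // After the guard is dropped, entering succeeds again.
    assert!(enter().is_ok());
}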
diff --git a/vendor/futures-executor/src/lib.rs b/vendor/futures-executor/src/lib.rs
new file mode 100644
index 000000000..b1af87545
--- /dev/null
+++ b/vendor/futures-executor/src/lib.rs
@@ -0,0 +1,76 @@
+//! Built-in executors and related tools.
+//!
+//! All asynchronous computation occurs within an executor, which is
+//! capable of spawning futures as tasks. This module provides several
+//! built-in executors, as well as tools for building your own.
+//!
+//! All items are only available when the `std` feature of this
+//! library is activated, and it is activated by default.
+//!
+//! # Using a thread pool (M:N task scheduling)
+//!
+//! Most of the time tasks should be executed on a [thread pool](ThreadPool).
+//! A small set of worker threads can handle a very large set of spawned tasks
+//! (which are much lighter weight than threads). Tasks spawned onto the pool
+//! with the [`spawn_ok`](ThreadPool::spawn_ok) function will run ambiently on
+//! the created threads.
+//!
+//! # Spawning additional tasks
+//!
+//! Tasks can be spawned onto a spawner by calling its [`spawn_obj`] method
+//! directly. In the case of `!Send` futures, [`spawn_local_obj`] can be used
+//! instead.
+//!
+//! # Single-threaded execution
+//!
+//! In addition to thread pools, it's possible to run a task (and the tasks
+//! it spawns) entirely within a single thread via the [`LocalPool`] executor.
+//! Aside from cutting down on synchronization costs, this executor also makes
+//! it possible to spawn non-`Send` tasks, via [`spawn_local_obj`]. The
+//! [`LocalPool`] is best suited for running I/O-bound tasks that do relatively
+//! little work between I/O operations.
+//!
+//! There is also a convenience function [`block_on`] for simply running a
+//! future to completion on the current thread.
+//!
+//! [`spawn_obj`]: https://docs.rs/futures/0.3/futures/task/trait.Spawn.html#tymethod.spawn_obj
+//! [`spawn_local_obj`]: https://docs.rs/futures/0.3/futures/task/trait.LocalSpawn.html#tymethod.spawn_local_obj
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+ missing_debug_implementations,
+ missing_docs,
+ rust_2018_idioms,
+ single_use_lifetimes,
+ unreachable_pub
+)]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[cfg(feature = "std")]
+mod local_pool;
+#[cfg(feature = "std")]
+pub use crate::local_pool::{block_on, block_on_stream, BlockingStream, LocalPool, LocalSpawner};
+
+#[cfg(feature = "thread-pool")]
+#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))]
+#[cfg(feature = "std")]
+mod thread_pool;
+#[cfg(feature = "thread-pool")]
+#[cfg(feature = "std")]
+mod unpark_mutex;
+#[cfg(feature = "thread-pool")]
+#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))]
+#[cfg(feature = "std")]
+pub use crate::thread_pool::{ThreadPool, ThreadPoolBuilder};
+
+#[cfg(feature = "std")]
+mod enter;
+#[cfg(feature = "std")]
+pub use crate::enter::{enter, Enter, EnterError};
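
A small end-to-end sketch of the two executors described in the module docs (editor's addition, assuming the umbrella `futures` crate with its `thread-pool` feature enabled; a oneshot channel is used only to hand the result back).

use futures::channel::oneshot;
use futures::executor::{block_on, ThreadPool};

fn main() {
    let pool = ThreadPool::new().expect("failed to build thread pool");
    let (tx, rx) = oneshot::channel::<u32>();

    // Run the producing task on the pool...
    pool.spawn_ok(async move {
        let _ = tx.send(42);
    });

    // ...and block the current thread until its result arrives.
    assert_eq!(block_on(rx), Ok(42));
}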
diff --git a/vendor/futures-executor/src/local_pool.rs b/vendor/futures-executor/src/local_pool.rs
new file mode 100644
index 000000000..bee96d8db
--- /dev/null
+++ b/vendor/futures-executor/src/local_pool.rs
@@ -0,0 +1,400 @@
+use crate::enter;
+use futures_core::future::Future;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use futures_task::{waker_ref, ArcWake};
+use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};
+use futures_util::pin_mut;
+use futures_util::stream::FuturesUnordered;
+use futures_util::stream::StreamExt;
+use std::cell::RefCell;
+use std::ops::{Deref, DerefMut};
+use std::rc::{Rc, Weak};
+use std::sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+};
+use std::thread::{self, Thread};
+
+/// A single-threaded task pool for polling futures to completion.
+///
+/// This executor allows you to multiplex any number of tasks onto a single
+/// thread. It's appropriate to poll strictly I/O-bound futures that do very
+/// little work in between I/O actions.
+///
+/// To get a handle to the pool that implements
+/// [`Spawn`](futures_task::Spawn), use the
+/// [`spawner()`](LocalPool::spawner) method. Because the executor is
+/// single-threaded, it supports a special form of task spawning for non-`Send`
+/// futures, via [`spawn_local_obj`](futures_task::LocalSpawn::spawn_local_obj).
+#[derive(Debug)]
+pub struct LocalPool {
+ pool: FuturesUnordered<LocalFutureObj<'static, ()>>,
+ incoming: Rc<Incoming>,
+}
+
+/// A handle to a [`LocalPool`](LocalPool) that implements
+/// [`Spawn`](futures_task::Spawn).
+#[derive(Clone, Debug)]
+pub struct LocalSpawner {
+ incoming: Weak<Incoming>,
+}
+
+type Incoming = RefCell<Vec<LocalFutureObj<'static, ()>>>;
+
+pub(crate) struct ThreadNotify {
+ /// The (single) executor thread.
+ thread: Thread,
+ /// A flag to ensure a wakeup (i.e. `unpark()`) is not "forgotten"
+ /// before the next `park()`, which may otherwise happen if the code
+ /// being executed as part of the future(s) being polled makes use of
+ /// park / unpark calls of its own, i.e. we cannot assume that no other
+ /// code uses park / unpark on the executing `thread`.
+ unparked: AtomicBool,
+}
+
+thread_local! {
+ static CURRENT_THREAD_NOTIFY: Arc<ThreadNotify> = Arc::new(ThreadNotify {
+ thread: thread::current(),
+ unparked: AtomicBool::new(false),
+ });
+}
+
+impl ArcWake for ThreadNotify {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ // Make sure the wakeup is remembered until the next `park()`.
+ let unparked = arc_self.unparked.swap(true, Ordering::Relaxed);
+ if !unparked {
+ // If the thread has not been unparked yet, it must be done
+ // now. If it was actually parked, it will run again,
+ // otherwise the token made available by `unpark`
+ // may be consumed before reaching `park()`, but `unparked`
+ // ensures it is not forgotten.
+ arc_self.thread.unpark();
+ }
+ }
+}
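
The following is an editor's illustration, in plain `std`, of the "remembered wakeup" pattern that the `unparked` flag implements; it is not the crate's code.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let unparked = Arc::new(AtomicBool::new(false));
    let main_thread = thread::current();

    let flag = unparked.clone();
    thread::spawn(move || {
        // "wake": record the wakeup first, then unpark.
        flag.store(true, Ordering::Release);
        main_thread.unpark();
    });

    // "executor loop": only park if no wakeup has been recorded, so an
    // unpark consumed elsewhere cannot be lost.
    while !unparked.swap(false, Ordering::Acquire) {
        thread::park();
    }
}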
+
+// Set up and run a basic single-threaded spawner loop, invoking `f` on each
+// turn.
+fn run_executor<T, F: FnMut(&mut Context<'_>) -> Poll<T>>(mut f: F) -> T {
+ let _enter = enter().expect(
+ "cannot execute `LocalPool` executor from within \
+ another executor",
+ );
+
+ CURRENT_THREAD_NOTIFY.with(|thread_notify| {
+ let waker = waker_ref(thread_notify);
+ let mut cx = Context::from_waker(&waker);
+ loop {
+ if let Poll::Ready(t) = f(&mut cx) {
+ return t;
+ }
+ // Consume the wakeup that occurred while executing `f`, if any.
+ let unparked = thread_notify.unparked.swap(false, Ordering::Acquire);
+ if !unparked {
+ // No wakeup occurred. It may occur now, right before parking,
+ // but in that case the token made available by `unpark()`
+ // is guaranteed to still be available and `park()` is a no-op.
+ thread::park();
+ // When the thread is unparked, `unparked` will have been set
+ // and needs to be unset before the next call to `f` to avoid
+ // a redundant loop iteration.
+ thread_notify.unparked.store(false, Ordering::Release);
+ }
+ }
+ })
+}
+
+fn poll_executor<T, F: FnMut(&mut Context<'_>) -> T>(mut f: F) -> T {
+ let _enter = enter().expect(
+ "cannot execute `LocalPool` executor from within \
+ another executor",
+ );
+
+ CURRENT_THREAD_NOTIFY.with(|thread_notify| {
+ let waker = waker_ref(thread_notify);
+ let mut cx = Context::from_waker(&waker);
+ f(&mut cx)
+ })
+}
+
+impl LocalPool {
+ /// Create a new, empty pool of tasks.
+ pub fn new() -> Self {
+ Self { pool: FuturesUnordered::new(), incoming: Default::default() }
+ }
+
+ /// Get a clonable handle to the pool as a [`Spawn`].
+ pub fn spawner(&self) -> LocalSpawner {
+ LocalSpawner { incoming: Rc::downgrade(&self.incoming) }
+ }
+
+ /// Run all tasks in the pool to completion.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ ///
+ /// let mut pool = LocalPool::new();
+ ///
+ /// // ... spawn some initial tasks using `spawn.spawn()` or `spawn.spawn_local()`
+ ///
+ /// // run *all* tasks in the pool to completion, including any newly-spawned ones.
+ /// pool.run();
+ /// ```
+ ///
+ /// The function will block the calling thread until *all* tasks in the pool
+ /// are complete, including any spawned while running existing tasks.
+ pub fn run(&mut self) {
+ run_executor(|cx| self.poll_pool(cx))
+ }
+
+ /// Runs all the tasks in the pool until the given future completes.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ ///
+ /// let mut pool = LocalPool::new();
+ /// # let my_app = async {};
+ ///
+ /// // run tasks in the pool until `my_app` completes
+ /// pool.run_until(my_app);
+ /// ```
+ ///
+ /// The function will block the calling thread *only* until the future `f`
+ /// completes; there may still be incomplete tasks in the pool, which will
+ /// be inert after the call completes, but can continue with further use of
+ /// one of the pool's run or poll methods. While the function is running,
+ /// however, all tasks in the pool will try to make progress.
+ pub fn run_until<F: Future>(&mut self, future: F) -> F::Output {
+ pin_mut!(future);
+
+ run_executor(|cx| {
+ {
+ // if our main task is done, so are we
+ let result = future.as_mut().poll(cx);
+ if let Poll::Ready(output) = result {
+ return Poll::Ready(output);
+ }
+ }
+
+ let _ = self.poll_pool(cx);
+ Poll::Pending
+ })
+ }
+
+ /// Runs all tasks and returns after completing one future or until no more progress
+ /// can be made. Returns `true` if one future was completed, `false` otherwise.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ /// use futures::task::LocalSpawnExt;
+ /// use futures::future::{ready, pending};
+ ///
+ /// let mut pool = LocalPool::new();
+ /// let spawner = pool.spawner();
+ ///
+ /// spawner.spawn_local(ready(())).unwrap();
+ /// spawner.spawn_local(ready(())).unwrap();
+ /// spawner.spawn_local(pending()).unwrap();
+ ///
+ /// // Run the two ready tasks and return true for them.
+ /// pool.try_run_one(); // returns true after completing one of the ready futures
+ /// pool.try_run_one(); // returns true after completing the other ready future
+ ///
+ /// // the remaining task cannot be completed
+ /// assert!(!pool.try_run_one()); // returns false
+ /// ```
+ ///
+ /// This function will not block the calling thread and will return the moment
+ /// that there are no tasks left for which progress can be made or after exactly one
+ /// task was completed; remaining incomplete tasks in the pool can continue with
+ /// further use of one of the pool's run or poll methods.
+ /// Though only one task will be completed, progress may be made on multiple tasks.
+ pub fn try_run_one(&mut self) -> bool {
+ poll_executor(|ctx| {
+ loop {
+ let ret = self.poll_pool_once(ctx);
+
+ // return if we have executed a future
+ if let Poll::Ready(Some(_)) = ret {
+ return true;
+ }
+
+ // if there are no new incoming futures
+ // then there is no future that can make progress
+ // and we can return without having completed a single future
+ if self.incoming.borrow().is_empty() {
+ return false;
+ }
+ }
+ })
+ }
+
+ /// Runs all tasks in the pool and returns if no more progress can be made
+ /// on any task.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ /// use futures::task::LocalSpawnExt;
+ /// use futures::future::{ready, pending};
+ ///
+ /// let mut pool = LocalPool::new();
+ /// let spawner = pool.spawner();
+ ///
+ /// spawner.spawn_local(ready(())).unwrap();
+ /// spawner.spawn_local(ready(())).unwrap();
+ /// spawner.spawn_local(pending()).unwrap();
+ ///
+ /// // Runs the two ready tasks and returns.
+ /// // The pending task remains in the pool.
+ /// pool.run_until_stalled();
+ /// ```
+ ///
+ /// This function will not block the calling thread and will return the moment
+ /// that there are no tasks left for which progress can be made;
+ /// remaining incomplete tasks in the pool can continue with further use of one
+ /// of the pool's run or poll methods. While the function is running, all tasks
+ /// in the pool will try to make progress.
+ pub fn run_until_stalled(&mut self) {
+ poll_executor(|ctx| {
+ let _ = self.poll_pool(ctx);
+ });
+ }
+
+ // Make maximal progress on the entire pool of spawned tasks, returning `Ready`
+ // if the pool is empty and `Pending` if no further progress can be made.
+ fn poll_pool(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+ // state for the FuturesUnordered, which will never be used
+ loop {
+ let ret = self.poll_pool_once(cx);
+
+ // we queued up some new tasks; add them and poll again
+ if !self.incoming.borrow().is_empty() {
+ continue;
+ }
+
+ // no queued tasks; we may be done
+ match ret {
+ Poll::Pending => return Poll::Pending,
+ Poll::Ready(None) => return Poll::Ready(()),
+ _ => {}
+ }
+ }
+ }
+
+ // Try to make minimal progress on the pool of spawned tasks
+ fn poll_pool_once(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ // empty the incoming queue of newly-spawned tasks
+ {
+ let mut incoming = self.incoming.borrow_mut();
+ for task in incoming.drain(..) {
+ self.pool.push(task)
+ }
+ }
+
+ // try to execute the next ready future
+ self.pool.poll_next_unpin(cx)
+ }
+}
+
+impl Default for LocalPool {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+/// Run a future to completion on the current thread.
+///
+/// This function will block the caller until the given future has completed.
+///
+/// Use a [`LocalPool`](LocalPool) if you need finer-grained control over
+/// spawned tasks.
+pub fn block_on<F: Future>(f: F) -> F::Output {
+ pin_mut!(f);
+ run_executor(|cx| f.as_mut().poll(cx))
+}
+
+/// Turn a stream into a blocking iterator.
+///
+/// When `next` is called on the resulting `BlockingStream`, the caller
+/// will be blocked until the next element of the `Stream` becomes available.
+pub fn block_on_stream<S: Stream + Unpin>(stream: S) -> BlockingStream<S> {
+ BlockingStream { stream }
+}
+
+/// An iterator which blocks on values from a stream until they become available.
+#[derive(Debug)]
+pub struct BlockingStream<S: Stream + Unpin> {
+ stream: S,
+}
+
+impl<S: Stream + Unpin> Deref for BlockingStream<S> {
+ type Target = S;
+ fn deref(&self) -> &Self::Target {
+ &self.stream
+ }
+}
+
+impl<S: Stream + Unpin> DerefMut for BlockingStream<S> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.stream
+ }
+}
+
+impl<S: Stream + Unpin> BlockingStream<S> {
+ /// Convert this `BlockingStream` into the inner `Stream` type.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+impl<S: Stream + Unpin> Iterator for BlockingStream<S> {
+ type Item = S::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ LocalPool::new().run_until(self.stream.next())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
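
Usage sketch (editor's addition), assuming the umbrella `futures` crate: `block_on_stream` turns a stream into an ordinary blocking iterator.

use futures::executor::block_on_stream;
use futures::stream;

fn main() {
    let stream = stream::iter(vec![1, 2, 3]);
    // Each call to `next` blocks until the stream yields the next element.
    let collected: Vec<i32> = block_on_stream(stream).collect();
    assert_eq!(collected, vec![1, 2, 3]);
}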
+
+impl Spawn for LocalSpawner {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ if let Some(incoming) = self.incoming.upgrade() {
+ incoming.borrow_mut().push(future.into());
+ Ok(())
+ } else {
+ Err(SpawnError::shutdown())
+ }
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ if self.incoming.upgrade().is_some() {
+ Ok(())
+ } else {
+ Err(SpawnError::shutdown())
+ }
+ }
+}
+
+impl LocalSpawn for LocalSpawner {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ if let Some(incoming) = self.incoming.upgrade() {
+ incoming.borrow_mut().push(future);
+ Ok(())
+ } else {
+ Err(SpawnError::shutdown())
+ }
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ if self.incoming.upgrade().is_some() {
+ Ok(())
+ } else {
+ Err(SpawnError::shutdown())
+ }
+ }
+}
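
Editor's sketch of the spawner workflow described above, assuming the umbrella `futures` crate (`LocalSpawnExt::spawn_local` wraps the future into a `LocalFutureObj` before calling `spawn_local_obj`): a non-`Send`, `Rc`-holding task is spawned through the handle and driven by `run()`.

use futures::executor::LocalPool;
use futures::task::LocalSpawnExt;
use std::cell::Cell;
use std::rc::Rc;

fn main() {
    let mut pool = LocalPool::new();
    let spawner = pool.spawner();

    // An `Rc` makes the future `!Send`, which is fine for a local spawner.
    let counter = Rc::new(Cell::new(0));
    let c = counter.clone();
    spawner
        .spawn_local(async move {
            c.set(c.get() + 1);
        })
        .unwrap();

    // Drive every spawned task to completion on this thread.
    pool.run();
    assert_eq!(counter.get(), 1);
}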
diff --git a/vendor/futures-executor/src/thread_pool.rs b/vendor/futures-executor/src/thread_pool.rs
new file mode 100644
index 000000000..5e1f586eb
--- /dev/null
+++ b/vendor/futures-executor/src/thread_pool.rs
@@ -0,0 +1,375 @@
+use crate::enter;
+use crate::unpark_mutex::UnparkMutex;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_task::{waker_ref, ArcWake};
+use futures_task::{FutureObj, Spawn, SpawnError};
+use futures_util::future::FutureExt;
+use std::cmp;
+use std::fmt;
+use std::io;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::mpsc;
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+/// A general-purpose thread pool for scheduling tasks that poll futures to
+/// completion.
+///
+/// The thread pool multiplexes any number of tasks onto a fixed number of
+/// worker threads.
+///
+/// This type is a clonable handle to the threadpool itself.
+/// Cloning it will only create a new reference, not a new threadpool.
+///
+/// This type is only available when the `thread-pool` feature of this
+/// library is activated.
+#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))]
+pub struct ThreadPool {
+ state: Arc<PoolState>,
+}
+
+/// Thread pool configuration object.
+///
+/// This type is only available when the `thread-pool` feature of this
+/// library is activated.
+#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))]
+pub struct ThreadPoolBuilder {
+ pool_size: usize,
+ stack_size: usize,
+ name_prefix: Option<String>,
+ after_start: Option<Arc<dyn Fn(usize) + Send + Sync>>,
+ before_stop: Option<Arc<dyn Fn(usize) + Send + Sync>>,
+}
+
+trait AssertSendSync: Send + Sync {}
+impl AssertSendSync for ThreadPool {}
+
+struct PoolState {
+ tx: Mutex<mpsc::Sender<Message>>,
+ rx: Mutex<mpsc::Receiver<Message>>,
+ cnt: AtomicUsize,
+ size: usize,
+}
+
+impl fmt::Debug for ThreadPool {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ThreadPool").field("size", &self.state.size).finish()
+ }
+}
+
+impl fmt::Debug for ThreadPoolBuilder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ThreadPoolBuilder")
+ .field("pool_size", &self.pool_size)
+ .field("name_prefix", &self.name_prefix)
+ .finish()
+ }
+}
+
+enum Message {
+ Run(Task),
+ Close,
+}
+
+impl ThreadPool {
+ /// Creates a new thread pool with the default configuration.
+ ///
+ /// See documentation for the methods in
+ /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default
+ /// configuration.
+ pub fn new() -> Result<Self, io::Error> {
+ ThreadPoolBuilder::new().create()
+ }
+
+ /// Create a default thread pool configuration, which can then be customized.
+ ///
+ /// See documentation for the methods in
+ /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default
+ /// configuration.
+ pub fn builder() -> ThreadPoolBuilder {
+ ThreadPoolBuilder::new()
+ }
+
+ /// Spawns a future that will be run to completion.
+ ///
+ /// > **Note**: This method is similar to `Spawn::spawn_obj`, except that
+ /// > it is guaranteed to always succeed.
+ pub fn spawn_obj_ok(&self, future: FutureObj<'static, ()>) {
+ let task = Task {
+ future,
+ wake_handle: Arc::new(WakeHandle { exec: self.clone(), mutex: UnparkMutex::new() }),
+ exec: self.clone(),
+ };
+ self.state.send(Message::Run(task));
+ }
+
+ /// Spawns a task that polls the given future with output `()` to
+ /// completion.
+ ///
+ /// ```
+ /// use futures::executor::ThreadPool;
+ ///
+ /// let pool = ThreadPool::new().unwrap();
+ ///
+ /// let future = async { /* ... */ };
+ /// pool.spawn_ok(future);
+ /// ```
+ ///
+ /// > **Note**: This method is similar to `SpawnExt::spawn`, except that
+ /// > it is guaranteed to always succeed.
+ pub fn spawn_ok<Fut>(&self, future: Fut)
+ where
+ Fut: Future<Output = ()> + Send + 'static,
+ {
+ self.spawn_obj_ok(FutureObj::new(Box::new(future)))
+ }
+}
+
+impl Spawn for ThreadPool {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ self.spawn_obj_ok(future);
+ Ok(())
+ }
+}
+
+impl PoolState {
+ fn send(&self, msg: Message) {
+ self.tx.lock().unwrap().send(msg).unwrap();
+ }
+
+ fn work(
+ &self,
+ idx: usize,
+ after_start: Option<Arc<dyn Fn(usize) + Send + Sync>>,
+ before_stop: Option<Arc<dyn Fn(usize) + Send + Sync>>,
+ ) {
+ let _scope = enter().unwrap();
+ if let Some(after_start) = after_start {
+ after_start(idx);
+ }
+ loop {
+ let msg = self.rx.lock().unwrap().recv().unwrap();
+ match msg {
+ Message::Run(task) => task.run(),
+ Message::Close => break,
+ }
+ }
+ if let Some(before_stop) = before_stop {
+ before_stop(idx);
+ }
+ }
+}
+
+impl Clone for ThreadPool {
+ fn clone(&self) -> Self {
+ self.state.cnt.fetch_add(1, Ordering::Relaxed);
+ Self { state: self.state.clone() }
+ }
+}
+
+impl Drop for ThreadPool {
+ fn drop(&mut self) {
+ if self.state.cnt.fetch_sub(1, Ordering::Relaxed) == 1 {
+ for _ in 0..self.state.size {
+ self.state.send(Message::Close);
+ }
+ }
+ }
+}
+
+impl ThreadPoolBuilder {
+ /// Create a default thread pool configuration.
+ ///
+ /// See the other methods on this type for details on the defaults.
+ pub fn new() -> Self {
+ Self {
+ pool_size: cmp::max(1, num_cpus::get()),
+ stack_size: 0,
+ name_prefix: None,
+ after_start: None,
+ before_stop: None,
+ }
+ }
+
+ /// Set the size of the future `ThreadPool`.
+ ///
+ /// The size of a thread pool is the number of worker threads spawned. By
+ /// default, this is equal to the number of CPU cores.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `pool_size == 0`.
+ pub fn pool_size(&mut self, size: usize) -> &mut Self {
+ assert!(size > 0);
+ self.pool_size = size;
+ self
+ }
+
+ /// Set stack size of threads in the pool, in bytes.
+ ///
+ /// By default, worker threads use Rust's standard stack size.
+ pub fn stack_size(&mut self, stack_size: usize) -> &mut Self {
+ self.stack_size = stack_size;
+ self
+ }
+
+ /// Set the thread name prefix for the future `ThreadPool`.
+ ///
+ /// The prefix is used to generate worker thread names. For example, if the prefix
+ /// is `my-pool-`, threads in the pool will get names like `my-pool-0`, `my-pool-1`, and so on.
+ ///
+ /// By default, worker threads are assigned Rust's standard thread name.
+ pub fn name_prefix<S: Into<String>>(&mut self, name_prefix: S) -> &mut Self {
+ self.name_prefix = Some(name_prefix.into());
+ self
+ }
+
+ /// Execute the closure `f` immediately after each worker thread is started,
+ /// but before running any tasks on it.
+ ///
+ /// This hook is intended for bookkeeping and monitoring.
+ /// The closure `f` will be dropped after the `builder` is dropped
+ /// and all worker threads in the pool have executed it.
+ ///
+ /// The closure provided will receive an index corresponding to the worker
+ /// thread it's running on.
+ pub fn after_start<F>(&mut self, f: F) -> &mut Self
+ where
+ F: Fn(usize) + Send + Sync + 'static,
+ {
+ self.after_start = Some(Arc::new(f));
+ self
+ }
+
+ /// Execute the closure `f` just prior to shutting down each worker thread.
+ ///
+ /// This hook is intended for bookkeeping and monitoring.
+ /// The closure `f` will be dropped after the `builder` is dropped
+ /// and all threads in the pool have executed it.
+ ///
+ /// The closure provided will receive an index corresponding to the worker
+ /// thread it's running on.
+ pub fn before_stop<F>(&mut self, f: F) -> &mut Self
+ where
+ F: Fn(usize) + Send + Sync + 'static,
+ {
+ self.before_stop = Some(Arc::new(f));
+ self
+ }
+
+ /// Create a [`ThreadPool`](ThreadPool) with the given configuration.
+ pub fn create(&mut self) -> Result<ThreadPool, io::Error> {
+ let (tx, rx) = mpsc::channel();
+ let pool = ThreadPool {
+ state: Arc::new(PoolState {
+ tx: Mutex::new(tx),
+ rx: Mutex::new(rx),
+ cnt: AtomicUsize::new(1),
+ size: self.pool_size,
+ }),
+ };
+
+ for counter in 0..self.pool_size {
+ let state = pool.state.clone();
+ let after_start = self.after_start.clone();
+ let before_stop = self.before_stop.clone();
+ let mut thread_builder = thread::Builder::new();
+ if let Some(ref name_prefix) = self.name_prefix {
+ thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter));
+ }
+ if self.stack_size > 0 {
+ thread_builder = thread_builder.stack_size(self.stack_size);
+ }
+ thread_builder.spawn(move || state.work(counter, after_start, before_stop))?;
+ }
+ Ok(pool)
+ }
+}
+
+impl Default for ThreadPoolBuilder {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+/// A task responsible for polling a future to completion.
+struct Task {
+ future: FutureObj<'static, ()>,
+ exec: ThreadPool,
+ wake_handle: Arc<WakeHandle>,
+}
+
+struct WakeHandle {
+ mutex: UnparkMutex<Task>,
+ exec: ThreadPool,
+}
+
+impl Task {
+ /// Actually run the task (invoking `poll` on the future) on the current
+ /// thread.
+ fn run(self) {
+ let Self { mut future, wake_handle, mut exec } = self;
+ let waker = waker_ref(&wake_handle);
+ let mut cx = Context::from_waker(&waker);
+
+ // Safety: The ownership of this `Task` object is evidence that
+ // we are in the `POLLING`/`REPOLL` state for the mutex.
+ unsafe {
+ wake_handle.mutex.start_poll();
+
+ loop {
+ let res = future.poll_unpin(&mut cx);
+ match res {
+ Poll::Pending => {}
+ Poll::Ready(()) => return wake_handle.mutex.complete(),
+ }
+ let task = Self { future, wake_handle: wake_handle.clone(), exec };
+ match wake_handle.mutex.wait(task) {
+ Ok(()) => return, // we've waited
+ Err(task) => {
+ // someone's notified us
+ future = task.future;
+ exec = task.exec;
+ }
+ }
+ }
+ }
+ }
+}
+
+impl fmt::Debug for Task {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Task").field("contents", &"...").finish()
+ }
+}
+
+impl ArcWake for WakeHandle {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ match arc_self.mutex.notify() {
+ Ok(task) => arc_self.exec.state.send(Message::Run(task)),
+ Err(()) => {}
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::sync::mpsc;
+
+ #[test]
+ fn test_drop_after_start() {
+ let (tx, rx) = mpsc::sync_channel(2);
+ let _cpu_pool = ThreadPoolBuilder::new()
+ .pool_size(2)
+ .after_start(move |_| tx.send(1).unwrap())
+ .create()
+ .unwrap();
+
+ // Once the ThreadPoolBuilder and the worker threads have dropped their
+ // clones of the `after_start` closure, `tx` is dropped as well, so `rx`
+ // can be used as an iterator that terminates.
+ let count = rx.into_iter().count();
+ assert_eq!(count, 2);
+ }
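+
+    // A minimal usage sketch of the builder API above: configure a small
+    // pool, spawn one future on it, and wait for its side effect. The pool
+    // size, thread name prefix, and channel payload are arbitrary example
+    // values.
+    #[test]
+    fn test_builder_configure_and_spawn() {
+        let pool = ThreadPoolBuilder::new()
+            .pool_size(2)
+            .name_prefix("pool-example-")
+            .create()
+            .unwrap();
+
+        let (tx, rx) = mpsc::channel();
+        pool.spawn_ok(async move {
+            tx.send(42usize).unwrap();
+        });
+        assert_eq!(rx.recv().unwrap(), 42);
+    }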
+}
diff --git a/vendor/futures-executor/src/unpark_mutex.rs b/vendor/futures-executor/src/unpark_mutex.rs
new file mode 100644
index 000000000..ac5112cfa
--- /dev/null
+++ b/vendor/futures-executor/src/unpark_mutex.rs
@@ -0,0 +1,137 @@
+use std::cell::UnsafeCell;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+
+/// A "lock" around data `D`, which employs a *helping* strategy.
+///
+/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
+/// invoked on only a single thread at a time (2) `poll` being invoked at least
+/// once after each `unpark` (unless the future has completed).
+pub(crate) struct UnparkMutex<D> {
+ // The state of task execution (state machine described below)
+ status: AtomicUsize,
+
+ // The actual task data, accessible only in the POLLING state
+ inner: UnsafeCell<Option<D>>,
+}
+
+// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
+// acquisition failure, the current lock holder performs the desired work --
+// re-polling.
+//
+// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
+// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
+// must therefore be `Send`.
+unsafe impl<D: Send> Send for UnparkMutex<D> {}
+unsafe impl<D: Send> Sync for UnparkMutex<D> {}
+
+// There are four possible task states, listed below with their possible
+// transitions:
+
+// The task is blocked, waiting on an event
+const WAITING: usize = 0; // --> POLLING
+
+// The task is actively being polled by a thread; arrival of additional events
+// of interest should move it to the REPOLL state
+const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE
+
+// The task is actively being polled, but will need to be re-polled upon
+// completion to ensure that all events were observed.
+const REPOLL: usize = 2; // --> POLLING
+
+// The task has finished executing (either successfully or with an error/panic)
+const COMPLETE: usize = 3; // No transitions out
+
+impl<D> UnparkMutex<D> {
+ pub(crate) fn new() -> Self {
+ Self { status: AtomicUsize::new(WAITING), inner: UnsafeCell::new(None) }
+ }
+
+ /// Attempt to "notify" the mutex that a poll should occur.
+ ///
+ /// An `Ok` result indicates that the `POLLING` state has been entered, and
+ /// the caller can proceed to poll the future. An `Err` result indicates
+ /// that polling is not necessary (because the task is finished or the
+ /// polling has been delegated).
+ pub(crate) fn notify(&self) -> Result<D, ()> {
+ let mut status = self.status.load(SeqCst);
+ loop {
+ match status {
+ // The task is idle, so try to run it immediately.
+ WAITING => {
+ match self.status.compare_exchange(WAITING, POLLING, SeqCst, SeqCst) {
+ Ok(_) => {
+ let data = unsafe {
+ // SAFETY: we've ensured mutual exclusion via
+ // the status protocol; we are the only thread
+ // that has transitioned to the POLLING state,
+ // and we won't transition back to WAITING until
+ // the lock is "released" by this thread. See
+ // the protocol diagram above.
+ (*self.inner.get()).take().unwrap()
+ };
+ return Ok(data);
+ }
+ Err(cur) => status = cur,
+ }
+ }
+
+ // The task is being polled, so we need to record that it should
+ // be *repolled* when complete.
+ POLLING => match self.status.compare_exchange(POLLING, REPOLL, SeqCst, SeqCst) {
+ Ok(_) => return Err(()),
+ Err(cur) => status = cur,
+ },
+
+ // The task is already scheduled for polling, or is complete, so
+ // we've got nothing to do.
+ _ => return Err(()),
+ }
+ }
+ }
+
+ /// Alert the mutex that polling is about to begin, clearing any accumulated
+ /// re-poll requests.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub(crate) unsafe fn start_poll(&self) {
+ self.status.store(POLLING, SeqCst);
+ }
+
+ /// Alert the mutex that polling completed with `Pending`.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub(crate) unsafe fn wait(&self, data: D) -> Result<(), D> {
+ *self.inner.get() = Some(data);
+
+ match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
+ // no unparks came in while we were running
+ Ok(_) => Ok(()),
+
+ // guaranteed to be in REPOLL state; just clobber the
+ // state and run again.
+ Err(status) => {
+ assert_eq!(status, REPOLL);
+ self.status.store(POLLING, SeqCst);
+ Err((*self.inner.get()).take().unwrap())
+ }
+ }
+ }
+
+ /// Alert the mutex that the task has completed execution and should not be
+ /// notified again.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub(crate) unsafe fn complete(&self) {
+ self.status.store(COMPLETE, SeqCst);
+ }
+}
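+
+// A minimal single-threaded walkthrough of the state machine above, assuming
+// only the `notify`/`start_poll`/`wait`/`complete` protocol documented on
+// each method; the `u32` payload stands in for the task data.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn wait_then_notify_hands_data_back() {
+        let mutex: UnparkMutex<u32> = UnparkMutex::new();
+
+        unsafe {
+            // A task starts polling: WAITING -> POLLING.
+            mutex.start_poll();
+            // The poll returned Pending with no intervening wake, so the
+            // data is parked: POLLING -> WAITING.
+            assert!(mutex.wait(7).is_ok());
+        }
+
+        // A wake arrives while the task is parked: WAITING -> POLLING, and
+        // the parked data is handed back so the caller can poll it again.
+        assert_eq!(mutex.notify(), Ok(7));
+
+        // A second wake during polling only records a repoll request.
+        assert_eq!(mutex.notify(), Err(()));
+
+        unsafe {
+            // Polling returned Pending, but a wake came in meanwhile
+            // (REPOLL), so `wait` refuses to park and hands the data back
+            // for an immediate re-poll.
+            assert_eq!(mutex.wait(7), Err(7));
+            // The future finally completes; no further polls will happen.
+            mutex.complete();
+        }
+    }
+}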
diff --git a/vendor/futures-executor/tests/local_pool.rs b/vendor/futures-executor/tests/local_pool.rs
new file mode 100644
index 000000000..9b1316b99
--- /dev/null
+++ b/vendor/futures-executor/tests/local_pool.rs
@@ -0,0 +1,434 @@
+use futures::channel::oneshot;
+use futures::executor::LocalPool;
+use futures::future::{self, lazy, poll_fn, Future};
+use futures::task::{Context, LocalSpawn, Poll, Spawn, Waker};
+use std::cell::{Cell, RefCell};
+use std::pin::Pin;
+use std::rc::Rc;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+use std::thread;
+use std::time::Duration;
+
+struct Pending(Rc<()>);
+
+impl Future for Pending {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ Poll::Pending
+ }
+}
+
+fn pending() -> Pending {
+ Pending(Rc::new(()))
+}
+
+#[test]
+fn run_until_single_future() {
+ let mut cnt = 0;
+
+ {
+ let mut pool = LocalPool::new();
+ let fut = lazy(|_| {
+ cnt += 1;
+ });
+ pool.run_until(fut);
+ }
+
+ assert_eq!(cnt, 1);
+}
+
+#[test]
+fn run_until_ignores_spawned() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap();
+ pool.run_until(lazy(|_| ()));
+}
+
+#[test]
+fn run_until_executes_spawned() {
+ let (tx, rx) = oneshot::channel();
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ spawn
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ tx.send(()).unwrap();
+ }))
+ .into(),
+ )
+ .unwrap();
+ pool.run_until(rx).unwrap();
+}
+
+#[test]
+fn run_returns_if_empty() {
+ let mut pool = LocalPool::new();
+ pool.run();
+ pool.run();
+}
+
+#[test]
+fn run_executes_spawned() {
+ let cnt = Rc::new(Cell::new(0));
+ let cnt2 = cnt.clone();
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ let spawn2 = pool.spawner();
+
+ spawn
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ spawn2
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ cnt2.set(cnt2.get() + 1);
+ }))
+ .into(),
+ )
+ .unwrap();
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ pool.run();
+
+ assert_eq!(cnt.get(), 1);
+}
+
+#[test]
+fn run_spawn_many() {
+ const ITER: usize = 200;
+
+ let cnt = Rc::new(Cell::new(0));
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ for _ in 0..ITER {
+ let cnt = cnt.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ cnt.set(cnt.get() + 1);
+ }))
+ .into(),
+ )
+ .unwrap();
+ }
+
+ pool.run();
+
+ assert_eq!(cnt.get(), ITER);
+}
+
+#[test]
+fn try_run_one_returns_if_empty() {
+ let mut pool = LocalPool::new();
+ assert!(!pool.try_run_one());
+}
+
+#[test]
+fn try_run_one_executes_one_ready() {
+ const ITER: usize = 200;
+
+ let cnt = Rc::new(Cell::new(0));
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ for _ in 0..ITER {
+ spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap();
+
+ let cnt = cnt.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ cnt.set(cnt.get() + 1);
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap();
+ }
+
+ for i in 0..ITER {
+ assert_eq!(cnt.get(), i);
+ assert!(pool.try_run_one());
+ assert_eq!(cnt.get(), i + 1);
+ }
+ assert!(!pool.try_run_one());
+}
+
+#[test]
+fn try_run_one_returns_on_no_progress() {
+ const ITER: usize = 10;
+
+ let cnt = Rc::new(Cell::new(0));
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ let waker: Rc<Cell<Option<Waker>>> = Rc::new(Cell::new(None));
+ {
+ let cnt = cnt.clone();
+ let waker = waker.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(poll_fn(move |ctx| {
+ cnt.set(cnt.get() + 1);
+ waker.set(Some(ctx.waker().clone()));
+ if cnt.get() == ITER {
+ Poll::Ready(())
+ } else {
+ Poll::Pending
+ }
+ }))
+ .into(),
+ )
+ .unwrap();
+ }
+
+ for i in 0..ITER - 1 {
+ assert_eq!(cnt.get(), i);
+ assert!(!pool.try_run_one());
+ assert_eq!(cnt.get(), i + 1);
+ let w = waker.take();
+ assert!(w.is_some());
+ w.unwrap().wake();
+ }
+ assert!(pool.try_run_one());
+ assert_eq!(cnt.get(), ITER);
+}
+
+#[test]
+fn try_run_one_runs_sub_futures() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ let cnt = Rc::new(Cell::new(0));
+
+ let inner_spawner = spawn.clone();
+ let cnt1 = cnt.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(poll_fn(move |_| {
+ cnt1.set(cnt1.get() + 1);
+
+ let cnt2 = cnt1.clone();
+ inner_spawner
+ .spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into())
+ .unwrap();
+
+ Poll::Pending
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ pool.try_run_one();
+ assert_eq!(cnt.get(), 2);
+}
+
+#[test]
+fn run_until_stalled_returns_if_empty() {
+ let mut pool = LocalPool::new();
+ pool.run_until_stalled();
+ pool.run_until_stalled();
+}
+
+#[test]
+fn run_until_stalled_returns_multiple_times() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ let cnt = Rc::new(Cell::new(0));
+
+ let cnt1 = cnt.clone();
+ spawn.spawn_local_obj(Box::pin(lazy(move |_| cnt1.set(cnt1.get() + 1))).into()).unwrap();
+ pool.run_until_stalled();
+ assert_eq!(cnt.get(), 1);
+
+ let cnt2 = cnt.clone();
+ spawn.spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into()).unwrap();
+ pool.run_until_stalled();
+ assert_eq!(cnt.get(), 2);
+}
+
+#[test]
+fn run_until_stalled_runs_spawned_sub_futures() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+ let cnt = Rc::new(Cell::new(0));
+
+ let inner_spawner = spawn.clone();
+ let cnt1 = cnt.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(poll_fn(move |_| {
+ cnt1.set(cnt1.get() + 1);
+
+ let cnt2 = cnt1.clone();
+ inner_spawner
+ .spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into())
+ .unwrap();
+
+ Poll::Pending
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ pool.run_until_stalled();
+ assert_eq!(cnt.get(), 2);
+}
+
+#[test]
+fn run_until_stalled_executes_all_ready() {
+ const ITER: usize = 200;
+ const PER_ITER: usize = 3;
+
+ let cnt = Rc::new(Cell::new(0));
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ for i in 0..ITER {
+ for _ in 0..PER_ITER {
+ spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap();
+
+ let cnt = cnt.clone();
+ spawn
+ .spawn_local_obj(
+ Box::pin(lazy(move |_| {
+ cnt.set(cnt.get() + 1);
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ // also add some pending tasks to test if they are ignored
+ spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap();
+ }
+ assert_eq!(cnt.get(), i * PER_ITER);
+ pool.run_until_stalled();
+ assert_eq!(cnt.get(), (i + 1) * PER_ITER);
+ }
+}
+
+#[test]
+#[should_panic]
+fn nesting_run() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ spawn
+ .spawn_obj(
+ Box::pin(lazy(|_| {
+ let mut pool = LocalPool::new();
+ pool.run();
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ pool.run();
+}
+
+#[test]
+#[should_panic]
+fn nesting_run_run_until_stalled() {
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ spawn
+ .spawn_obj(
+ Box::pin(lazy(|_| {
+ let mut pool = LocalPool::new();
+ pool.run_until_stalled();
+ }))
+ .into(),
+ )
+ .unwrap();
+
+ pool.run();
+}
+
+#[test]
+fn tasks_are_scheduled_fairly() {
+ let state = Rc::new(RefCell::new([0, 0]));
+
+ struct Spin {
+ state: Rc<RefCell<[i32; 2]>>,
+ idx: usize,
+ }
+
+ impl Future for Spin {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let mut state = self.state.borrow_mut();
+
+ if self.idx == 0 {
+ let diff = state[0] - state[1];
+
+ assert!(diff.abs() <= 1);
+
+ if state[0] >= 50 {
+ return Poll::Ready(());
+ }
+ }
+
+ state[self.idx] += 1;
+
+ if state[self.idx] >= 100 {
+ return Poll::Ready(());
+ }
+
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+
+ let mut pool = LocalPool::new();
+ let spawn = pool.spawner();
+
+ spawn.spawn_local_obj(Box::pin(Spin { state: state.clone(), idx: 0 }).into()).unwrap();
+
+ spawn.spawn_local_obj(Box::pin(Spin { state, idx: 1 }).into()).unwrap();
+
+ pool.run();
+}
+
+// Tests that the use of park/unpark in user-code has no
+// effect on the expected behavior of the executor.
+#[test]
+fn park_unpark_independence() {
+ let mut done = false;
+
+ let future = future::poll_fn(move |cx| {
+ if done {
+ return Poll::Ready(());
+ }
+ done = true;
+ cx.waker().clone().wake(); // (*)
+ // some user-code that temporarily parks the thread
+ let test = thread::current();
+ let latch = Arc::new(AtomicBool::new(false));
+ let signal = latch.clone();
+ thread::spawn(move || {
+ thread::sleep(Duration::from_millis(10));
+ signal.store(true, Ordering::SeqCst);
+ test.unpark()
+ });
+ while !latch.load(Ordering::Relaxed) {
+ thread::park();
+ }
+ Poll::Pending // Expect to be called again due to (*).
+ });
+
+ futures::executor::block_on(future)
+}
diff --git a/vendor/futures-io/.cargo-checksum.json b/vendor/futures-io/.cargo-checksum.json
new file mode 100644
index 000000000..77a401656
--- /dev/null
+++ b/vendor/futures-io/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"3a6ff8f01952afc090840100031af93a25a429da7baf4d134e559abdfd9c9b2a","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"575430be5c47352d85f36b44dcc2c2851a6a19e2384593415c4af22c6654cee7","src/lib.rs":"526e9700c28250b7512f122952257d57adc38eb001af92ef25bdb48a8c453175"},"package":"b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2"} \ No newline at end of file
diff --git a/vendor/futures-io/Cargo.toml b/vendor/futures-io/Cargo.toml
new file mode 100644
index 000000000..0f7cd6b14
--- /dev/null
+++ b/vendor/futures-io/Cargo.toml
@@ -0,0 +1,30 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.36"
+name = "futures-io"
+version = "0.3.19"
+description = "The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+
+[features]
+default = ["std"]
+std = []
+unstable = []
diff --git a/vendor/futures-io/LICENSE-APACHE b/vendor/futures-io/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-io/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-io/LICENSE-MIT b/vendor/futures-io/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-io/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-io/README.md b/vendor/futures-io/README.md
new file mode 100644
index 000000000..da6eec28b
--- /dev/null
+++ b/vendor/futures-io/README.md
@@ -0,0 +1,23 @@
+# futures-io
+
+The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-io = "0.3"
+```
+
+The current `futures-io` requires Rust 1.36 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/vendor/futures-io/src/lib.rs b/vendor/futures-io/src/lib.rs
new file mode 100644
index 000000000..e91eb7849
--- /dev/null
+++ b/vendor/futures-io/src/lib.rs
@@ -0,0 +1,558 @@
+//! Asynchronous I/O
+//!
+//! This crate contains the `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and
+//! `AsyncBufRead` traits, the asynchronous analogs to
+//! `std::io::{Read, Write, Seek, BufRead}`. The primary difference is
+//! that these traits integrate with the asynchronous task system.
+//!
+//! All items of this library are only available when the `std` feature of this
+//! library is activated, and it is activated by default.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)]
+// It cannot be included in the published code because this lint has false positives in the minimum required version.
+#![cfg_attr(test, warn(single_use_lifetimes))]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[cfg(feature = "std")]
+mod if_std {
+ use std::io;
+ use std::ops::DerefMut;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
+ // Re-export some types from `std::io` so that users don't have to deal
+ // with conflicts when `use`ing `futures::io` and `std::io`.
+ #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+ #[doc(no_inline)]
+ pub use io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom};
+
+ /// Read bytes asynchronously.
+ ///
+ /// This trait is analogous to the `std::io::Read` trait, but integrates
+ /// with the asynchronous task system. In particular, the `poll_read`
+ /// method, unlike `Read::read`, will automatically queue the current task
+ /// for wakeup and return if data is not yet available, rather than blocking
+ /// the calling thread.
+ pub trait AsyncRead {
+ /// Attempt to read from the `AsyncRead` into `buf`.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_read))`.
+ ///
+ /// If no data is available for reading, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// readable or is closed.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>>;
+
+ /// Attempt to read from the `AsyncRead` into `bufs` using vectored
+ /// IO operations.
+ ///
+ /// This method is similar to `poll_read`, but allows data to be read
+ /// into multiple buffers using a single operation.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_read))`.
+ ///
+ /// If no data is available for reading, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// readable or is closed.
+ /// By default, this method delegates to using `poll_read` on the first
+ /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which
+ /// support vectored IO should override this method.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_read_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<Result<usize>> {
+ for b in bufs {
+ if !b.is_empty() {
+ return self.poll_read(cx, b);
+ }
+ }
+
+ self.poll_read(cx, &mut [])
+ }
+ }
+
+ /// Write bytes asynchronously.
+ ///
+ /// This trait is analogous to the `std::io::Write` trait, but integrates
+ /// with the asynchronous task system. In particular, the `poll_write`
+ /// method, unlike `Write::write`, will automatically queue the current task
+ /// for wakeup and return if the writer cannot take more data, rather than blocking
+ /// the calling thread.
+ pub trait AsyncWrite {
+ /// Attempt to write bytes from `buf` into the object.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
+ ///
+ /// If the object is not ready for writing, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// writable or is closed.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ ///
+ /// `poll_write` must try to make progress by flushing the underlying object if
+ /// that is the only way the underlying object can become writable again.
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>>;
+
+ /// Attempt to write bytes from `bufs` into the object using vectored
+ /// IO operations.
+ ///
+ /// This method is similar to `poll_write`, but allows data from multiple buffers to be written
+ /// using a single operation.
+ ///
+ /// On success, returns `Poll::Ready(Ok(num_bytes_written))`.
+ ///
+ /// If the object is not ready for writing, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// writable or is closed.
+ ///
+ /// By default, this method delegates to using `poll_write` on the first
+ /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which
+ /// support vectored IO should override this method.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize>> {
+ for b in bufs {
+ if !b.is_empty() {
+ return self.poll_write(cx, b);
+ }
+ }
+
+ self.poll_write(cx, &[])
+ }
+
+ /// Attempt to flush the object, ensuring that any buffered data reach
+ /// their destination.
+ ///
+ /// On success, returns `Poll::Ready(Ok(()))`.
+ ///
+ /// If flushing cannot immediately complete, this method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make
+ /// progress towards flushing.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ ///
+ /// It only makes sense to do anything here if you actually buffer data.
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>>;
+
+ /// Attempt to close the object.
+ ///
+ /// On success, returns `Poll::Ready(Ok(()))`.
+ ///
+ /// If closing cannot immediately complete, this function returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make
+ /// progress towards closing.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>>;
+ }
+
+ /// Seek bytes asynchronously.
+ ///
+ /// This trait is analogous to the `std::io::Seek` trait, but integrates
+ /// with the asynchronous task system. In particular, the `poll_seek`
+ /// method, unlike `Seek::seek`, will automatically queue the current task
+ /// for wakeup and return if data is not yet available, rather than blocking
+ /// the calling thread.
+ pub trait AsyncSeek {
+ /// Attempt to seek to an offset, in bytes, in a stream.
+ ///
+ /// A seek beyond the end of a stream is allowed, but behavior is defined
+ /// by the implementation.
+ ///
+ /// If the seek operation completed successfully,
+ /// this method returns the new position from the start of the stream.
+ /// That position can be used later with [`SeekFrom::Start`].
+ ///
+ /// # Errors
+ ///
+ /// Seeking to a negative offset is considered an error.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<Result<u64>>;
+ }
+
+ /// Read bytes asynchronously.
+ ///
+ /// This trait is analogous to the `std::io::BufRead` trait, but integrates
+ /// with the asynchronous task system. In particular, the `poll_fill_buf`
+ /// method, unlike `BufRead::fill_buf`, will automatically queue the current task
+ /// for wakeup and return if data is not yet available, rather than blocking
+ /// the calling thread.
+ pub trait AsyncBufRead: AsyncRead {
+ /// Attempt to return the contents of the internal buffer, filling it with more data
+ /// from the inner reader if it is empty.
+ ///
+ /// On success, returns `Poll::Ready(Ok(buf))`.
+ ///
+ /// If no data is available for reading, the method returns
+ /// `Poll::Pending` and arranges for the current task (via
+ /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes
+ /// readable or is closed.
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`consume`] method to function properly. When calling this
+ /// method, none of the contents will be "read" in the sense that later
+ /// calling [`poll_read`] may return the same contents. As such, [`consume`] must
+ /// be called with the number of bytes that are consumed from this buffer to
+ /// ensure that the bytes are never returned twice.
+ ///
+ /// [`poll_read`]: AsyncRead::poll_read
+ /// [`consume`]: AsyncBufRead::consume
+ ///
+ /// An empty buffer returned indicates that the stream has reached EOF.
+ ///
+ /// # Implementation
+ ///
+ /// This function may not return errors of kind `WouldBlock` or
+ /// `Interrupted`. Implementations must convert `WouldBlock` into
+ /// `Poll::Pending` and either internally retry or convert
+ /// `Interrupted` into another error kind.
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>>;
+
+ /// Tells this buffer that `amt` bytes have been consumed from the buffer,
+ /// so they should no longer be returned in calls to [`poll_read`].
+ ///
+ /// This function is a lower-level call. It needs to be paired with the
+ /// [`poll_fill_buf`] method to function properly. This function does
+ /// not perform any I/O, it simply informs this object that some amount of
+ /// its buffer, returned from [`poll_fill_buf`], has been consumed and should
+ /// no longer be returned. As such, this function may do odd things if
+ /// [`poll_fill_buf`] isn't called before calling it.
+ ///
+ /// The `amt` must be `<=` the number of bytes in the buffer returned by
+ /// [`poll_fill_buf`].
+ ///
+ /// [`poll_read`]: AsyncRead::poll_read
+ /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf
+ fn consume(self: Pin<&mut Self>, amt: usize);
+ }
+
+ macro_rules! deref_async_read {
+ () => {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>> {
+ Pin::new(&mut **self).poll_read(cx, buf)
+ }
+
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<Result<usize>> {
+ Pin::new(&mut **self).poll_read_vectored(cx, bufs)
+ }
+ };
+ }
+
+ impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for Box<T> {
+ deref_async_read!();
+ }
+
+ impl<T: ?Sized + AsyncRead + Unpin> AsyncRead for &mut T {
+ deref_async_read!();
+ }
+
+ impl<P> AsyncRead for Pin<P>
+ where
+ P: DerefMut + Unpin,
+ P::Target: AsyncRead,
+ {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>> {
+ self.get_mut().as_mut().poll_read(cx, buf)
+ }
+
+ fn poll_read_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<Result<usize>> {
+ self.get_mut().as_mut().poll_read_vectored(cx, bufs)
+ }
+ }
+
+ macro_rules! delegate_async_read_to_stdio {
+ () => {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>> {
+ Poll::Ready(io::Read::read(&mut *self, buf))
+ }
+
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<Result<usize>> {
+ Poll::Ready(io::Read::read_vectored(&mut *self, bufs))
+ }
+ };
+ }
+
+ impl AsyncRead for &[u8] {
+ delegate_async_read_to_stdio!();
+ }
+
+ macro_rules! deref_async_write {
+ () => {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>> {
+ Pin::new(&mut **self).poll_write(cx, buf)
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize>> {
+ Pin::new(&mut **self).poll_write_vectored(cx, bufs)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ Pin::new(&mut **self).poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ Pin::new(&mut **self).poll_close(cx)
+ }
+ };
+ }
+
+ impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for Box<T> {
+ deref_async_write!();
+ }
+
+ impl<T: ?Sized + AsyncWrite + Unpin> AsyncWrite for &mut T {
+ deref_async_write!();
+ }
+
+ impl<P> AsyncWrite for Pin<P>
+ where
+ P: DerefMut + Unpin,
+ P::Target: AsyncWrite,
+ {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>> {
+ self.get_mut().as_mut().poll_write(cx, buf)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize>> {
+ self.get_mut().as_mut().poll_write_vectored(cx, bufs)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ self.get_mut().as_mut().poll_flush(cx)
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ self.get_mut().as_mut().poll_close(cx)
+ }
+ }
+
+ macro_rules! delegate_async_write_to_stdio {
+ () => {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>> {
+ Poll::Ready(io::Write::write(&mut *self, buf))
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize>> {
+ Poll::Ready(io::Write::write_vectored(&mut *self, bufs))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<()>> {
+ Poll::Ready(io::Write::flush(&mut *self))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ self.poll_flush(cx)
+ }
+ };
+ }
+
+ impl AsyncWrite for Vec<u8> {
+ delegate_async_write_to_stdio!();
+ }
+
+ macro_rules! deref_async_seek {
+ () => {
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<Result<u64>> {
+ Pin::new(&mut **self).poll_seek(cx, pos)
+ }
+ };
+ }
+
+ impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for Box<T> {
+ deref_async_seek!();
+ }
+
+ impl<T: ?Sized + AsyncSeek + Unpin> AsyncSeek for &mut T {
+ deref_async_seek!();
+ }
+
+ impl<P> AsyncSeek for Pin<P>
+ where
+ P: DerefMut + Unpin,
+ P::Target: AsyncSeek,
+ {
+ fn poll_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<Result<u64>> {
+ self.get_mut().as_mut().poll_seek(cx, pos)
+ }
+ }
+
+ macro_rules! deref_async_buf_read {
+ () => {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
+ Pin::new(&mut **self.get_mut()).poll_fill_buf(cx)
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ Pin::new(&mut **self).consume(amt)
+ }
+ };
+ }
+
+ impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for Box<T> {
+ deref_async_buf_read!();
+ }
+
+ impl<T: ?Sized + AsyncBufRead + Unpin> AsyncBufRead for &mut T {
+ deref_async_buf_read!();
+ }
+
+ impl<P> AsyncBufRead for Pin<P>
+ where
+ P: DerefMut + Unpin,
+ P::Target: AsyncBufRead,
+ {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
+ self.get_mut().as_mut().poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.get_mut().as_mut().consume(amt)
+ }
+ }
+
+ macro_rules! delegate_async_buf_read_to_stdio {
+ () => {
+ fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<&[u8]>> {
+ Poll::Ready(io::BufRead::fill_buf(self.get_mut()))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ io::BufRead::consume(self.get_mut(), amt)
+ }
+ };
+ }
+
+ impl AsyncBufRead for &[u8] {
+ delegate_async_buf_read_to_stdio!();
+ }
+}
+
+#[cfg(feature = "std")]
+pub use self::if_std::*;
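+
+// A small sketch of driving these traits by hand, assuming only the `&[u8]`
+// and `Vec<u8>` impls defined above. The hand-rolled no-op waker exists only
+// for the example: these implementors are always ready, so the waker is
+// never actually used.
+#[cfg(all(test, feature = "std"))]
+mod sketch {
+    use crate::{AsyncBufRead, AsyncRead, AsyncWrite};
+    use std::pin::Pin;
+    use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+
+    // A waker that does nothing; sufficient for polling always-ready objects.
+    fn noop_waker() -> Waker {
+        fn clone(_: *const ()) -> RawWaker {
+            RawWaker::new(std::ptr::null(), &VTABLE)
+        }
+        fn noop(_: *const ()) {}
+        static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, noop, noop, noop);
+        unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) }
+    }
+
+    #[test]
+    fn std_impls_are_always_ready() {
+        let waker = noop_waker();
+        let mut cx = Context::from_waker(&waker);
+
+        // `&[u8]` delegates `poll_read` to `std::io::Read`, so reads complete
+        // immediately and the slice advances past the bytes read.
+        let mut reader: &[u8] = b"hello";
+        let mut buf = [0u8; 3];
+        assert!(matches!(Pin::new(&mut reader).poll_read(&mut cx, &mut buf), Poll::Ready(Ok(3))));
+        assert_eq!(&buf, b"hel");
+        assert_eq!(reader, b"lo");
+
+        // `AsyncBufRead` exposes the remaining bytes via `poll_fill_buf`;
+        // `consume` then advances past the portion we claim to have read.
+        let mut buf_reader: &[u8] = b"hello";
+        match Pin::new(&mut buf_reader).poll_fill_buf(&mut cx) {
+            Poll::Ready(Ok(bytes)) => assert_eq!(bytes, b"hello"),
+            other => panic!("unexpected poll result: {:?}", other),
+        }
+        Pin::new(&mut buf_reader).consume(2);
+        assert_eq!(buf_reader, b"llo");
+
+        // `Vec<u8>` delegates `poll_write` to `std::io::Write` and simply
+        // buffers the bytes, so writes are also immediately ready.
+        let mut writer = Vec::new();
+        assert!(matches!(Pin::new(&mut writer).poll_write(&mut cx, b"hello"), Poll::Ready(Ok(5))));
+        assert_eq!(&writer[..], b"hello");
+    }
+}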
diff --git a/vendor/futures-macro/.cargo-checksum.json b/vendor/futures-macro/.cargo-checksum.json
new file mode 100644
index 000000000..4eda2b921
--- /dev/null
+++ b/vendor/futures-macro/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"46ecfcda3cd6979538f7543858fcfecb52c319cec94d041ab369139723ffa69d","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","src/executor.rs":"2a6c40ebf1fb70ac5bd0dfb991c7b945210c731b558b546f2ecb6d7a8976f3f6","src/join.rs":"e0d286558bd944fd02c1bd2501d13e62de2aa65e6bd3a2e0567488ac1a2374ed","src/lib.rs":"8324c4d5cc4e9e377b2f95afde751168d7e94196c1f2cb35802193c900ca0026","src/select.rs":"a7ed344932225fbe1b070d132a937184250c31385ac6764a8a6e6817413c7538","src/stream_select.rs":"5fb84834a40876ab1fd975c3af67594d0c5a4f8d724cb164db9bee71e70d14b1"},"package":"6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c"} \ No newline at end of file
diff --git a/vendor/futures-macro/Cargo.toml b/vendor/futures-macro/Cargo.toml
new file mode 100644
index 000000000..b192c17ff
--- /dev/null
+++ b/vendor/futures-macro/Cargo.toml
@@ -0,0 +1,34 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.45"
+name = "futures-macro"
+version = "0.3.19"
+description = "The futures-rs procedural macro implementations.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+
+[lib]
+proc-macro = true
+[dependencies.proc-macro2]
+version = "1.0"
+
+[dependencies.quote]
+version = "1.0"
+
+[dependencies.syn]
+version = "1.0.56"
+features = ["full"]
+
+[features]
diff --git a/vendor/futures-macro/LICENSE-APACHE b/vendor/futures-macro/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-macro/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-macro/LICENSE-MIT b/vendor/futures-macro/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-macro/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-macro/src/executor.rs b/vendor/futures-macro/src/executor.rs
new file mode 100644
index 000000000..40a091f94
--- /dev/null
+++ b/vendor/futures-macro/src/executor.rs
@@ -0,0 +1,55 @@
+use proc_macro::TokenStream;
+use proc_macro2::Span;
+use quote::{quote, quote_spanned, ToTokens};
+
+pub(crate) fn test(args: TokenStream, item: TokenStream) -> TokenStream {
+ if !args.is_empty() {
+ return syn::Error::new_spanned(proc_macro2::TokenStream::from(args), "invalid argument")
+ .to_compile_error()
+ .into();
+ }
+
+ let mut input = syn::parse_macro_input!(item as syn::ItemFn);
+
+ if input.sig.asyncness.take().is_none() {
+ return syn::Error::new_spanned(input.sig.fn_token, "Only async functions are supported")
+ .to_compile_error()
+ .into();
+ }
+
+ // If a type mismatch occurs, the current rustc points to the last statement.
+ let (last_stmt_start_span, last_stmt_end_span) = {
+ let mut last_stmt = input
+ .block
+ .stmts
+ .last()
+ .map(ToTokens::into_token_stream)
+ .unwrap_or_default()
+ .into_iter();
+ // `Span` on stable Rust has a limitation: it only points to the first
+ // token, not to the whole token stream. We can work around this
+ // limitation by using the first/last spans of the tokens, as
+ // `syn::Error::new_spanned` does.
+ let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span());
+ let end = last_stmt.last().map_or(start, |t| t.span());
+ (start, end)
+ };
+
+ let path = quote_spanned! {last_stmt_start_span=>
+ ::futures_test::__private
+ };
+ let body = &input.block;
+ input.block.stmts = vec![syn::Stmt::Expr(
+ syn::parse2(quote_spanned! {last_stmt_end_span=>
+ #path::block_on(async #body)
+ })
+ .unwrap(),
+ )];
+
+ let gen = quote! {
+ #[::core::prelude::v1::test]
+ #input
+ };
+
+ gen.into()
+}
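
A minimal usage sketch of the attribute implemented above, assuming it is consumed through the `futures-test` crate, which re-exports this implementation as `#[futures_test::test]`:

    #[cfg(test)]
    mod tests {
        // The attribute strips `async` from the test fn and wraps its body
        // in the facade's `block_on`, producing an ordinary `#[test]`.
        #[futures_test::test]
        async fn adds_asynchronously() {
            let sum = async { 40 + 2 }.await;
            assert_eq!(sum, 42);
        }
    }

    // Roughly what the macro produces (a sketch, not the exact token stream):
    //
    // #[test]
    // fn adds_asynchronously() {
    //     ::futures_test::__private::block_on(async {
    //         let sum = async { 40 + 2 }.await;
    //         assert_eq!(sum, 42);
    //     })
    // }
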
diff --git a/vendor/futures-macro/src/join.rs b/vendor/futures-macro/src/join.rs
new file mode 100644
index 000000000..d427da27a
--- /dev/null
+++ b/vendor/futures-macro/src/join.rs
@@ -0,0 +1,143 @@
+//! The futures-rs `join!` macro implementation.
+
+use proc_macro::TokenStream;
+use proc_macro2::{Span, TokenStream as TokenStream2};
+use quote::{format_ident, quote};
+use syn::parse::{Parse, ParseStream};
+use syn::{Expr, Ident, Token};
+
+#[derive(Default)]
+struct Join {
+ fut_exprs: Vec<Expr>,
+}
+
+impl Parse for Join {
+ fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
+ let mut join = Self::default();
+
+ while !input.is_empty() {
+ join.fut_exprs.push(input.parse::<Expr>()?);
+
+ if !input.is_empty() {
+ input.parse::<Token![,]>()?;
+ }
+ }
+
+ Ok(join)
+ }
+}
+
+fn bind_futures(fut_exprs: Vec<Expr>, span: Span) -> (Vec<TokenStream2>, Vec<Ident>) {
+ let mut future_let_bindings = Vec::with_capacity(fut_exprs.len());
+ let future_names: Vec<_> = fut_exprs
+ .into_iter()
+ .enumerate()
+ .map(|(i, expr)| {
+ let name = format_ident!("_fut{}", i, span = span);
+ future_let_bindings.push(quote! {
+ // Move future into a local so that it is pinned in one place and
+ // is no longer accessible by the end user.
+ let mut #name = __futures_crate::future::maybe_done(#expr);
+ });
+ name
+ })
+ .collect();
+
+ (future_let_bindings, future_names)
+}
+
+/// The `join!` macro.
+pub(crate) fn join(input: TokenStream) -> TokenStream {
+ let parsed = syn::parse_macro_input!(input as Join);
+
+ // should be def_site, but that's unstable
+ let span = Span::call_site();
+
+ let (future_let_bindings, future_names) = bind_futures(parsed.fut_exprs, span);
+
+ let poll_futures = future_names.iter().map(|fut| {
+ quote! {
+ __all_done &= __futures_crate::future::Future::poll(
+ unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }, __cx).is_ready();
+ }
+ });
+ let take_outputs = future_names.iter().map(|fut| {
+ quote! {
+ unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }.take_output().unwrap(),
+ }
+ });
+
+ TokenStream::from(quote! { {
+ #( #future_let_bindings )*
+
+ __futures_crate::future::poll_fn(move |__cx: &mut __futures_crate::task::Context<'_>| {
+ let mut __all_done = true;
+ #( #poll_futures )*
+ if __all_done {
+ __futures_crate::task::Poll::Ready((
+ #( #take_outputs )*
+ ))
+ } else {
+ __futures_crate::task::Poll::Pending
+ }
+ }).await
+ } })
+}
+
+/// The `try_join!` macro.
+pub(crate) fn try_join(input: TokenStream) -> TokenStream {
+ let parsed = syn::parse_macro_input!(input as Join);
+
+ // should be def_site, but that's unstable
+ let span = Span::call_site();
+
+ let (future_let_bindings, future_names) = bind_futures(parsed.fut_exprs, span);
+
+ let poll_futures = future_names.iter().map(|fut| {
+ quote! {
+ if __futures_crate::future::Future::poll(
+ unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }, __cx).is_pending()
+ {
+ __all_done = false;
+ } else if unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }.output_mut().unwrap().is_err() {
+ // `.err().unwrap()` rather than `.unwrap_err()` so that we don't introduce
+ // a `T: Debug` bound.
+ // Also, for an error type of ! any code after `err().unwrap()` is unreachable.
+ #[allow(unreachable_code)]
+ return __futures_crate::task::Poll::Ready(
+ __futures_crate::Err(
+ unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }.take_output().unwrap().err().unwrap()
+ )
+ );
+ }
+ }
+ });
+ let take_outputs = future_names.iter().map(|fut| {
+ quote! {
+ // `.ok().unwrap()` rather than `.unwrap()` so that we don't introduce
+ // an `E: Debug` bound.
+ // Also, for an ok type of ! any code after `ok().unwrap()` is unreachable.
+ #[allow(unreachable_code)]
+ unsafe { __futures_crate::Pin::new_unchecked(&mut #fut) }.take_output().unwrap().ok().unwrap(),
+ }
+ });
+
+ TokenStream::from(quote! { {
+ #( #future_let_bindings )*
+
+ #[allow(clippy::diverging_sub_expression)]
+ __futures_crate::future::poll_fn(move |__cx: &mut __futures_crate::task::Context<'_>| {
+ let mut __all_done = true;
+ #( #poll_futures )*
+ if __all_done {
+ __futures_crate::task::Poll::Ready(
+ __futures_crate::Ok((
+ #( #take_outputs )*
+ ))
+ )
+ } else {
+ __futures_crate::task::Poll::Pending
+ }
+ }).await
+ } })
+}
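
A minimal usage sketch of the macros expanded above, assuming they are consumed through the `futures` facade crate, which wraps `join_internal!` and `try_join_internal!`:

    use futures::{executor::block_on, join, try_join};

    fn main() {
        block_on(async {
            // `join!` drives both futures concurrently and yields a tuple of outputs.
            let (a, b) = join!(async { 1 }, async { 2 });
            assert_eq!((a, b), (1, 2));

            // `try_join!` yields `Ok` of all outputs, or the first error encountered.
            let res = try_join!(async { Ok::<i32, ()>(1) }, async { Ok::<i32, ()>(2) });
            assert_eq!(res, Ok((1, 2)));
        });
    }
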
diff --git a/vendor/futures-macro/src/lib.rs b/vendor/futures-macro/src/lib.rs
new file mode 100644
index 000000000..0afe34b83
--- /dev/null
+++ b/vendor/futures-macro/src/lib.rs
@@ -0,0 +1,61 @@
+//! The futures-rs procedural macro implementations.
+
+#![warn(rust_2018_idioms, single_use_lifetimes, unreachable_pub)]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+
+// Since https://github.com/rust-lang/cargo/pull/7700 `proc_macro` is part of the prelude for
+// proc-macro crates, but to support older compilers we still need this explicit `extern crate`.
+#[allow(unused_extern_crates)]
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+
+mod executor;
+mod join;
+mod select;
+mod stream_select;
+
+/// The `join!` macro.
+#[proc_macro]
+pub fn join_internal(input: TokenStream) -> TokenStream {
+ crate::join::join(input)
+}
+
+/// The `try_join!` macro.
+#[proc_macro]
+pub fn try_join_internal(input: TokenStream) -> TokenStream {
+ crate::join::try_join(input)
+}
+
+/// The `select!` macro.
+#[proc_macro]
+pub fn select_internal(input: TokenStream) -> TokenStream {
+ crate::select::select(input)
+}
+
+/// The `select_biased!` macro.
+#[proc_macro]
+pub fn select_biased_internal(input: TokenStream) -> TokenStream {
+ crate::select::select_biased(input)
+}
+
+// TODO: Change this to doc comment once rustdoc bug fixed: https://github.com/rust-lang/futures-rs/pull/2435
+// The `test` attribute.
+#[proc_macro_attribute]
+pub fn test_internal(input: TokenStream, item: TokenStream) -> TokenStream {
+ crate::executor::test(input, item)
+}
+
+/// The `stream_select!` macro.
+#[proc_macro]
+pub fn stream_select_internal(input: TokenStream) -> TokenStream {
+ crate::stream_select::stream_select(input.into())
+ .unwrap_or_else(syn::Error::into_compile_error)
+ .into()
+}
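
The code generated by these proc macros refers to a `__futures_crate` alias rather than naming any crate directly. A hypothetical declarative wrapper, in the style of the futures-util facade, shows how such an alias could be brought into scope; the actual wrapper lives in futures-util and may differ in detail:

    // Illustration only, not the actual futures-util source: the wrapper
    // aliases the facade's hidden re-export module as `__futures_crate`
    // and then forwards all tokens to the proc macro defined above.
    #[macro_export]
    macro_rules! join {
        ($($tokens:tt)*) => {{
            use $crate::__private as __futures_crate;
            $crate::__private::join_internal! {
                $( $tokens )*
            }
        }}
    }
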
diff --git a/vendor/futures-macro/src/select.rs b/vendor/futures-macro/src/select.rs
new file mode 100644
index 000000000..0c8e5f1ca
--- /dev/null
+++ b/vendor/futures-macro/src/select.rs
@@ -0,0 +1,330 @@
+//! The futures-rs `select!` macro implementation.
+
+use proc_macro::TokenStream;
+use proc_macro2::Span;
+use quote::{format_ident, quote};
+use syn::parse::{Parse, ParseStream};
+use syn::{parse_quote, Expr, Ident, Pat, Token};
+
+mod kw {
+ syn::custom_keyword!(complete);
+}
+
+struct Select {
+ // span of `complete`, then expression after `=> ...`
+ complete: Option<Expr>,
+ default: Option<Expr>,
+ normal_fut_exprs: Vec<Expr>,
+ normal_fut_handlers: Vec<(Pat, Expr)>,
+}
+
+#[allow(clippy::large_enum_variant)]
+enum CaseKind {
+ Complete,
+ Default,
+ Normal(Pat, Expr),
+}
+
+impl Parse for Select {
+ fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
+ let mut select = Self {
+ complete: None,
+ default: None,
+ normal_fut_exprs: vec![],
+ normal_fut_handlers: vec![],
+ };
+
+ while !input.is_empty() {
+ let case_kind = if input.peek(kw::complete) {
+ // `complete`
+ if select.complete.is_some() {
+ return Err(input.error("multiple `complete` cases found, only one allowed"));
+ }
+ input.parse::<kw::complete>()?;
+ CaseKind::Complete
+ } else if input.peek(Token![default]) {
+ // `default`
+ if select.default.is_some() {
+ return Err(input.error("multiple `default` cases found, only one allowed"));
+ }
+ input.parse::<Ident>()?;
+ CaseKind::Default
+ } else {
+ // `<pat> = <expr>`
+ let pat = input.parse()?;
+ input.parse::<Token![=]>()?;
+ let expr = input.parse()?;
+ CaseKind::Normal(pat, expr)
+ };
+
+ // `=> <expr>`
+ input.parse::<Token![=>]>()?;
+ let expr = input.parse::<Expr>()?;
+
+ // Commas after the expression are only optional if it's a `Block`
+ // or it is the last branch in the `match`.
+ let is_block = match expr {
+ Expr::Block(_) => true,
+ _ => false,
+ };
+ if is_block || input.is_empty() {
+ input.parse::<Option<Token![,]>>()?;
+ } else {
+ input.parse::<Token![,]>()?;
+ }
+
+ match case_kind {
+ CaseKind::Complete => select.complete = Some(expr),
+ CaseKind::Default => select.default = Some(expr),
+ CaseKind::Normal(pat, fut_expr) => {
+ select.normal_fut_exprs.push(fut_expr);
+ select.normal_fut_handlers.push((pat, expr));
+ }
+ }
+ }
+
+ Ok(select)
+ }
+}
+
+// Enum over all the cases in which the `select!` waiting has completed and the result
+// can be processed.
+//
+// `enum __PrivResult<_1, _2, ...> { _1(_1), _2(_2), ..., Complete }`
+fn declare_result_enum(
+ result_ident: Ident,
+ variants: usize,
+ complete: bool,
+ span: Span,
+) -> (Vec<Ident>, syn::ItemEnum) {
+ // "_0", "_1", "_2"
+ let variant_names: Vec<Ident> =
+ (0..variants).map(|num| format_ident!("_{}", num, span = span)).collect();
+
+ let type_parameters = &variant_names;
+ let variants = &variant_names;
+
+ let complete_variant = if complete { Some(quote!(Complete)) } else { None };
+
+ let enum_item = parse_quote! {
+ enum #result_ident<#(#type_parameters,)*> {
+ #(
+ #variants(#type_parameters),
+ )*
+ #complete_variant
+ }
+ };
+
+ (variant_names, enum_item)
+}
+
+/// The `select!` macro.
+pub(crate) fn select(input: TokenStream) -> TokenStream {
+ select_inner(input, true)
+}
+
+/// The `select_biased!` macro.
+pub(crate) fn select_biased(input: TokenStream) -> TokenStream {
+ select_inner(input, false)
+}
+
+fn select_inner(input: TokenStream, random: bool) -> TokenStream {
+ let parsed = syn::parse_macro_input!(input as Select);
+
+ // should be def_site, but that's unstable
+ let span = Span::call_site();
+
+ let enum_ident = Ident::new("__PrivResult", span);
+
+ let (variant_names, enum_item) = declare_result_enum(
+ enum_ident.clone(),
+ parsed.normal_fut_exprs.len(),
+ parsed.complete.is_some(),
+ span,
+ );
+
+ // bind non-`Ident` future exprs w/ `let`
+ let mut future_let_bindings = Vec::with_capacity(parsed.normal_fut_exprs.len());
+ let bound_future_names: Vec<_> = parsed
+ .normal_fut_exprs
+ .into_iter()
+ .zip(variant_names.iter())
+ .map(|(expr, variant_name)| {
+ match expr {
+ syn::Expr::Path(path) => {
+ // Don't bind futures that are already a path.
+ // This prevents creating redundant stack space
+ // for them.
+ // Passing Futures by path requires those Futures to implement Unpin.
+ // We check for this condition here in order to be able to
+ // safely use Pin::new_unchecked(&mut #path) later on.
+ future_let_bindings.push(quote! {
+ __futures_crate::async_await::assert_fused_future(&#path);
+ __futures_crate::async_await::assert_unpin(&#path);
+ });
+ path
+ }
+ _ => {
+ // Bind and pin the resulting Future on the stack. This is
+ // necessary to support direct select! calls on !Unpin
+ // Futures. The Future is not explicitly pinned here with
+ // a Pin call, but assumed as pinned. The actual Pin is
+ // created inside the poll() function below to defer the
+ // creation of the temporary pointer, which would otherwise
+ // increase the size of the generated Future.
+ // Safety: This is safe since the lifetime of the Future
+ // is totally constrained to the lifetime of the select!
+ // expression, and the Future can't get moved inside it
+ // (it is shadowed).
+ future_let_bindings.push(quote! {
+ let mut #variant_name = #expr;
+ });
+ parse_quote! { #variant_name }
+ }
+ }
+ })
+ .collect();
+
+ // For each future, make an `&mut dyn FnMut(&mut Context<'_>) -> Option<Poll<__PrivResult<...>>>`
+ // to use for polling that individual future. These will then be put in an array.
+ let poll_functions = bound_future_names.iter().zip(variant_names.iter()).map(
+ |(bound_future_name, variant_name)| {
+ // Below we lazily create the Pin for the Future.
+ // This is done in order to avoid allocating memory in the generator
+ // for the Pin variable.
+ // Safety: This is safe because one of the following condition applies:
+ // 1. The Future is passed by the caller by name, and we assert that
+ // it implements Unpin.
+ // 2. The Future is created in scope of the select! function and will
+ // not be moved for the duration of it. It is thereby stack-pinned
+ quote! {
+ let mut #variant_name = |__cx: &mut __futures_crate::task::Context<'_>| {
+ let mut #bound_future_name = unsafe {
+ __futures_crate::Pin::new_unchecked(&mut #bound_future_name)
+ };
+ if __futures_crate::future::FusedFuture::is_terminated(&#bound_future_name) {
+ __futures_crate::None
+ } else {
+ __futures_crate::Some(__futures_crate::future::FutureExt::poll_unpin(
+ &mut #bound_future_name,
+ __cx,
+ ).map(#enum_ident::#variant_name))
+ }
+ };
+ let #variant_name: &mut dyn FnMut(
+ &mut __futures_crate::task::Context<'_>
+ ) -> __futures_crate::Option<__futures_crate::task::Poll<_>> = &mut #variant_name;
+ }
+ },
+ );
+
+ let none_polled = if parsed.complete.is_some() {
+ quote! {
+ __futures_crate::task::Poll::Ready(#enum_ident::Complete)
+ }
+ } else {
+ quote! {
+ panic!("all futures in select! were completed,\
+ but no `complete =>` handler was provided")
+ }
+ };
+
+ let branches = parsed.normal_fut_handlers.into_iter().zip(variant_names.iter()).map(
+ |((pat, expr), variant_name)| {
+ quote! {
+ #enum_ident::#variant_name(#pat) => { #expr },
+ }
+ },
+ );
+ let branches = quote! { #( #branches )* };
+
+ let complete_branch = parsed.complete.map(|complete_expr| {
+ quote! {
+ #enum_ident::Complete => { #complete_expr },
+ }
+ });
+
+ let branches = quote! {
+ #branches
+ #complete_branch
+ };
+
+ let await_select_fut = if parsed.default.is_some() {
+ // For select! with default this returns the Poll result
+ quote! {
+ __poll_fn(&mut __futures_crate::task::Context::from_waker(
+ __futures_crate::task::noop_waker_ref()
+ ))
+ }
+ } else {
+ quote! {
+ __futures_crate::future::poll_fn(__poll_fn).await
+ }
+ };
+
+ let execute_result_expr = if let Some(default_expr) = &parsed.default {
+ // For select! with default __select_result is a Poll, otherwise not
+ quote! {
+ match __select_result {
+ __futures_crate::task::Poll::Ready(result) => match result {
+ #branches
+ },
+ _ => #default_expr
+ }
+ }
+ } else {
+ quote! {
+ match __select_result {
+ #branches
+ }
+ }
+ };
+
+ let shuffle = if random {
+ quote! {
+ __futures_crate::async_await::shuffle(&mut __select_arr);
+ }
+ } else {
+ quote!()
+ };
+
+ TokenStream::from(quote! { {
+ #enum_item
+
+ let __select_result = {
+ #( #future_let_bindings )*
+
+ let mut __poll_fn = |__cx: &mut __futures_crate::task::Context<'_>| {
+ let mut __any_polled = false;
+
+ #( #poll_functions )*
+
+ let mut __select_arr = [#( #variant_names ),*];
+ #shuffle
+ for poller in &mut __select_arr {
+ let poller: &mut &mut dyn FnMut(
+ &mut __futures_crate::task::Context<'_>
+ ) -> __futures_crate::Option<__futures_crate::task::Poll<_>> = poller;
+ match poller(__cx) {
+ __futures_crate::Some(x @ __futures_crate::task::Poll::Ready(_)) =>
+ return x,
+ __futures_crate::Some(__futures_crate::task::Poll::Pending) => {
+ __any_polled = true;
+ }
+ __futures_crate::None => {}
+ }
+ }
+
+ if !__any_polled {
+ #none_polled
+ } else {
+ __futures_crate::task::Poll::Pending
+ }
+ };
+
+ #await_select_fut
+ };
+
+ #execute_result_expr
+ } })
+}
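
A minimal usage sketch of the surface syntax parsed above, assuming it is consumed as `futures::select!`. Futures passed by name must be fused and `Unpin`, hence the `.fuse()` and `pin_mut!` calls:

    use futures::{executor::block_on, future::FutureExt, pin_mut, select};

    fn main() {
        block_on(async {
            let a = async { 1u32 }.fuse();
            let b = async { 2u32 }.fuse();
            pin_mut!(a, b);

            let first = select! {
                x = a => x,   // `<pat> = <expr> => <expr>` branch
                y = b => y,
                complete => unreachable!("a normal branch completes first"),
            };
            assert!(first == 1 || first == 2);
        });
    }
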
diff --git a/vendor/futures-macro/src/stream_select.rs b/vendor/futures-macro/src/stream_select.rs
new file mode 100644
index 000000000..9927b5307
--- /dev/null
+++ b/vendor/futures-macro/src/stream_select.rs
@@ -0,0 +1,113 @@
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote, ToTokens};
+use syn::{parse::Parser, punctuated::Punctuated, Expr, Index, Token};
+
+/// The `stream_select!` macro.
+pub(crate) fn stream_select(input: TokenStream) -> Result<TokenStream, syn::Error> {
+ let args = Punctuated::<Expr, Token![,]>::parse_terminated.parse2(input)?;
+ if args.len() < 2 {
+ return Ok(quote! {
+ compile_error!("stream select macro needs at least two arguments.")
+ });
+ }
+ let generic_idents = (0..args.len()).map(|i| format_ident!("_{}", i)).collect::<Vec<_>>();
+ let field_idents = (0..args.len()).map(|i| format_ident!("__{}", i)).collect::<Vec<_>>();
+ let field_idents_2 = (0..args.len()).map(|i| format_ident!("___{}", i)).collect::<Vec<_>>();
+ let field_indices = (0..args.len()).map(Index::from).collect::<Vec<_>>();
+ let args = args.iter().map(|e| e.to_token_stream());
+
+ Ok(quote! {
+ {
+ #[derive(Debug)]
+ struct StreamSelect<#(#generic_idents),*> (#(Option<#generic_idents>),*);
+
+ enum StreamEnum<#(#generic_idents),*> {
+ #(
+ #generic_idents(#generic_idents)
+ ),*,
+ None,
+ }
+
+ impl<ITEM, #(#generic_idents),*> __futures_crate::stream::Stream for StreamEnum<#(#generic_idents),*>
+ where #(#generic_idents: __futures_crate::stream::Stream<Item=ITEM> + ::std::marker::Unpin,)*
+ {
+ type Item = ITEM;
+
+ fn poll_next(mut self: ::std::pin::Pin<&mut Self>, cx: &mut __futures_crate::task::Context<'_>) -> __futures_crate::task::Poll<Option<Self::Item>> {
+ match self.get_mut() {
+ #(
+ Self::#generic_idents(#generic_idents) => ::std::pin::Pin::new(#generic_idents).poll_next(cx)
+ ),*,
+ Self::None => panic!("StreamEnum::None should never be polled!"),
+ }
+ }
+ }
+
+ impl<ITEM, #(#generic_idents),*> __futures_crate::stream::Stream for StreamSelect<#(#generic_idents),*>
+ where #(#generic_idents: __futures_crate::stream::Stream<Item=ITEM> + ::std::marker::Unpin,)*
+ {
+ type Item = ITEM;
+
+ fn poll_next(mut self: ::std::pin::Pin<&mut Self>, cx: &mut __futures_crate::task::Context<'_>) -> __futures_crate::task::Poll<Option<Self::Item>> {
+ let Self(#(ref mut #field_idents),*) = self.get_mut();
+ #(
+ let mut #field_idents_2 = false;
+ )*
+ let mut any_pending = false;
+ {
+ let mut stream_array = [#(#field_idents.as_mut().map(|f| StreamEnum::#generic_idents(f)).unwrap_or(StreamEnum::None)),*];
+ __futures_crate::async_await::shuffle(&mut stream_array);
+
+ for mut s in stream_array {
+ if let StreamEnum::None = s {
+ continue;
+ } else {
+ match __futures_crate::stream::Stream::poll_next(::std::pin::Pin::new(&mut s), cx) {
+ r @ __futures_crate::task::Poll::Ready(Some(_)) => {
+ return r;
+ },
+ __futures_crate::task::Poll::Pending => {
+ any_pending = true;
+ },
+ __futures_crate::task::Poll::Ready(None) => {
+ match s {
+ #(
+ StreamEnum::#generic_idents(_) => { #field_idents_2 = true; }
+ ),*,
+ StreamEnum::None => panic!("StreamEnum::None should never be polled!"),
+ }
+ },
+ }
+ }
+ }
+ }
+ #(
+ if #field_idents_2 {
+ *#field_idents = None;
+ }
+ )*
+ if any_pending {
+ __futures_crate::task::Poll::Pending
+ } else {
+ __futures_crate::task::Poll::Ready(None)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let mut s = (0, Some(0));
+ #(
+ if let Some(new_hint) = self.#field_indices.as_ref().map(|s| s.size_hint()) {
+ s.0 += new_hint.0;
+ // We can change this out for `.zip` when the MSRV is 1.46.0 or higher.
+ s.1 = s.1.and_then(|a| new_hint.1.map(|b| a + b));
+ }
+ )*
+ s
+ }
+ }
+
+ StreamSelect(#(Some(#args)),*)
+
+ }
+ })
+}
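
A minimal usage sketch, assuming the macro is consumed as `futures::stream_select!`:

    use futures::{executor::block_on, stream, stream_select, StreamExt};

    fn main() {
        block_on(async {
            // Both input streams yield the same item type and are `Unpin`.
            let evens = stream::iter(vec![0, 2, 4]);
            let odds = stream::iter(vec![1, 3, 5]);

            // Items are interleaved in an unspecified (shuffled) order; the
            // merged stream ends only after every input stream has ended.
            let merged: Vec<i32> = stream_select!(evens, odds).collect().await;
            assert_eq!(merged.len(), 6);
        });
    }
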
diff --git a/vendor/futures-sink/.cargo-checksum.json b/vendor/futures-sink/.cargo-checksum.json
new file mode 100644
index 000000000..e86b5f90f
--- /dev/null
+++ b/vendor/futures-sink/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"0d41bfc59fc07239fd6c7a084dbfe9379398a2e9a081160476229bf30da16ecd","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"a509e1ce84f285190130def6d2b9e3861988f9be725f7697f09fba347601d86f","src/lib.rs":"90c41f91e4b6764a218d4f337a9a46fba1e256f59f67b0afa5352ba92bf641c0"},"package":"e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508"} \ No newline at end of file
diff --git a/vendor/futures-sink/Cargo.toml b/vendor/futures-sink/Cargo.toml
new file mode 100644
index 000000000..e15063632
--- /dev/null
+++ b/vendor/futures-sink/Cargo.toml
@@ -0,0 +1,29 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.36"
+name = "futures-sink"
+version = "0.3.19"
+description = "The asynchronous `Sink` trait for the futures-rs library.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+
+[dependencies]
+
+[features]
+alloc = []
+default = ["std"]
+std = ["alloc"]
diff --git a/vendor/futures-sink/LICENSE-APACHE b/vendor/futures-sink/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-sink/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-sink/LICENSE-MIT b/vendor/futures-sink/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-sink/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-sink/README.md b/vendor/futures-sink/README.md
new file mode 100644
index 000000000..1d683e95b
--- /dev/null
+++ b/vendor/futures-sink/README.md
@@ -0,0 +1,23 @@
+# futures-sink
+
+The asynchronous `Sink` trait for the futures-rs library.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-sink = "0.3"
+```
+
+The current `futures-sink` requires Rust 1.36 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/vendor/futures-sink/src/lib.rs b/vendor/futures-sink/src/lib.rs
new file mode 100644
index 000000000..0328740ef
--- /dev/null
+++ b/vendor/futures-sink/src/lib.rs
@@ -0,0 +1,240 @@
+//! Asynchronous sinks
+//!
+//! This crate contains the `Sink` trait which allows values to be sent
+//! asynchronously.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)]
+// It cannot be included in the published code because these lints have false positives in the minimum required version.
+#![cfg_attr(test, warn(single_use_lifetimes))]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+use core::ops::DerefMut;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+/// A `Sink` is a value into which other values can be sent, asynchronously.
+///
+/// Basic examples of sinks include the sending side of:
+///
+/// - Channels
+/// - Sockets
+/// - Pipes
+///
+/// In addition to such "primitive" sinks, it's typical to layer additional
+/// functionality, such as buffering, on top of an existing sink.
+///
+/// Sending to a sink is "asynchronous" in the sense that the value may not be
+/// sent in its entirety immediately. Instead, values are sent in a two-phase
+/// way: first by initiating a send, and then by polling for completion. This
+/// two-phase setup is analogous to buffered writing in synchronous code, where
+/// writes often succeed immediately, but internally are buffered and are
+/// *actually* written only upon flushing.
+///
+/// In addition, the `Sink` may be *full*, in which case it is not even possible
+/// to start the sending process.
+///
+/// As with `Future` and `Stream`, the `Sink` trait is built from a few core
+/// required methods, and a host of default methods for working in a
+/// higher-level way. The `Sink::send_all` combinator is of particular
+/// importance: you can use it to send an entire stream to a sink, which is
+/// the simplest way to ultimately consume a stream.
+#[must_use = "sinks do nothing unless polled"]
+pub trait Sink<Item> {
+ /// The type of value produced by the sink when an error occurs.
+ type Error;
+
+ /// Attempts to prepare the `Sink` to receive a value.
+ ///
+ /// This method must be called and return `Poll::Ready(Ok(()))` prior to
+ /// each call to `start_send`.
+ ///
+ /// This method returns `Poll::Ready` once the underlying sink is ready to
+ /// receive data. If this method returns `Poll::Pending`, the current task
+ /// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready`
+ /// should be called again.
+ ///
+ /// In most cases, if the sink encounters an error, the sink will
+ /// permanently be unable to receive items.
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+ /// Begin the process of sending a value to the sink.
+ /// Each call to this function must be preceded by a successful call to
+ /// `poll_ready` which returned `Poll::Ready(Ok(()))`.
+ ///
+ /// As the name suggests, this method only *begins* the process of sending
+ /// the item. If the sink employs buffering, the item isn't fully processed
+ /// until the buffer is fully flushed. Since sinks are designed to work with
+ /// asynchronous I/O, the process of actually writing out the data to an
+ /// underlying object takes place asynchronously. **You *must* use
+ /// `poll_flush` or `poll_close` in order to guarantee completion of a
+ /// send**.
+ ///
+ /// Implementations of `poll_ready` and `start_send` will usually involve
+ /// flushing behind the scenes in order to make room for new messages.
+ /// It is only necessary to call `poll_flush` if you need to guarantee that
+ /// *all* of the items placed into the `Sink` have been sent.
+ ///
+ /// In most cases, if the sink encounters an error, the sink will
+ /// permanently be unable to receive items.
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error>;
+
+ /// Flush any remaining output from this sink.
+ ///
+ /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. If this
+ /// value is returned then it is guaranteed that all previous values sent
+ /// via `start_send` have been flushed.
+ ///
+ /// Returns `Poll::Pending` if there is more work left to do, in which
+ /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+ /// `poll_flush` should be called again.
+ ///
+ /// In most cases, if the sink encounters an error, the sink will
+ /// permanently be unable to receive items.
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+ /// Flush any remaining output and close this sink, if necessary.
+ ///
+ /// Returns `Poll::Ready(Ok(()))` when no buffered items remain and the sink
+ /// has been successfully closed.
+ ///
+ /// Returns `Poll::Pending` if there is more work left to do, in which
+ /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+ /// `poll_close` should be called again.
+ ///
+ /// If this function encounters an error, the sink should be considered to
+ /// have failed permanently, and no more `Sink` methods should be called.
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+}
+
+impl<S: ?Sized + Sink<Item> + Unpin, Item> Sink<Item> for &mut S {
+ type Error = S::Error;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_ready(cx)
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ Pin::new(&mut **self).start_send(item)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_close(cx)
+ }
+}
+
+impl<P, Item> Sink<Item> for Pin<P>
+where
+ P: DerefMut + Unpin,
+ P::Target: Sink<Item>,
+{
+ type Error = <P::Target as Sink<Item>>::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.get_mut().as_mut().poll_ready(cx)
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ self.get_mut().as_mut().start_send(item)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.get_mut().as_mut().poll_flush(cx)
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.get_mut().as_mut().poll_close(cx)
+ }
+}
+
+#[cfg(feature = "alloc")]
+mod if_alloc {
+ use super::*;
+ use core::convert::Infallible as Never;
+
+ impl<T> Sink<T> for alloc::vec::Vec<T> {
+ type Error = Never;
+
+ fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
+ // TODO: impl<T> Unpin for Vec<T> {}
+ unsafe { self.get_unchecked_mut() }.push(item);
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ impl<T> Sink<T> for alloc::collections::VecDeque<T> {
+ type Error = Never;
+
+ fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
+ // TODO: impl<T> Unpin for VecDeque<T> {}
+ unsafe { self.get_unchecked_mut() }.push_back(item);
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ impl<S: ?Sized + Sink<Item> + Unpin, Item> Sink<Item> for alloc::boxed::Box<S> {
+ type Error = S::Error;
+
+ fn poll_ready(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_ready(cx)
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ Pin::new(&mut **self).start_send(item)
+ }
+
+ fn poll_flush(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_flush(cx)
+ }
+
+ fn poll_close(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), Self::Error>> {
+ Pin::new(&mut **self).poll_close(cx)
+ }
+ }
+}
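
A minimal usage sketch driving the `Sink` impl for `Vec<T>` defined above, assuming the `SinkExt` helpers from futures-util are available through the `futures` facade; they call `poll_ready`, `start_send`, and `poll_flush` on the caller's behalf:

    use futures::{executor::block_on, sink::SinkExt};

    fn main() {
        block_on(async {
            let mut sink: Vec<u32> = Vec::new();

            // `feed` runs the `poll_ready` + `start_send` phases without flushing;
            // `flush` then drives `poll_flush` to completion.
            sink.feed(1).await.unwrap();
            sink.feed(2).await.unwrap();
            sink.flush().await.unwrap();

            assert_eq!(sink, vec![1, 2]);
        });
    }
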
diff --git a/vendor/futures-task/.cargo-checksum.json b/vendor/futures-task/.cargo-checksum.json
new file mode 100644
index 000000000..6a5f8a143
--- /dev/null
+++ b/vendor/futures-task/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"984bf931be396558123b5ebca33f16f0e24468c7b1aea54f7a11271c42f168e0","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"8d029604e66d3fc39468bd937859e642c843ad43f8dddfb4f9cbb467a111f9e6","build.rs":"f6e21c09f18cc405bd7048cb7a2958f92d5414b9ca6b301d137e120a84fa020a","no_atomic_cas.rs":"ff8be002b49a5cd9e4ca0db17b1c9e6b98e55f556319eb6b953dd6ff52c397a6","src/arc_wake.rs":"0e3f7d7883b75337b0b92ff55e477f0bf96f6eb08def7d953676a289fd9696ec","src/future_obj.rs":"10dab39a613d938823f09c3ecdbf7e199ac173a775fd8c5db675c7ecb3b429a2","src/lib.rs":"c55281988768d44d3305b2352c7ebb66e6449797239c07b14257a2d8e612e06b","src/noop_waker.rs":"41246601dab77f69bf09257afc3321031a5a31a7eda51787029870eda9922356","src/spawn.rs":"afcf46b98d62e78d2c974f91df32590bd78fe8c79031e4ae7accf9270e1f6224","src/waker.rs":"748d4a045ea9be605a67f3c20607cc3a5ba20036942c0016cc4299df0446507c","src/waker_ref.rs":"8e3ce1aea4f433ce04c2d15eb065d89582527c1a3a15886c445eb3a78f4fd0d6"},"package":"6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72"} \ No newline at end of file
diff --git a/vendor/futures-task/Cargo.toml b/vendor/futures-task/Cargo.toml
new file mode 100644
index 000000000..0cc029577
--- /dev/null
+++ b/vendor/futures-task/Cargo.toml
@@ -0,0 +1,33 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.36"
+name = "futures-task"
+version = "0.3.19"
+description = "Tools for working with tasks.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+
+[dependencies]
+
+[dev-dependencies]
+
+[features]
+alloc = []
+cfg-target-has-atomic = []
+default = ["std"]
+std = ["alloc"]
+unstable = []
diff --git a/vendor/futures-task/LICENSE-APACHE b/vendor/futures-task/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-task/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-task/LICENSE-MIT b/vendor/futures-task/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-task/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-task/README.md b/vendor/futures-task/README.md
new file mode 100644
index 000000000..79f12da88
--- /dev/null
+++ b/vendor/futures-task/README.md
@@ -0,0 +1,23 @@
+# futures-task
+
+Tools for working with tasks.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-task = "0.3"
+```
+
+The current `futures-task` requires Rust 1.36 or later.
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
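
As a quick orientation (not part of the upstream README), the snippet below sketches the crate's most basic entry point: building a `std::task::Context` from the no-op waker defined later in this diff. It assumes `futures-task = "0.3"` is already in `Cargo.toml` as shown above.

```rust
use std::task::Context;

use futures_task::noop_waker;

fn main() {
    // A waker that ignores wake-ups; useful for hand-rolled poll loops in tests.
    let waker = noop_waker();
    let _cx = Context::from_waker(&waker);
    // `_cx` can now be handed to `Future::poll`.
}
```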
diff --git a/vendor/futures-task/build.rs b/vendor/futures-task/build.rs
new file mode 100644
index 000000000..07b50bd55
--- /dev/null
+++ b/vendor/futures-task/build.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms, single_use_lifetimes)]
+
+use std::env;
+
+include!("no_atomic_cas.rs");
+
+// The rustc-cfg strings listed below are considered public API, but they are *unstable*
+// and outside of the normal semver guarantees:
+//
+// - `futures_no_atomic_cas`
+// Assume the target does *not* support atomic CAS operations.
+// This is usually detected automatically by the build script, but you may
+// need to enable it manually when building for custom targets or using
+// non-cargo build systems that don't run the build script.
+//
+// With the exceptions mentioned above, the rustc-cfg strings below are
+// *not* public API. Please let us know by opening a GitHub issue if your build
+// environment requires some way to enable these cfgs other than by executing
+// our build script.
+fn main() {
+ let target = match env::var("TARGET") {
+ Ok(target) => target,
+ Err(e) => {
+ println!(
+ "cargo:warning={}: unable to get TARGET environment variable: {}",
+ env!("CARGO_PKG_NAME"),
+ e
+ );
+ return;
+ }
+ };
+
+ // Note that this is `no_*`, not `has_*`. This allows treating
+ // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't
+ // run. This is needed for compatibility with non-cargo build systems that
+ // don't run the build script.
+ if NO_ATOMIC_CAS_TARGETS.contains(&&*target) {
+ println!("cargo:rustc-cfg=futures_no_atomic_cas");
+ }
+
+ println!("cargo:rerun-if-changed=no_atomic_cas.rs");
+}
diff --git a/vendor/futures-task/no_atomic_cas.rs b/vendor/futures-task/no_atomic_cas.rs
new file mode 100644
index 000000000..4708bf853
--- /dev/null
+++ b/vendor/futures-task/no_atomic_cas.rs
@@ -0,0 +1,13 @@
+// This file is @generated by no_atomic_cas.sh.
+// It is not intended for manual editing.
+
+const NO_ATOMIC_CAS_TARGETS: &[&str] = &[
+ "avr-unknown-gnu-atmega328",
+ "bpfeb-unknown-none",
+ "bpfel-unknown-none",
+ "msp430-none-elf",
+ "riscv32i-unknown-none-elf",
+ "riscv32imc-unknown-none-elf",
+ "thumbv4t-none-eabi",
+ "thumbv6m-none-eabi",
+];
diff --git a/vendor/futures-task/src/arc_wake.rs b/vendor/futures-task/src/arc_wake.rs
new file mode 100644
index 000000000..aa6de0fc4
--- /dev/null
+++ b/vendor/futures-task/src/arc_wake.rs
@@ -0,0 +1,49 @@
+use alloc::sync::Arc;
+
+/// A way of waking up a specific task.
+///
+/// By implementing this trait, types that are expected to be wrapped in an `Arc`
+/// can be converted into [`Waker`] objects.
+/// Those Wakers can be used to signal executors that one of the tasks they own
+/// is ready to be `poll`ed again.
+///
+/// Currently, there are two ways to convert `ArcWake` into [`Waker`]:
+///
+/// * [`waker`](super::waker()) converts `Arc<impl ArcWake>` into [`Waker`].
+/// * [`waker_ref`](super::waker_ref()) converts `&Arc<impl ArcWake>` into [`WakerRef`] that
+/// provides access to a [`&Waker`][`Waker`].
+///
+/// [`Waker`]: std::task::Waker
+/// [`WakerRef`]: super::WakerRef
+// Note: Send + Sync required because `Arc<T>` doesn't automatically imply
+// those bounds, but `Waker` implements them.
+pub trait ArcWake: Send + Sync {
+ /// Indicates that the associated task is ready to make progress and should
+ /// be `poll`ed.
+ ///
+ /// This function can be called from an arbitrary thread, including threads which
+ /// did not create the `ArcWake` based [`Waker`].
+ ///
+ /// Executors generally maintain a queue of "ready" tasks; `wake` should place
+ /// the associated task onto this queue.
+ ///
+ /// [`Waker`]: std::task::Waker
+ fn wake(self: Arc<Self>) {
+ Self::wake_by_ref(&self)
+ }
+
+ /// Indicates that the associated task is ready to make progress and should
+ /// be `poll`ed.
+ ///
+ /// This function can be called from an arbitrary thread, including threads which
+ /// did not create the `ArcWake` based [`Waker`].
+ ///
+ /// Executors generally maintain a queue of "ready" tasks; `wake_by_ref` should place
+ /// the associated task onto this queue.
+ ///
+ /// This function is similar to [`wake`](ArcWake::wake), but must not consume the provided data
+ /// pointer.
+ ///
+ /// [`Waker`]: std::task::Waker
+ fn wake_by_ref(arc_self: &Arc<Self>);
+}
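
For context (this sketch is not part of the vendored file): a minimal example of how an executor-like type might implement `ArcWake` and turn it into a `Waker` via the `waker` function defined in `src/waker.rs` further down. The `Task` type and its wake log are hypothetical.

```rust
use std::sync::{Arc, Mutex};

use futures_task::{waker, ArcWake};

/// A toy task handle: waking it just records the wake-up in a log.
struct Task {
    id: usize,
    wake_log: Mutex<Vec<usize>>,
}

impl ArcWake for Task {
    fn wake_by_ref(arc_self: &Arc<Self>) {
        // A real executor would push the task onto its ready queue here.
        arc_self.wake_log.lock().unwrap().push(arc_self.id);
    }
}

fn main() {
    let task = Arc::new(Task { id: 7, wake_log: Mutex::new(Vec::new()) });
    let w = waker(Arc::clone(&task));
    w.wake_by_ref();
    w.wake(); // consumes the waker
    assert_eq!(*task.wake_log.lock().unwrap(), vec![7, 7]);
}
```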
diff --git a/vendor/futures-task/src/future_obj.rs b/vendor/futures-task/src/future_obj.rs
new file mode 100644
index 000000000..48ec12beb
--- /dev/null
+++ b/vendor/futures-task/src/future_obj.rs
@@ -0,0 +1,337 @@
+use core::{
+ fmt,
+ future::Future,
+ marker::PhantomData,
+ mem,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
+/// A custom trait object for polling futures, roughly akin to
+/// `Box<dyn Future<Output = T> + 'a>`.
+///
+/// This custom trait object was introduced as currently it is not possible to
+/// take `dyn Trait` by value and `Box<dyn Trait>` is not available in no_std
+/// contexts.
+pub struct LocalFutureObj<'a, T> {
+ future: *mut (dyn Future<Output = T> + 'static),
+ drop_fn: unsafe fn(*mut (dyn Future<Output = T> + 'static)),
+ _marker: PhantomData<&'a ()>,
+}
+
+// As LocalFutureObj only holds pointers, even if we move it, the pointed to values won't move,
+// so this is safe as long as we don't provide any way for a user to directly access the pointers
+// and move their values.
+impl<T> Unpin for LocalFutureObj<'_, T> {}
+
+#[allow(single_use_lifetimes)]
+#[allow(clippy::transmute_ptr_to_ptr)]
+unsafe fn remove_future_lifetime<'a, T>(
+ ptr: *mut (dyn Future<Output = T> + 'a),
+) -> *mut (dyn Future<Output = T> + 'static) {
+ mem::transmute(ptr)
+}
+
+#[allow(single_use_lifetimes)]
+unsafe fn remove_drop_lifetime<'a, T>(
+ ptr: unsafe fn(*mut (dyn Future<Output = T> + 'a)),
+) -> unsafe fn(*mut (dyn Future<Output = T> + 'static)) {
+ mem::transmute(ptr)
+}
+
+impl<'a, T> LocalFutureObj<'a, T> {
+ /// Create a `LocalFutureObj` from a custom trait object representation.
+ #[inline]
+ pub fn new<F: UnsafeFutureObj<'a, T> + 'a>(f: F) -> Self {
+ Self {
+ future: unsafe { remove_future_lifetime(f.into_raw()) },
+ drop_fn: unsafe { remove_drop_lifetime(F::drop) },
+ _marker: PhantomData,
+ }
+ }
+
+ /// Converts the `LocalFutureObj` into a `FutureObj`.
+ ///
+ /// # Safety
+ ///
+ /// To make this operation safe one has to ensure that the `UnsafeFutureObj`
+ /// instance from which this `LocalFutureObj` was created actually
+ /// implements `Send`.
+ #[inline]
+ pub unsafe fn into_future_obj(self) -> FutureObj<'a, T> {
+ FutureObj(self)
+ }
+}
+
+impl<T> fmt::Debug for LocalFutureObj<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("LocalFutureObj").finish()
+ }
+}
+
+impl<'a, T> From<FutureObj<'a, T>> for LocalFutureObj<'a, T> {
+ #[inline]
+ fn from(f: FutureObj<'a, T>) -> Self {
+ f.0
+ }
+}
+
+impl<T> Future for LocalFutureObj<'_, T> {
+ type Output = T;
+
+ #[inline]
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ unsafe { Pin::new_unchecked(&mut *self.future).poll(cx) }
+ }
+}
+
+impl<T> Drop for LocalFutureObj<'_, T> {
+ fn drop(&mut self) {
+ unsafe { (self.drop_fn)(self.future) }
+ }
+}
+
+/// A custom trait object for polling futures, roughly akin to
+/// `Box<dyn Future<Output = T> + Send + 'a>`.
+///
+/// This custom trait object was introduced as currently it is not possible to
+/// take `dyn Trait` by value and `Box<dyn Trait>` is not available in no_std
+/// contexts.
+///
+/// You should generally not need to use this type outside of `no_std` or when
+/// implementing `Spawn`; consider using `BoxFuture` instead.
+pub struct FutureObj<'a, T>(LocalFutureObj<'a, T>);
+
+impl<T> Unpin for FutureObj<'_, T> {}
+unsafe impl<T> Send for FutureObj<'_, T> {}
+
+impl<'a, T> FutureObj<'a, T> {
+ /// Create a `FutureObj` from a custom trait object representation.
+ #[inline]
+ pub fn new<F: UnsafeFutureObj<'a, T> + Send>(f: F) -> Self {
+ Self(LocalFutureObj::new(f))
+ }
+}
+
+impl<T> fmt::Debug for FutureObj<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FutureObj").finish()
+ }
+}
+
+impl<T> Future for FutureObj<'_, T> {
+ type Output = T;
+
+ #[inline]
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ Pin::new(&mut self.0).poll(cx)
+ }
+}
+
+/// A custom implementation of a future trait object for `FutureObj`, providing
+/// a vtable with drop support.
+///
+/// This custom representation is typically used only in `no_std` contexts,
+/// where the default `Box`-based implementation is not available.
+///
+/// # Safety
+///
+/// See the safety notes on individual methods for what guarantees an
+/// implementor must provide.
+pub unsafe trait UnsafeFutureObj<'a, T>: 'a {
+ /// Convert an owned instance into a (conceptually owned) fat pointer.
+ ///
+ /// # Safety
+ ///
+ /// ## Implementor
+ ///
+ /// The trait implementor must guarantee that it is safe to convert the
+ /// provided `*mut (dyn Future<Output = T> + 'a)` into a `Pin<&mut (dyn
+ /// Future<Output = T> + 'a)>` and call methods on it, non-reentrantly,
+ /// until `UnsafeFutureObj::drop` is called with it.
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a);
+
+ /// Drops the future represented by the given fat pointer.
+ ///
+ /// # Safety
+ ///
+ /// ## Implementor
+ ///
+ /// The trait implementor must guarantee that it is safe to call this
+ /// function once per `into_raw` invocation.
+ ///
+ /// ## Caller
+ ///
+ /// The caller must ensure:
+ ///
+ /// * the pointer passed was obtained from an `into_raw` invocation from
+ /// this same trait object
+ /// * the pointer is not currently in use as a `Pin<&mut (dyn Future<Output
+ /// = T> + 'a)>`
+ /// * the pointer must not be used again after this function is called
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a));
+}
+
+unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for &'a mut F
+where
+ F: Future<Output = T> + Unpin + 'a,
+{
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ self as *mut dyn Future<Output = T>
+ }
+
+ unsafe fn drop(_ptr: *mut (dyn Future<Output = T> + 'a)) {}
+}
+
+unsafe impl<'a, T> UnsafeFutureObj<'a, T> for &'a mut (dyn Future<Output = T> + Unpin + 'a) {
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ self as *mut dyn Future<Output = T>
+ }
+
+ unsafe fn drop(_ptr: *mut (dyn Future<Output = T> + 'a)) {}
+}
+
+unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Pin<&'a mut F>
+where
+ F: Future<Output = T> + 'a,
+{
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ unsafe { self.get_unchecked_mut() as *mut dyn Future<Output = T> }
+ }
+
+ unsafe fn drop(_ptr: *mut (dyn Future<Output = T> + 'a)) {}
+}
+
+unsafe impl<'a, T> UnsafeFutureObj<'a, T> for Pin<&'a mut (dyn Future<Output = T> + 'a)> {
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ unsafe { self.get_unchecked_mut() as *mut dyn Future<Output = T> }
+ }
+
+ unsafe fn drop(_ptr: *mut (dyn Future<Output = T> + 'a)) {}
+}
+
+#[cfg(feature = "alloc")]
+mod if_alloc {
+ use super::*;
+ use alloc::boxed::Box;
+
+ unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Box<F>
+ where
+ F: Future<Output = T> + 'a,
+ {
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ Box::into_raw(self)
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Box::from_raw(ptr as *mut F))
+ }
+ }
+
+ unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Box<dyn Future<Output = T> + 'a> {
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ Box::into_raw(self)
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Box::from_raw(ptr))
+ }
+ }
+
+ unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Box<dyn Future<Output = T> + Send + 'a> {
+ fn into_raw(self) -> *mut (dyn Future<Output = T> + 'a) {
+ Box::into_raw(self)
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Box::from_raw(ptr))
+ }
+ }
+
+ unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for Pin<Box<F>>
+ where
+ F: Future<Output = T> + 'a,
+ {
+ fn into_raw(mut self) -> *mut (dyn Future<Output = T> + 'a) {
+ let ptr = unsafe { self.as_mut().get_unchecked_mut() as *mut _ };
+ mem::forget(self);
+ ptr
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Pin::from(Box::from_raw(ptr)))
+ }
+ }
+
+ unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Pin<Box<dyn Future<Output = T> + 'a>> {
+ fn into_raw(mut self) -> *mut (dyn Future<Output = T> + 'a) {
+ let ptr = unsafe { self.as_mut().get_unchecked_mut() as *mut _ };
+ mem::forget(self);
+ ptr
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Pin::from(Box::from_raw(ptr)))
+ }
+ }
+
+ unsafe impl<'a, T: 'a> UnsafeFutureObj<'a, T> for Pin<Box<dyn Future<Output = T> + Send + 'a>> {
+ fn into_raw(mut self) -> *mut (dyn Future<Output = T> + 'a) {
+ let ptr = unsafe { self.as_mut().get_unchecked_mut() as *mut _ };
+ mem::forget(self);
+ ptr
+ }
+
+ unsafe fn drop(ptr: *mut (dyn Future<Output = T> + 'a)) {
+ drop(Pin::from(Box::from_raw(ptr)))
+ }
+ }
+
+ impl<'a, F: Future<Output = ()> + Send + 'a> From<Box<F>> for FutureObj<'a, ()> {
+ fn from(boxed: Box<F>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a> From<Box<dyn Future<Output = ()> + Send + 'a>> for FutureObj<'a, ()> {
+ fn from(boxed: Box<dyn Future<Output = ()> + Send + 'a>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a, F: Future<Output = ()> + Send + 'a> From<Pin<Box<F>>> for FutureObj<'a, ()> {
+ fn from(boxed: Pin<Box<F>>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a> From<Pin<Box<dyn Future<Output = ()> + Send + 'a>>> for FutureObj<'a, ()> {
+ fn from(boxed: Pin<Box<dyn Future<Output = ()> + Send + 'a>>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a, F: Future<Output = ()> + 'a> From<Box<F>> for LocalFutureObj<'a, ()> {
+ fn from(boxed: Box<F>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a> From<Box<dyn Future<Output = ()> + 'a>> for LocalFutureObj<'a, ()> {
+ fn from(boxed: Box<dyn Future<Output = ()> + 'a>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a, F: Future<Output = ()> + 'a> From<Pin<Box<F>>> for LocalFutureObj<'a, ()> {
+ fn from(boxed: Pin<Box<F>>) -> Self {
+ Self::new(boxed)
+ }
+ }
+
+ impl<'a> From<Pin<Box<dyn Future<Output = ()> + 'a>>> for LocalFutureObj<'a, ()> {
+ fn from(boxed: Pin<Box<dyn Future<Output = ()> + 'a>>) -> Self {
+ Self::new(boxed)
+ }
+ }
+}
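
A small usage sketch (not part of the vendored file), assuming the `std` feature: it boxes an async block, converts it into a `FutureObj` via the `From` impl above, and polls it once with the crate's no-op waker.

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_task::{noop_waker, FutureObj};

fn main() {
    // Any `Send` future boxed as a trait object can become a `FutureObj`.
    let boxed: Pin<Box<dyn Future<Output = ()> + Send>> = Box::pin(async {});
    let mut obj: FutureObj<'static, ()> = FutureObj::from(boxed);

    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);
    // `FutureObj` is `Unpin`, so `Pin::new` suffices for polling.
    assert!(matches!(Pin::new(&mut obj).poll(&mut cx), Poll::Ready(())));
}
```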
diff --git a/vendor/futures-task/src/lib.rs b/vendor/futures-task/src/lib.rs
new file mode 100644
index 000000000..c72460744
--- /dev/null
+++ b/vendor/futures-task/src/lib.rs
@@ -0,0 +1,50 @@
+//! Tools for working with tasks.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)]
+// It cannot be included in the published code because this lint has false positives in the minimum required version.
+#![cfg_attr(test, warn(single_use_lifetimes))]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+mod spawn;
+pub use crate::spawn::{LocalSpawn, Spawn, SpawnError};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod arc_wake;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use crate::arc_wake::ArcWake;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod waker;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use crate::waker::waker;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod waker_ref;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use crate::waker_ref::{waker_ref, WakerRef};
+
+mod future_obj;
+pub use crate::future_obj::{FutureObj, LocalFutureObj, UnsafeFutureObj};
+
+mod noop_waker;
+pub use crate::noop_waker::noop_waker;
+pub use crate::noop_waker::noop_waker_ref;
+
+#[doc(no_inline)]
+pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
diff --git a/vendor/futures-task/src/noop_waker.rs b/vendor/futures-task/src/noop_waker.rs
new file mode 100644
index 000000000..f76a8a2e9
--- /dev/null
+++ b/vendor/futures-task/src/noop_waker.rs
@@ -0,0 +1,63 @@
+//! Utilities for creating zero-cost wakers that don't do anything.
+
+use core::ptr::null;
+use core::task::{RawWaker, RawWakerVTable, Waker};
+
+unsafe fn noop_clone(_data: *const ()) -> RawWaker {
+ noop_raw_waker()
+}
+
+unsafe fn noop(_data: *const ()) {}
+
+const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);
+
+const fn noop_raw_waker() -> RawWaker {
+ RawWaker::new(null(), &NOOP_WAKER_VTABLE)
+}
+
+/// Create a new [`Waker`] which does
+/// nothing when `wake()` is called on it.
+///
+/// # Examples
+///
+/// ```
+/// use futures::task::noop_waker;
+/// let waker = noop_waker();
+/// waker.wake();
+/// ```
+#[inline]
+pub fn noop_waker() -> Waker {
+ // FIXME: Since 1.46.0 we can use transmute in consts, allowing this function to be const.
+ unsafe { Waker::from_raw(noop_raw_waker()) }
+}
+
+/// Get a static reference to a [`Waker`] which
+/// does nothing when `wake()` is called on it.
+///
+/// # Examples
+///
+/// ```
+/// use futures::task::noop_waker_ref;
+/// let waker = noop_waker_ref();
+/// waker.wake_by_ref();
+/// ```
+#[inline]
+pub fn noop_waker_ref() -> &'static Waker {
+ struct SyncRawWaker(RawWaker);
+ unsafe impl Sync for SyncRawWaker {}
+
+ static NOOP_WAKER_INSTANCE: SyncRawWaker = SyncRawWaker(noop_raw_waker());
+
+ // SAFETY: `Waker` is #[repr(transparent)] over its `RawWaker`.
+ unsafe { &*(&NOOP_WAKER_INSTANCE.0 as *const RawWaker as *const Waker) }
+}
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ #[cfg(feature = "std")]
+ fn issue_2091_cross_thread_segfault() {
+ let waker = std::thread::spawn(super::noop_waker_ref).join().unwrap();
+ waker.wake_by_ref();
+ }
+}
diff --git a/vendor/futures-task/src/spawn.rs b/vendor/futures-task/src/spawn.rs
new file mode 100644
index 000000000..f4e63397b
--- /dev/null
+++ b/vendor/futures-task/src/spawn.rs
@@ -0,0 +1,192 @@
+use crate::{FutureObj, LocalFutureObj};
+use core::fmt;
+
+/// The `Spawn` trait allows for pushing futures onto an executor that will
+/// run them to completion.
+pub trait Spawn {
+ /// Spawns a future that will be run to completion.
+ ///
+ /// # Errors
+ ///
+ /// The executor may be unable to spawn tasks. Spawn errors should
+ /// represent relatively rare scenarios, such as the executor
+ /// having been shut down so that it is no longer able to accept
+ /// tasks.
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError>;
+
+ /// Determines whether the executor is able to spawn new tasks.
+ ///
+ /// This method will return `Ok` when the executor is *likely*
+ /// (but not guaranteed) to accept a subsequent spawn attempt.
+ /// Likewise, an `Err` return means that `spawn` is likely, but
+ /// not guaranteed, to yield an error.
+ #[inline]
+ fn status(&self) -> Result<(), SpawnError> {
+ Ok(())
+ }
+}
+
+/// The `LocalSpawn` is similar to [`Spawn`], but allows spawning futures
+/// that don't implement `Send`.
+pub trait LocalSpawn {
+ /// Spawns a future that will be run to completion.
+ ///
+ /// # Errors
+ ///
+ /// The executor may be unable to spawn tasks. Spawn errors should
+ /// represent relatively rare scenarios, such as the executor
+ /// having been shut down so that it is no longer able to accept
+ /// tasks.
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError>;
+
+ /// Determines whether the executor is able to spawn new tasks.
+ ///
+ /// This method will return `Ok` when the executor is *likely*
+ /// (but not guaranteed) to accept a subsequent spawn attempt.
+ /// Likewise, an `Err` return means that `spawn` is likely, but
+ /// not guaranteed, to yield an error.
+ #[inline]
+ fn status_local(&self) -> Result<(), SpawnError> {
+ Ok(())
+ }
+}
+
+/// An error that occurred during spawning.
+pub struct SpawnError {
+ _priv: (),
+}
+
+impl fmt::Debug for SpawnError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("SpawnError").field(&"shutdown").finish()
+ }
+}
+
+impl fmt::Display for SpawnError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Executor is shutdown")
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for SpawnError {}
+
+impl SpawnError {
+ /// Spawning failed because the executor has been shut down.
+ pub fn shutdown() -> Self {
+ Self { _priv: () }
+ }
+
+ /// Check whether spawning failed due to the executor being shut down.
+ pub fn is_shutdown(&self) -> bool {
+ true
+ }
+}
+
+impl<Sp: ?Sized + Spawn> Spawn for &Sp {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ Sp::spawn_obj(self, future)
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ Sp::status(self)
+ }
+}
+
+impl<Sp: ?Sized + Spawn> Spawn for &mut Sp {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ Sp::spawn_obj(self, future)
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ Sp::status(self)
+ }
+}
+
+impl<Sp: ?Sized + LocalSpawn> LocalSpawn for &Sp {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ Sp::spawn_local_obj(self, future)
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ Sp::status_local(self)
+ }
+}
+
+impl<Sp: ?Sized + LocalSpawn> LocalSpawn for &mut Sp {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ Sp::spawn_local_obj(self, future)
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ Sp::status_local(self)
+ }
+}
+
+#[cfg(feature = "alloc")]
+mod if_alloc {
+ use super::*;
+ use alloc::{boxed::Box, rc::Rc};
+
+ impl<Sp: ?Sized + Spawn> Spawn for Box<Sp> {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_obj(future)
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ (**self).status()
+ }
+ }
+
+ impl<Sp: ?Sized + LocalSpawn> LocalSpawn for Box<Sp> {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_local_obj(future)
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ (**self).status_local()
+ }
+ }
+
+ impl<Sp: ?Sized + Spawn> Spawn for Rc<Sp> {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_obj(future)
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ (**self).status()
+ }
+ }
+
+ impl<Sp: ?Sized + LocalSpawn> LocalSpawn for Rc<Sp> {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_local_obj(future)
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ (**self).status_local()
+ }
+ }
+
+ #[cfg(not(futures_no_atomic_cas))]
+ impl<Sp: ?Sized + Spawn> Spawn for alloc::sync::Arc<Sp> {
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_obj(future)
+ }
+
+ fn status(&self) -> Result<(), SpawnError> {
+ (**self).status()
+ }
+ }
+
+ #[cfg(not(futures_no_atomic_cas))]
+ impl<Sp: ?Sized + LocalSpawn> LocalSpawn for alloc::sync::Arc<Sp> {
+ fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ (**self).spawn_local_obj(future)
+ }
+
+ fn status_local(&self) -> Result<(), SpawnError> {
+ (**self).status_local()
+ }
+ }
+}
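
To make the trait concrete (this sketch is not part of the vendored file): a toy `Spawn` implementation that simply runs each spawned `FutureObj` to completion inline. It borrows `futures_executor::block_on` from the sibling `futures-executor` crate in this same diff purely for illustration; a real executor would queue the task instead.

```rust
use futures_task::{FutureObj, Spawn, SpawnError};

/// A toy spawner: "spawning" just runs the task to completion on the spot.
struct InlineSpawner;

impl Spawn for InlineSpawner {
    fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        // A real executor would enqueue the task rather than block here.
        futures_executor::block_on(future);
        Ok(())
    }
}

fn main() -> Result<(), SpawnError> {
    let spawner = InlineSpawner;
    spawner.spawn_obj(FutureObj::new(Box::new(async {
        println!("hello from a spawned task");
    })))?;
    Ok(())
}
```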
diff --git a/vendor/futures-task/src/waker.rs b/vendor/futures-task/src/waker.rs
new file mode 100644
index 000000000..a7310a07a
--- /dev/null
+++ b/vendor/futures-task/src/waker.rs
@@ -0,0 +1,59 @@
+use super::arc_wake::ArcWake;
+use alloc::sync::Arc;
+use core::mem;
+use core::task::{RawWaker, RawWakerVTable, Waker};
+
+pub(super) fn waker_vtable<W: ArcWake>() -> &'static RawWakerVTable {
+ &RawWakerVTable::new(
+ clone_arc_raw::<W>,
+ wake_arc_raw::<W>,
+ wake_by_ref_arc_raw::<W>,
+ drop_arc_raw::<W>,
+ )
+}
+
+/// Creates a [`Waker`] from an `Arc<impl ArcWake>`.
+///
+/// The returned [`Waker`] will call
+/// [`ArcWake.wake()`](ArcWake::wake) if awoken.
+pub fn waker<W>(wake: Arc<W>) -> Waker
+where
+ W: ArcWake + 'static,
+{
+ let ptr = Arc::into_raw(wake) as *const ();
+
+ unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::<W>())) }
+}
+
+// FIXME: panics on Arc::clone / refcount changes could wreak havoc on the
+// code here. We should guard against this by aborting.
+
+#[allow(clippy::redundant_clone)] // The clone here isn't actually redundant.
+unsafe fn increase_refcount<T: ArcWake>(data: *const ()) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = mem::ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+ // Now increase refcount, but don't drop new refcount either
+ let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
+}
+
+// used by `waker_ref`
+unsafe fn clone_arc_raw<T: ArcWake>(data: *const ()) -> RawWaker {
+ increase_refcount::<T>(data);
+ RawWaker::new(data, waker_vtable::<T>())
+}
+
+unsafe fn wake_arc_raw<T: ArcWake>(data: *const ()) {
+ let arc: Arc<T> = Arc::from_raw(data as *const T);
+ ArcWake::wake(arc);
+}
+
+// used by `waker_ref`
+unsafe fn wake_by_ref_arc_raw<T: ArcWake>(data: *const ()) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = mem::ManuallyDrop::new(Arc::<T>::from_raw(data as *const T));
+ ArcWake::wake_by_ref(&arc);
+}
+
+unsafe fn drop_arc_raw<T: ArcWake>(data: *const ()) {
+ drop(Arc::<T>::from_raw(data as *const T))
+}
diff --git a/vendor/futures-task/src/waker_ref.rs b/vendor/futures-task/src/waker_ref.rs
new file mode 100644
index 000000000..791c69012
--- /dev/null
+++ b/vendor/futures-task/src/waker_ref.rs
@@ -0,0 +1,63 @@
+use super::arc_wake::ArcWake;
+use super::waker::waker_vtable;
+use alloc::sync::Arc;
+use core::marker::PhantomData;
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+use core::task::{RawWaker, Waker};
+
+/// A [`Waker`] that is only valid for a given lifetime.
+///
+/// Note: this type implements [`Deref<Target = Waker>`](std::ops::Deref),
+/// so it can be used to get a `&Waker`.
+#[derive(Debug)]
+pub struct WakerRef<'a> {
+ waker: ManuallyDrop<Waker>,
+ _marker: PhantomData<&'a ()>,
+}
+
+impl<'a> WakerRef<'a> {
+ /// Create a new [`WakerRef`] from a [`Waker`] reference.
+ pub fn new(waker: &'a Waker) -> Self {
+ // copy the underlying (raw) waker without calling a clone,
+ // as we won't call Waker::drop either.
+ let waker = ManuallyDrop::new(unsafe { core::ptr::read(waker) });
+ Self { waker, _marker: PhantomData }
+ }
+
+ /// Create a new [`WakerRef`] from a [`Waker`] that must not be dropped.
+ ///
+ /// Note: this is for rare cases where the caller created a [`Waker`] in
+ /// an unsafe way (that will be valid only for a lifetime to be determined
+ /// by the caller), and the [`Waker`] doesn't need to or must not be
+ /// destroyed.
+ pub fn new_unowned(waker: ManuallyDrop<Waker>) -> Self {
+ Self { waker, _marker: PhantomData }
+ }
+}
+
+impl Deref for WakerRef<'_> {
+ type Target = Waker;
+
+ fn deref(&self) -> &Waker {
+ &self.waker
+ }
+}
+
+/// Creates a reference to a [`Waker`] from a reference to `Arc<impl ArcWake>`.
+///
+/// The resulting [`Waker`] will call
+/// [`ArcWake.wake()`](ArcWake::wake) if awoken.
+#[inline]
+pub fn waker_ref<W>(wake: &Arc<W>) -> WakerRef<'_>
+where
+ W: ArcWake,
+{
+ // simply copy the pointer instead of using Arc::into_raw,
+ // as we don't actually keep a refcount by using ManuallyDrop.
+ let ptr = (&**wake as *const W) as *const ();
+
+ let waker =
+ ManuallyDrop::new(unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::<W>())) });
+ WakerRef::new_unowned(waker)
+}
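
A brief sketch (not part of the vendored file) of the intended use: `waker_ref` borrows an `Arc<impl ArcWake>` for the duration of a poll without cloning or touching the refcount. The `CountingWake` type and the hand-written `YieldOnce` future are hypothetical.

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};

use futures_task::{waker_ref, ArcWake};

/// Counts how many times it is woken.
struct CountingWake(AtomicUsize);

impl ArcWake for CountingWake {
    fn wake_by_ref(arc_self: &Arc<Self>) {
        arc_self.0.fetch_add(1, Ordering::SeqCst);
    }
}

/// Returns `Pending` once (waking itself), then completes.
struct YieldOnce(bool);

impl Future for YieldOnce {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let this = self.get_mut(); // fine: `YieldOnce` is `Unpin`
        if this.0 {
            Poll::Ready(())
        } else {
            this.0 = true;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

fn main() {
    let wake = Arc::new(CountingWake(AtomicUsize::new(0)));

    // Borrow a `Waker` from the Arc without cloning it.
    let w = waker_ref(&wake);
    let mut cx = Context::from_waker(&w);

    let mut fut = YieldOnce(false);
    assert_eq!(Pin::new(&mut fut).poll(&mut cx), Poll::Pending);
    assert_eq!(wake.0.load(Ordering::SeqCst), 1);
    assert_eq!(Pin::new(&mut fut).poll(&mut cx), Poll::Ready(()));
}
```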
diff --git a/vendor/futures-util/.cargo-checksum.json b/vendor/futures-util/.cargo-checksum.json
new file mode 100644
index 000000000..2db230686
--- /dev/null
+++ b/vendor/futures-util/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"fb0e0a9cd20fff0e75bddf0f79a2c06200f5177b338a3bb2b3cd27f3b08eaff7","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"727f58ddc0ad88244d784b56a090410b39805e256e3415d4ae20cf2ba471d260","benches/futures_unordered.rs":"5eb8280be8d8fb7bd5fb103ce20db10f618f47e180a402105e0d5e9f8c9fe35a","benches_disabled/bilock.rs":"ab8b47fba5cfa5366477ef1034a042efde9b0aff6b77110d1f3a2715ab7508e8","build.rs":"f6e21c09f18cc405bd7048cb7a2958f92d5414b9ca6b301d137e120a84fa020a","no_atomic_cas.rs":"ff8be002b49a5cd9e4ca0db17b1c9e6b98e55f556319eb6b953dd6ff52c397a6","src/abortable.rs":"d88dd2501ed379b3540bd971d367b4629755d6d9f264e7b54ae59eea0ff83623","src/async_await/join_mod.rs":"8f83c0001df867f5eb47a4174bf4a0c0b548f8ff3be3b532e0c759ad981b87da","src/async_await/mod.rs":"3d25c343cc3e789d3f982cdacd6f8ed91511ba656c3923da310700f318f423a4","src/async_await/pending.rs":"7971ec1d5d89ad80390e2a0c51e396257b2e78f1436cce79ea2b55ac2f13b328","src/async_await/poll.rs":"440c19a89fd42b12da09ff48a69523b5a8a5baea0bcd2f860589a0ab996ed781","src/async_await/random.rs":"daf229cd01595d38ef0f6284865fe2f60ed3b8134f7a15c82564b97ff3a5be98","src/async_await/select_mod.rs":"414c7fb7923cfe21116d558bf3cd1a6ae5bef4ed01f9877f0e7cb3e42ee6c79d","src/async_await/stream_select_mod.rs":"9a51338914cbb1502619fed591dfe4fc676919499b9d041898e59f630fe5e7f0","src/compat/compat01as03.rs":"07c83b0820e1b6a5a792043e0b0eb5be70f365e5462c8a1fa5fa6b0f62f9e57d","src/compat/compat03as01.rs":"7cf29e57f8ee14b64123b3d2c16dceced25af5491a5ef81b655b2de2e9587fbe","src/compat/executor.rs":"a0edd7baa2192daa14a5a26bf53bd229defaeac538d2ec771c60992c6dfeb393","src/compat/mod.rs":"6cf3412f6a3f9ee8406118ea75de65468a83febc6ba61bdbad69261f0cfea02e","src/fns.rs":"f8e396128791169098a38a82c3c28aaa6dd5d40718635f7cc30b59b32f7110b8","src/future/abortable.rs":"373ce61c0c7c31718ff572113503bb88f55e3b49ed5d028a3dfafd69070f44c1","src/future/either.rs":"d7fb8728fac6fccbbc14afaa16ebd51c2633dd653277289c089b5c478742b37f","src/future/future/catch_unwind.rs":"08b0ac049cdee28325d378209aa5bb4d91b14a29ddd9c2b0e5c661b61f9cfcfe","src/future/future/flatten.rs":"5bf9846cef8dec5dcc38b992653e11146bc149a0d3efc09b1f8268bd29de0b2b","src/future/future/fuse.rs":"6531a95dc1917b2a5724b35e364faa741143811afc654a45c360111e9807864c","src/future/future/map.rs":"de607c2a4d80d2bddb590781c37328ddd294bb9d5064a9ecb99455244239b597","src/future/future/mod.rs":"ecfac09dcba801cede7c58acfaa76a9ab76d26a3f4c968d66c2a49caa57faefe","src/future/future/remote_handle.rs":"2ae17a409569b32c78e20026a8ecdf667352c2597a4a0a8deefa4761fafcb223","src/future/future/shared.rs":"ebf46b4bf428676bf553015e384f7f41da03558481aaa38deb1e8a896d212dae","src/future/join.rs":"38b55fc7cdbbdaaa525e51f8ce09783dbbcb65eabfd7de9f46610593e0bbef17","src/future/join_all.rs":"294a8e76862f447dea8759f78f7224d885a826c80193ceef2389345221e6e3c0","src/future/lazy.rs":"d161fc4108a97348c1becbbd5ba8fccb7225dcf1d81c097666f5c8b40718251d","src/future/maybe_done.rs":"559e41cb170f9fe7246d2a5b112527a9f9cbca63b8a5a872b3aa9c861f70f307","src/future/mod.rs":"51e018100362f20b071225268f1d81f25c8e9664e94730af199069c2692bf26a","src/future/option.rs":"73daca814800b91b707753dcfe074265372b0077fae2504ea6efddc713453579","src/future/pending.rs":"86598a5d5ade7c0416566deb280150bac34bd4703c997d2c7af572342d8d6d02","src/future/poll_fn.rs":"8e54bf57d60e01d496ae31df35e0b96868f4bda504c024a14f51ab723d67885f","src/future/poll_immediate.rs":"7e199fc102894c9095de17af60
2a7c8f05d427269aefce5d71cd5136d54659c0","src/future/ready.rs":"c9860ccd8ac529f44f66dee73ca9b9d7f1b1b3e5e9e4dc70c59640c752553d58","src/future/select.rs":"a582b1ed9c1e6cd8dcaa80b5f45e2176ed4a1740fe303f7143e29cab8e0dbc22","src/future/select_all.rs":"179b8168370e2c7105e348fdfbeb965eb746336d9660aa7fbc9185d681ae8c2d","src/future/select_ok.rs":"fed28e1fd368cdd465d297a84ea9436a00757eff4b34e592d94d7747b3bf4996","src/future/try_future/into_future.rs":"d966bde7b06a88443f0efd877e95f91541778c4e713f3f4b66e00ca5d3f352b6","src/future/try_future/mod.rs":"4733167d93e8f728e87f79b7d4dfe66de6afb306735ca76f9f843270286b3f6b","src/future/try_future/try_flatten.rs":"16c02e1780bd312b8b386e41c1d9dd4bcc4e8ef10f26007364f857b3adcc6e99","src/future/try_future/try_flatten_err.rs":"130f3fc3fd95a19f4e4a50e69301106fab02f77d0faf3aac9c473a92b826c2ca","src/future/try_join.rs":"1836931f8ba32da41c6810e6acc0ea2fee75b74b3153e760c4542cb12b220540","src/future/try_join_all.rs":"d4f262e80bb5347597c75b25de3c7784ffb4bd766227d6dc70cdeb77a38f4a5d","src/future/try_maybe_done.rs":"1cce46b2ee43ad51b7c5f9c02bc90a890af32bc549ce99098a2c8813508051e1","src/future/try_select.rs":"2e1b7e0b0cb7343f766fade87269093558db206f7fbe7dddfa3143885e17bac4","src/io/allow_std.rs":"a125959c255fd344399fb0be19218a8ee7d613ce2485d6df9cdbc2ed5d3987df","src/io/buf_reader.rs":"46a1e24046c5bc2ab8f266e3d904281bec3ab4ba6c13d4213a52599b57b8de66","src/io/buf_writer.rs":"d6666b8dde60eefbb7fa69da4a2eea2b34ea0e4a85e21e5ac6e83cc680ea9140","src/io/chain.rs":"12f508fc39c3234a71a0f886505245c5d659aed09c7d874b1bd8ca0a0d456cf3","src/io/close.rs":"9832210a870637198fa58642cdf2779afab71f2e31a9953e663fa6854bd73ac7","src/io/copy.rs":"cb2466dcd7ea8bb1f07d00c03e66ed55abf71fe4be6937adc9f533ef9d99fb2d","src/io/copy_buf.rs":"e9a5f6aac8375e298bddb332f23d8b626d056ce452b58f772a05df7e2cd326cf","src/io/cursor.rs":"612bdb8b4055d26816fb0e4c3e9859b06b3d08c99e4a27ed4946c95e219a29ab","src/io/empty.rs":"6ae40b4bc8fc41572abad2d013285d78d8df445868d41fac77bde508ec9bc1a5","src/io/fill_buf.rs":"4f217fed8eb3f66dbde2371c3fbcfa9420d38ba20da544a0658584e5778aa47d","src/io/flush.rs":"0c9b588dfd9da039dc123ba9448ac31ca21ee3da0a164a21f6c2c182183d43e2","src/io/into_sink.rs":"ab5bdb12bff62672175b69b8c9f5a4bbbea716b9cf89169ed6a723ab43da9df8","src/io/line_writer.rs":"16c151c68d89b7c2ab929c4a782539b1ad512b723eed9b544f50f1ff06f0b661","src/io/lines.rs":"ccfa24e212a610aad0c81042cfa64ada820c4305ba0e911a2c16221d7867468e","src/io/mod.rs":"599416e4d7dd5c6523a87bf778001ce0c3848ee760827af0b69c7b7aafd8a8a0","src/io/read.rs":"4ea675a83cec98a22c9c4731ff980209f0cf67f63c71871cd1deed53c1266345","src/io/read_exact.rs":"d27d5ec082ccb1b051d1292e029e398926a164c82e1f0c983ca9928410aa2abe","src/io/read_line.rs":"a3c62ca2034089a22ea9567e0b3cab0dfe09309782fcf151d92311a77223e37c","src/io/read_to_end.rs":"5e9e38dc087623dac5a3ae3ad329ed44ffe4f6205a78e546adadc3ffb76703fc","src/io/read_to_string.rs":"2c073d05f0361acda1f0172b24fd4c5da61840ac925a5bdfae9111c697759d1b","src/io/read_until.rs":"354507ce95242a735940f0aaa6ef11cc7d6d0505ae148f05277ce6e7537f168a","src/io/read_vectored.rs":"bd7f442c92f2cb320075d0983b0d08d51c23078898d72e6c2857cf6c7ad4cec7","src/io/repeat.rs":"53bc472e4bd7d286bf90765ce574f13b7aabc871c4f04f712da7cea160491390","src/io/seek.rs":"9863e9fb6495eb6e1f8c45c283c8a6993b9bdb1462f75a3e525e135c6840dec7","src/io/sink.rs":"30a503631d196e5da92c386d0afc1af9656a5f7682456cfa2489a2c30a05cac5","src/io/split.rs":"2aa567452b713497d5b85813980b69e888aee32be14492c92404d261fd50eb09","src/io/take.rs":"c53fec5b5e8c3742b7e60e6ebfa625cf2e566fbea193fb1eee2f0a8e56
1d63d5","src/io/window.rs":"295d7dc18ad101642003cd67687242e4bdba11552cfb7f18c521cbff369e6f71","src/io/write.rs":"60670eb00f999f2e2c43b099759a7fb030325b323744d88c9d20f75926ec30df","src/io/write_all.rs":"c88930fd23c88cc01fef2c6118d53d33996c011c4abf28778a27646fe1f7896a","src/io/write_all_vectored.rs":"53becf89c031bf4c3073f0903ce809eee7606b1b4fbeb518605875badba216d3","src/io/write_vectored.rs":"bc98ff4a709cb75cd9ffedefa8ef251089a49906b98e142d76447ddf4ac098bb","src/lib.rs":"384447fb9bfcd3b110656979cca71b53c3abe72690e970c30563c1baba27fd74","src/lock/bilock.rs":"f1b955cb2e10c906933e63bbfb8e953af634428ce15faf3696b07d11da0cc279","src/lock/mod.rs":"e964dd0d999ccf9d9d167d7ecbfeb7a66d180a80eeb6fd41ec3fa698c1067674","src/lock/mutex.rs":"782375724e4abbdaf3221eb422911c37fe13e794e6f30ea819acece7303c3368","src/never.rs":"2066481ab04921269cfa768cb8b778a035ab6aa49ec404d9ac0aeb07a4bf6094","src/sink/buffer.rs":"33a7380f8232225a8e9ac5ee138fd095979efa3a64f9fecf5fcaf2e78fcbc355","src/sink/close.rs":"f2f31c884f048163abebd4f5a877b7b4306f7d02beae428325636fd00ed42ca9","src/sink/drain.rs":"392d9487003fcd55a3373c3e2558f6549b9633b82fc08b5a665a573b137ae9f7","src/sink/err_into.rs":"ced2998b2b0b792d80f7543523c9e07e8f5d20a4336cae93084b995e46671b15","src/sink/fanout.rs":"66dcde056e0bbee4e0074d331838ed2743dc872ea1597f05d61970523dc34926","src/sink/feed.rs":"64b9d296d37aedde37e1421c459ebcd9a7e8814db905996996167850124f3b3f","src/sink/flush.rs":"fbba344f428ca7636541ba013f7db2ece480b404a9e0b421c5537552d61e2492","src/sink/map_err.rs":"0f68f444ef13fe7115164be855c3b7b1d269e1119e69fcdad1706988255641f1","src/sink/mod.rs":"37cf379170f3099992eb59f3181be4c4e4a5c2d3581dbe424d22ab360840d321","src/sink/send.rs":"56aaba9aa4a562e0af39473a5779206d91b0acb1fced4fc06cd8b959d1897524","src/sink/send_all.rs":"a8e4956604fe73e321b0a3896c2018bc5c27149f2862f8406112db140b3aa2dd","src/sink/unfold.rs":"428080b76213b504fcc981d2f05840f1a93c8db305301af1cf5852b6c47c4be5","src/sink/with.rs":"850cd3b96304df1f38360a0bc60b02d485535e399ef7642acdd9add7876867d8","src/sink/with_flat_map.rs":"5e0f527b33ee8f1cc6a6a46d45b6d74dad5c735d88b2cb24e1cb34fdc6ef501b","src/stream/abortable.rs":"935d79aa44d793f4abe87ca27a9e4a20891500488cf942693cd2756d65b3aab2","src/stream/empty.rs":"5000c856186408a17f68bbef432d4a1a3edb7fb5a07ed8699342fef04b10a181","src/stream/futures_ordered.rs":"46217ed3802d052724a4a3166370f74e6d5fcd248d6f983caea10bc3335a1f0e","src/stream/futures_unordered/abort.rs":"bdfece9f91accafd5122be36d628c37c5b219ac0eecec181267840fbb1e95a45","src/stream/futures_unordered/iter.rs":"e8862300ddb0504090c059b3dba2425af6335874cb6ef393fef26e87788b6d3e","src/stream/futures_unordered/mod.rs":"da8b7adb93b50c1c6507c1af4b3c1ec0de4ce33f68f64dc876749c07e231d642","src/stream/futures_unordered/ready_to_run_queue.rs":"6223c67519c1ae35cbc449dd5654fda422aaba61a2344cc0b190c73b4b1e9f80","src/stream/futures_unordered/task.rs":"ab2de99b2a42c1da70d56e4be43c0ef72e8d5a4504adc0f870f8d28afd332a37","src/stream/iter.rs":"609fa821a460e901a54ae51f8da58220881157cef02b8b7b8c9e4321c2d05a23","src/stream/mod.rs":"33873b13535443cce2d49fdb3f0b359286bfc74f3553419fe7174cf7c1840da0","src/stream/once.rs":"d7b70adabad1f10af711ac3dcef33fd4c287e9852fdb678406e7ff350ba8fd47","src/stream/pending.rs":"84aaa15c8bbb17a250da5b1b5f0c7f6717410915d63340a3fcbf098bebe19d6f","src/stream/poll_fn.rs":"35952ea514b8aade14a3934d7777006475f50bbf0c5b50141710e31637f980be","src/stream/poll_immediate.rs":"e7a53ff8275ebe89dab8f9b984cce2ee0fde0a828e540b77c5500ca017d5bb98","src/stream/repeat.rs":"e4e4a9b6f2fca72bcbf098c3ac0c4a41323a840741d4dce9d
9416464b7e8bd0d","src/stream/repeat_with.rs":"525780d24f3f99152b879765ca6eab99bcc0c757dc6654b6635c099b93ea654d","src/stream/select.rs":"28eb422c0eca9fd02778a6003004471b3489db09746a70e617a506303ea8b81d","src/stream/select_all.rs":"4358fa26cfe8c1b56f19d077b841bbdfe22f7adc043034fd6313b004e94e310d","src/stream/select_with_strategy.rs":"7fe249fd92fc66ad2bfa5a2dec7148b2a0102b3a8d915b2103bfbcd1b8870447","src/stream/stream/all.rs":"43cfb69de0ea991497d26d0aeb02091f10eb241ef93758b54c5e7aced5b63b63","src/stream/stream/any.rs":"2582da02f9a1ce2bd0af87a64b65188fc93686c5e3dd9128e89e5f57c1d70e43","src/stream/stream/buffer_unordered.rs":"66c3f4bd2fabfbdf6a4033dfaed44dd0262b68e6533509029c984ae037e35392","src/stream/stream/buffered.rs":"eabd0c0e50eaaaf0a92a7b39fdb5b77e068bbfbbfd5e216a09c3a6e0c1fc102d","src/stream/stream/catch_unwind.rs":"b2e801ff744d5d9e17177ec1156b0ab67bdd56b94c618ed8590344ec8a0f35e7","src/stream/stream/chain.rs":"ba1a206b3ce0160186021f5c1e4c95a770d26b843e3640e52609a2facaf756ac","src/stream/stream/chunks.rs":"d3aaddc05779ef70e2f0e59570cf6d5a1d231ae4885c8b8b2e4813fc02832562","src/stream/stream/collect.rs":"977ed1970b46029517ecc45f4af924b8e585d3770f01b2a0d2df0e01519ca50f","src/stream/stream/concat.rs":"171ea941b45c0295ed978c3f318a449ea295e33cb4ea82c764f4e9e7c48ad5de","src/stream/stream/count.rs":"ff218aea3d2d2456c8163926ea0c357b2752e92578e5fd4bec6b789fe1246556","src/stream/stream/cycle.rs":"ed7e3d15e7b1adec5ad5789b0d3186b5995a3353cc974fb7f41a72f6d8ad4cbb","src/stream/stream/enumerate.rs":"fc7565d21d39565790859eeac9ae8dd74123a9d15b88258d3abe894f1876cc39","src/stream/stream/filter.rs":"3dd080914e6770f8790455fc9cbedf30c5f44a589ef99b19112344c75f9e9042","src/stream/stream/filter_map.rs":"3a8a3e06dfac48dd3f7b6b1a552a51a3e31ea943e012dd35729d461a1fcfad80","src/stream/stream/flatten.rs":"69493fc106a1447abe109fd54375bb30363f7bc419463a8f835e4c80d97f2186","src/stream/stream/fold.rs":"75d61d4321db1bcbbdd1a0102d9ad60206275777167c008fc8953e50cd978a09","src/stream/stream/for_each.rs":"07bca889821bad18ff083e54abe679fbeb8cd19c086581c2f2722cba6b42263f","src/stream/stream/for_each_concurrent.rs":"4e1e7eb3d4ccfae0e8000651b75834e2960a7f9c62ab92dba35a0bdbbf5bbb21","src/stream/stream/forward.rs":"cd024ba1a3d5098d3ff2d5178a12e068916cc4307284b00c18dbc54b554a5560","src/stream/stream/fuse.rs":"061c5385f12f80c7906cb15ddb8f455ced6ce21d1de9a97de9db2616407c0cac","src/stream/stream/into_future.rs":"b46ad45cc03ddd778a9ffaa0d603c8ee0b411f49333100160959942cde9588bd","src/stream/stream/map.rs":"b91bdd5b33821a50c9b5034261a14f89ff1a9d541ab99b9d9a6921b12a5d434e","src/stream/stream/mod.rs":"106daa368424cca9e35aab6ec1bc177570aeca1dfaec57b188682ae3aaee11b7","src/stream/stream/next.rs":"7b4d5a22b5e00aa191ea82346bb1f392121cc68692864a8230e462d59e622928","src/stream/stream/peek.rs":"1ef5f11b1f0cc11d01690bebe282d8953ff8e860597f4ce21208fc274be5e98e","src/stream/stream/ready_chunks.rs":"4e6deb3a6d453fd4e982bdba416188311a72daca1b218d4e9ef20819fc09b5b2","src/stream/stream/scan.rs":"54489c8efef60dbf3c35ee803afee5c5ea7c364fb9b68939a04956e46febb856","src/stream/stream/select_next_some.rs":"0094eccc96cfe78d9b6d0a9bdb82cada8fb7929770a3ac00ffcb5441d7dc4f51","src/stream/stream/skip.rs":"61f7ec7fe25663d2c87cffaad19ed27eda032842edb8af731b521025b244f120","src/stream/stream/skip_while.rs":"75ee580e0111200758d0c0fe154276007ff233db6b63a8223f0baeac1db18874","src/stream/stream/split.rs":"fa4adea18708dad384eb347260cfb965d30c40a677e15f9267f97aa382c6306c","src/stream/stream/take.rs":"505f83d341dc84eeab46f5e66adfa21a36207cb66f2394dd6a256576db665827","src/stream/s
tream/take_until.rs":"0f1fa7d158192a5dee32392dfdd062c15dab6d246b0ca267e91aae490d7d7fdb","src/stream/stream/take_while.rs":"51007dbde8434fd22c5ef2481a99463f11b3785e4bdeb73fa583a17f29f5c228","src/stream/stream/then.rs":"9dcfdc741d1d7dea0100aa9f1feadb932f8530f7a7d3071befc1e490a6cb50ed","src/stream/stream/unzip.rs":"9ad4db7522f66a9133e464c13d6c95682c797ae5a986e60d3aba358c65031fe8","src/stream/stream/zip.rs":"56f30f513e11754f59ead5ca4112014cba9278d02796eb8fe0937ae8bb4d44cd","src/stream/try_stream/and_then.rs":"22ca6e547d0db2e07b0a928c48118a532adaf28d85c60ab84b8366dbfeab9161","src/stream/try_stream/into_async_read.rs":"f584fd8dfdab90328fc89eac78306caa308d43f3035c1c5489e55384007e77ed","src/stream/try_stream/into_stream.rs":"4fee94e89956a42871fc4a0cdba7ae1b7d4265e884528799cd227c9dd851acce","src/stream/try_stream/mod.rs":"7a83406bfbefe4fe651b9535035fef80d52e995a44dcd0e16105bf274e0fef06","src/stream/try_stream/or_else.rs":"8cc7f602da1ffee21bf06c5203aa0427514a83b67941ae264459f1eff8dc8aec","src/stream/try_stream/try_buffer_unordered.rs":"64e698ea6aefbe7e32d48e737553b20b9cde5c258963bb20486b48b7d6899660","src/stream/try_stream/try_buffered.rs":"7546d396026bf700d3f37f55d5a4e39abe5fb05919e6a269feeb8be7af19256c","src/stream/try_stream/try_chunks.rs":"58b8c5af4914eae3698e528b0361532b391bf4b3463f4c790e43c8069cfe1bd7","src/stream/try_stream/try_collect.rs":"1132751055a51b936ed28b83c4eed7dee3f40a4be13ea374b30086e864e1ee09","src/stream/try_stream/try_concat.rs":"f2330ebeeab30273e9ac0e8600bfe2f405ce671f6386e688b3afb1d2fdd7c2c6","src/stream/try_stream/try_filter.rs":"7c2a09cdb1753ecb49a44d1d84742cb2050a999a8148448b31bb404eb2d17154","src/stream/try_stream/try_filter_map.rs":"5afc6ab35e2b425e37ed217a6bb038459c8828d6bcd6a3699883d6df071dc7e7","src/stream/try_stream/try_flatten.rs":"e05614d86a27ab8386476eea35fd424c07e5f7f99cf0401d63a6655eb7ca1247","src/stream/try_stream/try_fold.rs":"b96aa2fe1a16f625d5045028a86ff8684dcf5198ef8c7c072f52f39aeaa8b619","src/stream/try_stream/try_for_each.rs":"3f3901d618333b740d470eb02fcbb645df92483493872298bb7bd0382646028a","src/stream/try_stream/try_for_each_concurrent.rs":"78a94a77f329862c2a245ec3add97e49c534985f0d9da98f205b7fa3c7c08df3","src/stream/try_stream/try_next.rs":"6e29473153db1435906e79f7eaa13ce9da842d4528ba9eb1c0034665feacc565","src/stream/try_stream/try_skip_while.rs":"c0259ec70bdf4a81c1fa569275766e3e65db9d5715c81e93ada04817c1835add","src/stream/try_stream/try_take_while.rs":"54927dfa95ff58b542a1d7382f564eeae5e02e633c948b1a39ac09bc7e92f5f5","src/stream/try_stream/try_unfold.rs":"aaf0f4857a4ec8233ac842ae509f29e5a210827a0bb40cfc0dc3e858f153d2b4","src/stream/unfold.rs":"8b2feb00f979562b43064eb078d53a160cdb3c65deed17ec25a05938df2d370f","src/task/mod.rs":"074ce7f3869663d2e768bb08ea201ed1be176e13edd4150f201bc1ea362170d3","src/task/spawn.rs":"26bbcf1d65e1467de0ecdad2b56f464a510cda7c1933427d69a1b50459836489","src/unfold_state.rs":"ffe848071a99d6afcdbe8281a8a77a559a7dde434fc41f734c90e6b9b5d8a5af"},"package":"d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164"} \ No newline at end of file
diff --git a/vendor/futures-util/Cargo.toml b/vendor/futures-util/Cargo.toml
new file mode 100644
index 000000000..f18cfbcf9
--- /dev/null
+++ b/vendor/futures-util/Cargo.toml
@@ -0,0 +1,93 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.45"
+name = "futures-util"
+version = "0.3.19"
+description = "Common utilities and extension traits for the futures-rs library.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+[dependencies.futures-channel]
+version = "0.3.19"
+features = ["std"]
+optional = true
+default-features = false
+
+[dependencies.futures-core]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-io]
+version = "0.3.19"
+features = ["std"]
+optional = true
+default-features = false
+
+[dependencies.futures-macro]
+version = "=0.3.19"
+optional = true
+default-features = false
+
+[dependencies.futures-sink]
+version = "0.3.19"
+optional = true
+default-features = false
+
+[dependencies.futures-task]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures_01]
+version = "0.1.25"
+optional = true
+package = "futures"
+
+[dependencies.memchr]
+version = "2.2"
+optional = true
+
+[dependencies.pin-project-lite]
+version = "0.2.4"
+
+[dependencies.pin-utils]
+version = "0.1.0"
+
+[dependencies.slab]
+version = "0.4.2"
+optional = true
+
+[dependencies.tokio-io]
+version = "0.1.9"
+optional = true
+[dev-dependencies.tokio]
+version = "0.1.11"
+
+[features]
+alloc = ["futures-core/alloc", "futures-task/alloc"]
+async-await = []
+async-await-macro = ["async-await", "futures-macro"]
+bilock = []
+cfg-target-has-atomic = []
+channel = ["std", "futures-channel"]
+compat = ["std", "futures_01"]
+default = ["std", "async-await", "async-await-macro"]
+io = ["std", "futures-io", "memchr"]
+io-compat = ["io", "compat", "tokio-io"]
+sink = ["futures-sink"]
+std = ["alloc", "futures-core/std", "futures-task/std", "slab"]
+unstable = ["futures-core/unstable", "futures-task/unstable"]
+write-all-vectored = ["io"]
diff --git a/vendor/futures-util/LICENSE-APACHE b/vendor/futures-util/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures-util/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures-util/LICENSE-MIT b/vendor/futures-util/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures-util/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures-util/README.md b/vendor/futures-util/README.md
new file mode 100644
index 000000000..6e0aaed84
--- /dev/null
+++ b/vendor/futures-util/README.md
@@ -0,0 +1,23 @@
+# futures-util
+
+Common utilities and extension traits for the futures-rs library.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures-util = "0.3"
+```
+
+The current `futures-util` requires Rust 1.45 or later.
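+
+As a quick, illustrative sketch of the extension-trait style this crate
+provides (actually running the resulting future still requires an executor,
+for example from the `futures-executor` crate):
+
+```rust
+use futures_util::future::{self, FutureExt};
+
+fn main() {
+    // Build a future by chaining combinators; any executor can then poll it.
+    let _fut = future::ready(1).map(|x| x + 2);
+}
+```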
+
+## License
+
+Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or
+[MIT license](LICENSE-MIT) at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/vendor/futures-util/benches/futures_unordered.rs b/vendor/futures-util/benches/futures_unordered.rs
new file mode 100644
index 000000000..d5fe7a59d
--- /dev/null
+++ b/vendor/futures-util/benches/futures_unordered.rs
@@ -0,0 +1,43 @@
+#![feature(test)]
+
+extern crate test;
+use crate::test::Bencher;
+
+use futures::channel::oneshot;
+use futures::executor::block_on;
+use futures::future;
+use futures::stream::{FuturesUnordered, StreamExt};
+use futures::task::Poll;
+use std::collections::VecDeque;
+use std::thread;
+
+#[bench]
+fn oneshots(b: &mut Bencher) {
+ const NUM: usize = 10_000;
+
+ b.iter(|| {
+ let mut txs = VecDeque::with_capacity(NUM);
+ let mut rxs = FuturesUnordered::new();
+
+ for _ in 0..NUM {
+ let (tx, rx) = oneshot::channel();
+ txs.push_back(tx);
+ rxs.push(rx);
+ }
+
+ thread::spawn(move || {
+ while let Some(tx) = txs.pop_front() {
+ let _ = tx.send("hello");
+ }
+ });
+
+ block_on(future::poll_fn(move |cx| {
+ loop {
+ if let Poll::Ready(None) = rxs.poll_next_unpin(cx) {
+ break;
+ }
+ }
+ Poll::Ready(())
+ }))
+ });
+}
diff --git a/vendor/futures-util/benches_disabled/bilock.rs b/vendor/futures-util/benches_disabled/bilock.rs
new file mode 100644
index 000000000..417f75d31
--- /dev/null
+++ b/vendor/futures-util/benches_disabled/bilock.rs
@@ -0,0 +1,122 @@
+#![feature(test)]
+
+#[cfg(feature = "bilock")]
+mod bench {
+ use futures::executor::LocalPool;
+ use futures::task::{Context, Waker};
+ use futures_util::lock::BiLock;
+ use futures_util::lock::BiLockAcquire;
+ use futures_util::lock::BiLockAcquired;
+ use futures_util::task::ArcWake;
+
+ use std::sync::Arc;
+ use test::Bencher;
+
+ fn notify_noop() -> Waker {
+ struct Noop;
+
+ impl ArcWake for Noop {
+ fn wake(_: &Arc<Self>) {}
+ }
+
+ ArcWake::into_waker(Arc::new(Noop))
+ }
+
+ /// Pseudo-stream which simply calls `lock.poll()` on `poll`
+ struct LockStream {
+ lock: BiLockAcquire<u32>,
+ }
+
+ impl LockStream {
+ fn new(lock: BiLock<u32>) -> Self {
+ Self { lock: lock.lock() }
+ }
+
+ /// Release a lock after it was acquired in `poll`,
+ /// so `poll` could be called again.
+ fn release_lock(&mut self, guard: BiLockAcquired<u32>) {
+ self.lock = guard.unlock().lock()
+ }
+ }
+
+ impl Stream for LockStream {
+ type Item = BiLockAcquired<u32>;
+ type Error = ();
+
+ fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Self::Item>, Self::Error> {
+ self.lock.poll(cx).map(|a| a.map(Some))
+ }
+ }
+
+ #[bench]
+ fn contended(b: &mut Bencher) {
+ let pool = LocalPool::new();
+ let mut exec = pool.executor();
+ let waker = notify_noop();
+ let mut map = task::LocalMap::new();
+ let mut waker = task::Context::new(&mut map, &waker, &mut exec);
+
+ b.iter(|| {
+ let (x, y) = BiLock::new(1);
+
+ let mut x = LockStream::new(x);
+ let mut y = LockStream::new(y);
+
+ for _ in 0..1000 {
+ let x_guard = match x.poll_next(&mut waker) {
+ Ok(Poll::Ready(Some(guard))) => guard,
+ _ => panic!(),
+ };
+
+ // Try poll second lock while first lock still holds the lock
+ match y.poll_next(&mut waker) {
+ Ok(Poll::Pending) => (),
+ _ => panic!(),
+ };
+
+ x.release_lock(x_guard);
+
+ let y_guard = match y.poll_next(&mut waker) {
+ Ok(Poll::Ready(Some(guard))) => guard,
+ _ => panic!(),
+ };
+
+ y.release_lock(y_guard);
+ }
+ (x, y)
+ });
+ }
+
+ #[bench]
+ fn lock_unlock(b: &mut Bencher) {
+ let pool = LocalPool::new();
+ let mut exec = pool.executor();
+ let waker = notify_noop();
+ let mut map = task::LocalMap::new();
+ let mut waker = task::Context::new(&mut map, &waker, &mut exec);
+
+ b.iter(|| {
+ let (x, y) = BiLock::new(1);
+
+ let mut x = LockStream::new(x);
+ let mut y = LockStream::new(y);
+
+ for _ in 0..1000 {
+ let x_guard = match x.poll_next(&mut waker) {
+ Ok(Poll::Ready(Some(guard))) => guard,
+ _ => panic!(),
+ };
+
+ x.release_lock(x_guard);
+
+ let y_guard = match y.poll_next(&mut waker) {
+ Ok(Poll::Ready(Some(guard))) => guard,
+ _ => panic!(),
+ };
+
+ y.release_lock(y_guard);
+ }
+ (x, y)
+ })
+ }
+}
diff --git a/vendor/futures-util/build.rs b/vendor/futures-util/build.rs
new file mode 100644
index 000000000..07b50bd55
--- /dev/null
+++ b/vendor/futures-util/build.rs
@@ -0,0 +1,42 @@
+#![warn(rust_2018_idioms, single_use_lifetimes)]
+
+use std::env;
+
+include!("no_atomic_cas.rs");
+
+// The rustc-cfg strings listed below are considered public API, but they are
+// *unstable* and outside of the normal semver guarantees:
+//
+// - `futures_no_atomic_cas`
+// Assume the target does *not* support atomic CAS operations.
+// This is usually detected automatically by the build script, but you may
+// need to enable it manually when building for custom targets or using
+// non-cargo build systems that don't run the build script.
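+//   (For example, one way to enable it manually is to pass
+//   `--cfg futures_no_atomic_cas` to rustc through the `RUSTFLAGS`
+//   environment variable.)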
+//
+// With the exceptions mentioned above, the rustc-cfg strings below are
+// *not* public API. Please let us know by opening a GitHub issue if your build
+// environment requires some way to enable these cfgs other than by executing
+// our build script.
+fn main() {
+ let target = match env::var("TARGET") {
+ Ok(target) => target,
+ Err(e) => {
+ println!(
+ "cargo:warning={}: unable to get TARGET environment variable: {}",
+ env!("CARGO_PKG_NAME"),
+ e
+ );
+ return;
+ }
+ };
+
+ // Note that this is `no_*`, not `has_*`. This allows treating
+ // `cfg(target_has_atomic = "ptr")` as true when the build script doesn't
+ // run. This is needed for compatibility with non-cargo build systems that
+ // don't run the build script.
+ if NO_ATOMIC_CAS_TARGETS.contains(&&*target) {
+ println!("cargo:rustc-cfg=futures_no_atomic_cas");
+ }
+
+ println!("cargo:rerun-if-changed=no_atomic_cas.rs");
+}
diff --git a/vendor/futures-util/no_atomic_cas.rs b/vendor/futures-util/no_atomic_cas.rs
new file mode 100644
index 000000000..4708bf853
--- /dev/null
+++ b/vendor/futures-util/no_atomic_cas.rs
@@ -0,0 +1,13 @@
+// This file is @generated by no_atomic_cas.sh.
+// It is not intended for manual editing.
+
+const NO_ATOMIC_CAS_TARGETS: &[&str] = &[
+ "avr-unknown-gnu-atmega328",
+ "bpfeb-unknown-none",
+ "bpfel-unknown-none",
+ "msp430-none-elf",
+ "riscv32i-unknown-none-elf",
+ "riscv32imc-unknown-none-elf",
+ "thumbv4t-none-eabi",
+ "thumbv6m-none-eabi",
+];
diff --git a/vendor/futures-util/src/abortable.rs b/vendor/futures-util/src/abortable.rs
new file mode 100644
index 000000000..bb82dd0db
--- /dev/null
+++ b/vendor/futures-util/src/abortable.rs
@@ -0,0 +1,185 @@
+use crate::task::AtomicWaker;
+use alloc::sync::Arc;
+use core::fmt;
+use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_core::Stream;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// A future/stream which can be remotely short-circuited using an `AbortHandle`.
+ #[derive(Debug, Clone)]
+ #[must_use = "futures/streams do nothing unless you poll them"]
+ pub struct Abortable<T> {
+ #[pin]
+ task: T,
+ inner: Arc<AbortInner>,
+ }
+}
+
+impl<T> Abortable<T> {
+ /// Creates a new `Abortable` future/stream using an existing `AbortRegistration`.
+    /// `AbortRegistration`s can be acquired through `AbortHandle::new_pair`.
+ ///
+ /// When `abort` is called on the handle tied to `reg` or if `abort` has
+ /// already been called, the future/stream will complete immediately without making
+ /// any further progress.
+ ///
+ /// # Examples:
+ ///
+ /// Usage with futures:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::{Abortable, AbortHandle, Aborted};
+ ///
+ /// let (abort_handle, abort_registration) = AbortHandle::new_pair();
+ /// let future = Abortable::new(async { 2 }, abort_registration);
+ /// abort_handle.abort();
+ /// assert_eq!(future.await, Err(Aborted));
+ /// # });
+ /// ```
+ ///
+ /// Usage with streams:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// # use futures::future::{Abortable, AbortHandle};
+ /// # use futures::stream::{self, StreamExt};
+ ///
+ /// let (abort_handle, abort_registration) = AbortHandle::new_pair();
+ /// let mut stream = Abortable::new(stream::iter(vec![1, 2, 3]), abort_registration);
+ /// abort_handle.abort();
+ /// assert_eq!(stream.next().await, None);
+ /// # });
+ /// ```
+ pub fn new(task: T, reg: AbortRegistration) -> Self {
+ Self { task, inner: reg.inner }
+ }
+
+ /// Checks whether the task has been aborted. Note that all this
+ /// method indicates is whether [`AbortHandle::abort`] was *called*.
+ /// This means that it will return `true` even if:
+ /// * `abort` was called after the task had completed.
+ /// * `abort` was called while the task was being polled - the task may still be running and
+ /// will not be stopped until `poll` returns.
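+    ///
+    /// A short illustration (reusing the `futures` facade paths from the
+    /// examples above):
+    ///
+    /// ```
+    /// use futures::future::{Abortable, AbortHandle};
+    ///
+    /// let (abort_handle, abort_registration) = AbortHandle::new_pair();
+    /// let future = Abortable::new(async { 2 }, abort_registration);
+    /// assert!(!future.is_aborted());
+    ///
+    /// abort_handle.abort();
+    /// assert!(future.is_aborted());
+    /// ```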
+ pub fn is_aborted(&self) -> bool {
+ self.inner.aborted.load(Ordering::Relaxed)
+ }
+}
+
+/// A registration handle for an `Abortable` task.
+/// Values of this type can be acquired from `AbortHandle::new_pair` and are used
+/// in calls to `Abortable::new`.
+#[derive(Debug)]
+pub struct AbortRegistration {
+ inner: Arc<AbortInner>,
+}
+
+/// A handle to an `Abortable` task.
+#[derive(Debug, Clone)]
+pub struct AbortHandle {
+ inner: Arc<AbortInner>,
+}
+
+impl AbortHandle {
+ /// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used
+ /// to abort a running future or stream.
+ ///
+ /// This function is usually paired with a call to [`Abortable::new`].
+ pub fn new_pair() -> (Self, AbortRegistration) {
+ let inner =
+ Arc::new(AbortInner { waker: AtomicWaker::new(), aborted: AtomicBool::new(false) });
+
+ (Self { inner: inner.clone() }, AbortRegistration { inner })
+ }
+}
+
+// Inner type storing the waker to awaken and a bool indicating that it
+// should be aborted.
+#[derive(Debug)]
+struct AbortInner {
+ waker: AtomicWaker,
+ aborted: AtomicBool,
+}
+
+/// Indicator that the `Abortable` task was aborted.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct Aborted;
+
+impl fmt::Display for Aborted {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "`Abortable` future has been aborted")
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for Aborted {}
+
+impl<T> Abortable<T> {
+ fn try_poll<I>(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ poll: impl Fn(Pin<&mut T>, &mut Context<'_>) -> Poll<I>,
+ ) -> Poll<Result<I, Aborted>> {
+ // Check if the task has been aborted
+ if self.is_aborted() {
+ return Poll::Ready(Err(Aborted));
+ }
+
+ // attempt to complete the task
+ if let Poll::Ready(x) = poll(self.as_mut().project().task, cx) {
+ return Poll::Ready(Ok(x));
+ }
+
+ // Register to receive a wakeup if the task is aborted in the future
+ self.inner.waker.register(cx.waker());
+
+ // Check to see if the task was aborted between the first check and
+ // registration.
+        // Checking with `is_aborted`, which uses `Relaxed`, is sufficient because
+ // `register` introduces an `AcqRel` barrier.
+ if self.is_aborted() {
+ return Poll::Ready(Err(Aborted));
+ }
+
+ Poll::Pending
+ }
+}
+
+impl<Fut> Future for Abortable<Fut>
+where
+ Fut: Future,
+{
+ type Output = Result<Fut::Output, Aborted>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.try_poll(cx, |fut, cx| fut.poll(cx))
+ }
+}
+
+impl<St> Stream for Abortable<St>
+where
+ St: Stream,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.try_poll(cx, |stream, cx| stream.poll_next(cx)).map(Result::ok).map(Option::flatten)
+ }
+}
+
+impl AbortHandle {
+ /// Abort the `Abortable` stream/future associated with this handle.
+ ///
+    /// Notifies the `Abortable` task associated with this handle that it
+ /// should abort. Note that if the task is currently being polled on
+ /// another thread, it will not immediately stop running. Instead, it will
+ /// continue to run until its poll method returns.
+ pub fn abort(&self) {
+ self.inner.aborted.store(true, Ordering::Relaxed);
+ self.inner.waker.wake();
+ }
+}
diff --git a/vendor/futures-util/src/async_await/join_mod.rs b/vendor/futures-util/src/async_await/join_mod.rs
new file mode 100644
index 000000000..28f3b232e
--- /dev/null
+++ b/vendor/futures-util/src/async_await/join_mod.rs
@@ -0,0 +1,110 @@
+//! The `join` macro.
+
+macro_rules! document_join_macro {
+ ($join:item $try_join:item) => {
+ /// Polls multiple futures simultaneously, returning a tuple
+ /// of all results once complete.
+ ///
+ /// While `join!(a, b)` is similar to `(a.await, b.await)`,
+ /// `join!` polls both futures concurrently and therefore is more efficient.
+ ///
+ /// This macro is only usable inside of async functions, closures, and blocks.
+ /// It is also gated behind the `async-await` feature of this library, which is
+ /// activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::join;
+ ///
+ /// let a = async { 1 };
+ /// let b = async { 2 };
+ /// assert_eq!(join!(a, b), (1, 2));
+ ///
+ /// // `join!` is variadic, so you can pass any number of futures
+ /// let c = async { 3 };
+ /// let d = async { 4 };
+ /// let e = async { 5 };
+ /// assert_eq!(join!(c, d, e), (3, 4, 5));
+ /// # });
+ /// ```
+ $join
+
+ /// Polls multiple futures simultaneously, resolving to a [`Result`] containing
+ /// either a tuple of the successful outputs or an error.
+ ///
+ /// `try_join!` is similar to [`join!`], but completes immediately if any of
+ /// the futures return an error.
+ ///
+ /// This macro is only usable inside of async functions, closures, and blocks.
+ /// It is also gated behind the `async-await` feature of this library, which is
+ /// activated by default.
+ ///
+ /// # Examples
+ ///
+ /// When used on multiple futures that return `Ok`, `try_join!` will return
+ /// `Ok` of a tuple of the values:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::try_join;
+ ///
+ /// let a = async { Ok::<i32, i32>(1) };
+ /// let b = async { Ok::<i32, i32>(2) };
+ /// assert_eq!(try_join!(a, b), Ok((1, 2)));
+ ///
+ /// // `try_join!` is variadic, so you can pass any number of futures
+ /// let c = async { Ok::<i32, i32>(3) };
+ /// let d = async { Ok::<i32, i32>(4) };
+ /// let e = async { Ok::<i32, i32>(5) };
+ /// assert_eq!(try_join!(c, d, e), Ok((3, 4, 5)));
+ /// # });
+ /// ```
+ ///
+ /// If one of the futures resolves to an error, `try_join!` will return
+ /// that error:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::try_join;
+ ///
+ /// let a = async { Ok::<i32, i32>(1) };
+ /// let b = async { Err::<u64, i32>(2) };
+ ///
+ /// assert_eq!(try_join!(a, b), Err(2));
+ /// # });
+ /// ```
+ $try_join
+ }
+}
+
+#[allow(unreachable_pub)]
+#[doc(hidden)]
+pub use futures_macro::join_internal;
+
+#[allow(unreachable_pub)]
+#[doc(hidden)]
+pub use futures_macro::try_join_internal;
+
+document_join_macro! {
+ #[macro_export]
+ macro_rules! join {
+ ($($tokens:tt)*) => {{
+ use $crate::__private as __futures_crate;
+ $crate::join_internal! {
+ $( $tokens )*
+ }
+ }}
+ }
+
+ #[macro_export]
+ macro_rules! try_join {
+ ($($tokens:tt)*) => {{
+ use $crate::__private as __futures_crate;
+ $crate::try_join_internal! {
+ $( $tokens )*
+ }
+ }}
+ }
+}
diff --git a/vendor/futures-util/src/async_await/mod.rs b/vendor/futures-util/src/async_await/mod.rs
new file mode 100644
index 000000000..7276da227
--- /dev/null
+++ b/vendor/futures-util/src/async_await/mod.rs
@@ -0,0 +1,58 @@
+//! Await
+//!
+//! This module contains a number of functions and combinators for working
+//! with `async`/`await` code.
+
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::{FusedStream, Stream};
+
+#[macro_use]
+mod poll;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+pub use self::poll::*;
+
+#[macro_use]
+mod pending;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+pub use self::pending::*;
+
+// Primary export is a macro
+#[cfg(feature = "async-await-macro")]
+mod join_mod;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+#[cfg(feature = "async-await-macro")]
+pub use self::join_mod::*;
+
+// Primary export is a macro
+#[cfg(feature = "async-await-macro")]
+mod select_mod;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+#[cfg(feature = "async-await-macro")]
+pub use self::select_mod::*;
+
+// Primary export is a macro
+#[cfg(feature = "async-await-macro")]
+mod stream_select_mod;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+#[cfg(feature = "async-await-macro")]
+pub use self::stream_select_mod::*;
+
+#[cfg(feature = "std")]
+#[cfg(feature = "async-await-macro")]
+mod random;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/64762
+#[cfg(feature = "std")]
+#[cfg(feature = "async-await-macro")]
+pub use self::random::*;
+
+#[doc(hidden)]
+#[inline(always)]
+pub fn assert_unpin<T: Unpin>(_: &T) {}
+
+#[doc(hidden)]
+#[inline(always)]
+pub fn assert_fused_future<T: Future + FusedFuture>(_: &T) {}
+
+#[doc(hidden)]
+#[inline(always)]
+pub fn assert_fused_stream<T: Stream + FusedStream>(_: &T) {}
diff --git a/vendor/futures-util/src/async_await/pending.rs b/vendor/futures-util/src/async_await/pending.rs
new file mode 100644
index 000000000..5d7a43181
--- /dev/null
+++ b/vendor/futures-util/src/async_await/pending.rs
@@ -0,0 +1,43 @@
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+
+/// A macro which yields to the event loop once.
+///
+/// This is equivalent to returning [`Poll::Pending`](futures_core::task::Poll)
+/// from a [`Future::poll`](futures_core::future::Future::poll) implementation.
+/// Similarly, when using this macro, it must be ensured that [`wake`](std::task::Waker::wake)
+/// is called somewhere when further progress can be made.
+///
+/// This macro is only usable inside of async functions, closures, and blocks.
+/// It is also gated behind the `async-await` feature of this library, which is
+/// activated by default.
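+///
+/// # Examples
+///
+/// A minimal sketch (written against the `futures` facade crate, as the other
+/// examples in this crate are), pairing `pending!` with the `poll!` macro so
+/// the single yield is visible:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::{pending, pin_mut, poll};
+/// use futures::task::Poll;
+///
+/// let fut = async {
+///     pending!(); // yields once, then completes on the next poll
+///     5
+/// };
+/// pin_mut!(fut);
+///
+/// assert_eq!(poll!(fut.as_mut()), Poll::Pending);
+/// assert_eq!(poll!(fut.as_mut()), Poll::Ready(5));
+/// # });
+/// ```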
+#[macro_export]
+macro_rules! pending {
+ () => {
+ $crate::__private::async_await::pending_once().await
+ };
+}
+
+#[doc(hidden)]
+pub fn pending_once() -> PendingOnce {
+ PendingOnce { is_ready: false }
+}
+
+#[allow(missing_debug_implementations)]
+#[doc(hidden)]
+pub struct PendingOnce {
+ is_ready: bool,
+}
+
+impl Future for PendingOnce {
+ type Output = ();
+ fn poll(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
+ if self.is_ready {
+ Poll::Ready(())
+ } else {
+ self.is_ready = true;
+ Poll::Pending
+ }
+ }
+}
diff --git a/vendor/futures-util/src/async_await/poll.rs b/vendor/futures-util/src/async_await/poll.rs
new file mode 100644
index 000000000..b62f45a94
--- /dev/null
+++ b/vendor/futures-util/src/async_await/poll.rs
@@ -0,0 +1,39 @@
+use crate::future::FutureExt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+
+/// A macro which returns the result of polling a future once within the
+/// current `async` context.
+///
+/// This macro is only usable inside of `async` functions, closures, and blocks.
+/// It is also gated behind the `async-await` feature of this library, which is
+/// activated by default.
+///
+/// If you need the result of polling a [`Stream`](crate::stream::Stream),
+/// you can use this macro with the [`next`](crate::stream::StreamExt::next) method:
+/// `poll!(stream.next())`.
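+///
+/// # Examples
+///
+/// A small illustration (using the `futures` facade crate, as the other
+/// examples in this crate do):
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+/// use futures::poll;
+/// use futures::task::Poll;
+///
+/// let mut fut = future::ready(42);
+/// assert_eq!(poll!(&mut fut), Poll::Ready(42));
+/// # });
+/// ```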
+#[macro_export]
+macro_rules! poll {
+ ($x:expr $(,)?) => {
+ $crate::__private::async_await::poll($x).await
+ };
+}
+
+#[doc(hidden)]
+pub fn poll<F: Future + Unpin>(future: F) -> PollOnce<F> {
+ PollOnce { future }
+}
+
+#[allow(missing_debug_implementations)]
+#[doc(hidden)]
+pub struct PollOnce<F: Future + Unpin> {
+ future: F,
+}
+
+impl<F: Future + Unpin> Future for PollOnce<F> {
+ type Output = Poll<F::Output>;
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Poll::Ready(self.future.poll_unpin(cx))
+ }
+}
diff --git a/vendor/futures-util/src/async_await/random.rs b/vendor/futures-util/src/async_await/random.rs
new file mode 100644
index 000000000..4f8c7254b
--- /dev/null
+++ b/vendor/futures-util/src/async_await/random.rs
@@ -0,0 +1,54 @@
+use std::{
+ cell::Cell,
+ collections::hash_map::DefaultHasher,
+ hash::Hasher,
+ num::Wrapping,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+
+// Based on [Fisher–Yates shuffle].
+//
+// [Fisher–Yates shuffle]: https://en.wikipedia.org/wiki/Fisher–Yates_shuffle
+#[doc(hidden)]
+pub fn shuffle<T>(slice: &mut [T]) {
+ for i in (1..slice.len()).rev() {
+ slice.swap(i, gen_index(i + 1));
+ }
+}
+
+/// Return a value from `0..n`.
+fn gen_index(n: usize) -> usize {
+ (random() % n as u64) as usize
+}
+
+/// Pseudorandom number generator based on [xorshift*].
+///
+/// [xorshift*]: https://en.wikipedia.org/wiki/Xorshift#xorshift*
+fn random() -> u64 {
+ thread_local! {
+ static RNG: Cell<Wrapping<u64>> = Cell::new(Wrapping(prng_seed()));
+ }
+
+ fn prng_seed() -> u64 {
+ static COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ // Any non-zero seed will do
+ let mut seed = 0;
+ while seed == 0 {
+ let mut hasher = DefaultHasher::new();
+ hasher.write_usize(COUNTER.fetch_add(1, Ordering::Relaxed));
+ seed = hasher.finish();
+ }
+ seed
+ }
+
+ RNG.with(|rng| {
+ let mut x = rng.get();
+ debug_assert_ne!(x.0, 0);
+ x ^= x >> 12;
+ x ^= x << 25;
+ x ^= x >> 27;
+ rng.set(x);
+ x.0.wrapping_mul(0x2545_f491_4f6c_dd1d)
+ })
+}
diff --git a/vendor/futures-util/src/async_await/select_mod.rs b/vendor/futures-util/src/async_await/select_mod.rs
new file mode 100644
index 000000000..1d13067d3
--- /dev/null
+++ b/vendor/futures-util/src/async_await/select_mod.rs
@@ -0,0 +1,336 @@
+//! The `select` macro.
+
+macro_rules! document_select_macro {
+ // This branch is required for `futures 0.3.1`, from before select_biased was introduced
+ ($select:item) => {
+ /// Polls multiple futures and streams simultaneously, executing the branch
+ /// for the future that finishes first. If multiple futures are ready,
+ /// one will be pseudo-randomly selected at runtime. Futures directly
+ /// passed to `select!` must be `Unpin` and implement `FusedFuture`.
+ ///
+ /// If an expression which yields a `Future` is passed to `select!`
+    /// (e.g. an `async fn` call) instead of a `Future` by name, the `Unpin`
+ /// requirement is relaxed, since the macro will pin the resulting `Future`
+ /// on the stack. However the `Future` returned by the expression must
+ /// still implement `FusedFuture`.
+ ///
+ /// Futures and streams which are not already fused can be fused using the
+ /// `.fuse()` method. Note, though, that fusing a future or stream directly
+ /// in the call to `select!` will not be enough to prevent it from being
+ /// polled after completion if the `select!` call is in a loop, so when
+ /// `select!`ing in a loop, users should take care to `fuse()` outside of
+ /// the loop.
+ ///
+ /// `select!` can be used as an expression and will return the return
+ /// value of the selected branch. For this reason the return type of every
+ /// branch in a `select!` must be the same.
+ ///
+ /// This macro is only usable inside of async functions, closures, and blocks.
+ /// It is also gated behind the `async-await` feature of this library, which is
+ /// activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::select;
+ /// let mut a = future::ready(4);
+ /// let mut b = future::pending::<()>();
+ ///
+ /// let res = select! {
+ /// a_res = a => a_res + 1,
+ /// _ = b => 0,
+ /// };
+ /// assert_eq!(res, 5);
+ /// # });
+ /// ```
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::select;
+ /// let mut st = stream::iter(vec![2]).fuse();
+ /// let mut fut = future::pending::<()>();
+ ///
+ /// select! {
+ /// x = st.next() => assert_eq!(Some(2), x),
+ /// _ = fut => panic!(),
+ /// };
+ /// # });
+ /// ```
+ ///
+ /// As described earlier, `select` can directly select on expressions
+ /// which return `Future`s - even if those do not implement `Unpin`:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::select;
+ ///
+ /// // Calling the following async fn returns a Future which does not
+ /// // implement Unpin
+ /// async fn async_identity_fn(arg: usize) -> usize {
+ /// arg
+ /// }
+ ///
+ /// let res = select! {
+ /// a_res = async_identity_fn(62).fuse() => a_res + 1,
+ /// b_res = async_identity_fn(13).fuse() => b_res,
+ /// };
+ /// assert!(res == 63 || res == 13);
+ /// # });
+ /// ```
+ ///
+ /// If a similar async function is called outside of `select` to produce
+ /// a `Future`, the `Future` must be pinned in order to be able to pass
+ /// it to `select`. This can be achieved via `Box::pin` for pinning a
+ /// `Future` on the heap or the `pin_mut!` macro for pinning a `Future`
+ /// on the stack.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::select;
+ /// use futures::pin_mut;
+ ///
+ /// // Calling the following async fn returns a Future which does not
+ /// // implement Unpin
+ /// async fn async_identity_fn(arg: usize) -> usize {
+ /// arg
+ /// }
+ ///
+ /// let fut_1 = async_identity_fn(1).fuse();
+ /// let fut_2 = async_identity_fn(2).fuse();
+ /// let mut fut_1 = Box::pin(fut_1); // Pins the Future on the heap
+ /// pin_mut!(fut_2); // Pins the Future on the stack
+ ///
+ /// let res = select! {
+ /// a_res = fut_1 => a_res,
+ /// b_res = fut_2 => b_res,
+ /// };
+ /// assert!(res == 1 || res == 2);
+ /// # });
+ /// ```
+ ///
+ /// `select` also accepts a `complete` branch and a `default` branch.
+ /// `complete` will run if all futures and streams have already been
+ /// exhausted. `default` will run if no futures or streams are
+ /// immediately ready. `complete` takes priority over `default` in
+ /// the case where all futures have completed.
+ /// A motivating use-case for passing `Future`s by name as well as for
+ /// `complete` blocks is to call `select!` in a loop, which is
+ /// demonstrated in the following example:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::select;
+ /// let mut a_fut = future::ready(4);
+ /// let mut b_fut = future::ready(6);
+ /// let mut total = 0;
+ ///
+ /// loop {
+ /// select! {
+ /// a = a_fut => total += a,
+ /// b = b_fut => total += b,
+ /// complete => break,
+ /// default => panic!(), // never runs (futures run first, then complete)
+ /// };
+ /// }
+ /// assert_eq!(total, 10);
+ /// # });
+ /// ```
+ ///
+ /// Note that the futures that have been matched over can still be mutated
+ /// from inside the `select!` block's branches. This can be used to implement
+ /// more complex behavior such as timer resets or writing into the head of
+ /// a stream.
+ $select
+ };
+
+ ($select:item $select_biased:item) => {
+ document_select_macro!($select);
+
+ /// Polls multiple futures and streams simultaneously, executing the branch
+ /// for the future that finishes first. Unlike [`select!`], if multiple futures are ready,
+ /// one will be selected in order of declaration. Futures directly
+ /// passed to `select_biased!` must be `Unpin` and implement `FusedFuture`.
+ ///
+ /// If an expression which yields a `Future` is passed to `select_biased!`
+    /// (e.g. an `async fn` call) instead of a `Future` by name, the `Unpin`
+ /// requirement is relaxed, since the macro will pin the resulting `Future`
+ /// on the stack. However the `Future` returned by the expression must
+ /// still implement `FusedFuture`.
+ ///
+ /// Futures and streams which are not already fused can be fused using the
+ /// `.fuse()` method. Note, though, that fusing a future or stream directly
+ /// in the call to `select_biased!` will not be enough to prevent it from being
+ /// polled after completion if the `select_biased!` call is in a loop, so when
+ /// `select_biased!`ing in a loop, users should take care to `fuse()` outside of
+ /// the loop.
+ ///
+ /// `select_biased!` can be used as an expression and will return the return
+ /// value of the selected branch. For this reason the return type of every
+ /// branch in a `select_biased!` must be the same.
+ ///
+ /// This macro is only usable inside of async functions, closures, and blocks.
+ /// It is also gated behind the `async-await` feature of this library, which is
+ /// activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::select_biased;
+ /// let mut a = future::ready(4);
+ /// let mut b = future::pending::<()>();
+ ///
+ /// let res = select_biased! {
+ /// a_res = a => a_res + 1,
+ /// _ = b => 0,
+ /// };
+ /// assert_eq!(res, 5);
+ /// # });
+ /// ```
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::select_biased;
+ /// let mut st = stream::iter(vec![2]).fuse();
+ /// let mut fut = future::pending::<()>();
+ ///
+ /// select_biased! {
+ /// x = st.next() => assert_eq!(Some(2), x),
+ /// _ = fut => panic!(),
+ /// };
+ /// # });
+ /// ```
+ ///
+ /// As described earlier, `select_biased` can directly select on expressions
+ /// which return `Future`s - even if those do not implement `Unpin`:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::select_biased;
+ ///
+ /// // Calling the following async fn returns a Future which does not
+ /// // implement Unpin
+ /// async fn async_identity_fn(arg: usize) -> usize {
+ /// arg
+ /// }
+ ///
+ /// let res = select_biased! {
+ /// a_res = async_identity_fn(62).fuse() => a_res + 1,
+ /// b_res = async_identity_fn(13).fuse() => b_res,
+ /// };
+    /// assert!(res == 63 || res == 13);
+ /// # });
+ /// ```
+ ///
+ /// If a similar async function is called outside of `select_biased` to produce
+ /// a `Future`, the `Future` must be pinned in order to be able to pass
+ /// it to `select_biased`. This can be achieved via `Box::pin` for pinning a
+ /// `Future` on the heap or the `pin_mut!` macro for pinning a `Future`
+ /// on the stack.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::select_biased;
+ /// use futures::pin_mut;
+ ///
+ /// // Calling the following async fn returns a Future which does not
+ /// // implement Unpin
+ /// async fn async_identity_fn(arg: usize) -> usize {
+ /// arg
+ /// }
+ ///
+ /// let fut_1 = async_identity_fn(1).fuse();
+ /// let fut_2 = async_identity_fn(2).fuse();
+ /// let mut fut_1 = Box::pin(fut_1); // Pins the Future on the heap
+ /// pin_mut!(fut_2); // Pins the Future on the stack
+ ///
+ /// let res = select_biased! {
+ /// a_res = fut_1 => a_res,
+ /// b_res = fut_2 => b_res,
+ /// };
+ /// assert!(res == 1 || res == 2);
+ /// # });
+ /// ```
+ ///
+ /// `select_biased` also accepts a `complete` branch and a `default` branch.
+ /// `complete` will run if all futures and streams have already been
+ /// exhausted. `default` will run if no futures or streams are
+ /// immediately ready. `complete` takes priority over `default` in
+ /// the case where all futures have completed.
+ /// A motivating use-case for passing `Future`s by name as well as for
+ /// `complete` blocks is to call `select_biased!` in a loop, which is
+ /// demonstrated in the following example:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::select_biased;
+ /// let mut a_fut = future::ready(4);
+ /// let mut b_fut = future::ready(6);
+ /// let mut total = 0;
+ ///
+ /// loop {
+ /// select_biased! {
+ /// a = a_fut => total += a,
+ /// b = b_fut => total += b,
+ /// complete => break,
+ /// default => panic!(), // never runs (futures run first, then complete)
+ /// };
+ /// }
+ /// assert_eq!(total, 10);
+ /// # });
+ /// ```
+ ///
+ /// Note that the futures that have been matched over can still be mutated
+ /// from inside the `select_biased!` block's branches. This can be used to implement
+ /// more complex behavior such as timer resets or writing into the head of
+ /// a stream.
+ ///
+ /// [`select!`]: macro.select.html
+ $select_biased
+ };
+}
+
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)]
+#[doc(hidden)]
+pub use futures_macro::select_internal;
+
+#[allow(unreachable_pub)]
+#[doc(hidden)]
+pub use futures_macro::select_biased_internal;
+
+document_select_macro! {
+ #[cfg(feature = "std")]
+ #[macro_export]
+ macro_rules! select {
+ ($($tokens:tt)*) => {{
+ use $crate::__private as __futures_crate;
+ $crate::select_internal! {
+ $( $tokens )*
+ }
+ }}
+ }
+
+ #[macro_export]
+ macro_rules! select_biased {
+ ($($tokens:tt)*) => {{
+ use $crate::__private as __futures_crate;
+ $crate::select_biased_internal! {
+ $( $tokens )*
+ }
+ }}
+ }
+}
diff --git a/vendor/futures-util/src/async_await/stream_select_mod.rs b/vendor/futures-util/src/async_await/stream_select_mod.rs
new file mode 100644
index 000000000..1c8002fff
--- /dev/null
+++ b/vendor/futures-util/src/async_await/stream_select_mod.rs
@@ -0,0 +1,40 @@
+//! The `stream_select` macro.
+
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)]
+#[doc(hidden)]
+pub use futures_macro::stream_select_internal;
+
+/// Combines several streams, all producing the same `Item` type, into one stream.
+/// This is similar to `select_all` but does not require the streams to all be the same type.
+/// It also keeps the streams inline, and does not require `Box<dyn Stream>`s to be allocated.
+/// Streams passed to this macro must be `Unpin`.
+///
+/// If multiple streams are ready, one will be pseudo-randomly selected at runtime.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::{stream, StreamExt, stream_select};
+/// let endless_ints = |i| stream::iter(vec![i].into_iter().cycle()).fuse();
+///
+/// let mut endless_numbers = stream_select!(endless_ints(1i32), endless_ints(2), endless_ints(3));
+/// match endless_numbers.next().await {
+/// Some(1) => println!("Got a 1"),
+/// Some(2) => println!("Got a 2"),
+/// Some(3) => println!("Got a 3"),
+/// _ => unreachable!(),
+/// }
+/// # });
+/// ```
+#[cfg(feature = "std")]
+#[macro_export]
+macro_rules! stream_select {
+ ($($tokens:tt)*) => {{
+ use $crate::__private as __futures_crate;
+ $crate::stream_select_internal! {
+ $( $tokens )*
+ }
+ }}
+}
diff --git a/vendor/futures-util/src/compat/compat01as03.rs b/vendor/futures-util/src/compat/compat01as03.rs
new file mode 100644
index 000000000..754e3d82a
--- /dev/null
+++ b/vendor/futures-util/src/compat/compat01as03.rs
@@ -0,0 +1,449 @@
+use futures_01::executor::{
+ spawn as spawn01, Notify as Notify01, NotifyHandle as NotifyHandle01, Spawn as Spawn01,
+ UnsafeNotify as UnsafeNotify01,
+};
+use futures_01::{Async as Async01, Future as Future01, Stream as Stream01};
+#[cfg(feature = "sink")]
+use futures_01::{AsyncSink as AsyncSink01, Sink as Sink01};
+use futures_core::{future::Future as Future03, stream::Stream as Stream03, task as task03};
+#[cfg(feature = "sink")]
+use futures_sink::Sink as Sink03;
+use std::pin::Pin;
+use std::task::Context;
+
+#[cfg(feature = "io-compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use io::{AsyncRead01CompatExt, AsyncWrite01CompatExt};
+
+/// Converts a futures 0.1 Future, Stream, AsyncRead, or AsyncWrite
+/// object to a futures 0.3-compatible version.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Compat01As03<T> {
+ pub(crate) inner: Spawn01<T>,
+}
+
+impl<T> Unpin for Compat01As03<T> {}
+
+impl<T> Compat01As03<T> {
+ /// Wraps a futures 0.1 Future, Stream, AsyncRead, or AsyncWrite
+ /// object in a futures 0.3-compatible wrapper.
+ pub fn new(object: T) -> Self {
+ Self { inner: spawn01(object) }
+ }
+
+ fn in_notify<R>(&mut self, cx: &mut Context<'_>, f: impl FnOnce(&mut T) -> R) -> R {
+ let notify = &WakerToHandle(cx.waker());
+ self.inner.poll_fn_notify(notify, 0, f)
+ }
+
+    /// Get a reference to the 0.1 Future, Stream, AsyncRead, or AsyncWrite object contained within.
+ pub fn get_ref(&self) -> &T {
+ self.inner.get_ref()
+ }
+
+    /// Get a mutable reference to the 0.1 Future, Stream, AsyncRead, or AsyncWrite object contained
+ /// within.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.inner.get_mut()
+ }
+
+ /// Consume this wrapper to return the underlying 0.1 Future, Stream, AsyncRead, or
+ /// AsyncWrite object.
+ pub fn into_inner(self) -> T {
+ self.inner.into_inner()
+ }
+}
+
+/// Extension trait for futures 0.1 [`Future`](futures_01::future::Future)
+pub trait Future01CompatExt: Future01 {
+ /// Converts a futures 0.1
+ /// [`Future<Item = T, Error = E>`](futures_01::future::Future)
+ /// into a futures 0.3
+ /// [`Future<Output = Result<T, E>>`](futures_core::future::Future).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// # // TODO: These should be all using `futures::compat`, but that runs up against Cargo
+ /// # // feature issues
+ /// use futures_util::compat::Future01CompatExt;
+ ///
+ /// let future = futures_01::future::ok::<u32, ()>(1);
+ /// assert_eq!(future.compat().await, Ok(1));
+ /// # });
+ /// ```
+ fn compat(self) -> Compat01As03<Self>
+ where
+ Self: Sized,
+ {
+ Compat01As03::new(self)
+ }
+}
+impl<Fut: Future01> Future01CompatExt for Fut {}
+
+/// Extension trait for futures 0.1 [`Stream`](futures_01::stream::Stream)
+pub trait Stream01CompatExt: Stream01 {
+ /// Converts a futures 0.1
+ /// [`Stream<Item = T, Error = E>`](futures_01::stream::Stream)
+ /// into a futures 0.3
+ /// [`Stream<Item = Result<T, E>>`](futures_core::stream::Stream).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::StreamExt;
+ /// use futures_util::compat::Stream01CompatExt;
+ ///
+ /// let stream = futures_01::stream::once::<u32, ()>(Ok(1));
+ /// let mut stream = stream.compat();
+ /// assert_eq!(stream.next().await, Some(Ok(1)));
+ /// assert_eq!(stream.next().await, None);
+ /// # });
+ /// ```
+ fn compat(self) -> Compat01As03<Self>
+ where
+ Self: Sized,
+ {
+ Compat01As03::new(self)
+ }
+}
+impl<St: Stream01> Stream01CompatExt for St {}
+
+/// Extension trait for futures 0.1 [`Sink`](futures_01::sink::Sink)
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub trait Sink01CompatExt: Sink01 {
+ /// Converts a futures 0.1
+ /// [`Sink<SinkItem = T, SinkError = E>`](futures_01::sink::Sink)
+ /// into a futures 0.3
+ /// [`Sink<T, Error = E>`](futures_sink::Sink).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::{sink::SinkExt, stream::StreamExt};
+ /// use futures_util::compat::{Stream01CompatExt, Sink01CompatExt};
+ ///
+ /// let (tx, rx) = futures_01::unsync::mpsc::channel(1);
+ /// let (mut tx, mut rx) = (tx.sink_compat(), rx.compat());
+ ///
+ /// tx.send(1).await.unwrap();
+ /// drop(tx);
+ /// assert_eq!(rx.next().await, Some(Ok(1)));
+ /// assert_eq!(rx.next().await, None);
+ /// # });
+ /// ```
+ fn sink_compat(self) -> Compat01As03Sink<Self, Self::SinkItem>
+ where
+ Self: Sized,
+ {
+ Compat01As03Sink::new(self)
+ }
+}
+#[cfg(feature = "sink")]
+impl<Si: Sink01> Sink01CompatExt for Si {}
+
+fn poll_01_to_03<T, E>(x: Result<Async01<T>, E>) -> task03::Poll<Result<T, E>> {
+ match x? {
+ Async01::Ready(t) => task03::Poll::Ready(Ok(t)),
+ Async01::NotReady => task03::Poll::Pending,
+ }
+}
+
+impl<Fut: Future01> Future03 for Compat01As03<Fut> {
+ type Output = Result<Fut::Item, Fut::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> task03::Poll<Self::Output> {
+ poll_01_to_03(self.in_notify(cx, Future01::poll))
+ }
+}
+
+impl<St: Stream01> Stream03 for Compat01As03<St> {
+ type Item = Result<St::Item, St::Error>;
+
+ fn poll_next(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Option<Self::Item>> {
+ match self.in_notify(cx, Stream01::poll)? {
+ Async01::Ready(Some(t)) => task03::Poll::Ready(Some(Ok(t))),
+ Async01::Ready(None) => task03::Poll::Ready(None),
+ Async01::NotReady => task03::Poll::Pending,
+ }
+ }
+}
+
+/// Converts a futures 0.1 Sink object to a futures 0.3-compatible version
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct Compat01As03Sink<S, SinkItem> {
+ pub(crate) inner: Spawn01<S>,
+ pub(crate) buffer: Option<SinkItem>,
+ pub(crate) close_started: bool,
+}
+
+#[cfg(feature = "sink")]
+impl<S, SinkItem> Unpin for Compat01As03Sink<S, SinkItem> {}
+
+#[cfg(feature = "sink")]
+impl<S, SinkItem> Compat01As03Sink<S, SinkItem> {
+ /// Wraps a futures 0.1 Sink object in a futures 0.3-compatible wrapper.
+ pub fn new(inner: S) -> Self {
+ Self { inner: spawn01(inner), buffer: None, close_started: false }
+ }
+
+ fn in_notify<R>(&mut self, cx: &mut Context<'_>, f: impl FnOnce(&mut S) -> R) -> R {
+ let notify = &WakerToHandle(cx.waker());
+ self.inner.poll_fn_notify(notify, 0, f)
+ }
+
+ /// Get a reference to the 0.1 Sink object contained within.
+ pub fn get_ref(&self) -> &S {
+ self.inner.get_ref()
+ }
+
+ /// Get a mutable reference to the 0.1 Sink object contained within.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.inner.get_mut()
+ }
+
+ /// Consume this wrapper to return the underlying 0.1 Sink.
+ pub fn into_inner(self) -> S {
+ self.inner.into_inner()
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<S, SinkItem> Stream03 for Compat01As03Sink<S, SinkItem>
+where
+ S: Stream01,
+{
+ type Item = Result<S::Item, S::Error>;
+
+ fn poll_next(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Option<Self::Item>> {
+ match self.in_notify(cx, Stream01::poll)? {
+ Async01::Ready(Some(t)) => task03::Poll::Ready(Some(Ok(t))),
+ Async01::Ready(None) => task03::Poll::Ready(None),
+ Async01::NotReady => task03::Poll::Pending,
+ }
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<S, SinkItem> Sink03<SinkItem> for Compat01As03Sink<S, SinkItem>
+where
+ S: Sink01<SinkItem = SinkItem>,
+{
+ type Error = S::SinkError;
+
+ fn start_send(mut self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> {
+ debug_assert!(self.buffer.is_none());
+ self.buffer = Some(item);
+ Ok(())
+ }
+
+ fn poll_ready(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Result<(), Self::Error>> {
+ match self.buffer.take() {
+ Some(item) => match self.in_notify(cx, |f| f.start_send(item))? {
+ AsyncSink01::Ready => task03::Poll::Ready(Ok(())),
+ AsyncSink01::NotReady(i) => {
+ self.buffer = Some(i);
+ task03::Poll::Pending
+ }
+ },
+ None => task03::Poll::Ready(Ok(())),
+ }
+ }
+
+ fn poll_flush(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Result<(), Self::Error>> {
+ let item = self.buffer.take();
+ match self.in_notify(cx, |f| match item {
+ Some(i) => match f.start_send(i)? {
+ AsyncSink01::Ready => f.poll_complete().map(|i| (i, None)),
+ AsyncSink01::NotReady(t) => Ok((Async01::NotReady, Some(t))),
+ },
+ None => f.poll_complete().map(|i| (i, None)),
+ })? {
+ (Async01::Ready(_), _) => task03::Poll::Ready(Ok(())),
+ (Async01::NotReady, item) => {
+ self.buffer = item;
+ task03::Poll::Pending
+ }
+ }
+ }
+
+ fn poll_close(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Result<(), Self::Error>> {
+ let item = self.buffer.take();
+ let close_started = self.close_started;
+
+ let result = self.in_notify(cx, |f| {
+ if !close_started {
+ if let Some(item) = item {
+ if let AsyncSink01::NotReady(item) = f.start_send(item)? {
+ return Ok((Async01::NotReady, Some(item), false));
+ }
+ }
+
+ if let Async01::NotReady = f.poll_complete()? {
+ return Ok((Async01::NotReady, None, false));
+ }
+ }
+
+ Ok((<S as Sink01>::close(f)?, None, true))
+ });
+
+ match result? {
+ (Async01::Ready(_), _, _) => task03::Poll::Ready(Ok(())),
+ (Async01::NotReady, item, close_started) => {
+ self.buffer = item;
+ self.close_started = close_started;
+ task03::Poll::Pending
+ }
+ }
+ }
+}
+
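+// Waker bridge for the 0.1 -> 0.3 direction: the 0.3 task's `Waker` is boxed
+// up as a futures 0.1 `NotifyHandle`, so that when the wrapped 0.1 future,
+// stream, or sink calls `notify`, the owning 0.3 task is woken.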
+struct NotifyWaker(task03::Waker);
+
+#[allow(missing_debug_implementations)] // false positive: this is a private type
+#[derive(Clone)]
+struct WakerToHandle<'a>(&'a task03::Waker);
+
+impl From<WakerToHandle<'_>> for NotifyHandle01 {
+ fn from(handle: WakerToHandle<'_>) -> Self {
+ let ptr = Box::new(NotifyWaker(handle.0.clone()));
+
+ unsafe { Self::new(Box::into_raw(ptr)) }
+ }
+}
+
+impl Notify01 for NotifyWaker {
+ fn notify(&self, _: usize) {
+ self.0.wake_by_ref();
+ }
+}
+
+unsafe impl UnsafeNotify01 for NotifyWaker {
+ unsafe fn clone_raw(&self) -> NotifyHandle01 {
+ WakerToHandle(&self.0).into()
+ }
+
+ unsafe fn drop_raw(&self) {
+ let ptr: *const dyn UnsafeNotify01 = self;
+ drop(Box::from_raw(ptr as *mut dyn UnsafeNotify01));
+ }
+}
+
+#[cfg(feature = "io-compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+mod io {
+ use super::*;
+ use futures_io::{AsyncRead as AsyncRead03, AsyncWrite as AsyncWrite03};
+ use std::io::Error;
+ use tokio_io::{AsyncRead as AsyncRead01, AsyncWrite as AsyncWrite01};
+
+ /// Extension trait for tokio-io [`AsyncRead`](tokio_io::AsyncRead)
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+ pub trait AsyncRead01CompatExt: AsyncRead01 {
+ /// Converts a tokio-io [`AsyncRead`](tokio_io::AsyncRead) into a futures-io 0.3
+ /// [`AsyncRead`](futures_io::AsyncRead).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::AsyncReadExt;
+ /// use futures_util::compat::AsyncRead01CompatExt;
+ ///
+ /// let input = b"Hello World!";
+ /// let reader /* : impl tokio_io::AsyncRead */ = std::io::Cursor::new(input);
+ /// let mut reader /* : impl futures::io::AsyncRead + Unpin */ = reader.compat();
+ ///
+ /// let mut output = Vec::with_capacity(12);
+ /// reader.read_to_end(&mut output).await.unwrap();
+ /// assert_eq!(output, input);
+ /// # });
+ /// ```
+ fn compat(self) -> Compat01As03<Self>
+ where
+ Self: Sized,
+ {
+ Compat01As03::new(self)
+ }
+ }
+ impl<R: AsyncRead01> AsyncRead01CompatExt for R {}
+
+ /// Extension trait for tokio-io [`AsyncWrite`](tokio_io::AsyncWrite)
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+ pub trait AsyncWrite01CompatExt: AsyncWrite01 {
+ /// Converts a tokio-io [`AsyncWrite`](tokio_io::AsyncWrite) into a futures-io 0.3
+ /// [`AsyncWrite`](futures_io::AsyncWrite).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::AsyncWriteExt;
+ /// use futures_util::compat::AsyncWrite01CompatExt;
+ ///
+ /// let input = b"Hello World!";
+ /// let mut cursor = std::io::Cursor::new(Vec::with_capacity(12));
+ ///
+ /// let mut writer = (&mut cursor).compat();
+ /// writer.write_all(input).await.unwrap();
+ ///
+ /// assert_eq!(cursor.into_inner(), input);
+ /// # });
+ /// ```
+ fn compat(self) -> Compat01As03<Self>
+ where
+ Self: Sized,
+ {
+ Compat01As03::new(self)
+ }
+ }
+ impl<W: AsyncWrite01> AsyncWrite01CompatExt for W {}
+
+ impl<R: AsyncRead01> AsyncRead03 for Compat01As03<R> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> task03::Poll<Result<usize, Error>> {
+ poll_01_to_03(self.in_notify(cx, |x| x.poll_read(buf)))
+ }
+ }
+
+ impl<W: AsyncWrite01> AsyncWrite03 for Compat01As03<W> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> task03::Poll<Result<usize, Error>> {
+ poll_01_to_03(self.in_notify(cx, |x| x.poll_write(buf)))
+ }
+
+ fn poll_flush(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Result<(), Error>> {
+ poll_01_to_03(self.in_notify(cx, AsyncWrite01::poll_flush))
+ }
+
+ fn poll_close(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> task03::Poll<Result<(), Error>> {
+ poll_01_to_03(self.in_notify(cx, AsyncWrite01::shutdown))
+ }
+ }
+}
diff --git a/vendor/futures-util/src/compat/compat03as01.rs b/vendor/futures-util/src/compat/compat03as01.rs
new file mode 100644
index 000000000..5d3a6e920
--- /dev/null
+++ b/vendor/futures-util/src/compat/compat03as01.rs
@@ -0,0 +1,265 @@
+use crate::task::{self as task03, ArcWake as ArcWake03, WakerRef};
+use futures_01::{
+ task as task01, Async as Async01, Future as Future01, Poll as Poll01, Stream as Stream01,
+};
+#[cfg(feature = "sink")]
+use futures_01::{AsyncSink as AsyncSink01, Sink as Sink01, StartSend as StartSend01};
+use futures_core::{
+ future::TryFuture as TryFuture03,
+ stream::TryStream as TryStream03,
+ task::{RawWaker, RawWakerVTable},
+};
+#[cfg(feature = "sink")]
+use futures_sink::Sink as Sink03;
+#[cfg(feature = "sink")]
+use std::marker::PhantomData;
+use std::{mem, pin::Pin, sync::Arc, task::Context};
+
+/// Converts a futures 0.3 [`TryFuture`](futures_core::future::TryFuture) or
+/// [`TryStream`](futures_core::stream::TryStream) into a futures 0.1
+/// [`Future`](futures_01::future::Future) or
+/// [`Stream`](futures_01::stream::Stream).
+#[derive(Debug, Clone, Copy)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Compat<T> {
+ pub(crate) inner: T,
+}
+
+/// Converts a futures 0.3 [`Sink`](futures_sink::Sink) into a futures 0.1
+/// [`Sink`](futures_01::sink::Sink).
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct CompatSink<T, Item> {
+ inner: T,
+ _phantom: PhantomData<fn(Item)>,
+}
+
+impl<T> Compat<T> {
+ /// Creates a new [`Compat`].
+ ///
+ /// For types which implement the appropriate futures `0.3`
+ /// traits, the result is a type which implements
+ /// the corresponding futures 0.1 trait.
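+ ///
+ /// A minimal usage sketch (illustrative, assuming the `compat` feature is
+ /// enabled):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures_util::compat::{Compat, Future01CompatExt};
+ ///
+ /// // A futures 0.3 `TryFuture` (here a pinned, boxed `async` block) wrapped
+ /// // so it can be handed to APIs expecting a futures 0.1 `Future`.
+ /// let future01 = Compat::new(Box::pin(async { Ok::<u32, ()>(1) }));
+ ///
+ /// // Round-trip it back into a 0.3 future just to drive it here.
+ /// assert_eq!(future01.compat().await, Ok(1));
+ /// # });
+ /// ```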
+ pub fn new(inner: T) -> Self {
+ Self { inner }
+ }
+
+ /// Get a reference to the 0.3 Future, Stream, AsyncRead, or AsyncWrite object
+ /// contained within.
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Get a mutable reference to the 0.3 Future, Stream, AsyncRead, or AsyncWrite
+ /// object contained within.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Returns the inner item.
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<T, Item> CompatSink<T, Item> {
+ /// Creates a new [`CompatSink`].
+ pub fn new(inner: T) -> Self {
+ Self { inner, _phantom: PhantomData }
+ }
+
+ /// Get a reference to the 0.3 Sink contained within.
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Get a mutable reference to the 0.3 Sink contained within.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Returns the inner item.
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+}
+
+fn poll_03_to_01<T, E>(x: task03::Poll<Result<T, E>>) -> Result<Async01<T>, E> {
+ match x? {
+ task03::Poll::Ready(t) => Ok(Async01::Ready(t)),
+ task03::Poll::Pending => Ok(Async01::NotReady),
+ }
+}
+
+impl<Fut> Future01 for Compat<Fut>
+where
+ Fut: TryFuture03 + Unpin,
+{
+ type Item = Fut::Ok;
+ type Error = Fut::Error;
+
+ fn poll(&mut self) -> Poll01<Self::Item, Self::Error> {
+ with_context(self, |inner, cx| poll_03_to_01(inner.try_poll(cx)))
+ }
+}
+
+impl<St> Stream01 for Compat<St>
+where
+ St: TryStream03 + Unpin,
+{
+ type Item = St::Ok;
+ type Error = St::Error;
+
+ fn poll(&mut self) -> Poll01<Option<Self::Item>, Self::Error> {
+ with_context(self, |inner, cx| match inner.try_poll_next(cx)? {
+ task03::Poll::Ready(None) => Ok(Async01::Ready(None)),
+ task03::Poll::Ready(Some(t)) => Ok(Async01::Ready(Some(t))),
+ task03::Poll::Pending => Ok(Async01::NotReady),
+ })
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<T, Item> Sink01 for CompatSink<T, Item>
+where
+ T: Sink03<Item> + Unpin,
+{
+ type SinkItem = Item;
+ type SinkError = T::Error;
+
+ fn start_send(&mut self, item: Self::SinkItem) -> StartSend01<Self::SinkItem, Self::SinkError> {
+ with_sink_context(self, |mut inner, cx| match inner.as_mut().poll_ready(cx)? {
+ task03::Poll::Ready(()) => inner.start_send(item).map(|()| AsyncSink01::Ready),
+ task03::Poll::Pending => Ok(AsyncSink01::NotReady(item)),
+ })
+ }
+
+ fn poll_complete(&mut self) -> Poll01<(), Self::SinkError> {
+ with_sink_context(self, |inner, cx| poll_03_to_01(inner.poll_flush(cx)))
+ }
+
+ fn close(&mut self) -> Poll01<(), Self::SinkError> {
+ with_sink_context(self, |inner, cx| poll_03_to_01(inner.poll_close(cx)))
+ }
+}
+
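+// Waker bridge for the 0.3 -> 0.1 direction: `Current` captures the futures
+// 0.1 task handle (`task01::current()`) and exposes it as a futures 0.3
+// `Waker`, so that a 0.3 future or sink polled from inside a 0.1 task can
+// schedule that task to be notified again.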
+#[derive(Clone)]
+struct Current(task01::Task);
+
+impl Current {
+ fn new() -> Self {
+ Self(task01::current())
+ }
+
+ fn as_waker(&self) -> WakerRef<'_> {
+ unsafe fn ptr_to_current<'a>(ptr: *const ()) -> &'a Current {
+ &*(ptr as *const Current)
+ }
+ fn current_to_ptr(current: &Current) -> *const () {
+ current as *const Current as *const ()
+ }
+
+ unsafe fn clone(ptr: *const ()) -> RawWaker {
+ // Lazily create the `Arc` only when the waker is actually cloned.
+ // FIXME: remove `transmute` when a `Waker` -> `RawWaker` conversion
+ // function is landed in `core`.
+ mem::transmute::<task03::Waker, RawWaker>(task03::waker(Arc::new(
+ ptr_to_current(ptr).clone(),
+ )))
+ }
+ unsafe fn drop(_: *const ()) {}
+ unsafe fn wake(ptr: *const ()) {
+ ptr_to_current(ptr).0.notify()
+ }
+
+ let ptr = current_to_ptr(self);
+ let vtable = &RawWakerVTable::new(clone, wake, wake, drop);
+ WakerRef::new_unowned(std::mem::ManuallyDrop::new(unsafe {
+ task03::Waker::from_raw(RawWaker::new(ptr, vtable))
+ }))
+ }
+}
+
+impl ArcWake03 for Current {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.0.notify();
+ }
+}
+
+fn with_context<T, R, F>(compat: &mut Compat<T>, f: F) -> R
+where
+ T: Unpin,
+ F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> R,
+{
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ f(Pin::new(&mut compat.inner), &mut cx)
+}
+
+#[cfg(feature = "sink")]
+fn with_sink_context<T, Item, R, F>(compat: &mut CompatSink<T, Item>, f: F) -> R
+where
+ T: Unpin,
+ F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> R,
+{
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ f(Pin::new(&mut compat.inner), &mut cx)
+}
+
+#[cfg(feature = "io-compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+mod io {
+ use super::*;
+ use futures_io::{AsyncRead as AsyncRead03, AsyncWrite as AsyncWrite03};
+ use tokio_io::{AsyncRead as AsyncRead01, AsyncWrite as AsyncWrite01};
+
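+ // When the compat wrapper is driven through the blocking `std::io` traits,
+ // a pending 0.3 poll has nowhere to suspend, so it is surfaced as
+ // `ErrorKind::WouldBlock`, mirroring non-blocking std I/O.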
+ fn poll_03_to_io<T>(x: task03::Poll<Result<T, std::io::Error>>) -> Result<T, std::io::Error> {
+ match x {
+ task03::Poll::Ready(Ok(t)) => Ok(t),
+ task03::Poll::Pending => Err(std::io::ErrorKind::WouldBlock.into()),
+ task03::Poll::Ready(Err(e)) => Err(e),
+ }
+ }
+
+ impl<R: AsyncRead03 + Unpin> std::io::Read for Compat<R> {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ poll_03_to_io(Pin::new(&mut self.inner).poll_read(&mut cx, buf))
+ }
+ }
+
+ impl<R: AsyncRead03 + Unpin> AsyncRead01 for Compat<R> {}
+
+ impl<W: AsyncWrite03 + Unpin> std::io::Write for Compat<W> {
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ poll_03_to_io(Pin::new(&mut self.inner).poll_write(&mut cx, buf))
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ poll_03_to_io(Pin::new(&mut self.inner).poll_flush(&mut cx))
+ }
+ }
+
+ impl<W: AsyncWrite03 + Unpin> AsyncWrite01 for Compat<W> {
+ fn shutdown(&mut self) -> std::io::Result<Async01<()>> {
+ let current = Current::new();
+ let waker = current.as_waker();
+ let mut cx = Context::from_waker(&waker);
+ poll_03_to_01(Pin::new(&mut self.inner).poll_close(&mut cx))
+ }
+ }
+}
diff --git a/vendor/futures-util/src/compat/executor.rs b/vendor/futures-util/src/compat/executor.rs
new file mode 100644
index 000000000..e25705be1
--- /dev/null
+++ b/vendor/futures-util/src/compat/executor.rs
@@ -0,0 +1,85 @@
+use super::{Compat, Future01CompatExt};
+use crate::{
+ future::{FutureExt, TryFutureExt, UnitError},
+ task::SpawnExt,
+};
+use futures_01::future::{ExecuteError as ExecuteError01, Executor as Executor01};
+use futures_01::Future as Future01;
+use futures_task::{FutureObj, Spawn as Spawn03, SpawnError as SpawnError03};
+
+/// A future that can run on a futures 0.1
+/// [`Executor`](futures_01::future::Executor).
+pub type Executor01Future = Compat<UnitError<FutureObj<'static, ()>>>;
+
+/// Extension trait for futures 0.1 [`Executor`](futures_01::future::Executor).
+pub trait Executor01CompatExt: Executor01<Executor01Future> + Clone + Send + 'static {
+ /// Converts a futures 0.1 [`Executor`](futures_01::future::Executor) into a
+ /// futures 0.3 [`Spawn`](futures_task::Spawn).
+ ///
+ /// ```
+ /// use futures::task::SpawnExt;
+ /// use futures::future::{FutureExt, TryFutureExt};
+ /// use futures_util::compat::Executor01CompatExt;
+ /// use tokio::executor::DefaultExecutor;
+ ///
+ /// # let (tx, rx) = futures::channel::oneshot::channel();
+ ///
+ /// let spawner = DefaultExecutor::current().compat();
+ /// let future03 = async move {
+ /// println!("Running on the pool");
+ /// spawner.spawn(async {
+ /// println!("Spawned!");
+ /// # tx.send(42).unwrap();
+ /// }).unwrap();
+ /// };
+ ///
+ /// let future01 = future03.unit_error().boxed().compat();
+ ///
+ /// tokio::run(future01);
+ /// # futures::executor::block_on(rx).unwrap();
+ /// ```
+ fn compat(self) -> Executor01As03<Self>
+ where
+ Self: Sized;
+}
+
+impl<Ex> Executor01CompatExt for Ex
+where
+ Ex: Executor01<Executor01Future> + Clone + Send + 'static,
+{
+ fn compat(self) -> Executor01As03<Self> {
+ Executor01As03 { executor01: self }
+ }
+}
+
+/// Converts a futures 0.1 [`Executor`](futures_01::future::Executor) into a
+/// futures 0.3 [`Spawn`](futures_task::Spawn).
+#[derive(Debug, Clone)]
+pub struct Executor01As03<Ex> {
+ executor01: Ex,
+}
+
+impl<Ex> Spawn03 for Executor01As03<Ex>
+where
+ Ex: Executor01<Executor01Future> + Clone + Send + 'static,
+{
+ fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError03> {
+ let future = future.unit_error().compat();
+
+ self.executor01.execute(future).map_err(|_| SpawnError03::shutdown())
+ }
+}
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<Sp, Fut> Executor01<Fut> for Compat<Sp>
+where
+ for<'a> &'a Sp: Spawn03,
+ Fut: Future01<Item = (), Error = ()> + Send + 'static,
+{
+ fn execute(&self, future: Fut) -> Result<(), ExecuteError01<Fut>> {
+ (&self.inner)
+ .spawn(future.compat().map(|_| ()))
+ .expect("unable to spawn future from Compat executor");
+ Ok(())
+ }
+}
diff --git a/vendor/futures-util/src/compat/mod.rs b/vendor/futures-util/src/compat/mod.rs
new file mode 100644
index 000000000..4812803eb
--- /dev/null
+++ b/vendor/futures-util/src/compat/mod.rs
@@ -0,0 +1,22 @@
+//! Interop between `futures` 0.1 and 0.3.
+//!
+//! This module is only available when the `compat` feature of this
+//! library is activated.
+
+mod executor;
+pub use self::executor::{Executor01As03, Executor01CompatExt, Executor01Future};
+
+mod compat01as03;
+#[cfg(feature = "io-compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+pub use self::compat01as03::{AsyncRead01CompatExt, AsyncWrite01CompatExt};
+pub use self::compat01as03::{Compat01As03, Future01CompatExt, Stream01CompatExt};
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub use self::compat01as03::{Compat01As03Sink, Sink01CompatExt};
+
+mod compat03as01;
+pub use self::compat03as01::Compat;
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub use self::compat03as01::CompatSink;
diff --git a/vendor/futures-util/src/fns.rs b/vendor/futures-util/src/fns.rs
new file mode 100644
index 000000000..37ee03e6d
--- /dev/null
+++ b/vendor/futures-util/src/fns.rs
@@ -0,0 +1,372 @@
+use core::fmt::{self, Debug};
+use core::marker::PhantomData;
+
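+// One-argument analogues of the standard `FnOnce`/`FnMut`/`Fn` traits. Unlike
+// the std traits, these can be implemented for named types on stable Rust,
+// which lets the combinator adapters below (chain, inspect, map_ok, ...) be
+// expressed as concrete, nameable function objects.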
+pub trait FnOnce1<A> {
+ type Output;
+ fn call_once(self, arg: A) -> Self::Output;
+}
+
+impl<T, A, R> FnOnce1<A> for T
+where
+ T: FnOnce(A) -> R,
+{
+ type Output = R;
+ fn call_once(self, arg: A) -> R {
+ self(arg)
+ }
+}
+
+pub trait FnMut1<A>: FnOnce1<A> {
+ fn call_mut(&mut self, arg: A) -> Self::Output;
+}
+
+impl<T, A, R> FnMut1<A> for T
+where
+ T: FnMut(A) -> R,
+{
+ fn call_mut(&mut self, arg: A) -> R {
+ self(arg)
+ }
+}
+
+// Not used, but present for completeness
+#[allow(unreachable_pub)]
+pub trait Fn1<A>: FnMut1<A> {
+ fn call(&self, arg: A) -> Self::Output;
+}
+
+impl<T, A, R> Fn1<A> for T
+where
+ T: Fn(A) -> R,
+{
+ fn call(&self, arg: A) -> R {
+ self(arg)
+ }
+}
+
+macro_rules! trivial_fn_impls {
+ ($name:ident <$($arg:ident),*> $t:ty = $debug:literal) => {
+ impl<$($arg),*> Copy for $t {}
+ impl<$($arg),*> Clone for $t {
+ fn clone(&self) -> Self { *self }
+ }
+ impl<$($arg),*> Debug for $t {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str($debug)
+ }
+ }
+ impl<$($arg,)* A> FnMut1<A> for $t where Self: FnOnce1<A> {
+ fn call_mut(&mut self, arg: A) -> Self::Output {
+ self.call_once(arg)
+ }
+ }
+ impl<$($arg,)* A> Fn1<A> for $t where Self: FnOnce1<A> {
+ fn call(&self, arg: A) -> Self::Output {
+ self.call_once(arg)
+ }
+ }
+ pub(crate) fn $name<$($arg),*>() -> $t {
+ Default::default()
+ }
+ }
+}
+
+pub struct OkFn<E>(PhantomData<fn(E)>);
+
+impl<E> Default for OkFn<E> {
+ fn default() -> Self {
+ Self(PhantomData)
+ }
+}
+
+impl<A, E> FnOnce1<A> for OkFn<E> {
+ type Output = Result<A, E>;
+ fn call_once(self, arg: A) -> Self::Output {
+ Ok(arg)
+ }
+}
+
+trivial_fn_impls!(ok_fn <T> OkFn<T> = "Ok");
+
+#[derive(Debug, Copy, Clone, Default)]
+pub struct ChainFn<F, G>(F, G);
+
+impl<F, G, A> FnOnce1<A> for ChainFn<F, G>
+where
+ F: FnOnce1<A>,
+ G: FnOnce1<F::Output>,
+{
+ type Output = G::Output;
+ fn call_once(self, arg: A) -> Self::Output {
+ self.1.call_once(self.0.call_once(arg))
+ }
+}
+impl<F, G, A> FnMut1<A> for ChainFn<F, G>
+where
+ F: FnMut1<A>,
+ G: FnMut1<F::Output>,
+{
+ fn call_mut(&mut self, arg: A) -> Self::Output {
+ self.1.call_mut(self.0.call_mut(arg))
+ }
+}
+impl<F, G, A> Fn1<A> for ChainFn<F, G>
+where
+ F: Fn1<A>,
+ G: Fn1<F::Output>,
+{
+ fn call(&self, arg: A) -> Self::Output {
+ self.1.call(self.0.call(arg))
+ }
+}
+pub(crate) fn chain_fn<F, G>(f: F, g: G) -> ChainFn<F, G> {
+ ChainFn(f, g)
+}
+
+#[derive(Default)]
+pub struct MergeResultFn;
+
+impl<T> FnOnce1<Result<T, T>> for MergeResultFn {
+ type Output = T;
+ fn call_once(self, arg: Result<T, T>) -> Self::Output {
+ match arg {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+}
+trivial_fn_impls!(merge_result_fn <> MergeResultFn = "merge_result");
+
+#[derive(Debug, Copy, Clone, Default)]
+pub struct InspectFn<F>(F);
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<F, A> FnOnce1<A> for InspectFn<F>
+where
+ F: for<'a> FnOnce1<&'a A, Output = ()>,
+{
+ type Output = A;
+ fn call_once(self, arg: A) -> Self::Output {
+ self.0.call_once(&arg);
+ arg
+ }
+}
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<F, A> FnMut1<A> for InspectFn<F>
+where
+ F: for<'a> FnMut1<&'a A, Output = ()>,
+{
+ fn call_mut(&mut self, arg: A) -> Self::Output {
+ self.0.call_mut(&arg);
+ arg
+ }
+}
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<F, A> Fn1<A> for InspectFn<F>
+where
+ F: for<'a> Fn1<&'a A, Output = ()>,
+{
+ fn call(&self, arg: A) -> Self::Output {
+ self.0.call(&arg);
+ arg
+ }
+}
+pub(crate) fn inspect_fn<F>(f: F) -> InspectFn<F> {
+ InspectFn(f)
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+pub struct MapOkFn<F>(F);
+
+impl<F, T, E> FnOnce1<Result<T, E>> for MapOkFn<F>
+where
+ F: FnOnce1<T>,
+{
+ type Output = Result<F::Output, E>;
+ fn call_once(self, arg: Result<T, E>) -> Self::Output {
+ arg.map(|x| self.0.call_once(x))
+ }
+}
+impl<F, T, E> FnMut1<Result<T, E>> for MapOkFn<F>
+where
+ F: FnMut1<T>,
+{
+ fn call_mut(&mut self, arg: Result<T, E>) -> Self::Output {
+ arg.map(|x| self.0.call_mut(x))
+ }
+}
+impl<F, T, E> Fn1<Result<T, E>> for MapOkFn<F>
+where
+ F: Fn1<T>,
+{
+ fn call(&self, arg: Result<T, E>) -> Self::Output {
+ arg.map(|x| self.0.call(x))
+ }
+}
+pub(crate) fn map_ok_fn<F>(f: F) -> MapOkFn<F> {
+ MapOkFn(f)
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+pub struct MapErrFn<F>(F);
+
+impl<F, T, E> FnOnce1<Result<T, E>> for MapErrFn<F>
+where
+ F: FnOnce1<E>,
+{
+ type Output = Result<T, F::Output>;
+ fn call_once(self, arg: Result<T, E>) -> Self::Output {
+ arg.map_err(|x| self.0.call_once(x))
+ }
+}
+impl<F, T, E> FnMut1<Result<T, E>> for MapErrFn<F>
+where
+ F: FnMut1<E>,
+{
+ fn call_mut(&mut self, arg: Result<T, E>) -> Self::Output {
+ arg.map_err(|x| self.0.call_mut(x))
+ }
+}
+impl<F, T, E> Fn1<Result<T, E>> for MapErrFn<F>
+where
+ F: Fn1<E>,
+{
+ fn call(&self, arg: Result<T, E>) -> Self::Output {
+ arg.map_err(|x| self.0.call(x))
+ }
+}
+pub(crate) fn map_err_fn<F>(f: F) -> MapErrFn<F> {
+ MapErrFn(f)
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct InspectOkFn<F>(F);
+
+impl<'a, F, T, E> FnOnce1<&'a Result<T, E>> for InspectOkFn<F>
+where
+ F: FnOnce1<&'a T, Output = ()>,
+{
+ type Output = ();
+ fn call_once(self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Ok(x) = arg {
+ self.0.call_once(x)
+ }
+ }
+}
+impl<'a, F, T, E> FnMut1<&'a Result<T, E>> for InspectOkFn<F>
+where
+ F: FnMut1<&'a T, Output = ()>,
+{
+ fn call_mut(&mut self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Ok(x) = arg {
+ self.0.call_mut(x)
+ }
+ }
+}
+impl<'a, F, T, E> Fn1<&'a Result<T, E>> for InspectOkFn<F>
+where
+ F: Fn1<&'a T, Output = ()>,
+{
+ fn call(&self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Ok(x) = arg {
+ self.0.call(x)
+ }
+ }
+}
+pub(crate) fn inspect_ok_fn<F>(f: F) -> InspectOkFn<F> {
+ InspectOkFn(f)
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct InspectErrFn<F>(F);
+
+impl<'a, F, T, E> FnOnce1<&'a Result<T, E>> for InspectErrFn<F>
+where
+ F: FnOnce1<&'a E, Output = ()>,
+{
+ type Output = ();
+ fn call_once(self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Err(x) = arg {
+ self.0.call_once(x)
+ }
+ }
+}
+impl<'a, F, T, E> FnMut1<&'a Result<T, E>> for InspectErrFn<F>
+where
+ F: FnMut1<&'a E, Output = ()>,
+{
+ fn call_mut(&mut self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Err(x) = arg {
+ self.0.call_mut(x)
+ }
+ }
+}
+impl<'a, F, T, E> Fn1<&'a Result<T, E>> for InspectErrFn<F>
+where
+ F: Fn1<&'a E, Output = ()>,
+{
+ fn call(&self, arg: &'a Result<T, E>) -> Self::Output {
+ if let Err(x) = arg {
+ self.0.call(x)
+ }
+ }
+}
+pub(crate) fn inspect_err_fn<F>(f: F) -> InspectErrFn<F> {
+ InspectErrFn(f)
+}
+
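+// `map_ok_or_else` is built by composition: map the `Ok` value with `f`, map
+// the `Err` value with `g`, then merge the resulting homogeneous
+// `Result<T, T>` into a plain `T`.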
+pub(crate) type MapOkOrElseFn<F, G> = ChainFn<MapOkFn<F>, ChainFn<MapErrFn<G>, MergeResultFn>>;
+pub(crate) fn map_ok_or_else_fn<F, G>(f: F, g: G) -> MapOkOrElseFn<F, G> {
+ chain_fn(map_ok_fn(f), chain_fn(map_err_fn(g), merge_result_fn()))
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+pub struct UnwrapOrElseFn<F>(F);
+
+impl<F, T, E> FnOnce1<Result<T, E>> for UnwrapOrElseFn<F>
+where
+ F: FnOnce1<E, Output = T>,
+{
+ type Output = T;
+ fn call_once(self, arg: Result<T, E>) -> Self::Output {
+ arg.unwrap_or_else(|x| self.0.call_once(x))
+ }
+}
+impl<F, T, E> FnMut1<Result<T, E>> for UnwrapOrElseFn<F>
+where
+ F: FnMut1<E, Output = T>,
+{
+ fn call_mut(&mut self, arg: Result<T, E>) -> Self::Output {
+ arg.unwrap_or_else(|x| self.0.call_mut(x))
+ }
+}
+impl<F, T, E> Fn1<Result<T, E>> for UnwrapOrElseFn<F>
+where
+ F: Fn1<E, Output = T>,
+{
+ fn call(&self, arg: Result<T, E>) -> Self::Output {
+ arg.unwrap_or_else(|x| self.0.call(x))
+ }
+}
+pub(crate) fn unwrap_or_else_fn<F>(f: F) -> UnwrapOrElseFn<F> {
+ UnwrapOrElseFn(f)
+}
+
+pub struct IntoFn<T>(PhantomData<fn() -> T>);
+
+impl<T> Default for IntoFn<T> {
+ fn default() -> Self {
+ Self(PhantomData)
+ }
+}
+impl<A, T> FnOnce1<A> for IntoFn<T>
+where
+ A: Into<T>,
+{
+ type Output = T;
+ fn call_once(self, arg: A) -> Self::Output {
+ arg.into()
+ }
+}
+
+trivial_fn_impls!(into_fn <T> IntoFn<T> = "Into::into");
diff --git a/vendor/futures-util/src/future/abortable.rs b/vendor/futures-util/src/future/abortable.rs
new file mode 100644
index 000000000..d017ab734
--- /dev/null
+++ b/vendor/futures-util/src/future/abortable.rs
@@ -0,0 +1,19 @@
+use super::assert_future;
+use crate::future::{AbortHandle, Abortable, Aborted};
+use futures_core::future::Future;
+
+/// Creates a new `Abortable` future and an `AbortHandle` which can be used to stop it.
+///
+/// This function is a convenient (but less flexible) alternative to calling
+/// `AbortHandle::new` and `Abortable::new` manually.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
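+///
+/// A minimal sketch of typical usage:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future::{abortable, Aborted};
+///
+/// let (fut, abort_handle) = abortable(async { 2 });
+/// abort_handle.abort();
+/// assert_eq!(fut.await, Err(Aborted));
+/// # });
+/// ```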
+pub fn abortable<Fut>(future: Fut) -> (Abortable<Fut>, AbortHandle)
+where
+ Fut: Future,
+{
+ let (handle, reg) = AbortHandle::new_pair();
+ let abortable = assert_future::<Result<Fut::Output, Aborted>, _>(Abortable::new(future, reg));
+ (abortable, handle)
+}
diff --git a/vendor/futures-util/src/future/either.rs b/vendor/futures-util/src/future/either.rs
new file mode 100644
index 000000000..9602de7a4
--- /dev/null
+++ b/vendor/futures-util/src/future/either.rs
@@ -0,0 +1,297 @@
+use core::pin::Pin;
+use core::task::{Context, Poll};
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::{FusedStream, Stream};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+
+/// Combines two different futures, streams, or sinks having the same associated types into a single type.
+///
+/// This is useful when conditionally choosing between two distinct future types:
+///
+/// ```rust
+/// use futures::future::Either;
+///
+/// # futures::executor::block_on(async {
+/// let cond = true;
+///
+/// let fut = if cond {
+/// Either::Left(async move { 12 })
+/// } else {
+/// Either::Right(async move { 44 })
+/// };
+///
+/// assert_eq!(fut.await, 12);
+/// # })
+/// ```
+#[derive(Debug, Clone)]
+pub enum Either<A, B> {
+ /// First branch of the type
+ Left(/* #[pin] */ A),
+ /// Second branch of the type
+ Right(/* #[pin] */ B),
+}
+
+impl<A, B> Either<A, B> {
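+ // A sketch of why the unchecked projection below is sound: the inner
+ // values are only ever reached through `Pin<&mut Self>`, no unpinned
+ // `&mut A`/`&mut B` is handed out, and `Either` has no `Drop` impl or
+ // manual `Unpin` impl that could break structural pinning.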
+ fn project(self: Pin<&mut Self>) -> Either<Pin<&mut A>, Pin<&mut B>> {
+ unsafe {
+ match self.get_unchecked_mut() {
+ Either::Left(a) => Either::Left(Pin::new_unchecked(a)),
+ Either::Right(b) => Either::Right(Pin::new_unchecked(b)),
+ }
+ }
+ }
+}
+
+impl<A, B, T> Either<(T, A), (T, B)> {
+ /// Factor out a homogeneous type from an either of pairs.
+ ///
+ /// Here, the homogeneous type is the first element of the pairs.
+ pub fn factor_first(self) -> (T, Either<A, B>) {
+ match self {
+ Either::Left((x, a)) => (x, Either::Left(a)),
+ Either::Right((x, b)) => (x, Either::Right(b)),
+ }
+ }
+}
+
+impl<A, B, T> Either<(A, T), (B, T)> {
+ /// Factor out a homogeneous type from an either of pairs.
+ ///
+ /// Here, the homogeneous type is the second element of the pairs.
+ pub fn factor_second(self) -> (Either<A, B>, T) {
+ match self {
+ Either::Left((a, x)) => (Either::Left(a), x),
+ Either::Right((b, x)) => (Either::Right(b), x),
+ }
+ }
+}
+
+impl<T> Either<T, T> {
+ /// Extract the value of an either over two equivalent types.
+ pub fn into_inner(self) -> T {
+ match self {
+ Either::Left(x) => x,
+ Either::Right(x) => x,
+ }
+ }
+}
+
+impl<A, B> Future for Either<A, B>
+where
+ A: Future,
+ B: Future<Output = A::Output>,
+{
+ type Output = A::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match self.project() {
+ Either::Left(x) => x.poll(cx),
+ Either::Right(x) => x.poll(cx),
+ }
+ }
+}
+
+impl<A, B> FusedFuture for Either<A, B>
+where
+ A: FusedFuture,
+ B: FusedFuture<Output = A::Output>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Either::Left(x) => x.is_terminated(),
+ Either::Right(x) => x.is_terminated(),
+ }
+ }
+}
+
+impl<A, B> Stream for Either<A, B>
+where
+ A: Stream,
+ B: Stream<Item = A::Item>,
+{
+ type Item = A::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ match self.project() {
+ Either::Left(x) => x.poll_next(cx),
+ Either::Right(x) => x.poll_next(cx),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ Either::Left(x) => x.size_hint(),
+ Either::Right(x) => x.size_hint(),
+ }
+ }
+}
+
+impl<A, B> FusedStream for Either<A, B>
+where
+ A: FusedStream,
+ B: FusedStream<Item = A::Item>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Either::Left(x) => x.is_terminated(),
+ Either::Right(x) => x.is_terminated(),
+ }
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<A, B, Item> Sink<Item> for Either<A, B>
+where
+ A: Sink<Item>,
+ B: Sink<Item, Error = A::Error>,
+{
+ type Error = A::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.project() {
+ Either::Left(x) => x.poll_ready(cx),
+ Either::Right(x) => x.poll_ready(cx),
+ }
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ match self.project() {
+ Either::Left(x) => x.start_send(item),
+ Either::Right(x) => x.start_send(item),
+ }
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.project() {
+ Either::Left(x) => x.poll_flush(cx),
+ Either::Right(x) => x.poll_flush(cx),
+ }
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.project() {
+ Either::Left(x) => x.poll_close(cx),
+ Either::Right(x) => x.poll_close(cx),
+ }
+ }
+}
+
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+mod if_std {
+ use super::*;
+
+ use core::pin::Pin;
+ use core::task::{Context, Poll};
+ use futures_io::{
+ AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, Result, SeekFrom,
+ };
+
+ impl<A, B> AsyncRead for Either<A, B>
+ where
+ A: AsyncRead,
+ B: AsyncRead,
+ {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>> {
+ match self.project() {
+ Either::Left(x) => x.poll_read(cx, buf),
+ Either::Right(x) => x.poll_read(cx, buf),
+ }
+ }
+
+ fn poll_read_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<Result<usize>> {
+ match self.project() {
+ Either::Left(x) => x.poll_read_vectored(cx, bufs),
+ Either::Right(x) => x.poll_read_vectored(cx, bufs),
+ }
+ }
+ }
+
+ impl<A, B> AsyncWrite for Either<A, B>
+ where
+ A: AsyncWrite,
+ B: AsyncWrite,
+ {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>> {
+ match self.project() {
+ Either::Left(x) => x.poll_write(cx, buf),
+ Either::Right(x) => x.poll_write(cx, buf),
+ }
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<Result<usize>> {
+ match self.project() {
+ Either::Left(x) => x.poll_write_vectored(cx, bufs),
+ Either::Right(x) => x.poll_write_vectored(cx, bufs),
+ }
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ match self.project() {
+ Either::Left(x) => x.poll_flush(cx),
+ Either::Right(x) => x.poll_flush(cx),
+ }
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ match self.project() {
+ Either::Left(x) => x.poll_close(cx),
+ Either::Right(x) => x.poll_close(cx),
+ }
+ }
+ }
+
+ impl<A, B> AsyncSeek for Either<A, B>
+ where
+ A: AsyncSeek,
+ B: AsyncSeek,
+ {
+ fn poll_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<Result<u64>> {
+ match self.project() {
+ Either::Left(x) => x.poll_seek(cx, pos),
+ Either::Right(x) => x.poll_seek(cx, pos),
+ }
+ }
+ }
+
+ impl<A, B> AsyncBufRead for Either<A, B>
+ where
+ A: AsyncBufRead,
+ B: AsyncBufRead,
+ {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
+ match self.project() {
+ Either::Left(x) => x.poll_fill_buf(cx),
+ Either::Right(x) => x.poll_fill_buf(cx),
+ }
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ match self.project() {
+ Either::Left(x) => x.consume(amt),
+ Either::Right(x) => x.consume(amt),
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/future/future/catch_unwind.rs b/vendor/futures-util/src/future/future/catch_unwind.rs
new file mode 100644
index 000000000..0e09d6eeb
--- /dev/null
+++ b/vendor/futures-util/src/future/future/catch_unwind.rs
@@ -0,0 +1,38 @@
+use core::any::Any;
+use core::pin::Pin;
+use std::panic::{catch_unwind, AssertUnwindSafe, UnwindSafe};
+
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`catch_unwind`](super::FutureExt::catch_unwind) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct CatchUnwind<Fut> {
+ #[pin]
+ future: Fut,
+ }
+}
+
+impl<Fut> CatchUnwind<Fut>
+where
+ Fut: Future + UnwindSafe,
+{
+ pub(super) fn new(future: Fut) -> Self {
+ Self { future }
+ }
+}
+
+impl<Fut> Future for CatchUnwind<Fut>
+where
+ Fut: Future + UnwindSafe,
+{
+ type Output = Result<Fut::Output, Box<dyn Any + Send>>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let f = self.project().future;
+ catch_unwind(AssertUnwindSafe(|| f.poll(cx)))?.map(Ok)
+ }
+}
diff --git a/vendor/futures-util/src/future/future/flatten.rs b/vendor/futures-util/src/future/future/flatten.rs
new file mode 100644
index 000000000..bd767af34
--- /dev/null
+++ b/vendor/futures-util/src/future/future/flatten.rs
@@ -0,0 +1,153 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
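+// State machine for flattening: `First` holds the outer future until it
+// resolves, `Second` holds the inner future/stream/sink it produced, and
+// `Empty` marks completion so repeated polling can be detected.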
+pin_project! {
+ #[project = FlattenProj]
+ #[derive(Debug)]
+ pub enum Flatten<Fut1, Fut2> {
+ First { #[pin] f: Fut1 },
+ Second { #[pin] f: Fut2 },
+ Empty,
+ }
+}
+
+impl<Fut1, Fut2> Flatten<Fut1, Fut2> {
+ pub(crate) fn new(future: Fut1) -> Self {
+ Self::First { f: future }
+ }
+}
+
+impl<Fut> FusedFuture for Flatten<Fut, Fut::Output>
+where
+ Fut: Future,
+ Fut::Output: Future,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Empty => true,
+ _ => false,
+ }
+ }
+}
+
+impl<Fut> Future for Flatten<Fut, Fut::Output>
+where
+ Fut: Future,
+ Fut::Output: Future,
+{
+ type Output = <Fut::Output as Future>::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ FlattenProj::First { f } => {
+ let f = ready!(f.poll(cx));
+ self.set(Self::Second { f });
+ }
+ FlattenProj::Second { f } => {
+ let output = ready!(f.poll(cx));
+ self.set(Self::Empty);
+ break output;
+ }
+ FlattenProj::Empty => panic!("Flatten polled after completion"),
+ }
+ })
+ }
+}
+
+impl<Fut> FusedStream for Flatten<Fut, Fut::Output>
+where
+ Fut: Future,
+ Fut::Output: Stream,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Empty => true,
+ _ => false,
+ }
+ }
+}
+
+impl<Fut> Stream for Flatten<Fut, Fut::Output>
+where
+ Fut: Future,
+ Fut::Output: Stream,
+{
+ type Item = <Fut::Output as Stream>::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ FlattenProj::First { f } => {
+ let f = ready!(f.poll(cx));
+ self.set(Self::Second { f });
+ }
+ FlattenProj::Second { f } => {
+ let output = ready!(f.poll_next(cx));
+ if output.is_none() {
+ self.set(Self::Empty);
+ }
+ break output;
+ }
+ FlattenProj::Empty => break None,
+ }
+ })
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<Fut, Item> Sink<Item> for Flatten<Fut, Fut::Output>
+where
+ Fut: Future,
+ Fut::Output: Sink<Item>,
+{
+ type Error = <Fut::Output as Sink<Item>>::Error;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ FlattenProj::First { f } => {
+ let f = ready!(f.poll(cx));
+ self.set(Self::Second { f });
+ }
+ FlattenProj::Second { f } => {
+ break ready!(f.poll_ready(cx));
+ }
+ FlattenProj::Empty => panic!("poll_ready called after eof"),
+ }
+ })
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ match self.project() {
+ FlattenProj::First { .. } => panic!("poll_ready not called first"),
+ FlattenProj::Second { f } => f.start_send(item),
+ FlattenProj::Empty => panic!("start_send called after eof"),
+ }
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.project() {
+ FlattenProj::First { .. } => Poll::Ready(Ok(())),
+ FlattenProj::Second { f } => f.poll_flush(cx),
+ FlattenProj::Empty => panic!("poll_flush called after eof"),
+ }
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let res = match self.as_mut().project() {
+ FlattenProj::Second { f } => f.poll_close(cx),
+ _ => Poll::Ready(Ok(())),
+ };
+ if res.is_ready() {
+ self.set(Self::Empty);
+ }
+ res
+ }
+}
diff --git a/vendor/futures-util/src/future/future/fuse.rs b/vendor/futures-util/src/future/future/fuse.rs
new file mode 100644
index 000000000..597aec1a4
--- /dev/null
+++ b/vendor/futures-util/src/future/future/fuse.rs
@@ -0,0 +1,93 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`fuse`](super::FutureExt::fuse) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Fuse<Fut> {
+ #[pin]
+ inner: Option<Fut>,
+ }
+}
+
+impl<Fut> Fuse<Fut> {
+ pub(super) fn new(f: Fut) -> Self {
+ Self { inner: Some(f) }
+ }
+}
+
+impl<Fut: Future> Fuse<Fut> {
+ /// Creates a new `Fuse`-wrapped future which is already terminated.
+ ///
+ /// This can be useful in combination with looping and the `select!`
+ /// macro, which bypasses terminated futures.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::future::{Fuse, FusedFuture, FutureExt};
+ /// use futures::select;
+ /// use futures::stream::StreamExt;
+ /// use futures::pin_mut;
+ ///
+ /// let (sender, mut stream) = mpsc::unbounded();
+ ///
+ /// // Send a few messages into the stream
+ /// sender.unbounded_send(()).unwrap();
+ /// sender.unbounded_send(()).unwrap();
+ /// drop(sender);
+ ///
+ /// // Use `Fuse::terminated()` to create an already-terminated future
+ /// // which may be instantiated later.
+ /// let foo_printer = Fuse::terminated();
+ /// pin_mut!(foo_printer);
+ ///
+ /// loop {
+ /// select! {
+ /// _ = foo_printer => {},
+ /// () = stream.select_next_some() => {
+ /// if !foo_printer.is_terminated() {
+ /// println!("Foo is already being printed!");
+ /// } else {
+ /// foo_printer.set(async {
+ /// // do some other async operations
+ /// println!("Printing foo from `foo_printer` future");
+ /// }.fuse());
+ /// }
+ /// },
+ /// complete => break, // `foo_printer` is terminated and the stream is done
+ /// }
+ /// }
+ /// # });
+ /// ```
+ pub fn terminated() -> Self {
+ Self { inner: None }
+ }
+}
+
+impl<Fut: Future> FusedFuture for Fuse<Fut> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<Fut: Future> Future for Fuse<Fut> {
+ type Output = Fut::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Fut::Output> {
+ Poll::Ready(match self.as_mut().project().inner.as_pin_mut() {
+ Some(fut) => {
+ let output = ready!(fut.poll(cx));
+ self.project().inner.set(None);
+ output
+ }
+ None => return Poll::Pending,
+ })
+ }
+}
diff --git a/vendor/futures-util/src/future/future/map.rs b/vendor/futures-util/src/future/future/map.rs
new file mode 100644
index 000000000..7471aba00
--- /dev/null
+++ b/vendor/futures-util/src/future/future/map.rs
@@ -0,0 +1,66 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+use crate::fns::FnOnce1;
+
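+// `Map` is an enum rather than a struct so that, once the inner future
+// resolves, `project_replace` can take ownership of both the future and the
+// closure in a single step, leaving `Complete` behind as the terminal state.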
+pin_project! {
+ /// Internal Map future
+ #[project = MapProj]
+ #[project_replace = MapProjReplace]
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub enum Map<Fut, F> {
+ Incomplete {
+ #[pin]
+ future: Fut,
+ f: F,
+ },
+ Complete,
+ }
+}
+
+impl<Fut, F> Map<Fut, F> {
+ /// Creates a new Map.
+ pub(crate) fn new(future: Fut, f: F) -> Self {
+ Self::Incomplete { future, f }
+ }
+}
+
+impl<Fut, F, T> FusedFuture for Map<Fut, F>
+where
+ Fut: Future,
+ F: FnOnce1<Fut::Output, Output = T>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Incomplete { .. } => false,
+ Self::Complete => true,
+ }
+ }
+}
+
+impl<Fut, F, T> Future for Map<Fut, F>
+where
+ Fut: Future,
+ F: FnOnce1<Fut::Output, Output = T>,
+{
+ type Output = T;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ match self.as_mut().project() {
+ MapProj::Incomplete { future, .. } => {
+ let output = ready!(future.poll(cx));
+ match self.project_replace(Map::Complete) {
+ MapProjReplace::Incomplete { f, .. } => Poll::Ready(f.call_once(output)),
+ MapProjReplace::Complete => unreachable!(),
+ }
+ }
+ MapProj::Complete => {
+ panic!("Map must not be polled after it returned `Poll::Ready`")
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/future/future/mod.rs b/vendor/futures-util/src/future/future/mod.rs
new file mode 100644
index 000000000..c11d10820
--- /dev/null
+++ b/vendor/futures-util/src/future/future/mod.rs
@@ -0,0 +1,610 @@
+//! Futures
+//!
+//! This module contains a number of functions for working with `Future`s,
+//! including the `FutureExt` trait which adds methods to `Future` types.
+
+#[cfg(feature = "alloc")]
+use alloc::boxed::Box;
+use core::pin::Pin;
+
+use crate::fns::{inspect_fn, into_fn, ok_fn, InspectFn, IntoFn, OkFn};
+use crate::future::{assert_future, Either};
+use crate::never::Never;
+use crate::stream::assert_stream;
+#[cfg(feature = "alloc")]
+use futures_core::future::{BoxFuture, LocalBoxFuture};
+use futures_core::{
+ future::Future,
+ stream::Stream,
+ task::{Context, Poll},
+};
+use pin_utils::pin_mut;
+
+// Combinators
+
+mod flatten;
+mod fuse;
+mod map;
+
+delegate_all!(
+ /// Future for the [`flatten`](super::FutureExt::flatten) method.
+ Flatten<F>(
+ flatten::Flatten<F, <F as Future>::Output>
+ ): Debug + Future + FusedFuture + New[|x: F| flatten::Flatten::new(x)]
+ where F: Future
+);
+
+delegate_all!(
+ /// Stream for the [`flatten_stream`](FutureExt::flatten_stream) method.
+ FlattenStream<F>(
+ flatten::Flatten<F, <F as Future>::Output>
+ ): Debug + Sink + Stream + FusedStream + New[|x: F| flatten::Flatten::new(x)]
+ where F: Future
+);
+
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use fuse::Fuse;
+
+delegate_all!(
+ /// Future for the [`map`](super::FutureExt::map) method.
+ Map<Fut, F>(
+ map::Map<Fut, F>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| map::Map::new(x, f)]
+);
+
+delegate_all!(
+ /// Stream for the [`into_stream`](FutureExt::into_stream) method.
+ IntoStream<F>(
+ crate::stream::Once<F>
+ ): Debug + Stream + FusedStream + New[|x: F| crate::stream::Once::new(x)]
+);
+
+delegate_all!(
+ /// Future for the [`map_into`](FutureExt::map_into) combinator.
+ MapInto<Fut, T>(
+ Map<Fut, IntoFn<T>>
+ ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, into_fn())]
+);
+
+delegate_all!(
+ /// Future for the [`then`](FutureExt::then) method.
+ Then<Fut1, Fut2, F>(
+ flatten::Flatten<Map<Fut1, F>, Fut2>
+ ): Debug + Future + FusedFuture + New[|x: Fut1, y: F| flatten::Flatten::new(Map::new(x, y))]
+);
+
+delegate_all!(
+ /// Future for the [`inspect`](FutureExt::inspect) method.
+ Inspect<Fut, F>(
+ map::Map<Fut, InspectFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| map::Map::new(x, inspect_fn(f))]
+);
+
+delegate_all!(
+ /// Future for the [`never_error`](super::FutureExt::never_error) combinator.
+ NeverError<Fut>(
+ Map<Fut, OkFn<Never>>
+ ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, ok_fn())]
+);
+
+delegate_all!(
+ /// Future for the [`unit_error`](super::FutureExt::unit_error) combinator.
+ UnitError<Fut>(
+ Map<Fut, OkFn<()>>
+ ): Debug + Future + FusedFuture + New[|x: Fut| Map::new(x, ok_fn())]
+);
+
+#[cfg(feature = "std")]
+mod catch_unwind;
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::catch_unwind::CatchUnwind;
+
+#[cfg(feature = "channel")]
+#[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+#[cfg(feature = "std")]
+mod remote_handle;
+#[cfg(feature = "channel")]
+#[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::remote_handle::{Remote, RemoteHandle};
+
+#[cfg(feature = "std")]
+mod shared;
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::shared::{Shared, WeakShared};
+
+impl<T: ?Sized> FutureExt for T where T: Future {}
+
+/// An extension trait for `Future`s that provides a variety of convenient
+/// adapters.
+pub trait FutureExt: Future {
+ /// Map this future's output to a different type, returning a new future of
+ /// the resulting type.
+ ///
+ /// This function is similar to `Option::map` or `Iterator::map`, in that
+ /// it changes the type of the underlying future. This is useful to
+ /// chain along a computation once a future has been resolved.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it, similar to the existing `map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let future = async { 1 };
+ /// let new_future = future.map(|x| x + 3);
+ /// assert_eq!(new_future.await, 4);
+ /// # });
+ /// ```
+ fn map<U, F>(self, f: F) -> Map<Self, F>
+ where
+ F: FnOnce(Self::Output) -> U,
+ Self: Sized,
+ {
+ assert_future::<U, _>(Map::new(self, f))
+ }
+
+ /// Map this future's output to a different type, returning a new future of
+ /// the resulting type.
+ ///
+ /// This function is equivalent to calling `map(Into::into)` but allows naming
+ /// the return type.
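+ ///
+ /// # Examples
+ ///
+ /// A short illustrative example:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let future = async { 7_u8 };
+ /// let widened = future.map_into::<u16>();
+ /// assert_eq!(widened.await, 7_u16);
+ /// # });
+ /// ```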
+ fn map_into<U>(self) -> MapInto<Self, U>
+ where
+ Self::Output: Into<U>,
+ Self: Sized,
+ {
+ assert_future::<U, _>(MapInto::new(self))
+ }
+
+ /// Chain on a computation for when a future finished, passing the result of
+ /// the future to the provided closure `f`.
+ ///
+ /// The returned value of the closure must implement the `Future` trait
+ /// and can represent some more work to be done before the composed future
+ /// is finished.
+ ///
+ /// The closure `f` is only run *after* successful completion of the `self`
+ /// future.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let future_of_1 = async { 1 };
+ /// let future_of_4 = future_of_1.then(|x| async move { x + 3 });
+ /// assert_eq!(future_of_4.await, 4);
+ /// # });
+ /// ```
+ fn then<Fut, F>(self, f: F) -> Then<Self, Fut, F>
+ where
+ F: FnOnce(Self::Output) -> Fut,
+ Fut: Future,
+ Self: Sized,
+ {
+ assert_future::<Fut::Output, _>(Then::new(self, f))
+ }
+
+ /// Wrap this future in an `Either` future, making it the left-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `right_future` method to write `if`
+ /// statements that evaluate to different futures in different branches.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let x = 6;
+ /// let future = if x < 10 {
+ /// async { true }.left_future()
+ /// } else {
+ /// async { false }.right_future()
+ /// };
+ ///
+ /// assert_eq!(future.await, true);
+ /// # });
+ /// ```
+ fn left_future<B>(self) -> Either<Self, B>
+ where
+ B: Future<Output = Self::Output>,
+ Self: Sized,
+ {
+ assert_future::<Self::Output, _>(Either::Left(self))
+ }
+
+ /// Wrap this future in an `Either` future, making it the right-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `left_future` method to write `if`
+ /// statements that evaluate to different futures in different branches.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let x = 6;
+ /// let future = if x > 10 {
+ /// async { true }.left_future()
+ /// } else {
+ /// async { false }.right_future()
+ /// };
+ ///
+ /// assert_eq!(future.await, false);
+ /// # });
+ /// ```
+ fn right_future<A>(self) -> Either<A, Self>
+ where
+ A: Future<Output = Self::Output>,
+ Self: Sized,
+ {
+ assert_future::<Self::Output, _>(Either::Right(self))
+ }
+
+ /// Convert this future into a single element stream.
+ ///
+ /// The returned stream yields a single success value if this future resolves
+ /// successfully, or a single error if it resolves to an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::stream::StreamExt;
+ ///
+ /// let future = async { 17 };
+ /// let stream = future.into_stream();
+ /// let collected: Vec<_> = stream.collect().await;
+ /// assert_eq!(collected, vec![17]);
+ /// # });
+ /// ```
+ fn into_stream(self) -> IntoStream<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Self::Output, _>(IntoStream::new(self))
+ }
+
+ /// Flatten the execution of this future when the output of this
+ /// future is itself another future.
+ ///
+ /// This can be useful when combining futures together to flatten the
+ /// computation out into the final result.
+ ///
+ /// This method is roughly equivalent to `self.then(|x| x)`.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let nested_future = async { async { 1 } };
+ /// let future = nested_future.flatten();
+ /// assert_eq!(future.await, 1);
+ /// # });
+ /// ```
+ fn flatten(self) -> Flatten<Self>
+ where
+ Self::Output: Future,
+ Self: Sized,
+ {
+ let f = Flatten::new(self);
+ assert_future::<<<Self as Future>::Output as Future>::Output, _>(f)
+ }
+
+ /// Flatten the execution of this future when the successful result of this
+ /// future is a stream.
+ ///
+ /// This can be useful when stream initialization is deferred, and it is
+ /// convenient to work with that stream as if it were available at the
+ /// call site.
+ ///
+ /// Note that this function consumes this future and returns a wrapped
+ /// version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream_items = vec![17, 18, 19];
+ /// let future_of_a_stream = async { stream::iter(stream_items) };
+ ///
+ /// let stream = future_of_a_stream.flatten_stream();
+ /// let list: Vec<_> = stream.collect().await;
+ /// assert_eq!(list, vec![17, 18, 19]);
+ /// # });
+ /// ```
+ fn flatten_stream(self) -> FlattenStream<Self>
+ where
+ Self::Output: Stream,
+ Self: Sized,
+ {
+ assert_stream::<<Self::Output as Stream>::Item, _>(FlattenStream::new(self))
+ }
+
+ /// Fuse a future such that `poll` will never again be called once it has
+ /// completed. This method can be used to turn any `Future` into a
+ /// `FusedFuture`.
+ ///
+ /// Normally, once a future has returned `Poll::Ready` from `poll`,
+ /// any further calls could exhibit bad behavior such as blocking
+ /// forever, panicking, never returning, etc. If it is known that `poll`
+ /// may be called too often then this method can be used to ensure that it
+ /// has defined semantics.
+ ///
+ /// If a `fuse`d future is `poll`ed after having returned `Poll::Ready`
+ /// previously, it will return `Poll::Pending` from `poll` again (and will
+ /// continue to do so for all future calls to `poll`).
+ ///
+ /// This combinator will drop the underlying future as soon as it has been
+ /// completed to ensure resources are reclaimed as soon as possible.
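+ ///
+ /// # Examples
+ ///
+ /// A small illustration of the terminated state:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::{FusedFuture, FutureExt};
+ /// use futures::pin_mut;
+ ///
+ /// let future = async { 11 }.fuse();
+ /// pin_mut!(future);
+ ///
+ /// assert!(!future.is_terminated());
+ /// assert_eq!((&mut future).await, 11);
+ /// assert!(future.is_terminated());
+ /// # });
+ /// ```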
+ fn fuse(self) -> Fuse<Self>
+ where
+ Self: Sized,
+ {
+ let f = Fuse::new(self);
+ assert_future::<Self::Output, _>(f)
+ }
+
+ /// Do something with the output of a future before passing it on.
+ ///
+ /// When using futures, you'll often chain several of them together. While
+ /// working on such code, you might want to check out what's happening at
+ /// various parts in the pipeline, without consuming the intermediate
+ /// value. To do that, insert a call to `inspect`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let future = async { 1 };
+ /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x));
+ /// assert_eq!(new_future.await, 1);
+ /// # });
+ /// ```
+ fn inspect<F>(self, f: F) -> Inspect<Self, F>
+ where
+ F: FnOnce(&Self::Output),
+ Self: Sized,
+ {
+ assert_future::<Self::Output, _>(Inspect::new(self, f))
+ }
+
+ /// Catches unwinding panics while polling the future.
+ ///
+ /// In general, panics within a future can propagate all the way out to the
+ /// task level. This combinator makes it possible to halt unwinding within
+ /// the future itself. It's most commonly used within task executors. It's
+ /// not recommended to use this for error handling.
+ ///
+ /// Note that this method requires the `UnwindSafe` bound from the standard
+ /// library. This isn't always applied automatically, and the standard
+ /// library provides an `AssertUnwindSafe` wrapper type to apply it
+ /// after the fact. To assist using this method, the `Future` trait is also
+ /// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`.
+ ///
+ /// This method is only available when the `std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::{self, FutureExt, Ready};
+ ///
+ /// let future = future::ready(2);
+ /// assert!(future.catch_unwind().await.is_ok());
+ ///
+ /// let future = future::lazy(|_| -> Ready<i32> {
+ /// unimplemented!()
+ /// });
+ /// assert!(future.catch_unwind().await.is_err());
+ /// # });
+ /// ```
+ #[cfg(feature = "std")]
+ fn catch_unwind(self) -> CatchUnwind<Self>
+ where
+ Self: Sized + ::std::panic::UnwindSafe,
+ {
+ assert_future::<Result<Self::Output, Box<dyn std::any::Any + Send>>, _>(CatchUnwind::new(
+ self,
+ ))
+ }
+
+ /// Create a cloneable handle to this future where all handles will resolve
+ /// to the same result.
+ ///
+ /// The `shared` combinator provides a way to convert any future
+ /// into a cloneable future. It enables a future to be polled by multiple
+ /// threads.
+ ///
+ /// This method is only available when the `std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ ///
+ /// let future = async { 6 };
+ /// let shared1 = future.shared();
+ /// let shared2 = shared1.clone();
+ ///
+ /// assert_eq!(6, shared1.await);
+ /// assert_eq!(6, shared2.await);
+ /// # });
+ /// ```
+ ///
+ /// ```
+ /// // Note, unlike most examples this is written in the context of a
+ /// // synchronous function to better illustrate the cross-thread aspect of
+ /// // the `shared` combinator.
+ ///
+ /// # futures::executor::block_on(async {
+ /// use futures::future::FutureExt;
+ /// use futures::executor::block_on;
+ /// use std::thread;
+ ///
+ /// let future = async { 6 };
+ /// let shared1 = future.shared();
+ /// let shared2 = shared1.clone();
+ /// let join_handle = thread::spawn(move || {
+ /// assert_eq!(6, block_on(shared2));
+ /// });
+ /// assert_eq!(6, shared1.await);
+ /// join_handle.join().unwrap();
+ /// # });
+ /// ```
+ #[cfg(feature = "std")]
+ fn shared(self) -> Shared<Self>
+ where
+ Self: Sized,
+ Self::Output: Clone,
+ {
+ assert_future::<Self::Output, _>(Shared::new(self))
+ }
+
+ /// Turn this future into a future that yields `()` on completion and sends
+ /// its output to another future on a separate task.
+ ///
+ /// This can be used with spawning executors to easily retrieve the result
+ /// of a future executing on a separate task or thread.
+ ///
+ /// This method is only available when the `std` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "channel")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+ #[cfg(feature = "std")]
+ fn remote_handle(self) -> (Remote<Self>, RemoteHandle<Self::Output>)
+ where
+ Self: Sized,
+ {
+ let (wrapped, handle) = remote_handle::remote_handle(self);
+ (assert_future::<(), _>(wrapped), handle)
+ }
+
+ /// Wrap the future in a Box, pinning it.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "alloc")]
+ fn boxed<'a>(self) -> BoxFuture<'a, Self::Output>
+ where
+ Self: Sized + Send + 'a,
+ {
+ assert_future::<Self::Output, _>(Box::pin(self))
+ }
+
+ /// Wrap the future in a Box, pinning it.
+ ///
+ /// Similar to `boxed`, but without the `Send` requirement.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "alloc")]
+ fn boxed_local<'a>(self) -> LocalBoxFuture<'a, Self::Output>
+ where
+ Self: Sized + 'a,
+ {
+ assert_future::<Self::Output, _>(Box::pin(self))
+ }
+
+ /// Turns a [`Future<Output = T>`](Future) into a
+ /// [`TryFuture<Ok = T, Error = ()>`](futures_core::future::TryFuture).
+ fn unit_error(self) -> UnitError<Self>
+ where
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Output, ()>, _>(UnitError::new(self))
+ }
+
+ /// Turns a [`Future<Output = T>`](Future) into a
+ /// [`TryFuture<Ok = T, Error = Never>`](futures_core::future::TryFuture).
+ fn never_error(self) -> NeverError<Self>
+ where
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Output, Never>, _>(NeverError::new(self))
+ }
+
+ /// A convenience for calling `Future::poll` on `Unpin` future types.
+ fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Self::Output>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).poll(cx)
+ }
+
+ /// Evaluates and consumes the future, returning the resulting output if
+ /// the future is ready after the first call to `Future::poll`.
+ ///
+ /// If `poll` instead returns `Poll::Pending`, `None` is returned.
+ ///
+ /// This method is useful in cases where immediacy is more important than
+ /// waiting for a result. It is also convenient for quickly obtaining
+ /// the value of a future that is known to always resolve immediately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use futures::prelude::*;
+ /// use futures::{future::ready, future::pending};
+ /// let future_ready = ready("foobar");
+ /// let future_pending = pending::<&'static str>();
+ ///
+ /// assert_eq!(future_ready.now_or_never(), Some("foobar"));
+ /// assert_eq!(future_pending.now_or_never(), None);
+ /// ```
+ ///
+ /// In cases where it is absolutely known that a future should always
+ /// resolve immediately and never return `Poll::Pending`, this method can
+ /// be combined with `expect()`:
+ ///
+ /// ```
+ /// # use futures::{prelude::*, future::ready};
+ /// let future_ready = ready("foobar");
+ ///
+ /// assert_eq!(future_ready.now_or_never().expect("Future not ready"), "foobar");
+ /// ```
+ fn now_or_never(self) -> Option<Self::Output>
+ where
+ Self: Sized,
+ {
+ let noop_waker = crate::task::noop_waker();
+ let mut cx = Context::from_waker(&noop_waker);
+
+ let this = self;
+ pin_mut!(this);
+ match this.poll(&mut cx) {
+ Poll::Ready(x) => Some(x),
+ _ => None,
+ }
+ }
+}
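
As a brief usage sketch for the trait methods added above (illustrative only, assuming the `futures` facade crate re-exports `FutureExt` and `BoxFuture` as the vendored modules suggest), `boxed` erases concrete future types so they can share a collection, and `now_or_never` polls once with a no-op waker:

```rust
use futures::executor::block_on;
use futures::future::{self, BoxFuture, FutureExt};

fn main() {
    // `boxed` erases the concrete future type, so differently-typed
    // futures can live in one collection.
    let futures: Vec<BoxFuture<'static, u32>> =
        vec![future::ready(1).boxed(), async { 2 }.boxed()];

    let sum: u32 = futures.into_iter().map(|f| block_on(f)).sum();
    assert_eq!(sum, 3);

    // `now_or_never` polls exactly once with a no-op waker.
    assert_eq!(future::ready(7).now_or_never(), Some(7));
    assert_eq!(future::pending::<u32>().now_or_never(), None);
}
```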
diff --git a/vendor/futures-util/src/future/future/remote_handle.rs b/vendor/futures-util/src/future/future/remote_handle.rs
new file mode 100644
index 000000000..1358902ca
--- /dev/null
+++ b/vendor/futures-util/src/future/future/remote_handle.rs
@@ -0,0 +1,126 @@
+use {
+ crate::future::{CatchUnwind, FutureExt},
+ futures_channel::oneshot::{self, Receiver, Sender},
+ futures_core::{
+ future::Future,
+ ready,
+ task::{Context, Poll},
+ },
+ pin_project_lite::pin_project,
+ std::{
+ any::Any,
+ fmt,
+ panic::{self, AssertUnwindSafe},
+ pin::Pin,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ thread,
+ },
+};
+
+/// The handle to a remote future returned by
+/// [`remote_handle`](crate::future::FutureExt::remote_handle). When you drop this,
+/// the remote future will be woken up to be dropped by the executor.
+///
+/// ## Unwind safety
+///
+/// When the remote future panics, [Remote] will catch the unwind and transfer it to
+/// the thread where `RemoteHandle` is being awaited. This is good for the common
+/// case where [Remote] is spawned on a threadpool. It is unlikely that other code
+ /// in the executor's worker thread shares mutable data with the spawned future, and
+ /// catching the unwind keeps the executor from losing its worker threads.
+///
+ /// If you run the future locally and send the handle off to be awaited elsewhere, you
+/// must be careful with regard to unwind safety because the thread in which the future
+/// is polled will keep running after the panic and the thread running the [RemoteHandle]
+/// will unwind.
+#[must_use = "dropping a remote handle cancels the underlying future"]
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+pub struct RemoteHandle<T> {
+ rx: Receiver<thread::Result<T>>,
+ keep_running: Arc<AtomicBool>,
+}
+
+impl<T> RemoteHandle<T> {
+ /// Drops this handle *without* canceling the underlying future.
+ ///
+ /// This method can be used if you want to drop the handle, but let the
+ /// execution continue.
+ pub fn forget(self) {
+ self.keep_running.store(true, Ordering::SeqCst);
+ }
+}
+
+impl<T: 'static> Future for RemoteHandle<T> {
+ type Output = T;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ match ready!(self.rx.poll_unpin(cx)) {
+ Ok(Ok(output)) => Poll::Ready(output),
+ // the remote future panicked.
+ Ok(Err(e)) => panic::resume_unwind(e),
+ // The oneshot sender was dropped.
+ Err(e) => panic::resume_unwind(Box::new(e)),
+ }
+ }
+}
+
+type SendMsg<Fut> = Result<<Fut as Future>::Output, Box<(dyn Any + Send + 'static)>>;
+
+pin_project! {
+ /// A future which sends its output to the corresponding `RemoteHandle`.
+ /// Created by [`remote_handle`](crate::future::FutureExt::remote_handle).
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ #[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+ pub struct Remote<Fut: Future> {
+ tx: Option<Sender<SendMsg<Fut>>>,
+ keep_running: Arc<AtomicBool>,
+ #[pin]
+ future: CatchUnwind<AssertUnwindSafe<Fut>>,
+ }
+}
+
+impl<Fut: Future + fmt::Debug> fmt::Debug for Remote<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Remote").field(&self.future).finish()
+ }
+}
+
+impl<Fut: Future> Future for Remote<Fut> {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let this = self.project();
+
+ if this.tx.as_mut().unwrap().poll_canceled(cx).is_ready()
+ && !this.keep_running.load(Ordering::SeqCst)
+ {
+ // Cancelled, bail out
+ return Poll::Ready(());
+ }
+
+ let output = ready!(this.future.poll(cx));
+
+ // if the receiving end has gone away then that's ok, we just ignore the
+ // send error here.
+ drop(this.tx.take().unwrap().send(output));
+ Poll::Ready(())
+ }
+}
+
+pub(super) fn remote_handle<Fut: Future>(future: Fut) -> (Remote<Fut>, RemoteHandle<Fut::Output>) {
+ let (tx, rx) = oneshot::channel();
+ let keep_running = Arc::new(AtomicBool::new(false));
+
+ // Unwind Safety: See the docs for RemoteHandle.
+ let wrapped = Remote {
+ future: AssertUnwindSafe(future).catch_unwind(),
+ tx: Some(tx),
+ keep_running: keep_running.clone(),
+ };
+
+ (wrapped, RemoteHandle { rx, keep_running })
+}
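
An illustrative sketch (not part of the vendored file) of driving the `Remote`/`RemoteHandle` pair on one thread; in practice the `Remote` half would be handed to a spawning executor:

```rust
use futures::executor::block_on;
use futures::future::{self, FutureExt};

fn main() {
    block_on(async {
        // Split the future into the part that runs and the handle that
        // receives its output over the internal oneshot channel.
        let (remote, handle) = async { 2 + 2 }.remote_handle();

        // Drive both halves here; normally `remote` is spawned elsewhere.
        let ((), result) = future::join(remote, handle).await;
        assert_eq!(result, 4);
    });
}
```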
diff --git a/vendor/futures-util/src/future/future/shared.rs b/vendor/futures-util/src/future/future/shared.rs
new file mode 100644
index 000000000..9b31932fe
--- /dev/null
+++ b/vendor/futures-util/src/future/future/shared.rs
@@ -0,0 +1,371 @@
+use crate::task::{waker_ref, ArcWake};
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll, Waker};
+use slab::Slab;
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::pin::Pin;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::{Acquire, SeqCst};
+use std::sync::{Arc, Mutex, Weak};
+
+/// Future for the [`shared`](super::FutureExt::shared) method.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Shared<Fut: Future> {
+ inner: Option<Arc<Inner<Fut>>>,
+ waker_key: usize,
+}
+
+struct Inner<Fut: Future> {
+ future_or_output: UnsafeCell<FutureOrOutput<Fut>>,
+ notifier: Arc<Notifier>,
+}
+
+struct Notifier {
+ state: AtomicUsize,
+ wakers: Mutex<Option<Slab<Option<Waker>>>>,
+}
+
+/// A weak reference to a [`Shared`] that can be upgraded much like an `Arc`.
+pub struct WeakShared<Fut: Future>(Weak<Inner<Fut>>);
+
+impl<Fut: Future> Clone for WeakShared<Fut> {
+ fn clone(&self) -> Self {
+ Self(self.0.clone())
+ }
+}
+
+// The future itself is polled behind the `Arc`, so it won't be moved
+// when `Shared` is moved.
+impl<Fut: Future> Unpin for Shared<Fut> {}
+
+impl<Fut: Future> fmt::Debug for Shared<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Shared")
+ .field("inner", &self.inner)
+ .field("waker_key", &self.waker_key)
+ .finish()
+ }
+}
+
+impl<Fut: Future> fmt::Debug for Inner<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Inner").finish()
+ }
+}
+
+impl<Fut: Future> fmt::Debug for WeakShared<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("WeakShared").finish()
+ }
+}
+
+enum FutureOrOutput<Fut: Future> {
+ Future(Fut),
+ Output(Fut::Output),
+}
+
+unsafe impl<Fut> Send for Inner<Fut>
+where
+ Fut: Future + Send,
+ Fut::Output: Send + Sync,
+{
+}
+
+unsafe impl<Fut> Sync for Inner<Fut>
+where
+ Fut: Future + Send,
+ Fut::Output: Send + Sync,
+{
+}
+
+const IDLE: usize = 0;
+const POLLING: usize = 1;
+const COMPLETE: usize = 2;
+const POISONED: usize = 3;
+
+const NULL_WAKER_KEY: usize = usize::max_value();
+
+impl<Fut: Future> Shared<Fut> {
+ pub(super) fn new(future: Fut) -> Self {
+ let inner = Inner {
+ future_or_output: UnsafeCell::new(FutureOrOutput::Future(future)),
+ notifier: Arc::new(Notifier {
+ state: AtomicUsize::new(IDLE),
+ wakers: Mutex::new(Some(Slab::new())),
+ }),
+ };
+
+ Self { inner: Some(Arc::new(inner)), waker_key: NULL_WAKER_KEY }
+ }
+}
+
+impl<Fut> Shared<Fut>
+where
+ Fut: Future,
+ Fut::Output: Clone,
+{
+ /// Returns [`Some`] containing a reference to this [`Shared`]'s output if
+ /// it has already been computed by a clone or [`None`] if it hasn't been
+ /// computed yet or this [`Shared`] already returned its output from
+ /// [`poll`](Future::poll).
+ pub fn peek(&self) -> Option<&Fut::Output> {
+ if let Some(inner) = self.inner.as_ref() {
+ match inner.notifier.state.load(SeqCst) {
+ COMPLETE => unsafe { return Some(inner.output()) },
+ POISONED => panic!("inner future panicked during poll"),
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// Creates a new [`WeakShared`] for this [`Shared`].
+ ///
+ /// Returns [`None`] if it has already been polled to completion.
+ pub fn downgrade(&self) -> Option<WeakShared<Fut>> {
+ if let Some(inner) = self.inner.as_ref() {
+ return Some(WeakShared(Arc::downgrade(inner)));
+ }
+ None
+ }
+
+ /// Gets the number of strong pointers to this allocation.
+ ///
+ /// Returns [`None`] if it has already been polled to completion.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care. Another thread
+ /// can change the strong count at any time, including potentially between calling this method
+ /// and acting on the result.
+ pub fn strong_count(&self) -> Option<usize> {
+ self.inner.as_ref().map(|arc| Arc::strong_count(arc))
+ }
+
+ /// Gets the number of weak pointers to this allocation.
+ ///
+ /// Returns [`None`] if it has already been polled to completion.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care. Another thread
+ /// can change the weak count at any time, including potentially between calling this method
+ /// and acting on the result.
+ pub fn weak_count(&self) -> Option<usize> {
+ self.inner.as_ref().map(|arc| Arc::weak_count(arc))
+ }
+}
+
+impl<Fut> Inner<Fut>
+where
+ Fut: Future,
+ Fut::Output: Clone,
+{
+ /// Safety: callers must first ensure that `self.inner.state`
+ /// is `COMPLETE`
+ unsafe fn output(&self) -> &Fut::Output {
+ match &*self.future_or_output.get() {
+ FutureOrOutput::Output(ref item) => item,
+ FutureOrOutput::Future(_) => unreachable!(),
+ }
+ }
+ /// Registers the current task to receive a wakeup when we are awoken.
+ fn record_waker(&self, waker_key: &mut usize, cx: &mut Context<'_>) {
+ let mut wakers_guard = self.notifier.wakers.lock().unwrap();
+
+ let wakers = match wakers_guard.as_mut() {
+ Some(wakers) => wakers,
+ None => return,
+ };
+
+ let new_waker = cx.waker();
+
+ if *waker_key == NULL_WAKER_KEY {
+ *waker_key = wakers.insert(Some(new_waker.clone()));
+ } else {
+ match wakers[*waker_key] {
+ Some(ref old_waker) if new_waker.will_wake(old_waker) => {}
+ // Could use clone_from here, but Waker doesn't specialize it.
+ ref mut slot => *slot = Some(new_waker.clone()),
+ }
+ }
+ debug_assert!(*waker_key != NULL_WAKER_KEY);
+ }
+
+ /// Safety: callers must first ensure that `inner.state`
+ /// is `COMPLETE`
+ unsafe fn take_or_clone_output(self: Arc<Self>) -> Fut::Output {
+ match Arc::try_unwrap(self) {
+ Ok(inner) => match inner.future_or_output.into_inner() {
+ FutureOrOutput::Output(item) => item,
+ FutureOrOutput::Future(_) => unreachable!(),
+ },
+ Err(inner) => inner.output().clone(),
+ }
+ }
+}
+
+impl<Fut> FusedFuture for Shared<Fut>
+where
+ Fut: Future,
+ Fut::Output: Clone,
+{
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<Fut> Future for Shared<Fut>
+where
+ Fut: Future,
+ Fut::Output: Clone,
+{
+ type Output = Fut::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+
+ let inner = this.inner.take().expect("Shared future polled again after completion");
+
+ // Fast path for when the wrapped future has already completed
+ if inner.notifier.state.load(Acquire) == COMPLETE {
+ // Safety: We're in the COMPLETE state
+ return unsafe { Poll::Ready(inner.take_or_clone_output()) };
+ }
+
+ inner.record_waker(&mut this.waker_key, cx);
+
+ match inner
+ .notifier
+ .state
+ .compare_exchange(IDLE, POLLING, SeqCst, SeqCst)
+ .unwrap_or_else(|x| x)
+ {
+ IDLE => {
+ // Lock acquired, fall through
+ }
+ POLLING => {
+ // Another task is currently polling, at this point we just want
+ // to ensure that the waker for this task is registered
+ this.inner = Some(inner);
+ return Poll::Pending;
+ }
+ COMPLETE => {
+ // Safety: We're in the COMPLETE state
+ return unsafe { Poll::Ready(inner.take_or_clone_output()) };
+ }
+ POISONED => panic!("inner future panicked during poll"),
+ _ => unreachable!(),
+ }
+
+ let waker = waker_ref(&inner.notifier);
+ let mut cx = Context::from_waker(&waker);
+
+ struct Reset<'a>(&'a AtomicUsize);
+
+ impl Drop for Reset<'_> {
+ fn drop(&mut self) {
+ use std::thread;
+
+ if thread::panicking() {
+ self.0.store(POISONED, SeqCst);
+ }
+ }
+ }
+
+ let _reset = Reset(&inner.notifier.state);
+
+ let output = {
+ let future = unsafe {
+ match &mut *inner.future_or_output.get() {
+ FutureOrOutput::Future(fut) => Pin::new_unchecked(fut),
+ _ => unreachable!(),
+ }
+ };
+
+ match future.poll(&mut cx) {
+ Poll::Pending => {
+ if inner.notifier.state.compare_exchange(POLLING, IDLE, SeqCst, SeqCst).is_ok()
+ {
+ // Success
+ drop(_reset);
+ this.inner = Some(inner);
+ return Poll::Pending;
+ } else {
+ unreachable!()
+ }
+ }
+ Poll::Ready(output) => output,
+ }
+ };
+
+ unsafe {
+ *inner.future_or_output.get() = FutureOrOutput::Output(output);
+ }
+
+ inner.notifier.state.store(COMPLETE, SeqCst);
+
+ // Wake all tasks and drop the slab
+ let mut wakers_guard = inner.notifier.wakers.lock().unwrap();
+ let mut wakers = wakers_guard.take().unwrap();
+ for waker in wakers.drain().flatten() {
+ waker.wake();
+ }
+
+ drop(_reset); // Make borrow checker happy
+ drop(wakers_guard);
+
+ // Safety: We're in the COMPLETE state
+ unsafe { Poll::Ready(inner.take_or_clone_output()) }
+ }
+}
+
+impl<Fut> Clone for Shared<Fut>
+where
+ Fut: Future,
+{
+ fn clone(&self) -> Self {
+ Self { inner: self.inner.clone(), waker_key: NULL_WAKER_KEY }
+ }
+}
+
+impl<Fut> Drop for Shared<Fut>
+where
+ Fut: Future,
+{
+ fn drop(&mut self) {
+ if self.waker_key != NULL_WAKER_KEY {
+ if let Some(ref inner) = self.inner {
+ if let Ok(mut wakers) = inner.notifier.wakers.lock() {
+ if let Some(wakers) = wakers.as_mut() {
+ wakers.remove(self.waker_key);
+ }
+ }
+ }
+ }
+ }
+}
+
+impl ArcWake for Notifier {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ let wakers = &mut *arc_self.wakers.lock().unwrap();
+ if let Some(wakers) = wakers.as_mut() {
+ for (_key, opt_waker) in wakers {
+ if let Some(waker) = opt_waker.take() {
+ waker.wake();
+ }
+ }
+ }
+ }
+}
+
+impl<Fut: Future> WeakShared<Fut> {
+ /// Attempts to upgrade this [`WeakShared`] into a [`Shared`].
+ ///
+ /// Returns [`None`] if all clones of the [`Shared`] have been dropped or polled
+ /// to completion.
+ pub fn upgrade(&self) -> Option<Shared<Fut>> {
+ Some(Shared { inner: Some(self.0.upgrade()?), waker_key: NULL_WAKER_KEY })
+ }
+}
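
A small usage sketch (illustrative, assuming the `futures` facade re-exports `FutureExt::shared`) for the `peek` and `downgrade` accessors defined above:

```rust
use futures::executor::block_on;
use futures::future::FutureExt;

fn main() {
    block_on(async {
        let shared = async { 7_u32 }.shared();

        // Nothing has polled the inner future yet.
        assert_eq!(shared.peek(), None);

        let weak = shared.downgrade().expect("not polled to completion yet");

        // Completing a clone stores the output in the shared state...
        assert_eq!(shared.clone().await, 7);
        // ...so the original handle can now peek at it,
        assert_eq!(shared.peek(), Some(&7));
        // and the weak handle can still be upgraded and awaited.
        assert_eq!(weak.upgrade().expect("still alive").await, 7);
    });
}
```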
diff --git a/vendor/futures-util/src/future/join.rs b/vendor/futures-util/src/future/join.rs
new file mode 100644
index 000000000..740ffbc98
--- /dev/null
+++ b/vendor/futures-util/src/future/join.rs
@@ -0,0 +1,217 @@
+#![allow(non_snake_case)]
+
+use super::assert_future;
+use crate::future::{maybe_done, MaybeDone};
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+macro_rules! generate {
+ ($(
+ $(#[$doc:meta])*
+ ($Join:ident, <$($Fut:ident),*>),
+ )*) => ($(
+ pin_project! {
+ $(#[$doc])*
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct $Join<$($Fut: Future),*> {
+ $(#[pin] $Fut: MaybeDone<$Fut>,)*
+ }
+ }
+
+ impl<$($Fut),*> fmt::Debug for $Join<$($Fut),*>
+ where
+ $(
+ $Fut: Future + fmt::Debug,
+ $Fut::Output: fmt::Debug,
+ )*
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!($Join))
+ $(.field(stringify!($Fut), &self.$Fut))*
+ .finish()
+ }
+ }
+
+ impl<$($Fut: Future),*> $Join<$($Fut),*> {
+ fn new($($Fut: $Fut),*) -> Self {
+ Self {
+ $($Fut: maybe_done($Fut)),*
+ }
+ }
+ }
+
+ impl<$($Fut: Future),*> Future for $Join<$($Fut),*> {
+ type Output = ($($Fut::Output),*);
+
+ fn poll(
+ self: Pin<&mut Self>, cx: &mut Context<'_>
+ ) -> Poll<Self::Output> {
+ let mut all_done = true;
+ let mut futures = self.project();
+ $(
+ all_done &= futures.$Fut.as_mut().poll(cx).is_ready();
+ )*
+
+ if all_done {
+ Poll::Ready(($(futures.$Fut.take_output().unwrap()), *))
+ } else {
+ Poll::Pending
+ }
+ }
+ }
+
+ impl<$($Fut: FusedFuture),*> FusedFuture for $Join<$($Fut),*> {
+ fn is_terminated(&self) -> bool {
+ $(
+ self.$Fut.is_terminated()
+ ) && *
+ }
+ }
+ )*)
+}
+
+generate! {
+ /// Future for the [`join`](join()) function.
+ (Join, <Fut1, Fut2>),
+
+ /// Future for the [`join3`] function.
+ (Join3, <Fut1, Fut2, Fut3>),
+
+ /// Future for the [`join4`] function.
+ (Join4, <Fut1, Fut2, Fut3, Fut4>),
+
+ /// Future for the [`join5`] function.
+ (Join5, <Fut1, Fut2, Fut3, Fut4, Fut5>),
+}
+
+/// Joins the result of two futures, waiting for them both to complete.
+///
+/// This function will return a new future which awaits both futures to
+/// complete. The returned future will finish with a tuple of both results.
+///
+/// Note that this function consumes the passed futures and returns a
+/// wrapped version of them.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = async { 1 };
+/// let b = async { 2 };
+/// let pair = future::join(a, b);
+///
+/// assert_eq!(pair.await, (1, 2));
+/// # });
+/// ```
+pub fn join<Fut1, Fut2>(future1: Fut1, future2: Fut2) -> Join<Fut1, Fut2>
+where
+ Fut1: Future,
+ Fut2: Future,
+{
+ let f = Join::new(future1, future2);
+ assert_future::<(Fut1::Output, Fut2::Output), _>(f)
+}
+
+/// Same as [`join`](join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = async { 1 };
+/// let b = async { 2 };
+/// let c = async { 3 };
+/// let tuple = future::join3(a, b, c);
+///
+/// assert_eq!(tuple.await, (1, 2, 3));
+/// # });
+/// ```
+pub fn join3<Fut1, Fut2, Fut3>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+) -> Join3<Fut1, Fut2, Fut3>
+where
+ Fut1: Future,
+ Fut2: Future,
+ Fut3: Future,
+{
+ let f = Join3::new(future1, future2, future3);
+ assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output), _>(f)
+}
+
+/// Same as [`join`](join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = async { 1 };
+/// let b = async { 2 };
+/// let c = async { 3 };
+/// let d = async { 4 };
+/// let tuple = future::join4(a, b, c, d);
+///
+/// assert_eq!(tuple.await, (1, 2, 3, 4));
+/// # });
+/// ```
+pub fn join4<Fut1, Fut2, Fut3, Fut4>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+ future4: Fut4,
+) -> Join4<Fut1, Fut2, Fut3, Fut4>
+where
+ Fut1: Future,
+ Fut2: Future,
+ Fut3: Future,
+ Fut4: Future,
+{
+ let f = Join4::new(future1, future2, future3, future4);
+ assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output, Fut4::Output), _>(f)
+}
+
+/// Same as [`join`](join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = async { 1 };
+/// let b = async { 2 };
+/// let c = async { 3 };
+/// let d = async { 4 };
+/// let e = async { 5 };
+/// let tuple = future::join5(a, b, c, d, e);
+///
+/// assert_eq!(tuple.await, (1, 2, 3, 4, 5));
+/// # });
+/// ```
+pub fn join5<Fut1, Fut2, Fut3, Fut4, Fut5>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+ future4: Fut4,
+ future5: Fut5,
+) -> Join5<Fut1, Fut2, Fut3, Fut4, Fut5>
+where
+ Fut1: Future,
+ Fut2: Future,
+ Fut3: Future,
+ Fut4: Future,
+ Fut5: Future,
+{
+ let f = Join5::new(future1, future2, future3, future4, future5);
+ assert_future::<(Fut1::Output, Fut2::Output, Fut3::Output, Fut4::Output, Fut5::Output), _>(f)
+}
diff --git a/vendor/futures-util/src/future/join_all.rs b/vendor/futures-util/src/future/join_all.rs
new file mode 100644
index 000000000..2e52ac17f
--- /dev/null
+++ b/vendor/futures-util/src/future/join_all.rs
@@ -0,0 +1,167 @@
+//! Definition of the `JoinAll` combinator, waiting for all of a list of futures
+//! to finish.
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::fmt;
+use core::future::Future;
+use core::iter::FromIterator;
+use core::mem;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+use super::{assert_future, MaybeDone};
+
+#[cfg(not(futures_no_atomic_cas))]
+use crate::stream::{Collect, FuturesOrdered, StreamExt};
+
+fn iter_pin_mut<T>(slice: Pin<&mut [T]>) -> impl Iterator<Item = Pin<&mut T>> {
+ // Safety: `std` _could_ make this unsound if it were to decide Pin's
+ // invariants aren't required to transmit through slices. Otherwise this has
+ // the same safety as a normal field pin projection.
+ unsafe { slice.get_unchecked_mut() }.iter_mut().map(|t| unsafe { Pin::new_unchecked(t) })
+}
+
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+/// Future for the [`join_all`] function.
+pub struct JoinAll<F>
+where
+ F: Future,
+{
+ kind: JoinAllKind<F>,
+}
+
+#[cfg(not(futures_no_atomic_cas))]
+const SMALL: usize = 30;
+
+pub(crate) enum JoinAllKind<F>
+where
+ F: Future,
+{
+ Small {
+ elems: Pin<Box<[MaybeDone<F>]>>,
+ },
+ #[cfg(not(futures_no_atomic_cas))]
+ Big {
+ fut: Collect<FuturesOrdered<F>, Vec<F::Output>>,
+ },
+}
+
+impl<F> fmt::Debug for JoinAll<F>
+where
+ F: Future + fmt::Debug,
+ F::Output: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.kind {
+ JoinAllKind::Small { ref elems } => {
+ f.debug_struct("JoinAll").field("elems", elems).finish()
+ }
+ #[cfg(not(futures_no_atomic_cas))]
+ JoinAllKind::Big { ref fut, .. } => fmt::Debug::fmt(fut, f),
+ }
+ }
+}
+
+/// Creates a future which represents a collection of the outputs of the futures
+/// given.
+///
+/// The returned future will drive execution for all of its underlying futures,
+/// collecting the results into a destination `Vec<T>` in the same order as they
+/// were provided.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+///
+/// # See Also
+///
+/// `join_all` will switch to the more powerful [`FuturesOrdered`] for performance
+/// reasons if the number of futures is large. You may want to look into using it or
+/// its counterpart [`FuturesUnordered`][crate::stream::FuturesUnordered] directly.
+///
+/// Some examples for additional functionality provided by these are:
+///
+/// * Adding new futures to the set even after it has been started.
+///
+/// * Only polling the specific futures that have been woken. In cases where
+/// you have a lot of futures this will result in much more efficient polling.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future::join_all;
+///
+/// async fn foo(i: u32) -> u32 { i }
+///
+/// let futures = vec![foo(1), foo(2), foo(3)];
+///
+/// assert_eq!(join_all(futures).await, [1, 2, 3]);
+/// # });
+/// ```
+pub fn join_all<I>(iter: I) -> JoinAll<I::Item>
+where
+ I: IntoIterator,
+ I::Item: Future,
+{
+ #[cfg(futures_no_atomic_cas)]
+ {
+ let elems = iter.into_iter().map(MaybeDone::Future).collect::<Box<[_]>>().into();
+ let kind = JoinAllKind::Small { elems };
+ assert_future::<Vec<<I::Item as Future>::Output>, _>(JoinAll { kind })
+ }
+ #[cfg(not(futures_no_atomic_cas))]
+ {
+ let iter = iter.into_iter();
+ let kind = match iter.size_hint().1 {
+ None => JoinAllKind::Big { fut: iter.collect::<FuturesOrdered<_>>().collect() },
+ Some(max) => {
+ if max <= SMALL {
+ let elems = iter.map(MaybeDone::Future).collect::<Box<[_]>>().into();
+ JoinAllKind::Small { elems }
+ } else {
+ JoinAllKind::Big { fut: iter.collect::<FuturesOrdered<_>>().collect() }
+ }
+ }
+ };
+ assert_future::<Vec<<I::Item as Future>::Output>, _>(JoinAll { kind })
+ }
+}
+
+impl<F> Future for JoinAll<F>
+where
+ F: Future,
+{
+ type Output = Vec<F::Output>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match &mut self.kind {
+ JoinAllKind::Small { elems } => {
+ let mut all_done = true;
+
+ for elem in iter_pin_mut(elems.as_mut()) {
+ if elem.poll(cx).is_pending() {
+ all_done = false;
+ }
+ }
+
+ if all_done {
+ let mut elems = mem::replace(elems, Box::pin([]));
+ let result =
+ iter_pin_mut(elems.as_mut()).map(|e| e.take_output().unwrap()).collect();
+ Poll::Ready(result)
+ } else {
+ Poll::Pending
+ }
+ }
+ #[cfg(not(futures_no_atomic_cas))]
+ JoinAllKind::Big { fut } => Pin::new(fut).poll(cx),
+ }
+ }
+}
+
+impl<F: Future> FromIterator<F> for JoinAll<F> {
+ fn from_iter<T: IntoIterator<Item = F>>(iter: T) -> Self {
+ join_all(iter)
+ }
+}
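
Because `JoinAll` implements `FromIterator` (above), an iterator of futures can be collected into the combinator directly; a minimal sketch, assuming the `futures` facade re-exports `JoinAll`:

```rust
use futures::executor::block_on;
use futures::future::{self, JoinAll};

fn main() {
    block_on(async {
        // Collecting an iterator of futures builds the combinator directly.
        let all: JoinAll<_> = (1..=3).map(|i| future::ready(i * 10)).collect();
        assert_eq!(all.await, vec![10, 20, 30]);
    });
}
```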
diff --git a/vendor/futures-util/src/future/lazy.rs b/vendor/futures-util/src/future/lazy.rs
new file mode 100644
index 000000000..e9a8cf2fa
--- /dev/null
+++ b/vendor/futures-util/src/future/lazy.rs
@@ -0,0 +1,60 @@
+use super::assert_future;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`lazy`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Lazy<F> {
+ f: Option<F>,
+}
+
+// safe because we never generate `Pin<&mut F>`
+impl<F> Unpin for Lazy<F> {}
+
+/// Creates a new future that allows delayed execution of a closure.
+///
+/// The provided closure is only run once the future is polled.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::lazy(|_| 1);
+/// assert_eq!(a.await, 1);
+///
+/// let b = future::lazy(|_| -> i32 {
+/// panic!("oh no!")
+/// });
+/// drop(b); // closure is never run
+/// # });
+/// ```
+pub fn lazy<F, R>(f: F) -> Lazy<F>
+where
+ F: FnOnce(&mut Context<'_>) -> R,
+{
+ assert_future::<R, _>(Lazy { f: Some(f) })
+}
+
+impl<F, R> FusedFuture for Lazy<F>
+where
+ F: FnOnce(&mut Context<'_>) -> R,
+{
+ fn is_terminated(&self) -> bool {
+ self.f.is_none()
+ }
+}
+
+impl<F, R> Future for Lazy<F>
+where
+ F: FnOnce(&mut Context<'_>) -> R,
+{
+ type Output = R;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<R> {
+ Poll::Ready((self.f.take().expect("Lazy polled after completion"))(cx))
+ }
+}
diff --git a/vendor/futures-util/src/future/maybe_done.rs b/vendor/futures-util/src/future/maybe_done.rs
new file mode 100644
index 000000000..26e6c2758
--- /dev/null
+++ b/vendor/futures-util/src/future/maybe_done.rs
@@ -0,0 +1,104 @@
+//! Definition of the MaybeDone combinator
+
+use super::assert_future;
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+
+/// A future that may have completed.
+///
+/// This is created by the [`maybe_done()`] function.
+#[derive(Debug)]
+pub enum MaybeDone<Fut: Future> {
+ /// A not-yet-completed future
+ Future(/* #[pin] */ Fut),
+ /// The output of the completed future
+ Done(Fut::Output),
+ /// The empty variant after the result of a [`MaybeDone`] has been
+ /// taken using the [`take_output`](MaybeDone::take_output) method.
+ Gone,
+}
+
+impl<Fut: Future + Unpin> Unpin for MaybeDone<Fut> {}
+
+/// Wraps a future into a `MaybeDone`
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+/// use futures::pin_mut;
+///
+/// let future = future::maybe_done(async { 5 });
+/// pin_mut!(future);
+/// assert_eq!(future.as_mut().take_output(), None);
+/// let () = future.as_mut().await;
+/// assert_eq!(future.as_mut().take_output(), Some(5));
+/// assert_eq!(future.as_mut().take_output(), None);
+/// # });
+/// ```
+pub fn maybe_done<Fut: Future>(future: Fut) -> MaybeDone<Fut> {
+ assert_future::<(), _>(MaybeDone::Future(future))
+}
+
+impl<Fut: Future> MaybeDone<Fut> {
+ /// Returns an [`Option`] containing a mutable reference to the output of the future.
+ /// The output of this method will be [`Some`] if and only if the inner
+ /// future has been completed and [`take_output`](MaybeDone::take_output)
+ /// has not yet been called.
+ #[inline]
+ pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Output> {
+ unsafe {
+ match self.get_unchecked_mut() {
+ MaybeDone::Done(res) => Some(res),
+ _ => None,
+ }
+ }
+ }
+
+ /// Attempt to take the output of a `MaybeDone` without driving it
+ /// towards completion.
+ #[inline]
+ pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Output> {
+ match &*self {
+ Self::Done(_) => {}
+ Self::Future(_) | Self::Gone => return None,
+ }
+ unsafe {
+ match mem::replace(self.get_unchecked_mut(), Self::Gone) {
+ MaybeDone::Done(output) => Some(output),
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+
+impl<Fut: Future> FusedFuture for MaybeDone<Fut> {
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Future(_) => false,
+ Self::Done(_) | Self::Gone => true,
+ }
+ }
+}
+
+impl<Fut: Future> Future for MaybeDone<Fut> {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ unsafe {
+ match self.as_mut().get_unchecked_mut() {
+ MaybeDone::Future(f) => {
+ let res = ready!(Pin::new_unchecked(f).poll(cx));
+ self.set(Self::Done(res));
+ }
+ MaybeDone::Done(_) => {}
+ MaybeDone::Gone => panic!("MaybeDone polled after value taken"),
+ }
+ }
+ Poll::Ready(())
+ }
+}
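
A short illustrative sketch of `output_mut` alongside `take_output` on the `MaybeDone` combinator above; the output can be borrowed and mutated in place once the inner future has completed:

```rust
use futures::executor::block_on;
use futures::future::maybe_done;
use futures::pin_mut;

fn main() {
    block_on(async {
        let fut = maybe_done(async { 5_i32 });
        pin_mut!(fut);

        // Not completed yet, so there is no output to borrow.
        assert!(fut.as_mut().output_mut().is_none());

        // Driving it to completion stores the output in place...
        fut.as_mut().await;
        // ...which can then be mutated before being taken out.
        *fut.as_mut().output_mut().unwrap() += 1;
        assert_eq!(fut.as_mut().take_output(), Some(6));
    });
}
```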
diff --git a/vendor/futures-util/src/future/mod.rs b/vendor/futures-util/src/future/mod.rs
new file mode 100644
index 000000000..374e36512
--- /dev/null
+++ b/vendor/futures-util/src/future/mod.rs
@@ -0,0 +1,131 @@
+//! Asynchronous values.
+//!
+//! This module contains:
+//!
+//! - The [`Future`] trait.
+//! - The [`FutureExt`] and [`TryFutureExt`] trait, which provides adapters for
+//! chaining and composing futures.
+//! - Top-level future combinators like [`lazy`](lazy()) which creates a future
+//! from a closure that defines its return value, and [`ready`](ready()),
+//! which constructs a future with an immediate defined value.
+
+#[doc(no_inline)]
+pub use core::future::Future;
+
+#[cfg(feature = "alloc")]
+pub use futures_core::future::{BoxFuture, LocalBoxFuture};
+pub use futures_core::future::{FusedFuture, TryFuture};
+pub use futures_task::{FutureObj, LocalFutureObj, UnsafeFutureObj};
+
+// Extension traits and combinators
+#[allow(clippy::module_inception)]
+mod future;
+pub use self::future::{
+ Flatten, Fuse, FutureExt, Inspect, IntoStream, Map, MapInto, NeverError, Then, UnitError,
+};
+
+#[deprecated(note = "This is now an alias for [Flatten](Flatten)")]
+pub use self::future::FlattenStream;
+
+#[cfg(feature = "std")]
+pub use self::future::CatchUnwind;
+
+#[cfg(feature = "channel")]
+#[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+#[cfg(feature = "std")]
+pub use self::future::{Remote, RemoteHandle};
+
+#[cfg(feature = "std")]
+pub use self::future::{Shared, WeakShared};
+
+mod try_future;
+pub use self::try_future::{
+ AndThen, ErrInto, InspectErr, InspectOk, IntoFuture, MapErr, MapOk, MapOkOrElse, OkInto,
+ OrElse, TryFlatten, TryFlattenStream, TryFutureExt, UnwrapOrElse,
+};
+
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub use self::try_future::FlattenSink;
+
+// Primitive futures
+
+mod lazy;
+pub use self::lazy::{lazy, Lazy};
+
+mod pending;
+pub use self::pending::{pending, Pending};
+
+mod maybe_done;
+pub use self::maybe_done::{maybe_done, MaybeDone};
+
+mod try_maybe_done;
+pub use self::try_maybe_done::{try_maybe_done, TryMaybeDone};
+
+mod option;
+pub use self::option::OptionFuture;
+
+mod poll_fn;
+pub use self::poll_fn::{poll_fn, PollFn};
+
+mod poll_immediate;
+pub use self::poll_immediate::{poll_immediate, PollImmediate};
+
+mod ready;
+pub use self::ready::{err, ok, ready, Ready};
+
+mod join;
+pub use self::join::{join, join3, join4, join5, Join, Join3, Join4, Join5};
+
+#[cfg(feature = "alloc")]
+mod join_all;
+#[cfg(feature = "alloc")]
+pub use self::join_all::{join_all, JoinAll};
+
+mod select;
+pub use self::select::{select, Select};
+
+#[cfg(feature = "alloc")]
+mod select_all;
+#[cfg(feature = "alloc")]
+pub use self::select_all::{select_all, SelectAll};
+
+mod try_join;
+pub use self::try_join::{
+ try_join, try_join3, try_join4, try_join5, TryJoin, TryJoin3, TryJoin4, TryJoin5,
+};
+
+#[cfg(feature = "alloc")]
+mod try_join_all;
+#[cfg(feature = "alloc")]
+pub use self::try_join_all::{try_join_all, TryJoinAll};
+
+mod try_select;
+pub use self::try_select::{try_select, TrySelect};
+
+#[cfg(feature = "alloc")]
+mod select_ok;
+#[cfg(feature = "alloc")]
+pub use self::select_ok::{select_ok, SelectOk};
+
+mod either;
+pub use self::either::Either;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod abortable;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use crate::abortable::{AbortHandle, AbortRegistration, Abortable, Aborted};
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use abortable::abortable;
+
+// Just a helper function to ensure the futures we're returning all have the
+// right implementations.
+pub(crate) fn assert_future<T, F>(future: F) -> F
+where
+ F: Future<Output = T>,
+{
+ future
+}
diff --git a/vendor/futures-util/src/future/option.rs b/vendor/futures-util/src/future/option.rs
new file mode 100644
index 000000000..0bc377758
--- /dev/null
+++ b/vendor/futures-util/src/future/option.rs
@@ -0,0 +1,64 @@
+//! Definition of the `Option` (optional step) combinator
+
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// A future representing a value which may or may not be present.
+ ///
+ /// Created by the [`From`] implementation for [`Option`](std::option::Option).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::OptionFuture;
+ ///
+ /// let mut a: OptionFuture<_> = Some(async { 123 }).into();
+ /// assert_eq!(a.await, Some(123));
+ ///
+ /// a = None.into();
+ /// assert_eq!(a.await, None);
+ /// # });
+ /// ```
+ #[derive(Debug, Clone)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct OptionFuture<F> {
+ #[pin]
+ inner: Option<F>,
+ }
+}
+
+impl<F> Default for OptionFuture<F> {
+ fn default() -> Self {
+ Self { inner: None }
+ }
+}
+
+impl<F: Future> Future for OptionFuture<F> {
+ type Output = Option<F::Output>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match self.project().inner.as_pin_mut() {
+ Some(x) => x.poll(cx).map(Some),
+ None => Poll::Ready(None),
+ }
+ }
+}
+
+impl<F: FusedFuture> FusedFuture for OptionFuture<F> {
+ fn is_terminated(&self) -> bool {
+ match &self.inner {
+ Some(x) => x.is_terminated(),
+ None => true,
+ }
+ }
+}
+
+impl<T> From<Option<T>> for OptionFuture<T> {
+ fn from(option: Option<T>) -> Self {
+ Self { inner: option }
+ }
+}
diff --git a/vendor/futures-util/src/future/pending.rs b/vendor/futures-util/src/future/pending.rs
new file mode 100644
index 000000000..92c78d52b
--- /dev/null
+++ b/vendor/futures-util/src/future/pending.rs
@@ -0,0 +1,54 @@
+use super::assert_future;
+use core::marker;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`pending()`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Pending<T> {
+ _data: marker::PhantomData<T>,
+}
+
+impl<T> FusedFuture for Pending<T> {
+ fn is_terminated(&self) -> bool {
+ true
+ }
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// The returned future will forever return [`Poll::Pending`].
+///
+/// # Examples
+///
+/// ```ignore
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let future = future::pending();
+/// let () = future.await;
+/// unreachable!();
+/// # });
+/// ```
+pub fn pending<T>() -> Pending<T> {
+ assert_future::<T, _>(Pending { _data: marker::PhantomData })
+}
+
+impl<T> Future for Pending<T> {
+ type Output = T;
+
+ fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<T> {
+ Poll::Pending
+ }
+}
+
+impl<T> Unpin for Pending<T> {}
+
+impl<T> Clone for Pending<T> {
+ fn clone(&self) -> Self {
+ pending()
+ }
+}
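
Since the example in the doc comment above can never finish, here is a terminating sketch (illustrative) that observes `pending` staying pending by polling it once via `now_or_never`:

```rust
use futures::future::{self, FusedFuture, FutureExt};

fn main() {
    // A single poll of `pending` is enough to observe that it stays pending.
    assert_eq!(future::pending::<u32>().now_or_never(), None);

    // `Pending` is also a `FusedFuture` that always reports itself as terminated.
    assert!(future::pending::<u32>().is_terminated());
}
```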
diff --git a/vendor/futures-util/src/future/poll_fn.rs b/vendor/futures-util/src/future/poll_fn.rs
new file mode 100644
index 000000000..19311570b
--- /dev/null
+++ b/vendor/futures-util/src/future/poll_fn.rs
@@ -0,0 +1,58 @@
+//! Definition of the `PollFn` adapter combinator
+
+use super::assert_future;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`poll_fn`] function.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct PollFn<F> {
+ f: F,
+}
+
+impl<F> Unpin for PollFn<F> {}
+
+/// Creates a new future wrapping around a function returning [`Poll`].
+///
+/// Polling the returned future delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future::poll_fn;
+/// use futures::task::{Context, Poll};
+///
+/// fn read_line(_cx: &mut Context<'_>) -> Poll<String> {
+/// Poll::Ready("Hello, World!".into())
+/// }
+///
+/// let read_future = poll_fn(read_line);
+/// assert_eq!(read_future.await, "Hello, World!".to_owned());
+/// # });
+/// ```
+pub fn poll_fn<T, F>(f: F) -> PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ assert_future::<T, _>(PollFn { f })
+}
+
+impl<F> fmt::Debug for PollFn<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PollFn").finish()
+ }
+}
+
+impl<T, F> Future for PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<T>,
+{
+ type Output = T;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ (&mut self.f)(cx)
+ }
+}
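
A sketch (illustrative) of a stateful `poll_fn` closure: because the closure is `FnMut`, it can keep state across polls, and it must arrange a wakeup before returning `Poll::Pending`:

```rust
use futures::executor::block_on;
use futures::future::poll_fn;
use futures::task::Poll;

fn main() {
    let mut polls = 0;
    let fut = poll_fn(move |cx| {
        polls += 1;
        if polls < 3 {
            // Not ready yet; request another poll before yielding.
            cx.waker().wake_by_ref();
            Poll::Pending
        } else {
            Poll::Ready(polls)
        }
    });
    assert_eq!(block_on(fut), 3);
}
```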
diff --git a/vendor/futures-util/src/future/poll_immediate.rs b/vendor/futures-util/src/future/poll_immediate.rs
new file mode 100644
index 000000000..5ae555c73
--- /dev/null
+++ b/vendor/futures-util/src/future/poll_immediate.rs
@@ -0,0 +1,126 @@
+use super::assert_future;
+use core::pin::Pin;
+use futures_core::task::{Context, Poll};
+use futures_core::{FusedFuture, Future, Stream};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`poll_immediate`](poll_immediate()) function.
+ ///
+ /// It will never return [Poll::Pending](core::task::Poll::Pending)
+ #[derive(Debug, Clone)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct PollImmediate<T> {
+ #[pin]
+ future: Option<T>
+ }
+}
+
+impl<T, F> Future for PollImmediate<F>
+where
+ F: Future<Output = T>,
+{
+ type Output = Option<T>;
+
+ #[inline]
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ let mut this = self.project();
+ let inner =
+ this.future.as_mut().as_pin_mut().expect("PollImmediate polled after completion");
+ match inner.poll(cx) {
+ Poll::Ready(t) => {
+ this.future.set(None);
+ Poll::Ready(Some(t))
+ }
+ Poll::Pending => Poll::Ready(None),
+ }
+ }
+}
+
+impl<T: Future> FusedFuture for PollImmediate<T> {
+ fn is_terminated(&self) -> bool {
+ self.future.is_none()
+ }
+}
+
+/// A [Stream](crate::stream::Stream) implementation that can be polled repeatedly until the future is done.
+/// The stream will never return [Poll::Pending](core::task::Poll::Pending)
+/// so polling it in a tight loop is worse than using a blocking synchronous function.
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::task::Poll;
+/// use futures::{StreamExt, future, pin_mut};
+/// use future::FusedFuture;
+///
+/// let f = async { 1_u32 };
+/// pin_mut!(f);
+/// let mut r = future::poll_immediate(f);
+/// assert_eq!(r.next().await, Some(Poll::Ready(1)));
+///
+/// let f = async {futures::pending!(); 42_u8};
+/// pin_mut!(f);
+/// let mut p = future::poll_immediate(f);
+/// assert_eq!(p.next().await, Some(Poll::Pending));
+/// assert!(!p.is_terminated());
+/// assert_eq!(p.next().await, Some(Poll::Ready(42)));
+/// assert!(p.is_terminated());
+/// assert_eq!(p.next().await, None);
+/// # });
+/// ```
+impl<T, F> Stream for PollImmediate<F>
+where
+ F: Future<Output = T>,
+{
+ type Item = Poll<T>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ match this.future.as_mut().as_pin_mut() {
+ // inner is gone, so we can signal that the stream is closed.
+ None => Poll::Ready(None),
+ Some(fut) => Poll::Ready(Some(fut.poll(cx).map(|t| {
+ this.future.set(None);
+ t
+ }))),
+ }
+ }
+}
+
+/// Creates a future that is immediately ready with an Option of a value.
+/// Specifically this means that [poll](core::future::Future::poll()) always returns [Poll::Ready](core::task::Poll::Ready).
+///
+/// # Caution
+///
+/// When consuming the future by this function, note the following:
+///
+/// - This function does not guarantee that the future will run to completion, so it is generally incompatible with passing the non-cancellation-safe future by value.
+/// - Even if the future is cancellation-safe, creating and dropping new futures frequently may lead to performance problems.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let r = future::poll_immediate(async { 1_u32 });
+/// assert_eq!(r.await, Some(1));
+///
+/// let p = future::poll_immediate(future::pending::<i32>());
+/// assert_eq!(p.await, None);
+/// # });
+/// ```
+///
+/// ### Reusing a future
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::{future, pin_mut};
+/// let f = async {futures::pending!(); 42_u8};
+/// pin_mut!(f);
+/// assert_eq!(None, future::poll_immediate(&mut f).await);
+/// assert_eq!(42, f.await);
+/// # });
+/// ```
+pub fn poll_immediate<F: Future>(f: F) -> PollImmediate<F> {
+ assert_future::<Option<F::Output>, PollImmediate<F>>(PollImmediate { future: Some(f) })
+}
diff --git a/vendor/futures-util/src/future/ready.rs b/vendor/futures-util/src/future/ready.rs
new file mode 100644
index 000000000..e3d791b3c
--- /dev/null
+++ b/vendor/futures-util/src/future/ready.rs
@@ -0,0 +1,82 @@
+use super::assert_future;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`ready`](ready()) function.
+#[derive(Debug, Clone)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Ready<T>(Option<T>);
+
+impl<T> Ready<T> {
+ /// Unwraps the value from this immediately ready future.
+ #[inline]
+ pub fn into_inner(mut self) -> T {
+ self.0.take().unwrap()
+ }
+}
+
+impl<T> Unpin for Ready<T> {}
+
+impl<T> FusedFuture for Ready<T> {
+ fn is_terminated(&self) -> bool {
+ self.0.is_none()
+ }
+}
+
+impl<T> Future for Ready<T> {
+ type Output = T;
+
+ #[inline]
+ fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
+ Poll::Ready(self.0.take().expect("Ready polled after completion"))
+ }
+}
+
+/// Creates a future that is immediately ready with a value.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(1);
+/// assert_eq!(a.await, 1);
+/// # });
+/// ```
+pub fn ready<T>(t: T) -> Ready<T> {
+ assert_future::<T, _>(Ready(Some(t)))
+}
+
+/// Create a future that is immediately ready with a success value.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ok::<i32, i32>(1);
+/// assert_eq!(a.await, Ok(1));
+/// # });
+/// ```
+pub fn ok<T, E>(t: T) -> Ready<Result<T, E>> {
+ Ready(Some(Ok(t)))
+}
+
+/// Create a future that is immediately ready with an error value.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::err::<i32, i32>(1);
+/// assert_eq!(a.await, Err(1));
+/// # });
+/// ```
+pub fn err<T, E>(err: E) -> Ready<Result<T, E>> {
+ Ready(Some(Err(err)))
+}
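
`Ready::into_inner` (above) has no doc example; a one-line sketch, illustrative only:

```rust
use futures::future;

fn main() {
    // The value can be extracted without polling or an executor.
    assert_eq!(future::ready("done").into_inner(), "done");
}
```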
diff --git a/vendor/futures-util/src/future/select.rs b/vendor/futures-util/src/future/select.rs
new file mode 100644
index 000000000..bd44f20f7
--- /dev/null
+++ b/vendor/futures-util/src/future/select.rs
@@ -0,0 +1,124 @@
+use super::assert_future;
+use crate::future::{Either, FutureExt};
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`select()`] function.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct Select<A, B> {
+ inner: Option<(A, B)>,
+}
+
+impl<A: Unpin, B: Unpin> Unpin for Select<A, B> {}
+
+/// Waits for either one of two differently-typed futures to complete.
+///
+/// This function will return a new future which awaits either of the two
+/// futures to complete. The returned future will finish with both the value
+/// resolved and a future representing the completion of the other work.
+///
+/// Note that this function consumes the passed futures and returns a
+/// wrapped version of them.
+///
+/// Also note that if both futures have the same
+/// output type you can use the `Either::factor_first` method to
+/// conveniently extract out the value at the end.
+///
+/// # Examples
+///
+/// A simple example
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::{
+/// pin_mut,
+/// future::Either,
+/// future::self,
+/// };
+///
+/// // These two futures have different types even though their outputs have the same type.
+/// let future1 = async {
+/// future::pending::<()>().await; // will never finish
+/// 1
+/// };
+/// let future2 = async {
+/// future::ready(2).await
+/// };
+///
+/// // 'select' requires Future + Unpin bounds
+/// pin_mut!(future1);
+/// pin_mut!(future2);
+///
+/// let value = match future::select(future1, future2).await {
+/// Either::Left((value1, _)) => value1, // `value1` is resolved from `future1`
+/// // `_` represents `future2`
+/// Either::Right((value2, _)) => value2, // `value2` is resolved from `future2`
+/// // `_` represents `future1`
+/// };
+///
+/// assert!(value == 2);
+/// # });
+/// ```
+///
+/// A more complex example
+///
+/// ```
+/// use futures::future::{self, Either, Future, FutureExt};
+///
+/// // A poor-man's join implemented on top of select
+///
+/// fn join<A, B>(a: A, b: B) -> impl Future<Output=(A::Output, B::Output)>
+/// where A: Future + Unpin,
+/// B: Future + Unpin,
+/// {
+/// future::select(a, b).then(|either| {
+/// match either {
+/// Either::Left((x, b)) => b.map(move |y| (x, y)).left_future(),
+/// Either::Right((y, a)) => a.map(move |x| (x, y)).right_future(),
+/// }
+/// })
+/// }
+/// ```
+pub fn select<A, B>(future1: A, future2: B) -> Select<A, B>
+where
+ A: Future + Unpin,
+ B: Future + Unpin,
+{
+ assert_future::<Either<(A::Output, B), (B::Output, A)>, _>(Select {
+ inner: Some((future1, future2)),
+ })
+}
+
+impl<A, B> Future for Select<A, B>
+where
+ A: Future + Unpin,
+ B: Future + Unpin,
+{
+ type Output = Either<(A::Output, B), (B::Output, A)>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let (mut a, mut b) = self.inner.take().expect("cannot poll Select twice");
+ match a.poll_unpin(cx) {
+ Poll::Ready(x) => Poll::Ready(Either::Left((x, b))),
+ Poll::Pending => match b.poll_unpin(cx) {
+ Poll::Ready(x) => Poll::Ready(Either::Right((x, a))),
+ Poll::Pending => {
+ self.inner = Some((a, b));
+ Poll::Pending
+ }
+ },
+ }
+ }
+}
+
+impl<A, B> FusedFuture for Select<A, B>
+where
+ A: Future + Unpin,
+ B: Future + Unpin,
+{
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
diff --git a/vendor/futures-util/src/future/select_all.rs b/vendor/futures-util/src/future/select_all.rs
new file mode 100644
index 000000000..106e50844
--- /dev/null
+++ b/vendor/futures-util/src/future/select_all.rs
@@ -0,0 +1,74 @@
+use super::assert_future;
+use crate::future::FutureExt;
+use alloc::vec::Vec;
+use core::iter::FromIterator;
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`select_all`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct SelectAll<Fut> {
+ inner: Vec<Fut>,
+}
+
+impl<Fut: Unpin> Unpin for SelectAll<Fut> {}
+
+/// Creates a new future which will select over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready. Upon
+/// completion the item resolved will be returned, along with the index of the
+/// future that was ready and the list of all the remaining futures.
+///
+/// There are no guarantees provided on the order of the list with the remaining
+/// futures. They might be swapped around, reversed, or completely random.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
+pub fn select_all<I>(iter: I) -> SelectAll<I::Item>
+where
+ I: IntoIterator,
+ I::Item: Future + Unpin,
+{
+ let ret = SelectAll { inner: iter.into_iter().collect() };
+ assert!(!ret.inner.is_empty());
+ assert_future::<(<I::Item as Future>::Output, usize, Vec<I::Item>), _>(ret)
+}
+
+impl<Fut> SelectAll<Fut> {
+ /// Consumes this combinator, returning the underlying futures.
+ pub fn into_inner(self) -> Vec<Fut> {
+ self.inner
+ }
+}
+
+impl<Fut: Future + Unpin> Future for SelectAll<Fut> {
+ type Output = (Fut::Output, usize, Vec<Fut>);
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let item = self.inner.iter_mut().enumerate().find_map(|(i, f)| match f.poll_unpin(cx) {
+ Poll::Pending => None,
+ Poll::Ready(e) => Some((i, e)),
+ });
+ match item {
+ Some((idx, res)) => {
+ let _ = self.inner.swap_remove(idx);
+ let rest = mem::replace(&mut self.inner, Vec::new());
+ Poll::Ready((res, idx, rest))
+ }
+ None => Poll::Pending,
+ }
+ }
+}
+
+impl<Fut: Future + Unpin> FromIterator<Fut> for SelectAll<Fut> {
+ fn from_iter<T: IntoIterator<Item = Fut>>(iter: T) -> Self {
+ select_all(iter)
+ }
+}
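
`select_all` above has no usage example in its docs; a minimal sketch (illustrative; the futures must be `Unpin`, hence the boxing):

```rust
use futures::executor::block_on;
use futures::future::{self, FutureExt};

fn main() {
    block_on(async {
        // Boxing satisfies the `Unpin` bound and unifies the future types.
        let futures = vec![
            future::pending::<u32>().boxed(),
            future::ready(7_u32).boxed(),
        ];

        let (value, index, remaining) = future::select_all(futures).await;
        assert_eq!((value, index), (7, 1));
        // The still-pending future is handed back for further use.
        assert_eq!(remaining.len(), 1);
    });
}
```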
diff --git a/vendor/futures-util/src/future/select_ok.rs b/vendor/futures-util/src/future/select_ok.rs
new file mode 100644
index 000000000..0ad83c6db
--- /dev/null
+++ b/vendor/futures-util/src/future/select_ok.rs
@@ -0,0 +1,85 @@
+use super::assert_future;
+use crate::future::TryFutureExt;
+use alloc::vec::Vec;
+use core::iter::FromIterator;
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{Future, TryFuture};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`select_ok`] function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct SelectOk<Fut> {
+ inner: Vec<Fut>,
+}
+
+impl<Fut: Unpin> Unpin for SelectOk<Fut> {}
+
+/// Creates a new future which will select the first successful future over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike
+/// `select_all`, this will only return the first successful completion, or the last
+/// failure. This is useful in contexts where any success is desired and failures
+/// are ignored, unless all the futures fail.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
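+///
+/// # Examples
+///
+/// A minimal usage sketch with already-resolved futures; the error is discarded
+/// and the first success wins:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future::{self, select_ok};
+///
+/// let futures = vec![
+/// future::err::<u32, u32>(1),
+/// future::ok::<u32, u32>(2),
+/// ];
+///
+/// let (item, remaining) = select_ok(futures).await.unwrap();
+/// assert_eq!(item, 2);
+/// assert!(remaining.is_empty());
+/// # });
+/// ```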
+pub fn select_ok<I>(iter: I) -> SelectOk<I::Item>
+where
+ I: IntoIterator,
+ I::Item: TryFuture + Unpin,
+{
+ let ret = SelectOk { inner: iter.into_iter().collect() };
+ assert!(!ret.inner.is_empty(), "iterator provided to select_ok was empty");
+ assert_future::<
+ Result<(<I::Item as TryFuture>::Ok, Vec<I::Item>), <I::Item as TryFuture>::Error>,
+ _,
+ >(ret)
+}
+
+impl<Fut: TryFuture + Unpin> Future for SelectOk<Fut> {
+ type Output = Result<(Fut::Ok, Vec<Fut>), Fut::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // loop until we've either exhausted all errors, a success was hit, or nothing is ready
+ loop {
+ let item =
+ self.inner.iter_mut().enumerate().find_map(|(i, f)| match f.try_poll_unpin(cx) {
+ Poll::Pending => None,
+ Poll::Ready(e) => Some((i, e)),
+ });
+ match item {
+ Some((idx, res)) => {
+ // always remove Ok or Err, if it's not the last Err continue looping
+ drop(self.inner.remove(idx));
+ match res {
+ Ok(e) => {
+ let rest = mem::replace(&mut self.inner, Vec::new());
+ return Poll::Ready(Ok((e, rest)));
+ }
+ Err(e) => {
+ if self.inner.is_empty() {
+ return Poll::Ready(Err(e));
+ }
+ }
+ }
+ }
+ None => {
+ // based on the filter above, nothing is ready, return
+ return Poll::Pending;
+ }
+ }
+ }
+ }
+}
+
+impl<Fut: TryFuture + Unpin> FromIterator<Fut> for SelectOk<Fut> {
+ fn from_iter<T: IntoIterator<Item = Fut>>(iter: T) -> Self {
+ select_ok(iter)
+ }
+}
diff --git a/vendor/futures-util/src/future/try_future/into_future.rs b/vendor/futures-util/src/future/try_future/into_future.rs
new file mode 100644
index 000000000..9f093d0e2
--- /dev/null
+++ b/vendor/futures-util/src/future/try_future/into_future.rs
@@ -0,0 +1,36 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future, TryFuture};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`into_future`](super::TryFutureExt::into_future) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct IntoFuture<Fut> {
+ #[pin]
+ future: Fut,
+ }
+}
+
+impl<Fut> IntoFuture<Fut> {
+ #[inline]
+ pub(crate) fn new(future: Fut) -> Self {
+ Self { future }
+ }
+}
+
+impl<Fut: TryFuture + FusedFuture> FusedFuture for IntoFuture<Fut> {
+ fn is_terminated(&self) -> bool {
+ self.future.is_terminated()
+ }
+}
+
+impl<Fut: TryFuture> Future for IntoFuture<Fut> {
+ type Output = Result<Fut::Ok, Fut::Error>;
+
+ #[inline]
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.project().future.try_poll(cx)
+ }
+}
diff --git a/vendor/futures-util/src/future/try_future/mod.rs b/vendor/futures-util/src/future/try_future/mod.rs
new file mode 100644
index 000000000..fb3bdd8a0
--- /dev/null
+++ b/vendor/futures-util/src/future/try_future/mod.rs
@@ -0,0 +1,619 @@
+//! Futures
+//!
+//! This module contains a number of functions for working with `TryFuture`s,
+//! including the `TryFutureExt` trait which adds methods to `TryFuture` types.
+
+#[cfg(feature = "compat")]
+use crate::compat::Compat;
+use core::pin::Pin;
+use futures_core::{
+ future::TryFuture,
+ stream::TryStream,
+ task::{Context, Poll},
+};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+
+use crate::fns::{
+ inspect_err_fn, inspect_ok_fn, into_fn, map_err_fn, map_ok_fn, map_ok_or_else_fn,
+ unwrap_or_else_fn, InspectErrFn, InspectOkFn, IntoFn, MapErrFn, MapOkFn, MapOkOrElseFn,
+ UnwrapOrElseFn,
+};
+use crate::future::{assert_future, Inspect, Map};
+use crate::stream::assert_stream;
+
+// Combinators
+mod into_future;
+mod try_flatten;
+mod try_flatten_err;
+
+delegate_all!(
+ /// Future for the [`try_flatten`](TryFutureExt::try_flatten) method.
+ TryFlatten<Fut1, Fut2>(
+ try_flatten::TryFlatten<Fut1, Fut2>
+ ): Debug + Future + FusedFuture + New[|x: Fut1| try_flatten::TryFlatten::new(x)]
+);
+
+delegate_all!(
+ /// Future for the [`try_flatten_err`](TryFutureExt::try_flatten_err) method.
+ TryFlattenErr<Fut1, Fut2>(
+ try_flatten_err::TryFlattenErr<Fut1, Fut2>
+ ): Debug + Future + FusedFuture + New[|x: Fut1| try_flatten_err::TryFlattenErr::new(x)]
+);
+
+delegate_all!(
+ /// Future for the [`try_flatten_stream`](TryFutureExt::try_flatten_stream) method.
+ TryFlattenStream<Fut>(
+ try_flatten::TryFlatten<Fut, Fut::Ok>
+ ): Debug + Sink + Stream + FusedStream + New[|x: Fut| try_flatten::TryFlatten::new(x)]
+ where Fut: TryFuture
+);
+
+#[cfg(feature = "sink")]
+delegate_all!(
+ /// Sink for the [`flatten_sink`](TryFutureExt::flatten_sink) method.
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ FlattenSink<Fut, Si>(
+ try_flatten::TryFlatten<Fut, Si>
+ ): Debug + Sink + Stream + FusedStream + New[|x: Fut| try_flatten::TryFlatten::new(x)]
+);
+
+delegate_all!(
+ /// Future for the [`and_then`](TryFutureExt::and_then) method.
+ AndThen<Fut1, Fut2, F>(
+ TryFlatten<MapOk<Fut1, F>, Fut2>
+ ): Debug + Future + FusedFuture + New[|x: Fut1, f: F| TryFlatten::new(MapOk::new(x, f))]
+);
+
+delegate_all!(
+ /// Future for the [`or_else`](TryFutureExt::or_else) method.
+ OrElse<Fut1, Fut2, F>(
+ TryFlattenErr<MapErr<Fut1, F>, Fut2>
+ ): Debug + Future + FusedFuture + New[|x: Fut1, f: F| TryFlattenErr::new(MapErr::new(x, f))]
+);
+
+delegate_all!(
+ /// Future for the [`err_into`](TryFutureExt::err_into) method.
+ ErrInto<Fut, E>(
+ MapErr<Fut, IntoFn<E>>
+ ): Debug + Future + FusedFuture + New[|x: Fut| MapErr::new(x, into_fn())]
+);
+
+delegate_all!(
+ /// Future for the [`ok_into`](TryFutureExt::ok_into) method.
+ OkInto<Fut, E>(
+ MapOk<Fut, IntoFn<E>>
+ ): Debug + Future + FusedFuture + New[|x: Fut| MapOk::new(x, into_fn())]
+);
+
+delegate_all!(
+ /// Future for the [`inspect_ok`](super::TryFutureExt::inspect_ok) method.
+ InspectOk<Fut, F>(
+ Inspect<IntoFuture<Fut>, InspectOkFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Inspect::new(IntoFuture::new(x), inspect_ok_fn(f))]
+);
+
+delegate_all!(
+ /// Future for the [`inspect_err`](super::TryFutureExt::inspect_err) method.
+ InspectErr<Fut, F>(
+ Inspect<IntoFuture<Fut>, InspectErrFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Inspect::new(IntoFuture::new(x), inspect_err_fn(f))]
+);
+
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::into_future::IntoFuture;
+
+delegate_all!(
+ /// Future for the [`map_ok`](TryFutureExt::map_ok) method.
+ MapOk<Fut, F>(
+ Map<IntoFuture<Fut>, MapOkFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), map_ok_fn(f))]
+);
+
+delegate_all!(
+ /// Future for the [`map_err`](TryFutureExt::map_err) method.
+ MapErr<Fut, F>(
+ Map<IntoFuture<Fut>, MapErrFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), map_err_fn(f))]
+);
+
+delegate_all!(
+ /// Future for the [`map_ok_or_else`](TryFutureExt::map_ok_or_else) method.
+ MapOkOrElse<Fut, F, G>(
+ Map<IntoFuture<Fut>, MapOkOrElseFn<F, G>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F, g: G| Map::new(IntoFuture::new(x), map_ok_or_else_fn(f, g))]
+);
+
+delegate_all!(
+ /// Future for the [`unwrap_or_else`](TryFutureExt::unwrap_or_else) method.
+ UnwrapOrElse<Fut, F>(
+ Map<IntoFuture<Fut>, UnwrapOrElseFn<F>>
+ ): Debug + Future + FusedFuture + New[|x: Fut, f: F| Map::new(IntoFuture::new(x), unwrap_or_else_fn(f))]
+);
+
+impl<Fut: ?Sized + TryFuture> TryFutureExt for Fut {}
+
+/// Adapters specific to [`Result`]-returning futures
+pub trait TryFutureExt: TryFuture {
+ /// Flattens the execution of this future when the successful result of this
+ /// future is a [`Sink`].
+ ///
+ /// This can be useful when sink initialization is deferred, and it is
+ /// convenient to work with that sink as if the sink were available at the
+ /// call site.
+ ///
+ /// Note that this function consumes this future and returns a wrapped
+ /// version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::{Future, TryFutureExt};
+ /// use futures::sink::Sink;
+ /// # use futures::channel::mpsc::{self, SendError};
+ /// # type T = i32;
+ /// # type E = SendError;
+ ///
+ /// fn make_sink_async() -> impl Future<Output = Result<
+ /// impl Sink<T, Error = E>,
+ /// E,
+ /// >> { // ... }
+ /// # let (tx, _rx) = mpsc::unbounded::<i32>();
+ /// # futures::future::ready(Ok(tx))
+ /// # }
+ /// fn take_sink(sink: impl Sink<T, Error = E>) { /* ... */ }
+ ///
+ /// let fut = make_sink_async();
+ /// take_sink(fut.flatten_sink())
+ /// ```
+ #[cfg(feature = "sink")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ fn flatten_sink<Item>(self) -> FlattenSink<Self, Self::Ok>
+ where
+ Self::Ok: Sink<Item, Error = Self::Error>,
+ Self: Sized,
+ {
+ crate::sink::assert_sink::<Item, Self::Error, _>(FlattenSink::new(self))
+ }
+
+ /// Maps this future's success value to a different value.
+ ///
+ /// This method can be used to change the [`Ok`](TryFuture::Ok) type of the
+ /// future into a different type. It is similar to the [`Result::map`]
+ /// method. You can use this method to chain along a computation once the
+ /// future has been resolved.
+ ///
+ /// The provided closure `f` will only be called if this future is resolved
+ /// to an [`Ok`]. If it resolves to an [`Err`], panics, or is dropped, then
+ /// the provided closure will never be invoked.
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Ok::<i32, i32>(1) };
+ /// let future = future.map_ok(|x| x + 3);
+ /// assert_eq!(future.await, Ok(4));
+ /// # });
+ /// ```
+ ///
+ /// Calling [`map_ok`](TryFutureExt::map_ok) on an errored future has no
+ /// effect:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Err::<i32, i32>(1) };
+ /// let future = future.map_ok(|x| x + 3);
+ /// assert_eq!(future.await, Err(1));
+ /// # });
+ /// ```
+ fn map_ok<T, F>(self, f: F) -> MapOk<Self, F>
+ where
+ F: FnOnce(Self::Ok) -> T,
+ Self: Sized,
+ {
+ assert_future::<Result<T, Self::Error>, _>(MapOk::new(self, f))
+ }
+
+ /// Maps this future's success value to a different value, and permits error handling that results in the same type.
+ ///
+ /// This method can be used to coalesce your [`Ok`](TryFuture::Ok) type and [`Error`](TryFuture::Error) into another type,
+ /// where that type is the same for both outcomes.
+ ///
+ /// The provided closure `f` will only be called if this future is resolved
+ /// to an [`Ok`]. If it resolves to an [`Err`], panics, or is dropped, then
+ /// the provided closure will never be invoked.
+ ///
+ /// The provided closure `e` will only be called if this future is resolved
+ /// to an [`Err`]. If it resolves to an [`Ok`], panics, or is dropped, then
+ /// the provided closure will never be invoked.
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Ok::<i32, i32>(5) };
+ /// let future = future.map_ok_or_else(|x| x * 2, |x| x + 3);
+ /// assert_eq!(future.await, 8);
+ ///
+ /// let future = async { Err::<i32, i32>(5) };
+ /// let future = future.map_ok_or_else(|x| x * 2, |x| x + 3);
+ /// assert_eq!(future.await, 10);
+ /// # });
+ /// ```
+ ///
+ fn map_ok_or_else<T, E, F>(self, e: E, f: F) -> MapOkOrElse<Self, F, E>
+ where
+ F: FnOnce(Self::Ok) -> T,
+ E: FnOnce(Self::Error) -> T,
+ Self: Sized,
+ {
+ assert_future::<T, _>(MapOkOrElse::new(self, f, e))
+ }
+
+ /// Maps this future's error value to a different value.
+ ///
+ /// This method can be used to change the [`Error`](TryFuture::Error) type
+ /// of the future into a different type. It is similar to the
+ /// [`Result::map_err`] method. You can use this method for example to
+ /// ensure that futures have the same [`Error`](TryFuture::Error) type when
+ /// using [`select!`] or [`join!`].
+ ///
+ /// The provided closure `f` will only be called if this future is resolved
+ /// to an [`Err`]. If it resolves to an [`Ok`], panics, or is dropped, then
+ /// the provided closure will never be invoked.
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Err::<i32, i32>(1) };
+ /// let future = future.map_err(|x| x + 3);
+ /// assert_eq!(future.await, Err(4));
+ /// # });
+ /// ```
+ ///
+ /// Calling [`map_err`](TryFutureExt::map_err) on a successful future has
+ /// no effect:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Ok::<i32, i32>(1) };
+ /// let future = future.map_err(|x| x + 3);
+ /// assert_eq!(future.await, Ok(1));
+ /// # });
+ /// ```
+ fn map_err<E, F>(self, f: F) -> MapErr<Self, F>
+ where
+ F: FnOnce(Self::Error) -> E,
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Ok, E>, _>(MapErr::new(self, f))
+ }
+
+ /// Maps this future's [`Error`](TryFuture::Error) to a new error type
+ /// using the [`Into`](std::convert::Into) trait.
+ ///
+ /// This method does for futures what the `?`-operator does for
+ /// [`Result`]: It lets the compiler infer the type of the resulting
+ /// error. Just as [`map_err`](TryFutureExt::map_err), this is useful for
+ /// example to ensure that futures have the same [`Error`](TryFuture::Error)
+ /// type when using [`select!`] or [`join!`].
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future_err_u8 = async { Err::<(), u8>(1) };
+ /// let future_err_i32 = future_err_u8.err_into::<i32>();
+ /// # });
+ /// ```
+ fn err_into<E>(self) -> ErrInto<Self, E>
+ where
+ Self: Sized,
+ Self::Error: Into<E>,
+ {
+ assert_future::<Result<Self::Ok, E>, _>(ErrInto::new(self))
+ }
+
+ /// Maps this future's [`Ok`](TryFuture::Ok) to a new type
+ /// using the [`Into`](std::convert::Into) trait.
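+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch widening the success type from `u8` to `u32`:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future_ok_u8 = async { Ok::<u8, ()>(1) };
+ /// let future_ok_u32 = future_ok_u8.ok_into::<u32>();
+ /// assert_eq!(future_ok_u32.await, Ok(1));
+ /// # });
+ /// ```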
+ fn ok_into<U>(self) -> OkInto<Self, U>
+ where
+ Self: Sized,
+ Self::Ok: Into<U>,
+ {
+ assert_future::<Result<U, Self::Error>, _>(OkInto::new(self))
+ }
+
+ /// Executes another future after this one resolves successfully. The
+ /// success value is passed to a closure to create this subsequent future.
+ ///
+ /// The provided closure `f` will only be called if this future is resolved
+ /// to an [`Ok`]. If this future resolves to an [`Err`], panics, or is
+ /// dropped, then the provided closure will never be invoked. The
+ /// [`Error`](TryFuture::Error) type of this future and the future
+ /// returned by `f` have to match.
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Ok::<i32, i32>(1) };
+ /// let future = future.and_then(|x| async move { Ok::<i32, i32>(x + 3) });
+ /// assert_eq!(future.await, Ok(4));
+ /// # });
+ /// ```
+ ///
+ /// Calling [`and_then`](TryFutureExt::and_then) on an errored future has no
+ /// effect:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Err::<i32, i32>(1) };
+ /// let future = future.and_then(|x| async move { Err::<i32, i32>(x + 3) });
+ /// assert_eq!(future.await, Err(1));
+ /// # });
+ /// ```
+ fn and_then<Fut, F>(self, f: F) -> AndThen<Self, Fut, F>
+ where
+ F: FnOnce(Self::Ok) -> Fut,
+ Fut: TryFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_future::<Result<Fut::Ok, Fut::Error>, _>(AndThen::new(self, f))
+ }
+
+ /// Executes another future if this one resolves to an error. The
+ /// error value is passed to a closure to create this subsequent future.
+ ///
+ /// The provided closure `f` will only be called if this future is resolved
+ /// to an [`Err`]. If this future resolves to an [`Ok`], panics, or is
+ /// dropped, then the provided closure will never be invoked. The
+ /// [`Ok`](TryFuture::Ok) type of this future and the future returned by `f`
+ /// have to match.
+ ///
+ /// Note that this method consumes the future it is called on and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Err::<i32, i32>(1) };
+ /// let future = future.or_else(|x| async move { Err::<i32, i32>(x + 3) });
+ /// assert_eq!(future.await, Err(4));
+ /// # });
+ /// ```
+ ///
+ /// Calling [`or_else`](TryFutureExt::or_else) on a successful future has
+ /// no effect:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Ok::<i32, i32>(1) };
+ /// let future = future.or_else(|x| async move { Ok::<i32, i32>(x + 3) });
+ /// assert_eq!(future.await, Ok(1));
+ /// # });
+ /// ```
+ fn or_else<Fut, F>(self, f: F) -> OrElse<Self, Fut, F>
+ where
+ F: FnOnce(Self::Error) -> Fut,
+ Fut: TryFuture<Ok = Self::Ok>,
+ Self: Sized,
+ {
+ assert_future::<Result<Fut::Ok, Fut::Error>, _>(OrElse::new(self, f))
+ }
+
+ /// Do something with the success value of a future before passing it on.
+ ///
+ /// When using futures, you'll often chain several of them together. While
+ /// working on such code, you might want to check out what's happening at
+ /// various parts in the pipeline, without consuming the intermediate
+ /// value. To do that, insert a call to `inspect_ok`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::TryFutureExt;
+ ///
+ /// let future = async { Ok::<_, ()>(1) };
+ /// let new_future = future.inspect_ok(|&x| println!("about to resolve: {}", x));
+ /// assert_eq!(new_future.await, Ok(1));
+ /// # });
+ /// ```
+ fn inspect_ok<F>(self, f: F) -> InspectOk<Self, F>
+ where
+ F: FnOnce(&Self::Ok),
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Ok, Self::Error>, _>(InspectOk::new(self, f))
+ }
+
+ /// Do something with the error value of a future before passing it on.
+ ///
+ /// When using futures, you'll often chain several of them together. While
+ /// working on such code, you might want to check out what's happening at
+ /// various parts in the pipeline, without consuming the intermediate
+ /// value. To do that, insert a call to `inspect_err`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::TryFutureExt;
+ ///
+ /// let future = async { Err::<(), _>(1) };
+ /// let new_future = future.inspect_err(|&x| println!("about to error: {}", x));
+ /// assert_eq!(new_future.await, Err(1));
+ /// # });
+ /// ```
+ fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
+ where
+ F: FnOnce(&Self::Error),
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Ok, Self::Error>, _>(InspectErr::new(self, f))
+ }
+
+ /// Flatten the execution of this future when the successful result of this
+ /// future is another future.
+ ///
+ /// This is equivalent to `future.and_then(|x| x)`.
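+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch flattening a nested `Result`-returning future:
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let nested = async { Ok::<_, u32>(async { Ok::<u32, u32>(1) }) };
+ /// let flattened = nested.try_flatten();
+ /// assert_eq!(flattened.await, Ok(1));
+ /// # });
+ /// ```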
+ fn try_flatten(self) -> TryFlatten<Self, Self::Ok>
+ where
+ Self::Ok: TryFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_future::<Result<<Self::Ok as TryFuture>::Ok, Self::Error>, _>(TryFlatten::new(self))
+ }
+
+ /// Flatten the execution of this future when the successful result of this
+ /// future is a stream.
+ ///
+ /// This can be useful when stream initialization is deferred, and it is
+ /// convenient to work with that stream as if the stream were available at the
+ /// call site.
+ ///
+ /// Note that this function consumes this future and returns a wrapped
+ /// version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future::TryFutureExt;
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream_items = vec![17, 18, 19].into_iter().map(Ok);
+ /// let future_of_a_stream = async { Ok::<_, ()>(stream::iter(stream_items)) };
+ ///
+ /// let stream = future_of_a_stream.try_flatten_stream();
+ /// let list = stream.try_collect::<Vec<_>>().await;
+ /// assert_eq!(list, Ok(vec![17, 18, 19]));
+ /// # });
+ /// ```
+ fn try_flatten_stream(self) -> TryFlattenStream<Self>
+ where
+ Self::Ok: TryStream<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<<Self::Ok as TryStream>::Ok, Self::Error>, _>(TryFlattenStream::new(
+ self,
+ ))
+ }
+
+ /// Unwraps this future's output, producing a future with this future's
+ /// [`Ok`](TryFuture::Ok) type as its
+ /// [`Output`](std::future::Future::Output) type.
+ ///
+ /// If this future is resolved successfully, the returned future will
+ /// contain the original future's success value as output. Otherwise, the
+ /// closure `f` is called with the error value to produce an alternate
+ /// success value.
+ ///
+ /// This method is similar to the [`Result::unwrap_or_else`] method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::TryFutureExt;
+ ///
+ /// # futures::executor::block_on(async {
+ /// let future = async { Err::<(), &str>("Boom!") };
+ /// let future = future.unwrap_or_else(|_| ());
+ /// assert_eq!(future.await, ());
+ /// # });
+ /// ```
+ fn unwrap_or_else<F>(self, f: F) -> UnwrapOrElse<Self, F>
+ where
+ Self: Sized,
+ F: FnOnce(Self::Error) -> Self::Ok,
+ {
+ assert_future::<Self::Ok, _>(UnwrapOrElse::new(self, f))
+ }
+
+ /// Wraps a [`TryFuture`] into a future compatible with libraries using
+ /// futures 0.1 future definitions. Requires the `compat` feature to enable.
+ #[cfg(feature = "compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+ fn compat(self) -> Compat<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ Compat::new(self)
+ }
+
+ /// Wraps a [`TryFuture`] into a type that implements
+ /// [`Future`](std::future::Future).
+ ///
+ /// [`TryFuture`]s currently do not implement the
+ /// [`Future`](std::future::Future) trait due to limitations of the
+ /// compiler.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::{Future, TryFuture, TryFutureExt};
+ ///
+ /// # type T = i32;
+ /// # type E = ();
+ /// fn make_try_future() -> impl TryFuture<Ok = T, Error = E> { // ... }
+ /// # async { Ok::<i32, ()>(1) }
+ /// # }
+ /// fn take_future(future: impl Future<Output = Result<T, E>>) { /* ... */ }
+ ///
+ /// take_future(make_try_future().into_future());
+ /// ```
+ fn into_future(self) -> IntoFuture<Self>
+ where
+ Self: Sized,
+ {
+ assert_future::<Result<Self::Ok, Self::Error>, _>(IntoFuture::new(self))
+ }
+
+ /// A convenience method for calling [`TryFuture::try_poll`] on [`Unpin`]
+ /// future types.
+ fn try_poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Result<Self::Ok, Self::Error>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).try_poll(cx)
+ }
+}
diff --git a/vendor/futures-util/src/future/try_future/try_flatten.rs b/vendor/futures-util/src/future/try_future/try_flatten.rs
new file mode 100644
index 000000000..1ce4559ac
--- /dev/null
+++ b/vendor/futures-util/src/future/try_future/try_flatten.rs
@@ -0,0 +1,162 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future, TryFuture};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ #[project = TryFlattenProj]
+ #[derive(Debug)]
+ pub enum TryFlatten<Fut1, Fut2> {
+ First { #[pin] f: Fut1 },
+ Second { #[pin] f: Fut2 },
+ Empty,
+ }
+}
+
+impl<Fut1, Fut2> TryFlatten<Fut1, Fut2> {
+ pub(crate) fn new(future: Fut1) -> Self {
+ Self::First { f: future }
+ }
+}
+
+impl<Fut> FusedFuture for TryFlatten<Fut, Fut::Ok>
+where
+ Fut: TryFuture,
+ Fut::Ok: TryFuture<Error = Fut::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Empty => true,
+ _ => false,
+ }
+ }
+}
+
+impl<Fut> Future for TryFlatten<Fut, Fut::Ok>
+where
+ Fut: TryFuture,
+ Fut::Ok: TryFuture<Error = Fut::Error>,
+{
+ type Output = Result<<Fut::Ok as TryFuture>::Ok, Fut::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) {
+ Ok(f) => self.set(Self::Second { f }),
+ Err(e) => {
+ self.set(Self::Empty);
+ break Err(e);
+ }
+ },
+ TryFlattenProj::Second { f } => {
+ let output = ready!(f.try_poll(cx));
+ self.set(Self::Empty);
+ break output;
+ }
+ TryFlattenProj::Empty => panic!("TryFlatten polled after completion"),
+ }
+ })
+ }
+}
+
+impl<Fut> FusedStream for TryFlatten<Fut, Fut::Ok>
+where
+ Fut: TryFuture,
+ Fut::Ok: TryStream<Error = Fut::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Empty => true,
+ _ => false,
+ }
+ }
+}
+
+impl<Fut> Stream for TryFlatten<Fut, Fut::Ok>
+where
+ Fut: TryFuture,
+ Fut::Ok: TryStream<Error = Fut::Error>,
+{
+ type Item = Result<<Fut::Ok as TryStream>::Ok, Fut::Error>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) {
+ Ok(f) => self.set(Self::Second { f }),
+ Err(e) => {
+ self.set(Self::Empty);
+ break Some(Err(e));
+ }
+ },
+ TryFlattenProj::Second { f } => {
+ let output = ready!(f.try_poll_next(cx));
+ if output.is_none() {
+ self.set(Self::Empty);
+ }
+ break output;
+ }
+ TryFlattenProj::Empty => break None,
+ }
+ })
+ }
+}
+
+#[cfg(feature = "sink")]
+impl<Fut, Item> Sink<Item> for TryFlatten<Fut, Fut::Ok>
+where
+ Fut: TryFuture,
+ Fut::Ok: Sink<Item, Error = Fut::Error>,
+{
+ type Error = Fut::Error;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ TryFlattenProj::First { f } => match ready!(f.try_poll(cx)) {
+ Ok(f) => self.set(Self::Second { f }),
+ Err(e) => {
+ self.set(Self::Empty);
+ break Err(e);
+ }
+ },
+ TryFlattenProj::Second { f } => {
+ break ready!(f.poll_ready(cx));
+ }
+ TryFlattenProj::Empty => panic!("poll_ready called after eof"),
+ }
+ })
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ match self.project() {
+ TryFlattenProj::First { .. } => panic!("poll_ready not called first"),
+ TryFlattenProj::Second { f } => f.start_send(item),
+ TryFlattenProj::Empty => panic!("start_send called after eof"),
+ }
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.project() {
+ TryFlattenProj::First { .. } => Poll::Ready(Ok(())),
+ TryFlattenProj::Second { f } => f.poll_flush(cx),
+ TryFlattenProj::Empty => panic!("poll_flush called after eof"),
+ }
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let res = match self.as_mut().project() {
+ TryFlattenProj::Second { f } => f.poll_close(cx),
+ _ => Poll::Ready(Ok(())),
+ };
+ if res.is_ready() {
+ self.set(Self::Empty);
+ }
+ res
+ }
+}
diff --git a/vendor/futures-util/src/future/try_future/try_flatten_err.rs b/vendor/futures-util/src/future/try_future/try_flatten_err.rs
new file mode 100644
index 000000000..39b7d9f5f
--- /dev/null
+++ b/vendor/futures-util/src/future/try_future/try_flatten_err.rs
@@ -0,0 +1,62 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future, TryFuture};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ #[project = TryFlattenErrProj]
+ #[derive(Debug)]
+ pub enum TryFlattenErr<Fut1, Fut2> {
+ First { #[pin] f: Fut1 },
+ Second { #[pin] f: Fut2 },
+ Empty,
+ }
+}
+
+impl<Fut1, Fut2> TryFlattenErr<Fut1, Fut2> {
+ pub(crate) fn new(future: Fut1) -> Self {
+ Self::First { f: future }
+ }
+}
+
+impl<Fut> FusedFuture for TryFlattenErr<Fut, Fut::Error>
+where
+ Fut: TryFuture,
+ Fut::Error: TryFuture<Ok = Fut::Ok>,
+{
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Empty => true,
+ _ => false,
+ }
+ }
+}
+
+impl<Fut> Future for TryFlattenErr<Fut, Fut::Error>
+where
+ Fut: TryFuture,
+ Fut::Error: TryFuture<Ok = Fut::Ok>,
+{
+ type Output = Result<Fut::Ok, <Fut::Error as TryFuture>::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Poll::Ready(loop {
+ match self.as_mut().project() {
+ TryFlattenErrProj::First { f } => match ready!(f.try_poll(cx)) {
+ Err(f) => self.set(Self::Second { f }),
+ Ok(e) => {
+ self.set(Self::Empty);
+ break Ok(e);
+ }
+ },
+ TryFlattenErrProj::Second { f } => {
+ let output = ready!(f.try_poll(cx));
+ self.set(Self::Empty);
+ break output;
+ }
+ TryFlattenErrProj::Empty => panic!("TryFlattenErr polled after completion"),
+ }
+ })
+ }
+}
diff --git a/vendor/futures-util/src/future/try_join.rs b/vendor/futures-util/src/future/try_join.rs
new file mode 100644
index 000000000..6af1f0ccb
--- /dev/null
+++ b/vendor/futures-util/src/future/try_join.rs
@@ -0,0 +1,256 @@
+#![allow(non_snake_case)]
+
+use crate::future::{assert_future, try_maybe_done, TryMaybeDone};
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{Future, TryFuture};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+macro_rules! generate {
+ ($(
+ $(#[$doc:meta])*
+ ($Join:ident, <Fut1, $($Fut:ident),*>),
+ )*) => ($(
+ pin_project! {
+ $(#[$doc])*
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct $Join<Fut1: TryFuture, $($Fut: TryFuture),*> {
+ #[pin] Fut1: TryMaybeDone<Fut1>,
+ $(#[pin] $Fut: TryMaybeDone<$Fut>,)*
+ }
+ }
+
+ impl<Fut1, $($Fut),*> fmt::Debug for $Join<Fut1, $($Fut),*>
+ where
+ Fut1: TryFuture + fmt::Debug,
+ Fut1::Ok: fmt::Debug,
+ Fut1::Error: fmt::Debug,
+ $(
+ $Fut: TryFuture + fmt::Debug,
+ $Fut::Ok: fmt::Debug,
+ $Fut::Error: fmt::Debug,
+ )*
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!($Join))
+ .field("Fut1", &self.Fut1)
+ $(.field(stringify!($Fut), &self.$Fut))*
+ .finish()
+ }
+ }
+
+ impl<Fut1, $($Fut),*> $Join<Fut1, $($Fut),*>
+ where
+ Fut1: TryFuture,
+ $(
+ $Fut: TryFuture<Error=Fut1::Error>
+ ),*
+ {
+ fn new(Fut1: Fut1, $($Fut: $Fut),*) -> Self {
+ Self {
+ Fut1: try_maybe_done(Fut1),
+ $($Fut: try_maybe_done($Fut)),*
+ }
+ }
+ }
+
+ impl<Fut1, $($Fut),*> Future for $Join<Fut1, $($Fut),*>
+ where
+ Fut1: TryFuture,
+ $(
+ $Fut: TryFuture<Error=Fut1::Error>
+ ),*
+ {
+ type Output = Result<(Fut1::Ok, $($Fut::Ok),*), Fut1::Error>;
+
+ fn poll(
+ self: Pin<&mut Self>, cx: &mut Context<'_>
+ ) -> Poll<Self::Output> {
+ let mut all_done = true;
+ let mut futures = self.project();
+ all_done &= futures.Fut1.as_mut().poll(cx)?.is_ready();
+ $(
+ all_done &= futures.$Fut.as_mut().poll(cx)?.is_ready();
+ )*
+
+ if all_done {
+ Poll::Ready(Ok((
+ futures.Fut1.take_output().unwrap(),
+ $(
+ futures.$Fut.take_output().unwrap()
+ ),*
+ )))
+ } else {
+ Poll::Pending
+ }
+ }
+ }
+ )*)
+}
+
+generate! {
+ /// Future for the [`try_join`](try_join()) function.
+ (TryJoin, <Fut1, Fut2>),
+
+ /// Future for the [`try_join3`] function.
+ (TryJoin3, <Fut1, Fut2, Fut3>),
+
+ /// Future for the [`try_join4`] function.
+ (TryJoin4, <Fut1, Fut2, Fut3, Fut4>),
+
+ /// Future for the [`try_join5`] function.
+ (TryJoin5, <Fut1, Fut2, Fut3, Fut4, Fut5>),
+}
+
+/// Joins the result of two futures, waiting for them both to complete or
+/// for one to produce an error.
+///
+/// This function will return a new future which awaits both futures to
+/// complete. If successful, the returned future will finish with a tuple of
+/// both results. If unsuccessful, it will complete with the first error
+/// encountered.
+///
+/// Note that this function consumes the passed futures and returns a
+/// wrapped version of it.
+///
+/// # Examples
+///
+/// When used on multiple futures that return [`Ok`], `try_join` will return
+/// [`Ok`] of a tuple of the values:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(Ok::<i32, i32>(1));
+/// let b = future::ready(Ok::<i32, i32>(2));
+/// let pair = future::try_join(a, b);
+///
+/// assert_eq!(pair.await, Ok((1, 2)));
+/// # });
+/// ```
+///
+/// If one of the futures resolves to an error, `try_join` will return
+/// that error:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(Ok::<i32, i32>(1));
+/// let b = future::ready(Err::<i32, i32>(2));
+/// let pair = future::try_join(a, b);
+///
+/// assert_eq!(pair.await, Err(2));
+/// # });
+/// ```
+pub fn try_join<Fut1, Fut2>(future1: Fut1, future2: Fut2) -> TryJoin<Fut1, Fut2>
+where
+ Fut1: TryFuture,
+ Fut2: TryFuture<Error = Fut1::Error>,
+{
+ assert_future::<Result<(Fut1::Ok, Fut2::Ok), Fut1::Error>, _>(TryJoin::new(future1, future2))
+}
+
+/// Same as [`try_join`](try_join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(Ok::<i32, i32>(1));
+/// let b = future::ready(Ok::<i32, i32>(2));
+/// let c = future::ready(Ok::<i32, i32>(3));
+/// let tuple = future::try_join3(a, b, c);
+///
+/// assert_eq!(tuple.await, Ok((1, 2, 3)));
+/// # });
+/// ```
+pub fn try_join3<Fut1, Fut2, Fut3>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+) -> TryJoin3<Fut1, Fut2, Fut3>
+where
+ Fut1: TryFuture,
+ Fut2: TryFuture<Error = Fut1::Error>,
+ Fut3: TryFuture<Error = Fut1::Error>,
+{
+ assert_future::<Result<(Fut1::Ok, Fut2::Ok, Fut3::Ok), Fut1::Error>, _>(TryJoin3::new(
+ future1, future2, future3,
+ ))
+}
+
+/// Same as [`try_join`](try_join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(Ok::<i32, i32>(1));
+/// let b = future::ready(Ok::<i32, i32>(2));
+/// let c = future::ready(Ok::<i32, i32>(3));
+/// let d = future::ready(Ok::<i32, i32>(4));
+/// let tuple = future::try_join4(a, b, c, d);
+///
+/// assert_eq!(tuple.await, Ok((1, 2, 3, 4)));
+/// # });
+/// ```
+pub fn try_join4<Fut1, Fut2, Fut3, Fut4>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+ future4: Fut4,
+) -> TryJoin4<Fut1, Fut2, Fut3, Fut4>
+where
+ Fut1: TryFuture,
+ Fut2: TryFuture<Error = Fut1::Error>,
+ Fut3: TryFuture<Error = Fut1::Error>,
+ Fut4: TryFuture<Error = Fut1::Error>,
+{
+ assert_future::<Result<(Fut1::Ok, Fut2::Ok, Fut3::Ok, Fut4::Ok), Fut1::Error>, _>(
+ TryJoin4::new(future1, future2, future3, future4),
+ )
+}
+
+/// Same as [`try_join`](try_join()), but with more futures.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+///
+/// let a = future::ready(Ok::<i32, i32>(1));
+/// let b = future::ready(Ok::<i32, i32>(2));
+/// let c = future::ready(Ok::<i32, i32>(3));
+/// let d = future::ready(Ok::<i32, i32>(4));
+/// let e = future::ready(Ok::<i32, i32>(5));
+/// let tuple = future::try_join5(a, b, c, d, e);
+///
+/// assert_eq!(tuple.await, Ok((1, 2, 3, 4, 5)));
+/// # });
+/// ```
+pub fn try_join5<Fut1, Fut2, Fut3, Fut4, Fut5>(
+ future1: Fut1,
+ future2: Fut2,
+ future3: Fut3,
+ future4: Fut4,
+ future5: Fut5,
+) -> TryJoin5<Fut1, Fut2, Fut3, Fut4, Fut5>
+where
+ Fut1: TryFuture,
+ Fut2: TryFuture<Error = Fut1::Error>,
+ Fut3: TryFuture<Error = Fut1::Error>,
+ Fut4: TryFuture<Error = Fut1::Error>,
+ Fut5: TryFuture<Error = Fut1::Error>,
+{
+ assert_future::<Result<(Fut1::Ok, Fut2::Ok, Fut3::Ok, Fut4::Ok, Fut5::Ok), Fut1::Error>, _>(
+ TryJoin5::new(future1, future2, future3, future4, future5),
+ )
+}
diff --git a/vendor/futures-util/src/future/try_join_all.rs b/vendor/futures-util/src/future/try_join_all.rs
new file mode 100644
index 000000000..29244af83
--- /dev/null
+++ b/vendor/futures-util/src/future/try_join_all.rs
@@ -0,0 +1,137 @@
+//! Definition of the `TryJoinAll` combinator, waiting for all of a list of
+//! futures to finish with either success or error.
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::fmt;
+use core::future::Future;
+use core::iter::FromIterator;
+use core::mem;
+use core::pin::Pin;
+use core::task::{Context, Poll};
+
+use super::{assert_future, TryFuture, TryMaybeDone};
+
+fn iter_pin_mut<T>(slice: Pin<&mut [T]>) -> impl Iterator<Item = Pin<&mut T>> {
+ // Safety: `std` _could_ make this unsound if it were to decide Pin's
+ // invariants aren't required to transmit through slices. Otherwise this has
+ // the same safety as a normal field pin projection.
+ unsafe { slice.get_unchecked_mut() }.iter_mut().map(|t| unsafe { Pin::new_unchecked(t) })
+}
+
+enum FinalState<E = ()> {
+ Pending,
+ AllDone,
+ Error(E),
+}
+
+/// Future for the [`try_join_all`] function.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct TryJoinAll<F>
+where
+ F: TryFuture,
+{
+ elems: Pin<Box<[TryMaybeDone<F>]>>,
+}
+
+impl<F> fmt::Debug for TryJoinAll<F>
+where
+ F: TryFuture + fmt::Debug,
+ F::Ok: fmt::Debug,
+ F::Error: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryJoinAll").field("elems", &self.elems).finish()
+ }
+}
+
+/// Creates a future which represents either a collection of the results of the
+/// futures given or an error.
+///
+/// The returned future will drive execution for all of its underlying futures,
+/// collecting the results into a destination `Vec<T>` in the same order as they
+/// were provided.
+///
+/// If any future returns an error then all other futures will be canceled and
+/// an error will be returned immediately. If all futures complete successfully,
+/// however, then the returned future will succeed with a `Vec` of all the
+/// successful results.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future::{self, try_join_all};
+///
+/// let futures = vec![
+/// future::ok::<u32, u32>(1),
+/// future::ok::<u32, u32>(2),
+/// future::ok::<u32, u32>(3),
+/// ];
+///
+/// assert_eq!(try_join_all(futures).await, Ok(vec![1, 2, 3]));
+///
+/// let futures = vec![
+/// future::ok::<u32, u32>(1),
+/// future::err::<u32, u32>(2),
+/// future::ok::<u32, u32>(3),
+/// ];
+///
+/// assert_eq!(try_join_all(futures).await, Err(2));
+/// # });
+/// ```
+pub fn try_join_all<I>(i: I) -> TryJoinAll<I::Item>
+where
+ I: IntoIterator,
+ I::Item: TryFuture,
+{
+ let elems: Box<[_]> = i.into_iter().map(TryMaybeDone::Future).collect();
+ assert_future::<Result<Vec<<I::Item as TryFuture>::Ok>, <I::Item as TryFuture>::Error>, _>(
+ TryJoinAll { elems: elems.into() },
+ )
+}
+
+impl<F> Future for TryJoinAll<F>
+where
+ F: TryFuture,
+{
+ type Output = Result<Vec<F::Ok>, F::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut state = FinalState::AllDone;
+
+ for elem in iter_pin_mut(self.elems.as_mut()) {
+ match elem.try_poll(cx) {
+ Poll::Pending => state = FinalState::Pending,
+ Poll::Ready(Ok(())) => {}
+ Poll::Ready(Err(e)) => {
+ state = FinalState::Error(e);
+ break;
+ }
+ }
+ }
+
+ match state {
+ FinalState::Pending => Poll::Pending,
+ FinalState::AllDone => {
+ let mut elems = mem::replace(&mut self.elems, Box::pin([]));
+ let results =
+ iter_pin_mut(elems.as_mut()).map(|e| e.take_output().unwrap()).collect();
+ Poll::Ready(Ok(results))
+ }
+ FinalState::Error(e) => {
+ let _ = mem::replace(&mut self.elems, Box::pin([]));
+ Poll::Ready(Err(e))
+ }
+ }
+ }
+}
+
+impl<F: TryFuture> FromIterator<F> for TryJoinAll<F> {
+ fn from_iter<T: IntoIterator<Item = F>>(iter: T) -> Self {
+ try_join_all(iter)
+ }
+}
diff --git a/vendor/futures-util/src/future/try_maybe_done.rs b/vendor/futures-util/src/future/try_maybe_done.rs
new file mode 100644
index 000000000..24044d2c2
--- /dev/null
+++ b/vendor/futures-util/src/future/try_maybe_done.rs
@@ -0,0 +1,92 @@
+//! Definition of the TryMaybeDone combinator
+
+use super::assert_future;
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future, TryFuture};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+
+/// A future that may have completed with an error.
+///
+/// This is created by the [`try_maybe_done()`] function.
+#[derive(Debug)]
+pub enum TryMaybeDone<Fut: TryFuture> {
+ /// A not-yet-completed future
+ Future(/* #[pin] */ Fut),
+ /// The output of the completed future
+ Done(Fut::Ok),
+ /// The empty variant after the result of a [`TryMaybeDone`] has been
+ /// taken using the [`take_output`](TryMaybeDone::take_output) method,
+ /// or if the future returned an error.
+ Gone,
+}
+
+impl<Fut: TryFuture + Unpin> Unpin for TryMaybeDone<Fut> {}
+
+/// Wraps a future into a `TryMaybeDone`
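+///
+/// # Examples
+///
+/// A minimal sketch: await the wrapper, then take the stored output.
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::future;
+/// use futures::pin_mut;
+///
+/// let future = future::try_maybe_done(future::ok::<i32, ()>(5));
+/// pin_mut!(future);
+/// assert_eq!(future.as_mut().await, Ok(()));
+/// assert_eq!(future.as_mut().take_output(), Some(5));
+/// # });
+/// ```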
+pub fn try_maybe_done<Fut: TryFuture>(future: Fut) -> TryMaybeDone<Fut> {
+ assert_future::<Result<(), Fut::Error>, _>(TryMaybeDone::Future(future))
+}
+
+impl<Fut: TryFuture> TryMaybeDone<Fut> {
+ /// Returns an [`Option`] containing a mutable reference to the output of the future.
+ /// The output of this method will be [`Some`] if and only if the inner
+ /// future has completed successfully and [`take_output`](TryMaybeDone::take_output)
+ /// has not yet been called.
+ #[inline]
+ pub fn output_mut(self: Pin<&mut Self>) -> Option<&mut Fut::Ok> {
+ unsafe {
+ match self.get_unchecked_mut() {
+ TryMaybeDone::Done(res) => Some(res),
+ _ => None,
+ }
+ }
+ }
+
+ /// Attempt to take the output of a `TryMaybeDone` without driving it
+ /// towards completion.
+ #[inline]
+ pub fn take_output(self: Pin<&mut Self>) -> Option<Fut::Ok> {
+ match &*self {
+ Self::Done(_) => {}
+ Self::Future(_) | Self::Gone => return None,
+ }
+ unsafe {
+ match mem::replace(self.get_unchecked_mut(), Self::Gone) {
+ TryMaybeDone::Done(output) => Some(output),
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+
+impl<Fut: TryFuture> FusedFuture for TryMaybeDone<Fut> {
+ fn is_terminated(&self) -> bool {
+ match self {
+ Self::Future(_) => false,
+ Self::Done(_) | Self::Gone => true,
+ }
+ }
+}
+
+impl<Fut: TryFuture> Future for TryMaybeDone<Fut> {
+ type Output = Result<(), Fut::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ unsafe {
+ match self.as_mut().get_unchecked_mut() {
+ TryMaybeDone::Future(f) => match ready!(Pin::new_unchecked(f).try_poll(cx)) {
+ Ok(res) => self.set(Self::Done(res)),
+ Err(e) => {
+ self.set(Self::Gone);
+ return Poll::Ready(Err(e));
+ }
+ },
+ TryMaybeDone::Done(_) => {}
+ TryMaybeDone::Gone => panic!("TryMaybeDone polled after value taken"),
+ }
+ }
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/future/try_select.rs b/vendor/futures-util/src/future/try_select.rs
new file mode 100644
index 000000000..4d0b7ff13
--- /dev/null
+++ b/vendor/futures-util/src/future/try_select.rs
@@ -0,0 +1,84 @@
+use crate::future::{Either, TryFutureExt};
+use core::pin::Pin;
+use futures_core::future::{Future, TryFuture};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`try_select()`] function.
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct TrySelect<A, B> {
+ inner: Option<(A, B)>,
+}
+
+impl<A: Unpin, B: Unpin> Unpin for TrySelect<A, B> {}
+
+/// Waits for either one of two differently-typed futures to complete.
+///
+/// This function will return a new future which waits for either of the two
+/// futures to complete. The returned future will finish with both the resolved
+/// value and a future representing the completion of the other work.
+///
+/// Note that this function consumes the receiving futures and returns a
+/// wrapped version of them.
+///
+/// Also note that if both this and the second future have the same
+/// success/error type you can use the `Either::factor_first` method to
+/// conveniently extract out the value at the end.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::{self, Either, Future, FutureExt, TryFuture, TryFutureExt};
+///
+/// // A poor-man's try_join implemented on top of select
+///
+/// fn try_join<A, B, E>(a: A, b: B) -> impl TryFuture<Ok=(A::Ok, B::Ok), Error=E>
+/// where A: TryFuture<Error = E> + Unpin + 'static,
+/// B: TryFuture<Error = E> + Unpin + 'static,
+/// E: 'static,
+/// {
+/// future::try_select(a, b).then(|res| -> Box<dyn Future<Output = Result<_, _>> + Unpin> {
+/// match res {
+/// Ok(Either::Left((x, b))) => Box::new(b.map_ok(move |y| (x, y))),
+/// Ok(Either::Right((y, a))) => Box::new(a.map_ok(move |x| (x, y))),
+/// Err(Either::Left((e, _))) => Box::new(future::err(e)),
+/// Err(Either::Right((e, _))) => Box::new(future::err(e)),
+/// }
+/// })
+/// }
+/// ```
+pub fn try_select<A, B>(future1: A, future2: B) -> TrySelect<A, B>
+where
+ A: TryFuture + Unpin,
+ B: TryFuture + Unpin,
+{
+ super::assert_future::<
+ Result<Either<(A::Ok, B), (B::Ok, A)>, Either<(A::Error, B), (B::Error, A)>>,
+ _,
+ >(TrySelect { inner: Some((future1, future2)) })
+}
+
+impl<A: Unpin, B: Unpin> Future for TrySelect<A, B>
+where
+ A: TryFuture,
+ B: TryFuture,
+{
+ #[allow(clippy::type_complexity)]
+ type Output = Result<Either<(A::Ok, B), (B::Ok, A)>, Either<(A::Error, B), (B::Error, A)>>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let (mut a, mut b) = self.inner.take().expect("cannot poll TrySelect twice");
+ match a.try_poll_unpin(cx) {
+ Poll::Ready(Err(x)) => Poll::Ready(Err(Either::Left((x, b)))),
+ Poll::Ready(Ok(x)) => Poll::Ready(Ok(Either::Left((x, b)))),
+ Poll::Pending => match b.try_poll_unpin(cx) {
+ Poll::Ready(Err(x)) => Poll::Ready(Err(Either::Right((x, a)))),
+ Poll::Ready(Ok(x)) => Poll::Ready(Ok(Either::Right((x, a)))),
+ Poll::Pending => {
+ self.inner = Some((a, b));
+ Poll::Pending
+ }
+ },
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/allow_std.rs b/vendor/futures-util/src/io/allow_std.rs
new file mode 100644
index 000000000..ec30ee31e
--- /dev/null
+++ b/vendor/futures-util/src/io/allow_std.rs
@@ -0,0 +1,200 @@
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, SeekFrom};
+use std::pin::Pin;
+use std::{fmt, io};
+
+/// A simple wrapper type which allows types that only implement
+/// `std::io::Read` or `std::io::Write`
+/// to be used in contexts which expect an `AsyncRead` or `AsyncWrite`.
+///
+/// If these types issue an error with the kind `io::ErrorKind::WouldBlock`,
+/// it is expected that they will notify the current task on readiness.
+/// Synchronous `std` types should not issue errors of this kind and
+/// are safe to use in this context. However, using these types with
+/// `AllowStdIo` will cause the event loop to block, so they should be used
+/// with care.
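+///
+/// # Examples
+///
+/// A minimal sketch wrapping an in-memory `Vec<u8>` writer:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{AllowStdIo, AsyncWriteExt};
+///
+/// let mut writer = AllowStdIo::new(Vec::new());
+/// writer.write_all(b"hello").await.unwrap();
+/// assert_eq!(writer.into_inner(), b"hello");
+/// # });
+/// ```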
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct AllowStdIo<T>(T);
+
+impl<T> Unpin for AllowStdIo<T> {}
+
+macro_rules! try_with_interrupt {
+ ($e:expr) => {
+ loop {
+ match $e {
+ Ok(e) => {
+ break e;
+ }
+ Err(ref e) if e.kind() == ::std::io::ErrorKind::Interrupted => {
+ continue;
+ }
+ Err(e) => {
+ return Poll::Ready(Err(e));
+ }
+ }
+ }
+ };
+}
+
+impl<T> AllowStdIo<T> {
+ /// Creates a new `AllowStdIo` from an existing IO object.
+ pub fn new(io: T) -> Self {
+ Self(io)
+ }
+
+ /// Returns a reference to the contained IO object.
+ pub fn get_ref(&self) -> &T {
+ &self.0
+ }
+
+ /// Returns a mutable reference to the contained IO object.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.0
+ }
+
+ /// Consumes self and returns the contained IO object.
+ pub fn into_inner(self) -> T {
+ self.0
+ }
+}
+
+impl<T> io::Write for AllowStdIo<T>
+where
+ T: io::Write,
+{
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.write(buf)
+ }
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ self.0.write_vectored(bufs)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.0.flush()
+ }
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.0.write_all(buf)
+ }
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
+ self.0.write_fmt(fmt)
+ }
+}
+
+impl<T> AsyncWrite for AllowStdIo<T>
+where
+ T: io::Write,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(try_with_interrupt!(self.0.write(buf))))
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(try_with_interrupt!(self.0.write_vectored(bufs))))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ try_with_interrupt!(self.0.flush());
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+}
+
+impl<T> io::Read for AllowStdIo<T>
+where
+ T: io::Read,
+{
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
+ self.0.read_vectored(bufs)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ self.0.read_to_end(buf)
+ }
+ fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
+ self.0.read_to_string(buf)
+ }
+ fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+ self.0.read_exact(buf)
+ }
+}
+
+impl<T> AsyncRead for AllowStdIo<T>
+where
+ T: io::Read,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(try_with_interrupt!(self.0.read(buf))))
+ }
+
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(try_with_interrupt!(self.0.read_vectored(bufs))))
+ }
+}
+
+impl<T> io::Seek for AllowStdIo<T>
+where
+ T: io::Seek,
+{
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ self.0.seek(pos)
+ }
+}
+
+impl<T> AsyncSeek for AllowStdIo<T>
+where
+ T: io::Seek,
+{
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ Poll::Ready(Ok(try_with_interrupt!(self.0.seek(pos))))
+ }
+}
+
+impl<T> io::BufRead for AllowStdIo<T>
+where
+ T: io::BufRead,
+{
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ self.0.fill_buf()
+ }
+ fn consume(&mut self, amt: usize) {
+ self.0.consume(amt)
+ }
+}
+
+impl<T> AsyncBufRead for AllowStdIo<T>
+where
+ T: io::BufRead,
+{
+ fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let this: *mut Self = &mut *self as *mut _;
+ Poll::Ready(Ok(try_with_interrupt!(unsafe { &mut *this }.0.fill_buf())))
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ self.0.consume(amt)
+ }
+}
diff --git a/vendor/futures-util/src/io/buf_reader.rs b/vendor/futures-util/src/io/buf_reader.rs
new file mode 100644
index 000000000..0334a9f08
--- /dev/null
+++ b/vendor/futures-util/src/io/buf_reader.rs
@@ -0,0 +1,263 @@
+use super::DEFAULT_BUF_SIZE;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSliceMut, SeekFrom};
+use pin_project_lite::pin_project;
+use std::io::{self, Read};
+use std::pin::Pin;
+use std::{cmp, fmt};
+
+pin_project! {
+ /// The `BufReader` struct adds buffering to any reader.
+ ///
+ /// It can be excessively inefficient to work directly with an [`AsyncRead`]
+ /// instance. A `BufReader` performs large, infrequent reads on the underlying
+ /// [`AsyncRead`] and maintains an in-memory buffer of the results.
+ ///
+ /// `BufReader` can improve the speed of programs that make *small* and
+ /// *repeated* read calls to the same file or network socket. It does not
+ /// help when reading very large amounts at once, or reading just one or a few
+ /// times. It also provides no advantage when reading from a source that is
+ /// already in memory, like a `Vec<u8>`.
+ ///
+ /// When the `BufReader` is dropped, the contents of its buffer will be
+ /// discarded. Creating multiple instances of a `BufReader` on the same
+ /// stream can cause data loss.
+ ///
+ /// [`AsyncRead`]: futures_io::AsyncRead
+ ///
+ /// # Examples
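+ ///
+ /// A minimal sketch reading through the buffer from an in-memory `Cursor`:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, BufReader, Cursor};
+ ///
+ /// let mut reader = BufReader::new(Cursor::new(b"hello"));
+ /// let mut contents = String::new();
+ /// reader.read_to_string(&mut contents).await.unwrap();
+ /// assert_eq!(contents, "hello");
+ /// # });
+ /// ```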
+ pub struct BufReader<R> {
+ #[pin]
+ inner: R,
+ buffer: Box<[u8]>,
+ pos: usize,
+ cap: usize,
+ }
+}
+
+impl<R: AsyncRead> BufReader<R> {
+ /// Creates a new `BufReader` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ pub fn new(inner: R) -> Self {
+ Self::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufReader` with the specified buffer capacity.
+ pub fn with_capacity(capacity: usize, inner: R) -> Self {
+ unsafe {
+ let mut buffer = Vec::with_capacity(capacity);
+ buffer.set_len(capacity);
+ super::initialize(&inner, &mut buffer);
+ Self { inner, buffer: buffer.into_boxed_slice(), pos: 0, cap: 0 }
+ }
+ }
+
+ delegate_access_inner!(inner, R, ());
+
+ /// Returns a reference to the internally buffered data.
+ ///
+ /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
+ pub fn buffer(&self) -> &[u8] {
+ &self.buffer[self.pos..self.cap]
+ }
+
+ /// Invalidates all data in the internal buffer.
+ #[inline]
+ fn discard_buffer(self: Pin<&mut Self>) {
+ let this = self.project();
+ *this.pos = 0;
+ *this.cap = 0;
+ }
+}
+
+impl<R: AsyncRead + AsyncSeek> BufReader<R> {
+ /// Seeks relative to the current position. If the new position lies within the buffer,
+ /// the buffer will not be flushed, allowing for more efficient seeks.
+ /// This method does not return the location of the underlying reader, so the caller
+ /// must track this information themselves if it is required.
+ pub fn seek_relative(self: Pin<&mut Self>, offset: i64) -> SeeKRelative<'_, R> {
+ SeeKRelative { inner: self, offset, first: true }
+ }
+
+ /// Attempts to seek relative to the current position. If the new position lies within the buffer,
+ /// the buffer will not be flushed, allowing for more efficient seeks.
+ /// This method does not return the location of the underlying reader, so the caller
+ /// must track this information themselves if it is required.
+ pub fn poll_seek_relative(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ offset: i64,
+ ) -> Poll<io::Result<()>> {
+ let pos = self.pos as u64;
+ if offset < 0 {
+ if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
+ *self.project().pos = new_pos as usize;
+ return Poll::Ready(Ok(()));
+ }
+ } else if let Some(new_pos) = pos.checked_add(offset as u64) {
+ if new_pos <= self.cap as u64 {
+ *self.project().pos = new_pos as usize;
+ return Poll::Ready(Ok(()));
+ }
+ }
+ self.poll_seek(cx, SeekFrom::Current(offset)).map(|res| res.map(|_| ()))
+ }
+}
+
+impl<R: AsyncRead> AsyncRead for BufReader<R> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ // If we don't have any buffered data and we're doing a massive read
+ // (larger than our internal buffer), bypass our internal buffer
+ // entirely.
+ if self.pos == self.cap && buf.len() >= self.buffer.len() {
+ let res = ready!(self.as_mut().project().inner.poll_read(cx, buf));
+ self.discard_buffer();
+ return Poll::Ready(res);
+ }
+ let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?;
+ let nread = rem.read(buf)?;
+ self.consume(nread);
+ Poll::Ready(Ok(nread))
+ }
+
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
+ if self.pos == self.cap && total_len >= self.buffer.len() {
+ let res = ready!(self.as_mut().project().inner.poll_read_vectored(cx, bufs));
+ self.discard_buffer();
+ return Poll::Ready(res);
+ }
+ let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?;
+ let nread = rem.read_vectored(bufs)?;
+ self.consume(nread);
+ Poll::Ready(Ok(nread))
+ }
+}
+
+impl<R: AsyncRead> AsyncBufRead for BufReader<R> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let this = self.project();
+
+ // If we've reached the end of our internal buffer then we need to fetch
+ // some more data from the underlying reader.
+ // Branch using `>=` instead of the more correct `==`
+ // to tell the compiler that the pos..cap slice is always valid.
+ if *this.pos >= *this.cap {
+ debug_assert!(*this.pos == *this.cap);
+ *this.cap = ready!(this.inner.poll_read(cx, this.buffer))?;
+ *this.pos = 0;
+ }
+ Poll::Ready(Ok(&this.buffer[*this.pos..*this.cap]))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ *self.project().pos = cmp::min(self.pos + amt, self.cap);
+ }
+}
+
+impl<R: AsyncWrite> AsyncWrite for BufReader<R> {
+ delegate_async_write!(inner);
+}
+
+impl<R: fmt::Debug> fmt::Debug for BufReader<R> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BufReader")
+ .field("reader", &self.inner)
+ .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buffer.len()))
+ .finish()
+ }
+}
+
+impl<R: AsyncRead + AsyncSeek> AsyncSeek for BufReader<R> {
+ /// Seek to an offset, in bytes, in the underlying reader.
+ ///
+ /// The position used for seeking with `SeekFrom::Current(_)` is the
+ /// position the underlying reader would be at if the `BufReader` had no
+ /// internal buffer.
+ ///
+ /// Seeking always discards the internal buffer, even if the seek position
+ /// would otherwise fall within it. This guarantees that calling
+ /// `.into_inner()` immediately after a seek yields the underlying reader
+ /// at the same position.
+ ///
+ /// To seek without discarding the internal buffer, use
+ /// [`BufReader::seek_relative`](BufReader::seek_relative) or
+ /// [`BufReader::poll_seek_relative`](BufReader::poll_seek_relative).
+ ///
+ /// See [`AsyncSeek`](futures_io::AsyncSeek) for more details.
+ ///
+ /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
+ /// where `n` minus the internal buffer length overflows an `i64`, two
+ /// seeks will be performed instead of one. If the second seek returns
+ /// `Err`, the underlying reader will be left at the same position it would
+ /// have if you called `seek` with `SeekFrom::Current(0)`.
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ let result: u64;
+ if let SeekFrom::Current(n) = pos {
+ let remainder = (self.cap - self.pos) as i64;
+ // it should be safe to assume that remainder fits within an i64 as the alternative
+ // means we managed to allocate 8 exbibytes and that's absurd.
+ // But it's not out of the realm of possibility for some weird underlying reader to
+ // support seeking by i64::min_value() so we need to handle underflow when subtracting
+ // remainder.
+ if let Some(offset) = n.checked_sub(remainder) {
+ result =
+ ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(offset)))?;
+ } else {
+ // seek backwards by our remainder, and then by the offset
+ ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(-remainder)))?;
+ self.as_mut().discard_buffer();
+ result = ready!(self.as_mut().project().inner.poll_seek(cx, SeekFrom::Current(n)))?;
+ }
+ } else {
+ // Seeking with Start/End doesn't care about our buffer length.
+ result = ready!(self.as_mut().project().inner.poll_seek(cx, pos))?;
+ }
+ self.discard_buffer();
+ Poll::Ready(Ok(result))
+ }
+}
+
+/// Future for the [`BufReader::seek_relative`](self::BufReader::seek_relative) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SeeKRelative<'a, R> {
+ inner: Pin<&'a mut BufReader<R>>,
+ offset: i64,
+ first: bool,
+}
+
+impl<R> Future for SeeKRelative<'_, R>
+where
+ R: AsyncRead + AsyncSeek,
+{
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let offset = self.offset;
+ if self.first {
+ self.first = false;
+ self.inner.as_mut().poll_seek_relative(cx, offset)
+ } else {
+ self.inner
+ .as_mut()
+ .as_mut()
+ .poll_seek(cx, SeekFrom::Current(offset))
+ .map(|res| res.map(|_| ()))
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/buf_writer.rs b/vendor/futures-util/src/io/buf_writer.rs
new file mode 100644
index 000000000..cb74863ad
--- /dev/null
+++ b/vendor/futures-util/src/io/buf_writer.rs
@@ -0,0 +1,224 @@
+use super::DEFAULT_BUF_SIZE;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
+use pin_project_lite::pin_project;
+use std::fmt;
+use std::io::{self, Write};
+use std::pin::Pin;
+use std::ptr;
+
+pin_project! {
+ /// Wraps a writer and buffers its output.
+ ///
+ /// It can be excessively inefficient to work directly with something that
+ /// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
+ /// writes it to an underlying writer in large, infrequent batches.
+ ///
+ /// `BufWriter` can improve the speed of programs that make *small* and
+ /// *repeated* write calls to the same file or network socket. It does not
+ /// help when writing very large amounts at once, or writing just one or a few
+ /// times. It also provides no advantage when writing to a destination that is
+ /// in memory, like a `Vec<u8>`.
+ ///
+ /// When the `BufWriter` is dropped, the contents of its buffer will be
+ /// discarded. Creating multiple instances of a `BufWriter` on the same
+ /// stream can cause data loss. If you need to write out the contents of its
+ /// buffer, you must manually call [`flush`] before the writer is dropped.
+ ///
+ /// [`AsyncWrite`]: futures_io::AsyncWrite
+ /// [`flush`]: super::AsyncWriteExt::flush
+ ///
+ // TODO: Examples
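+ /// In the meantime, a minimal usage sketch (assuming an in-memory `Cursor`
+ /// as the underlying writer):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncWriteExt, BufWriter, Cursor};
+ ///
+ /// let mut writer = BufWriter::new(Cursor::new(Vec::new()));
+ ///
+ /// writer.write_all(&[1, 2, 3]).await?;
+ /// writer.flush().await?; // without this, the buffered bytes would be dropped
+ ///
+ /// assert_eq!(writer.into_inner().into_inner(), [1, 2, 3]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```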
+ pub struct BufWriter<W> {
+ #[pin]
+ inner: W,
+ buf: Vec<u8>,
+ written: usize,
+ }
+}
+
+impl<W: AsyncWrite> BufWriter<W> {
+ /// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
+ /// but may change in the future.
+ pub fn new(inner: W) -> Self {
+ Self::with_capacity(DEFAULT_BUF_SIZE, inner)
+ }
+
+ /// Creates a new `BufWriter` with the specified buffer capacity.
+ pub fn with_capacity(cap: usize, inner: W) -> Self {
+ Self { inner, buf: Vec::with_capacity(cap), written: 0 }
+ }
+
+ pub(super) fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let mut this = self.project();
+
+ let len = this.buf.len();
+ let mut ret = Ok(());
+ while *this.written < len {
+ match ready!(this.inner.as_mut().poll_write(cx, &this.buf[*this.written..])) {
+ Ok(0) => {
+ ret = Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "failed to write the buffered data",
+ ));
+ break;
+ }
+ Ok(n) => *this.written += n,
+ Err(e) => {
+ ret = Err(e);
+ break;
+ }
+ }
+ }
+ if *this.written > 0 {
+ this.buf.drain(..*this.written);
+ }
+ *this.written = 0;
+ Poll::Ready(ret)
+ }
+
+ delegate_access_inner!(inner, W, ());
+
+ /// Returns a reference to the internally buffered data.
+ pub fn buffer(&self) -> &[u8] {
+ &self.buf
+ }
+
+ /// Capacity of `buf`, i.e. how many bytes the buffer can hold.
+ pub(super) fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Remaining number of bytes before `buf` reaches its capacity.
+ #[inline]
+ pub(super) fn spare_capacity(&self) -> usize {
+ self.buf.capacity() - self.buf.len()
+ }
+
+ /// Writes a byte slice directly into the buffer.
+ ///
+ /// The number of bytes written is capped at `spare_capacity()`, so callers should
+ /// size the slice accordingly to avoid losing bytes.
+ ///
+ /// Based on `std::io::BufWriter`
+ pub(super) fn write_to_buf(self: Pin<&mut Self>, buf: &[u8]) -> usize {
+ let available = self.spare_capacity();
+ let amt_to_buffer = available.min(buf.len());
+
+ // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
+ unsafe {
+ self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
+ }
+
+ amt_to_buffer
+ }
+
+ /// Writes a byte slice directly into `self.buf`.
+ ///
+ /// Based on `std::io::BufWriter`
+ #[inline]
+ unsafe fn write_to_buffer_unchecked(self: Pin<&mut Self>, buf: &[u8]) {
+ debug_assert!(buf.len() <= self.spare_capacity());
+ let this = self.project();
+ let old_len = this.buf.len();
+ let buf_len = buf.len();
+ let src = buf.as_ptr();
+ let dst = this.buf.as_mut_ptr().add(old_len);
+ ptr::copy_nonoverlapping(src, dst, buf_len);
+ this.buf.set_len(old_len + buf_len);
+ }
+
+ /// Write directly using `inner`, bypassing buffering
+ pub(super) fn inner_poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_write(cx, buf)
+ }
+
+ /// Write directly using `inner`, bypassing buffering
+ pub(super) fn inner_poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_write_vectored(cx, bufs)
+ }
+}
+
+impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.buf.len() + buf.len() > self.buf.capacity() {
+ ready!(self.as_mut().flush_buf(cx))?;
+ }
+ if buf.len() >= self.buf.capacity() {
+ self.project().inner.poll_write(cx, buf)
+ } else {
+ Poll::Ready(self.project().buf.write(buf))
+ }
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
+ if self.buf.len() + total_len > self.buf.capacity() {
+ ready!(self.as_mut().flush_buf(cx))?;
+ }
+ if total_len >= self.buf.capacity() {
+ self.project().inner.poll_write_vectored(cx, bufs)
+ } else {
+ Poll::Ready(self.project().buf.write_vectored(bufs))
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ ready!(self.as_mut().flush_buf(cx))?;
+ self.project().inner.poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ ready!(self.as_mut().flush_buf(cx))?;
+ self.project().inner.poll_close(cx)
+ }
+}
+
+impl<W: AsyncRead> AsyncRead for BufWriter<W> {
+ delegate_async_read!(inner);
+}
+
+impl<W: AsyncBufRead> AsyncBufRead for BufWriter<W> {
+ delegate_async_buf_read!(inner);
+}
+
+impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BufWriter")
+ .field("writer", &self.inner)
+ .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
+ .field("written", &self.written)
+ .finish()
+ }
+}
+
+impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
+ /// Seek to the offset, in bytes, in the underlying writer.
+ ///
+ /// Seeking always writes out the internal buffer before seeking.
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ ready!(self.as_mut().flush_buf(cx))?;
+ self.project().inner.poll_seek(cx, pos)
+ }
+}
diff --git a/vendor/futures-util/src/io/chain.rs b/vendor/futures-util/src/io/chain.rs
new file mode 100644
index 000000000..728a3d2dc
--- /dev/null
+++ b/vendor/futures-util/src/io/chain.rs
@@ -0,0 +1,142 @@
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead, IoSliceMut};
+use pin_project_lite::pin_project;
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+
+pin_project! {
+ /// Reader for the [`chain`](super::AsyncReadExt::chain) method.
+ #[must_use = "readers do nothing unless polled"]
+ pub struct Chain<T, U> {
+ #[pin]
+ first: T,
+ #[pin]
+ second: U,
+ done_first: bool,
+ }
+}
+
+impl<T, U> Chain<T, U>
+where
+ T: AsyncRead,
+ U: AsyncRead,
+{
+ pub(super) fn new(first: T, second: U) -> Self {
+ Self { first, second, done_first: false }
+ }
+
+ /// Gets references to the underlying readers in this `Chain`.
+ pub fn get_ref(&self) -> (&T, &U) {
+ (&self.first, &self.second)
+ }
+
+ /// Gets mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ pub fn get_mut(&mut self) -> (&mut T, &mut U) {
+ (&mut self.first, &mut self.second)
+ }
+
+ /// Gets pinned mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) {
+ let this = self.project();
+ (this.first, this.second)
+ }
+
+ /// Consumes the `Chain`, returning the wrapped readers.
+ pub fn into_inner(self) -> (T, U) {
+ (self.first, self.second)
+ }
+}
+
+impl<T, U> fmt::Debug for Chain<T, U>
+where
+ T: fmt::Debug,
+ U: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Chain")
+ .field("t", &self.first)
+ .field("u", &self.second)
+ .field("done_first", &self.done_first)
+ .finish()
+ }
+}
+
+impl<T, U> AsyncRead for Chain<T, U>
+where
+ T: AsyncRead,
+ U: AsyncRead,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ let this = self.project();
+
+ if !*this.done_first {
+ match ready!(this.first.poll_read(cx, buf)?) {
+ 0 if !buf.is_empty() => *this.done_first = true,
+ n => return Poll::Ready(Ok(n)),
+ }
+ }
+ this.second.poll_read(cx, buf)
+ }
+
+ fn poll_read_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let this = self.project();
+
+ if !*this.done_first {
+ let n = ready!(this.first.poll_read_vectored(cx, bufs)?);
+ if n == 0 && bufs.iter().any(|b| !b.is_empty()) {
+ *this.done_first = true
+ } else {
+ return Poll::Ready(Ok(n));
+ }
+ }
+ this.second.poll_read_vectored(cx, bufs)
+ }
+}
+
+impl<T, U> AsyncBufRead for Chain<T, U>
+where
+ T: AsyncBufRead,
+ U: AsyncBufRead,
+{
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let this = self.project();
+
+ if !*this.done_first {
+ match ready!(this.first.poll_fill_buf(cx)?) {
+ buf if buf.is_empty() => {
+ *this.done_first = true;
+ }
+ buf => return Poll::Ready(Ok(buf)),
+ }
+ }
+ this.second.poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ let this = self.project();
+
+ if !*this.done_first {
+ this.first.consume(amt)
+ } else {
+ this.second.consume(amt)
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/close.rs b/vendor/futures-util/src/io/close.rs
new file mode 100644
index 000000000..b94459279
--- /dev/null
+++ b/vendor/futures-util/src/io/close.rs
@@ -0,0 +1,28 @@
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`close`](super::AsyncWriteExt::close) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Close<'a, W: ?Sized> {
+ writer: &'a mut W,
+}
+
+impl<W: ?Sized + Unpin> Unpin for Close<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> Close<'a, W> {
+ pub(super) fn new(writer: &'a mut W) -> Self {
+ Self { writer }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized + Unpin> Future for Close<'_, W> {
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut *self.writer).poll_close(cx)
+ }
+}
diff --git a/vendor/futures-util/src/io/copy.rs b/vendor/futures-util/src/io/copy.rs
new file mode 100644
index 000000000..c80add271
--- /dev/null
+++ b/vendor/futures-util/src/io/copy.rs
@@ -0,0 +1,58 @@
+use super::{copy_buf, BufReader, CopyBuf};
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncRead, AsyncWrite};
+use pin_project_lite::pin_project;
+use std::io;
+use std::pin::Pin;
+
+/// Creates a future which copies all the bytes from one object to another.
+///
+/// The returned future will copy all the bytes read from this `AsyncRead` into the
+/// `writer` specified. This future will only complete once the `reader` has hit
+/// EOF and all bytes have been written to and flushed from the `writer`
+/// provided.
+///
+/// On success the number of bytes is returned.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{self, AsyncWriteExt, Cursor};
+///
+/// let reader = Cursor::new([1, 2, 3, 4]);
+/// let mut writer = Cursor::new(vec![0u8; 5]);
+///
+/// let bytes = io::copy(reader, &mut writer).await?;
+/// writer.close().await?;
+///
+/// assert_eq!(bytes, 4);
+/// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]);
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```
+pub fn copy<R, W>(reader: R, writer: &mut W) -> Copy<'_, R, W>
+where
+ R: AsyncRead,
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ Copy { inner: copy_buf(BufReader::new(reader), writer) }
+}
+
+pin_project! {
+ /// Future for the [`copy()`] function.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Copy<'a, R, W: ?Sized> {
+ #[pin]
+ inner: CopyBuf<'a, BufReader<R>, W>,
+ }
+}
+
+impl<R: AsyncRead, W: AsyncWrite + Unpin + ?Sized> Future for Copy<'_, R, W> {
+ type Output = io::Result<u64>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.project().inner.poll(cx)
+ }
+}
diff --git a/vendor/futures-util/src/io/copy_buf.rs b/vendor/futures-util/src/io/copy_buf.rs
new file mode 100644
index 000000000..50f7abdca
--- /dev/null
+++ b/vendor/futures-util/src/io/copy_buf.rs
@@ -0,0 +1,78 @@
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncWrite};
+use pin_project_lite::pin_project;
+use std::io;
+use std::pin::Pin;
+
+/// Creates a future which copies all the bytes from one object to another.
+///
+/// The returned future will copy all the bytes read from this `AsyncBufRead` into the
+/// `writer` specified. This future will only complete once the `reader` has hit
+/// EOF and all bytes have been written to and flushed from the `writer`
+/// provided.
+///
+/// On success the number of bytes is returned.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{self, AsyncWriteExt, Cursor};
+///
+/// let reader = Cursor::new([1, 2, 3, 4]);
+/// let mut writer = Cursor::new(vec![0u8; 5]);
+///
+/// let bytes = io::copy_buf(reader, &mut writer).await?;
+/// writer.close().await?;
+///
+/// assert_eq!(bytes, 4);
+/// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]);
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```
+pub fn copy_buf<R, W>(reader: R, writer: &mut W) -> CopyBuf<'_, R, W>
+where
+ R: AsyncBufRead,
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ CopyBuf { reader, writer, amt: 0 }
+}
+
+pin_project! {
+ /// Future for the [`copy_buf()`] function.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct CopyBuf<'a, R, W: ?Sized> {
+ #[pin]
+ reader: R,
+ writer: &'a mut W,
+ amt: u64,
+ }
+}
+
+impl<R, W> Future for CopyBuf<'_, R, W>
+where
+ R: AsyncBufRead,
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<u64>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+ loop {
+ let buffer = ready!(this.reader.as_mut().poll_fill_buf(cx))?;
+ if buffer.is_empty() {
+ ready!(Pin::new(&mut this.writer).poll_flush(cx))?;
+ return Poll::Ready(Ok(*this.amt));
+ }
+
+ let i = ready!(Pin::new(&mut this.writer).poll_write(cx, buffer))?;
+ if i == 0 {
+ return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+ }
+ *this.amt += i as u64;
+ this.reader.as_mut().consume(i);
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/cursor.rs b/vendor/futures-util/src/io/cursor.rs
new file mode 100644
index 000000000..b6fb3724c
--- /dev/null
+++ b/vendor/futures-util/src/io/cursor.rs
@@ -0,0 +1,240 @@
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "read_initializer")]
+use futures_io::Initializer;
+use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite, IoSlice, IoSliceMut, SeekFrom};
+use std::io;
+use std::pin::Pin;
+
+/// A `Cursor` wraps an in-memory buffer and provides it with a
+/// [`AsyncSeek`] implementation.
+///
+/// `Cursor`s are used with in-memory buffers, anything implementing
+/// `AsRef<[u8]>`, to allow them to implement [`AsyncRead`] and/or [`AsyncWrite`],
+/// allowing these buffers to be used anywhere you might use a reader or writer
+/// that does actual I/O.
+///
+/// This library implements some I/O traits on various types which
+/// are commonly used as a buffer, like `Cursor<`[`Vec`]`<u8>>` and
+/// `Cursor<`[`&[u8]`][bytes]`>`.
+///
+/// [`AsyncSeek`]: trait.AsyncSeek.html
+/// [`AsyncRead`]: trait.AsyncRead.html
+/// [`AsyncWrite`]: trait.AsyncWrite.html
+/// [bytes]: https://doc.rust-lang.org/std/primitive.slice.html
+#[derive(Clone, Debug, Default)]
+pub struct Cursor<T> {
+ inner: io::Cursor<T>,
+}
+
+impl<T> Cursor<T> {
+ /// Creates a new cursor wrapping the provided underlying in-memory buffer.
+ ///
+ /// The cursor's initial position is `0` even if the underlying buffer (e.g., `Vec`)
+ /// is not empty. So writing to the cursor starts with overwriting the `Vec`'s
+ /// content, not with appending to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ /// ```
+ pub fn new(inner: T) -> Self {
+ Self { inner: io::Cursor::new(inner) }
+ }
+
+ /// Consumes this cursor, returning the underlying value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let vec = buff.into_inner();
+ /// ```
+ pub fn into_inner(self) -> T {
+ self.inner.into_inner()
+ }
+
+ /// Gets a reference to the underlying value in this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::io::Cursor;
+ ///
+ /// let buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_ref();
+ /// ```
+ pub fn get_ref(&self) -> &T {
+ self.inner.get_ref()
+ }
+
+ /// Gets a mutable reference to the underlying value in this cursor.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying value as it may corrupt this cursor's position.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(Vec::new());
+ /// # fn force_inference(_: &Cursor<Vec<u8>>) {}
+ /// # force_inference(&buff);
+ ///
+ /// let reference = buff.get_mut();
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ self.inner.get_mut()
+ }
+
+ /// Returns the current position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncSeekExt, Cursor, SeekFrom};
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.seek(SeekFrom::Current(2)).await?;
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.seek(SeekFrom::Current(-1)).await?;
+ /// assert_eq!(buff.position(), 1);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ pub fn position(&self) -> u64 {
+ self.inner.position()
+ }
+
+ /// Sets the position of this cursor.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::io::Cursor;
+ ///
+ /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(buff.position(), 0);
+ ///
+ /// buff.set_position(2);
+ /// assert_eq!(buff.position(), 2);
+ ///
+ /// buff.set_position(4);
+ /// assert_eq!(buff.position(), 4);
+ /// ```
+ pub fn set_position(&mut self, pos: u64) {
+ self.inner.set_position(pos)
+ }
+}
+
+impl<T> AsyncSeek for Cursor<T>
+where
+ T: AsRef<[u8]> + Unpin,
+{
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ Poll::Ready(io::Seek::seek(&mut self.inner, pos))
+ }
+}
+
+impl<T: AsRef<[u8]> + Unpin> AsyncRead for Cursor<T> {
+ #[cfg(feature = "read_initializer")]
+ #[inline]
+ unsafe fn initializer(&self) -> Initializer {
+ io::Read::initializer(&self.inner)
+ }
+
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Read::read(&mut self.inner, buf))
+ }
+
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Read::read_vectored(&mut self.inner, bufs))
+ }
+}
+
+impl<T> AsyncBufRead for Cursor<T>
+where
+ T: AsRef<[u8]> + Unpin,
+{
+ fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ Poll::Ready(io::BufRead::fill_buf(&mut self.get_mut().inner))
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ io::BufRead::consume(&mut self.inner, amt)
+ }
+}
+
+macro_rules! delegate_async_write_to_stdio {
+ () => {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write(&mut self.inner, buf))
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(io::Write::write_vectored(&mut self.inner, bufs))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(io::Write::flush(&mut self.inner))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.poll_flush(cx)
+ }
+ };
+}
+
+impl AsyncWrite for Cursor<&mut [u8]> {
+ delegate_async_write_to_stdio!();
+}
+
+impl AsyncWrite for Cursor<&mut Vec<u8>> {
+ delegate_async_write_to_stdio!();
+}
+
+impl AsyncWrite for Cursor<Vec<u8>> {
+ delegate_async_write_to_stdio!();
+}
+
+impl AsyncWrite for Cursor<Box<[u8]>> {
+ delegate_async_write_to_stdio!();
+}
diff --git a/vendor/futures-util/src/io/empty.rs b/vendor/futures-util/src/io/empty.rs
new file mode 100644
index 000000000..02f6103f5
--- /dev/null
+++ b/vendor/futures-util/src/io/empty.rs
@@ -0,0 +1,59 @@
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead};
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+
+/// Reader for the [`empty()`] function.
+#[must_use = "readers do nothing unless polled"]
+pub struct Empty {
+ _priv: (),
+}
+
+/// Constructs a new handle to an empty reader.
+///
+/// All reads from the returned reader will return `Poll::Ready(Ok(0))`.
+///
+/// # Examples
+///
+/// A slightly sad example of not reading anything into a buffer:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{self, AsyncReadExt};
+///
+/// let mut buffer = String::new();
+/// let mut reader = io::empty();
+/// reader.read_to_string(&mut buffer).await?;
+/// assert!(buffer.is_empty());
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```
+pub fn empty() -> Empty {
+ Empty { _priv: () }
+}
+
+impl AsyncRead for Empty {
+ #[inline]
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ _: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(0))
+ }
+}
+
+impl AsyncBufRead for Empty {
+ #[inline]
+ fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ Poll::Ready(Ok(&[]))
+ }
+ #[inline]
+ fn consume(self: Pin<&mut Self>, _: usize) {}
+}
+
+impl fmt::Debug for Empty {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Empty { .. }")
+ }
+}
diff --git a/vendor/futures-util/src/io/fill_buf.rs b/vendor/futures-util/src/io/fill_buf.rs
new file mode 100644
index 000000000..a1484c032
--- /dev/null
+++ b/vendor/futures-util/src/io/fill_buf.rs
@@ -0,0 +1,51 @@
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncBufRead;
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`fill_buf`](super::AsyncBufReadExt::fill_buf) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct FillBuf<'a, R: ?Sized> {
+ reader: Option<&'a mut R>,
+}
+
+impl<R: ?Sized> Unpin for FillBuf<'_, R> {}
+
+impl<'a, R: AsyncBufRead + ?Sized + Unpin> FillBuf<'a, R> {
+ pub(super) fn new(reader: &'a mut R) -> Self {
+ Self { reader: Some(reader) }
+ }
+}
+
+impl<'a, R> Future for FillBuf<'a, R>
+where
+ R: AsyncBufRead + ?Sized + Unpin,
+{
+ type Output = io::Result<&'a [u8]>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ let reader = this.reader.take().expect("Polled FillBuf after completion");
+
+ match Pin::new(&mut *reader).poll_fill_buf(cx) {
+ // With polonius it is possible to remove this inner match and just have the correct
+ // lifetime of the reference inferred based on which branch is taken
+ Poll::Ready(Ok(_)) => match Pin::new(reader).poll_fill_buf(cx) {
+ Poll::Ready(Ok(slice)) => Poll::Ready(Ok(slice)),
+ Poll::Ready(Err(err)) => {
+ unreachable!("reader indicated readiness but then returned an error: {:?}", err)
+ }
+ Poll::Pending => {
+ unreachable!("reader indicated readiness but then returned pending")
+ }
+ },
+ Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
+ Poll::Pending => {
+ this.reader = Some(reader);
+ Poll::Pending
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/flush.rs b/vendor/futures-util/src/io/flush.rs
new file mode 100644
index 000000000..b75d14c5d
--- /dev/null
+++ b/vendor/futures-util/src/io/flush.rs
@@ -0,0 +1,31 @@
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`flush`](super::AsyncWriteExt::flush) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Flush<'a, W: ?Sized> {
+ writer: &'a mut W,
+}
+
+impl<W: ?Sized + Unpin> Unpin for Flush<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> Flush<'a, W> {
+ pub(super) fn new(writer: &'a mut W) -> Self {
+ Self { writer }
+ }
+}
+
+impl<W> Future for Flush<'_, W>
+where
+ W: AsyncWrite + ?Sized + Unpin,
+{
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut *self.writer).poll_flush(cx)
+ }
+}
diff --git a/vendor/futures-util/src/io/into_sink.rs b/vendor/futures-util/src/io/into_sink.rs
new file mode 100644
index 000000000..6a41ee226
--- /dev/null
+++ b/vendor/futures-util/src/io/into_sink.rs
@@ -0,0 +1,82 @@
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+use std::io;
+use std::pin::Pin;
+
+#[derive(Debug)]
+struct Block<Item> {
+ offset: usize,
+ bytes: Item,
+}
+
+pin_project! {
+ /// Sink for the [`into_sink`](super::AsyncWriteExt::into_sink) method.
+ #[must_use = "sinks do nothing unless polled"]
+ #[derive(Debug)]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ pub struct IntoSink<W, Item> {
+ #[pin]
+ writer: W,
+ // An outstanding block for us to push into the underlying writer, along with an offset of how
+ // far into this block we have written already.
+ buffer: Option<Block<Item>>,
+ }
+}
+
+impl<W: AsyncWrite, Item: AsRef<[u8]>> IntoSink<W, Item> {
+ pub(super) fn new(writer: W) -> Self {
+ Self { writer, buffer: None }
+ }
+
+ /// If we have an outstanding block in `buffer`, attempts to push it into the writer.
+ /// Does _not_ flush the writer after the block has been pushed.
+ fn poll_flush_buffer(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ let mut this = self.project();
+
+ if let Some(buffer) = this.buffer {
+ loop {
+ let bytes = buffer.bytes.as_ref();
+ let written = ready!(this.writer.as_mut().poll_write(cx, &bytes[buffer.offset..]))?;
+ buffer.offset += written;
+ if buffer.offset == bytes.len() {
+ break;
+ }
+ }
+ }
+ *this.buffer = None;
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<W: AsyncWrite, Item: AsRef<[u8]>> Sink<Item> for IntoSink<W, Item> {
+ type Error = io::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.poll_flush_buffer(cx))?;
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ debug_assert!(self.buffer.is_none());
+ *self.project().buffer = Some(Block { offset: 0, bytes: item });
+ Ok(())
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().poll_flush_buffer(cx))?;
+ ready!(self.project().writer.poll_flush(cx))?;
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().poll_flush_buffer(cx))?;
+ ready!(self.project().writer.poll_close(cx))?;
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/io/line_writer.rs b/vendor/futures-util/src/io/line_writer.rs
new file mode 100644
index 000000000..71cd66832
--- /dev/null
+++ b/vendor/futures-util/src/io/line_writer.rs
@@ -0,0 +1,155 @@
+use super::buf_writer::BufWriter;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use futures_io::IoSlice;
+use pin_project_lite::pin_project;
+use std::io;
+use std::pin::Pin;
+
+pin_project! {
+/// Wraps a writer, like [`BufWriter`] does, but prioritizes buffering lines.
+///
+/// This was written based on `std::io::LineWriter` which goes into further details
+/// explaining the code.
+///
+/// Buffering is done using an internal `BufWriter`. This type leverages `BufWriter`
+/// to write out data line by line.
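+///
+/// A rough usage sketch (not from the upstream docs), assuming an in-memory
+/// `Cursor` as the wrapped writer:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{AsyncWriteExt, Cursor, LineWriter};
+///
+/// let mut writer = LineWriter::new(Cursor::new(Vec::new()));
+///
+/// writer.write_all(b"one\ntwo").await?;
+/// // The completed line has reached the cursor; "two" is still buffered.
+/// assert_eq!(writer.get_ref().get_ref(), b"one\n");
+///
+/// writer.flush().await?;
+/// assert_eq!(writer.get_ref().get_ref(), b"one\ntwo");
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```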
+#[derive(Debug)]
+pub struct LineWriter<W: AsyncWrite> {
+ #[pin]
+ buf_writer: BufWriter<W>,
+}
+}
+
+impl<W: AsyncWrite> LineWriter<W> {
+ /// Creates a new `LineWriter` with a default buffer capacity. The default is currently 1 KB,
+ /// matching `std::io::LineWriter`.
+ pub fn new(inner: W) -> LineWriter<W> {
+ LineWriter::with_capacity(1024, inner)
+ }
+
+ /// Creates a new `LineWriter` with the specified buffer capacity.
+ pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
+ LineWriter { buf_writer: BufWriter::with_capacity(capacity, inner) }
+ }
+
+ /// Flushes `buf_writer` if the last byte in its buffer is a newline (`\n`).
+ fn flush_if_completed_line(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let this = self.project();
+ match this.buf_writer.buffer().last().copied() {
+ Some(b'\n') => this.buf_writer.flush_buf(cx),
+ _ => Poll::Ready(Ok(())),
+ }
+ }
+
+ /// Returns a reference to `buf_writer`'s internally buffered data.
+ pub fn buffer(&self) -> &[u8] {
+ self.buf_writer.buffer()
+ }
+
+ /// Acquires a reference to the underlying sink or stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &W {
+ self.buf_writer.get_ref()
+ }
+}
+
+impl<W: AsyncWrite> AsyncWrite for LineWriter<W> {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ let mut this = self.as_mut().project();
+ let newline_index = match memchr::memrchr(b'\n', buf) {
+ None => {
+ ready!(self.as_mut().flush_if_completed_line(cx)?);
+ return self.project().buf_writer.poll_write(cx, buf);
+ }
+ Some(newline_index) => newline_index + 1,
+ };
+
+ ready!(this.buf_writer.as_mut().poll_flush(cx)?);
+
+ let lines = &buf[..newline_index];
+
+ let flushed = { ready!(this.buf_writer.as_mut().inner_poll_write(cx, lines))? };
+
+ if flushed == 0 {
+ return Poll::Ready(Ok(0));
+ }
+
+ let tail = if flushed >= newline_index {
+ &buf[flushed..]
+ } else if newline_index - flushed <= this.buf_writer.capacity() {
+ &buf[flushed..newline_index]
+ } else {
+ let scan_area = &buf[flushed..];
+ let scan_area = &scan_area[..this.buf_writer.capacity()];
+ match memchr::memrchr(b'\n', scan_area) {
+ Some(newline_index) => &scan_area[..newline_index + 1],
+ None => scan_area,
+ }
+ };
+
+ let buffered = this.buf_writer.as_mut().write_to_buf(tail);
+ Poll::Ready(Ok(flushed + buffered))
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let mut this = self.as_mut().project();
+ // `is_write_vectored()` is handled in the original std code, but not in this crate
+ // see https://github.com/rust-lang/rust/issues/70436
+
+ let last_newline_buf_idx = bufs
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(i, buf)| memchr::memchr(b'\n', buf).map(|_| i));
+ let last_newline_buf_idx = match last_newline_buf_idx {
+ None => {
+ ready!(self.as_mut().flush_if_completed_line(cx)?);
+ return self.project().buf_writer.poll_write_vectored(cx, bufs);
+ }
+ Some(i) => i,
+ };
+
+ ready!(this.buf_writer.as_mut().poll_flush(cx)?);
+
+ let (lines, tail) = bufs.split_at(last_newline_buf_idx + 1);
+
+ let flushed = { ready!(this.buf_writer.as_mut().inner_poll_write_vectored(cx, lines))? };
+ if flushed == 0 {
+ return Poll::Ready(Ok(0));
+ }
+
+ let lines_len = lines.iter().map(|buf| buf.len()).sum();
+ if flushed < lines_len {
+ return Poll::Ready(Ok(flushed));
+ }
+
+ let buffered: usize = tail
+ .iter()
+ .filter(|buf| !buf.is_empty())
+ .map(|buf| this.buf_writer.as_mut().write_to_buf(buf))
+ .take_while(|&n| n > 0)
+ .sum();
+
+ Poll::Ready(Ok(flushed + buffered))
+ }
+
+ /// Forwards to `buf_writer`'s `BufWriter::poll_flush()`.
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.as_mut().project().buf_writer.poll_flush(cx)
+ }
+
+ /// Forwards to `buf_writer`'s `BufWriter::poll_close()`.
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ self.as_mut().project().buf_writer.poll_close(cx)
+ }
+}
diff --git a/vendor/futures-util/src/io/lines.rs b/vendor/futures-util/src/io/lines.rs
new file mode 100644
index 000000000..13e70df23
--- /dev/null
+++ b/vendor/futures-util/src/io/lines.rs
@@ -0,0 +1,47 @@
+use super::read_line::read_line_internal;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncBufRead;
+use pin_project_lite::pin_project;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+
+pin_project! {
+ /// Stream for the [`lines`](super::AsyncBufReadExt::lines) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Lines<R> {
+ #[pin]
+ reader: R,
+ buf: String,
+ bytes: Vec<u8>,
+ read: usize,
+ }
+}
+
+impl<R: AsyncBufRead> Lines<R> {
+ pub(super) fn new(reader: R) -> Self {
+ Self { reader, buf: String::new(), bytes: Vec::new(), read: 0 }
+ }
+}
+
+impl<R: AsyncBufRead> Stream for Lines<R> {
+ type Item = io::Result<String>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+ let n = ready!(read_line_internal(this.reader, cx, this.buf, this.bytes, this.read))?;
+ if n == 0 && this.buf.is_empty() {
+ return Poll::Ready(None);
+ }
+ if this.buf.ends_with('\n') {
+ this.buf.pop();
+ if this.buf.ends_with('\r') {
+ this.buf.pop();
+ }
+ }
+ Poll::Ready(Some(Ok(mem::replace(this.buf, String::new()))))
+ }
+}
diff --git a/vendor/futures-util/src/io/mod.rs b/vendor/futures-util/src/io/mod.rs
new file mode 100644
index 000000000..4dd2e029b
--- /dev/null
+++ b/vendor/futures-util/src/io/mod.rs
@@ -0,0 +1,838 @@
+//! Asynchronous I/O.
+//!
+//! This module is the asynchronous version of `std::io`. It defines four
+//! traits, [`AsyncRead`], [`AsyncWrite`], [`AsyncSeek`], and [`AsyncBufRead`],
+//! which mirror the `Read`, `Write`, `Seek`, and `BufRead` traits of the
+//! standard library. However, these traits integrate with the asynchronous
+//! task system, so that if an I/O object isn't ready for reading (or writing),
+//! the thread is not blocked, and instead the current task is queued to be
+//! woken when I/O is ready.
+//!
+//! In addition, the [`AsyncReadExt`], [`AsyncWriteExt`], [`AsyncSeekExt`], and
+//! [`AsyncBufReadExt`] extension traits offer a variety of useful combinators
+//! for operating with asynchronous I/O objects, including ways to work with
+//! them using futures, streams and sinks.
+//!
+//! This module is only available when the `std` feature of this
+//! library is activated, and it is activated by default.
+
+#[cfg(feature = "io-compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+use crate::compat::Compat;
+use crate::future::assert_future;
+use crate::stream::assert_stream;
+use std::{pin::Pin, ptr};
+
+// Re-export some types from `std::io` so that users don't have to deal
+// with conflicts when `use`ing `futures::io` and `std::io`.
+#[doc(no_inline)]
+pub use std::io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom};
+
+pub use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite};
+
+// used by `BufReader` and `BufWriter`
+// https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1
+const DEFAULT_BUF_SIZE: usize = 8 * 1024;
+
+/// Initializes a buffer if necessary.
+///
+/// A buffer is currently always initialized.
+#[inline]
+unsafe fn initialize<R: AsyncRead>(_reader: &R, buf: &mut [u8]) {
+ ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len())
+}
+
+mod allow_std;
+pub use self::allow_std::AllowStdIo;
+
+mod buf_reader;
+pub use self::buf_reader::{BufReader, SeeKRelative};
+
+mod buf_writer;
+pub use self::buf_writer::BufWriter;
+
+mod line_writer;
+pub use self::line_writer::LineWriter;
+
+mod chain;
+pub use self::chain::Chain;
+
+mod close;
+pub use self::close::Close;
+
+mod copy;
+pub use self::copy::{copy, Copy};
+
+mod copy_buf;
+pub use self::copy_buf::{copy_buf, CopyBuf};
+
+mod cursor;
+pub use self::cursor::Cursor;
+
+mod empty;
+pub use self::empty::{empty, Empty};
+
+mod fill_buf;
+pub use self::fill_buf::FillBuf;
+
+mod flush;
+pub use self::flush::Flush;
+
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+mod into_sink;
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub use self::into_sink::IntoSink;
+
+mod lines;
+pub use self::lines::Lines;
+
+mod read;
+pub use self::read::Read;
+
+mod read_vectored;
+pub use self::read_vectored::ReadVectored;
+
+mod read_exact;
+pub use self::read_exact::ReadExact;
+
+mod read_line;
+pub use self::read_line::ReadLine;
+
+mod read_to_end;
+pub use self::read_to_end::ReadToEnd;
+
+mod read_to_string;
+pub use self::read_to_string::ReadToString;
+
+mod read_until;
+pub use self::read_until::ReadUntil;
+
+mod repeat;
+pub use self::repeat::{repeat, Repeat};
+
+mod seek;
+pub use self::seek::Seek;
+
+mod sink;
+pub use self::sink::{sink, Sink};
+
+mod split;
+pub use self::split::{ReadHalf, ReuniteError, WriteHalf};
+
+mod take;
+pub use self::take::Take;
+
+mod window;
+pub use self::window::Window;
+
+mod write;
+pub use self::write::Write;
+
+mod write_vectored;
+pub use self::write_vectored::WriteVectored;
+
+mod write_all;
+pub use self::write_all::WriteAll;
+
+#[cfg(feature = "write-all-vectored")]
+mod write_all_vectored;
+#[cfg(feature = "write-all-vectored")]
+pub use self::write_all_vectored::WriteAllVectored;
+
+/// An extension trait which adds utility methods to `AsyncRead` types.
+pub trait AsyncReadExt: AsyncRead {
+ /// Creates an adaptor which will chain this stream with another.
+ ///
+ /// The returned `AsyncRead` instance will first read all bytes from this object
+ /// until EOF is encountered. Afterwards the output is equivalent to the
+ /// output of `next`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let reader1 = Cursor::new([1, 2, 3, 4]);
+ /// let reader2 = Cursor::new([5, 6, 7, 8]);
+ ///
+ /// let mut reader = reader1.chain(reader2);
+ /// let mut buffer = Vec::new();
+ ///
+ /// // read the value into a Vec.
+ /// reader.read_to_end(&mut buffer).await?;
+ /// assert_eq!(buffer, [1, 2, 3, 4, 5, 6, 7, 8]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn chain<R>(self, next: R) -> Chain<Self, R>
+ where
+ Self: Sized,
+ R: AsyncRead,
+ {
+ assert_read(Chain::new(self, next))
+ }
+
+ /// Tries to read some bytes directly into the given `buf` in an asynchronous
+ /// manner, returning a future type.
+ ///
+ /// The returned future will resolve to the number of bytes read once the read
+ /// operation is completed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let mut reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut output = [0u8; 5];
+ ///
+ /// let bytes = reader.read(&mut output[..]).await?;
+ ///
+ /// // This is only guaranteed to be 4 because `&[u8]` is a synchronous
+ /// // reader. In a real system you could get anywhere from 1 to
+ /// // `output.len()` bytes in a single read.
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(output, [1, 2, 3, 4, 0]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn read<'a>(&'a mut self, buf: &'a mut [u8]) -> Read<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(Read::new(self, buf))
+ }
+
+ /// Creates a future which will read from the `AsyncRead` into `bufs` using vectored
+ /// IO operations.
+ ///
+ /// The returned future will resolve to the number of bytes read once the read
+ /// operation is completed.
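+ ///
+ /// A brief sketch (not from the upstream docs) that reads into two buffers at
+ /// once from an in-memory reader:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor, IoSliceMut};
+ ///
+ /// let mut reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut first = [0u8; 2];
+ /// let mut second = [0u8; 2];
+ ///
+ /// let bufs = &mut [IoSliceMut::new(&mut first), IoSliceMut::new(&mut second)];
+ /// let bytes = reader.read_vectored(bufs).await?;
+ ///
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(first, [1, 2]);
+ /// assert_eq!(second, [3, 4]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```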
+ fn read_vectored<'a>(&'a mut self, bufs: &'a mut [IoSliceMut<'a>]) -> ReadVectored<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(ReadVectored::new(self, bufs))
+ }
+
+ /// Creates a future which will read exactly enough bytes to fill `buf`,
+ /// returning an error if end of file (EOF) is hit sooner.
+ ///
+ /// The returned future will resolve once the read operation is completed.
+ ///
+ /// In the case of an error the buffer and the object will be discarded, with
+ /// the error yielded.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let mut reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut output = [0u8; 4];
+ ///
+ /// reader.read_exact(&mut output).await?;
+ ///
+ /// assert_eq!(output, [1, 2, 3, 4]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ ///
+ /// ## EOF is hit before `buf` is filled
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{self, AsyncReadExt, Cursor};
+ ///
+ /// let mut reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut output = [0u8; 5];
+ ///
+ /// let result = reader.read_exact(&mut output).await;
+ ///
+ /// assert_eq!(result.unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
+ /// # });
+ /// ```
+ fn read_exact<'a>(&'a mut self, buf: &'a mut [u8]) -> ReadExact<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<()>, _>(ReadExact::new(self, buf))
+ }
+
+ /// Creates a future which will read all the bytes from this `AsyncRead`.
+ ///
+ /// On success the total number of bytes read is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let mut reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut output = Vec::with_capacity(4);
+ ///
+ /// let bytes = reader.read_to_end(&mut output).await?;
+ ///
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(output, vec![1, 2, 3, 4]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn read_to_end<'a>(&'a mut self, buf: &'a mut Vec<u8>) -> ReadToEnd<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(ReadToEnd::new(self, buf))
+ }
+
+ /// Creates a future which will read all the bytes from this `AsyncRead`.
+ ///
+ /// On success the total number of bytes read is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let mut reader = Cursor::new(&b"1234"[..]);
+ /// let mut buffer = String::with_capacity(4);
+ ///
+ /// let bytes = reader.read_to_string(&mut buffer).await?;
+ ///
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(buffer, String::from("1234"));
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn read_to_string<'a>(&'a mut self, buf: &'a mut String) -> ReadToString<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(ReadToString::new(self, buf))
+ }
+
+ /// Helper method for splitting this read/write object into two halves.
+ ///
+ /// The two halves returned implement the `AsyncRead` and `AsyncWrite`
+ /// traits, respectively.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{self, AsyncReadExt, Cursor};
+ ///
+ /// // Note that for `Cursor` the read and write halves share a single
+ /// // seek position. This may or may not be true for other types that
+ /// // implement both `AsyncRead` and `AsyncWrite`.
+ ///
+ /// let reader = Cursor::new([1, 2, 3, 4]);
+ /// let mut buffer = Cursor::new(vec![0, 0, 0, 0, 5, 6, 7, 8]);
+ /// let mut writer = Cursor::new(vec![0u8; 5]);
+ ///
+ /// {
+ /// let (buffer_reader, mut buffer_writer) = (&mut buffer).split();
+ /// io::copy(reader, &mut buffer_writer).await?;
+ /// io::copy(buffer_reader, &mut writer).await?;
+ /// }
+ ///
+ /// assert_eq!(buffer.into_inner(), [1, 2, 3, 4, 5, 6, 7, 8]);
+ /// assert_eq!(writer.into_inner(), [5, 6, 7, 8, 0]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn split(self) -> (ReadHalf<Self>, WriteHalf<Self>)
+ where
+ Self: AsyncWrite + Sized,
+ {
+ let (r, w) = split::split(self);
+ (assert_read(r), assert_write(w))
+ }
+
+ /// Creates an AsyncRead adapter which will read at most `limit` bytes
+ /// from the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let reader = Cursor::new(&b"12345678"[..]);
+ /// let mut buffer = [0; 5];
+ ///
+ /// let mut take = reader.take(4);
+ /// let n = take.read(&mut buffer).await?;
+ ///
+ /// assert_eq!(n, 4);
+ /// assert_eq!(&buffer, b"1234\0");
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn take(self, limit: u64) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ assert_read(Take::new(self, limit))
+ }
+
+ /// Wraps an [`AsyncRead`] in a compatibility wrapper that allows it to be
+ /// used as a futures 0.1 / tokio-io 0.1 `AsyncRead`. If the wrapped type
+ /// implements [`AsyncWrite`] as well, the result will also implement the
+ /// futures 0.1 / tokio 0.1 `AsyncWrite` trait.
+ ///
+ /// Requires the `io-compat` feature to be enabled.
+ #[cfg(feature = "io-compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+ fn compat(self) -> Compat<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ Compat::new(self)
+ }
+}
+
+impl<R: AsyncRead + ?Sized> AsyncReadExt for R {}
+
+/// An extension trait which adds utility methods to `AsyncWrite` types.
+pub trait AsyncWriteExt: AsyncWrite {
+ /// Creates a future which will entirely flush this `AsyncWrite`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AllowStdIo, AsyncWriteExt};
+ /// use std::io::{BufWriter, Cursor};
+ ///
+ /// let mut output = vec![0u8; 5];
+ ///
+ /// {
+ /// let writer = Cursor::new(&mut output);
+ /// let mut buffered = AllowStdIo::new(BufWriter::new(writer));
+ /// buffered.write_all(&[1, 2]).await?;
+ /// buffered.write_all(&[3, 4]).await?;
+ /// buffered.flush().await?;
+ /// }
+ ///
+ /// assert_eq!(output, [1, 2, 3, 4, 0]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn flush(&mut self) -> Flush<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<()>, _>(Flush::new(self))
+ }
+
+ /// Creates a future which will entirely close this `AsyncWrite`.
+ fn close(&mut self) -> Close<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<()>, _>(Close::new(self))
+ }
+
+ /// Creates a future which will write bytes from `buf` into the object.
+ ///
+ /// The returned future will resolve to the number of bytes written once the write
+ /// operation is completed.
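+ ///
+ /// # Examples
+ ///
+ /// A short sketch (not from the upstream docs); depending on the writer, a
+ /// single `write` may accept fewer bytes than requested:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncWriteExt, Cursor};
+ ///
+ /// let mut writer = Cursor::new(vec![0u8; 5]);
+ ///
+ /// let bytes = writer.write(&[1, 2, 3, 4]).await?;
+ ///
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```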
+ fn write<'a>(&'a mut self, buf: &'a [u8]) -> Write<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(Write::new(self, buf))
+ }
+
+ /// Creates a future which will write bytes from `bufs` into the object using vectored
+ /// IO operations.
+ ///
+ /// The returned future will resolve to the number of bytes written once the write
+ /// operation is completed.
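+ ///
+ /// # Examples
+ ///
+ /// A short sketch (not from the upstream docs) writing two slices at once into
+ /// an in-memory cursor:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncWriteExt, Cursor};
+ /// use std::io::IoSlice;
+ ///
+ /// let mut writer = Cursor::new(Vec::new());
+ /// let bufs = [IoSlice::new(&[1, 2]), IoSlice::new(&[3, 4])];
+ ///
+ /// let bytes = writer.write_vectored(&bufs).await?;
+ ///
+ /// assert_eq!(bytes, 4);
+ /// assert_eq!(writer.into_inner(), [1, 2, 3, 4]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```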
+ fn write_vectored<'a>(&'a mut self, bufs: &'a [IoSlice<'a>]) -> WriteVectored<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(WriteVectored::new(self, bufs))
+ }
+
+ /// Write data into this object.
+ ///
+ /// Creates a future that will write the entire contents of the buffer `buf` into
+ /// this `AsyncWrite`.
+ ///
+ /// The returned future will not complete until all the data has been written.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncWriteExt, Cursor};
+ ///
+ /// let mut writer = Cursor::new(vec![0u8; 5]);
+ ///
+ /// writer.write_all(&[1, 2, 3, 4]).await?;
+ ///
+ /// assert_eq!(writer.into_inner(), [1, 2, 3, 4, 0]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn write_all<'a>(&'a mut self, buf: &'a [u8]) -> WriteAll<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<()>, _>(WriteAll::new(self, buf))
+ }
+
+ /// Attempts to write multiple buffers into this writer.
+ ///
+ /// Creates a future that will write the entire contents of `bufs` into this
+ /// `AsyncWrite` using [vectored writes].
+ ///
+ /// The returned future will not complete until all the data has been
+ /// written.
+ ///
+ /// [vectored writes]: std::io::Write::write_vectored
+ ///
+ /// # Notes
+ ///
+ /// Unlike `io::Write::write_vectored`, this takes a *mutable* reference to
+ /// a slice of `IoSlice`s, not an immutable one. That's because we need to
+ /// modify the slice to keep track of the bytes already written.
+ ///
+ /// Once this future returns, the contents of `bufs` are unspecified, as
+ /// this depends on how many calls to `write_vectored` were necessary. It is
+ /// best to understand this function as taking ownership of `bufs` and to
+ /// not use `bufs` afterwards. The underlying buffers, to which the
+ /// `IoSlice`s point (but not the `IoSlice`s themselves), are unchanged and
+ /// can be reused.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::AsyncWriteExt;
+ /// use futures_util::io::Cursor;
+ /// use std::io::IoSlice;
+ ///
+ /// let mut writer = Cursor::new(Vec::new());
+ /// let bufs = &mut [
+ /// IoSlice::new(&[1]),
+ /// IoSlice::new(&[2, 3]),
+ /// IoSlice::new(&[4, 5, 6]),
+ /// ];
+ ///
+ /// writer.write_all_vectored(bufs).await?;
+ /// // Note: the contents of `bufs` is now unspecified, see the Notes section.
+ ///
+ /// assert_eq!(writer.into_inner(), &[1, 2, 3, 4, 5, 6]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ #[cfg(feature = "write-all-vectored")]
+ fn write_all_vectored<'a>(
+ &'a mut self,
+ bufs: &'a mut [IoSlice<'a>],
+ ) -> WriteAllVectored<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<()>, _>(WriteAllVectored::new(self, bufs))
+ }
+
+ /// Wraps an [`AsyncWrite`] in a compatibility wrapper that allows it to be
+ /// used as a futures 0.1 / tokio-io 0.1 `AsyncWrite`.
+ /// Requires the `io-compat` feature to be enabled.
+ #[cfg(feature = "io-compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))]
+ fn compat_write(self) -> Compat<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ Compat::new(self)
+ }
+
+ /// Allow using an [`AsyncWrite`] as a [`Sink`](futures_sink::Sink)`<Item: AsRef<[u8]>>`.
+ ///
+ /// This adapter produces a sink that will write each value passed to it
+ /// into the underlying writer.
+ ///
+ /// Note that this function consumes the given writer, returning a wrapped
+ /// version.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::AsyncWriteExt;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok([1, 2, 3]), Ok([4, 5, 6])]);
+ ///
+ /// let mut writer = vec![];
+ ///
+ /// stream.forward((&mut writer).into_sink()).await?;
+ ///
+ /// assert_eq!(writer, vec![1, 2, 3, 4, 5, 6]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// # })?;
+ /// # Ok::<(), Box<dyn std::error::Error>>(())
+ /// ```
+ #[cfg(feature = "sink")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ fn into_sink<Item: AsRef<[u8]>>(self) -> IntoSink<Self, Item>
+ where
+ Self: Sized,
+ {
+ crate::sink::assert_sink::<Item, Error, _>(IntoSink::new(self))
+ }
+}
+
+impl<W: AsyncWrite + ?Sized> AsyncWriteExt for W {}
+
+/// An extension trait which adds utility methods to `AsyncSeek` types.
+pub trait AsyncSeekExt: AsyncSeek {
+ /// Creates a future which will seek an IO object, and then yield the
+ /// new position in the object.
+ ///
+ /// In the case of an error the error will be yielded.
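+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch seeking within an in-memory `Cursor` (illustrative only):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncSeekExt, Cursor, SeekFrom};
+ ///
+ /// let mut cursor = Cursor::new(b"abcdef");
+ ///
+ /// let position = cursor.seek(SeekFrom::End(-2)).await?;
+ /// assert_eq!(position, 4);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```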
+ fn seek(&mut self, pos: SeekFrom) -> Seek<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<u64>, _>(Seek::new(self, pos))
+ }
+
+ /// Creates a future which will return the current seek position from the
+ /// start of the stream.
+ ///
+ /// This is equivalent to `self.seek(SeekFrom::Current(0))`.
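+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch with an in-memory `Cursor` (illustrative only):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncSeekExt, Cursor, SeekFrom};
+ ///
+ /// let mut cursor = Cursor::new(b"abcdef");
+ /// assert_eq!(cursor.stream_position().await?, 0);
+ ///
+ /// cursor.seek(SeekFrom::Start(3)).await?;
+ /// assert_eq!(cursor.stream_position().await?, 3);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```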
+ fn stream_position(&mut self) -> Seek<'_, Self>
+ where
+ Self: Unpin,
+ {
+ self.seek(SeekFrom::Current(0))
+ }
+}
+
+impl<S: AsyncSeek + ?Sized> AsyncSeekExt for S {}
+
+/// An extension trait which adds utility methods to `AsyncBufRead` types.
+pub trait AsyncBufReadExt: AsyncBufRead {
+ /// Creates a future which will wait for a non-empty buffer to be available from this I/O
+ /// object or EOF to be reached.
+ ///
+ /// This method is the async equivalent to [`BufRead::fill_buf`](std::io::BufRead::fill_buf).
+ ///
+ /// ```rust
+ /// # futures::executor::block_on(async {
+ /// use futures::{io::AsyncBufReadExt as _, stream::{iter, TryStreamExt as _}};
+ ///
+ /// let mut stream = iter(vec![Ok(vec![1, 2, 3]), Ok(vec![4, 5, 6])]).into_async_read();
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![1, 2, 3]);
+ /// stream.consume_unpin(2);
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![3]);
+ /// stream.consume_unpin(1);
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![4, 5, 6]);
+ /// stream.consume_unpin(3);
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn fill_buf(&mut self) -> FillBuf<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<&[u8]>, _>(FillBuf::new(self))
+ }
+
+ /// A convenience for calling [`AsyncBufRead::consume`] on [`Unpin`] IO types.
+ ///
+ /// ```rust
+ /// # futures::executor::block_on(async {
+ /// use futures::{io::AsyncBufReadExt as _, stream::{iter, TryStreamExt as _}};
+ ///
+ /// let mut stream = iter(vec![Ok(vec![1, 2, 3])]).into_async_read();
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![1, 2, 3]);
+ /// stream.consume_unpin(2);
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![3]);
+ /// stream.consume_unpin(1);
+ ///
+ /// assert_eq!(stream.fill_buf().await?, vec![]);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn consume_unpin(&mut self, amt: usize)
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).consume(amt)
+ }
+
+ /// Creates a future which will read all the bytes associated with this I/O
+ /// object into `buf` until the delimiter `byte` or EOF is reached.
+ /// This method is the async equivalent to [`BufRead::read_until`](std::io::BufRead::read_until).
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// delimiter or EOF is found. Once found, all bytes up to, and including,
+ /// the delimiter (if found) will be appended to `buf`.
+ ///
+ /// The returned future will resolve to the number of bytes read once the read
+ /// operation is completed.
+ ///
+ /// In the case of an error the buffer and the object will be discarded, with
+ /// the error yielded.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncBufReadExt, Cursor};
+ ///
+ /// let mut cursor = Cursor::new(b"lorem-ipsum");
+ /// let mut buf = vec![];
+ ///
+ /// // cursor is at 'l'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf).await?;
+ /// assert_eq!(num_bytes, 6);
+ /// assert_eq!(buf, b"lorem-");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'i'
+ /// let num_bytes = cursor.read_until(b'-', &mut buf).await?;
+ /// assert_eq!(num_bytes, 5);
+ /// assert_eq!(buf, b"ipsum");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_until(b'-', &mut buf).await?;
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, b"");
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn read_until<'a>(&'a mut self, byte: u8, buf: &'a mut Vec<u8>) -> ReadUntil<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(ReadUntil::new(self, byte, buf))
+ }
+
+ /// Creates a future which will read all the bytes associated with this I/O
+ /// object into `buf` until a newline (the 0xA byte) or EOF is reached.
+ /// This method is the async equivalent to [`BufRead::read_line`](std::io::BufRead::read_line).
+ ///
+ /// This function will read bytes from the underlying stream until the
+ /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes
+ /// up to, and including, the delimiter (if found) will be appended to
+ /// `buf`.
+ ///
+ /// The returned future will resolve to the number of bytes read once the read
+ /// operation is completed.
+ ///
+ /// In the case of an error the buffer and the object will be discarded, with
+ /// the error yielded.
+ ///
+ /// # Errors
+ ///
+ /// This function has the same error semantics as [`read_until`] and will
+ /// also return an error if the read bytes are not valid UTF-8. If an I/O
+ /// error is encountered then `buf` may contain some bytes already read in
+ /// the event that all data read so far was valid UTF-8.
+ ///
+ /// [`read_until`]: AsyncBufReadExt::read_until
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncBufReadExt, Cursor};
+ ///
+ /// let mut cursor = Cursor::new(b"foo\nbar");
+ /// let mut buf = String::new();
+ ///
+ /// // cursor is at 'f'
+ /// let num_bytes = cursor.read_line(&mut buf).await?;
+ /// assert_eq!(num_bytes, 4);
+ /// assert_eq!(buf, "foo\n");
+ /// buf.clear();
+ ///
+ /// // cursor is at 'b'
+ /// let num_bytes = cursor.read_line(&mut buf).await?;
+ /// assert_eq!(num_bytes, 3);
+ /// assert_eq!(buf, "bar");
+ /// buf.clear();
+ ///
+ /// // cursor is at EOF
+ /// let num_bytes = cursor.read_line(&mut buf).await?;
+ /// assert_eq!(num_bytes, 0);
+ /// assert_eq!(buf, "");
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn read_line<'a>(&'a mut self, buf: &'a mut String) -> ReadLine<'a, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<usize>, _>(ReadLine::new(self, buf))
+ }
+
+ /// Returns a stream over the lines of this reader.
+ /// This method is the async equivalent to [`BufRead::lines`](std::io::BufRead::lines).
+ ///
+ /// The stream returned from this function will yield instances of
+ /// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline
+ /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end.
+ ///
+ /// [`io::Result`]: std::io::Result
+ /// [`String`]: String
+ ///
+ /// # Errors
+ ///
+ /// Each line of the stream has the same error semantics as [`AsyncBufReadExt::read_line`].
+ ///
+ /// [`AsyncBufReadExt::read_line`]: AsyncBufReadExt::read_line
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncBufReadExt, Cursor};
+ /// use futures::stream::StreamExt;
+ ///
+ /// let cursor = Cursor::new(b"lorem\nipsum\r\ndolor");
+ ///
+ /// let mut lines_stream = cursor.lines().map(|l| l.unwrap());
+ /// assert_eq!(lines_stream.next().await, Some(String::from("lorem")));
+ /// assert_eq!(lines_stream.next().await, Some(String::from("ipsum")));
+ /// assert_eq!(lines_stream.next().await, Some(String::from("dolor")));
+ /// assert_eq!(lines_stream.next().await, None);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ fn lines(self) -> Lines<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Result<String>, _>(Lines::new(self))
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized> AsyncBufReadExt for R {}
+
+// Just a helper function to ensure the readers we're returning all have the
+// right implementations.
+pub(crate) fn assert_read<R>(reader: R) -> R
+where
+ R: AsyncRead,
+{
+ reader
+}
+// Just a helper function to ensure the writers we're returning all have the
+// right implementations.
+pub(crate) fn assert_write<W>(writer: W) -> W
+where
+ W: AsyncWrite,
+{
+ writer
+}
diff --git a/vendor/futures-util/src/io/read.rs b/vendor/futures-util/src/io/read.rs
new file mode 100644
index 000000000..677ba818d
--- /dev/null
+++ b/vendor/futures-util/src/io/read.rs
@@ -0,0 +1,30 @@
+use crate::io::AsyncRead;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`read`](super::AsyncReadExt::read) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Read<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut [u8],
+}
+
+impl<R: ?Sized + Unpin> Unpin for Read<'_, R> {}
+
+impl<'a, R: AsyncRead + ?Sized + Unpin> Read<'a, R> {
+ pub(super) fn new(reader: &'a mut R, buf: &'a mut [u8]) -> Self {
+ Self { reader, buf }
+ }
+}
+
+impl<R: AsyncRead + ?Sized + Unpin> Future for Read<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ Pin::new(&mut this.reader).poll_read(cx, this.buf)
+ }
+}
diff --git a/vendor/futures-util/src/io/read_exact.rs b/vendor/futures-util/src/io/read_exact.rs
new file mode 100644
index 000000000..02e38c35b
--- /dev/null
+++ b/vendor/futures-util/src/io/read_exact.rs
@@ -0,0 +1,42 @@
+use crate::io::AsyncRead;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use std::io;
+use std::mem;
+use std::pin::Pin;
+
+/// Future for the [`read_exact`](super::AsyncReadExt::read_exact) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadExact<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut [u8],
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadExact<'_, R> {}
+
+impl<'a, R: AsyncRead + ?Sized + Unpin> ReadExact<'a, R> {
+ pub(super) fn new(reader: &'a mut R, buf: &'a mut [u8]) -> Self {
+ Self { reader, buf }
+ }
+}
+
+impl<R: AsyncRead + ?Sized + Unpin> Future for ReadExact<'_, R> {
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ while !this.buf.is_empty() {
+ let n = ready!(Pin::new(&mut this.reader).poll_read(cx, this.buf))?;
+ {
+ let (_, rest) = mem::replace(&mut this.buf, &mut []).split_at_mut(n);
+ this.buf = rest;
+ }
+ if n == 0 {
+ return Poll::Ready(Err(io::ErrorKind::UnexpectedEof.into()));
+ }
+ }
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/io/read_line.rs b/vendor/futures-util/src/io/read_line.rs
new file mode 100644
index 000000000..c75af9471
--- /dev/null
+++ b/vendor/futures-util/src/io/read_line.rs
@@ -0,0 +1,57 @@
+use super::read_until::read_until_internal;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncBufRead;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+use std::str;
+
+/// Future for the [`read_line`](super::AsyncBufReadExt::read_line) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadLine<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut String,
+ bytes: Vec<u8>,
+ read: usize,
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadLine<'_, R> {}
+
+impl<'a, R: AsyncBufRead + ?Sized + Unpin> ReadLine<'a, R> {
+ pub(super) fn new(reader: &'a mut R, buf: &'a mut String) -> Self {
+ Self { reader, bytes: mem::replace(buf, String::new()).into_bytes(), buf, read: 0 }
+ }
+}
+
+pub(super) fn read_line_internal<R: AsyncBufRead + ?Sized>(
+ reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut String,
+ bytes: &mut Vec<u8>,
+ read: &mut usize,
+) -> Poll<io::Result<usize>> {
+ let ret = ready!(read_until_internal(reader, cx, b'\n', bytes, read));
+ if str::from_utf8(bytes).is_err() {
+ Poll::Ready(ret.and_then(|_| {
+ Err(io::Error::new(io::ErrorKind::InvalidData, "stream did not contain valid UTF-8"))
+ }))
+ } else {
+ debug_assert!(buf.is_empty());
+ debug_assert_eq!(*read, 0);
+ // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`.
+ mem::swap(unsafe { buf.as_mut_vec() }, bytes);
+ Poll::Ready(ret)
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadLine<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self { reader, buf, bytes, read } = &mut *self;
+ read_line_internal(Pin::new(reader), cx, buf, bytes, read)
+ }
+}
diff --git a/vendor/futures-util/src/io/read_to_end.rs b/vendor/futures-util/src/io/read_to_end.rs
new file mode 100644
index 000000000..919d7d13c
--- /dev/null
+++ b/vendor/futures-util/src/io/read_to_end.rs
@@ -0,0 +1,91 @@
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncRead;
+use std::io;
+use std::pin::Pin;
+use std::vec::Vec;
+
+/// Future for the [`read_to_end`](super::AsyncReadExt::read_to_end) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadToEnd<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut Vec<u8>,
+ start_len: usize,
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadToEnd<'_, R> {}
+
+impl<'a, R: AsyncRead + ?Sized + Unpin> ReadToEnd<'a, R> {
+ pub(super) fn new(reader: &'a mut R, buf: &'a mut Vec<u8>) -> Self {
+ let start_len = buf.len();
+ Self { reader, buf, start_len }
+ }
+}
+
+struct Guard<'a> {
+ buf: &'a mut Vec<u8>,
+ len: usize,
+}
+
+impl Drop for Guard<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ self.buf.set_len(self.len);
+ }
+ }
+}
+
+// This uses an adaptive system to extend the vector when it fills. We want to
+// avoid paying to allocate and zero a huge chunk of memory if the reader only
+// has 4 bytes while still making large reads if the reader does have a ton
+// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
+// time is 4,500 times (!) slower than this if the reader has a very small
+// amount of data to return.
+//
+// Because we're extending the buffer with uninitialized data for trusted
+// readers, we need to make sure to truncate that if any of this panics.
+pub(super) fn read_to_end_internal<R: AsyncRead + ?Sized>(
+ mut rd: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut Vec<u8>,
+ start_len: usize,
+) -> Poll<io::Result<usize>> {
+ let mut g = Guard { len: buf.len(), buf };
+ loop {
+ if g.len == g.buf.len() {
+ unsafe {
+ g.buf.reserve(32);
+ let capacity = g.buf.capacity();
+ g.buf.set_len(capacity);
+ super::initialize(&rd, &mut g.buf[g.len..]);
+ }
+ }
+
+ let buf = &mut g.buf[g.len..];
+ match ready!(rd.as_mut().poll_read(cx, buf)) {
+ Ok(0) => return Poll::Ready(Ok(g.len - start_len)),
+ Ok(n) => {
+ // We can't allow bogus values from read. If it is too large, the returned vec could have its length
+ // set past its capacity, or if it overflows the vec could be shortened which could create an invalid
+ // string if this is called via read_to_string.
+ assert!(n <= buf.len());
+ g.len += n;
+ }
+ Err(e) => return Poll::Ready(Err(e)),
+ }
+ }
+}
+
+impl<A> Future for ReadToEnd<'_, A>
+where
+ A: AsyncRead + ?Sized + Unpin,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ read_to_end_internal(Pin::new(&mut this.reader), cx, this.buf, this.start_len)
+ }
+}
diff --git a/vendor/futures-util/src/io/read_to_string.rs b/vendor/futures-util/src/io/read_to_string.rs
new file mode 100644
index 000000000..457af59e4
--- /dev/null
+++ b/vendor/futures-util/src/io/read_to_string.rs
@@ -0,0 +1,59 @@
+use super::read_to_end::read_to_end_internal;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncRead;
+use std::pin::Pin;
+use std::vec::Vec;
+use std::{io, mem, str};
+
+/// Future for the [`read_to_string`](super::AsyncReadExt::read_to_string) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadToString<'a, R: ?Sized> {
+ reader: &'a mut R,
+ buf: &'a mut String,
+ bytes: Vec<u8>,
+ start_len: usize,
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadToString<'_, R> {}
+
+impl<'a, R: AsyncRead + ?Sized + Unpin> ReadToString<'a, R> {
+ pub(super) fn new(reader: &'a mut R, buf: &'a mut String) -> Self {
+ let start_len = buf.len();
+ Self { reader, bytes: mem::replace(buf, String::new()).into_bytes(), buf, start_len }
+ }
+}
+
+fn read_to_string_internal<R: AsyncRead + ?Sized>(
+ reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ buf: &mut String,
+ bytes: &mut Vec<u8>,
+ start_len: usize,
+) -> Poll<io::Result<usize>> {
+ let ret = ready!(read_to_end_internal(reader, cx, bytes, start_len));
+ if str::from_utf8(bytes).is_err() {
+ Poll::Ready(ret.and_then(|_| {
+ Err(io::Error::new(io::ErrorKind::InvalidData, "stream did not contain valid UTF-8"))
+ }))
+ } else {
+ debug_assert!(buf.is_empty());
+ // Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`.
+ mem::swap(unsafe { buf.as_mut_vec() }, bytes);
+ Poll::Ready(ret)
+ }
+}
+
+impl<A> Future for ReadToString<'_, A>
+where
+ A: AsyncRead + ?Sized + Unpin,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self { reader, buf, bytes, start_len } = &mut *self;
+ read_to_string_internal(Pin::new(reader), cx, buf, bytes, *start_len)
+ }
+}
diff --git a/vendor/futures-util/src/io/read_until.rs b/vendor/futures-util/src/io/read_until.rs
new file mode 100644
index 000000000..72b59eab1
--- /dev/null
+++ b/vendor/futures-util/src/io/read_until.rs
@@ -0,0 +1,60 @@
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncBufRead;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+
+/// Future for the [`read_until`](super::AsyncBufReadExt::read_until) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadUntil<'a, R: ?Sized> {
+ reader: &'a mut R,
+ byte: u8,
+ buf: &'a mut Vec<u8>,
+ read: usize,
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadUntil<'_, R> {}
+
+impl<'a, R: AsyncBufRead + ?Sized + Unpin> ReadUntil<'a, R> {
+ pub(super) fn new(reader: &'a mut R, byte: u8, buf: &'a mut Vec<u8>) -> Self {
+ Self { reader, byte, buf, read: 0 }
+ }
+}
+
+pub(super) fn read_until_internal<R: AsyncBufRead + ?Sized>(
+ mut reader: Pin<&mut R>,
+ cx: &mut Context<'_>,
+ byte: u8,
+ buf: &mut Vec<u8>,
+ read: &mut usize,
+) -> Poll<io::Result<usize>> {
+ loop {
+ let (done, used) = {
+ let available = ready!(reader.as_mut().poll_fill_buf(cx))?;
+ if let Some(i) = memchr::memchr(byte, available) {
+ buf.extend_from_slice(&available[..=i]);
+ (true, i + 1)
+ } else {
+ buf.extend_from_slice(available);
+ (false, available.len())
+ }
+ };
+ reader.as_mut().consume(used);
+ *read += used;
+ if done || used == 0 {
+ return Poll::Ready(Ok(mem::replace(read, 0)));
+ }
+ }
+}
+
+impl<R: AsyncBufRead + ?Sized + Unpin> Future for ReadUntil<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self { reader, byte, buf, read } = &mut *self;
+ read_until_internal(Pin::new(reader), cx, *byte, buf, read)
+ }
+}
diff --git a/vendor/futures-util/src/io/read_vectored.rs b/vendor/futures-util/src/io/read_vectored.rs
new file mode 100644
index 000000000..4e22df57e
--- /dev/null
+++ b/vendor/futures-util/src/io/read_vectored.rs
@@ -0,0 +1,30 @@
+use crate::io::AsyncRead;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use std::io::{self, IoSliceMut};
+use std::pin::Pin;
+
+/// Future for the [`read_vectored`](super::AsyncReadExt::read_vectored) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct ReadVectored<'a, R: ?Sized> {
+ reader: &'a mut R,
+ bufs: &'a mut [IoSliceMut<'a>],
+}
+
+impl<R: ?Sized + Unpin> Unpin for ReadVectored<'_, R> {}
+
+impl<'a, R: AsyncRead + ?Sized + Unpin> ReadVectored<'a, R> {
+ pub(super) fn new(reader: &'a mut R, bufs: &'a mut [IoSliceMut<'a>]) -> Self {
+ Self { reader, bufs }
+ }
+}
+
+impl<R: AsyncRead + ?Sized + Unpin> Future for ReadVectored<'_, R> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ Pin::new(&mut this.reader).poll_read_vectored(cx, this.bufs)
+ }
+}
diff --git a/vendor/futures-util/src/io/repeat.rs b/vendor/futures-util/src/io/repeat.rs
new file mode 100644
index 000000000..2828bf011
--- /dev/null
+++ b/vendor/futures-util/src/io/repeat.rs
@@ -0,0 +1,66 @@
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncRead, IoSliceMut};
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+
+/// Reader for the [`repeat()`] function.
+#[must_use = "readers do nothing unless polled"]
+pub struct Repeat {
+ byte: u8,
+}
+
+/// Creates an instance of a reader that infinitely repeats one byte.
+///
+/// All reads from this reader will succeed by filling the specified buffer with
+/// the given byte.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::io::{self, AsyncReadExt};
+///
+/// let mut buffer = [0; 3];
+/// let mut reader = io::repeat(0b101);
+/// reader.read_exact(&mut buffer).await.unwrap();
+/// assert_eq!(buffer, [0b101, 0b101, 0b101]);
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```
+pub fn repeat(byte: u8) -> Repeat {
+ Repeat { byte }
+}
+
+impl AsyncRead for Repeat {
+ #[inline]
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ for slot in &mut *buf {
+ *slot = self.byte;
+ }
+ Poll::Ready(Ok(buf.len()))
+ }
+
+ #[inline]
+ fn poll_read_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let mut nwritten = 0;
+ for buf in bufs {
+ nwritten += ready!(self.as_mut().poll_read(cx, buf))?;
+ }
+ Poll::Ready(Ok(nwritten))
+ }
+}
+
+impl fmt::Debug for Repeat {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Repeat { .. }")
+ }
+}
diff --git a/vendor/futures-util/src/io/seek.rs b/vendor/futures-util/src/io/seek.rs
new file mode 100644
index 000000000..0aa237139
--- /dev/null
+++ b/vendor/futures-util/src/io/seek.rs
@@ -0,0 +1,30 @@
+use crate::io::{AsyncSeek, SeekFrom};
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`seek`](crate::io::AsyncSeekExt::seek) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Seek<'a, S: ?Sized> {
+ seek: &'a mut S,
+ pos: SeekFrom,
+}
+
+impl<S: ?Sized + Unpin> Unpin for Seek<'_, S> {}
+
+impl<'a, S: AsyncSeek + ?Sized + Unpin> Seek<'a, S> {
+ pub(super) fn new(seek: &'a mut S, pos: SeekFrom) -> Self {
+ Self { seek, pos }
+ }
+}
+
+impl<S: AsyncSeek + ?Sized + Unpin> Future for Seek<'_, S> {
+ type Output = io::Result<u64>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ Pin::new(&mut this.seek).poll_seek(cx, this.pos)
+ }
+}
diff --git a/vendor/futures-util/src/io/sink.rs b/vendor/futures-util/src/io/sink.rs
new file mode 100644
index 000000000..4a32ca704
--- /dev/null
+++ b/vendor/futures-util/src/io/sink.rs
@@ -0,0 +1,67 @@
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncWrite, IoSlice};
+use std::fmt;
+use std::io;
+use std::pin::Pin;
+
+/// Writer for the [`sink()`] function.
+#[must_use = "writers do nothing unless polled"]
+pub struct Sink {
+ _priv: (),
+}
+
+/// Creates an instance of a writer which will successfully consume all data.
+///
+/// All calls to `poll_write` on the returned instance will return `Poll::Ready(Ok(buf.len()))`
+/// and the contents of the buffer will not be inspected.
+///
+/// # Examples
+///
+/// ```rust
+/// # futures::executor::block_on(async {
+/// use futures::io::{self, AsyncWriteExt};
+///
+/// let buffer = vec![1, 2, 3, 5, 8];
+/// let mut writer = io::sink();
+/// let num_bytes = writer.write(&buffer).await?;
+/// assert_eq!(num_bytes, 5);
+/// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+/// ```
+pub fn sink() -> Sink {
+ Sink { _priv: () }
+}
+
+impl AsyncWrite for Sink {
+ #[inline]
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(buf.len()))
+ }
+
+ #[inline]
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(Ok(bufs.iter().map(|b| b.len()).sum()))
+ }
+
+ #[inline]
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+ #[inline]
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl fmt::Debug for Sink {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Sink { .. }")
+ }
+}
diff --git a/vendor/futures-util/src/io/split.rs b/vendor/futures-util/src/io/split.rs
new file mode 100644
index 000000000..3f1b9af45
--- /dev/null
+++ b/vendor/futures-util/src/io/split.rs
@@ -0,0 +1,115 @@
+use crate::lock::BiLock;
+use core::fmt;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncRead, AsyncWrite, IoSlice, IoSliceMut};
+use std::io;
+use std::pin::Pin;
+
+/// The readable half of an object returned from `AsyncReadExt::split`.
+#[derive(Debug)]
+pub struct ReadHalf<T> {
+ handle: BiLock<T>,
+}
+
+/// The writable half of an object returned from `AsyncReadExt::split`.
+#[derive(Debug)]
+pub struct WriteHalf<T> {
+ handle: BiLock<T>,
+}
+
+fn lock_and_then<T, U, E, F>(lock: &BiLock<T>, cx: &mut Context<'_>, f: F) -> Poll<Result<U, E>>
+where
+ F: FnOnce(Pin<&mut T>, &mut Context<'_>) -> Poll<Result<U, E>>,
+{
+ let mut l = ready!(lock.poll_lock(cx));
+ f(l.as_pin_mut(), cx)
+}
+
+pub(super) fn split<T: AsyncRead + AsyncWrite>(t: T) -> (ReadHalf<T>, WriteHalf<T>) {
+ let (a, b) = BiLock::new(t);
+ (ReadHalf { handle: a }, WriteHalf { handle: b })
+}
+
+impl<T: Unpin> ReadHalf<T> {
+ /// Attempts to put the two "halves" of a split `AsyncRead + AsyncWrite` back
+ /// together. Succeeds only if the `ReadHalf<T>` and `WriteHalf<T>` are
+ /// a matching pair originating from the same call to `AsyncReadExt::split`.
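+ ///
+ /// A minimal sketch reuniting the halves of an in-memory `Cursor`
+ /// (illustrative only):
+ ///
+ /// ```
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let (read_half, write_half) = Cursor::new(vec![1u8, 2, 3]).split();
+ /// let cursor = read_half.reunite(write_half).unwrap();
+ /// assert_eq!(cursor.into_inner(), vec![1, 2, 3]);
+ /// ```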
+ pub fn reunite(self, other: WriteHalf<T>) -> Result<T, ReuniteError<T>> {
+ self.handle
+ .reunite(other.handle)
+ .map_err(|err| ReuniteError(ReadHalf { handle: err.0 }, WriteHalf { handle: err.1 }))
+ }
+}
+
+impl<T: Unpin> WriteHalf<T> {
+ /// Attempts to put the two "halves" of a split `AsyncRead + AsyncWrite` back
+ /// together. Succeeds only if the `ReadHalf<T>` and `WriteHalf<T>` are
+ /// a matching pair originating from the same call to `AsyncReadExt::split`.
+ pub fn reunite(self, other: ReadHalf<T>) -> Result<T, ReuniteError<T>> {
+ other.reunite(self)
+ }
+}
+
+impl<R: AsyncRead> AsyncRead for ReadHalf<R> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_read(cx, buf))
+ }
+
+ fn poll_read_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &mut [IoSliceMut<'_>],
+ ) -> Poll<io::Result<usize>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_read_vectored(cx, bufs))
+ }
+}
+
+impl<W: AsyncWrite> AsyncWrite for WriteHalf<W> {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_write(cx, buf))
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_write_vectored(cx, bufs))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_flush(cx))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ lock_and_then(&self.handle, cx, |l, cx| l.poll_close(cx))
+ }
+}
+
+/// Error indicating a `ReadHalf<T>` and `WriteHalf<T>` were not two halves
+/// of an `AsyncRead + AsyncWrite`, and thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub ReadHalf<T>, pub WriteHalf<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("ReuniteError").field(&"...").finish()
+ }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "tried to reunite a ReadHalf and WriteHalf that don't form a pair")
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: core::any::Any> std::error::Error for ReuniteError<T> {}
diff --git a/vendor/futures-util/src/io/take.rs b/vendor/futures-util/src/io/take.rs
new file mode 100644
index 000000000..2c494804d
--- /dev/null
+++ b/vendor/futures-util/src/io/take.rs
@@ -0,0 +1,125 @@
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead};
+use pin_project_lite::pin_project;
+use std::pin::Pin;
+use std::{cmp, io};
+
+pin_project! {
+ /// Reader for the [`take`](super::AsyncReadExt::take) method.
+ #[derive(Debug)]
+ #[must_use = "readers do nothing unless you `.await` or poll them"]
+ pub struct Take<R> {
+ #[pin]
+ inner: R,
+ limit: u64,
+ }
+}
+
+impl<R: AsyncRead> Take<R> {
+ pub(super) fn new(inner: R, limit: u64) -> Self {
+ Self { inner, limit }
+ }
+
+ /// Returns the remaining number of bytes that can be
+ /// read before this instance will return EOF.
+ ///
+ /// # Note
+ ///
+ /// This instance may reach `EOF` after reading fewer bytes than indicated by
+ /// this method if the underlying [`AsyncRead`] instance reaches EOF.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let reader = Cursor::new(&b"12345678"[..]);
+ /// let mut buffer = [0; 2];
+ ///
+ /// let mut take = reader.take(4);
+ /// let n = take.read(&mut buffer).await?;
+ ///
+ /// assert_eq!(take.limit(), 2);
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ pub fn limit(&self) -> u64 {
+ self.limit
+ }
+
+ /// Sets the number of bytes that can be read before this instance will
+ /// return EOF. This is the same as constructing a new `Take` instance, so
+ /// the number of bytes read and the previous limit value don't matter when
+ /// calling this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::io::{AsyncReadExt, Cursor};
+ ///
+ /// let reader = Cursor::new(&b"12345678"[..]);
+ /// let mut buffer = [0; 4];
+ ///
+ /// let mut take = reader.take(4);
+ /// let n = take.read(&mut buffer).await?;
+ ///
+ /// assert_eq!(n, 4);
+ /// assert_eq!(take.limit(), 0);
+ ///
+ /// take.set_limit(10);
+ /// let n = take.read(&mut buffer).await?;
+ /// assert_eq!(n, 4);
+ ///
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ pub fn set_limit(&mut self, limit: u64) {
+ self.limit = limit
+ }
+
+ delegate_access_inner!(inner, R, ());
+}
+
+impl<R: AsyncRead> AsyncRead for Take<R> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ let this = self.project();
+
+ if *this.limit == 0 {
+ return Poll::Ready(Ok(0));
+ }
+
+ let max = cmp::min(buf.len() as u64, *this.limit) as usize;
+ let n = ready!(this.inner.poll_read(cx, &mut buf[..max]))?;
+ *this.limit -= n as u64;
+ Poll::Ready(Ok(n))
+ }
+}
+
+impl<R: AsyncBufRead> AsyncBufRead for Take<R> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ let this = self.project();
+
+ // Don't call into inner reader at all at EOF because it may still block
+ if *this.limit == 0 {
+ return Poll::Ready(Ok(&[]));
+ }
+
+ let buf = ready!(this.inner.poll_fill_buf(cx)?);
+ let cap = cmp::min(buf.len() as u64, *this.limit) as usize;
+ Poll::Ready(Ok(&buf[..cap]))
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ let this = self.project();
+
+ // Don't let callers reset the limit by passing an overlarge value
+ let amt = cmp::min(amt as u64, *this.limit) as usize;
+ *this.limit -= amt as u64;
+ this.inner.consume(amt);
+ }
+}
diff --git a/vendor/futures-util/src/io/window.rs b/vendor/futures-util/src/io/window.rs
new file mode 100644
index 000000000..77b7267c6
--- /dev/null
+++ b/vendor/futures-util/src/io/window.rs
@@ -0,0 +1,104 @@
+use std::ops::{Bound, Range, RangeBounds};
+
+/// An owned window around an underlying buffer.
+///
+/// Normally slices work great for considering sub-portions of a buffer, but
+/// unfortunately a slice is a *borrowed* type in Rust which has an associated
+/// lifetime. When working with futures and async I/O these lifetimes are not
+/// always appropriate, and are sometimes difficult to store in tasks. This
+/// type strives to fill this gap by providing an "owned slice" around an
+/// underlying buffer of bytes.
+///
+/// A `Window<T>` wraps an underlying buffer, `T`, and has configurable
+/// start/end indexes to alter the behavior of the `AsRef<[u8]>` implementation
+/// that this type carries.
+///
+/// This type can be particularly useful when working with the `write_all`
+/// combinator in this crate. Data can be sliced via `Window`, consumed by
+/// `write_all`, and then earned back once the write operation finishes through
+/// the `into_inner` method on this type.
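+///
+/// # Examples
+///
+/// A minimal sketch of narrowing a buffer to a sub-range (illustrative only):
+///
+/// ```
+/// use futures::io::Window;
+///
+/// let mut window = Window::new(vec![1u8, 2, 3, 4, 5]);
+/// window.set(1..4);
+/// assert_eq!(window.as_ref(), &[2, 3, 4][..]);
+/// ```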
+#[derive(Debug)]
+pub struct Window<T> {
+ inner: T,
+ range: Range<usize>,
+}
+
+impl<T: AsRef<[u8]>> Window<T> {
+ /// Creates a new window around the buffer `t` defaulting to the entire
+ /// slice.
+ ///
+ /// Further methods can be called on the returned `Window<T>` to alter the
+ /// window into the data provided.
+ pub fn new(t: T) -> Self {
+ Self { range: 0..t.as_ref().len(), inner: t }
+ }
+
+ /// Gets a shared reference to the underlying buffer inside of this
+ /// `Window`.
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying buffer inside of this
+ /// `Window`.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+
+ /// Consumes this `Window`, returning the underlying buffer.
+ pub fn into_inner(self) -> T {
+ self.inner
+ }
+
+ /// Returns the starting index of this window into the underlying buffer
+ /// `T`.
+ pub fn start(&self) -> usize {
+ self.range.start
+ }
+
+ /// Returns the end index of this window into the underlying buffer
+ /// `T`.
+ pub fn end(&self) -> usize {
+ self.range.end
+ }
+
+ /// Changes the range of this window to the range specified.
+ ///
+ /// # Panics
+ ///
+ /// This method will panic if `range` is out of bounds for the underlying
+ /// slice or if [`start_bound()`] of `range` comes after the [`end_bound()`].
+ ///
+ /// [`start_bound()`]: std::ops::RangeBounds::start_bound
+ /// [`end_bound()`]: std::ops::RangeBounds::end_bound
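+ ///
+ /// # Examples
+ ///
+ /// A small sketch (illustrative only):
+ ///
+ /// ```
+ /// use futures::io::Window;
+ ///
+ /// let mut window = Window::new([10u8, 11, 12, 13]);
+ /// window.set(..2);
+ /// assert_eq!(window.as_ref(), &[10, 11][..]);
+ /// ```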
+ pub fn set<R: RangeBounds<usize>>(&mut self, range: R) {
+ let start = match range.start_bound() {
+ Bound::Included(n) => *n,
+ Bound::Excluded(n) => *n + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match range.end_bound() {
+ Bound::Included(n) => *n + 1,
+ Bound::Excluded(n) => *n,
+ Bound::Unbounded => self.inner.as_ref().len(),
+ };
+
+ assert!(end <= self.inner.as_ref().len());
+ assert!(start <= end);
+
+ self.range.start = start;
+ self.range.end = end;
+ }
+}
+
+impl<T: AsRef<[u8]>> AsRef<[u8]> for Window<T> {
+ fn as_ref(&self) -> &[u8] {
+ &self.inner.as_ref()[self.range.start..self.range.end]
+ }
+}
+
+impl<T: AsMut<[u8]>> AsMut<[u8]> for Window<T> {
+ fn as_mut(&mut self) -> &mut [u8] {
+ &mut self.inner.as_mut()[self.range.start..self.range.end]
+ }
+}
diff --git a/vendor/futures-util/src/io/write.rs b/vendor/futures-util/src/io/write.rs
new file mode 100644
index 000000000..c47ef9e2e
--- /dev/null
+++ b/vendor/futures-util/src/io/write.rs
@@ -0,0 +1,30 @@
+use crate::io::AsyncWrite;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use std::io;
+use std::pin::Pin;
+
+/// Future for the [`write`](super::AsyncWriteExt::write) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Write<'a, W: ?Sized> {
+ writer: &'a mut W,
+ buf: &'a [u8],
+}
+
+impl<W: ?Sized + Unpin> Unpin for Write<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> Write<'a, W> {
+ pub(super) fn new(writer: &'a mut W, buf: &'a [u8]) -> Self {
+ Self { writer, buf }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized + Unpin> Future for Write<'_, W> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ Pin::new(&mut this.writer).poll_write(cx, this.buf)
+ }
+}
diff --git a/vendor/futures-util/src/io/write_all.rs b/vendor/futures-util/src/io/write_all.rs
new file mode 100644
index 000000000..b134bf1b2
--- /dev/null
+++ b/vendor/futures-util/src/io/write_all.rs
@@ -0,0 +1,43 @@
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use std::io;
+use std::mem;
+use std::pin::Pin;
+
+/// Future for the [`write_all`](super::AsyncWriteExt::write_all) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct WriteAll<'a, W: ?Sized> {
+ writer: &'a mut W,
+ buf: &'a [u8],
+}
+
+impl<W: ?Sized + Unpin> Unpin for WriteAll<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteAll<'a, W> {
+ pub(super) fn new(writer: &'a mut W, buf: &'a [u8]) -> Self {
+ Self { writer, buf }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized + Unpin> Future for WriteAll<'_, W> {
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let this = &mut *self;
+ while !this.buf.is_empty() {
+ let n = ready!(Pin::new(&mut this.writer).poll_write(cx, this.buf))?;
+ {
+ let (_, rest) = mem::replace(&mut this.buf, &[]).split_at(n);
+ this.buf = rest;
+ }
+ if n == 0 {
+ return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+ }
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/io/write_all_vectored.rs b/vendor/futures-util/src/io/write_all_vectored.rs
new file mode 100644
index 000000000..a8fc4c641
--- /dev/null
+++ b/vendor/futures-util/src/io/write_all_vectored.rs
@@ -0,0 +1,193 @@
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_io::AsyncWrite;
+use futures_io::IoSlice;
+use std::io;
+use std::pin::Pin;
+
+/// Future for the
+/// [`write_all_vectored`](super::AsyncWriteExt::write_all_vectored) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct WriteAllVectored<'a, W: ?Sized + Unpin> {
+ writer: &'a mut W,
+ bufs: &'a mut [IoSlice<'a>],
+}
+
+impl<W: ?Sized + Unpin> Unpin for WriteAllVectored<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteAllVectored<'a, W> {
+ pub(super) fn new(writer: &'a mut W, mut bufs: &'a mut [IoSlice<'a>]) -> Self {
+ IoSlice::advance_slices(&mut bufs, 0);
+ Self { writer, bufs }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized + Unpin> Future for WriteAllVectored<'_, W> {
+ type Output = io::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ let this = &mut *self;
+ while !this.bufs.is_empty() {
+ let n = ready!(Pin::new(&mut this.writer).poll_write_vectored(cx, this.bufs))?;
+ if n == 0 {
+ return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
+ } else {
+ IoSlice::advance_slices(&mut this.bufs, n);
+ }
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::cmp::min;
+ use std::future::Future;
+ use std::io;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
+ use crate::io::{AsyncWrite, AsyncWriteExt, IoSlice};
+ use crate::task::noop_waker;
+
+ /// Create a new writer that reads from at most `n_bufs` buffers and writes
+ /// at most `per_call` bytes (in total) per call to `write`.
+ fn test_writer(n_bufs: usize, per_call: usize) -> TestWriter {
+ TestWriter { n_bufs, per_call, written: Vec::new() }
+ }
+
+ // TODO: maybe move this to the `futures-test` crate?
+ struct TestWriter {
+ n_bufs: usize,
+ per_call: usize,
+ written: Vec<u8>,
+ }
+
+ impl AsyncWrite for TestWriter {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ self.poll_write_vectored(cx, &[IoSlice::new(buf)])
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ bufs: &[IoSlice<'_>],
+ ) -> Poll<io::Result<usize>> {
+ let mut left = self.per_call;
+ let mut written = 0;
+ for buf in bufs.iter().take(self.n_bufs) {
+ let n = min(left, buf.len());
+ self.written.extend_from_slice(&buf[0..n]);
+ left -= n;
+ written += n;
+ }
+ Poll::Ready(Ok(written))
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ // TODO: maybe move this to the `futures-test` crate?
+ macro_rules! assert_poll_ok {
+ ($e:expr, $expected:expr) => {
+ let expected = $expected;
+ match $e {
+ Poll::Ready(Ok(ok)) if ok == expected => {}
+ got => {
+ panic!("unexpected result, got: {:?}, wanted: Ready(Ok({:?}))", got, expected)
+ }
+ }
+ };
+ }
+
+ #[test]
+ fn test_writer_read_from_one_buf() {
+ let waker = noop_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ let mut dst = test_writer(1, 2);
+ let mut dst = Pin::new(&mut dst);
+
+ assert_poll_ok!(dst.as_mut().poll_write(&mut cx, &[]), 0);
+ assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, &[]), 0);
+
+ // Read at most 2 bytes.
+ assert_poll_ok!(dst.as_mut().poll_write(&mut cx, &[1, 1, 1]), 2);
+ let bufs = &[IoSlice::new(&[2, 2, 2])];
+ assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 2);
+
+ // Only read from first buf.
+ let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4, 4])];
+ assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 1);
+
+ assert_eq!(dst.written, &[1, 1, 2, 2, 3]);
+ }
+
+ #[test]
+ fn test_writer_read_from_multiple_bufs() {
+ let waker = noop_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ let mut dst = test_writer(3, 3);
+ let mut dst = Pin::new(&mut dst);
+
+ // Read at most 3 bytes from two buffers.
+ let bufs = &[IoSlice::new(&[1]), IoSlice::new(&[2, 2, 2])];
+ assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 3);
+
+ // Read at most 3 bytes from three buffers.
+ let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4]), IoSlice::new(&[5, 5])];
+ assert_poll_ok!(dst.as_mut().poll_write_vectored(&mut cx, bufs), 3);
+
+ assert_eq!(dst.written, &[1, 2, 2, 3, 4, 5]);
+ }
+
+ #[test]
+ fn test_write_all_vectored() {
+ let waker = noop_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ #[rustfmt::skip] // Becomes unreadable otherwise.
+ let tests: Vec<(_, &'static [u8])> = vec![
+ (vec![], &[]),
+ (vec![IoSlice::new(&[]), IoSlice::new(&[])], &[]),
+ (vec![IoSlice::new(&[1])], &[1]),
+ (vec![IoSlice::new(&[1, 2])], &[1, 2]),
+ (vec![IoSlice::new(&[1, 2, 3])], &[1, 2, 3]),
+ (vec![IoSlice::new(&[1, 2, 3, 4])], &[1, 2, 3, 4]),
+ (vec![IoSlice::new(&[1, 2, 3, 4, 5])], &[1, 2, 3, 4, 5]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2])], &[1, 2]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2])], &[1, 1, 2, 2]),
+ (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 1, 2, 2, 2]),
+ (vec![IoSlice::new(&[1, 1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 1, 2, 2, 2, 2]),
+ (vec![IoSlice::new(&[1]), IoSlice::new(&[2]), IoSlice::new(&[3])], &[1, 2, 3]),
+ (vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3])], &[1, 1, 2, 2, 3, 3]),
+ (vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 1, 1, 2, 2, 2, 3, 3, 3]),
+ ];
+
+ for (mut input, wanted) in tests {
+ let mut dst = test_writer(2, 2);
+ {
+ let mut future = dst.write_all_vectored(&mut *input);
+ match Pin::new(&mut future).poll(&mut cx) {
+ Poll::Ready(Ok(())) => {}
+ other => panic!("unexpected result polling future: {:?}", other),
+ }
+ }
+ assert_eq!(&*dst.written, &*wanted);
+ }
+ }
+}
diff --git a/vendor/futures-util/src/io/write_vectored.rs b/vendor/futures-util/src/io/write_vectored.rs
new file mode 100644
index 000000000..14a01d730
--- /dev/null
+++ b/vendor/futures-util/src/io/write_vectored.rs
@@ -0,0 +1,30 @@
+use crate::io::AsyncWrite;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use std::io::{self, IoSlice};
+use std::pin::Pin;
+
+/// Future for the [`write_vectored`](super::AsyncWriteExt::write_vectored) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct WriteVectored<'a, W: ?Sized> {
+ writer: &'a mut W,
+ bufs: &'a [IoSlice<'a>],
+}
+
+impl<W: ?Sized + Unpin> Unpin for WriteVectored<'_, W> {}
+
+impl<'a, W: AsyncWrite + ?Sized + Unpin> WriteVectored<'a, W> {
+ pub(super) fn new(writer: &'a mut W, bufs: &'a [IoSlice<'a>]) -> Self {
+ Self { writer, bufs }
+ }
+}
+
+impl<W: AsyncWrite + ?Sized + Unpin> Future for WriteVectored<'_, W> {
+ type Output = io::Result<usize>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ Pin::new(&mut this.writer).poll_write_vectored(cx, this.bufs)
+ }
+}
diff --git a/vendor/futures-util/src/lib.rs b/vendor/futures-util/src/lib.rs
new file mode 100644
index 000000000..9a10c93c9
--- /dev/null
+++ b/vendor/futures-util/src/lib.rs
@@ -0,0 +1,337 @@
+//! Combinators and utilities for working with `Future`s, `Stream`s, `Sink`s,
+//! and the `AsyncRead` and `AsyncWrite` traits.
+
+#![cfg_attr(feature = "write-all-vectored", feature(io_slice_advance))]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+ missing_debug_implementations,
+ missing_docs,
+ rust_2018_idioms,
+ single_use_lifetimes,
+ unreachable_pub
+)]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[cfg(all(feature = "bilock", not(feature = "unstable")))]
+compile_error!("The `bilock` feature requires the `unstable` feature as an explicit opt-in to unstable features");
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+// Macro re-exports
+pub use futures_core::ready;
+pub use pin_utils::pin_mut;
+
+#[cfg(feature = "async-await")]
+#[macro_use]
+mod async_await;
+#[cfg(feature = "async-await")]
+#[doc(hidden)]
+pub use self::async_await::*;
+
+// Not public API.
+#[cfg(feature = "async-await")]
+#[doc(hidden)]
+pub mod __private {
+ pub use crate::*;
+ pub use core::{
+ option::Option::{self, None, Some},
+ pin::Pin,
+ result::Result::{Err, Ok},
+ };
+
+ pub mod async_await {
+ pub use crate::async_await::*;
+ }
+}
+
+#[cfg(feature = "sink")]
+macro_rules! delegate_sink {
+ ($field:ident, $item:ty) => {
+ fn poll_ready(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<Result<(), Self::Error>> {
+ self.project().$field.poll_ready(cx)
+ }
+
+ fn start_send(self: core::pin::Pin<&mut Self>, item: $item) -> Result<(), Self::Error> {
+ self.project().$field.start_send(item)
+ }
+
+ fn poll_flush(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<Result<(), Self::Error>> {
+ self.project().$field.poll_flush(cx)
+ }
+
+ fn poll_close(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<Result<(), Self::Error>> {
+ self.project().$field.poll_close(cx)
+ }
+ };
+}
+
+macro_rules! delegate_future {
+ ($field:ident) => {
+ fn poll(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<Self::Output> {
+ self.project().$field.poll(cx)
+ }
+ };
+}
+
+macro_rules! delegate_stream {
+ ($field:ident) => {
+ fn poll_next(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<Option<Self::Item>> {
+ self.project().$field.poll_next(cx)
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.$field.size_hint()
+ }
+ };
+}
+
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+macro_rules! delegate_async_write {
+ ($field:ident) => {
+ fn poll_write(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ buf: &[u8],
+ ) -> core::task::Poll<std::io::Result<usize>> {
+ self.project().$field.poll_write(cx, buf)
+ }
+ fn poll_write_vectored(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> core::task::Poll<std::io::Result<usize>> {
+ self.project().$field.poll_write_vectored(cx, bufs)
+ }
+ fn poll_flush(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<std::io::Result<()>> {
+ self.project().$field.poll_flush(cx)
+ }
+ fn poll_close(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<std::io::Result<()>> {
+ self.project().$field.poll_close(cx)
+ }
+ };
+}
+
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+macro_rules! delegate_async_read {
+ ($field:ident) => {
+ fn poll_read(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ buf: &mut [u8],
+ ) -> core::task::Poll<std::io::Result<usize>> {
+ self.project().$field.poll_read(cx, buf)
+ }
+
+ fn poll_read_vectored(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ bufs: &mut [std::io::IoSliceMut<'_>],
+ ) -> core::task::Poll<std::io::Result<usize>> {
+ self.project().$field.poll_read_vectored(cx, bufs)
+ }
+ };
+}
+
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+macro_rules! delegate_async_buf_read {
+ ($field:ident) => {
+ fn poll_fill_buf(
+ self: core::pin::Pin<&mut Self>,
+ cx: &mut core::task::Context<'_>,
+ ) -> core::task::Poll<std::io::Result<&[u8]>> {
+ self.project().$field.poll_fill_buf(cx)
+ }
+
+ fn consume(self: core::pin::Pin<&mut Self>, amt: usize) {
+ self.project().$field.consume(amt)
+ }
+ };
+}
+
+macro_rules! delegate_access_inner {
+ ($field:ident, $inner:ty, ($($ind:tt)*)) => {
+ /// Acquires a reference to the underlying sink or stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &$inner {
+ (&self.$field) $($ind get_ref())*
+ }
+
+ /// Acquires a mutable reference to the underlying sink or stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// sink or stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut $inner {
+ (&mut self.$field) $($ind get_mut())*
+ }
+
+ /// Acquires a pinned mutable reference to the underlying sink or stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// sink or stream which may otherwise confuse this combinator.
+ pub fn get_pin_mut(self: core::pin::Pin<&mut Self>) -> core::pin::Pin<&mut $inner> {
+ self.project().$field $($ind get_pin_mut())*
+ }
+
+ /// Consumes this combinator, returning the underlying sink or stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> $inner {
+ self.$field $($ind into_inner())*
+ }
+ }
+}
+
+macro_rules! delegate_all {
+ (@trait Future $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> futures_core::future::Future for $name<$($arg),*> where $t: futures_core::future::Future $(, $($bound)*)* {
+ type Output = <$t as futures_core::future::Future>::Output;
+
+ delegate_future!(inner);
+ }
+ };
+ (@trait FusedFuture $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> futures_core::future::FusedFuture for $name<$($arg),*> where $t: futures_core::future::FusedFuture $(, $($bound)*)* {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_terminated()
+ }
+ }
+ };
+ (@trait Stream $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> futures_core::stream::Stream for $name<$($arg),*> where $t: futures_core::stream::Stream $(, $($bound)*)* {
+ type Item = <$t as futures_core::stream::Stream>::Item;
+
+ delegate_stream!(inner);
+ }
+ };
+ (@trait FusedStream $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> futures_core::stream::FusedStream for $name<$($arg),*> where $t: futures_core::stream::FusedStream $(, $($bound)*)* {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_terminated()
+ }
+ }
+ };
+ (@trait Sink $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ #[cfg(feature = "sink")]
+ impl<_Item, $($arg),*> futures_sink::Sink<_Item> for $name<$($arg),*> where $t: futures_sink::Sink<_Item> $(, $($bound)*)* {
+ type Error = <$t as futures_sink::Sink<_Item>>::Error;
+
+ delegate_sink!(inner, _Item);
+ }
+ };
+ (@trait Debug $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> core::fmt::Debug for $name<$($arg),*> where $t: core::fmt::Debug $(, $($bound)*)* {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ core::fmt::Debug::fmt(&self.inner, f)
+ }
+ }
+ };
+ (@trait AccessInner[$inner:ty, ($($ind:tt)*)] $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> $name<$($arg),*> $(where $($bound)*)* {
+ delegate_access_inner!(inner, $inner, ($($ind)*));
+ }
+ };
+ (@trait New[|$($param:ident: $paramt:ty),*| $cons:expr] $name:ident < $($arg:ident),* > ($t:ty) $(where $($bound:tt)*)*) => {
+ impl<$($arg),*> $name<$($arg),*> $(where $($bound)*)* {
+ pub(crate) fn new($($param: $paramt),*) -> Self {
+ Self { inner: $cons }
+ }
+ }
+ };
+ ($(#[$attr:meta])* $name:ident<$($arg:ident),*>($t:ty) : $ftrait:ident $([$($targs:tt)*])* $({$($item:tt)*})* $(where $($bound:tt)*)*) => {
+ pin_project_lite::pin_project! {
+ #[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"]
+ $(#[$attr])*
+ pub struct $name< $($arg),* > $(where $($bound)*)* { #[pin] inner: $t }
+ }
+
+ impl<$($arg),*> $name< $($arg),* > $(where $($bound)*)* {
+ $($($item)*)*
+ }
+
+ delegate_all!(@trait $ftrait $([$($targs)*])* $name<$($arg),*>($t) $(where $($bound)*)*);
+ };
+ ($(#[$attr:meta])* $name:ident<$($arg:ident),*>($t:ty) : $ftrait:ident $([$($ftargs:tt)*])* + $strait:ident $([$($stargs:tt)*])* $(+ $trait:ident $([$($targs:tt)*])*)* $({$($item:tt)*})* $(where $($bound:tt)*)*) => {
+ delegate_all!($(#[$attr])* $name<$($arg),*>($t) : $strait $([$($stargs)*])* $(+ $trait $([$($targs)*])*)* $({$($item)*})* $(where $($bound)*)*);
+
+ delegate_all!(@trait $ftrait $([$($ftargs)*])* $name<$($arg),*>($t) $(where $($bound)*)*);
+ };
+}
+
+pub mod future;
+#[doc(no_inline)]
+pub use crate::future::{Future, FutureExt, TryFuture, TryFutureExt};
+
+pub mod stream;
+#[doc(no_inline)]
+pub use crate::stream::{Stream, StreamExt, TryStream, TryStreamExt};
+
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub mod sink;
+#[cfg(feature = "sink")]
+#[doc(no_inline)]
+pub use crate::sink::{Sink, SinkExt};
+
+pub mod task;
+
+pub mod never;
+
+#[cfg(feature = "compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+pub mod compat;
+
+#[cfg(feature = "io")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
+#[cfg(feature = "std")]
+pub mod io;
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+#[doc(no_inline)]
+pub use crate::io::{
+ AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite,
+ AsyncWriteExt,
+};
+
+#[cfg(feature = "alloc")]
+pub mod lock;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod abortable;
+
+mod fns;
+mod unfold_state;
diff --git a/vendor/futures-util/src/lock/bilock.rs b/vendor/futures-util/src/lock/bilock.rs
new file mode 100644
index 000000000..2f51ae7c9
--- /dev/null
+++ b/vendor/futures-util/src/lock/bilock.rs
@@ -0,0 +1,276 @@
+//! Futures-powered synchronization primitives.
+
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+use core::pin::Pin;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering::SeqCst;
+#[cfg(feature = "bilock")]
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll, Waker};
+
+/// A type of futures-powered synchronization primitive which is a mutex between
+/// two possible owners.
+///
+/// This primitive is not as generic as a full-blown mutex but is sufficient for
+/// many use cases where there are only two possible owners of a resource. The
+/// implementation of `BiLock` can be more optimized for just the two possible
+/// owners.
+///
+/// Note that it's possible to use this lock through a poll-style interface with
+/// the `poll_lock` method but you can also use it as a future with the `lock`
+/// method that consumes a `BiLock` and returns a future that will resolve when
+/// it's locked.
+///
+/// A `BiLock` is typically used for "split" operations where data which serves
+/// two purposes wants to be split into two to be worked with separately. For
+/// example a TCP stream could be both a reader and a writer or a framing layer
+/// could be both a stream and a sink for messages. A `BiLock` enables splitting
+/// these two and then using each independently in a futures-powered fashion.
+///
+/// This type is only available when the `bilock` feature of this
+/// library is activated.
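+///
+/// # Examples
+///
+/// A minimal sketch of splitting a value between two handles and locking it,
+/// assuming the `bilock` feature is enabled through the `futures` facade:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::lock::BiLock;
+///
+/// let (a, b) = BiLock::new(0u32);
+/// {
+/// // Lock through handle `a`; the guard unlocks when dropped.
+/// let mut guard = a.lock().await;
+/// *guard += 1;
+/// }
+/// // Reunite the two halves and recover the inner value.
+/// assert_eq!(a.reunite(b).unwrap(), 1);
+/// # });
+/// ```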
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct BiLock<T> {
+ arc: Arc<Inner<T>>,
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+ state: AtomicUsize,
+ value: Option<UnsafeCell<T>>,
+}
+
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl<T> BiLock<T> {
+ /// Creates a new `BiLock` protecting the provided data.
+ ///
+ /// Two handles to the lock are returned, and these are the only two handles
+ /// that will ever be available to the lock. These can then be sent to separate
+ /// tasks to be managed there.
+ ///
+ /// The data behind the bilock is considered to be pinned, which allows `Pin`
+ /// references to locked data. However, this means that the locked value
+ /// will only be available through `Pin<&mut T>` (not `&mut T`) unless `T` is `Unpin`.
+ /// Similarly, reuniting the lock and extracting the inner value is only
+ /// possible when `T` is `Unpin`.
+ pub fn new(t: T) -> (Self, Self) {
+ let arc = Arc::new(Inner { state: AtomicUsize::new(0), value: Some(UnsafeCell::new(t)) });
+
+ (Self { arc: arc.clone() }, Self { arc })
+ }
+
+ /// Attempt to acquire this lock, returning `Pending` if it can't be
+ /// acquired.
+ ///
+ /// This function will acquire the lock in a nonblocking fashion, returning
+ /// immediately if the lock is already held. If the lock is successfully
+ /// acquired then `Poll::Ready` is returned with a value that represents
+ /// the locked value (and can be used to access the protected data). The
+ /// lock is unlocked when the returned `BiLockGuard` is dropped.
+ ///
+ /// If the lock is already held then this function will return
+ /// `Poll::Pending`. In this case the current task will also be scheduled
+ /// to receive a notification when the lock would otherwise become
+ /// available.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called outside the context of a future's
+ /// task.
+ pub fn poll_lock(&self, cx: &mut Context<'_>) -> Poll<BiLockGuard<'_, T>> {
+ let mut waker = None;
+ loop {
+ match self.arc.state.swap(1, SeqCst) {
+ // Woohoo, we grabbed the lock!
+ 0 => return Poll::Ready(BiLockGuard { bilock: self }),
+
+ // Oops, someone else has locked the lock
+ 1 => {}
+
+ // A task was previously blocked on this lock, likely our task,
+ // so we need to update that task.
+ n => unsafe {
+ let mut prev = Box::from_raw(n as *mut Waker);
+ *prev = cx.waker().clone();
+ waker = Some(prev);
+ },
+ }
+
+ // type ascription for safety's sake!
+ let me: Box<Waker> = waker.take().unwrap_or_else(|| Box::new(cx.waker().clone()));
+ let me = Box::into_raw(me) as usize;
+
+ match self.arc.state.compare_exchange(1, me, SeqCst, SeqCst) {
+ // The lock is still locked, but we've now parked ourselves, so
+ // just report that we're scheduled to receive a notification.
+ Ok(_) => return Poll::Pending,
+
+ // Oops, looks like the lock was unlocked after our swap above
+ // and before the compare_exchange. Deallocate what we just
+ // allocated and go through the loop again.
+ Err(0) => unsafe {
+ waker = Some(Box::from_raw(me as *mut Waker));
+ },
+
+ // The top of this loop set the previous state to 1, so if we
+ // failed the CAS above then it's because the previous value was
+ // *not* zero or one. This indicates that a task was blocked,
+ // but we're trying to acquire the lock and there's only one
+ // other reference to the lock, so it should be impossible for
+ // that task to ever block itself.
+ Err(n) => panic!("invalid state: {}", n),
+ }
+ }
+ }
+
+ /// Perform a "blocking lock" of this lock, consuming this lock handle and
+ /// returning a future to the acquired lock.
+ ///
+ /// This function consumes the `BiLock<T>` and returns a sentinel future,
+ /// `BiLockAcquire<T>`. The returned future will resolve to
+ /// `BiLockAcquired<T>` which represents a locked lock similarly to
+ /// `BiLockGuard<T>`.
+ ///
+ /// Note that the returned future will never resolve to an error.
+ #[cfg(feature = "bilock")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+ pub fn lock(&self) -> BiLockAcquire<'_, T> {
+ BiLockAcquire { bilock: self }
+ }
+
+ /// Attempts to put the two "halves" of a `BiLock<T>` back together and
+ /// recover the original value. Succeeds only if the two `BiLock<T>`s
+ /// originated from the same call to `BiLock::new`.
+ pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>>
+ where
+ T: Unpin,
+ {
+ if Arc::ptr_eq(&self.arc, &other.arc) {
+ drop(other);
+ let inner = Arc::try_unwrap(self.arc)
+ .ok()
+ .expect("futures: try_unwrap failed in BiLock<T>::reunite");
+ Ok(unsafe { inner.into_value() })
+ } else {
+ Err(ReuniteError(self, other))
+ }
+ }
+
+ fn unlock(&self) {
+ match self.arc.state.swap(0, SeqCst) {
+ // we've locked the lock, shouldn't be possible for us to see an
+ // unlocked lock.
+ 0 => panic!("invalid unlocked state"),
+
+ // Ok, no one else tried to get the lock, we're done.
+ 1 => {}
+
+ // Another task has parked itself on this lock, so wake it up as
+ // it's now its turn.
+ n => unsafe {
+ Box::from_raw(n as *mut Waker).wake();
+ },
+ }
+ }
+}
+
+impl<T: Unpin> Inner<T> {
+ unsafe fn into_value(mut self) -> T {
+ self.value.take().unwrap().into_inner()
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.state.load(SeqCst), 0);
+ }
+}
+
+/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
+/// thus could not be `reunite`d.
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("ReuniteError").field(&"...").finish()
+ }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "tried to reunite two BiLocks that don't form a pair")
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: core::any::Any> std::error::Error for ReuniteError<T> {}
+
+/// Returned RAII guard from the `poll_lock` method.
+///
+/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
+/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
+/// unlocked.
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub struct BiLockGuard<'a, T> {
+ bilock: &'a BiLock<T>,
+}
+
+impl<T> Deref for BiLockGuard<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.bilock.arc.value.as_ref().unwrap().get() }
+ }
+}
+
+impl<T: Unpin> DerefMut for BiLockGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.bilock.arc.value.as_ref().unwrap().get() }
+ }
+}
+
+impl<T> BiLockGuard<'_, T> {
+ /// Get a mutable pinned reference to the locked value.
+ pub fn as_pin_mut(&mut self) -> Pin<&mut T> {
+ // Safety: we never allow moving a !Unpin value out of a bilock, nor
+ // allow mutable access to it
+ unsafe { Pin::new_unchecked(&mut *self.bilock.arc.value.as_ref().unwrap().get()) }
+ }
+}
+
+impl<T> Drop for BiLockGuard<'_, T> {
+ fn drop(&mut self) {
+ self.bilock.unlock();
+ }
+}
+
+/// Future returned by `BiLock::lock` which will resolve when the lock is
+/// acquired.
+#[cfg(feature = "bilock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+#[derive(Debug)]
+pub struct BiLockAcquire<'a, T> {
+ bilock: &'a BiLock<T>,
+}
+
+// Pinning is never projected to fields
+#[cfg(feature = "bilock")]
+impl<T> Unpin for BiLockAcquire<'_, T> {}
+
+#[cfg(feature = "bilock")]
+impl<'a, T> Future for BiLockAcquire<'a, T> {
+ type Output = BiLockGuard<'a, T>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.bilock.poll_lock(cx)
+ }
+}
diff --git a/vendor/futures-util/src/lock/mod.rs b/vendor/futures-util/src/lock/mod.rs
new file mode 100644
index 000000000..cf374c016
--- /dev/null
+++ b/vendor/futures-util/src/lock/mod.rs
@@ -0,0 +1,25 @@
+//! Futures-powered synchronization primitives.
+//!
+//! This module is only available when the `std` or `alloc` feature of this
+//! library is activated, and it is activated by default.
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "std")]
+mod mutex;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "std")]
+pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard, MutexLockFuture};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(any(feature = "bilock", feature = "sink", feature = "io"))]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+#[cfg_attr(not(feature = "bilock"), allow(unreachable_pub))]
+mod bilock;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(any(feature = "sink", feature = "io"))]
+#[cfg(not(feature = "bilock"))]
+pub(crate) use self::bilock::BiLock;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "bilock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "bilock")))]
+pub use self::bilock::{BiLock, BiLockAcquire, BiLockGuard, ReuniteError};
diff --git a/vendor/futures-util/src/lock/mutex.rs b/vendor/futures-util/src/lock/mutex.rs
new file mode 100644
index 000000000..85dcb1537
--- /dev/null
+++ b/vendor/futures-util/src/lock/mutex.rs
@@ -0,0 +1,406 @@
+use futures_core::future::{FusedFuture, Future};
+use futures_core::task::{Context, Poll, Waker};
+use slab::Slab;
+use std::cell::UnsafeCell;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use std::pin::Pin;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Mutex as StdMutex;
+use std::{fmt, mem};
+
+/// A futures-aware mutex.
+///
+/// # Fairness
+///
+/// This mutex provides no fairness guarantees. Tasks may not acquire the mutex
+/// in the order that they requested the lock, and it's possible for a single task
+/// which repeatedly takes the lock to starve other tasks, which may be left waiting
+/// indefinitely.
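+///
+/// # Examples
+///
+/// A minimal sketch of locking the mutex from async code:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::lock::Mutex;
+///
+/// let mutex = Mutex::new(0);
+/// {
+/// let mut guard = mutex.lock().await;
+/// *guard += 1;
+/// }
+/// assert_eq!(mutex.into_inner(), 1);
+/// # });
+/// ```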
+pub struct Mutex<T: ?Sized> {
+ state: AtomicUsize,
+ waiters: StdMutex<Slab<Waiter>>,
+ value: UnsafeCell<T>,
+}
+
+impl<T: ?Sized> fmt::Debug for Mutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let state = self.state.load(Ordering::SeqCst);
+ f.debug_struct("Mutex")
+ .field("is_locked", &((state & IS_LOCKED) != 0))
+ .field("has_waiters", &((state & HAS_WAITERS) != 0))
+ .finish()
+ }
+}
+
+impl<T> From<T> for Mutex<T> {
+ fn from(t: T) -> Self {
+ Self::new(t)
+ }
+}
+
+impl<T: Default> Default for Mutex<T> {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+enum Waiter {
+ Waiting(Waker),
+ Woken,
+}
+
+impl Waiter {
+ fn register(&mut self, waker: &Waker) {
+ match self {
+ Self::Waiting(w) if waker.will_wake(w) => {}
+ _ => *self = Self::Waiting(waker.clone()),
+ }
+ }
+
+ fn wake(&mut self) {
+ match mem::replace(self, Self::Woken) {
+ Self::Waiting(waker) => waker.wake(),
+ Self::Woken => {}
+ }
+ }
+}
+
+const IS_LOCKED: usize = 1 << 0;
+const HAS_WAITERS: usize = 1 << 1;
+
+impl<T> Mutex<T> {
+ /// Creates a new futures-aware mutex.
+ pub fn new(t: T) -> Self {
+ Self {
+ state: AtomicUsize::new(0),
+ waiters: StdMutex::new(Slab::new()),
+ value: UnsafeCell::new(t),
+ }
+ }
+
+ /// Consumes this mutex, returning the underlying data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::lock::Mutex;
+ ///
+ /// let mutex = Mutex::new(0);
+ /// assert_eq!(mutex.into_inner(), 0);
+ /// ```
+ pub fn into_inner(self) -> T {
+ self.value.into_inner()
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Attempt to acquire the lock immediately.
+ ///
+ /// If the lock is currently held, this will return `None`.
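+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the non-blocking path:
+ ///
+ /// ```
+ /// use futures::lock::Mutex;
+ ///
+ /// let mutex = Mutex::new(0);
+ /// let guard = mutex.try_lock().unwrap();
+ /// // The lock is held, so a second attempt fails.
+ /// assert!(mutex.try_lock().is_none());
+ /// drop(guard);
+ /// assert!(mutex.try_lock().is_some());
+ /// ```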
+ pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+ let old_state = self.state.fetch_or(IS_LOCKED, Ordering::Acquire);
+ if (old_state & IS_LOCKED) == 0 {
+ Some(MutexGuard { mutex: self })
+ } else {
+ None
+ }
+ }
+
+ /// Acquire the lock asynchronously.
+ ///
+ /// This method returns a future that will resolve once the lock has been
+ /// successfully acquired.
+ pub fn lock(&self) -> MutexLockFuture<'_, T> {
+ MutexLockFuture { mutex: Some(self), wait_key: WAIT_KEY_NONE }
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no locks exist.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::Mutex;
+ ///
+ /// let mut mutex = Mutex::new(0);
+ /// *mutex.get_mut() = 10;
+ /// assert_eq!(*mutex.lock().await, 10);
+ /// # });
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ // We know statically that there are no other references to `self`, so
+ // there's no need to lock the inner mutex.
+ unsafe { &mut *self.value.get() }
+ }
+
+ fn remove_waker(&self, wait_key: usize, wake_another: bool) {
+ if wait_key != WAIT_KEY_NONE {
+ let mut waiters = self.waiters.lock().unwrap();
+ match waiters.remove(wait_key) {
+ Waiter::Waiting(_) => {}
+ Waiter::Woken => {
+ // We were awoken, but then dropped before we could
+ // wake up to acquire the lock. Wake up another
+ // waiter.
+ if wake_another {
+ if let Some((_i, waiter)) = waiters.iter_mut().next() {
+ waiter.wake();
+ }
+ }
+ }
+ }
+ if waiters.is_empty() {
+ self.state.fetch_and(!HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock
+ }
+ }
+ }
+
+ // Unlocks the mutex. Called by MutexGuard and MappedMutexGuard when they are
+ // dropped.
+ fn unlock(&self) {
+ let old_state = self.state.fetch_and(!IS_LOCKED, Ordering::AcqRel);
+ if (old_state & HAS_WAITERS) != 0 {
+ let mut waiters = self.waiters.lock().unwrap();
+ if let Some((_i, waiter)) = waiters.iter_mut().next() {
+ waiter.wake();
+ }
+ }
+ }
+}
+
+// Sentinel for when no slot in the `Slab` has been dedicated to this object.
+const WAIT_KEY_NONE: usize = usize::max_value();
+
+/// A future which resolves when the target mutex has been successfully acquired.
+pub struct MutexLockFuture<'a, T: ?Sized> {
+ // `None` indicates that the mutex was successfully acquired.
+ mutex: Option<&'a Mutex<T>>,
+ wait_key: usize,
+}
+
+impl<T: ?Sized> fmt::Debug for MutexLockFuture<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MutexLockFuture")
+ .field("was_acquired", &self.mutex.is_none())
+ .field("mutex", &self.mutex)
+ .field(
+ "wait_key",
+ &(if self.wait_key == WAIT_KEY_NONE { None } else { Some(self.wait_key) }),
+ )
+ .finish()
+ }
+}
+
+impl<T: ?Sized> FusedFuture for MutexLockFuture<'_, T> {
+ fn is_terminated(&self) -> bool {
+ self.mutex.is_none()
+ }
+}
+
+impl<'a, T: ?Sized> Future for MutexLockFuture<'a, T> {
+ type Output = MutexGuard<'a, T>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mutex = self.mutex.expect("polled MutexLockFuture after completion");
+
+ if let Some(lock) = mutex.try_lock() {
+ mutex.remove_waker(self.wait_key, false);
+ self.mutex = None;
+ return Poll::Ready(lock);
+ }
+
+ {
+ let mut waiters = mutex.waiters.lock().unwrap();
+ if self.wait_key == WAIT_KEY_NONE {
+ self.wait_key = waiters.insert(Waiter::Waiting(cx.waker().clone()));
+ if waiters.len() == 1 {
+ mutex.state.fetch_or(HAS_WAITERS, Ordering::Relaxed); // released by mutex unlock
+ }
+ } else {
+ waiters[self.wait_key].register(cx.waker());
+ }
+ }
+
+ // Ensure that we haven't raced `MutexGuard::drop`'s unlock path by
+ // attempting to acquire the lock again.
+ if let Some(lock) = mutex.try_lock() {
+ mutex.remove_waker(self.wait_key, false);
+ self.mutex = None;
+ return Poll::Ready(lock);
+ }
+
+ Poll::Pending
+ }
+}
+
+impl<T: ?Sized> Drop for MutexLockFuture<'_, T> {
+ fn drop(&mut self) {
+ if let Some(mutex) = self.mutex {
+ // This future was dropped before it acquired the mutex.
+ //
+ // Remove ourselves from the map, waking up another waiter if we
+ // had been awoken to acquire the lock.
+ mutex.remove_waker(self.wait_key, true);
+ }
+ }
+}
+
+/// An RAII guard returned by the `lock` and `try_lock` methods.
+/// When this structure is dropped (falls out of scope), the lock will be
+/// unlocked.
+pub struct MutexGuard<'a, T: ?Sized> {
+ mutex: &'a Mutex<T>,
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+ /// Returns a locked view over a portion of the locked data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::{Mutex, MutexGuard};
+ ///
+ /// let data = Mutex::new(Some("value".to_string()));
+ /// {
+ /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap());
+ /// assert_eq!(&*locked_str, "value");
+ /// }
+ /// # });
+ /// ```
+ #[inline]
+ pub fn map<U: ?Sized, F>(this: Self, f: F) -> MappedMutexGuard<'a, T, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let mutex = this.mutex;
+ let value = f(unsafe { &mut *this.mutex.value.get() });
+ // Don't run the `drop` method for MutexGuard. The ownership of the underlying
+ // locked state is being moved to the returned MappedMutexGuard.
+ mem::forget(this);
+ MappedMutexGuard { mutex, value, _marker: PhantomData }
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MutexGuard").field("value", &&**self).field("mutex", &self.mutex).finish()
+ }
+}
+
+impl<T: ?Sized> Drop for MutexGuard<'_, T> {
+ fn drop(&mut self) {
+ self.mutex.unlock()
+ }
+}
+
+impl<T: ?Sized> Deref for MutexGuard<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.mutex.value.get() }
+ }
+}
+
+impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.mutex.value.get() }
+ }
+}
+
+/// An RAII guard returned by the `MutexGuard::map` and `MappedMutexGuard::map` methods.
+/// When this structure is dropped (falls out of scope), the lock will be unlocked.
+pub struct MappedMutexGuard<'a, T: ?Sized, U: ?Sized> {
+ mutex: &'a Mutex<T>,
+ value: *mut U,
+ _marker: PhantomData<&'a mut U>,
+}
+
+impl<'a, T: ?Sized, U: ?Sized> MappedMutexGuard<'a, T, U> {
+ /// Returns a locked view over a portion of the locked data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::lock::{MappedMutexGuard, Mutex, MutexGuard};
+ ///
+ /// let data = Mutex::new(Some("value".to_string()));
+ /// {
+ /// let locked_str = MutexGuard::map(data.lock().await, |opt| opt.as_mut().unwrap());
+ /// let locked_char = MappedMutexGuard::map(locked_str, |s| s.get_mut(0..1).unwrap());
+ /// assert_eq!(&*locked_char, "v");
+ /// }
+ /// # });
+ /// ```
+ #[inline]
+ pub fn map<V: ?Sized, F>(this: Self, f: F) -> MappedMutexGuard<'a, T, V>
+ where
+ F: FnOnce(&mut U) -> &mut V,
+ {
+ let mutex = this.mutex;
+ let value = f(unsafe { &mut *this.value });
+ // Don't run the `drop` method for MappedMutexGuard. The ownership of the underlying
+ // locked state is being moved to the returned MappedMutexGuard.
+ mem::forget(this);
+ MappedMutexGuard { mutex, value, _marker: PhantomData }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized + fmt::Debug> fmt::Debug for MappedMutexGuard<'_, T, U> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("MappedMutexGuard")
+ .field("value", &&**self)
+ .field("mutex", &self.mutex)
+ .finish()
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Drop for MappedMutexGuard<'_, T, U> {
+ fn drop(&mut self) {
+ self.mutex.unlock()
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Deref for MappedMutexGuard<'_, T, U> {
+ type Target = U;
+ fn deref(&self) -> &U {
+ unsafe { &*self.value }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> DerefMut for MappedMutexGuard<'_, T, U> {
+ fn deref_mut(&mut self) -> &mut U {
+ unsafe { &mut *self.value }
+ }
+}
+
+// Mutexes can be moved freely between threads and acquired on any thread so long
+// as the inner value can be safely sent between threads.
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+// It's safe to switch which thread the acquire is being attempted on so long as
+// `T` can be accessed on that thread.
+unsafe impl<T: ?Sized + Send> Send for MutexLockFuture<'_, T> {}
+// doesn't have any interesting `&self` methods (only Debug)
+unsafe impl<T: ?Sized> Sync for MutexLockFuture<'_, T> {}
+
+// Safe to send since we don't track any thread-specific details-- the inner
+// lock is essentially spinlock-equivalent (attempt to flip an atomic bool)
+unsafe impl<T: ?Sized + Send> Send for MutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
+unsafe impl<T: ?Sized + Send, U: ?Sized + Send> Send for MappedMutexGuard<'_, T, U> {}
+unsafe impl<T: ?Sized + Sync, U: ?Sized + Sync> Sync for MappedMutexGuard<'_, T, U> {}
+
+#[test]
+fn test_mutex_guard_debug_not_recurse() {
+ let mutex = Mutex::new(42);
+ let guard = mutex.try_lock().unwrap();
+ let _ = format!("{:?}", guard);
+ let guard = MutexGuard::map(guard, |n| n);
+ let _ = format!("{:?}", guard);
+}
diff --git a/vendor/futures-util/src/never.rs b/vendor/futures-util/src/never.rs
new file mode 100644
index 000000000..e811f97df
--- /dev/null
+++ b/vendor/futures-util/src/never.rs
@@ -0,0 +1,18 @@
+//! This module contains the `Never` type.
+//!
+//! Values of this type can never be created and will never exist.
+
+/// A type with no possible values.
+///
+/// This is used to indicate values which can never be created, such as the
+/// error type of infallible futures.
+///
+/// This type is a stable equivalent to the `!` type from `std`.
+///
+/// This is currently an alias for [`std::convert::Infallible`], but in
+/// the future it may be an alias for [`!`][never].
+/// See ["Future compatibility" section of `std::convert::Infallible`][infallible] for more.
+///
+/// [never]: https://doc.rust-lang.org/nightly/std/primitive.never.html
+/// [infallible]: https://doc.rust-lang.org/nightly/std/convert/enum.Infallible.html#future-compatibility
+pub type Never = core::convert::Infallible;
diff --git a/vendor/futures-util/src/sink/buffer.rs b/vendor/futures-util/src/sink/buffer.rs
new file mode 100644
index 000000000..4aa6c3603
--- /dev/null
+++ b/vendor/futures-util/src/sink/buffer.rs
@@ -0,0 +1,105 @@
+use alloc::collections::VecDeque;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`buffer`](super::SinkExt::buffer) method.
+ #[derive(Debug)]
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct Buffer<Si, Item> {
+ #[pin]
+ sink: Si,
+ buf: VecDeque<Item>,
+
+ // Track capacity separately from the `VecDeque`, which may be rounded up
+ capacity: usize,
+ }
+}
+
+impl<Si: Sink<Item>, Item> Buffer<Si, Item> {
+ pub(super) fn new(sink: Si, capacity: usize) -> Self {
+ Self { sink, buf: VecDeque::with_capacity(capacity), capacity }
+ }
+
+ delegate_access_inner!(sink, Si, ());
+
+ fn try_empty_buffer(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Si::Error>> {
+ let mut this = self.project();
+ ready!(this.sink.as_mut().poll_ready(cx))?;
+ while let Some(item) = this.buf.pop_front() {
+ this.sink.as_mut().start_send(item)?;
+ if !this.buf.is_empty() {
+ ready!(this.sink.as_mut().poll_ready(cx))?;
+ }
+ }
+ Poll::Ready(Ok(()))
+ }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, Item> Stream for Buffer<S, Item>
+where
+ S: Sink<Item> + Stream,
+{
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
+ self.project().sink.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.sink.size_hint()
+ }
+}
+
+impl<S, Item> FusedStream for Buffer<S, Item>
+where
+ S: Sink<Item> + FusedStream,
+{
+ fn is_terminated(&self) -> bool {
+ self.sink.is_terminated()
+ }
+}
+
+impl<Si: Sink<Item>, Item> Sink<Item> for Buffer<Si, Item> {
+ type Error = Si::Error;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ if self.capacity == 0 {
+ return self.project().sink.poll_ready(cx);
+ }
+
+ let _ = self.as_mut().try_empty_buffer(cx)?;
+
+ if self.buf.len() >= self.capacity {
+ Poll::Pending
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ if self.capacity == 0 {
+ self.project().sink.start_send(item)
+ } else {
+ self.project().buf.push_back(item);
+ Ok(())
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().try_empty_buffer(cx))?;
+ debug_assert!(self.buf.is_empty());
+ self.project().sink.poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().try_empty_buffer(cx))?;
+ debug_assert!(self.buf.is_empty());
+ self.project().sink.poll_close(cx)
+ }
+}
diff --git a/vendor/futures-util/src/sink/close.rs b/vendor/futures-util/src/sink/close.rs
new file mode 100644
index 000000000..43eea74b0
--- /dev/null
+++ b/vendor/futures-util/src/sink/close.rs
@@ -0,0 +1,32 @@
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Future for the [`close`](super::SinkExt::close) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Close<'a, Si: ?Sized, Item> {
+ sink: &'a mut Si,
+ _phantom: PhantomData<fn(Item)>,
+}
+
+impl<Si: Unpin + ?Sized, Item> Unpin for Close<'_, Si, Item> {}
+
+/// A future that completes when the sink has finished closing.
+///
+/// The sink itself is returned after closing is complete.
+impl<'a, Si: Sink<Item> + Unpin + ?Sized, Item> Close<'a, Si, Item> {
+ pub(super) fn new(sink: &'a mut Si) -> Self {
+ Self { sink, _phantom: PhantomData }
+ }
+}
+
+impl<Si: Sink<Item> + Unpin + ?Sized, Item> Future for Close<'_, Si, Item> {
+ type Output = Result<(), Si::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.sink).poll_close(cx)
+ }
+}
diff --git a/vendor/futures-util/src/sink/drain.rs b/vendor/futures-util/src/sink/drain.rs
new file mode 100644
index 000000000..5295115b6
--- /dev/null
+++ b/vendor/futures-util/src/sink/drain.rs
@@ -0,0 +1,53 @@
+use super::assert_sink;
+use crate::never::Never;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Sink for the [`drain`] function.
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct Drain<T> {
+ marker: PhantomData<T>,
+}
+
+/// Create a sink that will just discard all items given to it.
+///
+/// Similar to [`io::Sink`](::std::io::Sink).
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::sink::{self, SinkExt};
+///
+/// let mut drain = sink::drain();
+/// drain.send(5).await?;
+/// # Ok::<(), futures::never::Never>(()) }).unwrap();
+/// ```
+pub fn drain<T>() -> Drain<T> {
+ assert_sink::<T, Never, _>(Drain { marker: PhantomData })
+}
+
+impl<T> Unpin for Drain<T> {}
+
+impl<T> Sink<T> for Drain<T> {
+ type Error = Never;
+
+ fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(self: Pin<&mut Self>, _item: T) -> Result<(), Self::Error> {
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/sink/err_into.rs b/vendor/futures-util/src/sink/err_into.rs
new file mode 100644
index 000000000..a64d1337b
--- /dev/null
+++ b/vendor/futures-util/src/sink/err_into.rs
@@ -0,0 +1,57 @@
+use crate::sink::{SinkExt, SinkMapErr};
+use futures_core::stream::{FusedStream, Stream};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`sink_err_into`](super::SinkExt::sink_err_into) method.
+ #[derive(Debug)]
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct SinkErrInto<Si: Sink<Item>, Item, E> {
+ #[pin]
+ sink: SinkMapErr<Si, fn(Si::Error) -> E>,
+ }
+}
+
+impl<Si, E, Item> SinkErrInto<Si, Item, E>
+where
+ Si: Sink<Item>,
+ Si::Error: Into<E>,
+{
+ pub(super) fn new(sink: Si) -> Self {
+ Self { sink: SinkExt::sink_map_err(sink, Into::into) }
+ }
+
+ delegate_access_inner!(sink, Si, (.));
+}
+
+impl<Si, Item, E> Sink<Item> for SinkErrInto<Si, Item, E>
+where
+ Si: Sink<Item>,
+ Si::Error: Into<E>,
+{
+ type Error = E;
+
+ delegate_sink!(sink, Item);
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, Item, E> Stream for SinkErrInto<S, Item, E>
+where
+ S: Sink<Item> + Stream,
+ S::Error: Into<E>,
+{
+ type Item = S::Item;
+
+ delegate_stream!(sink);
+}
+
+impl<S, Item, E> FusedStream for SinkErrInto<S, Item, E>
+where
+ S: Sink<Item> + FusedStream,
+ S::Error: Into<E>,
+{
+ fn is_terminated(&self) -> bool {
+ self.sink.is_terminated()
+ }
+}
diff --git a/vendor/futures-util/src/sink/fanout.rs b/vendor/futures-util/src/sink/fanout.rs
new file mode 100644
index 000000000..fe2038f27
--- /dev/null
+++ b/vendor/futures-util/src/sink/fanout.rs
@@ -0,0 +1,111 @@
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::pin::Pin;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink that clones incoming items and forwards them to two sinks at the same time.
+ ///
+ /// Backpressure from any downstream sink propagates up, which means that this sink
+ /// can only process items as fast as its _slowest_ downstream sink.
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct Fanout<Si1, Si2> {
+ #[pin]
+ sink1: Si1,
+ #[pin]
+ sink2: Si2
+ }
+}
+
+impl<Si1, Si2> Fanout<Si1, Si2> {
+ pub(super) fn new(sink1: Si1, sink2: Si2) -> Self {
+ Self { sink1, sink2 }
+ }
+
+ /// Get a shared reference to the inner sinks.
+ pub fn get_ref(&self) -> (&Si1, &Si2) {
+ (&self.sink1, &self.sink2)
+ }
+
+ /// Get a mutable reference to the inner sinks.
+ pub fn get_mut(&mut self) -> (&mut Si1, &mut Si2) {
+ (&mut self.sink1, &mut self.sink2)
+ }
+
+ /// Get a pinned mutable reference to the inner sinks.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut Si1>, Pin<&mut Si2>) {
+ let this = self.project();
+ (this.sink1, this.sink2)
+ }
+
+ /// Consumes this combinator, returning the underlying sinks.
+ ///
+ /// Note that this may discard intermediate state of this combinator,
+ /// so care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> (Si1, Si2) {
+ (self.sink1, self.sink2)
+ }
+}
+
+impl<Si1: Debug, Si2: Debug> Debug for Fanout<Si1, Si2> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+ f.debug_struct("Fanout").field("sink1", &self.sink1).field("sink2", &self.sink2).finish()
+ }
+}
+
+impl<Si1, Si2, Item> Sink<Item> for Fanout<Si1, Si2>
+where
+ Si1: Sink<Item>,
+ Item: Clone,
+ Si2: Sink<Item, Error = Si1::Error>,
+{
+ type Error = Si1::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let this = self.project();
+
+ let sink1_ready = this.sink1.poll_ready(cx)?.is_ready();
+ let sink2_ready = this.sink2.poll_ready(cx)?.is_ready();
+ let ready = sink1_ready && sink2_ready;
+ if ready {
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ let this = self.project();
+
+ this.sink1.start_send(item.clone())?;
+ this.sink2.start_send(item)?;
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let this = self.project();
+
+ let sink1_ready = this.sink1.poll_flush(cx)?.is_ready();
+ let sink2_ready = this.sink2.poll_flush(cx)?.is_ready();
+ let ready = sink1_ready && sink2_ready;
+ if ready {
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let this = self.project();
+
+ let sink1_ready = this.sink1.poll_close(cx)?.is_ready();
+ let sink2_ready = this.sink2.poll_close(cx)?.is_ready();
+ let ready = sink1_ready && sink2_ready;
+ if ready {
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Pending
+ }
+ }
+}
diff --git a/vendor/futures-util/src/sink/feed.rs b/vendor/futures-util/src/sink/feed.rs
new file mode 100644
index 000000000..6701f7a1b
--- /dev/null
+++ b/vendor/futures-util/src/sink/feed.rs
@@ -0,0 +1,43 @@
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Future for the [`feed`](super::SinkExt::feed) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Feed<'a, Si: ?Sized, Item> {
+ sink: &'a mut Si,
+ item: Option<Item>,
+}
+
+// Pinning is never projected to children
+impl<Si: Unpin + ?Sized, Item> Unpin for Feed<'_, Si, Item> {}
+
+impl<'a, Si: Sink<Item> + Unpin + ?Sized, Item> Feed<'a, Si, Item> {
+ pub(super) fn new(sink: &'a mut Si, item: Item) -> Self {
+ Feed { sink, item: Some(item) }
+ }
+
+ pub(super) fn sink_pin_mut(&mut self) -> Pin<&mut Si> {
+ Pin::new(self.sink)
+ }
+
+ pub(super) fn is_item_pending(&self) -> bool {
+ self.item.is_some()
+ }
+}
+
+impl<Si: Sink<Item> + Unpin + ?Sized, Item> Future for Feed<'_, Si, Item> {
+ type Output = Result<(), Si::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = self.get_mut();
+ let mut sink = Pin::new(&mut this.sink);
+ ready!(sink.as_mut().poll_ready(cx))?;
+ let item = this.item.take().expect("polled Feed after completion");
+ sink.as_mut().start_send(item)?;
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/sink/flush.rs b/vendor/futures-util/src/sink/flush.rs
new file mode 100644
index 000000000..35a8372de
--- /dev/null
+++ b/vendor/futures-util/src/sink/flush.rs
@@ -0,0 +1,36 @@
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Future for the [`flush`](super::SinkExt::flush) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Flush<'a, Si: ?Sized, Item> {
+ sink: &'a mut Si,
+ _phantom: PhantomData<fn(Item)>,
+}
+
+// Pin is never projected to a field.
+impl<Si: Unpin + ?Sized, Item> Unpin for Flush<'_, Si, Item> {}
+
+/// A future that completes when the sink has finished processing all
+/// pending requests.
+///
+/// The sink itself is returned after flushing is complete; this adapter is
+/// intended to be used when you want to stop sending to the sink until
+/// all current requests are processed.
+impl<'a, Si: Sink<Item> + Unpin + ?Sized, Item> Flush<'a, Si, Item> {
+ pub(super) fn new(sink: &'a mut Si) -> Self {
+ Self { sink, _phantom: PhantomData }
+ }
+}
+
+impl<Si: Sink<Item> + Unpin + ?Sized, Item> Future for Flush<'_, Si, Item> {
+ type Output = Result<(), Si::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.sink).poll_flush(cx)
+ }
+}
diff --git a/vendor/futures-util/src/sink/map_err.rs b/vendor/futures-util/src/sink/map_err.rs
new file mode 100644
index 000000000..9d2ab7b24
--- /dev/null
+++ b/vendor/futures-util/src/sink/map_err.rs
@@ -0,0 +1,65 @@
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`sink_map_err`](super::SinkExt::sink_map_err) method.
+ #[derive(Debug, Clone)]
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct SinkMapErr<Si, F> {
+ #[pin]
+ sink: Si,
+ f: Option<F>,
+ }
+}
+
+impl<Si, F> SinkMapErr<Si, F> {
+ pub(super) fn new(sink: Si, f: F) -> Self {
+ Self { sink, f: Some(f) }
+ }
+
+ delegate_access_inner!(sink, Si, ());
+
+ fn take_f(self: Pin<&mut Self>) -> F {
+ self.project().f.take().expect("polled MapErr after completion")
+ }
+}
+
+impl<Si, F, E, Item> Sink<Item> for SinkMapErr<Si, F>
+where
+ Si: Sink<Item>,
+ F: FnOnce(Si::Error) -> E,
+{
+ type Error = E;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.as_mut().project().sink.poll_ready(cx).map_err(|e| self.as_mut().take_f()(e))
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ self.as_mut().project().sink.start_send(item).map_err(|e| self.as_mut().take_f()(e))
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.as_mut().project().sink.poll_flush(cx).map_err(|e| self.as_mut().take_f()(e))
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.as_mut().project().sink.poll_close(cx).map_err(|e| self.as_mut().take_f()(e))
+ }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S: Stream, F> Stream for SinkMapErr<S, F> {
+ type Item = S::Item;
+
+ delegate_stream!(sink);
+}
+
+impl<S: FusedStream, F> FusedStream for SinkMapErr<S, F> {
+ fn is_terminated(&self) -> bool {
+ self.sink.is_terminated()
+ }
+}
diff --git a/vendor/futures-util/src/sink/mod.rs b/vendor/futures-util/src/sink/mod.rs
new file mode 100644
index 000000000..147e9adc9
--- /dev/null
+++ b/vendor/futures-util/src/sink/mod.rs
@@ -0,0 +1,344 @@
+//! Asynchronous sinks.
+//!
+//! This module contains:
+//!
+//! - The [`Sink`] trait, which allows you to asynchronously write data.
+//! - The [`SinkExt`] trait, which provides adapters for chaining and composing
+//! sinks.
+
+use crate::future::{assert_future, Either};
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::stream::{Stream, TryStream};
+use futures_core::task::{Context, Poll};
+
+#[cfg(feature = "compat")]
+use crate::compat::CompatSink;
+
+pub use futures_sink::Sink;
+
+mod close;
+pub use self::close::Close;
+
+mod drain;
+pub use self::drain::{drain, Drain};
+
+mod fanout;
+pub use self::fanout::Fanout;
+
+mod feed;
+pub use self::feed::Feed;
+
+mod flush;
+pub use self::flush::Flush;
+
+mod err_into;
+pub use self::err_into::SinkErrInto;
+
+mod map_err;
+pub use self::map_err::SinkMapErr;
+
+mod send;
+pub use self::send::Send;
+
+mod send_all;
+pub use self::send_all::SendAll;
+
+mod unfold;
+pub use self::unfold::{unfold, Unfold};
+
+mod with;
+pub use self::with::With;
+
+mod with_flat_map;
+pub use self::with_flat_map::WithFlatMap;
+
+#[cfg(feature = "alloc")]
+mod buffer;
+#[cfg(feature = "alloc")]
+pub use self::buffer::Buffer;
+
+impl<T: ?Sized, Item> SinkExt<Item> for T where T: Sink<Item> {}
+
+/// An extension trait for `Sink`s that provides a variety of convenient
+/// combinator functions.
+pub trait SinkExt<Item>: Sink<Item> {
+ /// Composes a function *in front of* the sink.
+ ///
+ /// This adapter produces a new sink that passes each value through the
+ /// given function `f` before sending it to `self`.
+ ///
+ /// To process each value, `f` produces a *future*, which is then polled to
+ /// completion before passing its result down to the underlying sink. If the
+ /// future produces an error, that error is returned by the new sink.
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::map`.
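+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch that converts each item before it reaches the
+ /// underlying channel sender:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::future;
+ /// use futures::sink::SinkExt;
+ /// use futures::stream::StreamExt;
+ ///
+ /// let (tx, mut rx) = mpsc::channel::<i32>(5);
+ /// // Accept `u8` items and widen them to `i32` before sending.
+ /// let mut tx = tx.with(|x: u8| future::ready(Ok::<i32, mpsc::SendError>(i32::from(x))));
+ /// tx.send(5u8).await.unwrap();
+ /// assert_eq!(rx.next().await, Some(5));
+ /// # });
+ /// ```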
+ fn with<U, Fut, F, E>(self, f: F) -> With<Self, Item, U, Fut, F>
+ where
+ F: FnMut(U) -> Fut,
+ Fut: Future<Output = Result<Item, E>>,
+ E: From<Self::Error>,
+ Self: Sized,
+ {
+ assert_sink::<U, E, _>(With::new(self, f))
+ }
+
+ /// Composes a function *in front of* the sink.
+ ///
+ /// This adapter produces a new sink that passes each value through the
+ /// given function `f` before sending it to `self`.
+ ///
+ /// To process each value, `f` produces a *stream*, each value of which is
+ /// passed to the underlying sink. A new value will not be accepted until
+ /// the stream has been drained.
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::flat_map`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::sink::SinkExt;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let (tx, rx) = mpsc::channel(5);
+ ///
+ /// let mut tx = tx.with_flat_map(|x| {
+ /// stream::iter(vec![Ok(42); x])
+ /// });
+ ///
+ /// tx.send(5).await.unwrap();
+ /// drop(tx);
+ /// let received: Vec<i32> = rx.collect().await;
+ /// assert_eq!(received, vec![42, 42, 42, 42, 42]);
+ /// # });
+ /// ```
+ fn with_flat_map<U, St, F>(self, f: F) -> WithFlatMap<Self, Item, U, St, F>
+ where
+ F: FnMut(U) -> St,
+ St: Stream<Item = Result<Item, Self::Error>>,
+ Self: Sized,
+ {
+ assert_sink::<U, Self::Error, _>(WithFlatMap::new(self, f))
+ }
+
+ /*
+ fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
+ where F: FnMut(U) -> Self::SinkItem,
+ Self: Sized;
+
+ fn with_filter<F>(self, f: F) -> WithFilter<Self, F>
+ where F: FnMut(Self::SinkItem) -> bool,
+ Self: Sized;
+
+ fn with_filter_map<U, F>(self, f: F) -> WithFilterMap<Self, U, F>
+ where F: FnMut(U) -> Option<Self::SinkItem>,
+ Self: Sized;
+ */
+
+ /// Transforms the error returned by the sink.
+ fn sink_map_err<E, F>(self, f: F) -> SinkMapErr<Self, F>
+ where
+ F: FnOnce(Self::Error) -> E,
+ Self: Sized,
+ {
+ assert_sink::<Item, E, _>(SinkMapErr::new(self, f))
+ }
+
+ /// Map this sink's error to a different error type using the `Into` trait.
+ ///
+ /// To map the errors of a combined `Sink + Stream`, use `.sink_err_into().err_into()`.
+ fn sink_err_into<E>(self) -> err_into::SinkErrInto<Self, Item, E>
+ where
+ Self: Sized,
+ Self::Error: Into<E>,
+ {
+ assert_sink::<Item, E, _>(SinkErrInto::new(self))
+ }
+
+ /// Adds a fixed-size buffer to the current sink.
+ ///
+ /// The resulting sink will buffer up to `capacity` items when the
+ /// underlying sink is unwilling to accept additional items. Calling `flush`
+ /// on the buffered sink will attempt to both empty the buffer and complete
+ /// processing on the underlying sink.
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::map`.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "alloc")]
+ fn buffer(self, capacity: usize) -> Buffer<Self, Item>
+ where
+ Self: Sized,
+ {
+ assert_sink::<Item, Self::Error, _>(Buffer::new(self, capacity))
+ }
+
+ /// Close the sink.
+ fn close(&mut self) -> Close<'_, Self, Item>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<(), Self::Error>, _>(Close::new(self))
+ }
+
+ /// Fanout items to multiple sinks.
+ ///
+ /// This adapter clones each incoming item and forwards it to both this as well as
+ /// the other sink at the same time.
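+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch using two `drain` sinks, which share the error type
+ /// `Never`:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::sink::{self, SinkExt};
+ ///
+ /// let mut fanned_out = sink::drain::<i32>().fanout(sink::drain::<i32>());
+ /// // Each item is cloned and delivered to both sinks.
+ /// fanned_out.send(7).await?;
+ /// # Ok::<(), futures::never::Never>(()) }).unwrap();
+ /// ```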
+ fn fanout<Si>(self, other: Si) -> Fanout<Self, Si>
+ where
+ Self: Sized,
+ Item: Clone,
+ Si: Sink<Item, Error = Self::Error>,
+ {
+ assert_sink::<Item, Self::Error, _>(Fanout::new(self, other))
+ }
+
+ /// Flush the sink, processing all pending items.
+ ///
+ /// This adapter is intended to be used when you want to stop sending to the sink
+ /// until all current requests are processed.
+ fn flush(&mut self) -> Flush<'_, Self, Item>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<(), Self::Error>, _>(Flush::new(self))
+ }
+
+ /// A future that completes after the given item has been fully processed
+ /// into the sink, including flushing.
+ ///
+ /// Note that, **because of the flushing requirement, it is usually better
+ /// to batch together items to send via `feed` or `send_all`,
+ /// rather than flushing between each item.**
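+ ///
+ /// # Examples
+ ///
+ /// A short sketch sending one item through a channel:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::sink::SinkExt;
+ /// use futures::stream::StreamExt;
+ ///
+ /// let (mut tx, mut rx) = mpsc::channel(5);
+ /// tx.send(5).await.unwrap();
+ /// assert_eq!(rx.next().await, Some(5));
+ /// # });
+ /// ```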
+ fn send(&mut self, item: Item) -> Send<'_, Self, Item>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<(), Self::Error>, _>(Send::new(self, item))
+ }
+
+ /// A future that completes after the given item has been received
+ /// by the sink.
+ ///
+ /// Unlike `send`, the returned future does not flush the sink.
+ /// It is the caller's responsibility to ensure all pending items
+ /// are processed, which can be done via `flush` or `close`.
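+ ///
+ /// # Examples
+ ///
+ /// A short sketch feeding several items and flushing once at the end:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::sink::SinkExt;
+ /// use futures::stream::StreamExt;
+ ///
+ /// let (mut tx, mut rx) = mpsc::channel(5);
+ /// tx.feed(1).await.unwrap();
+ /// tx.feed(2).await.unwrap();
+ /// tx.flush().await.unwrap();
+ /// assert_eq!(rx.next().await, Some(1));
+ /// assert_eq!(rx.next().await, Some(2));
+ /// # });
+ /// ```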
+ fn feed(&mut self, item: Item) -> Feed<'_, Self, Item>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<(), Self::Error>, _>(Feed::new(self, item))
+ }
+
+ /// A future that completes after the given stream has been fully processed
+ /// into the sink, including flushing.
+ ///
+ /// This future will drive the stream to keep producing items until it is
+ /// exhausted, sending each item to the sink. It will complete once both the
+ /// stream is exhausted, the sink has received all items, and the sink has
+ /// been flushed. Note that the sink is **not** closed. If the stream produces
+ /// an error, that error will be returned by this future without flushing the sink.
+ ///
+ /// Doing `sink.send_all(stream)` is roughly equivalent to
+ /// `stream.forward(sink)`. The returned future will exhaust all items from
+ /// `stream` and send them to `self`.
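+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch forwarding a `TryStream` of items into a channel:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::sink::SinkExt;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let (mut tx, rx) = mpsc::channel(5);
+ /// let mut items = stream::iter(vec![Ok::<i32, mpsc::SendError>(1), Ok(2), Ok(3)]);
+ /// tx.send_all(&mut items).await.unwrap();
+ /// drop(tx);
+ /// let received: Vec<i32> = rx.collect().await;
+ /// assert_eq!(received, vec![1, 2, 3]);
+ /// # });
+ /// ```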
+ fn send_all<'a, St>(&'a mut self, stream: &'a mut St) -> SendAll<'a, Self, St>
+ where
+ St: TryStream<Ok = Item, Error = Self::Error> + Stream + Unpin + ?Sized,
+ // St: Stream<Item = Result<Item, Self::Error>> + Unpin + ?Sized,
+ Self: Unpin,
+ {
+ // TODO: type mismatch resolving `<St as Stream>::Item == std::result::Result<Item, <Self as futures_sink::Sink<Item>>::Error>`
+ // assert_future::<Result<(), Self::Error>, _>(SendAll::new(self, stream))
+ SendAll::new(self, stream)
+ }
+
+ /// Wrap this sink in an `Either` sink, making it the left-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `right_sink` method to write `if`
+ /// statements that evaluate to different streams in different branches.
+ fn left_sink<Si2>(self) -> Either<Self, Si2>
+ where
+ Si2: Sink<Item, Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_sink::<Item, Self::Error, _>(Either::Left(self))
+ }
+
+ /// Wrap this stream in an `Either` stream, making it the right-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `left_sink` method to write `if`
+ /// statements that evaluate to different streams in different branches.
+ fn right_sink<Si1>(self) -> Either<Si1, Self>
+ where
+ Si1: Sink<Item, Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_sink::<Item, Self::Error, _>(Either::Right(self))
+ }
+
+ /// Wraps a [`Sink`] into a sink compatible with libraries using
+ /// futures 0.1 `Sink`. Requires the `compat` feature to be enabled.
+ #[cfg(feature = "compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+ fn compat(self) -> CompatSink<Self, Item>
+ where
+ Self: Sized + Unpin,
+ {
+ CompatSink::new(self)
+ }
+
+ /// A convenience method for calling [`Sink::poll_ready`] on [`Unpin`]
+ /// sink types.
+ fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).poll_ready(cx)
+ }
+
+ /// A convenience method for calling [`Sink::start_send`] on [`Unpin`]
+ /// sink types.
+ fn start_send_unpin(&mut self, item: Item) -> Result<(), Self::Error>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).start_send(item)
+ }
+
+ /// A convenience method for calling [`Sink::poll_flush`] on [`Unpin`]
+ /// sink types.
+ fn poll_flush_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).poll_flush(cx)
+ }
+
+ /// A convenience method for calling [`Sink::poll_close`] on [`Unpin`]
+ /// sink types.
+ fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).poll_close(cx)
+ }
+}
+
+// Just a helper function to ensure the sinks we're returning all have the
+// right implementations.
+pub(crate) fn assert_sink<T, E, S>(sink: S) -> S
+where
+ S: Sink<T, Error = E>,
+{
+ sink
+}
diff --git a/vendor/futures-util/src/sink/send.rs b/vendor/futures-util/src/sink/send.rs
new file mode 100644
index 000000000..6d21f33fe
--- /dev/null
+++ b/vendor/futures-util/src/sink/send.rs
@@ -0,0 +1,41 @@
+use super::Feed;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Future for the [`send`](super::SinkExt::send) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Send<'a, Si: ?Sized, Item> {
+ feed: Feed<'a, Si, Item>,
+}
+
+// Pinning is never projected to children
+impl<Si: Unpin + ?Sized, Item> Unpin for Send<'_, Si, Item> {}
+
+impl<'a, Si: Sink<Item> + Unpin + ?Sized, Item> Send<'a, Si, Item> {
+ pub(super) fn new(sink: &'a mut Si, item: Item) -> Self {
+ Self { feed: Feed::new(sink, item) }
+ }
+}
+
+impl<Si: Sink<Item> + Unpin + ?Sized, Item> Future for Send<'_, Si, Item> {
+ type Output = Result<(), Si::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+
+ if this.feed.is_item_pending() {
+ ready!(Pin::new(&mut this.feed).poll(cx))?;
+ debug_assert!(!this.feed.is_item_pending());
+ }
+
+ // we're done sending the item, but want to block on flushing the
+ // sink
+ ready!(this.feed.sink_pin_mut().poll_flush(cx))?;
+
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/sink/send_all.rs b/vendor/futures-util/src/sink/send_all.rs
new file mode 100644
index 000000000..1302dd214
--- /dev/null
+++ b/vendor/futures-util/src/sink/send_all.rs
@@ -0,0 +1,100 @@
+use crate::stream::{Fuse, StreamExt, TryStreamExt};
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{Stream, TryStream};
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+/// Future for the [`send_all`](super::SinkExt::send_all) method.
+#[allow(explicit_outlives_requirements)] // https://github.com/rust-lang/rust/issues/60993
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct SendAll<'a, Si, St>
+where
+ Si: ?Sized,
+ St: ?Sized + TryStream,
+{
+ sink: &'a mut Si,
+ stream: Fuse<&'a mut St>,
+ buffered: Option<St::Ok>,
+}
+
+impl<Si, St> fmt::Debug for SendAll<'_, Si, St>
+where
+ Si: fmt::Debug + ?Sized,
+ St: fmt::Debug + ?Sized + TryStream,
+ St::Ok: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SendAll")
+ .field("sink", &self.sink)
+ .field("stream", &self.stream)
+ .field("buffered", &self.buffered)
+ .finish()
+ }
+}
+
+// Pinning is never projected to any fields
+impl<Si, St> Unpin for SendAll<'_, Si, St>
+where
+ Si: Unpin + ?Sized,
+ St: TryStream + Unpin + ?Sized,
+{
+}
+
+impl<'a, Si, St, Ok, Error> SendAll<'a, Si, St>
+where
+ Si: Sink<Ok, Error = Error> + Unpin + ?Sized,
+ St: TryStream<Ok = Ok, Error = Error> + Stream + Unpin + ?Sized,
+{
+ pub(super) fn new(sink: &'a mut Si, stream: &'a mut St) -> Self {
+ Self { sink, stream: stream.fuse(), buffered: None }
+ }
+
+ fn try_start_send(
+ &mut self,
+ cx: &mut Context<'_>,
+ item: St::Ok,
+ ) -> Poll<Result<(), Si::Error>> {
+ debug_assert!(self.buffered.is_none());
+ match Pin::new(&mut self.sink).poll_ready(cx)? {
+ Poll::Ready(()) => Poll::Ready(Pin::new(&mut self.sink).start_send(item)),
+ Poll::Pending => {
+ self.buffered = Some(item);
+ Poll::Pending
+ }
+ }
+ }
+}
+
+impl<Si, St, Ok, Error> Future for SendAll<'_, Si, St>
+where
+ Si: Sink<Ok, Error = Error> + Unpin + ?Sized,
+ St: Stream<Item = Result<Ok, Error>> + Unpin + ?Sized,
+{
+ type Output = Result<(), Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = &mut *self;
+ // If we've got an item buffered already, we need to write it to the
+ // sink before we can do anything else
+ if let Some(item) = this.buffered.take() {
+ ready!(this.try_start_send(cx, item))?
+ }
+
+ loop {
+ match this.stream.try_poll_next_unpin(cx)? {
+ Poll::Ready(Some(item)) => ready!(this.try_start_send(cx, item))?,
+ Poll::Ready(None) => {
+ ready!(Pin::new(&mut this.sink).poll_flush(cx))?;
+ return Poll::Ready(Ok(()));
+ }
+ Poll::Pending => {
+ ready!(Pin::new(&mut this.sink).poll_flush(cx))?;
+ return Poll::Pending;
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/sink/unfold.rs b/vendor/futures-util/src/sink/unfold.rs
new file mode 100644
index 000000000..330a068c3
--- /dev/null
+++ b/vendor/futures-util/src/sink/unfold.rs
@@ -0,0 +1,86 @@
+use super::assert_sink;
+use crate::unfold_state::UnfoldState;
+use core::{future::Future, pin::Pin};
+use futures_core::ready;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`unfold`] function.
+ #[derive(Debug)]
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct Unfold<T, F, R> {
+ function: F,
+ #[pin]
+ state: UnfoldState<T, R>,
+ }
+}
+
+/// Create a sink from a function which processes one item at a time.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::sink::{self, SinkExt};
+///
+/// let unfold = sink::unfold(0, |mut sum, i: i32| {
+/// async move {
+/// sum += i;
+/// eprintln!("{}", i);
+/// Ok::<_, futures::never::Never>(sum)
+/// }
+/// });
+/// futures::pin_mut!(unfold);
+/// unfold.send(5).await?;
+/// # Ok::<(), futures::never::Never>(()) }).unwrap();
+/// ```
+pub fn unfold<T, F, R, Item, E>(init: T, function: F) -> Unfold<T, F, R>
+where
+ F: FnMut(T, Item) -> R,
+ R: Future<Output = Result<T, E>>,
+{
+ assert_sink::<Item, E, _>(Unfold { function, state: UnfoldState::Value { value: init } })
+}
+
+impl<T, F, R, Item, E> Sink<Item> for Unfold<T, F, R>
+where
+ F: FnMut(T, Item) -> R,
+ R: Future<Output = Result<T, E>>,
+{
+ type Error = E;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.poll_flush(cx)
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ let mut this = self.project();
+ let future = match this.state.as_mut().take_value() {
+ Some(value) => (this.function)(value, item),
+ None => panic!("start_send called without poll_ready being called first"),
+ };
+ this.state.set(UnfoldState::Future { future });
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ let mut this = self.project();
+ Poll::Ready(if let Some(future) = this.state.as_mut().project_future() {
+ match ready!(future.poll(cx)) {
+ Ok(state) => {
+ this.state.set(UnfoldState::Value { value: state });
+ Ok(())
+ }
+ Err(err) => Err(err),
+ }
+ } else {
+ Ok(())
+ })
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.poll_flush(cx)
+ }
+}
diff --git a/vendor/futures-util/src/sink/with.rs b/vendor/futures-util/src/sink/with.rs
new file mode 100644
index 000000000..86d3dcc7b
--- /dev/null
+++ b/vendor/futures-util/src/sink/with.rs
@@ -0,0 +1,134 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`with`](super::SinkExt::with) method.
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct With<Si, Item, U, Fut, F> {
+ #[pin]
+ sink: Si,
+ f: F,
+ #[pin]
+ state: Option<Fut>,
+ _phantom: PhantomData<fn(U) -> Item>,
+ }
+}
+
+impl<Si, Item, U, Fut, F> fmt::Debug for With<Si, Item, U, Fut, F>
+where
+ Si: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("With").field("sink", &self.sink).field("state", &self.state).finish()
+ }
+}
+
+impl<Si, Item, U, Fut, F> With<Si, Item, U, Fut, F>
+where
+ Si: Sink<Item>,
+ F: FnMut(U) -> Fut,
+ Fut: Future,
+{
+ pub(super) fn new<E>(sink: Si, f: F) -> Self
+ where
+ Fut: Future<Output = Result<Item, E>>,
+ E: From<Si::Error>,
+ {
+ Self { state: None, sink, f, _phantom: PhantomData }
+ }
+}
+
+impl<Si, Item, U, Fut, F> Clone for With<Si, Item, U, Fut, F>
+where
+ Si: Clone,
+ F: Clone,
+ Fut: Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ state: self.state.clone(),
+ sink: self.sink.clone(),
+ f: self.f.clone(),
+ _phantom: PhantomData,
+ }
+ }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, Item, U, Fut, F> Stream for With<S, Item, U, Fut, F>
+where
+ S: Stream + Sink<Item>,
+ F: FnMut(U) -> Fut,
+ Fut: Future,
+{
+ type Item = S::Item;
+
+ delegate_stream!(sink);
+}
+
+impl<Si, Item, U, Fut, F, E> With<Si, Item, U, Fut, F>
+where
+ Si: Sink<Item>,
+ F: FnMut(U) -> Fut,
+ Fut: Future<Output = Result<Item, E>>,
+ E: From<Si::Error>,
+{
+ delegate_access_inner!(sink, Si, ());
+
+ /// Completes the processing of previous item if any.
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
+ let mut this = self.project();
+
+ let item = match this.state.as_mut().as_pin_mut() {
+ None => return Poll::Ready(Ok(())),
+ Some(fut) => ready!(fut.poll(cx))?,
+ };
+ this.state.set(None);
+ this.sink.start_send(item)?;
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<Si, Item, U, Fut, F, E> Sink<U> for With<Si, Item, U, Fut, F>
+where
+ Si: Sink<Item>,
+ F: FnMut(U) -> Fut,
+ Fut: Future<Output = Result<Item, E>>,
+ E: From<Si::Error>,
+{
+ type Error = E;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().poll(cx))?;
+ ready!(self.project().sink.poll_ready(cx)?);
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: U) -> Result<(), Self::Error> {
+ let mut this = self.project();
+
+ assert!(this.state.is_none());
+ this.state.set(Some((this.f)(item)));
+ Ok(())
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().poll(cx))?;
+ ready!(self.project().sink.poll_flush(cx)?);
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().poll(cx))?;
+ ready!(self.project().sink.poll_close(cx)?);
+ Poll::Ready(Ok(()))
+ }
+}
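+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): with the `with` adapter, every item passed to `send` first runs
+// through the provided async closure before reaching the inner sink.
+//
+//     futures::executor::block_on(async {
+//         use futures::channel::mpsc;
+//         use futures::sink::SinkExt;
+//         use futures::stream::StreamExt;
+//
+//         let (tx, rx) = mpsc::channel(5);
+//         let mut tx = tx.with(|x: i32| async move {
+//             Ok::<i32, mpsc::SendError>(x + 1)
+//         });
+//         tx.send(5).await.unwrap();
+//         drop(tx);
+//         assert_eq!(rx.collect::<Vec<i32>>().await, vec![6]);
+//     });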
diff --git a/vendor/futures-util/src/sink/with_flat_map.rs b/vendor/futures-util/src/sink/with_flat_map.rs
new file mode 100644
index 000000000..2ae877a24
--- /dev/null
+++ b/vendor/futures-util/src/sink/with_flat_map.rs
@@ -0,0 +1,127 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Sink for the [`with_flat_map`](super::SinkExt::with_flat_map) method.
+ #[must_use = "sinks do nothing unless polled"]
+ pub struct WithFlatMap<Si, Item, U, St, F> {
+ #[pin]
+ sink: Si,
+ f: F,
+ #[pin]
+ stream: Option<St>,
+ buffer: Option<Item>,
+ _marker: PhantomData<fn(U)>,
+ }
+}
+
+impl<Si, Item, U, St, F> fmt::Debug for WithFlatMap<Si, Item, U, St, F>
+where
+ Si: fmt::Debug,
+ St: fmt::Debug,
+ Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("WithFlatMap")
+ .field("sink", &self.sink)
+ .field("stream", &self.stream)
+ .field("buffer", &self.buffer)
+ .finish()
+ }
+}
+
+impl<Si, Item, U, St, F> WithFlatMap<Si, Item, U, St, F>
+where
+ Si: Sink<Item>,
+ F: FnMut(U) -> St,
+ St: Stream<Item = Result<Item, Si::Error>>,
+{
+ pub(super) fn new(sink: Si, f: F) -> Self {
+ Self { sink, f, stream: None, buffer: None, _marker: PhantomData }
+ }
+
+ delegate_access_inner!(sink, Si, ());
+
+ fn try_empty_stream(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Si::Error>> {
+ let mut this = self.project();
+
+ if this.buffer.is_some() {
+ ready!(this.sink.as_mut().poll_ready(cx))?;
+ let item = this.buffer.take().unwrap();
+ this.sink.as_mut().start_send(item)?;
+ }
+ if let Some(mut some_stream) = this.stream.as_mut().as_pin_mut() {
+ while let Some(item) = ready!(some_stream.as_mut().poll_next(cx)?) {
+ match this.sink.as_mut().poll_ready(cx)? {
+ Poll::Ready(()) => this.sink.as_mut().start_send(item)?,
+ Poll::Pending => {
+ *this.buffer = Some(item);
+ return Poll::Pending;
+ }
+ };
+ }
+ }
+ this.stream.set(None);
+ Poll::Ready(Ok(()))
+ }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, Item, U, St, F> Stream for WithFlatMap<S, Item, U, St, F>
+where
+ S: Stream + Sink<Item>,
+ F: FnMut(U) -> St,
+ St: Stream<Item = Result<Item, S::Error>>,
+{
+ type Item = S::Item;
+
+ delegate_stream!(sink);
+}
+
+impl<S, Item, U, St, F> FusedStream for WithFlatMap<S, Item, U, St, F>
+where
+ S: FusedStream + Sink<Item>,
+ F: FnMut(U) -> St,
+ St: Stream<Item = Result<Item, S::Error>>,
+{
+ fn is_terminated(&self) -> bool {
+ self.sink.is_terminated()
+ }
+}
+
+impl<Si, Item, U, St, F> Sink<U> for WithFlatMap<Si, Item, U, St, F>
+where
+ Si: Sink<Item>,
+ F: FnMut(U) -> St,
+ St: Stream<Item = Result<Item, Si::Error>>,
+{
+ type Error = Si::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.try_empty_stream(cx)
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: U) -> Result<(), Self::Error> {
+ let mut this = self.project();
+
+ assert!(this.stream.is_none());
+ this.stream.set(Some((this.f)(item)));
+ Ok(())
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().try_empty_stream(cx)?);
+ self.project().sink.poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ ready!(self.as_mut().try_empty_stream(cx)?);
+ self.project().sink.poll_close(cx)
+ }
+}
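+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): with `with_flat_map`, each item sent expands into a whole stream of
+// items, all of which are forwarded to the inner sink.
+//
+//     futures::executor::block_on(async {
+//         use futures::channel::mpsc;
+//         use futures::sink::SinkExt;
+//         use futures::stream::{self, StreamExt};
+//
+//         let (tx, rx) = mpsc::channel(5);
+//         let mut tx = tx.with_flat_map(|x: i32| stream::iter(vec![Ok(x); x as usize]));
+//         tx.send(3).await.unwrap();
+//         drop(tx);
+//         assert_eq!(rx.collect::<Vec<i32>>().await, vec![3, 3, 3]);
+//     });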
diff --git a/vendor/futures-util/src/stream/abortable.rs b/vendor/futures-util/src/stream/abortable.rs
new file mode 100644
index 000000000..1fea89582
--- /dev/null
+++ b/vendor/futures-util/src/stream/abortable.rs
@@ -0,0 +1,19 @@
+use super::assert_stream;
+use crate::stream::{AbortHandle, Abortable};
+use crate::Stream;
+
+/// Creates a new `Abortable` stream and an `AbortHandle` which can be used to stop it.
+///
+/// This function is a convenient (but less flexible) alternative to calling
+/// `AbortHandle::new_pair` and `Abortable::new` manually.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+pub fn abortable<St>(stream: St) -> (Abortable<St>, AbortHandle)
+where
+ St: Stream,
+{
+ let (handle, reg) = AbortHandle::new_pair();
+ let abortable = assert_stream::<St::Item, _>(Abortable::new(stream, reg));
+ (abortable, handle)
+}
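+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): once the handle is aborted, the wrapped stream stops yielding items
+// and simply terminates.
+//
+//     futures::executor::block_on(async {
+//         use futures::stream::{self, StreamExt};
+//
+//         let (stream, handle) = stream::abortable(stream::iter(vec![1, 2, 3]));
+//         handle.abort();
+//         assert_eq!(stream.collect::<Vec<i32>>().await, Vec::new());
+//     });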
diff --git a/vendor/futures-util/src/stream/empty.rs b/vendor/futures-util/src/stream/empty.rs
new file mode 100644
index 000000000..e4fd87326
--- /dev/null
+++ b/vendor/futures-util/src/stream/empty.rs
@@ -0,0 +1,45 @@
+use super::assert_stream;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+/// Stream for the [`empty`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Empty<T> {
+ _phantom: PhantomData<T>,
+}
+
+/// Creates a stream which contains no elements.
+///
+/// The returned stream will always return `Ready(None)` when polled.
+pub fn empty<T>() -> Empty<T> {
+ assert_stream::<T, _>(Empty { _phantom: PhantomData })
+}
+
+impl<T> Unpin for Empty<T> {}
+
+impl<T> FusedStream for Empty<T> {
+ fn is_terminated(&self) -> bool {
+ true
+ }
+}
+
+impl<T> Stream for Empty<T> {
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(None)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
+
+impl<T> Clone for Empty<T> {
+ fn clone(&self) -> Self {
+ empty()
+ }
+}
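+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): the stream is terminated from the start, so the first poll already
+// yields `None`.
+//
+//     futures::executor::block_on(async {
+//         use futures::stream::{self, StreamExt};
+//
+//         let mut s = stream::empty::<i32>();
+//         assert_eq!(s.next().await, None);
+//     });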
diff --git a/vendor/futures-util/src/stream/futures_ordered.rs b/vendor/futures-util/src/stream/futures_ordered.rs
new file mode 100644
index 000000000..f596b3b0e
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_ordered.rs
@@ -0,0 +1,220 @@
+use crate::stream::{FuturesUnordered, StreamExt};
+use alloc::collections::binary_heap::{BinaryHeap, PeekMut};
+use core::cmp::Ordering;
+use core::fmt::{self, Debug};
+use core::iter::FromIterator;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::{
+ task::{Context, Poll},
+ FusedStream,
+};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ #[derive(Debug)]
+ struct OrderWrapper<T> {
+ #[pin]
+ data: T, // A future or a future's output
+ index: usize,
+ }
+}
+
+impl<T> PartialEq for OrderWrapper<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.index == other.index
+ }
+}
+
+impl<T> Eq for OrderWrapper<T> {}
+
+impl<T> PartialOrd for OrderWrapper<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T> Ord for OrderWrapper<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ // BinaryHeap is a max heap, so compare backwards here.
+ other.index.cmp(&self.index)
+ }
+}
+
+impl<T> Future for OrderWrapper<T>
+where
+ T: Future,
+{
+ type Output = OrderWrapper<T::Output>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let index = self.index;
+ self.project().data.poll(cx).map(|output| OrderWrapper { data: output, index })
+ }
+}
+
+/// An unbounded queue of futures.
+///
+/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order
+/// on top of the set of futures. While futures in the set will race to
+/// completion in parallel, results will only be returned in the order their
+/// originating futures were added to the queue.
+///
+/// Futures are pushed into this queue and their realized values are yielded in
+/// order. This structure is optimized to manage a large number of futures.
+/// Futures managed by `FuturesOrdered` will only be polled when they generate
+/// notifications. This reduces the required amount of work needed to coordinate
+/// large numbers of futures.
+///
+/// When a `FuturesOrdered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Poll::Ready(None)` being
+/// returned. Futures are submitted to the queue using `push`; however, the
+/// future will **not** be polled at this point. `FuturesOrdered` will only
+/// poll managed futures when `FuturesOrdered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesOrdered::poll` returns `Poll::Ready(None)` this means that
+/// the queue is currently not managing any futures. A future may be submitted
+/// to the queue at a later time. At that point, a call to
+/// `FuturesOrdered::poll` will either return the future's resolved value
+/// **or** `Poll::Pending` if the future has not yet completed. When
+/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will
+/// return `Poll::Pending` until the first future completes, even if
+/// some of the later futures have already completed.
+///
+/// Note that you can create a ready-made `FuturesOrdered` via the
+/// [`collect`](Iterator::collect) method, or you can start with an empty queue
+/// with the `FuturesOrdered::new` constructor.
+///
+/// This type is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesOrdered<T: Future> {
+ in_progress_queue: FuturesUnordered<OrderWrapper<T>>,
+ queued_outputs: BinaryHeap<OrderWrapper<T::Output>>,
+ next_incoming_index: usize,
+ next_outgoing_index: usize,
+}
+
+impl<T: Future> Unpin for FuturesOrdered<T> {}
+
+impl<Fut: Future> FuturesOrdered<Fut> {
+ /// Constructs a new, empty `FuturesOrdered`
+ ///
+ /// The returned `FuturesOrdered` does not contain any futures and, in this
+ /// state, `FuturesOrdered::poll_next` will return `Poll::Ready(None)`.
+ pub fn new() -> Self {
+ Self {
+ in_progress_queue: FuturesUnordered::new(),
+ queued_outputs: BinaryHeap::new(),
+ next_incoming_index: 0,
+ next_outgoing_index: 0,
+ }
+ }
+
+ /// Returns the number of futures contained in the queue.
+ ///
+ /// This represents the total number of in-flight futures, both
+ /// those still running and those that have completed but are
+ /// waiting for earlier futures to complete.
+ pub fn len(&self) -> usize {
+ self.in_progress_queue.len() + self.queued_outputs.len()
+ }
+
+ /// Returns `true` if the queue contains no futures
+ pub fn is_empty(&self) -> bool {
+ self.in_progress_queue.is_empty() && self.queued_outputs.is_empty()
+ }
+
+ /// Push a future into the queue.
+ ///
+ /// This function submits the given future to the internal set for managing.
+ /// This function will not call `poll` on the submitted future. The caller
+ /// must ensure that `FuturesOrdered::poll` is called in order to receive
+ /// task notifications.
+ pub fn push(&mut self, future: Fut) {
+ let wrapped = OrderWrapper { data: future, index: self.next_incoming_index };
+ self.next_incoming_index += 1;
+ self.in_progress_queue.push(wrapped);
+ }
+}
+
+impl<Fut: Future> Default for FuturesOrdered<Fut> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<Fut: Future> Stream for FuturesOrdered<Fut> {
+ type Item = Fut::Output;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = &mut *self;
+
+ // Check to see if we've already received the next value
+ if let Some(next_output) = this.queued_outputs.peek_mut() {
+ if next_output.index == this.next_outgoing_index {
+ this.next_outgoing_index += 1;
+ return Poll::Ready(Some(PeekMut::pop(next_output).data));
+ }
+ }
+
+ loop {
+ match ready!(this.in_progress_queue.poll_next_unpin(cx)) {
+ Some(output) => {
+ if output.index == this.next_outgoing_index {
+ this.next_outgoing_index += 1;
+ return Poll::Ready(Some(output.data));
+ } else {
+ this.queued_outputs.push(output)
+ }
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<Fut: Future> Debug for FuturesOrdered<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "FuturesOrdered {{ ... }}")
+ }
+}
+
+impl<Fut: Future> FromIterator<Fut> for FuturesOrdered<Fut> {
+ fn from_iter<T>(iter: T) -> Self
+ where
+ T: IntoIterator<Item = Fut>,
+ {
+ let acc = Self::new();
+ iter.into_iter().fold(acc, |mut acc, item| {
+ acc.push(item);
+ acc
+ })
+ }
+}
+
+impl<Fut: Future> FusedStream for FuturesOrdered<Fut> {
+ fn is_terminated(&self) -> bool {
+ self.in_progress_queue.is_terminated() && self.queued_outputs.is_empty()
+ }
+}
+
+impl<Fut: Future> Extend<Fut> for FuturesOrdered<Fut> {
+ fn extend<I>(&mut self, iter: I)
+ where
+ I: IntoIterator<Item = Fut>,
+ {
+ for item in iter {
+ self.push(item);
+ }
+ }
+}
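+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): outputs are yielded in the order the futures were pushed, regardless
+// of the order in which they complete.
+//
+//     futures::executor::block_on(async {
+//         use futures::future;
+//         use futures::stream::{FuturesOrdered, StreamExt};
+//
+//         let mut queue = FuturesOrdered::new();
+//         queue.push(future::ready(1));
+//         queue.push(future::ready(2));
+//         assert_eq!(queue.next().await, Some(1));
+//         assert_eq!(queue.next().await, Some(2));
+//         assert_eq!(queue.next().await, None);
+//     });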
diff --git a/vendor/futures-util/src/stream/futures_unordered/abort.rs b/vendor/futures-util/src/stream/futures_unordered/abort.rs
new file mode 100644
index 000000000..1a42d2436
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_unordered/abort.rs
@@ -0,0 +1,12 @@
+pub(super) fn abort(s: &str) -> ! {
+ struct DoublePanic;
+
+ impl Drop for DoublePanic {
+ fn drop(&mut self) {
+ panic!("panicking twice to abort the program");
+ }
+ }
+
+ let _bomb = DoublePanic;
+ panic!("{}", s);
+}
diff --git a/vendor/futures-util/src/stream/futures_unordered/iter.rs b/vendor/futures-util/src/stream/futures_unordered/iter.rs
new file mode 100644
index 000000000..04db5ee75
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_unordered/iter.rs
@@ -0,0 +1,168 @@
+use super::task::Task;
+use super::FuturesUnordered;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use core::sync::atomic::Ordering::Relaxed;
+
+/// Mutable iterator over all futures in the unordered set.
+#[derive(Debug)]
+pub struct IterPinMut<'a, Fut> {
+ pub(super) task: *const Task<Fut>,
+ pub(super) len: usize,
+ pub(super) _marker: PhantomData<&'a mut FuturesUnordered<Fut>>,
+}
+
+/// Mutable iterator over all futures in the unordered set.
+#[derive(Debug)]
+pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>);
+
+/// Immutable iterator over all futures in the unordered set.
+#[derive(Debug)]
+pub struct IterPinRef<'a, Fut> {
+ pub(super) task: *const Task<Fut>,
+ pub(super) len: usize,
+ pub(super) pending_next_all: *mut Task<Fut>,
+ pub(super) _marker: PhantomData<&'a FuturesUnordered<Fut>>,
+}
+
+/// Immutable iterator over all the futures in the unordered set.
+#[derive(Debug)]
+pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>);
+
+/// Owned iterator over all futures in the unordered set.
+#[derive(Debug)]
+pub struct IntoIter<Fut: Unpin> {
+ pub(super) len: usize,
+ pub(super) inner: FuturesUnordered<Fut>,
+}
+
+impl<Fut: Unpin> Iterator for IntoIter<Fut> {
+ type Item = Fut;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // `head_all` can be accessed directly and we don't need to spin on
+ // `Task::next_all` since we have exclusive access to the set.
+ let task = self.inner.head_all.get_mut();
+
+ if (*task).is_null() {
+ return None;
+ }
+
+ unsafe {
+ // Moving out of the future is safe because it is `Unpin`
+ let future = (*(**task).future.get()).take().unwrap();
+
+ // Mutable access to a previously shared `FuturesUnordered` implies
+ // that the other threads already released the object before the
+ // current thread acquired it, so relaxed ordering can be used and
+ // valid `next_all` checks can be skipped.
+ let next = (**task).next_all.load(Relaxed);
+ *task = next;
+ self.len -= 1;
+ Some(future)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+}
+
+impl<Fut: Unpin> ExactSizeIterator for IntoIter<Fut> {}
+
+impl<'a, Fut> Iterator for IterPinMut<'a, Fut> {
+ type Item = Pin<&'a mut Fut>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.task.is_null() {
+ return None;
+ }
+
+ unsafe {
+ let future = (*(*self.task).future.get()).as_mut().unwrap();
+
+ // Mutable access to a previously shared `FuturesUnordered` implies
+ // that the other threads already released the object before the
+ // current thread acquired it, so relaxed ordering can be used and
+ // valid `next_all` checks can be skipped.
+ let next = (*self.task).next_all.load(Relaxed);
+ self.task = next;
+ self.len -= 1;
+ Some(Pin::new_unchecked(future))
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+}
+
+impl<Fut> ExactSizeIterator for IterPinMut<'_, Fut> {}
+
+impl<'a, Fut: Unpin> Iterator for IterMut<'a, Fut> {
+ type Item = &'a mut Fut;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().map(Pin::get_mut)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl<Fut: Unpin> ExactSizeIterator for IterMut<'_, Fut> {}
+
+impl<'a, Fut> Iterator for IterPinRef<'a, Fut> {
+ type Item = Pin<&'a Fut>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.task.is_null() {
+ return None;
+ }
+
+ unsafe {
+ let future = (*(*self.task).future.get()).as_ref().unwrap();
+
+ // Relaxed ordering can be used since acquire ordering when
+ // `head_all` was initially read for this iterator implies acquire
+ // ordering for all previously inserted nodes (and we don't need to
+ // read `len_all` again for any other nodes).
+ let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed);
+ self.task = next;
+ self.len -= 1;
+ Some(Pin::new_unchecked(future))
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+}
+
+impl<Fut> ExactSizeIterator for IterPinRef<'_, Fut> {}
+
+impl<'a, Fut: Unpin> Iterator for Iter<'a, Fut> {
+ type Item = &'a Fut;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().map(Pin::get_ref)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl<Fut: Unpin> ExactSizeIterator for Iter<'_, Fut> {}
+
+// SAFETY: we do nothing thread-local and there is no interior mutability,
+// so the usual structural `Send`/`Sync` apply.
+unsafe impl<Fut: Send> Send for IterPinRef<'_, Fut> {}
+unsafe impl<Fut: Sync> Sync for IterPinRef<'_, Fut> {}
+
+unsafe impl<Fut: Send> Send for IterPinMut<'_, Fut> {}
+unsafe impl<Fut: Sync> Sync for IterPinMut<'_, Fut> {}
+
+unsafe impl<Fut: Send + Unpin> Send for IntoIter<Fut> {}
+unsafe impl<Fut: Sync + Unpin> Sync for IntoIter<Fut> {}
diff --git a/vendor/futures-util/src/stream/futures_unordered/mod.rs b/vendor/futures-util/src/stream/futures_unordered/mod.rs
new file mode 100644
index 000000000..aab2bb446
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_unordered/mod.rs
@@ -0,0 +1,674 @@
+//! An unbounded set of futures.
+//!
+//! This module is only available when the `std` or `alloc` feature of this
+//! library is activated, and it is activated by default.
+
+use crate::task::AtomicWaker;
+use alloc::sync::{Arc, Weak};
+use core::cell::UnsafeCell;
+use core::cmp;
+use core::fmt::{self, Debug};
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem;
+use core::pin::Pin;
+use core::ptr;
+use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
+use core::sync::atomic::{AtomicBool, AtomicPtr};
+use futures_core::future::Future;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};
+
+mod abort;
+
+mod iter;
+pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};
+
+mod task;
+use self::task::Task;
+
+mod ready_to_run_queue;
+use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};
+
+/// Constant used for a `FuturesUnordered` to determine how many times it is
+/// allowed to poll underlying futures without yielding.
+///
+/// A single call to `poll_next` may potentially do a lot of work before
+/// yielding. This happens in particular if the underlying futures are awoken
+/// frequently but continue to return `Pending`. This is problematic if other
+/// tasks are waiting on the executor, since they do not get to run. This value
+/// caps the number of calls to `poll` on underlying futures a single call to
+/// `poll_next` is allowed to make.
+///
+/// The value itself is chosen somewhat arbitrarily. It needs to be high enough
+/// to amortize wakeup and scheduling costs, but low enough that we do not
+/// starve other tasks for long.
+///
+/// See also https://github.com/rust-lang/futures-rs/issues/2047.
+///
+/// Note that using the length of the `FuturesUnordered` instead of this value
+/// may cause problems if the number of futures is large.
+/// See also https://github.com/rust-lang/futures-rs/pull/2527.
+///
+/// Additionally, polling the same future twice per iteration may cause another
+/// problem. So, when using this value, it is necessary to limit the max value
+/// based on the length of the `FuturesUnordered`.
+/// (e.g., `cmp::min(self.len(), YIELD_EVERY)`)
+/// See also https://github.com/rust-lang/futures-rs/pull/2333.
+const YIELD_EVERY: usize = 32;
+
+/// A set of futures which may complete in any order.
+///
+/// This structure is optimized to manage a large number of futures.
+/// Futures managed by [`FuturesUnordered`] will only be polled when they
+/// generate wake-up notifications. This reduces the required amount of work
+/// needed to poll large numbers of futures.
+///
+/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an
+/// iterator of futures into a [`FuturesUnordered`], or by
+/// [`push`](FuturesUnordered::push)ing futures onto an existing
+/// [`FuturesUnordered`]. When new futures are added,
+/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
+/// wake-ups for new futures.
+///
+/// Note that you can create a ready-made [`FuturesUnordered`] via the
+/// [`collect`](Iterator::collect) method, or you can start with an empty set
+/// with the [`FuturesUnordered::new`] constructor.
+///
+/// This type is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesUnordered<Fut> {
+ ready_to_run_queue: Arc<ReadyToRunQueue<Fut>>,
+ head_all: AtomicPtr<Task<Fut>>,
+ is_terminated: AtomicBool,
+}
+
+unsafe impl<Fut: Send> Send for FuturesUnordered<Fut> {}
+unsafe impl<Fut: Sync> Sync for FuturesUnordered<Fut> {}
+impl<Fut> Unpin for FuturesUnordered<Fut> {}
+
+impl Spawn for FuturesUnordered<FutureObj<'_, ()>> {
+ fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+ self.push(future_obj);
+ Ok(())
+ }
+}
+
+impl LocalSpawn for FuturesUnordered<LocalFutureObj<'_, ()>> {
+ fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
+ self.push(future_obj);
+ Ok(())
+ }
+}
+
+// FuturesUnordered is implemented using two linked lists. One which links all
+// futures managed by a `FuturesUnordered` and one that tracks futures that have
+// been scheduled for polling. The first linked list allows for thread safe
+// insertion of nodes at the head as well as forward iteration, but is otherwise
+// not thread safe and is only accessed by the thread that owns the
+// `FuturesUnordered` value for any other operations. The second linked list is
+// an implementation of the intrusive MPSC queue algorithm described by
+// 1024cores.net.
+//
+// When a future is submitted to the set, a task is allocated and inserted in
+// both linked lists. The next call to `poll_next` will (eventually) see this
+// task and call `poll` on the future.
+//
+// Before a managed future is polled, the current context's waker is replaced
+// with one that is aware of the specific future being run. This ensures that
+// wake-up notifications generated by that specific future are visible to
+// `FuturesUnordered`. When a wake-up notification is received, the task is
+// inserted into the ready to run queue, so that its future can be polled later.
+//
+// Each task is wrapped in an `Arc` and thereby atomically reference counted.
+// Also, each task contains an `AtomicBool` which acts as a flag that indicates
+// whether the task is currently inserted in the atomic queue. When a wake-up
+// notification is received, the task will only be inserted into the ready to
+// run queue if it isn't inserted already.
+
+impl<Fut> Default for FuturesUnordered<Fut> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<Fut> FuturesUnordered<Fut> {
+ /// Constructs a new, empty [`FuturesUnordered`].
+ ///
+ /// The returned [`FuturesUnordered`] does not contain any futures.
+ /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will
+ /// return [`Poll::Ready(None)`](Poll::Ready).
+ pub fn new() -> Self {
+ let stub = Arc::new(Task {
+ future: UnsafeCell::new(None),
+ next_all: AtomicPtr::new(ptr::null_mut()),
+ prev_all: UnsafeCell::new(ptr::null()),
+ len_all: UnsafeCell::new(0),
+ next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
+ queued: AtomicBool::new(true),
+ ready_to_run_queue: Weak::new(),
+ });
+ let stub_ptr = &*stub as *const Task<Fut>;
+ let ready_to_run_queue = Arc::new(ReadyToRunQueue {
+ waker: AtomicWaker::new(),
+ head: AtomicPtr::new(stub_ptr as *mut _),
+ tail: UnsafeCell::new(stub_ptr),
+ stub,
+ });
+
+ Self {
+ head_all: AtomicPtr::new(ptr::null_mut()),
+ ready_to_run_queue,
+ is_terminated: AtomicBool::new(false),
+ }
+ }
+
+ /// Returns the number of futures contained in the set.
+ ///
+ /// This represents the total number of in-flight futures.
+ pub fn len(&self) -> usize {
+ let (_, len) = self.atomic_load_head_and_len_all();
+ len
+ }
+
+ /// Returns `true` if the set contains no futures.
+ pub fn is_empty(&self) -> bool {
+ // Relaxed ordering can be used here since we don't need to read from
+ // the head pointer, only check whether it is null.
+ self.head_all.load(Relaxed).is_null()
+ }
+
+ /// Push a future into the set.
+ ///
+ /// This method adds the given future to the set. This method will not
+ /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must
+ /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called
+ /// in order to receive wake-up notifications for the given future.
+ pub fn push(&self, future: Fut) {
+ let task = Arc::new(Task {
+ future: UnsafeCell::new(Some(future)),
+ next_all: AtomicPtr::new(self.pending_next_all()),
+ prev_all: UnsafeCell::new(ptr::null_mut()),
+ len_all: UnsafeCell::new(0),
+ next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
+ queued: AtomicBool::new(true),
+ ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
+ });
+
+ // Reset the `is_terminated` flag if we've previously marked ourselves
+ // as terminated.
+ self.is_terminated.store(false, Relaxed);
+
+ // Right now our task has a strong reference count of 1. We transfer
+ // ownership of this reference count to our internal linked list
+ // and we'll reclaim ownership through the `unlink` method below.
+ let ptr = self.link(task);
+
+ // We'll need to get the future "into the system" to start tracking it,
+ // e.g. routing its wake-up notifications to us so that we can track
+ // which futures are ready. To do that we unconditionally enqueue it for
+ // polling here.
+ self.ready_to_run_queue.enqueue(ptr);
+ }
+
+ /// Returns an iterator that allows inspecting each future in the set.
+ pub fn iter(&self) -> Iter<'_, Fut>
+ where
+ Fut: Unpin,
+ {
+ Iter(Pin::new(self).iter_pin_ref())
+ }
+
+ /// Returns an iterator that allows inspecting each future in the set.
+ pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> {
+ let (task, len) = self.atomic_load_head_and_len_all();
+ let pending_next_all = self.pending_next_all();
+
+ IterPinRef { task, len, pending_next_all, _marker: PhantomData }
+ }
+
+ /// Returns an iterator that allows modifying each future in the set.
+ pub fn iter_mut(&mut self) -> IterMut<'_, Fut>
+ where
+ Fut: Unpin,
+ {
+ IterMut(Pin::new(self).iter_pin_mut())
+ }
+
+ /// Returns an iterator that allows modifying each future in the set.
+ pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> {
+ // `head_all` can be accessed directly and we don't need to spin on
+ // `Task::next_all` since we have exclusive access to the set.
+ let task = *self.head_all.get_mut();
+ let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };
+
+ IterPinMut { task, len, _marker: PhantomData }
+ }
+
+ /// Returns the current head node and number of futures in the list of all
+ /// futures within a context where access is shared with other threads
+ /// (mostly for use with the `len` and `iter_pin_ref` methods).
+ fn atomic_load_head_and_len_all(&self) -> (*const Task<Fut>, usize) {
+ let task = self.head_all.load(Acquire);
+ let len = if task.is_null() {
+ 0
+ } else {
+ unsafe {
+ (*task).spin_next_all(self.pending_next_all(), Acquire);
+ *(*task).len_all.get()
+ }
+ };
+
+ (task, len)
+ }
+
+ /// Releases the task. It destroys the future inside and either drops
+ /// the `Arc<Task>` or transfers ownership to the ready to run queue.
+ /// The task this method is called on must have been unlinked before.
+ fn release_task(&mut self, task: Arc<Task<Fut>>) {
+ // `release_task` must only be called on unlinked tasks
+ debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+ unsafe {
+ debug_assert!((*task.prev_all.get()).is_null());
+ }
+
+ // The future is done, try to reset the queued flag. This will prevent
+ // `wake` from doing any work in the future
+ let prev = task.queued.swap(true, SeqCst);
+
+ // Drop the future, even if it hasn't finished yet. This is safe
+ // because we're dropping the future on the thread that owns
+ // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and
+ // such.
+ unsafe {
+ // Set to `None` rather than `take()`ing to prevent moving the
+ // future.
+ *task.future.get() = None;
+ }
+
+ // If the queued flag was previously set, then it means that this task
+ // is still in our internal ready to run queue. We then transfer
+ // ownership of our reference count to the ready to run queue, and it'll
+ // come along and free it later, noticing that the future is `None`.
+ //
+ // If, however, the queued flag was *not* set then we're safe to
+ // release our reference count on the task. The queued flag was set
+ // above so all future `enqueue` operations will not actually
+ // enqueue the task, so our task will never see the ready to run queue
+ // again. The task itself will be deallocated once all reference counts
+ // have been dropped elsewhere by the various wakers that contain it.
+ if prev {
+ mem::forget(task);
+ }
+ }
+
+ /// Insert a new task into the internal linked list.
+ fn link(&self, task: Arc<Task<Fut>>) -> *const Task<Fut> {
+ // `next_all` should already be reset to the pending state before this
+ // function is called.
+ debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+ let ptr = Arc::into_raw(task);
+
+ // Atomically swap out the old head node to get the node that should be
+ // assigned to `next_all`.
+ let next = self.head_all.swap(ptr as *mut _, AcqRel);
+
+ unsafe {
+ // Store the new list length in the new node.
+ let new_len = if next.is_null() {
+ 1
+ } else {
+ // Make sure `next_all` has been written to signal that it is
+ // safe to read `len_all`.
+ (*next).spin_next_all(self.pending_next_all(), Acquire);
+ *(*next).len_all.get() + 1
+ };
+ *(*ptr).len_all.get() = new_len;
+
+ // Write the old head as the next node pointer, signaling to other
+ // threads that `len_all` and `next_all` are ready to read.
+ (*ptr).next_all.store(next, Release);
+
+ // `prev_all` updates don't need to be synchronized, as the field is
+ // only ever used after exclusive access has been acquired.
+ if !next.is_null() {
+ *(*next).prev_all.get() = ptr;
+ }
+ }
+
+ ptr
+ }
+
+ /// Remove the task from the linked list tracking all tasks currently
+ /// managed by `FuturesUnordered`.
+ /// This method is unsafe because it has to be guaranteed that `task` is a
+ /// valid pointer.
+ unsafe fn unlink(&mut self, task: *const Task<Fut>) -> Arc<Task<Fut>> {
+ // Compute the new list length now in case we're removing the head node
+ // and won't be able to retrieve the correct length later.
+ let head = *self.head_all.get_mut();
+ debug_assert!(!head.is_null());
+ let new_len = *(*head).len_all.get() - 1;
+
+ let task = Arc::from_raw(task);
+ let next = task.next_all.load(Relaxed);
+ let prev = *task.prev_all.get();
+ task.next_all.store(self.pending_next_all(), Relaxed);
+ *task.prev_all.get() = ptr::null_mut();
+
+ if !next.is_null() {
+ *(*next).prev_all.get() = prev;
+ }
+
+ if !prev.is_null() {
+ (*prev).next_all.store(next, Relaxed);
+ } else {
+ *self.head_all.get_mut() = next;
+ }
+
+ // Store the new list length in the head node.
+ let head = *self.head_all.get_mut();
+ if !head.is_null() {
+ *(*head).len_all.get() = new_len;
+ }
+
+ task
+ }
+
+ /// Returns the reserved value for `Task::next_all` to indicate a pending
+ /// assignment from the thread that inserted the task.
+ ///
+ /// `FuturesUnordered::link` needs to update `Task` pointers in an order
+ /// that ensures any iterators created on other threads can correctly
+ /// traverse the entire `Task` list using the chain of `next_all` pointers.
+ /// This could be solved with a compare-exchange loop that stores the
+ /// current `head_all` in `next_all` and swaps out `head_all` with the new
+ /// `Task` pointer if the head hasn't already changed. Under heavy thread
+ /// contention, this compare-exchange loop could become costly.
+ ///
+ /// An alternative is to initialize `next_all` to a reserved pending state
+ /// first, perform an atomic swap on `head_all`, and finally update
+ /// `next_all` with the old head node. Iterators will then either see the
+ /// pending state value or the correct next node pointer, and can reload
+ /// `next_all` as needed until the correct value is loaded. The number of
+ /// retries needed (if any) would be small and will always be finite, so
+ /// this should generally perform better than the compare-exchange loop.
+ ///
+ /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
+ /// this value, so it is safe to use as a reserved value until the correct
+ /// value can be written.
+ fn pending_next_all(&self) -> *mut Task<Fut> {
+ // The `ReadyToRunQueue` stub is never inserted into the `head_all`
+ // list, and its pointer value will remain valid for the lifetime of
+ // this `FuturesUnordered`, so we can make use of its value here.
+ &*self.ready_to_run_queue.stub as *const _ as *mut _
+ }
+}
+
+impl<Fut: Future> Stream for FuturesUnordered<Fut> {
+ type Item = Fut::Output;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ // See YIELD_EVERY docs for more.
+ let yield_every = cmp::min(self.len(), YIELD_EVERY);
+
+ // Keep track of how many child futures we have polled,
+ // in case we want to forcibly yield.
+ let mut polled = 0;
+
+ // Ensure the waker of the task driving this `FuturesUnordered` is registered.
+ self.ready_to_run_queue.waker.register(cx.waker());
+
+ loop {
+ // Safety: &mut self guarantees the mutual exclusion `dequeue`
+ // expects
+ let task = match unsafe { self.ready_to_run_queue.dequeue() } {
+ Dequeue::Empty => {
+ if self.is_empty() {
+ // We can only consider ourselves terminated once we
+ // have yielded a `None`
+ *self.is_terminated.get_mut() = true;
+ return Poll::Ready(None);
+ } else {
+ return Poll::Pending;
+ }
+ }
+ Dequeue::Inconsistent => {
+ // At this point, it may be worth yielding the thread &
+ // spinning a few times... but for now, just yield using the
+ // task system.
+ cx.waker().wake_by_ref();
+ return Poll::Pending;
+ }
+ Dequeue::Data(task) => task,
+ };
+
+ debug_assert!(task != self.ready_to_run_queue.stub());
+
+ // Safety:
+ // - `task` is a valid pointer.
+ // - We are the only thread that accesses the `UnsafeCell` that
+ // contains the future
+ let future = match unsafe { &mut *(*task).future.get() } {
+ Some(future) => future,
+
+ // If the future has already gone away then we're just
+ // cleaning out this task. See the comment in
+ // `release_task` for more information, but we're basically
+ // just taking ownership of our reference count here.
+ None => {
+ // This case only happens when `release_task` was called
+ // for this task before and couldn't drop the task
+ // because it was already enqueued in the ready to run
+ // queue.
+
+ // Safety: `task` is a valid pointer
+ let task = unsafe { Arc::from_raw(task) };
+
+ // Double check that the call to `release_task` really
+ // happened. Calling it required the task to be unlinked.
+ debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+ unsafe {
+ debug_assert!((*task.prev_all.get()).is_null());
+ }
+ continue;
+ }
+ };
+
+ // Safety: `task` is a valid pointer
+ let task = unsafe { self.unlink(task) };
+
+ // Unset queued flag: This must be done before polling to ensure
+ // that the future's task gets rescheduled if it sends a wake-up
+ // notification **during** the call to `poll`.
+ let prev = task.queued.swap(false, SeqCst);
+ assert!(prev);
+
+ // We're going to need to be very careful if the `poll`
+ // method below panics. We need to (a) not leak memory and
+ // (b) ensure that we still don't have any use-after-frees. To
+ // manage this we do a few things:
+ //
+ // * A "bomb" is created which if dropped abnormally will call
+ // `release_task`. That way we'll be sure the memory management
+ // of the `task` is managed correctly. In particular
+ // `release_task` will drop the future. This ensures that it is
+ // dropped on this thread and not accidentally on a different
+ // thread (bad).
+ // * We unlink the task from our internal queue to preemptively
+ // assume it'll panic, in which case we'll want to discard it
+ // regardless.
+ struct Bomb<'a, Fut> {
+ queue: &'a mut FuturesUnordered<Fut>,
+ task: Option<Arc<Task<Fut>>>,
+ }
+
+ impl<Fut> Drop for Bomb<'_, Fut> {
+ fn drop(&mut self) {
+ if let Some(task) = self.task.take() {
+ self.queue.release_task(task);
+ }
+ }
+ }
+
+ let mut bomb = Bomb { task: Some(task), queue: &mut *self };
+
+ // Poll the underlying future with the appropriate waker
+ // implementation. This is where a large bit of the unsafety
+ // starts to stem from internally. The waker is basically just
+ // our `Arc<Task<Fut>>` and can schedule the future for polling by
+ // enqueuing itself in the ready to run queue.
+ //
+ // Critically though `Task<Fut>` won't actually access `Fut`, the
+ // future, while it's floating around inside of wakers.
+ // These structs will basically just use `Fut` to size
+ // the internal allocation, appropriately accessing fields and
+ // deallocating the task if need be.
+ let res = {
+ let waker = Task::waker_ref(bomb.task.as_ref().unwrap());
+ let mut cx = Context::from_waker(&waker);
+
+ // Safety: We won't move the future ever again
+ let future = unsafe { Pin::new_unchecked(future) };
+
+ future.poll(&mut cx)
+ };
+ polled += 1;
+
+ match res {
+ Poll::Pending => {
+ let task = bomb.task.take().unwrap();
+ bomb.queue.link(task);
+
+ if polled == yield_every {
+ // We have polled a large number of futures in a row without yielding.
+ // To ensure we do not starve other tasks waiting on the executor,
+ // we yield here, but immediately wake ourselves up to continue.
+ cx.waker().wake_by_ref();
+ return Poll::Pending;
+ }
+ continue;
+ }
+ Poll::Ready(output) => return Poll::Ready(Some(output)),
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<Fut> Debug for FuturesUnordered<Fut> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "FuturesUnordered {{ ... }}")
+ }
+}
+
+impl<Fut> FuturesUnordered<Fut> {
+ /// Clears the set, removing all futures.
+ pub fn clear(&mut self) {
+ self.clear_head_all();
+
+ // we just cleared all the tasks, and we have &mut self, so this is safe.
+ unsafe { self.ready_to_run_queue.clear() };
+
+ self.is_terminated.store(false, Relaxed);
+ }
+
+ fn clear_head_all(&mut self) {
+ while !self.head_all.get_mut().is_null() {
+ let head = *self.head_all.get_mut();
+ let task = unsafe { self.unlink(head) };
+ self.release_task(task);
+ }
+ }
+}
+
+impl<Fut> Drop for FuturesUnordered<Fut> {
+ fn drop(&mut self) {
+ // When a `FuturesUnordered` is dropped we want to drop all futures
+ // associated with it. At the same time though there may be tons of
+ // wakers flying around which contain `Task<Fut>` references
+ // inside them. We'll let those naturally get deallocated.
+ self.clear_head_all();
+
+ // Note that at this point we could still have a bunch of tasks in the
+ // ready to run queue. None of those tasks, however, have futures
+ // associated with them so they're safe to destroy on any thread. At
+ // this point the `FuturesUnordered` struct, the owner of the one strong
+ // reference to the ready to run queue, will drop that strong reference.
+ // At that point whichever thread releases the strong refcount last (be
+ // it this thread or some other thread as part of an `upgrade`) will
+ // clear out the ready to run queue and free all remaining tasks.
+ //
+ // While that freeing operation isn't guaranteed to happen here, it's
+ // guaranteed to happen "promptly" as no more "blocking work" will
+ // happen while there's a strong refcount held.
+ }
+}
+
+impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered<Fut> {
+ type Item = &'a Fut;
+ type IntoIter = Iter<'a, Fut>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered<Fut> {
+ type Item = &'a mut Fut;
+ type IntoIter = IterMut<'a, Fut>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<Fut: Unpin> IntoIterator for FuturesUnordered<Fut> {
+ type Item = Fut;
+ type IntoIter = IntoIter<Fut>;
+
+ fn into_iter(mut self) -> Self::IntoIter {
+ // `head_all` can be accessed directly and we don't need to spin on
+ // `Task::next_all` since we have exclusive access to the set.
+ let task = *self.head_all.get_mut();
+ let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };
+
+ IntoIter { len, inner: self }
+ }
+}
+
+impl<Fut> FromIterator<Fut> for FuturesUnordered<Fut> {
+ fn from_iter<I>(iter: I) -> Self
+ where
+ I: IntoIterator<Item = Fut>,
+ {
+ let acc = Self::new();
+ iter.into_iter().fold(acc, |acc, item| {
+ acc.push(item);
+ acc
+ })
+ }
+}
+
+impl<Fut: Future> FusedStream for FuturesUnordered<Fut> {
+ fn is_terminated(&self) -> bool {
+ self.is_terminated.load(Relaxed)
+ }
+}
+
+impl<Fut> Extend<Fut> for FuturesUnordered<Fut> {
+ fn extend<I>(&mut self, iter: I)
+ where
+ I: IntoIterator<Item = Fut>,
+ {
+ for item in iter {
+ self.push(item);
+ }
+ }
+}
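+
+// A minimal usage sketch (illustrative only, assuming the `futures` facade
+// crate): outputs are yielded as their futures become ready, in no particular
+// order, so the results are sorted before being compared here.
+//
+//     futures::executor::block_on(async {
+//         use futures::future;
+//         use futures::stream::{FuturesUnordered, StreamExt};
+//
+//         let set: FuturesUnordered<_> =
+//             vec![future::ready(1), future::ready(2), future::ready(3)].into_iter().collect();
+//         let mut outputs: Vec<i32> = set.collect().await;
+//         outputs.sort_unstable(); // completion order is not guaranteed
+//         assert_eq!(outputs, vec![1, 2, 3]);
+//     });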
diff --git a/vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs b/vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs
new file mode 100644
index 000000000..5ef6cde83
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs
@@ -0,0 +1,122 @@
+use crate::task::AtomicWaker;
+use alloc::sync::Arc;
+use core::cell::UnsafeCell;
+use core::ptr;
+use core::sync::atomic::AtomicPtr;
+use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
+
+use super::abort::abort;
+use super::task::Task;
+
+pub(super) enum Dequeue<Fut> {
+ Data(*const Task<Fut>),
+ Empty,
+ Inconsistent,
+}
+
+pub(super) struct ReadyToRunQueue<Fut> {
+ // The waker of the task using `FuturesUnordered`.
+ pub(super) waker: AtomicWaker,
+
+ // Head/tail of the readiness queue
+ pub(super) head: AtomicPtr<Task<Fut>>,
+ pub(super) tail: UnsafeCell<*const Task<Fut>>,
+ pub(super) stub: Arc<Task<Fut>>,
+}
+
+/// An MPSC queue into which the tasks containing the futures are inserted
+/// whenever the future inside is scheduled for polling.
+impl<Fut> ReadyToRunQueue<Fut> {
+ /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
+ pub(super) fn enqueue(&self, task: *const Task<Fut>) {
+ unsafe {
+ debug_assert!((*task).queued.load(Relaxed));
+
+ // This action does not require any coordination
+ (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);
+
+ // Note that these atomic orderings come from 1024cores
+ let task = task as *mut _;
+ let prev = self.head.swap(task, AcqRel);
+ (*prev).next_ready_to_run.store(task, Release);
+ }
+ }
+
+ /// The dequeue function from the 1024cores intrusive MPSC queue algorithm
+ ///
+ /// Note that this is unsafe as it requires mutual exclusion (only one
+ /// thread can call this) to be guaranteed elsewhere.
+ pub(super) unsafe fn dequeue(&self) -> Dequeue<Fut> {
+ let mut tail = *self.tail.get();
+ let mut next = (*tail).next_ready_to_run.load(Acquire);
+
+ if tail == self.stub() {
+ if next.is_null() {
+ return Dequeue::Empty;
+ }
+
+ *self.tail.get() = next;
+ tail = next;
+ next = (*next).next_ready_to_run.load(Acquire);
+ }
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ debug_assert!(tail != self.stub());
+ return Dequeue::Data(tail);
+ }
+
+ if self.head.load(Acquire) as *const _ != tail {
+ return Dequeue::Inconsistent;
+ }
+
+ self.enqueue(self.stub());
+
+ next = (*tail).next_ready_to_run.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ Dequeue::Inconsistent
+ }
+
+ pub(super) fn stub(&self) -> *const Task<Fut> {
+ &*self.stub
+ }
+
+ // Clear the queue of tasks.
+ //
+ // Note that each task has a strong reference count associated with it
+ // which is owned by the ready to run queue. This method just pulls out
+ // tasks and drops their refcounts.
+ //
+ // # Safety
+ //
+ // - All tasks **must** have had their futures dropped already (by FuturesUnordered::clear)
+ // - The caller **must** guarantee unique access to `self`
+ pub(crate) unsafe fn clear(&self) {
+ loop {
+ // SAFETY: We have the guarantee of mutual exclusion required by `dequeue`.
+ match self.dequeue() {
+ Dequeue::Empty => break,
+ Dequeue::Inconsistent => abort("inconsistent in drop"),
+ Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)),
+ }
+ }
+ }
+}
+
+impl<Fut> Drop for ReadyToRunQueue<Fut> {
+ fn drop(&mut self) {
+ // Once we're in the destructor for `ReadyToRunQueue<Fut>` we need to clear out
+ // the ready to run queue of tasks if there's anything left in there.
+
+ // All tasks have had their futures dropped already by the `FuturesUnordered`
+ // destructor above, and we have &mut self, so this is safe.
+ unsafe {
+ self.clear();
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/futures_unordered/task.rs b/vendor/futures-util/src/stream/futures_unordered/task.rs
new file mode 100644
index 000000000..da2cd67d9
--- /dev/null
+++ b/vendor/futures-util/src/stream/futures_unordered/task.rs
@@ -0,0 +1,118 @@
+use alloc::sync::{Arc, Weak};
+use core::cell::UnsafeCell;
+use core::sync::atomic::Ordering::{self, SeqCst};
+use core::sync::atomic::{AtomicBool, AtomicPtr};
+
+use super::abort::abort;
+use super::ReadyToRunQueue;
+use crate::task::{waker_ref, ArcWake, WakerRef};
+
+pub(super) struct Task<Fut> {
+ // The future
+ pub(super) future: UnsafeCell<Option<Fut>>,
+
+ // Next pointer for linked list tracking all active tasks (use
+ // `spin_next_all` to read when access is shared across threads)
+ pub(super) next_all: AtomicPtr<Task<Fut>>,
+
+ // Previous task in linked list tracking all active tasks
+ pub(super) prev_all: UnsafeCell<*const Task<Fut>>,
+
+ // Length of the linked list tracking all active tasks when this node was
+ // inserted (use `spin_next_all` to synchronize before reading when access
+ // is shared across threads)
+ pub(super) len_all: UnsafeCell<usize>,
+
+ // Next pointer in ready to run queue
+ pub(super) next_ready_to_run: AtomicPtr<Task<Fut>>,
+
+ // Queue that we'll be enqueued to when woken
+ pub(super) ready_to_run_queue: Weak<ReadyToRunQueue<Fut>>,
+
+ // Whether or not this task is currently in the ready to run queue
+ pub(super) queued: AtomicBool,
+}
+
+// `Task` can be sent across threads safely because it ensures that
+// the underlying `Fut` type isn't touched from any of its methods.
+//
+// The parent (`super`) module is trusted not to access `future`
+// across different threads.
+unsafe impl<Fut> Send for Task<Fut> {}
+unsafe impl<Fut> Sync for Task<Fut> {}
+
+impl<Fut> ArcWake for Task<Fut> {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ let inner = match arc_self.ready_to_run_queue.upgrade() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ // It's our job to enqueue this task into the ready to run queue. To
+ // do this we set the `queued` flag, and if successful we then do the
+ // actual queueing operation, ensuring that we're only queued once.
+ //
+ // Once the task is inserted call `wake` to notify the parent task,
+ // as it'll want to come along and run our task later.
+ //
+ // Note that we don't change the reference count of the task here,
+ // we merely enqueue the raw pointer. The `FuturesUnordered`
+ // implementation guarantees that if we set the `queued` flag that
+ // there's a reference count held by the main `FuturesUnordered` queue
+ // still.
+ let prev = arc_self.queued.swap(true, SeqCst);
+ if !prev {
+ inner.enqueue(&**arc_self);
+ inner.waker.wake();
+ }
+ }
+}
+
+impl<Fut> Task<Fut> {
+ /// Returns a waker reference for this task without cloning the Arc.
+ pub(super) fn waker_ref(this: &Arc<Self>) -> WakerRef<'_> {
+ waker_ref(this)
+ }
+
+ /// Spins until `next_all` is no longer set to `pending_next_all`.
+ ///
+ /// The temporary `pending_next_all` value is typically overwritten fairly
+ /// quickly after a node is inserted into the list of all futures, so this
+ /// should rarely spin much.
+ ///
+ /// When it returns, the correct `next_all` value is returned.
+ ///
+ /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be
+ /// used before `len_all` can be safely read.
+ #[inline]
+ pub(super) fn spin_next_all(
+ &self,
+ pending_next_all: *mut Self,
+ ordering: Ordering,
+ ) -> *const Self {
+ loop {
+ let next = self.next_all.load(ordering);
+ if next != pending_next_all {
+ return next;
+ }
+ }
+ }
+}
+
+impl<Fut> Drop for Task<Fut> {
+ fn drop(&mut self) {
+ // Since `Task<Fut>` is sent across threads regardless of `Fut`, we
+ // can't, to guarantee memory safety, actually touch `Fut` at any time
+ // except when we have a reference to the `FuturesUnordered` itself.
+ //
+ // Consequently it *should* be the case that we always drop futures from
+ // the `FuturesUnordered` instance. This is a bomb, just in case there's
+ // a bug in that logic.
+ unsafe {
+ if (*self.future.get()).is_some() {
+ abort("future still here when dropping");
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/iter.rs b/vendor/futures-util/src/stream/iter.rs
new file mode 100644
index 000000000..20471c2ed
--- /dev/null
+++ b/vendor/futures-util/src/stream/iter.rs
@@ -0,0 +1,49 @@
+use super::assert_stream;
+use core::pin::Pin;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+
+/// Stream for the [`iter`] function.
+#[derive(Debug, Clone)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Iter<I> {
+ iter: I,
+}
+
+impl<I> Unpin for Iter<I> {}
+
+/// Converts an `Iterator` into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that.
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// let stream = stream::iter(vec![17, 19]);
+/// assert_eq!(vec![17, 19], stream.collect::<Vec<i32>>().await);
+/// # });
+/// ```
+pub fn iter<I>(i: I) -> Iter<I::IntoIter>
+where
+ I: IntoIterator,
+{
+ assert_stream::<I::Item, _>(Iter { iter: i.into_iter() })
+}
+
+impl<I> Stream for Iter<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<I::Item>> {
+ Poll::Ready(self.iter.next())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
diff --git a/vendor/futures-util/src/stream/mod.rs b/vendor/futures-util/src/stream/mod.rs
new file mode 100644
index 000000000..ec685b984
--- /dev/null
+++ b/vendor/futures-util/src/stream/mod.rs
@@ -0,0 +1,143 @@
+//! Asynchronous streams.
+//!
+//! This module contains:
+//!
+//! - The [`Stream`] trait, for objects that can asynchronously produce a
+//! sequence of values.
+//! - The [`StreamExt`] and [`TryStreamExt`] traits, which provide adapters for
+//! chaining and composing streams.
+//! - Top-level stream constructors like [`iter`](iter()) which creates a
+//! stream from an iterator.
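+//!
+//! As a small illustrative sketch, the pieces above combine like so: build a
+//! stream from an iterator, transform it with a [`StreamExt`] adapter, and
+//! collect the results.
+//!
+//! ```
+//! # futures::executor::block_on(async {
+//! use futures::stream::{self, StreamExt};
+//!
+//! let doubled: Vec<i32> = stream::iter(vec![1, 2, 3]).map(|x| x * 2).collect().await;
+//! assert_eq!(doubled, vec![2, 4, 6]);
+//! # });
+//! ```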
+
+#[cfg(feature = "alloc")]
+pub use futures_core::stream::{BoxStream, LocalBoxStream};
+pub use futures_core::stream::{FusedStream, Stream, TryStream};
+
+// Extension traits and combinators
+
+#[allow(clippy::module_inception)]
+mod stream;
+pub use self::stream::{
+ Chain, Collect, Concat, Cycle, Enumerate, Filter, FilterMap, FlatMap, Flatten, Fold, ForEach,
+ Fuse, Inspect, Map, Next, NextIf, NextIfEq, Peek, PeekMut, Peekable, Scan, SelectNextSome,
+ Skip, SkipWhile, StreamExt, StreamFuture, Take, TakeUntil, TakeWhile, Then, Unzip, Zip,
+};
+
+#[cfg(feature = "std")]
+pub use self::stream::CatchUnwind;
+
+#[cfg(feature = "alloc")]
+pub use self::stream::Chunks;
+
+#[cfg(feature = "alloc")]
+pub use self::stream::ReadyChunks;
+
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub use self::stream::Forward;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use self::stream::{BufferUnordered, Buffered, ForEachConcurrent};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+#[cfg(feature = "alloc")]
+pub use self::stream::{ReuniteError, SplitSink, SplitStream};
+
+mod try_stream;
+pub use self::try_stream::{
+ try_unfold, AndThen, ErrInto, InspectErr, InspectOk, IntoStream, MapErr, MapOk, OrElse,
+ TryCollect, TryConcat, TryFilter, TryFilterMap, TryFlatten, TryFold, TryForEach, TryNext,
+ TrySkipWhile, TryStreamExt, TryTakeWhile, TryUnfold,
+};
+
+#[cfg(feature = "io")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
+#[cfg(feature = "std")]
+pub use self::try_stream::IntoAsyncRead;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use self::try_stream::{TryBufferUnordered, TryBuffered, TryForEachConcurrent};
+
+#[cfg(feature = "alloc")]
+pub use self::try_stream::{TryChunks, TryChunksError};
+
+// Primitive streams
+
+mod iter;
+pub use self::iter::{iter, Iter};
+
+mod repeat;
+pub use self::repeat::{repeat, Repeat};
+
+mod repeat_with;
+pub use self::repeat_with::{repeat_with, RepeatWith};
+
+mod empty;
+pub use self::empty::{empty, Empty};
+
+mod once;
+pub use self::once::{once, Once};
+
+mod pending;
+pub use self::pending::{pending, Pending};
+
+mod poll_fn;
+pub use self::poll_fn::{poll_fn, PollFn};
+
+mod poll_immediate;
+pub use self::poll_immediate::{poll_immediate, PollImmediate};
+
+mod select;
+pub use self::select::{select, Select};
+
+mod select_with_strategy;
+pub use self::select_with_strategy::{select_with_strategy, PollNext, SelectWithStrategy};
+
+mod unfold;
+pub use self::unfold::{unfold, Unfold};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod futures_ordered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use self::futures_ordered::FuturesOrdered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub mod futures_unordered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[doc(inline)]
+pub use self::futures_unordered::FuturesUnordered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub mod select_all;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[doc(inline)]
+pub use self::select_all::{select_all, SelectAll};
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod abortable;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use crate::abortable::{AbortHandle, AbortRegistration, Abortable, Aborted};
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use abortable::abortable;
+
+// Just a helper function to ensure the streams we're returning all have the
+// right implementations.
+pub(crate) fn assert_stream<T, S>(stream: S) -> S
+where
+ S: Stream<Item = T>,
+{
+ stream
+}
diff --git a/vendor/futures-util/src/stream/once.rs b/vendor/futures-util/src/stream/once.rs
new file mode 100644
index 000000000..ee21c8b59
--- /dev/null
+++ b/vendor/futures-util/src/stream/once.rs
@@ -0,0 +1,67 @@
+use super::assert_stream;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+/// Creates a stream of a single element.
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// let stream = stream::once(async { 17 });
+/// let collected = stream.collect::<Vec<i32>>().await;
+/// assert_eq!(collected, vec![17]);
+/// # });
+/// ```
+pub fn once<Fut: Future>(future: Fut) -> Once<Fut> {
+ assert_stream::<Fut::Output, _>(Once::new(future))
+}
+
+pin_project! {
+ /// A stream which emits a single element and then ends.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Once<Fut> {
+ #[pin]
+ future: Option<Fut>
+ }
+}
+
+impl<Fut> Once<Fut> {
+ pub(crate) fn new(future: Fut) -> Self {
+ Self { future: Some(future) }
+ }
+}
+
+impl<Fut: Future> Stream for Once<Fut> {
+ type Item = Fut::Output;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ let v = match this.future.as_mut().as_pin_mut() {
+ Some(fut) => ready!(fut.poll(cx)),
+ None => return Poll::Ready(None),
+ };
+
+ this.future.set(None);
+ Poll::Ready(Some(v))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.future.is_some() {
+ (1, Some(1))
+ } else {
+ (0, Some(0))
+ }
+ }
+}
+
+impl<Fut: Future> FusedStream for Once<Fut> {
+ fn is_terminated(&self) -> bool {
+ self.future.is_none()
+ }
+}
diff --git a/vendor/futures-util/src/stream/pending.rs b/vendor/futures-util/src/stream/pending.rs
new file mode 100644
index 000000000..d7030ff3c
--- /dev/null
+++ b/vendor/futures-util/src/stream/pending.rs
@@ -0,0 +1,45 @@
+use super::assert_stream;
+use core::marker;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+/// Stream for the [`pending()`] function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Pending<T> {
+ _data: marker::PhantomData<T>,
+}
+
+/// Creates a stream which never returns any elements.
+///
+/// The returned stream will always return `Pending` when polled.
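+///
+/// A minimal sketch of what this means in practice: polling the returned
+/// stream never produces an item, so `FutureExt::now_or_never` reports the
+/// `next` future as not ready.
+///
+/// ```
+/// use futures::future::FutureExt;
+/// use futures::stream::{self, StreamExt};
+///
+/// let mut stream = stream::pending::<i32>();
+/// assert_eq!(stream.next().now_or_never(), None);
+/// ```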
+pub fn pending<T>() -> Pending<T> {
+ assert_stream::<T, _>(Pending { _data: marker::PhantomData })
+}
+
+impl<T> Unpin for Pending<T> {}
+
+impl<T> FusedStream for Pending<T> {
+ fn is_terminated(&self) -> bool {
+ true
+ }
+}
+
+impl<T> Stream for Pending<T> {
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Pending
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(0))
+ }
+}
+
+impl<T> Clone for Pending<T> {
+ fn clone(&self) -> Self {
+ pending()
+ }
+}
diff --git a/vendor/futures-util/src/stream/poll_fn.rs b/vendor/futures-util/src/stream/poll_fn.rs
new file mode 100644
index 000000000..b9bd7d166
--- /dev/null
+++ b/vendor/futures-util/src/stream/poll_fn.rs
@@ -0,0 +1,57 @@
+//! Definition of the `PollFn` combinator
+
+use super::assert_stream;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+
+/// Stream for the [`poll_fn`] function.
+#[must_use = "streams do nothing unless polled"]
+pub struct PollFn<F> {
+ f: F,
+}
+
+impl<F> Unpin for PollFn<F> {}
+
+impl<F> fmt::Debug for PollFn<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PollFn").finish()
+ }
+}
+
+/// Creates a new stream wrapping a function returning `Poll<Option<T>>`.
+///
+/// Polling the returned stream calls the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::stream::poll_fn;
+/// use futures::task::Poll;
+///
+/// let mut counter = 1usize;
+///
+/// let read_stream = poll_fn(move |_| -> Poll<Option<String>> {
+/// if counter == 0 { return Poll::Ready(None); }
+/// counter -= 1;
+/// Poll::Ready(Some("Hello, World!".to_owned()))
+/// });
+/// ```
+pub fn poll_fn<T, F>(f: F) -> PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<Option<T>>,
+{
+ assert_stream::<T, _>(PollFn { f })
+}
+
+impl<T, F> Stream for PollFn<F>
+where
+ F: FnMut(&mut Context<'_>) -> Poll<Option<T>>,
+{
+ type Item = T;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ (&mut self.f)(cx)
+ }
+}
diff --git a/vendor/futures-util/src/stream/poll_immediate.rs b/vendor/futures-util/src/stream/poll_immediate.rs
new file mode 100644
index 000000000..c7e8a5b3c
--- /dev/null
+++ b/vendor/futures-util/src/stream/poll_immediate.rs
@@ -0,0 +1,80 @@
+use core::pin::Pin;
+use futures_core::task::{Context, Poll};
+use futures_core::Stream;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [poll_immediate](poll_immediate()) function.
+ ///
+ /// It will never return [Poll::Pending](core::task::Poll::Pending).
+ #[derive(Debug, Clone)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct PollImmediate<S> {
+ #[pin]
+ stream: Option<S>
+ }
+}
+
+impl<T, S> Stream for PollImmediate<S>
+where
+ S: Stream<Item = T>,
+{
+ type Item = Poll<T>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ let stream = match this.stream.as_mut().as_pin_mut() {
+ // inner is gone, so we can continue to signal that the stream is closed.
+ None => return Poll::Ready(None),
+ Some(inner) => inner,
+ };
+
+ match stream.poll_next(cx) {
+ Poll::Ready(Some(t)) => Poll::Ready(Some(Poll::Ready(t))),
+ Poll::Ready(None) => {
+ this.stream.set(None);
+ Poll::Ready(None)
+ }
+ Poll::Pending => Poll::Ready(Some(Poll::Pending)),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.as_ref().map_or((0, Some(0)), Stream::size_hint)
+ }
+}
+
+impl<S: Stream> super::FusedStream for PollImmediate<S> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_none()
+ }
+}
+
+/// Creates a new stream that always immediately returns [Poll::Ready](core::task::Poll::Ready) when awaiting it.
+///
+/// This is useful when immediacy is more important than waiting for the next item to be ready.
+///
+/// # Examples
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+/// use futures::task::Poll;
+///
+/// let mut r = stream::poll_immediate(Box::pin(stream::iter(1_u32..3)));
+/// assert_eq!(r.next().await, Some(Poll::Ready(1)));
+/// assert_eq!(r.next().await, Some(Poll::Ready(2)));
+/// assert_eq!(r.next().await, None);
+///
+/// let mut p = stream::poll_immediate(Box::pin(stream::once(async {
+/// futures::pending!();
+/// 42_u8
+/// })));
+/// assert_eq!(p.next().await, Some(Poll::Pending));
+/// assert_eq!(p.next().await, Some(Poll::Ready(42)));
+/// assert_eq!(p.next().await, None);
+/// # });
+/// ```
+pub fn poll_immediate<S: Stream>(s: S) -> PollImmediate<S> {
+ super::assert_stream::<Poll<S::Item>, PollImmediate<S>>(PollImmediate { stream: Some(s) })
+}
diff --git a/vendor/futures-util/src/stream/repeat.rs b/vendor/futures-util/src/stream/repeat.rs
new file mode 100644
index 000000000..3f9aa87d5
--- /dev/null
+++ b/vendor/futures-util/src/stream/repeat.rs
@@ -0,0 +1,58 @@
+use super::assert_stream;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+/// Stream for the [`repeat`] function.
+#[derive(Debug, Clone)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Repeat<T> {
+ item: T,
+}
+
+/// Create a stream which produces the same item repeatedly.
+///
+/// The stream never terminates. Note that you likely want to avoid using
+/// `collect` or similar adapters on the returned stream, as they will never
+/// complete and will try to exhaust all available memory.
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// let stream = stream::repeat(9);
+/// assert_eq!(vec![9, 9, 9], stream.take(3).collect::<Vec<i32>>().await);
+/// # });
+/// ```
+pub fn repeat<T>(item: T) -> Repeat<T>
+where
+ T: Clone,
+{
+ assert_stream::<T, _>(Repeat { item })
+}
+
+impl<T> Unpin for Repeat<T> {}
+
+impl<T> Stream for Repeat<T>
+where
+ T: Clone,
+{
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(Some(self.item.clone()))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::max_value(), None)
+ }
+}
+
+impl<T> FusedStream for Repeat<T>
+where
+ T: Clone,
+{
+ fn is_terminated(&self) -> bool {
+ false
+ }
+}
diff --git a/vendor/futures-util/src/stream/repeat_with.rs b/vendor/futures-util/src/stream/repeat_with.rs
new file mode 100644
index 000000000..f5a81b4ed
--- /dev/null
+++ b/vendor/futures-util/src/stream/repeat_with.rs
@@ -0,0 +1,93 @@
+use super::assert_stream;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+/// A stream that repeats elements of type `A` endlessly by
+/// applying the provided closure `F: FnMut() -> A`.
+///
+/// This `struct` is created by the [`repeat_with()`] function.
+/// See its documentation for more.
+#[derive(Debug, Clone)]
+#[must_use = "streams do nothing unless polled"]
+pub struct RepeatWith<F> {
+ repeater: F,
+}
+
+impl<A, F: FnMut() -> A> Unpin for RepeatWith<F> {}
+
+impl<A, F: FnMut() -> A> Stream for RepeatWith<F> {
+ type Item = A;
+
+ fn poll_next(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Poll::Ready(Some((&mut self.repeater)()))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::max_value(), None)
+ }
+}
+
+impl<A, F: FnMut() -> A> FusedStream for RepeatWith<F> {
+ fn is_terminated(&self) -> bool {
+ false
+ }
+}
+
+/// Creates a new stream that repeats elements of type `A` endlessly by
+/// applying the provided closure, the repeater, `F: FnMut() -> A`.
+///
+/// The `repeat_with()` function calls the repeater over and over again.
+///
+/// Infinite streams like `repeat_with()` are often used with adapters like
+/// [`stream.take()`] in order to make them finite.
+///
+/// If the element type of the stream you need implements [`Clone`], and
+/// it is OK to keep the source element in memory, you should instead use
+/// the [`stream.repeat()`] function.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// // let's assume we have some value of a type that is not `Clone`
+/// // or which we don't want to keep in memory just yet because it is expensive:
+/// #[derive(PartialEq, Debug)]
+/// struct Expensive;
+///
+/// // build a stream that produces that value forever:
+/// let mut things = stream::repeat_with(|| Expensive);
+///
+/// assert_eq!(Some(Expensive), things.next().await);
+/// assert_eq!(Some(Expensive), things.next().await);
+/// assert_eq!(Some(Expensive), things.next().await);
+/// # });
+/// ```
+///
+/// Using mutation and going finite:
+///
+/// ```rust
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// // From the zeroth to the third power of two:
+/// let mut curr = 1;
+/// let mut pow2 = stream::repeat_with(|| { let tmp = curr; curr *= 2; tmp })
+/// .take(4);
+///
+/// assert_eq!(Some(1), pow2.next().await);
+/// assert_eq!(Some(2), pow2.next().await);
+/// assert_eq!(Some(4), pow2.next().await);
+/// assert_eq!(Some(8), pow2.next().await);
+///
+/// // ... and now we're done
+/// assert_eq!(None, pow2.next().await);
+/// # });
+/// ```
+pub fn repeat_with<A, F: FnMut() -> A>(repeater: F) -> RepeatWith<F> {
+ assert_stream::<A, _>(RepeatWith { repeater })
+}
diff --git a/vendor/futures-util/src/stream/select.rs b/vendor/futures-util/src/stream/select.rs
new file mode 100644
index 000000000..0c1e3af78
--- /dev/null
+++ b/vendor/futures-util/src/stream/select.rs
@@ -0,0 +1,117 @@
+use super::assert_stream;
+use crate::stream::{select_with_strategy, PollNext, SelectWithStrategy};
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`select()`] function.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Select<St1, St2> {
+ #[pin]
+ inner: SelectWithStrategy<St1, St2, fn(&mut PollNext)-> PollNext, PollNext>,
+ }
+}
+
+/// This function will attempt to pull items from both streams. Each
+/// stream will be polled in a round-robin fashion, and whenever a stream is
+/// ready to yield an item that item is yielded.
+///
+/// After one of the two input streams completes, the remaining one will be
+/// polled exclusively. The returned stream completes when both input
+/// streams have completed.
+///
+/// Note that this function consumes both streams and returns a wrapped
+/// version of them.
+///
+/// ## Examples
+///
+/// ```rust
+/// # futures::executor::block_on(async {
+/// use futures::stream::{ repeat, select, StreamExt };
+///
+/// let left = repeat(1);
+/// let right = repeat(2);
+///
+/// let mut out = select(left, right);
+///
+/// for _ in 0..100 {
+/// // We should be alternating.
+/// assert_eq!(1, out.select_next_some().await);
+/// assert_eq!(2, out.select_next_some().await);
+/// }
+/// # });
+/// ```
+pub fn select<St1, St2>(stream1: St1, stream2: St2) -> Select<St1, St2>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ fn round_robin(last: &mut PollNext) -> PollNext {
+ last.toggle()
+ }
+
+ assert_stream::<St1::Item, _>(Select {
+ inner: select_with_strategy(stream1, stream2, round_robin),
+ })
+}
+
+impl<St1, St2> Select<St1, St2> {
+ /// Acquires a reference to the underlying streams that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> (&St1, &St2) {
+ self.inner.get_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> (&mut St1, &mut St2) {
+ self.inner.get_mut()
+ }
+
+ /// Acquires a pinned mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) {
+ let this = self.project();
+ this.inner.get_pin_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying streams.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> (St1, St2) {
+ self.inner.into_inner()
+ }
+}
+
+impl<St1, St2> FusedStream for Select<St1, St2>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ fn is_terminated(&self) -> bool {
+ self.inner.is_terminated()
+ }
+}
+
+impl<St1, St2> Stream for Select<St1, St2>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ type Item = St1::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St1::Item>> {
+ let this = self.project();
+ this.inner.poll_next(cx)
+ }
+}
diff --git a/vendor/futures-util/src/stream/select_all.rs b/vendor/futures-util/src/stream/select_all.rs
new file mode 100644
index 000000000..3474331ad
--- /dev/null
+++ b/vendor/futures-util/src/stream/select_all.rs
@@ -0,0 +1,254 @@
+//! An unbounded set of streams
+
+use core::fmt::{self, Debug};
+use core::iter::FromIterator;
+use core::pin::Pin;
+
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+use pin_project_lite::pin_project;
+
+use super::assert_stream;
+use crate::stream::{futures_unordered, FuturesUnordered, StreamExt, StreamFuture};
+
+pin_project! {
+ /// An unbounded set of streams
+ ///
+ /// This "combinator" provides the ability to maintain a set of streams
+ /// and drive them all to completion.
+ ///
+ /// Streams are pushed into this set and their realized values are
+ /// yielded as they become ready. Streams will only be polled when they
+ /// generate notifications. This allows coordinating a large number of streams.
+ ///
+ /// Note that you can create a ready-made `SelectAll` via the
+ /// `select_all` function in the `stream` module, or you can start with an
+ /// empty set with the `SelectAll::new` constructor.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct SelectAll<St> {
+ #[pin]
+ inner: FuturesUnordered<StreamFuture<St>>,
+ }
+}
+
+impl<St: Debug> Debug for SelectAll<St> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SelectAll {{ ... }}")
+ }
+}
+
+impl<St: Stream + Unpin> SelectAll<St> {
+ /// Constructs a new, empty `SelectAll`
+ ///
+ /// The returned `SelectAll` does not contain any streams and, in this
+ /// state, `SelectAll::poll` will return `Poll::Ready(None)`.
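+ ///
+ /// A small illustrative sketch: an empty set yields `None` immediately, but
+ /// becomes productive again once streams are pushed into it.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, SelectAll, StreamExt};
+ ///
+ /// let mut set = SelectAll::new();
+ /// assert_eq!(set.next().await, None);
+ ///
+ /// set.push(stream::iter(vec![1, 2, 3]));
+ /// assert_eq!(set.next().await, Some(1));
+ /// # });
+ /// ```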
+ pub fn new() -> Self {
+ Self { inner: FuturesUnordered::new() }
+ }
+
+ /// Returns the number of streams contained in the set.
+ ///
+ /// This represents the total number of in-flight streams.
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ /// Returns `true` if the set contains no streams
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Push a stream into the set.
+ ///
+ /// This function submits the given stream to the set for managing. This
+ /// function will not call `poll` on the submitted stream. The caller must
+ /// ensure that `SelectAll::poll` is called in order to receive task
+ /// notifications.
+ pub fn push(&mut self, stream: St) {
+ self.inner.push(stream.into_future());
+ }
+
+ /// Returns an iterator that allows inspecting each stream in the set.
+ pub fn iter(&self) -> Iter<'_, St> {
+ Iter(self.inner.iter())
+ }
+
+ /// Returns an iterator that allows modifying each stream in the set.
+ pub fn iter_mut(&mut self) -> IterMut<'_, St> {
+ IterMut(self.inner.iter_mut())
+ }
+
+ /// Clears the set, removing all streams.
+ pub fn clear(&mut self) {
+ self.inner.clear()
+ }
+}
+
+impl<St: Stream + Unpin> Default for SelectAll<St> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<St: Stream + Unpin> Stream for SelectAll<St> {
+ type Item = St::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ loop {
+ match ready!(self.inner.poll_next_unpin(cx)) {
+ Some((Some(item), remaining)) => {
+ self.push(remaining);
+ return Poll::Ready(Some(item));
+ }
+ Some((None, _)) => {
+ // `FuturesUnordered` thinks it isn't terminated
+ // because it yielded a Some.
+ // We do not return, but poll `FuturesUnordered`
+ // in the next loop iteration.
+ }
+ None => return Poll::Ready(None),
+ }
+ }
+ }
+}
+
+impl<St: Stream + Unpin> FusedStream for SelectAll<St> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_terminated()
+ }
+}
+
+/// Convert a list of streams into a `Stream` of results from the streams.
+///
+/// This essentially takes a list of streams (e.g. a vector, an iterator, etc.)
+/// and bundles them together into a single stream.
+/// The stream will yield items from the underlying streams as they become
+/// available, in the order in which they become available.
+///
+/// Note that the returned set can also be used to dynamically push more
+/// streams into the set as they become available.
+///
+/// This function is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
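+///
+/// A small illustrative sketch: the items of several streams are funneled into
+/// a single stream. The interleaving is unspecified, so the results are sorted
+/// before comparing.
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// let streams = vec![stream::iter(vec![1, 2]), stream::iter(vec![3, 4])];
+///
+/// let mut items: Vec<_> = stream::select_all(streams).collect().await;
+/// items.sort();
+/// assert_eq!(items, vec![1, 2, 3, 4]);
+/// # });
+/// ```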
+pub fn select_all<I>(streams: I) -> SelectAll<I::Item>
+where
+ I: IntoIterator,
+ I::Item: Stream + Unpin,
+{
+ let mut set = SelectAll::new();
+
+ for stream in streams {
+ set.push(stream);
+ }
+
+ assert_stream::<<I::Item as Stream>::Item, _>(set)
+}
+
+impl<St: Stream + Unpin> FromIterator<St> for SelectAll<St> {
+ fn from_iter<T: IntoIterator<Item = St>>(iter: T) -> Self {
+ select_all(iter)
+ }
+}
+
+impl<St: Stream + Unpin> Extend<St> for SelectAll<St> {
+ fn extend<T: IntoIterator<Item = St>>(&mut self, iter: T) {
+ for st in iter {
+ self.push(st)
+ }
+ }
+}
+
+impl<St: Stream + Unpin> IntoIterator for SelectAll<St> {
+ type Item = St;
+ type IntoIter = IntoIter<St>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter(self.inner.into_iter())
+ }
+}
+
+impl<'a, St: Stream + Unpin> IntoIterator for &'a SelectAll<St> {
+ type Item = &'a St;
+ type IntoIter = Iter<'a, St>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, St: Stream + Unpin> IntoIterator for &'a mut SelectAll<St> {
+ type Item = &'a mut St;
+ type IntoIter = IterMut<'a, St>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// Immutable iterator over all streams in the unordered set.
+#[derive(Debug)]
+pub struct Iter<'a, St: Unpin>(futures_unordered::Iter<'a, StreamFuture<St>>);
+
+/// Mutable iterator over all streams in the unordered set.
+#[derive(Debug)]
+pub struct IterMut<'a, St: Unpin>(futures_unordered::IterMut<'a, StreamFuture<St>>);
+
+/// Owned iterator over all streams in the unordered set.
+#[derive(Debug)]
+pub struct IntoIter<St: Unpin>(futures_unordered::IntoIter<StreamFuture<St>>);
+
+impl<'a, St: Stream + Unpin> Iterator for Iter<'a, St> {
+ type Item = &'a St;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let st = self.0.next()?;
+ let next = st.get_ref();
+ // This should always be true because FuturesUnordered removes completed futures.
+ debug_assert!(next.is_some());
+ next
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl<St: Stream + Unpin> ExactSizeIterator for Iter<'_, St> {}
+
+impl<'a, St: Stream + Unpin> Iterator for IterMut<'a, St> {
+ type Item = &'a mut St;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let st = self.0.next()?;
+ let next = st.get_mut();
+ // This should always be true because FuturesUnordered removes completed futures.
+ debug_assert!(next.is_some());
+ next
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl<St: Stream + Unpin> ExactSizeIterator for IterMut<'_, St> {}
+
+impl<St: Stream + Unpin> Iterator for IntoIter<St> {
+ type Item = St;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let st = self.0.next()?;
+ let next = st.into_inner();
+ // This should always be true because FuturesUnordered removes completed futures.
+ debug_assert!(next.is_some());
+ next
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+}
+
+impl<St: Stream + Unpin> ExactSizeIterator for IntoIter<St> {}
diff --git a/vendor/futures-util/src/stream/select_with_strategy.rs b/vendor/futures-util/src/stream/select_with_strategy.rs
new file mode 100644
index 000000000..bd86990cd
--- /dev/null
+++ b/vendor/futures-util/src/stream/select_with_strategy.rs
@@ -0,0 +1,229 @@
+use super::assert_stream;
+use crate::stream::{Fuse, StreamExt};
+use core::{fmt, pin::Pin};
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+/// Type to tell [`SelectWithStrategy`] which stream to poll next.
+#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
+pub enum PollNext {
+ /// Poll the first stream.
+ Left,
+ /// Poll the second stream.
+ Right,
+}
+
+impl PollNext {
+ /// Toggle the value and return the old one.
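+ ///
+ /// A small illustration: the previous value is handed back while the stored
+ /// value flips to the other side.
+ ///
+ /// ```
+ /// use futures::stream::PollNext;
+ ///
+ /// let mut next = PollNext::Left;
+ /// assert_eq!(next.toggle(), PollNext::Left);
+ /// assert_eq!(next, PollNext::Right);
+ /// ```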
+ pub fn toggle(&mut self) -> Self {
+ let old = *self;
+
+ match self {
+ PollNext::Left => *self = PollNext::Right,
+ PollNext::Right => *self = PollNext::Left,
+ }
+
+ old
+ }
+}
+
+impl Default for PollNext {
+ fn default() -> Self {
+ PollNext::Left
+ }
+}
+
+pin_project! {
+ /// Stream for the [`select_with_strategy()`] function. See function docs for details.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct SelectWithStrategy<St1, St2, Clos, State> {
+ #[pin]
+ stream1: Fuse<St1>,
+ #[pin]
+ stream2: Fuse<St2>,
+ state: State,
+ clos: Clos,
+ }
+}
+
+/// This function will attempt to pull items from both streams. You provide a
+/// closure to tell [`SelectWithStrategy`] which stream to poll. The closure can
+/// store state on `SelectWithStrategy`, and it receives a `&mut` to that state
+/// on every invocation. This allows basing the strategy on prior choices.
+///
+/// After one of the two input streams completes, the remaining one will be
+/// polled exclusively. The returned stream completes when both input
+/// streams have completed.
+///
+/// Note that this function consumes both streams and returns a wrapped
+/// version of them.
+///
+/// ## Examples
+///
+/// ### Priority
+/// This example shows how to always prioritize the left stream.
+///
+/// ```rust
+/// # futures::executor::block_on(async {
+/// use futures::stream::{ repeat, select_with_strategy, PollNext, StreamExt };
+///
+/// let left = repeat(1);
+/// let right = repeat(2);
+///
+/// // We don't need any state, so let's make it an empty tuple.
+/// // We must provide some type here, as there is no way for the compiler
+/// // to infer it. As we don't need to capture variables, we can just
+/// // use a function pointer instead of a closure.
+/// fn prio_left(_: &mut ()) -> PollNext { PollNext::Left }
+///
+/// let mut out = select_with_strategy(left, right, prio_left);
+///
+/// for _ in 0..100 {
+/// // Whenever we poll `out`, we will always get `1`.
+/// assert_eq!(1, out.select_next_some().await);
+/// }
+/// # });
+/// ```
+///
+/// ### Round Robin
+/// This example shows how to select from both streams round robin.
+/// Note: this special case is provided by [`futures-util::stream::select`].
+///
+/// ```rust
+/// # futures::executor::block_on(async {
+/// use futures::stream::{ repeat, select_with_strategy, PollNext, StreamExt };
+///
+/// let left = repeat(1);
+/// let right = repeat(2);
+///
+/// let rrobin = |last: &mut PollNext| last.toggle();
+///
+/// let mut out = select_with_strategy(left, right, rrobin);
+///
+/// for _ in 0..100 {
+/// // We should be alternating now.
+/// assert_eq!(1, out.select_next_some().await);
+/// assert_eq!(2, out.select_next_some().await);
+/// }
+/// # });
+/// ```
+pub fn select_with_strategy<St1, St2, Clos, State>(
+ stream1: St1,
+ stream2: St2,
+ which: Clos,
+) -> SelectWithStrategy<St1, St2, Clos, State>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+ Clos: FnMut(&mut State) -> PollNext,
+ State: Default,
+{
+ assert_stream::<St1::Item, _>(SelectWithStrategy {
+ stream1: stream1.fuse(),
+ stream2: stream2.fuse(),
+ state: Default::default(),
+ clos: which,
+ })
+}
+
+impl<St1, St2, Clos, State> SelectWithStrategy<St1, St2, Clos, State> {
+ /// Acquires a reference to the underlying streams that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> (&St1, &St2) {
+ (self.stream1.get_ref(), self.stream2.get_ref())
+ }
+
+ /// Acquires a mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> (&mut St1, &mut St2) {
+ (self.stream1.get_mut(), self.stream2.get_mut())
+ }
+
+ /// Acquires a pinned mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) {
+ let this = self.project();
+ (this.stream1.get_pin_mut(), this.stream2.get_pin_mut())
+ }
+
+ /// Consumes this combinator, returning the underlying streams.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> (St1, St2) {
+ (self.stream1.into_inner(), self.stream2.into_inner())
+ }
+}
+
+impl<St1, St2, Clos, State> FusedStream for SelectWithStrategy<St1, St2, Clos, State>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+ Clos: FnMut(&mut State) -> PollNext,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream1.is_terminated() && self.stream2.is_terminated()
+ }
+}
+
+impl<St1, St2, Clos, State> Stream for SelectWithStrategy<St1, St2, Clos, State>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+ Clos: FnMut(&mut State) -> PollNext,
+{
+ type Item = St1::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St1::Item>> {
+ let this = self.project();
+
+ match (this.clos)(this.state) {
+ PollNext::Left => poll_inner(this.stream1, this.stream2, cx),
+ PollNext::Right => poll_inner(this.stream2, this.stream1, cx),
+ }
+ }
+}
+
+fn poll_inner<St1, St2>(
+ a: Pin<&mut St1>,
+ b: Pin<&mut St2>,
+ cx: &mut Context<'_>,
+) -> Poll<Option<St1::Item>>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ let a_done = match a.poll_next(cx) {
+ Poll::Ready(Some(item)) => return Poll::Ready(Some(item)),
+ Poll::Ready(None) => true,
+ Poll::Pending => false,
+ };
+
+ match b.poll_next(cx) {
+ Poll::Ready(Some(item)) => Poll::Ready(Some(item)),
+ Poll::Ready(None) if a_done => Poll::Ready(None),
+ Poll::Ready(None) | Poll::Pending => Poll::Pending,
+ }
+}
+
+impl<St1, St2, Clos, State> fmt::Debug for SelectWithStrategy<St1, St2, Clos, State>
+where
+ St1: fmt::Debug,
+ St2: fmt::Debug,
+ State: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SelectWithStrategy")
+ .field("stream1", &self.stream1)
+ .field("stream2", &self.stream2)
+ .field("state", &self.state)
+ .finish()
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/all.rs b/vendor/futures-util/src/stream/stream/all.rs
new file mode 100644
index 000000000..ba2baa5cf
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/all.rs
@@ -0,0 +1,92 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`all`](super::StreamExt::all) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct All<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ accum: Option<bool>,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for All<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("All")
+ .field("stream", &self.stream)
+ .field("accum", &self.accum)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> All<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, accum: Some(true), future: None }
+ }
+}
+
+impl<St, Fut, F> FusedFuture for All<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.accum.is_none() && self.future.is_none()
+ }
+}
+
+impl<St, Fut, F> Future for All<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Output = bool;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ // we're currently processing a future to produce a new accum value
+ let acc = this.accum.unwrap() && ready!(fut.poll(cx));
+ if !acc {
+ break false;
+ } // early exit
+ *this.accum = Some(acc);
+ this.future.set(None);
+ } else if this.accum.is_some() {
+ // we're waiting on a new item from the stream
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(item) => {
+ this.future.set(Some((this.f)(item)));
+ }
+ None => {
+ break this.accum.take().unwrap();
+ }
+ }
+ } else {
+ panic!("All polled after completion")
+ }
+ })
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/any.rs b/vendor/futures-util/src/stream/stream/any.rs
new file mode 100644
index 000000000..f023125c7
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/any.rs
@@ -0,0 +1,92 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`any`](super::StreamExt::any) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Any<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ accum: Option<bool>,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for Any<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Any")
+ .field("stream", &self.stream)
+ .field("accum", &self.accum)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> Any<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, accum: Some(false), future: None }
+ }
+}
+
+impl<St, Fut, F> FusedFuture for Any<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.accum.is_none() && self.future.is_none()
+ }
+}
+
+impl<St, Fut, F> Future for Any<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Output = bool;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ // we're currently processing a future to produce a new accum value
+ let acc = this.accum.unwrap() || ready!(fut.poll(cx));
+ if acc {
+ break true;
+ } // early exit
+ *this.accum = Some(acc);
+ this.future.set(None);
+ } else if this.accum.is_some() {
+ // we're waiting on a new item from the stream
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(item) => {
+ this.future.set(Some((this.f)(item)));
+ }
+ None => {
+ break this.accum.take().unwrap();
+ }
+ }
+ } else {
+ panic!("Any polled after completion")
+ }
+ })
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/buffer_unordered.rs b/vendor/futures-util/src/stream/stream/buffer_unordered.rs
new file mode 100644
index 000000000..d64c142b4
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/buffer_unordered.rs
@@ -0,0 +1,124 @@
+use crate::stream::{Fuse, FuturesUnordered, StreamExt};
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`buffer_unordered`](super::StreamExt::buffer_unordered)
+ /// method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct BufferUnordered<St>
+ where
+ St: Stream,
+ {
+ #[pin]
+ stream: Fuse<St>,
+ in_progress_queue: FuturesUnordered<St::Item>,
+ max: usize,
+ }
+}
+
+impl<St> fmt::Debug for BufferUnordered<St>
+where
+ St: Stream + fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("BufferUnordered")
+ .field("stream", &self.stream)
+ .field("in_progress_queue", &self.in_progress_queue)
+ .field("max", &self.max)
+ .finish()
+ }
+}
+
+impl<St> BufferUnordered<St>
+where
+ St: Stream,
+ St::Item: Future,
+{
+ pub(super) fn new(stream: St, n: usize) -> Self
+ where
+ St: Stream,
+ St::Item: Future,
+ {
+ Self {
+ stream: super::Fuse::new(stream),
+ in_progress_queue: FuturesUnordered::new(),
+ max: n,
+ }
+ }
+
+ delegate_access_inner!(stream, St, (.));
+}
+
+impl<St> Stream for BufferUnordered<St>
+where
+ St: Stream,
+ St::Item: Future,
+{
+ type Item = <St::Item as Future>::Output;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ // First up, try to spawn off as many futures as possible by filling up
+ // our queue of futures.
+ while this.in_progress_queue.len() < *this.max {
+ match this.stream.as_mut().poll_next(cx) {
+ Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut),
+ Poll::Ready(None) | Poll::Pending => break,
+ }
+ }
+
+ // Attempt to pull the next value from the in_progress_queue
+ match this.in_progress_queue.poll_next_unpin(cx) {
+ x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x,
+ Poll::Ready(None) => {}
+ }
+
+ // If more values are still coming from the stream, we're not done yet
+ if this.stream.is_done() {
+ Poll::Ready(None)
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let queue_len = self.in_progress_queue.len();
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(queue_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(queue_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St> FusedStream for BufferUnordered<St>
+where
+ St: Stream,
+ St::Item: Future,
+{
+ fn is_terminated(&self) -> bool {
+ self.in_progress_queue.is_terminated() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for BufferUnordered<S>
+where
+ S: Stream + Sink<Item>,
+ S::Item: Future,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/buffered.rs b/vendor/futures-util/src/stream/stream/buffered.rs
new file mode 100644
index 000000000..6052a737b
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/buffered.rs
@@ -0,0 +1,108 @@
+use crate::stream::{Fuse, FuturesOrdered, StreamExt};
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`buffered`](super::StreamExt::buffered) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Buffered<St>
+ where
+ St: Stream,
+ St::Item: Future,
+ {
+ #[pin]
+ stream: Fuse<St>,
+ in_progress_queue: FuturesOrdered<St::Item>,
+ max: usize,
+ }
+}
+
+impl<St> fmt::Debug for Buffered<St>
+where
+ St: Stream + fmt::Debug,
+ St::Item: Future,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Buffered")
+ .field("stream", &self.stream)
+ .field("in_progress_queue", &self.in_progress_queue)
+ .field("max", &self.max)
+ .finish()
+ }
+}
+
+impl<St> Buffered<St>
+where
+ St: Stream,
+ St::Item: Future,
+{
+ pub(super) fn new(stream: St, n: usize) -> Self {
+ Self { stream: super::Fuse::new(stream), in_progress_queue: FuturesOrdered::new(), max: n }
+ }
+
+ delegate_access_inner!(stream, St, (.));
+}
+
+impl<St> Stream for Buffered<St>
+where
+ St: Stream,
+ St::Item: Future,
+{
+ type Item = <St::Item as Future>::Output;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ // First up, try to spawn off as many futures as possible by filling up
+ // our queue of futures.
+ while this.in_progress_queue.len() < *this.max {
+ match this.stream.as_mut().poll_next(cx) {
+ Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut),
+ Poll::Ready(None) | Poll::Pending => break,
+ }
+ }
+
+ // Attempt to pull the next value from the in_progress_queue
+ let res = this.in_progress_queue.poll_next_unpin(cx);
+ if let Some(val) = ready!(res) {
+ return Poll::Ready(Some(val));
+ }
+
+ // If more values are still coming from the stream, we're not done yet
+ if this.stream.is_done() {
+ Poll::Ready(None)
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let queue_len = self.in_progress_queue.len();
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(queue_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(queue_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Buffered<S>
+where
+ S: Stream + Sink<Item>,
+ S::Item: Future,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/catch_unwind.rs b/vendor/futures-util/src/stream/stream/catch_unwind.rs
new file mode 100644
index 000000000..09a6dc1b7
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/catch_unwind.rs
@@ -0,0 +1,61 @@
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+use std::any::Any;
+use std::panic::{catch_unwind, AssertUnwindSafe, UnwindSafe};
+use std::pin::Pin;
+
+pin_project! {
+ /// Stream for the [`catch_unwind`](super::StreamExt::catch_unwind) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct CatchUnwind<St> {
+ #[pin]
+ stream: St,
+ caught_unwind: bool,
+ }
+}
+
+impl<St: Stream + UnwindSafe> CatchUnwind<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, caught_unwind: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St: Stream + UnwindSafe> Stream for CatchUnwind<St> {
+ type Item = Result<St::Item, Box<dyn Any + Send>>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if *this.caught_unwind {
+ Poll::Ready(None)
+ } else {
+ let res = catch_unwind(AssertUnwindSafe(|| this.stream.as_mut().poll_next(cx)));
+
+ match res {
+ Ok(poll) => poll.map(|opt| opt.map(Ok)),
+ Err(e) => {
+ *this.caught_unwind = true;
+ Poll::Ready(Some(Err(e)))
+ }
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.caught_unwind {
+ (0, Some(0))
+ } else {
+ self.stream.size_hint()
+ }
+ }
+}
+
+impl<St: FusedStream + UnwindSafe> FusedStream for CatchUnwind<St> {
+ fn is_terminated(&self) -> bool {
+ self.caught_unwind || self.stream.is_terminated()
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/chain.rs b/vendor/futures-util/src/stream/stream/chain.rs
new file mode 100644
index 000000000..c5da35e25
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/chain.rs
@@ -0,0 +1,75 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`chain`](super::StreamExt::chain) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Chain<St1, St2> {
+ #[pin]
+ first: Option<St1>,
+ #[pin]
+ second: St2,
+ }
+}
+
+// All interactions with `Pin<&mut Chain<..>>` happen through these methods
+impl<St1, St2> Chain<St1, St2>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ pub(super) fn new(stream1: St1, stream2: St2) -> Self {
+ Self { first: Some(stream1), second: stream2 }
+ }
+}
+
+impl<St1, St2> FusedStream for Chain<St1, St2>
+where
+ St1: Stream,
+ St2: FusedStream<Item = St1::Item>,
+{
+ fn is_terminated(&self) -> bool {
+ self.first.is_none() && self.second.is_terminated()
+ }
+}
+
+impl<St1, St2> Stream for Chain<St1, St2>
+where
+ St1: Stream,
+ St2: Stream<Item = St1::Item>,
+{
+ type Item = St1::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ if let Some(first) = this.first.as_mut().as_pin_mut() {
+ if let Some(item) = ready!(first.poll_next(cx)) {
+ return Poll::Ready(Some(item));
+ }
+ }
+ this.first.set(None);
+ this.second.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if let Some(first) = &self.first {
+ let (first_lower, first_upper) = first.size_hint();
+ let (second_lower, second_upper) = self.second.size_hint();
+
+ let lower = first_lower.saturating_add(second_lower);
+
+ let upper = match (first_upper, second_upper) {
+ (Some(x), Some(y)) => x.checked_add(y),
+ _ => None,
+ };
+
+ (lower, upper)
+ } else {
+ self.second.size_hint()
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/chunks.rs b/vendor/futures-util/src/stream/stream/chunks.rs
new file mode 100644
index 000000000..845786999
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/chunks.rs
@@ -0,0 +1,106 @@
+use crate::stream::Fuse;
+use alloc::vec::Vec;
+use core::mem;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`chunks`](super::StreamExt::chunks) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Chunks<St: Stream> {
+ #[pin]
+ stream: Fuse<St>,
+ items: Vec<St::Item>,
+ cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475
+ }
+}
+
+impl<St: Stream> Chunks<St>
+where
+ St: Stream,
+{
+ pub(super) fn new(stream: St, capacity: usize) -> Self {
+ assert!(capacity > 0);
+
+ Self {
+ stream: super::Fuse::new(stream),
+ items: Vec::with_capacity(capacity),
+ cap: capacity,
+ }
+ }
+
+ fn take(self: Pin<&mut Self>) -> Vec<St::Item> {
+ let cap = self.cap;
+ mem::replace(self.project().items, Vec::with_capacity(cap))
+ }
+
+ delegate_access_inner!(stream, St, (.));
+}
+
+impl<St: Stream> Stream for Chunks<St> {
+ type Item = Vec<St::Item>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.as_mut().project();
+ loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ // Push the item into the buffer and check whether it is full.
+ // If so, replace our buffer with a new and empty one and return
+ // the full one.
+ Some(item) => {
+ this.items.push(item);
+ if this.items.len() >= *this.cap {
+ return Poll::Ready(Some(self.take()));
+ }
+ }
+
+ // Since the underlying stream ran out of values, return what we
+ // have buffered, if we have anything.
+ None => {
+ let last = if this.items.is_empty() {
+ None
+ } else {
+ let full_buf = mem::replace(this.items, Vec::new());
+ Some(full_buf)
+ };
+
+ return Poll::Ready(last);
+ }
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let chunk_len = if self.items.is_empty() { 0 } else { 1 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(chunk_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(chunk_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St: FusedStream> FusedStream for Chunks<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated() && self.items.is_empty()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Chunks<S>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/collect.rs b/vendor/futures-util/src/stream/stream/collect.rs
new file mode 100644
index 000000000..b0e81b9ce
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/collect.rs
@@ -0,0 +1,56 @@
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`collect`](super::StreamExt::collect) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Collect<St, C> {
+ #[pin]
+ stream: St,
+ collection: C,
+ }
+}
+
+impl<St: Stream, C: Default> Collect<St, C> {
+ fn finish(self: Pin<&mut Self>) -> C {
+ mem::replace(self.project().collection, Default::default())
+ }
+
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, collection: Default::default() }
+ }
+}
+
+impl<St, C> FusedFuture for Collect<St, C>
+where
+ St: FusedStream,
+ C: Default + Extend<St::Item>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St, C> Future for Collect<St, C>
+where
+ St: Stream,
+ C: Default + Extend<St::Item>,
+{
+ type Output = C;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<C> {
+ let mut this = self.as_mut().project();
+ loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(e) => this.collection.extend(Some(e)),
+ None => return Poll::Ready(self.finish()),
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/concat.rs b/vendor/futures-util/src/stream/stream/concat.rs
new file mode 100644
index 000000000..7e058b231
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/concat.rs
@@ -0,0 +1,62 @@
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`concat`](super::StreamExt::concat) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Concat<St: Stream> {
+ #[pin]
+ stream: St,
+ accum: Option<St::Item>,
+ }
+}
+
+impl<St> Concat<St>
+where
+ St: Stream,
+ St::Item: Extend<<St::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, accum: None }
+ }
+}
+
+impl<St> Future for Concat<St>
+where
+ St: Stream,
+ St::Item: Extend<<St::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+ type Output = St::Item;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+
+ loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ None => return Poll::Ready(this.accum.take().unwrap_or_default()),
+ Some(e) => {
+ if let Some(a) = this.accum {
+ a.extend(e)
+ } else {
+ *this.accum = Some(e)
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<St> FusedFuture for Concat<St>
+where
+ St: FusedStream,
+ St::Item: Extend<<St::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+ fn is_terminated(&self) -> bool {
+ self.accum.is_none() && self.stream.is_terminated()
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/count.rs b/vendor/futures-util/src/stream/stream/count.rs
new file mode 100644
index 000000000..513cab7b6
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/count.rs
@@ -0,0 +1,53 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`count`](super::StreamExt::count) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Count<St> {
+ #[pin]
+ stream: St,
+ count: usize
+ }
+}
+
+impl<St> fmt::Debug for Count<St>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Count").field("stream", &self.stream).field("count", &self.count).finish()
+ }
+}
+
+impl<St: Stream> Count<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, count: 0 }
+ }
+}
+
+impl<St: FusedStream> FusedFuture for Count<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: Stream> Future for Count<St> {
+ type Output = usize;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(_) => *this.count += 1,
+ None => break *this.count,
+ }
+ })
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/cycle.rs b/vendor/futures-util/src/stream/stream/cycle.rs
new file mode 100644
index 000000000..507431d24
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/cycle.rs
@@ -0,0 +1,68 @@
+use core::pin::Pin;
+use core::usize;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`cycle`](super::StreamExt::cycle) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Cycle<St> {
+ orig: St,
+ #[pin]
+ stream: St,
+ }
+}
+
+impl<St> Cycle<St>
+where
+ St: Clone + Stream,
+{
+ pub(super) fn new(stream: St) -> Self {
+ Self { orig: stream.clone(), stream }
+ }
+}
+
+impl<St> Stream for Cycle<St>
+where
+ St: Clone + Stream,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ None => {
+ this.stream.set(this.orig.clone());
+ this.stream.poll_next(cx)
+ }
+ item => Poll::Ready(item),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // the cycle stream is either empty or infinite
+ match self.orig.size_hint() {
+ size @ (0, Some(0)) => size,
+ (0, _) => (0, None),
+ _ => (usize::max_value(), None),
+ }
+ }
+}
+
+impl<St> FusedStream for Cycle<St>
+where
+ St: Clone + Stream,
+{
+ fn is_terminated(&self) -> bool {
+ // the cycle stream is either empty or infinite
+ if let (0, Some(0)) = self.size_hint() {
+ true
+ } else {
+ false
+ }
+ }
+}
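
A short sketch of the `size_hint` logic above (editorial example assuming the `futures` facade crate): a cycled stream is either empty or infinite, and the `(0, Some(0))` case is also what drives `FusedStream::is_terminated`.

```
use futures::stream::{self, FusedStream, Stream, StreamExt};

fn main() {
    // A non-empty source cycles forever: at least usize::MAX items, no upper bound.
    let endless = stream::iter(1..=3).cycle();
    assert_eq!(endless.size_hint(), (usize::MAX, None));

    // An empty source stays empty; its (0, Some(0)) hint is also what makes
    // `is_terminated` report true for the cycled stream.
    let empty = stream::iter(Vec::<i32>::new()).cycle();
    assert_eq!(empty.size_hint(), (0, Some(0)));
    assert!(empty.is_terminated());
}
```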
diff --git a/vendor/futures-util/src/stream/stream/enumerate.rs b/vendor/futures-util/src/stream/stream/enumerate.rs
new file mode 100644
index 000000000..1cf9d49aa
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/enumerate.rs
@@ -0,0 +1,64 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`enumerate`](super::StreamExt::enumerate) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Enumerate<St> {
+ #[pin]
+ stream: St,
+ count: usize,
+ }
+}
+
+impl<St: Stream> Enumerate<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, count: 0 }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St: Stream + FusedStream> FusedStream for Enumerate<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: Stream> Stream for Enumerate<St> {
+ type Item = (usize, St::Item);
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+
+ match ready!(this.stream.poll_next(cx)) {
+ Some(item) => {
+ let prev_count = *this.count;
+ *this.count += 1;
+ Poll::Ready(Some((prev_count, item)))
+ }
+ None => Poll::Ready(None),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Enumerate<S>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/filter.rs b/vendor/futures-util/src/stream/stream/filter.rs
new file mode 100644
index 000000000..ccf1a5122
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/filter.rs
@@ -0,0 +1,117 @@
+use crate::fns::FnMut1;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`filter`](super::StreamExt::filter) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Filter<St, Fut, F>
+ where St: Stream,
+ {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Item>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Filter")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .finish()
+ }
+}
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<St, Fut, F> Filter<St, Fut, F>
+where
+ St: Stream,
+ F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
+ Fut: Future<Output = bool>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
+where
+ St: Stream + FusedStream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending_fut.is_none() && self.stream.is_terminated()
+ }
+}
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<St, Fut, F> Stream for Filter<St, Fut, F>
+where
+ St: Stream,
+ F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
+ Fut: Future<Output = bool>,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let res = ready!(fut.poll(cx));
+ this.pending_fut.set(None);
+ if res {
+ break this.pending_item.take();
+ }
+ *this.pending_item = None;
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.pending_fut.set(Some(this.f.call_mut(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
+where
+ S: Stream + Sink<Item>,
+ F: FnMut(&S::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/filter_map.rs b/vendor/futures-util/src/stream/stream/filter_map.rs
new file mode 100644
index 000000000..02a0a4386
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/filter_map.rs
@@ -0,0 +1,111 @@
+use crate::fns::FnMut1;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`filter_map`](super::StreamExt::filter_map) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct FilterMap<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for FilterMap<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FilterMap")
+ .field("stream", &self.stream)
+ .field("pending", &self.pending)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> FilterMap<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F, T> FusedStream for FilterMap<St, Fut, F>
+where
+ St: Stream + FusedStream,
+ F: FnMut1<St::Item, Output = Fut>,
+ Fut: Future<Output = Option<T>>,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F, T> Stream for FilterMap<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut1<St::Item, Output = Fut>,
+ Fut: Future<Output = Option<T>>,
+{
+ type Item = T;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(p) = this.pending.as_mut().as_pin_mut() {
+ // We have an item in progress, poll that until it's done
+ let item = ready!(p.poll(cx));
+ this.pending.set(None);
+ if item.is_some() {
+ break item;
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ // No item in progress, but the stream is still going
+ this.pending.set(Some(this.f.call_mut(item)));
+ } else {
+ // The stream is done
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let pending_len = if self.pending.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for FilterMap<S, Fut, F>
+where
+ S: Stream + Sink<Item>,
+ F: FnMut1<S::Item, Output = Fut>,
+ Fut: Future,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/flatten.rs b/vendor/futures-util/src/stream/stream/flatten.rs
new file mode 100644
index 000000000..9f6b7a472
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/flatten.rs
@@ -0,0 +1,73 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`flatten`](super::StreamExt::flatten) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Flatten<St, U> {
+ #[pin]
+ stream: St,
+ #[pin]
+ next: Option<U>,
+ }
+}
+
+impl<St, U> Flatten<St, U> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, next: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St> FusedStream for Flatten<St, St::Item>
+where
+ St: FusedStream,
+ St::Item: Stream,
+{
+ fn is_terminated(&self) -> bool {
+ self.next.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St> Stream for Flatten<St, St::Item>
+where
+ St: Stream,
+ St::Item: Stream,
+{
+ type Item = <St::Item as Stream>::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(s) = this.next.as_mut().as_pin_mut() {
+ if let Some(item) = ready!(s.poll_next(cx)) {
+ break Some(item);
+ } else {
+ this.next.set(None);
+ }
+ } else if let Some(s) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.next.set(Some(s));
+ } else {
+ break None;
+ }
+ })
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Flatten<S, S::Item>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
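
A small illustrative sketch (not part of the vendored code, assuming the `futures` facade crate): `Flatten` drains each inner stream completely before it pulls the next one from the outer stream, so the output preserves the nested order.

```
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        // Each inner stream is exhausted before the next outer item is requested.
        let nested = stream::iter(vec![stream::iter(1..=2), stream::iter(3..=4)]);
        let flat: Vec<i32> = nested.flatten().collect().await;
        assert_eq!(flat, vec![1, 2, 3, 4]);
    });
}
```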
diff --git a/vendor/futures-util/src/stream/stream/fold.rs b/vendor/futures-util/src/stream/stream/fold.rs
new file mode 100644
index 000000000..b8b55ecb6
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/fold.rs
@@ -0,0 +1,88 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`fold`](super::StreamExt::fold) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Fold<St, Fut, T, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ accum: Option<T>,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, T, F> fmt::Debug for Fold<St, Fut, T, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Fold")
+ .field("stream", &self.stream)
+ .field("accum", &self.accum)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, T, F> Fold<St, Fut, T, F>
+where
+ St: Stream,
+ F: FnMut(T, St::Item) -> Fut,
+ Fut: Future<Output = T>,
+{
+ pub(super) fn new(stream: St, f: F, t: T) -> Self {
+ Self { stream, f, accum: Some(t), future: None }
+ }
+}
+
+impl<St, Fut, T, F> FusedFuture for Fold<St, Fut, T, F>
+where
+ St: Stream,
+ F: FnMut(T, St::Item) -> Fut,
+ Fut: Future<Output = T>,
+{
+ fn is_terminated(&self) -> bool {
+ self.accum.is_none() && self.future.is_none()
+ }
+}
+
+impl<St, Fut, T, F> Future for Fold<St, Fut, T, F>
+where
+ St: Stream,
+ F: FnMut(T, St::Item) -> Fut,
+ Fut: Future<Output = T>,
+{
+ type Output = T;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ let mut this = self.project();
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ // we're currently processing a future to produce a new accum value
+ *this.accum = Some(ready!(fut.poll(cx)));
+ this.future.set(None);
+ } else if this.accum.is_some() {
+ // we're waiting on a new item from the stream
+ let res = ready!(this.stream.as_mut().poll_next(cx));
+ let a = this.accum.take().unwrap();
+ if let Some(item) = res {
+ this.future.set(Some((this.f)(a, item)));
+ } else {
+ break a;
+ }
+ } else {
+ panic!("Fold polled after completion")
+ }
+ })
+ }
+}
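
An illustrative sketch of `Fold` (editorial, assuming the `futures` facade crate): the accumulator is moved into the closure's future on every step and handed back when that future resolves, which is exactly what the `accum.take()` / `future.set(...)` dance in `poll` above implements.

```
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        // The String accumulator travels through each returned future in turn.
        let joined = stream::iter(vec!["a", "b", "c"])
            .fold(String::new(), |mut acc, x| async move {
                acc.push_str(x);
                acc
            })
            .await;
        assert_eq!(joined, "abc");
    });
}
```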
diff --git a/vendor/futures-util/src/stream/stream/for_each.rs b/vendor/futures-util/src/stream/stream/for_each.rs
new file mode 100644
index 000000000..5302b0e03
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/for_each.rs
@@ -0,0 +1,78 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`for_each`](super::StreamExt::for_each) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ForEach<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for ForEach<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ForEach")
+ .field("stream", &self.stream)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> ForEach<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, future: None }
+ }
+}
+
+impl<St, Fut, F> FusedFuture for ForEach<St, Fut, F>
+where
+ St: FusedStream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ fn is_terminated(&self) -> bool {
+ self.future.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F> Future for ForEach<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let mut this = self.project();
+ loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ ready!(fut.poll(cx));
+ this.future.set(None);
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.future.set(Some((this.f)(item)));
+ } else {
+ break;
+ }
+ }
+ Poll::Ready(())
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/for_each_concurrent.rs b/vendor/futures-util/src/stream/stream/for_each_concurrent.rs
new file mode 100644
index 000000000..6c18753eb
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/for_each_concurrent.rs
@@ -0,0 +1,119 @@
+use crate::stream::{FuturesUnordered, StreamExt};
+use core::fmt;
+use core::num::NonZeroUsize;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`for_each_concurrent`](super::StreamExt::for_each_concurrent)
+ /// method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct ForEachConcurrent<St, Fut, F> {
+ #[pin]
+ stream: Option<St>,
+ f: F,
+ futures: FuturesUnordered<Fut>,
+ limit: Option<NonZeroUsize>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for ForEachConcurrent<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ForEachConcurrent")
+ .field("stream", &self.stream)
+ .field("futures", &self.futures)
+ .field("limit", &self.limit)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> ForEachConcurrent<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ pub(super) fn new(stream: St, limit: Option<usize>, f: F) -> Self {
+ Self {
+ stream: Some(stream),
+ // Note: `limit` = 0 gets ignored.
+ limit: limit.and_then(NonZeroUsize::new),
+ f,
+ futures: FuturesUnordered::new(),
+ }
+ }
+}
+
+impl<St, Fut, F> FusedFuture for ForEachConcurrent<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_none() && self.futures.is_empty()
+ }
+}
+
+impl<St, Fut, F> Future for ForEachConcurrent<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future<Output = ()>,
+{
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ let mut this = self.project();
+ loop {
+ let mut made_progress_this_iter = false;
+
+ // Check if we've already created a number of futures greater than `limit`
+ if this.limit.map(|limit| limit.get() > this.futures.len()).unwrap_or(true) {
+ let mut stream_completed = false;
+ let elem = if let Some(stream) = this.stream.as_mut().as_pin_mut() {
+ match stream.poll_next(cx) {
+ Poll::Ready(Some(elem)) => {
+ made_progress_this_iter = true;
+ Some(elem)
+ }
+ Poll::Ready(None) => {
+ stream_completed = true;
+ None
+ }
+ Poll::Pending => None,
+ }
+ } else {
+ None
+ };
+ if stream_completed {
+ this.stream.set(None);
+ }
+ if let Some(elem) = elem {
+ this.futures.push((this.f)(elem));
+ }
+ }
+
+ match this.futures.poll_next_unpin(cx) {
+ Poll::Ready(Some(())) => made_progress_this_iter = true,
+ Poll::Ready(None) => {
+ if this.stream.is_none() {
+ return Poll::Ready(());
+ }
+ }
+ Poll::Pending => {}
+ }
+
+ if !made_progress_this_iter {
+ return Poll::Pending;
+ }
+ }
+ }
+}
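
A brief sketch of the limit handling in `ForEachConcurrent::new` above (editorial example assuming the `futures` facade crate): `limit.and_then(NonZeroUsize::new)` turns a limit of `0` into `None`, so zero means unlimited concurrency rather than "never run anything".

```
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        // These two calls behave identically: `0` is normalized to `None`,
        // i.e. no concurrency limit at all.
        stream::iter(0..4).for_each_concurrent(0, |_| async {}).await;
        stream::iter(0..4).for_each_concurrent(None, |_| async {}).await;
    });
}
```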
diff --git a/vendor/futures-util/src/stream/stream/forward.rs b/vendor/futures-util/src/stream/stream/forward.rs
new file mode 100644
index 000000000..1fe24273a
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/forward.rs
@@ -0,0 +1,75 @@
+use crate::stream::Fuse;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`forward`](super::StreamExt::forward) method.
+ #[project = ForwardProj]
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Forward<St, Si, Item> {
+ #[pin]
+ sink: Option<Si>,
+ #[pin]
+ stream: Fuse<St>,
+ buffered_item: Option<Item>,
+ }
+}
+
+impl<St, Si, Item> Forward<St, Si, Item> {
+ pub(crate) fn new(stream: St, sink: Si) -> Self {
+ Self { sink: Some(sink), stream: Fuse::new(stream), buffered_item: None }
+ }
+}
+
+impl<St, Si, Item, E> FusedFuture for Forward<St, Si, Item>
+where
+ Si: Sink<Item, Error = E>,
+ St: Stream<Item = Result<Item, E>>,
+{
+ fn is_terminated(&self) -> bool {
+ self.sink.is_none()
+ }
+}
+
+impl<St, Si, Item, E> Future for Forward<St, Si, Item>
+where
+ Si: Sink<Item, Error = E>,
+ St: Stream<Item = Result<Item, E>>,
+{
+ type Output = Result<(), E>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let ForwardProj { mut sink, mut stream, buffered_item } = self.project();
+ let mut si = sink.as_mut().as_pin_mut().expect("polled `Forward` after completion");
+
+ loop {
+ // If we've got an item buffered already, we need to write it to the
+ // sink before we can do anything else
+ if buffered_item.is_some() {
+ ready!(si.as_mut().poll_ready(cx))?;
+ si.as_mut().start_send(buffered_item.take().unwrap())?;
+ }
+
+ match stream.as_mut().poll_next(cx)? {
+ Poll::Ready(Some(item)) => {
+ *buffered_item = Some(item);
+ }
+ Poll::Ready(None) => {
+ ready!(si.poll_close(cx))?;
+ sink.set(None);
+ return Poll::Ready(Ok(()));
+ }
+ Poll::Pending => {
+ ready!(si.poll_flush(cx))?;
+ return Poll::Pending;
+ }
+ }
+ }
+ }
+}
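
A hedged usage sketch of `Forward` (not part of the vendored sources; it assumes the `futures` facade crate with its default `sink` feature): the combinator drives a `TryStream` of `Ok` items into any `Sink` with a matching error type and closes the sink once the stream ends, via the `poll_close` call in `poll` above.

```
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        let (tx, rx) = mpsc::unbounded::<i32>();

        // `forward` wants a `TryStream` whose error type matches the sink's,
        // so the items are wrapped in `Ok` with the channel's `SendError`.
        let source = stream::iter(vec![Ok::<i32, mpsc::SendError>(1), Ok(2), Ok(3)]);

        // Every item is fed into the sink; when the stream ends, the sink is
        // closed, which is what lets the collect below terminate.
        source.forward(tx).await.unwrap();

        assert_eq!(rx.collect::<Vec<i32>>().await, vec![1, 2, 3]);
    });
}
```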
diff --git a/vendor/futures-util/src/stream/stream/fuse.rs b/vendor/futures-util/src/stream/stream/fuse.rs
new file mode 100644
index 000000000..fe67813e8
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/fuse.rs
@@ -0,0 +1,75 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`fuse`](super::StreamExt::fuse) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Fuse<St> {
+ #[pin]
+ stream: St,
+ done: bool,
+ }
+}
+
+impl<St> Fuse<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, done: false }
+ }
+
+ /// Returns whether the underlying stream has finished or not.
+ ///
+ /// If this method returns `true`, then all future calls to poll are
+ /// guaranteed to return `None`. If this returns `false`, then the
+ /// underlying stream is still in use.
+ pub fn is_done(&self) -> bool {
+ self.done
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<S: Stream> FusedStream for Fuse<S> {
+ fn is_terminated(&self) -> bool {
+ self.done
+ }
+}
+
+impl<S: Stream> Stream for Fuse<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
+ let this = self.project();
+
+ if *this.done {
+ return Poll::Ready(None);
+ }
+
+ let item = ready!(this.stream.poll_next(cx));
+ if item.is_none() {
+ *this.done = true;
+ }
+ Poll::Ready(item)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done {
+ (0, Some(0))
+ } else {
+ self.stream.size_hint()
+ }
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S: Stream + Sink<Item>, Item> Sink<Item> for Fuse<S> {
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
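
A small sketch of `Fuse` in use (editorial example assuming the `futures` facade crate): once the inner stream yields `None`, the `done` flag above is latched, `is_done` returns `true`, and every later poll short-circuits to `None` without touching the inner stream again.

```
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

fn main() {
    block_on(async {
        let mut s = stream::iter(vec![1]).fuse();

        assert!(!s.is_done());
        assert_eq!(s.next().await, Some(1));
        assert_eq!(s.next().await, None);

        // The stream has finished: `done` is set and further polls return `None`.
        assert!(s.is_done());
        assert_eq!(s.next().await, None);
    });
}
```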
diff --git a/vendor/futures-util/src/stream/stream/into_future.rs b/vendor/futures-util/src/stream/stream/into_future.rs
new file mode 100644
index 000000000..8abfddccc
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/into_future.rs
@@ -0,0 +1,90 @@
+use crate::stream::StreamExt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`into_future`](super::StreamExt::into_future) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct StreamFuture<St> {
+ stream: Option<St>,
+}
+
+impl<St: Stream + Unpin> StreamFuture<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream: Some(stream) }
+ }
+
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn get_ref(&self) -> Option<&St> {
+ self.stream.as_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn get_mut(&mut self) -> Option<&mut St> {
+ self.stream.as_mut()
+ }
+
+ /// Acquires a pinned mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> Option<Pin<&mut St>> {
+ self.get_mut().stream.as_mut().map(Pin::new)
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn into_inner(self) -> Option<St> {
+ self.stream
+ }
+}
+
+impl<St: Stream + Unpin> FusedFuture for StreamFuture<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_none()
+ }
+}
+
+impl<St: Stream + Unpin> Future for StreamFuture<St> {
+ type Output = (Option<St::Item>, St);
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let item = {
+ let s = self.stream.as_mut().expect("polling StreamFuture twice");
+ ready!(s.poll_next_unpin(cx))
+ };
+ let stream = self.stream.take().unwrap();
+ Poll::Ready((item, stream))
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/map.rs b/vendor/futures-util/src/stream/stream/map.rs
new file mode 100644
index 000000000..88bb6129d
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/map.rs
@@ -0,0 +1,77 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+use crate::fns::FnMut1;
+
+pin_project! {
+ /// Stream for the [`map`](super::StreamExt::map) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Map<St, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ }
+}
+
+impl<St, F> fmt::Debug for Map<St, F>
+where
+ St: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Map").field("stream", &self.stream).finish()
+ }
+}
+
+impl<St, F> Map<St, F> {
+ pub(crate) fn new(stream: St, f: F) -> Self {
+ Self { stream, f }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, F> FusedStream for Map<St, F>
+where
+ St: FusedStream,
+ F: FnMut1<St::Item>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St, F> Stream for Map<St, F>
+where
+ St: Stream,
+ F: FnMut1<St::Item>,
+{
+ type Item = F::Output;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+ let res = ready!(this.stream.as_mut().poll_next(cx));
+ Poll::Ready(res.map(|x| this.f.call_mut(x)))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<St, F, Item> Sink<Item> for Map<St, F>
+where
+ St: Stream + Sink<Item>,
+ F: FnMut1<St::Item>,
+{
+ type Error = St::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/mod.rs b/vendor/futures-util/src/stream/stream/mod.rs
new file mode 100644
index 000000000..9cfcc09ba
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/mod.rs
@@ -0,0 +1,1567 @@
+//! Streams
+//!
+//! This module contains a number of functions for working with `Stream`s,
+//! including the `StreamExt` trait which adds methods to `Stream` types.
+
+use crate::future::{assert_future, Either};
+use crate::stream::assert_stream;
+#[cfg(feature = "alloc")]
+use alloc::boxed::Box;
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+use core::pin::Pin;
+#[cfg(feature = "sink")]
+use futures_core::stream::TryStream;
+#[cfg(feature = "alloc")]
+use futures_core::stream::{BoxStream, LocalBoxStream};
+use futures_core::{
+ future::Future,
+ stream::{FusedStream, Stream},
+ task::{Context, Poll},
+};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+
+use crate::fns::{inspect_fn, InspectFn};
+
+mod chain;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::chain::Chain;
+
+mod collect;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::collect::Collect;
+
+mod unzip;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::unzip::Unzip;
+
+mod concat;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::concat::Concat;
+
+mod count;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::count::Count;
+
+mod cycle;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::cycle::Cycle;
+
+mod enumerate;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::enumerate::Enumerate;
+
+mod filter;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::filter::Filter;
+
+mod filter_map;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::filter_map::FilterMap;
+
+mod flatten;
+
+delegate_all!(
+ /// Stream for the [`flatten`](StreamExt::flatten) method.
+ Flatten<St>(
+ flatten::Flatten<St, St::Item>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St| flatten::Flatten::new(x)]
+ where St: Stream
+);
+
+mod fold;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::fold::Fold;
+
+mod any;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::any::Any;
+
+mod all;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::all::All;
+
+#[cfg(feature = "sink")]
+mod forward;
+
+#[cfg(feature = "sink")]
+delegate_all!(
+ /// Future for the [`forward`](super::StreamExt::forward) method.
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ Forward<St, Si>(
+ forward::Forward<St, Si, St::Ok>
+ ): Debug + Future + FusedFuture + New[|x: St, y: Si| forward::Forward::new(x, y)]
+ where St: TryStream
+);
+
+mod for_each;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::for_each::ForEach;
+
+mod fuse;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::fuse::Fuse;
+
+mod into_future;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::into_future::StreamFuture;
+
+delegate_all!(
+ /// Stream for the [`inspect`](StreamExt::inspect) method.
+ Inspect<St, F>(
+ map::Map<St, InspectFn<F>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St, f: F| map::Map::new(x, inspect_fn(f))]
+);
+
+mod map;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::map::Map;
+
+delegate_all!(
+ /// Stream for the [`flat_map`](StreamExt::flat_map) method.
+ FlatMap<St, U, F>(
+ flatten::Flatten<Map<St, F>, U>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| flatten::Flatten::new(Map::new(x, f))]
+);
+
+mod next;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::next::Next;
+
+mod select_next_some;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::select_next_some::SelectNextSome;
+
+mod peek;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::peek::{NextIf, NextIfEq, Peek, PeekMut, Peekable};
+
+mod skip;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::skip::Skip;
+
+mod skip_while;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::skip_while::SkipWhile;
+
+mod take;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::take::Take;
+
+mod take_while;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::take_while::TakeWhile;
+
+mod take_until;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::take_until::TakeUntil;
+
+mod then;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::then::Then;
+
+mod zip;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::zip::Zip;
+
+#[cfg(feature = "alloc")]
+mod chunks;
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::chunks::Chunks;
+
+#[cfg(feature = "alloc")]
+mod ready_chunks;
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::ready_chunks::ReadyChunks;
+
+mod scan;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::scan::Scan;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod buffer_unordered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::buffer_unordered::BufferUnordered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod buffered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::buffered::Buffered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod for_each_concurrent;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::for_each_concurrent::ForEachConcurrent;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+#[cfg(feature = "alloc")]
+mod split;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "sink")]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::split::{ReuniteError, SplitSink, SplitStream};
+
+#[cfg(feature = "std")]
+mod catch_unwind;
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::catch_unwind::CatchUnwind;
+
+impl<T: ?Sized> StreamExt for T where T: Stream {}
+
+/// An extension trait for `Stream`s that provides a variety of convenient
+/// combinator functions.
+pub trait StreamExt: Stream {
+ /// Creates a future that resolves to the next item in the stream.
+ ///
+ /// Note that because `next` doesn't take ownership over the stream,
+ /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a
+ /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can
+ /// be done by boxing the stream using [`Box::pin`] or
+ /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils`
+ /// crate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=3);
+ ///
+ /// assert_eq!(stream.next().await, Some(1));
+ /// assert_eq!(stream.next().await, Some(2));
+ /// assert_eq!(stream.next().await, Some(3));
+ /// assert_eq!(stream.next().await, None);
+ /// # });
+ /// ```
+ fn next(&mut self) -> Next<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Option<Self::Item>, _>(Next::new(self))
+ }
+
+ /// Converts this stream into a future of `(next_item, tail_of_stream)`.
+ /// If the stream terminates, then the next item is [`None`].
+ ///
+ /// The returned future can be used to compose streams and futures together
+ /// by placing everything into the "world of futures".
+ ///
+ /// Note that because `into_future` moves the stream, the [`Stream`] type
+ /// must be [`Unpin`]. If you want to use `into_future` with a
+ /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can
+ /// be done by boxing the stream using [`Box::pin`] or
+ /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils`
+ /// crate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3);
+ ///
+ /// let (item, stream) = stream.into_future().await;
+ /// assert_eq!(Some(1), item);
+ ///
+ /// let (item, stream) = stream.into_future().await;
+ /// assert_eq!(Some(2), item);
+ /// # });
+ /// ```
+ fn into_future(self) -> StreamFuture<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ assert_future::<(Option<Self::Item>, Self), _>(StreamFuture::new(self))
+ }
+
+ /// Maps this stream's items to a different type, returning a new stream of
+ /// the resulting type.
+ ///
+ /// The provided closure is executed over all elements of this stream as
+ /// they are made available. It is executed inline with calls to
+ /// [`poll_next`](Stream::poll_next).
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3);
+ /// let stream = stream.map(|x| x + 3);
+ ///
+ /// assert_eq!(vec![4, 5, 6], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn map<T, F>(self, f: F) -> Map<Self, F>
+ where
+ F: FnMut(Self::Item) -> T,
+ Self: Sized,
+ {
+ assert_stream::<T, _>(Map::new(self, f))
+ }
+
+ /// Creates a stream which gives the current iteration count as well as
+ /// the next value.
+ ///
+ /// The stream returned yields pairs `(i, val)`, where `i` is the
+ /// current index of iteration and `val` is the value returned by the
+ /// stream.
+ ///
+ /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a
+ /// different sized integer, the [`zip`](StreamExt::zip) function provides similar
+ /// functionality.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so enumerating more than
+ /// [`prim@usize::max_value()`] elements either produces the wrong result or panics. If
+ /// debug assertions are enabled, a panic is guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// The returned stream might panic if the to-be-returned index would
+ /// overflow a [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(vec!['a', 'b', 'c']);
+ ///
+ /// let mut stream = stream.enumerate();
+ ///
+ /// assert_eq!(stream.next().await, Some((0, 'a')));
+ /// assert_eq!(stream.next().await, Some((1, 'b')));
+ /// assert_eq!(stream.next().await, Some((2, 'c')));
+ /// assert_eq!(stream.next().await, None);
+ /// # });
+ /// ```
+ fn enumerate(self) -> Enumerate<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<(usize, Self::Item), _>(Enumerate::new(self))
+ }
+
+ /// Filters the values produced by this stream according to the provided
+ /// asynchronous predicate.
+ ///
+ /// As values of this stream are made available, the provided predicate `f`
+ /// will be run against them. If the predicate returns a `Future` which
+ /// resolves to `true`, then the stream will yield the value, but if the
+ /// predicate returns a `Future` which resolves to `false`, then the value
+ /// will be discarded and the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `filter` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ /// let evens = stream.filter(|x| future::ready(x % 2 == 0));
+ ///
+ /// assert_eq!(vec![2, 4, 6, 8, 10], evens.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn filter<Fut, F>(self, f: F) -> Filter<Self, Fut, F>
+ where
+ F: FnMut(&Self::Item) -> Fut,
+ Fut: Future<Output = bool>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Filter::new(self, f))
+ }
+
+ /// Filters the values produced by this stream while simultaneously mapping
+ /// them to a different type according to the provided asynchronous closure.
+ ///
+ /// As values of this stream are made available, the provided function will
+ /// be run on them. If the future returned by the predicate `f` resolves to
+ /// [`Some(item)`](Some) then the stream will yield the value `item`, but if
+ /// it resolves to [`None`] then the next value will be produced.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `filter_map` methods in
+ /// the standard library.
+ ///
+ /// # Examples
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ /// let evens = stream.filter_map(|x| async move {
+ /// if x % 2 == 0 { Some(x + 1) } else { None }
+ /// });
+ ///
+ /// assert_eq!(vec![3, 5, 7, 9, 11], evens.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn filter_map<Fut, T, F>(self, f: F) -> FilterMap<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future<Output = Option<T>>,
+ Self: Sized,
+ {
+ assert_stream::<T, _>(FilterMap::new(self, f))
+ }
+
+ /// Computes from this stream's items new items of a different type using
+ /// an asynchronous closure.
+ ///
+ /// The provided closure `f` will be called with an `Item` once a value is
+ /// ready; it returns a future which will then be run to completion
+ /// to produce the next value on this stream.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3);
+ /// let stream = stream.then(|x| async move { x + 3 });
+ ///
+ /// assert_eq!(vec![4, 5, 6], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn then<Fut, F>(self, f: F) -> Then<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future,
+ Self: Sized,
+ {
+ assert_stream::<Fut::Output, _>(Then::new(self, f))
+ }
+
+ /// Transforms a stream into a collection, returning a
+ /// future representing the result of that computation.
+ ///
+ /// The returned future will be resolved when the stream terminates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::StreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// for i in 1..=5 {
+ /// tx.unbounded_send(i).unwrap();
+ /// }
+ /// });
+ ///
+ /// let output = rx.collect::<Vec<i32>>().await;
+ /// assert_eq!(output, vec![1, 2, 3, 4, 5]);
+ /// # });
+ /// ```
+ fn collect<C: Default + Extend<Self::Item>>(self) -> Collect<Self, C>
+ where
+ Self: Sized,
+ {
+ assert_future::<C, _>(Collect::new(self))
+ }
+
+ /// Converts a stream of pairs into a future, which
+ /// resolves to a pair of containers.
+ ///
+ /// `unzip()` produces a future, which resolves to two
+ /// collections: one from the left elements of the pairs,
+ /// and one from the right elements.
+ ///
+ /// The returned future will be resolved when the stream terminates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::StreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// tx.unbounded_send((1, 2)).unwrap();
+ /// tx.unbounded_send((3, 4)).unwrap();
+ /// tx.unbounded_send((5, 6)).unwrap();
+ /// });
+ ///
+ /// let (o1, o2): (Vec<_>, Vec<_>) = rx.unzip().await;
+ /// assert_eq!(o1, vec![1, 3, 5]);
+ /// assert_eq!(o2, vec![2, 4, 6]);
+ /// # });
+ /// ```
+ fn unzip<A, B, FromA, FromB>(self) -> Unzip<Self, FromA, FromB>
+ where
+ FromA: Default + Extend<A>,
+ FromB: Default + Extend<B>,
+ Self: Sized + Stream<Item = (A, B)>,
+ {
+ assert_future::<(FromA, FromB), _>(Unzip::new(self))
+ }
+
+ /// Concatenate all items of a stream into a single extendable
+ /// destination, returning a future representing the end result.
+ ///
+ /// This combinator will extend the first item with the contents
+ /// of all the subsequent results of the stream. If the stream is
+ /// empty, the default value will be returned.
+ ///
+ /// Works with all collections that implement the
+ /// [`Extend`](std::iter::Extend) trait.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::StreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// for i in (0..3).rev() {
+ /// let n = i * 3;
+ /// tx.unbounded_send(vec![n + 1, n + 2, n + 3]).unwrap();
+ /// }
+ /// });
+ ///
+ /// let result = rx.concat().await;
+ ///
+ /// assert_eq!(result, vec![7, 8, 9, 4, 5, 6, 1, 2, 3]);
+ /// # });
+ /// ```
+ fn concat(self) -> Concat<Self>
+ where
+ Self: Sized,
+ Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+ {
+ assert_future::<Self::Item, _>(Concat::new(self))
+ }
+
+ /// Drives the stream to completion, counting the number of items.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so counting elements of a
+ /// stream with more than [`usize::MAX`] elements either produces the wrong
+ /// result or panics. If debug assertions are enabled, a panic is guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic if the stream has more than [`usize::MAX`]
+ /// elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ /// let count = stream.count().await;
+ ///
+ /// assert_eq!(count, 10);
+ /// # });
+ /// ```
+ fn count(self) -> Count<Self>
+ where
+ Self: Sized,
+ {
+ assert_future::<usize, _>(Count::new(self))
+ }
+
+ /// Repeats a stream endlessly.
+ ///
+ /// The stream never terminates. Note that you likely want to avoid calling
+ /// `collect` or similar methods on the returned stream, since doing so would
+ /// try to buffer infinitely many items and eventually exhaust all available memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// let a = [1, 2, 3];
+ /// let mut s = stream::iter(a.iter()).cycle();
+ ///
+ /// assert_eq!(s.next().await, Some(&1));
+ /// assert_eq!(s.next().await, Some(&2));
+ /// assert_eq!(s.next().await, Some(&3));
+ /// assert_eq!(s.next().await, Some(&1));
+ /// assert_eq!(s.next().await, Some(&2));
+ /// assert_eq!(s.next().await, Some(&3));
+ /// assert_eq!(s.next().await, Some(&1));
+ /// # });
+ /// ```
+ fn cycle(self) -> Cycle<Self>
+ where
+ Self: Sized + Clone,
+ {
+ assert_stream::<Self::Item, _>(Cycle::new(self))
+ }
+
+ /// Execute an accumulating asynchronous computation over a stream,
+ /// collecting all the values into one final result.
+ ///
+ /// This combinator will accumulate all values returned by this stream
+ /// according to the closure provided. The initial state is also provided to
+ /// this method and then is returned again by each execution of the closure.
+ /// Once the entire stream has been exhausted the returned future will
+ /// resolve to this value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let number_stream = stream::iter(0..6);
+ /// let sum = number_stream.fold(0, |acc, x| async move { acc + x });
+ /// assert_eq!(sum.await, 15);
+ /// # });
+ /// ```
+ fn fold<T, Fut, F>(self, init: T, f: F) -> Fold<Self, Fut, T, F>
+ where
+ F: FnMut(T, Self::Item) -> Fut,
+ Fut: Future<Output = T>,
+ Self: Sized,
+ {
+ assert_future::<T, _>(Fold::new(self, f, init))
+ }
+
+ /// Executes the predicate over this asynchronous stream, returning `true` if any element in the stream satisfies the predicate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let number_stream = stream::iter(0..10);
+ /// let contain_three = number_stream.any(|i| async move { i == 3 });
+ /// assert_eq!(contain_three.await, true);
+ /// # });
+ /// ```
+ fn any<Fut, F>(self, f: F) -> Any<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future<Output = bool>,
+ Self: Sized,
+ {
+ assert_future::<bool, _>(Any::new(self, f))
+ }
+
+ /// Executes the predicate over this asynchronous stream, returning `true` if every element in the stream satisfies the predicate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let number_stream = stream::iter(0..10);
+ /// let less_than_twenty = number_stream.all(|i| async move { i < 20 });
+ /// assert_eq!(less_than_twenty.await, true);
+ /// # });
+ /// ```
+ fn all<Fut, F>(self, f: F) -> All<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future<Output = bool>,
+ Self: Sized,
+ {
+ assert_future::<bool, _>(All::new(self, f))
+ }
+
+ /// Flattens a stream of streams into just one continuous stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::StreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx1, rx1) = mpsc::unbounded();
+ /// let (tx2, rx2) = mpsc::unbounded();
+ /// let (tx3, rx3) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// tx1.unbounded_send(1).unwrap();
+ /// tx1.unbounded_send(2).unwrap();
+ /// });
+ /// thread::spawn(move || {
+ /// tx2.unbounded_send(3).unwrap();
+ /// tx2.unbounded_send(4).unwrap();
+ /// });
+ /// thread::spawn(move || {
+ /// tx3.unbounded_send(rx1).unwrap();
+ /// tx3.unbounded_send(rx2).unwrap();
+ /// });
+ ///
+ /// let output = rx3.flatten().collect::<Vec<i32>>().await;
+ /// assert_eq!(output, vec![1, 2, 3, 4]);
+ /// # });
+ /// ```
+ fn flatten(self) -> Flatten<Self>
+ where
+ Self::Item: Stream,
+ Self: Sized,
+ {
+ assert_stream::<<Self::Item as Stream>::Item, _>(Flatten::new(self))
+ }
+
+ /// Maps a stream like [`StreamExt::map`] but flattens nested `Stream`s.
+ ///
+ /// [`StreamExt::map`] is very useful, but if it produces a `Stream` instead,
+ /// you would have to chain combinators like `.map(f).flatten()`, whereas this
+ /// combinator lets you write `.flat_map(f)` directly.
+ ///
+ /// The provided closure, which produces the inner streams, is executed on each
+ /// element of the stream once the previous inner stream has terminated and the
+ /// next stream item is available.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `flat_map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3);
+ /// let stream = stream.flat_map(|x| stream::iter(vec![x + 3; x]));
+ ///
+ /// assert_eq!(vec![4, 5, 5, 6, 6, 6], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
+ where
+ F: FnMut(Self::Item) -> U,
+ U: Stream,
+ Self: Sized,
+ {
+ assert_stream::<U::Item, _>(FlatMap::new(self, f))
+ }
+
+ /// Combinator similar to [`StreamExt::fold`] that holds internal state
+ /// and produces a new stream.
+ ///
+ /// Accepts an initial state and a closure which will be applied to each element
+ /// of the stream until the provided closure returns `None`. Once `None` is
+ /// returned, the stream will be terminated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ ///
+ /// let stream = stream.scan(0, |state, x| {
+ /// *state += x;
+ /// future::ready(if *state < 10 { Some(x) } else { None })
+ /// });
+ ///
+ /// assert_eq!(vec![1, 2, 3], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn scan<S, B, Fut, F>(self, initial_state: S, f: F) -> Scan<Self, S, Fut, F>
+ where
+ F: FnMut(&mut S, Self::Item) -> Fut,
+ Fut: Future<Output = Option<B>>,
+ Self: Sized,
+ {
+ assert_stream::<B, _>(Scan::new(self, initial_state, f))
+ }
+
+ /// Skip elements on this stream while the provided asynchronous predicate
+ /// resolves to `true`.
+ ///
+ /// This function, like `Iterator::skip_while`, will skip elements on the
+ /// stream until the predicate `f` resolves to `false`. Once one element
+ /// returns `false`, all future elements will be returned from the underlying
+ /// stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ ///
+ /// let stream = stream.skip_while(|x| future::ready(*x <= 5));
+ ///
+ /// assert_eq!(vec![6, 7, 8, 9, 10], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn skip_while<Fut, F>(self, f: F) -> SkipWhile<Self, Fut, F>
+ where
+ F: FnMut(&Self::Item) -> Fut,
+ Fut: Future<Output = bool>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(SkipWhile::new(self, f))
+ }
+
+ /// Take elements from this stream while the provided asynchronous predicate
+ /// resolves to `true`.
+ ///
+ /// This function, like `Iterator::take_while`, will take elements from the
+ /// stream until the predicate `f` resolves to `false`. Once one element
+ /// returns `false`, it will always return that the stream is done.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10);
+ ///
+ /// let stream = stream.take_while(|x| future::ready(*x <= 5));
+ ///
+ /// assert_eq!(vec![1, 2, 3, 4, 5], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn take_while<Fut, F>(self, f: F) -> TakeWhile<Self, Fut, F>
+ where
+ F: FnMut(&Self::Item) -> Fut,
+ Fut: Future<Output = bool>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(TakeWhile::new(self, f))
+ }
+
+ /// Take elements from this stream until the provided future resolves.
+ ///
+ /// This function will take elements from the stream until the provided
+ /// stopping future `fut` resolves. Once the `fut` future becomes ready,
+ /// this stream combinator will always return that the stream is done.
+ ///
+ /// The stopping future may return any type. Once the stream is stopped,
+ /// the result of the stopping future may be accessed with `TakeUntil::take_result()`.
+ /// The stream may also be resumed with `TakeUntil::take_future()`.
+ /// See the documentation of [`TakeUntil`] for more information.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::task::Poll;
+ ///
+ /// let stream = stream::iter(1..=10);
+ ///
+ /// let mut i = 0;
+ /// let stop_fut = future::poll_fn(|_cx| {
+ /// i += 1;
+ /// if i <= 5 {
+ /// Poll::Pending
+ /// } else {
+ /// Poll::Ready(())
+ /// }
+ /// });
+ ///
+ /// let stream = stream.take_until(stop_fut);
+ ///
+ /// assert_eq!(vec![1, 2, 3, 4, 5], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn take_until<Fut>(self, fut: Fut) -> TakeUntil<Self, Fut>
+ where
+ Fut: Future,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(TakeUntil::new(self, fut))
+ }
+
+ /// Runs this stream to completion, executing the provided asynchronous
+ /// closure for each element on the stream.
+ ///
+ /// The closure provided will be called for each item this stream produces,
+ /// yielding a future. That future will then be executed to completion
+ /// before moving on to the next item.
+ ///
+ /// The returned value is a `Future` where the `Output` type is `()`; it is
+ /// executed entirely for its side effects.
+ ///
+ /// To process each item in the stream and produce another stream instead
+ /// of a single future, use `then` instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let mut x = 0;
+ ///
+ /// {
+ /// let fut = stream::repeat(1).take(3).for_each(|item| {
+ /// x += item;
+ /// future::ready(())
+ /// });
+ /// fut.await;
+ /// }
+ ///
+ /// assert_eq!(x, 3);
+ /// # });
+ /// ```
+ fn for_each<Fut, F>(self, f: F) -> ForEach<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future<Output = ()>,
+ Self: Sized,
+ {
+ assert_future::<(), _>(ForEach::new(self, f))
+ }
+
+ /// Runs this stream to completion, executing the provided asynchronous
+ /// closure for each element on the stream concurrently as elements become
+ /// available.
+ ///
+ /// This is similar to [`StreamExt::for_each`], but the futures
+ /// produced by the closure are run concurrently (though not in parallel;
+ /// this combinator does not introduce any threads).
+ ///
+ /// The closure provided will be called for each item this stream produces,
+ /// yielding a future. That future will then be executed to completion
+ /// concurrently with the other futures produced by the closure.
+ ///
+ /// The first argument is an optional limit on the number of concurrent
+ /// futures. If this limit is not `None`, no more than `limit` futures
+ /// will be run concurrently. The `limit` argument is of type
+ /// `Into<Option<usize>>`, and so can be provided as either `None`,
+ /// `Some(10)`, or just `10`. Note: a limit of zero is interpreted as
+ /// no limit at all, and will have the same result as passing in `None`.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::oneshot;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let (tx1, rx1) = oneshot::channel();
+ /// let (tx2, rx2) = oneshot::channel();
+ /// let (tx3, rx3) = oneshot::channel();
+ ///
+ /// let fut = stream::iter(vec![rx1, rx2, rx3]).for_each_concurrent(
+ /// /* limit */ 2,
+ /// |rx| async move {
+ /// rx.await.unwrap();
+ /// }
+ /// );
+ /// tx1.send(()).unwrap();
+ /// tx2.send(()).unwrap();
+ /// tx3.send(()).unwrap();
+ /// fut.await;
+ /// # })
+ /// ```
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn for_each_concurrent<Fut, F>(
+ self,
+ limit: impl Into<Option<usize>>,
+ f: F,
+ ) -> ForEachConcurrent<Self, Fut, F>
+ where
+ F: FnMut(Self::Item) -> Fut,
+ Fut: Future<Output = ()>,
+ Self: Sized,
+ {
+ assert_future::<(), _>(ForEachConcurrent::new(self, limit.into(), f))
+ }
+
+ /// Creates a new stream of at most `n` items of the underlying stream.
+ ///
+ /// Once `n` items have been yielded from this stream then it will always
+ /// return that the stream is done.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10).take(3);
+ ///
+ /// assert_eq!(vec![1, 2, 3], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn take(self, n: usize) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Take::new(self, n))
+ }
+
+ /// Creates a new stream which skips `n` items of the underlying stream.
+ ///
+ /// Once `n` items have been skipped from this stream then it will always
+ /// return the remaining items on this stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=10).skip(5);
+ ///
+ /// assert_eq!(vec![6, 7, 8, 9, 10], stream.collect::<Vec<_>>().await);
+ /// # });
+ /// ```
+ fn skip(self, n: usize) -> Skip<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Skip::new(self, n))
+ }
+
+ /// Fuse a stream such that [`poll_next`](Stream::poll_next) will never
+ /// again be called once it has finished. This method can be used to turn
+ /// any `Stream` into a `FusedStream`.
+ ///
+ /// Normally, once a stream has returned [`None`] from
+ /// [`poll_next`](Stream::poll_next), any further calls could exhibit bad
+ /// behavior such as blocking forever, panicking, or never returning. If it is
+ /// known that [`poll_next`](Stream::poll_next) may be called after the stream
+ /// has already finished, then this method can be used to ensure that it has
+ /// defined semantics.
+ ///
+ /// The [`poll_next`](Stream::poll_next) method of a `fuse`d stream
+ /// is guaranteed to return [`None`] after the underlying stream has
+ /// finished.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::executor::block_on_stream;
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::task::Poll;
+ ///
+ /// let mut x = 0;
+ /// let stream = stream::poll_fn(|_| {
+ /// x += 1;
+ /// match x {
+ /// 0..=2 => Poll::Ready(Some(x)),
+ /// 3 => Poll::Ready(None),
+ /// _ => panic!("should not happen")
+ /// }
+ /// }).fuse();
+ ///
+ /// let mut iter = block_on_stream(stream);
+ /// assert_eq!(Some(1), iter.next());
+ /// assert_eq!(Some(2), iter.next());
+ /// assert_eq!(None, iter.next());
+ /// assert_eq!(None, iter.next());
+ /// // ...
+ /// ```
+ fn fuse(self) -> Fuse<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Fuse::new(self))
+ }
+
+ /// Borrows a stream, rather than consuming it.
+ ///
+ /// This is useful to allow applying stream adaptors while still retaining
+ /// ownership of the original stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..5);
+ ///
+ /// let sum = stream.by_ref()
+ /// .take(2)
+ /// .fold(0, |a, b| async move { a + b })
+ /// .await;
+ /// assert_eq!(sum, 3);
+ ///
+ /// // You can use the stream again
+ /// let sum = stream.take(2)
+ /// .fold(0, |a, b| async move { a + b })
+ /// .await;
+ /// assert_eq!(sum, 7);
+ /// # });
+ /// ```
+ fn by_ref(&mut self) -> &mut Self {
+ self
+ }
+
+ /// Catches unwinding panics while polling the stream.
+ ///
+ /// A caught panic (if any) will be the last element of the resulting stream.
+ ///
+ /// In general, panics within a stream can propagate all the way out to the
+ /// task level. This combinator makes it possible to halt unwinding within
+ /// the stream itself. It's most commonly used within task executors. This
+ /// method should not be used for error handling.
+ ///
+ /// Note that this method requires the `UnwindSafe` bound from the standard
+ /// library. This isn't always applied automatically, and the standard
+ /// library provides an `AssertUnwindSafe` wrapper type to apply it
+ /// after the fact. To assist using this method, the [`Stream`] trait is
+ /// also implemented for `AssertUnwindSafe<St>` where `St` implements
+ /// [`Stream`].
+ ///
+ /// This method is only available when the `std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(vec![Some(10), None, Some(11)]);
+ /// // Panic on second element
+ /// let stream_panicking = stream.map(|o| o.unwrap());
+ /// // Collect all the results
+ /// let stream = stream_panicking.catch_unwind();
+ ///
+ /// let results: Vec<Result<i32, _>> = stream.collect().await;
+ /// match results[0] {
+ /// Ok(10) => {}
+ /// _ => panic!("unexpected result!"),
+ /// }
+ /// assert!(results[1].is_err());
+ /// assert_eq!(results.len(), 2);
+ /// # });
+ /// ```
+ #[cfg(feature = "std")]
+ fn catch_unwind(self) -> CatchUnwind<Self>
+ where
+ Self: Sized + std::panic::UnwindSafe,
+ {
+ assert_stream(CatchUnwind::new(self))
+ }
+
+ /// Wrap the stream in a Box, pinning it.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
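+ ///
+ /// # Examples
+ ///
+ /// A small sketch: boxing erases the concrete stream type, so differently
+ /// typed streams can share one return type (the contents are arbitrary).
+ ///
+ /// ```
+ /// use futures::stream::{self, BoxStream, StreamExt};
+ ///
+ /// fn numbers(small: bool) -> BoxStream<'static, i32> {
+ /// if small {
+ /// stream::iter(vec![1, 2, 3]).boxed()
+ /// } else {
+ /// stream::iter(1..=100).boxed()
+ /// }
+ /// }
+ ///
+ /// # futures::executor::block_on(async {
+ /// assert_eq!(numbers(true).collect::<Vec<_>>().await, vec![1, 2, 3]);
+ /// # });
+ /// ```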
+ #[cfg(feature = "alloc")]
+ fn boxed<'a>(self) -> BoxStream<'a, Self::Item>
+ where
+ Self: Sized + Send + 'a,
+ {
+ assert_stream::<Self::Item, _>(Box::pin(self))
+ }
+
+ /// Wrap the stream in a Box, pinning it.
+ ///
+ /// Similar to `boxed`, but without the `Send` requirement.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "alloc")]
+ fn boxed_local<'a>(self) -> LocalBoxStream<'a, Self::Item>
+ where
+ Self: Sized + 'a,
+ {
+ assert_stream::<Self::Item, _>(Box::pin(self))
+ }
+
+ /// An adaptor for creating a buffered list of pending futures.
+ ///
+ /// If this stream's item can be converted into a future, then this adaptor
+ /// will buffer at most `n` futures and then return the outputs in the
+ /// same order as the underlying stream. No more than `n` futures will be
+ /// buffered at any point in time, and fewer than `n` may be buffered
+ /// depending on the state of each future.
+ ///
+ /// The returned stream will be a stream of each future's output.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
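+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch: mapping each item to a future and buffering two of them
+ /// at a time still yields the outputs in the original order.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3)
+ /// .map(|x| async move { x * 2 })
+ /// .buffered(2);
+ ///
+ /// assert_eq!(stream.collect::<Vec<_>>().await, vec![2, 4, 6]);
+ /// # });
+ /// ```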
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn buffered(self, n: usize) -> Buffered<Self>
+ where
+ Self::Item: Future,
+ Self: Sized,
+ {
+ assert_stream::<<Self::Item as Future>::Output, _>(Buffered::new(self, n))
+ }
+
+ /// An adaptor for creating a buffered list of pending futures (unordered).
+ ///
+ /// If this stream's item can be converted into a future, then this adaptor
+ /// will buffer up to `n` futures and then return the outputs in the order
+ /// in which they complete. No more than `n` futures will be buffered at
+ /// any point in time, and fewer than `n` may be buffered depending on
+ /// the state of each future.
+ ///
+ /// The returned stream will be a stream of each future's output.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::oneshot;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let (send_one, recv_one) = oneshot::channel();
+ /// let (send_two, recv_two) = oneshot::channel();
+ ///
+ /// let stream_of_futures = stream::iter(vec![recv_one, recv_two]);
+ /// let mut buffered = stream_of_futures.buffer_unordered(10);
+ ///
+ /// send_two.send(2i32)?;
+ /// assert_eq!(buffered.next().await, Some(Ok(2i32)));
+ ///
+ /// send_one.send(1i32)?;
+ /// assert_eq!(buffered.next().await, Some(Ok(1i32)));
+ ///
+ /// assert_eq!(buffered.next().await, None);
+ /// # Ok::<(), i32>(()) }).unwrap();
+ /// ```
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn buffer_unordered(self, n: usize) -> BufferUnordered<Self>
+ where
+ Self::Item: Future,
+ Self: Sized,
+ {
+ assert_stream::<<Self::Item as Future>::Output, _>(BufferUnordered::new(self, n))
+ }
+
+ /// An adapter for zipping two streams together.
+ ///
+ /// The zipped stream waits for both streams to produce an item, and then
+ /// returns that pair. If either stream ends then the zipped stream will
+ /// also end.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream1 = stream::iter(1..=3);
+ /// let stream2 = stream::iter(5..=10);
+ ///
+ /// let vec = stream1.zip(stream2)
+ /// .collect::<Vec<_>>()
+ /// .await;
+ /// assert_eq!(vec![(1, 5), (2, 6), (3, 7)], vec);
+ /// # });
+ /// ```
+ ///
+ fn zip<St>(self, other: St) -> Zip<Self, St>
+ where
+ St: Stream,
+ Self: Sized,
+ {
+ assert_stream::<(Self::Item, St::Item), _>(Zip::new(self, other))
+ }
+
+ /// Adapter for chaining two streams.
+ ///
+ /// The resulting stream emits elements from the first stream, and when the
+ /// first stream reaches the end, emits the elements from the second stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream1 = stream::iter(vec![Ok(10), Err(false)]);
+ /// let stream2 = stream::iter(vec![Err(true), Ok(20)]);
+ ///
+ /// let stream = stream1.chain(stream2);
+ ///
+ /// let result: Vec<_> = stream.collect().await;
+ /// assert_eq!(result, vec![
+ /// Ok(10),
+ /// Err(false),
+ /// Err(true),
+ /// Ok(20),
+ /// ]);
+ /// # });
+ /// ```
+ fn chain<St>(self, other: St) -> Chain<Self, St>
+ where
+ St: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Chain::new(self, other))
+ }
+
+ /// Creates a new stream which exposes a `peek` method.
+ ///
+ /// Calling `peek` returns a reference to the next item in the stream.
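+ ///
+ /// # Examples
+ ///
+ /// A short sketch: peeking does not consume the item, so the following
+ /// `next` call still yields it (the stream must be pinned to call `peek`).
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::pin_mut;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(vec![1, 2, 3]).peekable();
+ /// pin_mut!(stream);
+ ///
+ /// assert_eq!(stream.as_mut().peek().await, Some(&1));
+ /// assert_eq!(stream.as_mut().next().await, Some(1));
+ /// # });
+ /// ```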
+ fn peekable(self) -> Peekable<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Peekable::new(self))
+ }
+
+ /// An adaptor for chunking up items of the stream inside a vector.
+ ///
+ /// This combinator will attempt to pull items from this stream and buffer
+ /// them into a local vector. At most `capacity` items will get buffered
+ /// before they're yielded from the returned stream.
+ ///
+ /// Note that the vectors returned from this stream may not always have
+ /// `capacity` elements. If the underlying stream ended and only a partial
+ /// vector was created, it'll be returned. Additionally if an error happens
+ /// from the underlying stream then the currently buffered items will be
+ /// yielded.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
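+ /// # Examples
+ ///
+ /// A minimal sketch: seven items chunked three at a time, with a short final
+ /// chunk.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let chunks: Vec<Vec<i32>> = stream::iter(1..=7).chunks(3).collect().await;
+ ///
+ /// assert_eq!(chunks, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7]]);
+ /// # });
+ /// ```
+ ///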
+ /// # Panics
+ ///
+ /// This method will panic if `capacity` is zero.
+ #[cfg(feature = "alloc")]
+ fn chunks(self, capacity: usize) -> Chunks<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Vec<Self::Item>, _>(Chunks::new(self, capacity))
+ }
+
+ /// An adaptor for chunking up ready items of the stream inside a vector.
+ ///
+ /// This combinator will attempt to pull ready items from this stream and
+ /// buffer them into a local vector. At most `capacity` items will get
+ /// buffered before they're yielded from the returned stream. If the
+ /// underlying stream returns `Poll::Pending` and the collected chunk is not
+ /// empty, the chunk will be returned immediately.
+ ///
+ /// If the underlying stream ended and only a partial vector was created,
+ /// it'll be returned. Additionally if an error happens from the underlying
+ /// stream then the currently buffered items will be yielded.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
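+ /// # Examples
+ ///
+ /// A minimal sketch: with a source whose items are all immediately ready,
+ /// chunks are capped at the given capacity.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let chunks: Vec<Vec<i32>> = stream::iter(1..=5).ready_chunks(2).collect().await;
+ ///
+ /// assert_eq!(chunks, vec![vec![1, 2], vec![3, 4], vec![5]]);
+ /// # });
+ /// ```
+ ///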
+ /// # Panics
+ ///
+ /// This method will panic if `capacity` is zero.
+ #[cfg(feature = "alloc")]
+ fn ready_chunks(self, capacity: usize) -> ReadyChunks<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Vec<Self::Item>, _>(ReadyChunks::new(self, capacity))
+ }
+
+ /// A future that completes after the given stream has been fully processed
+ /// into the sink and the sink has been flushed and closed.
+ ///
+ /// This future will drive the stream to keep producing items until it is
+ /// exhausted, sending each item to the sink. It will complete once the
+ /// stream is exhausted, the sink has received and flushed all items, and
+ /// the sink is closed. Note that neither the original stream nor provided
+ /// sink will be output by this future. Pass the sink by `Pin<&mut S>`
+ /// (for example, via `forward(&mut sink)` inside an `async` fn/block) in
+ /// order to preserve access to the `Sink`. If the stream produces an error,
+ /// that error will be returned by this future without flushing/closing the sink.
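+ ///
+ /// # Examples
+ ///
+ /// A small sketch (using an unbounded mpsc channel as the sink, purely for
+ /// illustration): the items are wrapped in `Ok` so the stream's error type
+ /// matches the sink's.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let (tx, rx) = mpsc::unbounded();
+ ///
+ /// stream::iter(vec![1, 2, 3])
+ /// .map(Ok)
+ /// .forward(tx)
+ /// .await
+ /// .unwrap();
+ ///
+ /// // `forward` flushed and closed the sink, so the receiver terminates.
+ /// assert_eq!(rx.collect::<Vec<_>>().await, vec![1, 2, 3]);
+ /// # });
+ /// ```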
+ #[cfg(feature = "sink")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ fn forward<S>(self, sink: S) -> Forward<Self, S>
+ where
+ S: Sink<Self::Ok, Error = Self::Error>,
+ Self: TryStream + Sized,
+ // Self: TryStream + Sized + Stream<Item = Result<<Self as TryStream>::Ok, <Self as TryStream>::Error>>,
+ {
+ // TODO: type mismatch resolving `<Self as futures_core::Stream>::Item == std::result::Result<<Self as futures_core::TryStream>::Ok, <Self as futures_core::TryStream>::Error>`
+ // assert_future::<Result<(), Self::Error>, _>(Forward::new(self, sink))
+ Forward::new(self, sink)
+ }
+
+ /// Splits this `Stream + Sink` object into separate `Sink` and `Stream`
+ /// objects.
+ ///
+ /// This can be useful when you want to split ownership between tasks, or
+ /// allow direct interaction between the two objects (e.g. via
+ /// `SinkExt::send_all`).
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "sink")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn split<Item>(self) -> (SplitSink<Self, Item>, SplitStream<Self>)
+ where
+ Self: Sink<Item> + Sized,
+ {
+ let (sink, stream) = split::split(self);
+ (
+ crate::sink::assert_sink::<Item, Self::Error, _>(sink),
+ assert_stream::<Self::Item, _>(stream),
+ )
+ }
+
+ /// Do something with each item of this stream, afterwards passing it on.
+ ///
+ /// This is similar to the `Iterator::inspect` method in the standard
+ /// library, in that it allows you to easily inspect each value as it passes
+ /// through the stream, for example to debug what's going on.
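+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch that logs each item as it flows through:
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let stream = stream::iter(1..=3).inspect(|x| println!("about to yield {}", x));
+ ///
+ /// assert_eq!(stream.collect::<Vec<_>>().await, vec![1, 2, 3]);
+ /// # });
+ /// ```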
+ fn inspect<F>(self, f: F) -> Inspect<Self, F>
+ where
+ F: FnMut(&Self::Item),
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Inspect::new(self, f))
+ }
+
+ /// Wrap this stream in an `Either` stream, making it the left-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `right_stream` method to write `if`
+ /// statements that evaluate to different streams in different branches.
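+ ///
+ /// # Examples
+ ///
+ /// A short sketch: the two branches build different stream types, and
+ /// `left_stream`/`right_stream` unify them into a single `Either` type.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let use_small = true;
+ /// let stream = if use_small {
+ /// stream::iter(vec![1, 2]).left_stream()
+ /// } else {
+ /// stream::repeat(0).take(3).right_stream()
+ /// };
+ ///
+ /// assert_eq!(stream.collect::<Vec<_>>().await, vec![1, 2]);
+ /// # });
+ /// ```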
+ fn left_stream<B>(self) -> Either<Self, B>
+ where
+ B: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Either::Left(self))
+ }
+
+ /// Wrap this stream in an `Either` stream, making it the right-hand variant
+ /// of that `Either`.
+ ///
+ /// This can be used in combination with the `left_stream` method to write `if`
+ /// statements that evaluate to different streams in different branches.
+ fn right_stream<B>(self) -> Either<B, Self>
+ where
+ B: Stream<Item = Self::Item>,
+ Self: Sized,
+ {
+ assert_stream::<Self::Item, _>(Either::Right(self))
+ }
+
+ /// A convenience method for calling [`Stream::poll_next`] on [`Unpin`]
+ /// stream types.
+ fn poll_next_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).poll_next(cx)
+ }
+
+ /// Returns a [`Future`] that resolves when the next item in this stream is
+ /// ready.
+ ///
+ /// This is similar to the [`next`][StreamExt::next] method, but it won't
+ /// resolve to [`None`] if used on an empty [`Stream`]. Instead, the
+ /// returned future type will return `true` from
+ /// [`FusedFuture::is_terminated`][] when the [`Stream`] is empty, allowing
+ /// [`select_next_some`][StreamExt::select_next_some] to be easily used with
+ /// the [`select!`] macro.
+ ///
+ /// If the future is polled after this [`Stream`] is empty it will panic.
+ /// Using the future with a [`FusedFuture`][]-aware primitive like the
+ /// [`select!`] macro will prevent this.
+ ///
+ /// [`FusedFuture`]: futures_core::future::FusedFuture
+ /// [`FusedFuture::is_terminated`]: futures_core::future::FusedFuture::is_terminated
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::{future, select};
+ /// use futures::stream::{StreamExt, FuturesUnordered};
+ ///
+ /// let mut fut = future::ready(1);
+ /// let mut async_tasks = FuturesUnordered::new();
+ /// let mut total = 0;
+ /// loop {
+ /// select! {
+ /// num = fut => {
+ /// // First, the `ready` future completes.
+ /// total += num;
+ /// // Then we spawn a new task onto `async_tasks`,
+ /// async_tasks.push(async { 5 });
+ /// },
+ /// // On the next iteration of the loop, the task we spawned
+ /// // completes.
+ /// num = async_tasks.select_next_some() => {
+ /// total += num;
+ /// }
+ /// // Finally, both the `ready` future and `async_tasks` have
+ /// // finished, so we enter the `complete` branch.
+ /// complete => break,
+ /// }
+ /// }
+ /// assert_eq!(total, 6);
+ /// # });
+ /// ```
+ fn select_next_some(&mut self) -> SelectNextSome<'_, Self>
+ where
+ Self: Unpin + FusedStream,
+ {
+ assert_future::<Self::Item, _>(SelectNextSome::new(self))
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/next.rs b/vendor/futures-util/src/stream/stream/next.rs
new file mode 100644
index 000000000..8d8347aa0
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/next.rs
@@ -0,0 +1,34 @@
+use crate::stream::StreamExt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`next`](super::StreamExt::next) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct Next<'a, St: ?Sized> {
+ stream: &'a mut St,
+}
+
+impl<St: ?Sized + Unpin> Unpin for Next<'_, St> {}
+
+impl<'a, St: ?Sized + Stream + Unpin> Next<'a, St> {
+ pub(super) fn new(stream: &'a mut St) -> Self {
+ Self { stream }
+ }
+}
+
+impl<St: ?Sized + FusedStream + Unpin> FusedFuture for Next<'_, St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: ?Sized + Stream + Unpin> Future for Next<'_, St> {
+ type Output = Option<St::Item>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.stream.poll_next_unpin(cx)
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/peek.rs b/vendor/futures-util/src/stream/stream/peek.rs
new file mode 100644
index 000000000..c72dfc366
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/peek.rs
@@ -0,0 +1,433 @@
+use crate::fns::FnOnce1;
+use crate::stream::{Fuse, StreamExt};
+use core::fmt;
+use core::marker::PhantomData;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// A `Stream` that implements a `peek` method.
+ ///
+ /// The `peek` method can be used to retrieve a reference
+ /// to the next `Stream::Item` if available. A subsequent
+ /// call to [`poll_next`](Stream::poll_next) will return the owned item.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Peekable<St: Stream> {
+ #[pin]
+ stream: Fuse<St>,
+ peeked: Option<St::Item>,
+ }
+}
+
+impl<St: Stream> Peekable<St> {
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream: stream.fuse(), peeked: None }
+ }
+
+ delegate_access_inner!(stream, St, (.));
+
+ /// Produces a future which retrieves a reference to the next item
+ /// in the stream, or `None` if the underlying stream terminates.
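+ ///
+ /// # Examples
+ ///
+ /// A short sketch: the peeked item is still returned by the following
+ /// `next` call.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(vec![1, 2, 3]).peekable();
+ /// pin_mut!(stream);
+ ///
+ /// assert_eq!(stream.as_mut().peek().await, Some(&1));
+ /// assert_eq!(stream.as_mut().next().await, Some(1));
+ /// # });
+ /// ```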
+ pub fn peek(self: Pin<&mut Self>) -> Peek<'_, St> {
+ Peek { inner: Some(self) }
+ }
+
+ /// Peeks at the next item in the stream, returning a reference to it.
+ ///
+ /// This method polls the underlying stream and returns either a reference
+ /// to the next item, if one is ready, or `None` if the stream has terminated.
+ pub fn poll_peek(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<&St::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if this.peeked.is_some() {
+ break this.peeked.as_ref();
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ *this.peeked = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ /// Produces a future which retrieves a mutable reference to the next item
+ /// in the stream, or `None` if the underlying stream terminates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(vec![1, 2, 3]).peekable();
+ /// pin_mut!(stream);
+ ///
+ /// assert_eq!(stream.as_mut().peek_mut().await, Some(&mut 1));
+ /// assert_eq!(stream.as_mut().next().await, Some(1));
+ ///
+ /// // Peek into the stream and modify the value which will be returned next
+ /// if let Some(p) = stream.as_mut().peek_mut().await {
+ /// if *p == 2 {
+ /// *p = 5;
+ /// }
+ /// }
+ ///
+ /// assert_eq!(stream.collect::<Vec<_>>().await, vec![5, 3]);
+ /// # });
+ /// ```
+ pub fn peek_mut(self: Pin<&mut Self>) -> PeekMut<'_, St> {
+ PeekMut { inner: Some(self) }
+ }
+
+ /// Peeks at the next item in the stream, returning a mutable reference to it.
+ pub fn poll_peek_mut(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<&mut St::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if this.peeked.is_some() {
+ break this.peeked.as_mut();
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ *this.peeked = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ /// Creates a future which will consume and return the next value of this
+ /// stream if a condition is true.
+ ///
+ /// If `func` returns `true` for the next value of this stream, consume and
+ /// return it. Otherwise, return `None`.
+ ///
+ /// # Examples
+ ///
+ /// Consume a number if it's equal to 0.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(0..5).peekable();
+ /// pin_mut!(stream);
+ /// // The first item of the stream is 0; consume it.
+ /// assert_eq!(stream.as_mut().next_if(|&x| x == 0).await, Some(0));
+ /// // The next item is now 1, so the predicate returns `false` and `next_if` returns `None`.
+ /// assert_eq!(stream.as_mut().next_if(|&x| x == 0).await, None);
+ /// // `next_if` saves the rejected item, so it is still returned by the following call.
+ /// assert_eq!(stream.next().await, Some(1));
+ /// # });
+ /// ```
+ ///
+ /// Consume any number less than 10.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(1..20).peekable();
+ /// pin_mut!(stream);
+ /// // Consume all numbers less than 10
+ /// while stream.as_mut().next_if(|&x| x < 10).await.is_some() {}
+ /// // The next value returned will be 10
+ /// assert_eq!(stream.next().await, Some(10));
+ /// # });
+ /// ```
+ pub fn next_if<F>(self: Pin<&mut Self>, func: F) -> NextIf<'_, St, F>
+ where
+ F: FnOnce(&St::Item) -> bool,
+ {
+ NextIf { inner: Some((self, func)) }
+ }
+
+ /// Creates a future which will consume and return the next item if it is
+ /// equal to `expected`.
+ ///
+ /// # Example
+ ///
+ /// Consume a number if it's equal to 0.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(0..5).peekable();
+ /// pin_mut!(stream);
+ /// // The first item of the stream is 0; consume it.
+ /// assert_eq!(stream.as_mut().next_if_eq(&0).await, Some(0));
+ /// // The next item is now 1, which is not equal to `0`, so `next_if_eq` returns `None`.
+ /// assert_eq!(stream.as_mut().next_if_eq(&0).await, None);
+ /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`.
+ /// assert_eq!(stream.next().await, Some(1));
+ /// # });
+ /// ```
+ pub fn next_if_eq<'a, T>(self: Pin<&'a mut Self>, expected: &'a T) -> NextIfEq<'a, St, T>
+ where
+ T: ?Sized,
+ St::Item: PartialEq<T>,
+ {
+ NextIfEq {
+ inner: NextIf { inner: Some((self, NextIfEqFn { expected, _next: PhantomData })) },
+ }
+ }
+}
+
+impl<St: Stream> FusedStream for Peekable<St> {
+ fn is_terminated(&self) -> bool {
+ self.peeked.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<S: Stream> Stream for Peekable<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+ if let Some(item) = this.peeked.take() {
+ return Poll::Ready(Some(item));
+ }
+ this.stream.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let peek_len = if self.peeked.is_some() { 1 } else { 0 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(peek_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(peek_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Peekable<S>
+where
+ S: Sink<Item> + Stream,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
+
+pin_project! {
+ /// Future for the [`Peekable::peek`](self::Peekable::peek) method.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct Peek<'a, St: Stream> {
+ inner: Option<Pin<&'a mut Peekable<St>>>,
+ }
+}
+
+impl<St> fmt::Debug for Peek<'_, St>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Peek").field("inner", &self.inner).finish()
+ }
+}
+
+impl<St: Stream> FusedFuture for Peek<'_, St> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<'a, St> Future for Peek<'a, St>
+where
+ St: Stream,
+{
+ type Output = Option<&'a St::Item>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = self.project().inner;
+ if let Some(peekable) = inner {
+ ready!(peekable.as_mut().poll_peek(cx));
+
+ inner.take().unwrap().poll_peek(cx)
+ } else {
+ panic!("Peek polled after completion")
+ }
+ }
+}
+
+pin_project! {
+ /// Future for the [`Peekable::peek_mut`](self::Peekable::peek_mut) method.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct PeekMut<'a, St: Stream> {
+ inner: Option<Pin<&'a mut Peekable<St>>>,
+ }
+}
+
+impl<St> fmt::Debug for PeekMut<'_, St>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PeekMut").field("inner", &self.inner).finish()
+ }
+}
+
+impl<St: Stream> FusedFuture for PeekMut<'_, St> {
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+impl<'a, St> Future for PeekMut<'a, St>
+where
+ St: Stream,
+{
+ type Output = Option<&'a mut St::Item>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = self.project().inner;
+ if let Some(peekable) = inner {
+ ready!(peekable.as_mut().poll_peek_mut(cx));
+
+ inner.take().unwrap().poll_peek_mut(cx)
+ } else {
+ panic!("PeekMut polled after completion")
+ }
+ }
+}
+
+pin_project! {
+ /// Future for the [`Peekable::next_if`](self::Peekable::next_if) method.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct NextIf<'a, St: Stream, F> {
+ inner: Option<(Pin<&'a mut Peekable<St>>, F)>,
+ }
+}
+
+impl<St, F> fmt::Debug for NextIf<'_, St, F>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("NextIf").field("inner", &self.inner.as_ref().map(|(s, _f)| s)).finish()
+ }
+}
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<St, F> FusedFuture for NextIf<'_, St, F>
+where
+ St: Stream,
+ F: for<'a> FnOnce1<&'a St::Item, Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.inner.is_none()
+ }
+}
+
+#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
+impl<St, F> Future for NextIf<'_, St, F>
+where
+ St: Stream,
+ F: for<'a> FnOnce1<&'a St::Item, Output = bool>,
+{
+ type Output = Option<St::Item>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = self.project().inner;
+ if let Some((peekable, _)) = inner {
+ let res = ready!(peekable.as_mut().poll_next(cx));
+
+ let (peekable, func) = inner.take().unwrap();
+ match res {
+ Some(ref matched) if func.call_once(matched) => Poll::Ready(res),
+ other => {
+ let peekable = peekable.project();
+ // Since we called `self.next()`, we consumed `self.peeked`.
+ assert!(peekable.peeked.is_none());
+ *peekable.peeked = other;
+ Poll::Ready(None)
+ }
+ }
+ } else {
+ panic!("NextIf polled after completion")
+ }
+ }
+}
+
+pin_project! {
+ /// Future for the [`Peekable::next_if_eq`](self::Peekable::next_if_eq) method.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct NextIfEq<'a, St: Stream, T: ?Sized> {
+ #[pin]
+ inner: NextIf<'a, St, NextIfEqFn<'a, T, St::Item>>,
+ }
+}
+
+impl<St, T> fmt::Debug for NextIfEq<'_, St, T>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ T: ?Sized,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("NextIfEq")
+ .field("inner", &self.inner.inner.as_ref().map(|(s, _f)| s))
+ .finish()
+ }
+}
+
+impl<St, T> FusedFuture for NextIfEq<'_, St, T>
+where
+ St: Stream,
+ T: ?Sized,
+ St::Item: PartialEq<T>,
+{
+ fn is_terminated(&self) -> bool {
+ self.inner.is_terminated()
+ }
+}
+
+impl<St, T> Future for NextIfEq<'_, St, T>
+where
+ St: Stream,
+ T: ?Sized,
+ St::Item: PartialEq<T>,
+{
+ type Output = Option<St::Item>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.project().inner.poll(cx)
+ }
+}
+
+struct NextIfEqFn<'a, T: ?Sized, Item> {
+ expected: &'a T,
+ _next: PhantomData<Item>,
+}
+
+impl<T, Item> FnOnce1<&Item> for NextIfEqFn<'_, T, Item>
+where
+ T: ?Sized,
+ Item: PartialEq<T>,
+{
+ type Output = bool;
+
+ fn call_once(self, next: &Item) -> Self::Output {
+ next == self.expected
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/ready_chunks.rs b/vendor/futures-util/src/stream/stream/ready_chunks.rs
new file mode 100644
index 000000000..5ebc9582d
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/ready_chunks.rs
@@ -0,0 +1,114 @@
+use crate::stream::Fuse;
+use alloc::vec::Vec;
+use core::mem;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`ready_chunks`](super::StreamExt::ready_chunks) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct ReadyChunks<St: Stream> {
+ #[pin]
+ stream: Fuse<St>,
+ items: Vec<St::Item>,
+ cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475
+ }
+}
+
+impl<St: Stream> ReadyChunks<St> {
+ pub(super) fn new(stream: St, capacity: usize) -> Self {
+ assert!(capacity > 0);
+
+ Self {
+ stream: super::Fuse::new(stream),
+ items: Vec::with_capacity(capacity),
+ cap: capacity,
+ }
+ }
+
+ delegate_access_inner!(stream, St, (.));
+}
+
+impl<St: Stream> Stream for ReadyChunks<St> {
+ type Item = Vec<St::Item>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ loop {
+ match this.stream.as_mut().poll_next(cx) {
+ // Flush all collected data if underlying stream doesn't contain
+ // more ready values
+ Poll::Pending => {
+ return if this.items.is_empty() {
+ Poll::Pending
+ } else {
+ Poll::Ready(Some(mem::replace(this.items, Vec::with_capacity(*this.cap))))
+ }
+ }
+
+ // Push the ready item into the buffer and check whether it is full.
+ // If so, replace our buffer with a new and empty one and return
+ // the full one.
+ Poll::Ready(Some(item)) => {
+ this.items.push(item);
+ if this.items.len() >= *this.cap {
+ return Poll::Ready(Some(mem::replace(
+ this.items,
+ Vec::with_capacity(*this.cap),
+ )));
+ }
+ }
+
+ // Since the underlying stream ran out of values, return what we
+ // have buffered, if we have anything.
+ Poll::Ready(None) => {
+ let last = if this.items.is_empty() {
+ None
+ } else {
+ let full_buf = mem::replace(this.items, Vec::new());
+ Some(full_buf)
+ };
+
+ return Poll::Ready(last);
+ }
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let chunk_len = if self.items.is_empty() { 0 } else { 1 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(chunk_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(chunk_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St: FusedStream> FusedStream for ReadyChunks<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated() && self.items.is_empty()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for ReadyChunks<S>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/scan.rs b/vendor/futures-util/src/stream/stream/scan.rs
new file mode 100644
index 000000000..f5cfde9c3
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/scan.rs
@@ -0,0 +1,128 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+struct StateFn<S, F> {
+ state: S,
+ f: F,
+}
+
+pin_project! {
+ /// Stream for the [`scan`](super::StreamExt::scan) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Scan<St: Stream, S, Fut, F> {
+ #[pin]
+ stream: St,
+ state_f: Option<StateFn<S, F>>,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, S, Fut, F> fmt::Debug for Scan<St, S, Fut, F>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ S: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Scan")
+ .field("stream", &self.stream)
+ .field("state", &self.state_f.as_ref().map(|s| &s.state))
+ .field("future", &self.future)
+ .field("done_taking", &self.is_done_taking())
+ .finish()
+ }
+}
+
+impl<St: Stream, S, Fut, F> Scan<St, S, Fut, F> {
+ /// Checks if internal state is `None`.
+ fn is_done_taking(&self) -> bool {
+ self.state_f.is_none()
+ }
+}
+
+impl<B, St, S, Fut, F> Scan<St, S, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&mut S, St::Item) -> Fut,
+ Fut: Future<Output = Option<B>>,
+{
+ pub(super) fn new(stream: St, initial_state: S, f: F) -> Self {
+ Self { stream, state_f: Some(StateFn { state: initial_state, f }), future: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<B, St, S, Fut, F> Stream for Scan<St, S, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&mut S, St::Item) -> Fut,
+ Fut: Future<Output = Option<B>>,
+{
+ type Item = B;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<B>> {
+ if self.is_done_taking() {
+ return Poll::Ready(None);
+ }
+
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ let item = ready!(fut.poll(cx));
+ this.future.set(None);
+
+ if item.is_none() {
+ *this.state_f = None;
+ }
+
+ break item;
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ let state_f = this.state_f.as_mut().unwrap();
+ this.future.set(Some((state_f.f)(&mut state_f.state, item)))
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_done_taking() {
+ (0, Some(0))
+ } else {
+ (0, self.stream.size_hint().1) // can't know a lower bound, due to the scanning closure
+ }
+ }
+}
+
+impl<B, St, S, Fut, F> FusedStream for Scan<St, S, Fut, F>
+where
+ St: FusedStream,
+ F: FnMut(&mut S, St::Item) -> Fut,
+ Fut: Future<Output = Option<B>>,
+{
+ fn is_terminated(&self) -> bool {
+ self.is_done_taking() || self.future.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<St, S, Fut, F, Item> Sink<Item> for Scan<St, S, Fut, F>
+where
+ St: Stream + Sink<Item>,
+{
+ type Error = St::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/select_next_some.rs b/vendor/futures-util/src/stream/stream/select_next_some.rs
new file mode 100644
index 000000000..3115e14d9
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/select_next_some.rs
@@ -0,0 +1,42 @@
+use crate::stream::StreamExt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::FusedStream;
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`select_next_some`](super::StreamExt::select_next_some)
+/// method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct SelectNextSome<'a, St: ?Sized> {
+ stream: &'a mut St,
+}
+
+impl<'a, St: ?Sized> SelectNextSome<'a, St> {
+ pub(super) fn new(stream: &'a mut St) -> Self {
+ Self { stream }
+ }
+}
+
+impl<St: ?Sized + FusedStream + Unpin> FusedFuture for SelectNextSome<'_, St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: ?Sized + FusedStream + Unpin> Future for SelectNextSome<'_, St> {
+ type Output = St::Item;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ assert!(!self.stream.is_terminated(), "SelectNextSome polled after terminated");
+
+ if let Some(item) = ready!(self.stream.poll_next_unpin(cx)) {
+ Poll::Ready(item)
+ } else {
+ debug_assert!(self.stream.is_terminated());
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/skip.rs b/vendor/futures-util/src/stream/stream/skip.rs
new file mode 100644
index 000000000..f49577952
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/skip.rs
@@ -0,0 +1,70 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`skip`](super::StreamExt::skip) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Skip<St> {
+ #[pin]
+ stream: St,
+ remaining: usize,
+ }
+}
+
+impl<St: Stream> Skip<St> {
+ pub(super) fn new(stream: St, n: usize) -> Self {
+ Self { stream, remaining: n }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St: FusedStream> FusedStream for Skip<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: Stream> Stream for Skip<St> {
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ let mut this = self.project();
+
+ while *this.remaining > 0 {
+ if ready!(this.stream.as_mut().poll_next(cx)).is_some() {
+ *this.remaining -= 1;
+ } else {
+ return Poll::Ready(None);
+ }
+ }
+
+ this.stream.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper) = self.stream.size_hint();
+
+ let lower = lower.saturating_sub(self.remaining);
+ let upper = upper.map(|x| x.saturating_sub(self.remaining));
+
+ (lower, upper)
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Skip<S>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/skip_while.rs b/vendor/futures-util/src/stream/stream/skip_while.rs
new file mode 100644
index 000000000..50a21a21a
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/skip_while.rs
@@ -0,0 +1,124 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`skip_while`](super::StreamExt::skip_while) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct SkipWhile<St, Fut, F> where St: Stream {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Item>,
+ done_skipping: bool,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for SkipWhile<St, Fut, F>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SkipWhile")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .field("done_skipping", &self.done_skipping)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> SkipWhile<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None, done_skipping: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> FusedStream for SkipWhile<St, Fut, F>
+where
+ St: FusedStream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending_item.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F> Stream for SkipWhile<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ let mut this = self.project();
+
+ if *this.done_skipping {
+ return this.stream.poll_next(cx);
+ }
+
+ Poll::Ready(loop {
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let skipped = ready!(fut.poll(cx));
+ let item = this.pending_item.take();
+ this.pending_fut.set(None);
+ if !skipped {
+ *this.done_skipping = true;
+ break item;
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.pending_fut.set(Some((this.f)(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done_skipping {
+ self.stream.size_hint()
+ } else {
+ let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for SkipWhile<S, Fut, F>
+where
+ S: Stream + Sink<Item>,
+ F: FnMut(&S::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/split.rs b/vendor/futures-util/src/stream/stream/split.rs
new file mode 100644
index 000000000..3a72fee30
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/split.rs
@@ -0,0 +1,144 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use futures_sink::Sink;
+
+use crate::lock::BiLock;
+
+/// A `Stream` part of the split pair
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub struct SplitStream<S>(BiLock<S>);
+
+impl<S> Unpin for SplitStream<S> {}
+
+impl<S: Unpin> SplitStream<S> {
+ /// Attempts to put the two "halves" of a split `Stream + Sink` back
+ /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+ /// a matching pair originating from the same call to `StreamExt::split`.
+ pub fn reunite<Item>(self, other: SplitSink<S, Item>) -> Result<S, ReuniteError<S, Item>>
+ where
+ S: Sink<Item>,
+ {
+ other.reunite(self)
+ }
+}
+
+impl<S: Stream> Stream for SplitStream<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
+ ready!(self.0.poll_lock(cx)).as_pin_mut().poll_next(cx)
+ }
+}
+
+#[allow(bad_style)]
+fn SplitSink<S: Sink<Item>, Item>(lock: BiLock<S>) -> SplitSink<S, Item> {
+ SplitSink { lock, slot: None }
+}
+
+/// A `Sink` part of the split pair
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub struct SplitSink<S, Item> {
+ lock: BiLock<S>,
+ slot: Option<Item>,
+}
+
+impl<S, Item> Unpin for SplitSink<S, Item> {}
+
+impl<S: Sink<Item> + Unpin, Item> SplitSink<S, Item> {
+ /// Attempts to put the two "halves" of a split `Stream + Sink` back
+ /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+ /// a matching pair originating from the same call to `StreamExt::split`.
+ pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S, Item>> {
+ self.lock.reunite(other.0).map_err(|err| ReuniteError(SplitSink(err.0), SplitStream(err.1)))
+ }
+}
+
+impl<S: Sink<Item>, Item> SplitSink<S, Item> {
+ fn poll_flush_slot(
+ mut inner: Pin<&mut S>,
+ slot: &mut Option<Item>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), S::Error>> {
+ if slot.is_some() {
+ ready!(inner.as_mut().poll_ready(cx))?;
+ Poll::Ready(inner.start_send(slot.take().unwrap()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
+ }
+
+ fn poll_lock_and_flush_slot(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), S::Error>> {
+ let this = &mut *self;
+ let mut inner = ready!(this.lock.poll_lock(cx));
+ Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx)
+ }
+}
+
+impl<S: Sink<Item>, Item> Sink<Item> for SplitSink<S, Item> {
+ type Error = S::Error;
+
+ fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
+ loop {
+ if self.slot.is_none() {
+ return Poll::Ready(Ok(()));
+ }
+ ready!(self.as_mut().poll_lock_and_flush_slot(cx))?;
+ }
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), S::Error> {
+ self.slot = Some(item);
+ Ok(())
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
+ let this = &mut *self;
+ let mut inner = ready!(this.lock.poll_lock(cx));
+ ready!(Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx))?;
+ inner.as_pin_mut().poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
+ let this = &mut *self;
+ let mut inner = ready!(this.lock.poll_lock(cx));
+ ready!(Self::poll_flush_slot(inner.as_pin_mut(), &mut this.slot, cx))?;
+ inner.as_pin_mut().poll_close(cx)
+ }
+}
+
+pub(super) fn split<S: Stream + Sink<Item>, Item>(s: S) -> (SplitSink<S, Item>, SplitStream<S>) {
+ let (a, b) = BiLock::new(s);
+ let read = SplitStream(a);
+ let write = SplitSink(b);
+ (write, read)
+}
+
+/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves
+ /// of a `Stream + Sink`, and thus could not be `reunite`d.
+#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
+pub struct ReuniteError<T, Item>(pub SplitSink<T, Item>, pub SplitStream<T>);
+
+impl<T, Item> fmt::Debug for ReuniteError<T, Item> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("ReuniteError").field(&"...").finish()
+ }
+}
+
+impl<T, Item> fmt::Display for ReuniteError<T, Item> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "tried to reunite a SplitStream and SplitSink that don't form a pair")
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: core::any::Any, Item> std::error::Error for ReuniteError<T, Item> {}
diff --git a/vendor/futures-util/src/stream/stream/take.rs b/vendor/futures-util/src/stream/stream/take.rs
new file mode 100644
index 000000000..b1c728e33
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/take.rs
@@ -0,0 +1,86 @@
+use core::cmp;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`take`](super::StreamExt::take) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Take<St> {
+ #[pin]
+ stream: St,
+ remaining: usize,
+ }
+}
+
+impl<St: Stream> Take<St> {
+ pub(super) fn new(stream: St, n: usize) -> Self {
+ Self { stream, remaining: n }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St> Stream for Take<St>
+where
+ St: Stream,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ if self.remaining == 0 {
+ Poll::Ready(None)
+ } else {
+ let this = self.project();
+ let next = ready!(this.stream.poll_next(cx));
+ if next.is_some() {
+ *this.remaining -= 1;
+ } else {
+ *this.remaining = 0;
+ }
+ Poll::Ready(next)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.remaining == 0 {
+ return (0, Some(0));
+ }
+
+ let (lower, upper) = self.stream.size_hint();
+
+ let lower = cmp::min(lower, self.remaining as usize);
+
+ let upper = match upper {
+ Some(x) if x < self.remaining as usize => Some(x),
+ _ => Some(self.remaining as usize),
+ };
+
+ (lower, upper)
+ }
+}
+
+impl<St> FusedStream for Take<St>
+where
+ St: FusedStream,
+{
+ fn is_terminated(&self) -> bool {
+ self.remaining == 0 || self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for Take<S>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/take_until.rs b/vendor/futures-util/src/stream/stream/take_until.rs
new file mode 100644
index 000000000..d14f9ce10
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/take_until.rs
@@ -0,0 +1,170 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+// FIXME: docs, tests
+
+pin_project! {
+ /// Stream for the [`take_until`](super::StreamExt::take_until) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TakeUntil<St: Stream, Fut: Future> {
+ #[pin]
+ stream: St,
+ // Contains the inner Future on start and None once the inner Future is resolved
+ // or taken out by the user.
+ #[pin]
+ fut: Option<Fut>,
+ // Contains fut's return value once fut is resolved
+ fut_result: Option<Fut::Output>,
+ // Whether the future was taken out by the user.
+ free: bool,
+ }
+}
+
+impl<St, Fut> fmt::Debug for TakeUntil<St, Fut>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ Fut: Future + fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeUntil").field("stream", &self.stream).field("fut", &self.fut).finish()
+ }
+}
+
+impl<St, Fut> TakeUntil<St, Fut>
+where
+ St: Stream,
+ Fut: Future,
+{
+ pub(super) fn new(stream: St, fut: Fut) -> Self {
+ Self { stream, fut: Some(fut), fut_result: None, free: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+
+ /// Extract the stopping future out of the combinator.
+ /// The future is returned only if it isn't resolved yet, i.e. if the stream isn't stopped yet.
+ /// Taking out the future means the combinator will be yielding
+ /// elements from the wrapped stream without ever stopping it.
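+ ///
+ /// # Examples
+ ///
+ /// A short sketch: taking the future out before polling means the stream is
+ /// never cut short.
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ ///
+ /// let mut stream = stream::iter(1..=10).take_until(future::ready(()));
+ ///
+ /// // Extract the stopping future; the stream now yields every element.
+ /// let _fut = stream.take_future().unwrap();
+ ///
+ /// assert_eq!(stream.collect::<Vec<_>>().await, (1..=10).collect::<Vec<_>>());
+ /// # });
+ /// ```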
+ pub fn take_future(&mut self) -> Option<Fut> {
+ if self.fut.is_some() {
+ self.free = true;
+ }
+
+ self.fut.take()
+ }
+
+ /// Once the stopping future is resolved, this method can be used
+ /// to extract the value returned by the stopping future.
+ ///
+ /// This may be used to retrieve arbitrary data from the stopping
+ /// future, for example a reason why the stream was stopped.
+ ///
+ /// This method will return `None` if the future isn't resolved yet,
+ /// or if the result was already taken out.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt};
+ /// use futures::task::Poll;
+ ///
+ /// let stream = stream::iter(1..=10);
+ ///
+ /// let mut i = 0;
+ /// let stop_fut = future::poll_fn(|_cx| {
+ /// i += 1;
+ /// if i <= 5 {
+ /// Poll::Pending
+ /// } else {
+ /// Poll::Ready("reason")
+ /// }
+ /// });
+ ///
+ /// let mut stream = stream.take_until(stop_fut);
+ /// let _ = stream.by_ref().collect::<Vec<_>>().await;
+ ///
+ /// let result = stream.take_result().unwrap();
+ /// assert_eq!(result, "reason");
+ /// # });
+ /// ```
+ pub fn take_result(&mut self) -> Option<Fut::Output> {
+ self.fut_result.take()
+ }
+
+ /// Whether the stream was stopped yet by the stopping future
+ /// being resolved.
+ pub fn is_stopped(&self) -> bool {
+ !self.free && self.fut.is_none()
+ }
+}
+
+impl<St, Fut> Stream for TakeUntil<St, Fut>
+where
+ St: Stream,
+ Fut: Future,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ let mut this = self.project();
+
+ if let Some(f) = this.fut.as_mut().as_pin_mut() {
+ if let Poll::Ready(result) = f.poll(cx) {
+ this.fut.set(None);
+ *this.fut_result = Some(result);
+ }
+ }
+
+ if !*this.free && this.fut.is_none() {
+ // Future resolved, inner stream stopped
+ Poll::Ready(None)
+ } else {
+ // Future either not resolved yet or taken out by the user
+ let item = ready!(this.stream.poll_next(cx));
+ if item.is_none() {
+ this.fut.set(None);
+ }
+ Poll::Ready(item)
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_stopped() {
+ return (0, Some(0));
+ }
+
+ self.stream.size_hint()
+ }
+}
+
+impl<St, Fut> FusedStream for TakeUntil<St, Fut>
+where
+ St: Stream,
+ Fut: Future,
+{
+ fn is_terminated(&self) -> bool {
+ self.is_stopped()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, Item> Sink<Item> for TakeUntil<S, Fut>
+where
+ S: Stream + Sink<Item>,
+ Fut: Future,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/take_while.rs b/vendor/futures-util/src/stream/stream/take_while.rs
new file mode 100644
index 000000000..01b27654b
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/take_while.rs
@@ -0,0 +1,124 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`take_while`](super::StreamExt::take_while) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TakeWhile<St: Stream, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Item>,
+ done_taking: bool,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TakeWhile<St, Fut, F>
+where
+ St: Stream + fmt::Debug,
+ St::Item: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeWhile")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .field("done_taking", &self.done_taking)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TakeWhile<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None, done_taking: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> Stream for TakeWhile<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ type Item = St::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
+ if self.done_taking {
+ return Poll::Ready(None);
+ }
+
+ let mut this = self.project();
+
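+ // Either finish polling the pending predicate future for a buffered item,
+ // or pull the next item from the inner stream and start its predicate.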
+ Poll::Ready(loop {
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let take = ready!(fut.poll(cx));
+ let item = this.pending_item.take();
+ this.pending_fut.set(None);
+ if take {
+ break item;
+ } else {
+ *this.done_taking = true;
+ break None;
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.pending_fut.set(Some((this.f)(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done_taking {
+ return (0, Some(0));
+ }
+
+ let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+impl<St, Fut, F> FusedStream for TakeWhile<St, Fut, F>
+where
+ St: FusedStream,
+ F: FnMut(&St::Item) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.done_taking || self.pending_item.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for TakeWhile<S, Fut, F>
+where
+ S: Stream + Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/then.rs b/vendor/futures-util/src/stream/stream/then.rs
new file mode 100644
index 000000000..d4531d4b9
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/then.rs
@@ -0,0 +1,101 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`then`](super::StreamExt::then) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Then<St, Fut, F> {
+ #[pin]
+ stream: St,
+ #[pin]
+ future: Option<Fut>,
+ f: F,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for Then<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Then").field("stream", &self.stream).field("future", &self.future).finish()
+ }
+}
+
+impl<St, Fut, F> Then<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, future: None, f }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> FusedStream for Then<St, Fut, F>
+where
+ St: FusedStream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future,
+{
+ fn is_terminated(&self) -> bool {
+ self.future.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F> Stream for Then<St, Fut, F>
+where
+ St: Stream,
+ F: FnMut(St::Item) -> Fut,
+ Fut: Future,
+{
+ type Item = Fut::Output;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ let item = ready!(fut.poll(cx));
+ this.future.set(None);
+ break Some(item);
+ } else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
+ this.future.set(Some((this.f)(item)));
+ } else {
+ break None;
+ }
+ })
+ }
+
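+ // A pending future still owes one item, so both bounds account for it; the
+ // upper bound becomes `None` on overflow.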
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let future_len = if self.future.is_some() { 1 } else { 0 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(future_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(future_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for Then<S, Fut, F>
+where
+ S: Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/stream/unzip.rs b/vendor/futures-util/src/stream/stream/unzip.rs
new file mode 100644
index 000000000..15f22e80b
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/unzip.rs
@@ -0,0 +1,63 @@
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`unzip`](super::StreamExt::unzip) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct Unzip<St, FromA, FromB> {
+ #[pin]
+ stream: St,
+ left: FromA,
+ right: FromB,
+ }
+}
+
+impl<St: Stream, FromA: Default, FromB: Default> Unzip<St, FromA, FromB> {
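+ // Drains the accumulated collections, leaving fresh defaults behind, so the
+ // finished pair can be returned by value once the input stream ends.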
+ fn finish(self: Pin<&mut Self>) -> (FromA, FromB) {
+ let this = self.project();
+ (mem::take(this.left), mem::take(this.right))
+ }
+
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, left: Default::default(), right: Default::default() }
+ }
+}
+
+impl<St, A, B, FromA, FromB> FusedFuture for Unzip<St, FromA, FromB>
+where
+ St: FusedStream<Item = (A, B)>,
+ FromA: Default + Extend<A>,
+ FromB: Default + Extend<B>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St, A, B, FromA, FromB> Future for Unzip<St, FromA, FromB>
+where
+ St: Stream<Item = (A, B)>,
+ FromA: Default + Extend<A>,
+ FromB: Default + Extend<B>,
+{
+ type Output = (FromA, FromB);
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(FromA, FromB)> {
+ let mut this = self.as_mut().project();
+ loop {
+ match ready!(this.stream.as_mut().poll_next(cx)) {
+ Some(e) => {
+ this.left.extend(Some(e.0));
+ this.right.extend(Some(e.1));
+ }
+ None => return Poll::Ready(self.finish()),
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/stream/zip.rs b/vendor/futures-util/src/stream/stream/zip.rs
new file mode 100644
index 000000000..360a8b63b
--- /dev/null
+++ b/vendor/futures-util/src/stream/stream/zip.rs
@@ -0,0 +1,128 @@
+use crate::stream::{Fuse, StreamExt};
+use core::cmp;
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`zip`](super::StreamExt::zip) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Zip<St1: Stream, St2: Stream> {
+ #[pin]
+ stream1: Fuse<St1>,
+ #[pin]
+ stream2: Fuse<St2>,
+ queued1: Option<St1::Item>,
+ queued2: Option<St2::Item>,
+ }
+}
+
+impl<St1: Stream, St2: Stream> Zip<St1, St2> {
+ pub(super) fn new(stream1: St1, stream2: St2) -> Self {
+ Self { stream1: stream1.fuse(), stream2: stream2.fuse(), queued1: None, queued2: None }
+ }
+
+ /// Acquires a reference to the underlying streams that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> (&St1, &St2) {
+ (self.stream1.get_ref(), self.stream2.get_ref())
+ }
+
+ /// Acquires a mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> (&mut St1, &mut St2) {
+ (self.stream1.get_mut(), self.stream2.get_mut())
+ }
+
+ /// Acquires a pinned mutable reference to the underlying streams that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut St1>, Pin<&mut St2>) {
+ let this = self.project();
+ (this.stream1.get_pin_mut(), this.stream2.get_pin_mut())
+ }
+
+ /// Consumes this combinator, returning the underlying streams.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> (St1, St2) {
+ (self.stream1.into_inner(), self.stream2.into_inner())
+ }
+}
+
+impl<St1, St2> FusedStream for Zip<St1, St2>
+where
+ St1: Stream,
+ St2: Stream,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream1.is_terminated() && self.stream2.is_terminated()
+ }
+}
+
+impl<St1, St2> Stream for Zip<St1, St2>
+where
+ St1: Stream,
+ St2: Stream,
+{
+ type Item = (St1::Item, St2::Item);
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if this.queued1.is_none() {
+ match this.stream1.as_mut().poll_next(cx) {
+ Poll::Ready(Some(item1)) => *this.queued1 = Some(item1),
+ Poll::Ready(None) | Poll::Pending => {}
+ }
+ }
+ if this.queued2.is_none() {
+ match this.stream2.as_mut().poll_next(cx) {
+ Poll::Ready(Some(item2)) => *this.queued2 = Some(item2),
+ Poll::Ready(None) | Poll::Pending => {}
+ }
+ }
+
+ if this.queued1.is_some() && this.queued2.is_some() {
+ let pair = (this.queued1.take().unwrap(), this.queued2.take().unwrap());
+ Poll::Ready(Some(pair))
+ } else if this.stream1.is_done() || this.stream2.is_done() {
+ Poll::Ready(None)
+ } else {
+ Poll::Pending
+ }
+ }
+
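+ // The zipped stream ends with the shorter input, so take the pairwise
+ // minimum of the bounds, each adjusted for an item already queued locally.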
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let queued1_len = if self.queued1.is_some() { 1 } else { 0 };
+ let queued2_len = if self.queued2.is_some() { 1 } else { 0 };
+ let (stream1_lower, stream1_upper) = self.stream1.size_hint();
+ let (stream2_lower, stream2_upper) = self.stream2.size_hint();
+
+ let stream1_lower = stream1_lower.saturating_add(queued1_len);
+ let stream2_lower = stream2_lower.saturating_add(queued2_len);
+
+ let lower = cmp::min(stream1_lower, stream2_lower);
+
+ let upper = match (stream1_upper, stream2_upper) {
+ (Some(x), Some(y)) => {
+ let x = x.saturating_add(queued1_len);
+ let y = y.saturating_add(queued2_len);
+ Some(cmp::min(x, y))
+ }
+ (Some(x), None) => x.checked_add(queued1_len),
+ (None, Some(y)) => y.checked_add(queued2_len),
+ (None, None) => None,
+ };
+
+ (lower, upper)
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/and_then.rs b/vendor/futures-util/src/stream/try_stream/and_then.rs
new file mode 100644
index 000000000..a7b50db0b
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/and_then.rs
@@ -0,0 +1,105 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`and_then`](super::TryStreamExt::and_then) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct AndThen<St, Fut, F> {
+ #[pin]
+ stream: St,
+ #[pin]
+ future: Option<Fut>,
+ f: F,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for AndThen<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AndThen")
+ .field("stream", &self.stream)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> AndThen<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: TryFuture<Error = St::Error>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, future: None, f }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> Stream for AndThen<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: TryFuture<Error = St::Error>,
+{
+ type Item = Result<Fut::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ let item = ready!(fut.try_poll(cx));
+ this.future.set(None);
+ break Some(item);
+ } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ this.future.set(Some((this.f)(item)));
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let future_len = if self.future.is_some() { 1 } else { 0 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(future_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(future_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St, Fut, F> FusedStream for AndThen<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: TryFuture<Error = St::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ self.future.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for AndThen<S, Fut, F>
+where
+ S: Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/into_async_read.rs b/vendor/futures-util/src/stream/try_stream/into_async_read.rs
new file mode 100644
index 000000000..914b277a0
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/into_async_read.rs
@@ -0,0 +1,165 @@
+use crate::stream::TryStreamExt;
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::TryStream;
+use futures_core::task::{Context, Poll};
+use futures_io::{AsyncBufRead, AsyncRead, AsyncWrite};
+use std::cmp;
+use std::io::{Error, Result};
+
+/// Reader for the [`into_async_read`](super::TryStreamExt::into_async_read) method.
+#[derive(Debug)]
+#[must_use = "readers do nothing unless polled"]
+#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
+pub struct IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+ stream: St,
+ state: ReadState<St::Ok>,
+}
+
+impl<St> Unpin for IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+}
+
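+// State machine for `IntoAsyncRead`: `Ready` holds the current chunk and the
+// offset already handed to readers, `PendingChunk` means the next chunk must
+// be pulled from the stream, and `Eof` is entered once the stream finishes or
+// yields an error.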
+#[derive(Debug)]
+enum ReadState<T: AsRef<[u8]>> {
+ Ready { chunk: T, chunk_start: usize },
+ PendingChunk,
+ Eof,
+}
+
+impl<St> IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, state: ReadState::PendingChunk }
+ }
+}
+
+impl<St> AsyncRead for IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<Result<usize>> {
+ loop {
+ match &mut self.state {
+ ReadState::Ready { chunk, chunk_start } => {
+ let chunk = chunk.as_ref();
+ let len = cmp::min(buf.len(), chunk.len() - *chunk_start);
+
+ buf[..len].copy_from_slice(&chunk[*chunk_start..*chunk_start + len]);
+ *chunk_start += len;
+
+ if chunk.len() == *chunk_start {
+ self.state = ReadState::PendingChunk;
+ }
+
+ return Poll::Ready(Ok(len));
+ }
+ ReadState::PendingChunk => match ready!(self.stream.try_poll_next_unpin(cx)) {
+ Some(Ok(chunk)) => {
+ if !chunk.as_ref().is_empty() {
+ self.state = ReadState::Ready { chunk, chunk_start: 0 };
+ }
+ }
+ Some(Err(err)) => {
+ self.state = ReadState::Eof;
+ return Poll::Ready(Err(err));
+ }
+ None => {
+ self.state = ReadState::Eof;
+ return Poll::Ready(Ok(0));
+ }
+ },
+ ReadState::Eof => {
+ return Poll::Ready(Ok(0));
+ }
+ }
+ }
+ }
+}
+
+impl<St> AsyncWrite for IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + AsyncWrite + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize>> {
+ Pin::new(&mut self.stream).poll_write(cx, buf)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ Pin::new(&mut self.stream).poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
+ Pin::new(&mut self.stream).poll_close(cx)
+ }
+}
+
+impl<St> AsyncBufRead for IntoAsyncRead<St>
+where
+ St: TryStream<Error = Error> + Unpin,
+ St::Ok: AsRef<[u8]>,
+{
+ fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
+ while let ReadState::PendingChunk = self.state {
+ match ready!(self.stream.try_poll_next_unpin(cx)) {
+ Some(Ok(chunk)) => {
+ if !chunk.as_ref().is_empty() {
+ self.state = ReadState::Ready { chunk, chunk_start: 0 };
+ }
+ }
+ Some(Err(err)) => {
+ self.state = ReadState::Eof;
+ return Poll::Ready(Err(err));
+ }
+ None => {
+ self.state = ReadState::Eof;
+ return Poll::Ready(Ok(&[]));
+ }
+ }
+ }
+
+ if let ReadState::Ready { ref chunk, chunk_start } = self.into_ref().get_ref().state {
+ let chunk = chunk.as_ref();
+ return Poll::Ready(Ok(&chunk[chunk_start..]));
+ }
+
+ // To get to this point we must be in ReadState::Eof
+ Poll::Ready(Ok(&[]))
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amount: usize) {
+ // https://github.com/rust-lang/futures-rs/pull/1556#discussion_r281644295
+ if amount == 0 {
+ return;
+ }
+ if let ReadState::Ready { chunk, chunk_start } = &mut self.state {
+ *chunk_start += amount;
+ debug_assert!(*chunk_start <= chunk.as_ref().len());
+ if *chunk_start >= chunk.as_ref().len() {
+ self.state = ReadState::PendingChunk;
+ }
+ } else {
+ debug_assert!(false, "Attempted to consume from IntoAsyncRead without chunk");
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/into_stream.rs b/vendor/futures-util/src/stream/try_stream/into_stream.rs
new file mode 100644
index 000000000..2126258af
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/into_stream.rs
@@ -0,0 +1,52 @@
+use core::pin::Pin;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`into_stream`](super::TryStreamExt::into_stream) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct IntoStream<St> {
+ #[pin]
+ stream: St,
+ }
+}
+
+impl<St> IntoStream<St> {
+ #[inline]
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St: TryStream + FusedStream> FusedStream for IntoStream<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: TryStream> Stream for IntoStream<St> {
+ type Item = Result<St::Ok, St::Error>;
+
+ #[inline]
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.project().stream.try_poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S: Sink<Item>, Item> Sink<Item> for IntoStream<S> {
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/mod.rs b/vendor/futures-util/src/stream/try_stream/mod.rs
new file mode 100644
index 000000000..455ddca3f
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/mod.rs
@@ -0,0 +1,1064 @@
+//! Streams
+//!
+//! This module contains a number of functions for working with `Stream`s
+//! that return `Result`s, allowing for short-circuiting computations.
+
+#[cfg(feature = "compat")]
+use crate::compat::Compat;
+use crate::fns::{
+ inspect_err_fn, inspect_ok_fn, into_fn, map_err_fn, map_ok_fn, InspectErrFn, InspectOkFn,
+ IntoFn, MapErrFn, MapOkFn,
+};
+use crate::future::assert_future;
+use crate::stream::assert_stream;
+use crate::stream::{Inspect, Map};
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+use core::pin::Pin;
+use futures_core::{
+ future::{Future, TryFuture},
+ stream::TryStream,
+ task::{Context, Poll},
+};
+
+mod and_then;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::and_then::AndThen;
+
+delegate_all!(
+ /// Stream for the [`err_into`](super::TryStreamExt::err_into) method.
+ ErrInto<St, E>(
+ MapErr<St, IntoFn<E>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (.)] + New[|x: St| MapErr::new(x, into_fn())]
+);
+
+delegate_all!(
+ /// Stream for the [`inspect_ok`](super::TryStreamExt::inspect_ok) method.
+ InspectOk<St, F>(
+ Inspect<IntoStream<St>, InspectOkFn<F>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_ok_fn(f))]
+);
+
+delegate_all!(
+ /// Stream for the [`inspect_err`](super::TryStreamExt::inspect_err) method.
+ InspectErr<St, F>(
+ Inspect<IntoStream<St>, InspectErrFn<F>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Inspect::new(IntoStream::new(x), inspect_err_fn(f))]
+);
+
+mod into_stream;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::into_stream::IntoStream;
+
+delegate_all!(
+ /// Stream for the [`map_ok`](super::TryStreamExt::map_ok) method.
+ MapOk<St, F>(
+ Map<IntoStream<St>, MapOkFn<F>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_ok_fn(f))]
+);
+
+delegate_all!(
+ /// Stream for the [`map_err`](super::TryStreamExt::map_err) method.
+ MapErr<St, F>(
+ Map<IntoStream<St>, MapErrFn<F>>
+ ): Debug + Sink + Stream + FusedStream + AccessInner[St, (. .)] + New[|x: St, f: F| Map::new(IntoStream::new(x), map_err_fn(f))]
+);
+
+mod or_else;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::or_else::OrElse;
+
+mod try_next;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_next::TryNext;
+
+mod try_for_each;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_for_each::TryForEach;
+
+mod try_filter;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_filter::TryFilter;
+
+mod try_filter_map;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_filter_map::TryFilterMap;
+
+mod try_flatten;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_flatten::TryFlatten;
+
+mod try_collect;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_collect::TryCollect;
+
+mod try_concat;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_concat::TryConcat;
+
+#[cfg(feature = "alloc")]
+mod try_chunks;
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_chunks::{TryChunks, TryChunksError};
+
+mod try_fold;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_fold::TryFold;
+
+mod try_unfold;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_unfold::{try_unfold, TryUnfold};
+
+mod try_skip_while;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_skip_while::TrySkipWhile;
+
+mod try_take_while;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_take_while::TryTakeWhile;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod try_buffer_unordered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_buffer_unordered::TryBufferUnordered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod try_buffered;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_buffered::TryBuffered;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+mod try_for_each_concurrent;
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::try_for_each_concurrent::TryForEachConcurrent;
+
+#[cfg(feature = "io")]
+#[cfg(feature = "std")]
+mod into_async_read;
+#[cfg(feature = "io")]
+#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
+#[cfg(feature = "std")]
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
+pub use self::into_async_read::IntoAsyncRead;
+
+impl<S: ?Sized + TryStream> TryStreamExt for S {}
+
+/// Adapters specific to `Result`-returning streams
+pub trait TryStreamExt: TryStream {
+ /// Wraps the current stream in a new stream which converts the error type
+ /// into the one provided.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let mut stream =
+ /// stream::iter(vec![Ok(()), Err(5i32)])
+ /// .err_into::<i64>();
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(())));
+ /// assert_eq!(stream.try_next().await, Err(5i64));
+ /// # })
+ /// ```
+ fn err_into<E>(self) -> ErrInto<Self, E>
+ where
+ Self: Sized,
+ Self::Error: Into<E>,
+ {
+ assert_stream::<Result<Self::Ok, E>, _>(ErrInto::new(self))
+ }
+
+ /// Wraps the current stream in a new stream which maps the success value
+ /// using the provided closure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let mut stream =
+ /// stream::iter(vec![Ok(5), Err(0)])
+ /// .map_ok(|x| x + 2);
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(7)));
+ /// assert_eq!(stream.try_next().await, Err(0));
+ /// # })
+ /// ```
+ fn map_ok<T, F>(self, f: F) -> MapOk<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Ok) -> T,
+ {
+ assert_stream::<Result<T, Self::Error>, _>(MapOk::new(self, f))
+ }
+
+ /// Wraps the current stream in a new stream which maps the error value
+ /// using the provided closure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let mut stream =
+ /// stream::iter(vec![Ok(5), Err(0)])
+ /// .map_err(|x| x + 2);
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(5)));
+ /// assert_eq!(stream.try_next().await, Err(2));
+ /// # })
+ /// ```
+ fn map_err<E, F>(self, f: F) -> MapErr<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Error) -> E,
+ {
+ assert_stream::<Result<Self::Ok, E>, _>(MapErr::new(self, f))
+ }
+
+ /// Chain on a computation for when a value is ready, passing the successful
+ /// results to the provided closure `f`.
+ ///
+ /// This function can be used to run a unit of work when the next successful
+ /// value on a stream is ready. The closure provided will be yielded a value
+ /// when ready, and the returned future will then be run to completion to
+ /// produce the next value on this stream.
+ ///
+ /// Any errors produced by this stream will not be passed to the closure;
+ /// they will be passed through unchanged.
+ ///
+ /// The returned value of the closure must implement the `TryFuture` trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ ///
+ /// To process the entire stream and return a single future representing
+ /// success or error, use `try_for_each` instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::channel::mpsc;
+ /// use futures::future;
+ /// use futures::stream::TryStreamExt;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<Result<i32, ()>>(1);
+ ///
+ /// let rx = rx.and_then(|result| {
+ /// future::ok(if result % 2 == 0 {
+ /// Some(result)
+ /// } else {
+ /// None
+ /// })
+ /// });
+ /// ```
+ fn and_then<Fut, F>(self, f: F) -> AndThen<Self, Fut, F>
+ where
+ F: FnMut(Self::Ok) -> Fut,
+ Fut: TryFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<Fut::Ok, Fut::Error>, _>(AndThen::new(self, f))
+ }
+
+ /// Chain on a computation for when an error happens, passing the
+ /// erroneous result to the provided closure `f`.
+ ///
+ /// This function can be used to run a unit of work and attempt to recover from
+ /// an error if one happens. The closure provided will be yielded an error
+ /// when one appears, and the returned future will then be run to completion
+ /// to produce the next value on this stream.
+ ///
+ /// Any successful values produced by this stream will not be passed to the
+ /// closure; they will be passed through unchanged.
+ ///
+ /// The returned value of the closure must implement the [`TryFuture`](futures_core::future::TryFuture) trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
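+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch added in this vendored copy (not part of the
+ /// upstream docs):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Err(2), Ok(3)]);
+ /// // Recover from each error by mapping it to a successful value.
+ /// let stream = stream.or_else(|e| future::ok::<i32, i32>(e * 10));
+ ///
+ /// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
+ /// assert_eq!(output, Ok(vec![1, 20, 3]));
+ /// # })
+ /// ```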
+ fn or_else<Fut, F>(self, f: F) -> OrElse<Self, Fut, F>
+ where
+ F: FnMut(Self::Error) -> Fut,
+ Fut: TryFuture<Ok = Self::Ok>,
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Fut::Error>, _>(OrElse::new(self, f))
+ }
+
+ /// Do something with the success value of this stream, afterwards passing
+ /// it on.
+ ///
+ /// This is similar to the `StreamExt::inspect` method where it allows
+ /// easily inspecting the success value as it passes through the stream, for
+ /// example to debug what's going on.
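+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch added in this vendored copy (not part of the
+ /// upstream docs):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Err(2)]);
+ /// // Print each success value as it passes through; errors are untouched.
+ /// let mut stream = stream.inspect_ok(|x| println!("saw ok value: {}", x));
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(1)));
+ /// assert_eq!(stream.try_next().await, Err(2));
+ /// # })
+ /// ```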
+ fn inspect_ok<F>(self, f: F) -> InspectOk<Self, F>
+ where
+ F: FnMut(&Self::Ok),
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(InspectOk::new(self, f))
+ }
+
+ /// Do something with the error value of this stream, afterwards passing it on.
+ ///
+ /// This is similar to the `StreamExt::inspect` method where it allows
+ /// easily inspecting the error value as it passes through the stream, for
+ /// example to debug what's going on.
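+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch added in this vendored copy (not part of the
+ /// upstream docs):
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Err(2)]);
+ /// // Log each error as it passes through; success values are untouched.
+ /// let mut stream = stream.inspect_err(|e| eprintln!("saw error: {}", e));
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(1)));
+ /// assert_eq!(stream.try_next().await, Err(2));
+ /// # })
+ /// ```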
+ fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
+ where
+ F: FnMut(&Self::Error),
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(InspectErr::new(self, f))
+ }
+
+ /// Wraps a [`TryStream`] into a type that implements
+ /// [`Stream`](futures_core::stream::Stream)
+ ///
+ /// [`TryStream`]s currently do not implement the
+ /// [`Stream`](futures_core::stream::Stream) trait because of limitations
+ /// of the compiler.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::stream::{Stream, TryStream, TryStreamExt};
+ ///
+ /// # type T = i32;
+ /// # type E = ();
+ /// fn make_try_stream() -> impl TryStream<Ok = T, Error = E> { // ... }
+ /// # futures::stream::empty()
+ /// # }
+ /// fn take_stream(stream: impl Stream<Item = Result<T, E>>) { /* ... */ }
+ ///
+ /// take_stream(make_try_stream().into_stream());
+ /// ```
+ fn into_stream(self) -> IntoStream<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(IntoStream::new(self))
+ }
+
+ /// Creates a future that attempts to resolve the next item in the stream.
+ /// If an error is encountered before the next item, the error is returned
+ /// instead.
+ ///
+ /// This is similar to the `Stream::next` combinator, but returns a
+ /// `Result<Option<T>, E>` rather than an `Option<Result<T, E>>`, making
+ /// for easy use with the `?` operator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let mut stream = stream::iter(vec![Ok(()), Err(())]);
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(())));
+ /// assert_eq!(stream.try_next().await, Err(()));
+ /// # })
+ /// ```
+ fn try_next(&mut self) -> TryNext<'_, Self>
+ where
+ Self: Unpin,
+ {
+ assert_future::<Result<Option<Self::Ok>, Self::Error>, _>(TryNext::new(self))
+ }
+
+ /// Attempts to run this stream to completion, executing the provided
+ /// asynchronous closure for each element on the stream.
+ ///
+ /// The provided closure will be called for each item this stream produces,
+ /// yielding a future. That future will then be executed to completion
+ /// before moving on to the next item.
+ ///
+ /// The returned value is a [`Future`](futures_core::future::Future) where the
+ /// [`Output`](futures_core::future::Future::Output) type is
+ /// `Result<(), Self::Error>`. If any of the intermediate
+ /// futures or the stream returns an error, this future will return
+ /// immediately with an error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let mut x = 0i32;
+ ///
+ /// {
+ /// let fut = stream::repeat(Ok(1)).try_for_each(|item| {
+ /// x += item;
+ /// future::ready(if x == 3 { Err(()) } else { Ok(()) })
+ /// });
+ /// assert_eq!(fut.await, Err(()));
+ /// }
+ ///
+ /// assert_eq!(x, 3);
+ /// # })
+ /// ```
+ fn try_for_each<Fut, F>(self, f: F) -> TryForEach<Self, Fut, F>
+ where
+ F: FnMut(Self::Ok) -> Fut,
+ Fut: TryFuture<Ok = (), Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_future::<Result<(), Self::Error>, _>(TryForEach::new(self, f))
+ }
+
+ /// Skip elements on this stream while the provided asynchronous predicate
+ /// resolves to `true`.
+ ///
+ /// This function is similar to
+ /// [`StreamExt::skip_while`](crate::stream::StreamExt::skip_while) but exits
+ /// early if an error occurs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(3), Ok(2)]);
+ /// let stream = stream.try_skip_while(|x| future::ready(Ok(*x < 3)));
+ ///
+ /// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
+ /// assert_eq!(output, Ok(vec![3, 2]));
+ /// # })
+ /// ```
+ fn try_skip_while<Fut, F>(self, f: F) -> TrySkipWhile<Self, Fut, F>
+ where
+ F: FnMut(&Self::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(TrySkipWhile::new(self, f))
+ }
+
+ /// Take elements on this stream while the provided asynchronous predicate
+ /// resolves to `true`.
+ ///
+ /// This function is similar to
+ /// [`StreamExt::take_while`](crate::stream::StreamExt::take_while) but exits
+ /// early if an error occurs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2), Ok(3), Ok(2)]);
+ /// let stream = stream.try_take_while(|x| future::ready(Ok(*x < 3)));
+ ///
+ /// let output: Result<Vec<i32>, i32> = stream.try_collect().await;
+ /// assert_eq!(output, Ok(vec![1, 2]));
+ /// # })
+ /// ```
+ fn try_take_while<Fut, F>(self, f: F) -> TryTakeWhile<Self, Fut, F>
+ where
+ F: FnMut(&Self::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(TryTakeWhile::new(self, f))
+ }
+
+ /// Attempts to run this stream to completion, executing the provided asynchronous
+ /// closure for each element on the stream concurrently as elements become
+ /// available, exiting as soon as an error occurs.
+ ///
+ /// This is similar to
+ /// [`StreamExt::for_each_concurrent`](crate::stream::StreamExt::for_each_concurrent),
+ /// but will resolve to an error immediately if the underlying stream or the provided
+ /// closure return an error.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::oneshot;
+ /// use futures::stream::{self, StreamExt, TryStreamExt};
+ ///
+ /// let (tx1, rx1) = oneshot::channel();
+ /// let (tx2, rx2) = oneshot::channel();
+ /// let (_tx3, rx3) = oneshot::channel();
+ ///
+ /// let stream = stream::iter(vec![rx1, rx2, rx3]);
+ /// let fut = stream.map(Ok).try_for_each_concurrent(
+ /// /* limit */ 2,
+ /// |rx| async move {
+ /// let res: Result<(), oneshot::Canceled> = rx.await;
+ /// res
+ /// }
+ /// );
+ ///
+ /// tx1.send(()).unwrap();
+ /// // Drop the second sender so that `rx2` resolves to `Canceled`.
+ /// drop(tx2);
+ ///
+ /// // The final result is an error because the second future
+ /// // resulted in an error.
+ /// assert_eq!(Err(oneshot::Canceled), fut.await);
+ /// # })
+ /// ```
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn try_for_each_concurrent<Fut, F>(
+ self,
+ limit: impl Into<Option<usize>>,
+ f: F,
+ ) -> TryForEachConcurrent<Self, Fut, F>
+ where
+ F: FnMut(Self::Ok) -> Fut,
+ Fut: Future<Output = Result<(), Self::Error>>,
+ Self: Sized,
+ {
+ assert_future::<Result<(), Self::Error>, _>(TryForEachConcurrent::new(
+ self,
+ limit.into(),
+ f,
+ ))
+ }
+
+ /// Attempt to transform a stream into a collection,
+ /// returning a future representing the result of that computation.
+ ///
+ /// This combinator will collect all successful results of this stream and
+ /// collect them into the specified collection type. If an error happens then all
+ /// collected elements will be dropped and the error will be returned.
+ ///
+ /// The returned future will be resolved when the stream terminates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::TryStreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// for i in 1..=5 {
+ /// tx.unbounded_send(Ok(i)).unwrap();
+ /// }
+ /// tx.unbounded_send(Err(6)).unwrap();
+ /// });
+ ///
+ /// let output: Result<Vec<i32>, i32> = rx.try_collect().await;
+ /// assert_eq!(output, Err(6));
+ /// # })
+ /// ```
+ fn try_collect<C: Default + Extend<Self::Ok>>(self) -> TryCollect<Self, C>
+ where
+ Self: Sized,
+ {
+ assert_future::<Result<C, Self::Error>, _>(TryCollect::new(self))
+ }
+
+ /// An adaptor for chunking up successful items of the stream inside a vector.
+ ///
+ /// This combinator will attempt to pull successful items from this stream and buffer
+ /// them into a local vector. At most `capacity` items will get buffered
+ /// before they're yielded from the returned stream.
+ ///
+ /// Note that the vectors returned from this stream may not always have
+ /// `capacity` elements. If the underlying stream ended and only a partial
+ /// vector was created, it'll be returned. Additionally if an error happens
+ /// from the underlying stream then the currently buffered items will be
+ /// yielded.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// This function is similar to
+ /// [`StreamExt::chunks`](crate::stream::StreamExt::chunks) but exits
+ /// early if an error occurs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryChunksError, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2), Ok(3), Err(4), Ok(5), Ok(6)]);
+ /// let mut stream = stream.try_chunks(2);
+ ///
+ /// assert_eq!(stream.try_next().await, Ok(Some(vec![1, 2])));
+ /// assert_eq!(stream.try_next().await, Err(TryChunksError(vec![3], 4)));
+ /// assert_eq!(stream.try_next().await, Ok(Some(vec![5, 6])));
+ /// # })
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This method will panic if `capacity` is zero.
+ #[cfg(feature = "alloc")]
+ fn try_chunks(self, capacity: usize) -> TryChunks<Self>
+ where
+ Self: Sized,
+ {
+ assert_stream::<Result<Vec<Self::Ok>, TryChunksError<Self::Ok, Self::Error>>, _>(
+ TryChunks::new(self, capacity),
+ )
+ }
+
+ /// Attempt to filter the values produced by this stream according to the
+ /// provided asynchronous closure.
+ ///
+ /// As values of this stream are made available, the provided predicate `f`
+ /// will be run on them. If the predicate returns a `Future` which resolves
+ /// to `true`, then the stream will yield the value, but if the predicate
+ /// returns a `Future` which resolves to `false`, then the value will be
+ /// discarded and the next value will be produced.
+ ///
+ /// All errors are passed through without filtering in this combinator.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `filter` methods in
+ /// the standard library.
+ ///
+ /// # Examples
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::future;
+ /// use futures::stream::{self, StreamExt, TryStreamExt};
+ ///
+ /// let stream = stream::iter(vec![Ok(1i32), Ok(2i32), Ok(3i32), Err("error")]);
+ /// let mut evens = stream.try_filter(|x| {
+ /// future::ready(x % 2 == 0)
+ /// });
+ ///
+ /// assert_eq!(evens.next().await, Some(Ok(2)));
+ /// assert_eq!(evens.next().await, Some(Err("error")));
+ /// # })
+ /// ```
+ fn try_filter<Fut, F>(self, f: F) -> TryFilter<Self, Fut, F>
+ where
+ Fut: Future<Output = bool>,
+ F: FnMut(&Self::Ok) -> Fut,
+ Self: Sized,
+ {
+ assert_stream::<Result<Self::Ok, Self::Error>, _>(TryFilter::new(self, f))
+ }
+
+ /// Attempt to filter the values produced by this stream while
+ /// simultaneously mapping them to a different type according to the
+ /// provided asynchronous closure.
+ ///
+ /// As values of this stream are made available, the provided function will
+ /// be run on them. If the future returned by the predicate `f` resolves to
+ /// [`Some(item)`](Some) then the stream will yield the value `item`, but if
+ /// it resolves to [`None`] then the next value will be produced.
+ ///
+ /// All errors are passed through without filtering in this combinator.
+ ///
+ /// Note that this function consumes the stream passed into it and returns a
+ /// wrapped version of it, similar to the existing `filter_map` methods in
+ /// the standard library.
+ ///
+ /// # Examples
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, StreamExt, TryStreamExt};
+ /// use futures::pin_mut;
+ ///
+ /// let stream = stream::iter(vec![Ok(1i32), Ok(6i32), Err("error")]);
+ /// let halves = stream.try_filter_map(|x| async move {
+ /// let ret = if x % 2 == 0 { Some(x / 2) } else { None };
+ /// Ok(ret)
+ /// });
+ ///
+ /// pin_mut!(halves);
+ /// assert_eq!(halves.next().await, Some(Ok(3)));
+ /// assert_eq!(halves.next().await, Some(Err("error")));
+ /// # })
+ /// ```
+ fn try_filter_map<Fut, F, T>(self, f: F) -> TryFilterMap<Self, Fut, F>
+ where
+ Fut: TryFuture<Ok = Option<T>, Error = Self::Error>,
+ F: FnMut(Self::Ok) -> Fut,
+ Self: Sized,
+ {
+ assert_stream::<Result<T, Self::Error>, _>(TryFilterMap::new(self, f))
+ }
+
+ /// Flattens a stream of streams into just one continuous stream.
+ ///
+ /// If this stream's elements are themselves streams then this combinator
+ /// will flatten out the entire stream to one long chain of elements. Any
+ /// errors are passed through without looking at them, but otherwise each
+ /// individual stream will get exhausted before moving on to the next.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::{StreamExt, TryStreamExt};
+ /// use std::thread;
+ ///
+ /// let (tx1, rx1) = mpsc::unbounded();
+ /// let (tx2, rx2) = mpsc::unbounded();
+ /// let (tx3, rx3) = mpsc::unbounded();
+ ///
+ /// thread::spawn(move || {
+ /// tx1.unbounded_send(Ok(1)).unwrap();
+ /// });
+ /// thread::spawn(move || {
+ /// tx2.unbounded_send(Ok(2)).unwrap();
+ /// tx2.unbounded_send(Err(3)).unwrap();
+ /// });
+ /// thread::spawn(move || {
+ /// tx3.unbounded_send(Ok(rx1)).unwrap();
+ /// tx3.unbounded_send(Ok(rx2)).unwrap();
+ /// tx3.unbounded_send(Err(4)).unwrap();
+ /// });
+ ///
+ /// let mut stream = rx3.try_flatten();
+ /// assert_eq!(stream.next().await, Some(Ok(1)));
+ /// assert_eq!(stream.next().await, Some(Ok(2)));
+ /// assert_eq!(stream.next().await, Some(Err(3)));
+ /// # });
+ /// ```
+ fn try_flatten(self) -> TryFlatten<Self>
+ where
+ Self::Ok: TryStream,
+ <Self::Ok as TryStream>::Error: From<Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<<Self::Ok as TryStream>::Ok, <Self::Ok as TryStream>::Error>, _>(
+ TryFlatten::new(self),
+ )
+ }
+
+ /// Attempt to execute an accumulating asynchronous computation over a
+ /// stream, collecting all the values into one final result.
+ ///
+ /// This combinator will accumulate all values returned by this stream
+ /// according to the closure provided. The initial state is also provided to
+ /// this method and then is returned again by each execution of the closure.
+ /// Once the entire stream has been exhausted the returned future will
+ /// resolve to this value.
+ ///
+ /// This method is similar to [`fold`](crate::stream::StreamExt::fold), but will
+ /// exit early if an error is encountered in either the stream or the
+ /// provided closure.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ ///
+ /// let number_stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2)]);
+ /// let sum = number_stream.try_fold(0, |acc, x| async move { Ok(acc + x) });
+ /// assert_eq!(sum.await, Ok(3));
+ ///
+ /// let number_stream_with_err = stream::iter(vec![Ok::<i32, i32>(1), Err(2), Ok(1)]);
+ /// let sum = number_stream_with_err.try_fold(0, |acc, x| async move { Ok(acc + x) });
+ /// assert_eq!(sum.await, Err(2));
+ /// # })
+ /// ```
+ fn try_fold<T, Fut, F>(self, init: T, f: F) -> TryFold<Self, Fut, T, F>
+ where
+ F: FnMut(T, Self::Ok) -> Fut,
+ Fut: TryFuture<Ok = T, Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_future::<Result<T, Self::Error>, _>(TryFold::new(self, f, init))
+ }
+
+ /// Attempt to concatenate all items of a stream into a single
+ /// extendable destination, returning a future representing the end result.
+ ///
+ /// This combinator will extend the first item with the contents of all
+ /// the subsequent successful results of the stream. If the stream is empty,
+ /// the default value will be returned.
+ ///
+ /// Works with all collections that implement the [`Extend`](std::iter::Extend) trait.
+ ///
+ /// This method is similar to [`concat`](crate::stream::StreamExt::concat), but will
+ /// exit early if an error is encountered in the stream.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::TryStreamExt;
+ /// use std::thread;
+ ///
+ /// let (tx, rx) = mpsc::unbounded::<Result<Vec<i32>, ()>>();
+ ///
+ /// thread::spawn(move || {
+ /// for i in (0..3).rev() {
+ /// let n = i * 3;
+ /// tx.unbounded_send(Ok(vec![n + 1, n + 2, n + 3])).unwrap();
+ /// }
+ /// });
+ ///
+ /// let result = rx.try_concat().await;
+ ///
+ /// assert_eq!(result, Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+ /// # });
+ /// ```
+ fn try_concat(self) -> TryConcat<Self>
+ where
+ Self: Sized,
+ Self::Ok: Extend<<<Self as TryStream>::Ok as IntoIterator>::Item> + IntoIterator + Default,
+ {
+ assert_future::<Result<Self::Ok, Self::Error>, _>(TryConcat::new(self))
+ }
+
+ /// Attempt to execute several futures from a stream concurrently (unordered).
+ ///
+ /// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type
+ /// that matches the stream's `Error` type.
+ ///
+ /// This adaptor will buffer up to `n` futures and then return their
+ /// outputs in the order in which they complete. If the underlying stream
+ /// returns an error, it will be immediately propagated.
+ ///
+ /// The returned stream will be a stream of results, each containing either
+ /// an error or a future's output. An error can be produced either by the
+ /// underlying stream itself or by one of the futures it yielded.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// Results are returned in the order of completion:
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::oneshot;
+ /// use futures::stream::{self, StreamExt, TryStreamExt};
+ ///
+ /// let (send_one, recv_one) = oneshot::channel();
+ /// let (send_two, recv_two) = oneshot::channel();
+ ///
+ /// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
+ ///
+ /// let mut buffered = stream_of_futures.try_buffer_unordered(10);
+ ///
+ /// send_two.send(2i32)?;
+ /// assert_eq!(buffered.next().await, Some(Ok(2i32)));
+ ///
+ /// send_one.send(1i32)?;
+ /// assert_eq!(buffered.next().await, Some(Ok(1i32)));
+ ///
+ /// assert_eq!(buffered.next().await, None);
+ /// # Ok::<(), i32>(()) }).unwrap();
+ /// ```
+ ///
+ /// Errors from the underlying stream itself are propagated:
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::{StreamExt, TryStreamExt};
+ ///
+ /// let (sink, stream_of_futures) = mpsc::unbounded();
+ /// let mut buffered = stream_of_futures.try_buffer_unordered(10);
+ ///
+ /// sink.unbounded_send(Ok(async { Ok(7i32) }))?;
+ /// assert_eq!(buffered.next().await, Some(Ok(7i32)));
+ ///
+ /// sink.unbounded_send(Err("error in the stream"))?;
+ /// assert_eq!(buffered.next().await, Some(Err("error in the stream")));
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn try_buffer_unordered(self, n: usize) -> TryBufferUnordered<Self>
+ where
+ Self::Ok: TryFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<<Self::Ok as TryFuture>::Ok, Self::Error>, _>(
+ TryBufferUnordered::new(self, n),
+ )
+ }
+
+ /// Attempt to execute several futures from a stream concurrently.
+ ///
+ /// This stream's `Ok` type must be a [`TryFuture`](futures_core::future::TryFuture) with an `Error` type
+ /// that matches the stream's `Error` type.
+ ///
+ /// This adaptor will buffer up to `n` futures and then return their
+ /// outputs in the same order as the underlying stream. If the underlying stream returns an error, it will
+ /// be immediately propagated.
+ ///
+ /// The returned stream will be a stream of results, each containing either
+ /// an error or a future's output. An error can be produced either by the
+ /// underlying stream itself or by one of the futures it yielded.
+ ///
+ /// This method is only available when the `std` or `alloc` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// Results are returned in the order of addition:
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::oneshot;
+ /// use futures::future::lazy;
+ /// use futures::stream::{self, StreamExt, TryStreamExt};
+ ///
+ /// let (send_one, recv_one) = oneshot::channel();
+ /// let (send_two, recv_two) = oneshot::channel();
+ ///
+ /// let mut buffered = lazy(move |cx| {
+ /// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
+ ///
+ /// let mut buffered = stream_of_futures.try_buffered(10);
+ ///
+ /// assert!(buffered.try_poll_next_unpin(cx).is_pending());
+ ///
+ /// send_two.send(2i32)?;
+ /// assert!(buffered.try_poll_next_unpin(cx).is_pending());
+ /// Ok::<_, i32>(buffered)
+ /// }).await?;
+ ///
+ /// send_one.send(1i32)?;
+ /// assert_eq!(buffered.next().await, Some(Ok(1i32)));
+ /// assert_eq!(buffered.next().await, Some(Ok(2i32)));
+ ///
+ /// assert_eq!(buffered.next().await, None);
+ /// # Ok::<(), i32>(()) }).unwrap();
+ /// ```
+ ///
+ /// Errors from the underlying stream itself are propagated:
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::channel::mpsc;
+ /// use futures::stream::{StreamExt, TryStreamExt};
+ ///
+ /// let (sink, stream_of_futures) = mpsc::unbounded();
+ /// let mut buffered = stream_of_futures.try_buffered(10);
+ ///
+ /// sink.unbounded_send(Ok(async { Ok(7i32) }))?;
+ /// assert_eq!(buffered.next().await, Some(Ok(7i32)));
+ ///
+ /// sink.unbounded_send(Err("error in the stream"))?;
+ /// assert_eq!(buffered.next().await, Some(Err("error in the stream")));
+ /// # Ok::<(), Box<dyn std::error::Error>>(()) }).unwrap();
+ /// ```
+ #[cfg(not(futures_no_atomic_cas))]
+ #[cfg(feature = "alloc")]
+ fn try_buffered(self, n: usize) -> TryBuffered<Self>
+ where
+ Self::Ok: TryFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_stream::<Result<<Self::Ok as TryFuture>::Ok, Self::Error>, _>(TryBuffered::new(
+ self, n,
+ ))
+ }
+
+ // TODO: false positive warning from rustdoc. Verify once #43466 settles
+ //
+ /// A convenience method for calling [`TryStream::try_poll_next`] on [`Unpin`]
+ /// stream types.
+ fn try_poll_next_unpin(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<Self::Ok, Self::Error>>>
+ where
+ Self: Unpin,
+ {
+ Pin::new(self).try_poll_next(cx)
+ }
+
+ /// Wraps a [`TryStream`] into a stream compatible with libraries using
+ /// futures 0.1 `Stream`. Requires the `compat` feature to be enabled.
+ /// ```
+ /// use futures::future::{FutureExt, TryFutureExt};
+ /// # let (tx, rx) = futures::channel::oneshot::channel();
+ ///
+ /// let future03 = async {
+ /// println!("Running on the pool");
+ /// tx.send(42).unwrap();
+ /// };
+ ///
+ /// let future01 = future03
+ /// .unit_error() // Make it a TryFuture
+ /// .boxed() // Make it Unpin
+ /// .compat();
+ ///
+ /// tokio::run(future01);
+ /// # assert_eq!(42, futures::executor::block_on(rx).unwrap());
+ /// ```
+ #[cfg(feature = "compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+ fn compat(self) -> Compat<Self>
+ where
+ Self: Sized + Unpin,
+ {
+ Compat::new(self)
+ }
+
+ /// Adapter that converts this stream into an [`AsyncRead`](crate::io::AsyncRead).
+ ///
+ /// Note that because `into_async_read` moves the stream, the [`Stream`](futures_core::stream::Stream) type must be
+ /// [`Unpin`]. If you want to use `into_async_read` with a [`!Unpin`](Unpin) stream, you'll
+ /// first have to pin the stream. This can be done by boxing the stream using [`Box::pin`]
+ /// or pinning it to the stack using the `pin_mut!` macro from the `pin_utils` crate.
+ ///
+ /// This method is only available when the `std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # futures::executor::block_on(async {
+ /// use futures::stream::{self, TryStreamExt};
+ /// use futures::io::AsyncReadExt;
+ ///
+ /// let stream = stream::iter(vec![Ok(vec![1, 2, 3, 4, 5])]);
+ /// let mut reader = stream.into_async_read();
+ /// let mut buf = Vec::new();
+ ///
+ /// assert!(reader.read_to_end(&mut buf).await.is_ok());
+ /// assert_eq!(buf, &[1, 2, 3, 4, 5]);
+ /// # })
+ /// ```
+ #[cfg(feature = "io")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "io")))]
+ #[cfg(feature = "std")]
+ fn into_async_read(self) -> IntoAsyncRead<Self>
+ where
+ Self: Sized + TryStreamExt<Error = std::io::Error> + Unpin,
+ Self::Ok: AsRef<[u8]>,
+ {
+ crate::io::assert_read(IntoAsyncRead::new(self))
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/or_else.rs b/vendor/futures-util/src/stream/try_stream/or_else.rs
new file mode 100644
index 000000000..cb69e8132
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/or_else.rs
@@ -0,0 +1,109 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`or_else`](super::TryStreamExt::or_else) method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct OrElse<St, Fut, F> {
+ #[pin]
+ stream: St,
+ #[pin]
+ future: Option<Fut>,
+ f: F,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for OrElse<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OrElse")
+ .field("stream", &self.stream)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> OrElse<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Error) -> Fut,
+ Fut: TryFuture<Ok = St::Ok>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, future: None, f }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> Stream for OrElse<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Error) -> Fut,
+ Fut: TryFuture<Ok = St::Ok>,
+{
+ type Item = Result<St::Ok, Fut::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
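+ // If an error-recovery future produced by `f` is in flight, finish it and
+ // yield its result (its own error type becomes the stream's error type);
+ // otherwise pull from the underlying stream, passing `Ok` items through and
+ // turning each `Err` into a new recovery future.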
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ let item = ready!(fut.try_poll(cx));
+ this.future.set(None);
+ break Some(item);
+ } else {
+ match ready!(this.stream.as_mut().try_poll_next(cx)) {
+ Some(Ok(item)) => break Some(Ok(item)),
+ Some(Err(e)) => {
+ this.future.set(Some((this.f)(e)));
+ }
+ None => break None,
+ }
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let future_len = if self.future.is_some() { 1 } else { 0 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(future_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(future_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St, Fut, F> FusedStream for OrElse<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ F: FnMut(St::Error) -> Fut,
+ Fut: TryFuture<Ok = St::Ok>,
+{
+ fn is_terminated(&self) -> bool {
+ self.future.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for OrElse<S, Fut, F>
+where
+ S: Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs b/vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs
new file mode 100644
index 000000000..9a899d4ea
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_buffer_unordered.rs
@@ -0,0 +1,86 @@
+use crate::future::{IntoFuture, TryFutureExt};
+use crate::stream::{Fuse, FuturesUnordered, IntoStream, StreamExt};
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::stream::{Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the
+ /// [`try_buffer_unordered`](super::TryStreamExt::try_buffer_unordered) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryBufferUnordered<St>
+ where St: TryStream
+ {
+ #[pin]
+ stream: Fuse<IntoStream<St>>,
+ in_progress_queue: FuturesUnordered<IntoFuture<St::Ok>>,
+ max: usize,
+ }
+}
+
+impl<St> TryBufferUnordered<St>
+where
+ St: TryStream,
+ St::Ok: TryFuture,
+{
+ pub(super) fn new(stream: St, n: usize) -> Self {
+ Self {
+ stream: IntoStream::new(stream).fuse(),
+ in_progress_queue: FuturesUnordered::new(),
+ max: n,
+ }
+ }
+
+ delegate_access_inner!(stream, St, (. .));
+}
+
+impl<St> Stream for TryBufferUnordered<St>
+where
+ St: TryStream,
+ St::Ok: TryFuture<Error = St::Error>,
+{
+ type Item = Result<<St::Ok as TryFuture>::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ // First up, try to spawn off as many futures as possible by filling up
+ // our queue of futures. Propagate errors from the stream immediately.
+ while this.in_progress_queue.len() < *this.max {
+ match this.stream.as_mut().poll_next(cx)? {
+ Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut.into_future()),
+ Poll::Ready(None) | Poll::Pending => break,
+ }
+ }
+
+ // Attempt to pull the next value from the in_progress_queue
+ match this.in_progress_queue.poll_next_unpin(cx) {
+ x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x,
+ Poll::Ready(None) => {}
+ }
+
+ // If more values are still coming from the stream, we're not done yet
+ if this.stream.is_done() {
+ Poll::Ready(None)
+ } else {
+ Poll::Pending
+ }
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item, E> Sink<Item> for TryBufferUnordered<S>
+where
+ S: TryStream + Sink<Item, Error = E>,
+ S::Ok: TryFuture<Error = E>,
+{
+ type Error = E;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_buffered.rs b/vendor/futures-util/src/stream/try_stream/try_buffered.rs
new file mode 100644
index 000000000..45bd3f8c7
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_buffered.rs
@@ -0,0 +1,87 @@
+use crate::future::{IntoFuture, TryFutureExt};
+use crate::stream::{Fuse, FuturesOrdered, IntoStream, StreamExt};
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::stream::{Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_buffered`](super::TryStreamExt::try_buffered) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryBuffered<St>
+ where
+ St: TryStream,
+ St::Ok: TryFuture,
+ {
+ #[pin]
+ stream: Fuse<IntoStream<St>>,
+ in_progress_queue: FuturesOrdered<IntoFuture<St::Ok>>,
+ max: usize,
+ }
+}
+
+impl<St> TryBuffered<St>
+where
+ St: TryStream,
+ St::Ok: TryFuture,
+{
+ pub(super) fn new(stream: St, n: usize) -> Self {
+ Self {
+ stream: IntoStream::new(stream).fuse(),
+ in_progress_queue: FuturesOrdered::new(),
+ max: n,
+ }
+ }
+
+ delegate_access_inner!(stream, St, (. .));
+}
+
+impl<St> Stream for TryBuffered<St>
+where
+ St: TryStream,
+ St::Ok: TryFuture<Error = St::Error>,
+{
+ type Item = Result<<St::Ok as TryFuture>::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ // First up, try to spawn off as many futures as possible by filling up
+ // our queue of futures. Propagate errors from the stream immediately.
+ while this.in_progress_queue.len() < *this.max {
+ match this.stream.as_mut().poll_next(cx)? {
+ Poll::Ready(Some(fut)) => this.in_progress_queue.push(fut.into_future()),
+ Poll::Ready(None) | Poll::Pending => break,
+ }
+ }
+
+ // Attempt to pull the next value from the in_progress_queue
+ match this.in_progress_queue.poll_next_unpin(cx) {
+ x @ Poll::Pending | x @ Poll::Ready(Some(_)) => return x,
+ Poll::Ready(None) => {}
+ }
+
+ // If more values are still coming from the stream, we're not done yet
+ if this.stream.is_done() {
+ Poll::Ready(None)
+ } else {
+ Poll::Pending
+ }
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item, E> Sink<Item> for TryBuffered<S>
+where
+ S: TryStream + Sink<Item, Error = E>,
+ S::Ok: TryFuture<Error = E>,
+{
+ type Error = E;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_chunks.rs b/vendor/futures-util/src/stream/try_stream/try_chunks.rs
new file mode 100644
index 000000000..07d4425a8
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_chunks.rs
@@ -0,0 +1,131 @@
+use crate::stream::{Fuse, IntoStream, StreamExt};
+
+use alloc::vec::Vec;
+use core::pin::Pin;
+use core::{fmt, mem};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_chunks`](super::TryStreamExt::try_chunks) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryChunks<St: TryStream> {
+ #[pin]
+ stream: Fuse<IntoStream<St>>,
+ items: Vec<St::Ok>,
+ cap: usize, // https://github.com/rust-lang/futures-rs/issues/1475
+ }
+}
+
+impl<St: TryStream> TryChunks<St> {
+ pub(super) fn new(stream: St, capacity: usize) -> Self {
+ assert!(capacity > 0);
+
+ Self {
+ stream: IntoStream::new(stream).fuse(),
+ items: Vec::with_capacity(capacity),
+ cap: capacity,
+ }
+ }
+
+ fn take(self: Pin<&mut Self>) -> Vec<St::Ok> {
+ let cap = self.cap;
+ mem::replace(self.project().items, Vec::with_capacity(cap))
+ }
+
+ delegate_access_inner!(stream, St, (. .));
+}
+
+impl<St: TryStream> Stream for TryChunks<St> {
+ #[allow(clippy::type_complexity)]
+ type Item = Result<Vec<St::Ok>, TryChunksError<St::Ok, St::Error>>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.as_mut().project();
+ loop {
+ match ready!(this.stream.as_mut().try_poll_next(cx)) {
+ // Push the item into the buffer and check whether it is full.
+ // If so, replace our buffer with a new and empty one and return
+ // the full one.
+ Some(item) => match item {
+ Ok(item) => {
+ this.items.push(item);
+ if this.items.len() >= *this.cap {
+ return Poll::Ready(Some(Ok(self.take())));
+ }
+ }
+ Err(e) => {
+ return Poll::Ready(Some(Err(TryChunksError(self.take(), e))));
+ }
+ },
+
+ // Since the underlying stream ran out of values, return what we
+ // have buffered, if we have anything.
+ None => {
+ let last = if this.items.is_empty() {
+ None
+ } else {
+ let full_buf = mem::replace(this.items, Vec::new());
+ Some(full_buf)
+ };
+
+ return Poll::Ready(last.map(Ok));
+ }
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let chunk_len = if self.items.is_empty() { 0 } else { 1 };
+ let (lower, upper) = self.stream.size_hint();
+ let lower = lower.saturating_add(chunk_len);
+ let upper = match upper {
+ Some(x) => x.checked_add(chunk_len),
+ None => None,
+ };
+ (lower, upper)
+ }
+}
+
+impl<St: TryStream + FusedStream> FusedStream for TryChunks<St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated() && self.items.is_empty()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for TryChunks<S>
+where
+ S: TryStream + Sink<Item>,
+{
+ type Error = <S as Sink<Item>>::Error;
+
+ delegate_sink!(stream, Item);
+}
+
+/// Error indicating that, while a chunk was being collected, the inner stream produced an error.
+///
+/// Contains all items that were collected before an error occurred, and the stream error itself.
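+///
+/// A minimal sketch of recovering the partially collected chunk (illustrative
+/// only, not part of the upstream documentation):
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, TryStreamExt};
+///
+/// let stream = stream::iter(vec![Ok::<i32, &str>(1), Ok(2), Err("boom"), Ok(3)]);
+/// let mut chunks = stream.try_chunks(4);
+/// let err = chunks.try_next().await.unwrap_err();
+/// assert_eq!(err.0, vec![1, 2]); // items collected before the error
+/// assert_eq!(err.1, "boom");     // the underlying stream error
+/// # });
+/// ```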
+#[derive(PartialEq, Eq)]
+pub struct TryChunksError<T, E>(pub Vec<T>, pub E);
+
+impl<T, E: fmt::Debug> fmt::Debug for TryChunksError<T, E> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.1.fmt(f)
+ }
+}
+
+impl<T, E: fmt::Display> fmt::Display for TryChunksError<T, E> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.1.fmt(f)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T, E: fmt::Debug + fmt::Display> std::error::Error for TryChunksError<T, E> {}
diff --git a/vendor/futures-util/src/stream/try_stream/try_collect.rs b/vendor/futures-util/src/stream/try_stream/try_collect.rs
new file mode 100644
index 000000000..5d3b3d766
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_collect.rs
@@ -0,0 +1,52 @@
+use core::mem;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::ready;
+use futures_core::stream::{FusedStream, TryStream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`try_collect`](super::TryStreamExt::try_collect) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct TryCollect<St, C> {
+ #[pin]
+ stream: St,
+ items: C,
+ }
+}
+
+impl<St: TryStream, C: Default> TryCollect<St, C> {
+ pub(super) fn new(s: St) -> Self {
+ Self { stream: s, items: Default::default() }
+ }
+}
+
+impl<St, C> FusedFuture for TryCollect<St, C>
+where
+ St: TryStream + FusedStream,
+ C: Default + Extend<St::Ok>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St, C> Future for TryCollect<St, C>
+where
+ St: TryStream,
+ C: Default + Extend<St::Ok>,
+{
+ type Output = Result<C, St::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+ Poll::Ready(Ok(loop {
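+ // Drain the stream: each `Ok` item is added to the collection via `Extend`;
+ // an `Err` from the stream short-circuits through the `?`, and exhaustion
+ // yields the collection (leaving a fresh default behind).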
+ match ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ Some(x) => this.items.extend(Some(x)),
+ None => break mem::replace(this.items, Default::default()),
+ }
+ }))
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_concat.rs b/vendor/futures-util/src/stream/try_stream/try_concat.rs
new file mode 100644
index 000000000..58fb6a541
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_concat.rs
@@ -0,0 +1,51 @@
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::TryStream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`try_concat`](super::TryStreamExt::try_concat) method.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct TryConcat<St: TryStream> {
+ #[pin]
+ stream: St,
+ accum: Option<St::Ok>,
+ }
+}
+
+impl<St> TryConcat<St>
+where
+ St: TryStream,
+ St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
+{
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, accum: None }
+ }
+}
+
+impl<St> Future for TryConcat<St>
+where
+ St: TryStream,
+ St::Ok: Extend<<St::Ok as IntoIterator>::Item> + IntoIterator + Default,
+{
+ type Output = Result<St::Ok, St::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+
+ Poll::Ready(Ok(loop {
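+ // Fold each `Ok` item into the accumulator via `Extend` (the first item
+ // seeds it); stream errors propagate through the `?`, and exhaustion yields
+ // the accumulator, or a default value if the stream was empty.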
+ if let Some(x) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ if let Some(a) = this.accum {
+ a.extend(x)
+ } else {
+ *this.accum = Some(x)
+ }
+ } else {
+ break this.accum.take().unwrap_or_default();
+ }
+ }))
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_filter.rs b/vendor/futures-util/src/stream/try_stream/try_filter.rs
new file mode 100644
index 000000000..61e6105c3
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_filter.rs
@@ -0,0 +1,112 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_filter`](super::TryStreamExt::try_filter)
+ /// method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryFilter<St, Fut, F>
+ where St: TryStream
+ {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Ok>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TryFilter<St, Fut, F>
+where
+ St: TryStream + fmt::Debug,
+ St::Ok: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryFilter")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TryFilter<St, Fut, F>
+where
+ St: TryStream,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> FusedStream for TryFilter<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: Future<Output = bool>,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending_fut.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F> Stream for TryFilter<St, Fut, F>
+where
+ St: TryStream,
+ Fut: Future<Output = bool>,
+ F: FnMut(&St::Ok) -> Fut,
+{
+ type Item = Result<St::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
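+ // If a predicate future is in flight, finish it first: `true` yields the
+ // buffered item, `false` drops it. Otherwise pull the next `Ok` item from
+ // the stream and start the predicate on it; stream errors are propagated
+ // to the caller via the `?`.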
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let res = ready!(fut.poll(cx));
+ this.pending_fut.set(None);
+ if res {
+ break this.pending_item.take().map(Ok);
+ }
+ *this.pending_item = None;
+ } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ this.pending_fut.set(Some((this.f)(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let pending_len = if self.pending_fut.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item, E> Sink<Item> for TryFilter<S, Fut, F>
+where
+ S: TryStream + Sink<Item, Error = E>,
+{
+ type Error = E;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_filter_map.rs b/vendor/futures-util/src/stream/try_stream/try_filter_map.rs
new file mode 100644
index 000000000..bb1b5b9db
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_filter_map.rs
@@ -0,0 +1,106 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_filter_map`](super::TryStreamExt::try_filter_map)
+ /// method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryFilterMap<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TryFilterMap<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryFilterMap")
+ .field("stream", &self.stream)
+ .field("pending", &self.pending)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TryFilterMap<St, Fut, F> {
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F, T> FusedStream for TryFilterMap<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ Fut: TryFuture<Ok = Option<T>, Error = St::Error>,
+ F: FnMut(St::Ok) -> Fut,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St, Fut, F, T> Stream for TryFilterMap<St, Fut, F>
+where
+ St: TryStream,
+ Fut: TryFuture<Ok = Option<T>, Error = St::Error>,
+ F: FnMut(St::Ok) -> Fut,
+{
+ type Item = Result<T, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if let Some(p) = this.pending.as_mut().as_pin_mut() {
+ // We have an item in progress, poll that until it's done
+ let res = ready!(p.try_poll(cx));
+ this.pending.set(None);
+ let item = res?;
+ if item.is_some() {
+ break item.map(Ok);
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ // No item in progress, but the stream is still going
+ this.pending.set(Some((this.f)(item)));
+ } else {
+ // The stream is done
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let pending_len = if self.pending.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item> Sink<Item> for TryFilterMap<S, Fut, F>
+where
+ S: Sink<Item>,
+{
+ type Error = S::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_flatten.rs b/vendor/futures-util/src/stream/try_stream/try_flatten.rs
new file mode 100644
index 000000000..4fc04a07b
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_flatten.rs
@@ -0,0 +1,84 @@
+use core::pin::Pin;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_flatten`](super::TryStreamExt::try_flatten) method.
+ #[derive(Debug)]
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryFlatten<St>
+ where
+ St: TryStream,
+ {
+ #[pin]
+ stream: St,
+ #[pin]
+ next: Option<St::Ok>,
+ }
+}
+
+impl<St> TryFlatten<St>
+where
+ St: TryStream,
+ St::Ok: TryStream,
+ <St::Ok as TryStream>::Error: From<St::Error>,
+{
+ pub(super) fn new(stream: St) -> Self {
+ Self { stream, next: None }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St> FusedStream for TryFlatten<St>
+where
+ St: TryStream + FusedStream,
+ St::Ok: TryStream,
+ <St::Ok as TryStream>::Error: From<St::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ self.next.is_none() && self.stream.is_terminated()
+ }
+}
+
+impl<St> Stream for TryFlatten<St>
+where
+ St: TryStream,
+ St::Ok: TryStream,
+ <St::Ok as TryStream>::Error: From<St::Error>,
+{
+ type Item = Result<<St::Ok as TryStream>::Ok, <St::Ok as TryStream>::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
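+ // Drain the current inner stream if there is one; when it finishes, fetch
+ // the next inner stream from the outer one, and finish once the outer
+ // stream is exhausted. Errors from either level are propagated via the `?`.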
+ if let Some(s) = this.next.as_mut().as_pin_mut() {
+ if let Some(item) = ready!(s.try_poll_next(cx)?) {
+ break Some(Ok(item));
+ } else {
+ this.next.set(None);
+ }
+ } else if let Some(s) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ this.next.set(Some(s));
+ } else {
+ break None;
+ }
+ })
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Item> Sink<Item> for TryFlatten<S>
+where
+ S: TryStream + Sink<Item>,
+{
+ type Error = <S as Sink<Item>>::Error;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_fold.rs b/vendor/futures-util/src/stream/try_stream/try_fold.rs
new file mode 100644
index 000000000..d344d96e7
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_fold.rs
@@ -0,0 +1,93 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future, TryFuture};
+use futures_core::ready;
+use futures_core::stream::TryStream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`try_fold`](super::TryStreamExt::try_fold) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct TryFold<St, Fut, T, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ accum: Option<T>,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, T, F> fmt::Debug for TryFold<St, Fut, T, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryFold")
+ .field("stream", &self.stream)
+ .field("accum", &self.accum)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, T, F> TryFold<St, Fut, T, F>
+where
+ St: TryStream,
+ F: FnMut(T, St::Ok) -> Fut,
+ Fut: TryFuture<Ok = T, Error = St::Error>,
+{
+ pub(super) fn new(stream: St, f: F, t: T) -> Self {
+ Self { stream, f, accum: Some(t), future: None }
+ }
+}
+
+impl<St, Fut, T, F> FusedFuture for TryFold<St, Fut, T, F>
+where
+ St: TryStream,
+ F: FnMut(T, St::Ok) -> Fut,
+ Fut: TryFuture<Ok = T, Error = St::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ self.accum.is_none() && self.future.is_none()
+ }
+}
+
+impl<St, Fut, T, F> Future for TryFold<St, Fut, T, F>
+where
+ St: TryStream,
+ F: FnMut(T, St::Ok) -> Fut,
+ Fut: TryFuture<Ok = T, Error = St::Error>,
+{
+ type Output = Result<T, St::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+
+ Poll::Ready(loop {
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ // we're currently processing a future to produce a new accum value
+ let res = ready!(fut.try_poll(cx));
+ this.future.set(None);
+ match res {
+ Ok(a) => *this.accum = Some(a),
+ Err(e) => break Err(e),
+ }
+ } else if this.accum.is_some() {
+ // we're waiting on a new item from the stream
+ let res = ready!(this.stream.as_mut().try_poll_next(cx));
+ let a = this.accum.take().unwrap();
+ match res {
+ Some(Ok(item)) => this.future.set(Some((this.f)(a, item))),
+ Some(Err(e)) => break Err(e),
+ None => break Ok(a),
+ }
+ } else {
+ panic!("Fold polled after completion")
+ }
+ })
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_for_each.rs b/vendor/futures-util/src/stream/try_stream/try_for_each.rs
new file mode 100644
index 000000000..6a081d84e
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_for_each.rs
@@ -0,0 +1,68 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::{Future, TryFuture};
+use futures_core::ready;
+use futures_core::stream::TryStream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the [`try_for_each`](super::TryStreamExt::try_for_each) method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct TryForEach<St, Fut, F> {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ future: Option<Fut>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TryForEach<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryForEach")
+ .field("stream", &self.stream)
+ .field("future", &self.future)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TryForEach<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: TryFuture<Ok = (), Error = St::Error>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, future: None }
+ }
+}
+
+impl<St, Fut, F> Future for TryForEach<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: TryFuture<Ok = (), Error = St::Error>,
+{
+ type Output = Result<(), St::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+ loop {
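+ // Alternate between driving the future returned by `f` for the previous
+ // item and pulling the next item from the stream; the first error from
+ // either side ends this future early via the `?`.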
+ if let Some(fut) = this.future.as_mut().as_pin_mut() {
+ ready!(fut.try_poll(cx))?;
+ this.future.set(None);
+ } else {
+ match ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ Some(e) => this.future.set(Some((this.f)(e))),
+ None => break,
+ }
+ }
+ }
+ Poll::Ready(Ok(()))
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs b/vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs
new file mode 100644
index 000000000..62734c746
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_for_each_concurrent.rs
@@ -0,0 +1,133 @@
+use crate::stream::{FuturesUnordered, StreamExt};
+use core::fmt;
+use core::mem;
+use core::num::NonZeroUsize;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::TryStream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Future for the
+ /// [`try_for_each_concurrent`](super::TryStreamExt::try_for_each_concurrent)
+ /// method.
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct TryForEachConcurrent<St, Fut, F> {
+ #[pin]
+ stream: Option<St>,
+ f: F,
+ futures: FuturesUnordered<Fut>,
+ limit: Option<NonZeroUsize>,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TryForEachConcurrent<St, Fut, F>
+where
+ St: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryForEachConcurrent")
+ .field("stream", &self.stream)
+ .field("futures", &self.futures)
+ .field("limit", &self.limit)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> FusedFuture for TryForEachConcurrent<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: Future<Output = Result<(), St::Error>>,
+{
+ fn is_terminated(&self) -> bool {
+ self.stream.is_none() && self.futures.is_empty()
+ }
+}
+
+impl<St, Fut, F> TryForEachConcurrent<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: Future<Output = Result<(), St::Error>>,
+{
+ pub(super) fn new(stream: St, limit: Option<usize>, f: F) -> Self {
+ Self {
+ stream: Some(stream),
+ // Note: `limit` = 0 gets ignored.
+ limit: limit.and_then(NonZeroUsize::new),
+ f,
+ futures: FuturesUnordered::new(),
+ }
+ }
+}
+
+impl<St, Fut, F> Future for TryForEachConcurrent<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(St::Ok) -> Fut,
+ Fut: Future<Output = Result<(), St::Error>>,
+{
+ type Output = Result<(), St::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let mut this = self.project();
+ loop {
+ let mut made_progress_this_iter = false;
+
+ // Check if we've already created a number of futures greater than `limit`
+ if this.limit.map(|limit| limit.get() > this.futures.len()).unwrap_or(true) {
+ let poll_res = match this.stream.as_mut().as_pin_mut() {
+ Some(stream) => stream.try_poll_next(cx),
+ None => Poll::Ready(None),
+ };
+
+ let elem = match poll_res {
+ Poll::Ready(Some(Ok(elem))) => {
+ made_progress_this_iter = true;
+ Some(elem)
+ }
+ Poll::Ready(None) => {
+ this.stream.set(None);
+ None
+ }
+ Poll::Pending => None,
+ Poll::Ready(Some(Err(e))) => {
+ // Empty the stream and futures so that we know
+ // the future has completed.
+ this.stream.set(None);
+ drop(mem::replace(this.futures, FuturesUnordered::new()));
+ return Poll::Ready(Err(e));
+ }
+ };
+
+ if let Some(elem) = elem {
+ this.futures.push((this.f)(elem));
+ }
+ }
+
+ match this.futures.poll_next_unpin(cx) {
+ Poll::Ready(Some(Ok(()))) => made_progress_this_iter = true,
+ Poll::Ready(None) => {
+ if this.stream.is_none() {
+ return Poll::Ready(Ok(()));
+ }
+ }
+ Poll::Pending => {}
+ Poll::Ready(Some(Err(e))) => {
+ // Empty the stream and futures so that we know
+ // the future has completed.
+ this.stream.set(None);
+ drop(mem::replace(this.futures, FuturesUnordered::new()));
+ return Poll::Ready(Err(e));
+ }
+ }
+
+ if !made_progress_this_iter {
+ return Poll::Pending;
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_next.rs b/vendor/futures-util/src/stream/try_stream/try_next.rs
new file mode 100644
index 000000000..13fcf80ca
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_next.rs
@@ -0,0 +1,34 @@
+use crate::stream::TryStreamExt;
+use core::pin::Pin;
+use futures_core::future::{FusedFuture, Future};
+use futures_core::stream::{FusedStream, TryStream};
+use futures_core::task::{Context, Poll};
+
+/// Future for the [`try_next`](super::TryStreamExt::try_next) method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct TryNext<'a, St: ?Sized> {
+ stream: &'a mut St,
+}
+
+impl<St: ?Sized + Unpin> Unpin for TryNext<'_, St> {}
+
+impl<'a, St: ?Sized + TryStream + Unpin> TryNext<'a, St> {
+ pub(super) fn new(stream: &'a mut St) -> Self {
+ Self { stream }
+ }
+}
+
+impl<St: ?Sized + TryStream + Unpin + FusedStream> FusedFuture for TryNext<'_, St> {
+ fn is_terminated(&self) -> bool {
+ self.stream.is_terminated()
+ }
+}
+
+impl<St: ?Sized + TryStream + Unpin> Future for TryNext<'_, St> {
+ type Output = Result<Option<St::Ok>, St::Error>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.stream.try_poll_next_unpin(cx)?.map(Ok)
+ }
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_skip_while.rs b/vendor/futures-util/src/stream/try_stream/try_skip_while.rs
new file mode 100644
index 000000000..a424b6c5b
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_skip_while.rs
@@ -0,0 +1,120 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_skip_while`](super::TryStreamExt::try_skip_while)
+ /// method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TrySkipWhile<St, Fut, F> where St: TryStream {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Ok>,
+ done_skipping: bool,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TrySkipWhile<St, Fut, F>
+where
+ St: TryStream + fmt::Debug,
+ St::Ok: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TrySkipWhile")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .field("done_skipping", &self.done_skipping)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TrySkipWhile<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None, done_skipping: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> Stream for TrySkipWhile<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ type Item = Result<St::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if *this.done_skipping {
+ return this.stream.try_poll_next(cx);
+ }
+
+ Poll::Ready(loop {
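+ // While still skipping, run the predicate on each buffered item: the first
+ // item for which it returns `false` is yielded and skipping stops; items
+ // for which it returns `true` are discarded.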
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let res = ready!(fut.try_poll(cx));
+ this.pending_fut.set(None);
+ let skipped = res?;
+ let item = this.pending_item.take();
+ if !skipped {
+ *this.done_skipping = true;
+ break item.map(Ok);
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ this.pending_fut.set(Some((this.f)(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+impl<St, Fut, F> FusedStream for TrySkipWhile<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ self.pending_item.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item, E> Sink<Item> for TrySkipWhile<S, Fut, F>
+where
+ S: TryStream + Sink<Item, Error = E>,
+{
+ type Error = E;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_take_while.rs b/vendor/futures-util/src/stream/try_stream/try_take_while.rs
new file mode 100644
index 000000000..3375960ef
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_take_while.rs
@@ -0,0 +1,129 @@
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream, TryStream};
+use futures_core::task::{Context, Poll};
+#[cfg(feature = "sink")]
+use futures_sink::Sink;
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// Stream for the [`try_take_while`](super::TryStreamExt::try_take_while)
+ /// method.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryTakeWhile<St, Fut, F>
+ where
+ St: TryStream,
+ {
+ #[pin]
+ stream: St,
+ f: F,
+ #[pin]
+ pending_fut: Option<Fut>,
+ pending_item: Option<St::Ok>,
+ done_taking: bool,
+ }
+}
+
+impl<St, Fut, F> fmt::Debug for TryTakeWhile<St, Fut, F>
+where
+ St: TryStream + fmt::Debug,
+ St::Ok: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryTakeWhile")
+ .field("stream", &self.stream)
+ .field("pending_fut", &self.pending_fut)
+ .field("pending_item", &self.pending_item)
+ .field("done_taking", &self.done_taking)
+ .finish()
+ }
+}
+
+impl<St, Fut, F> TryTakeWhile<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ pub(super) fn new(stream: St, f: F) -> Self {
+ Self { stream, f, pending_fut: None, pending_item: None, done_taking: false }
+ }
+
+ delegate_access_inner!(stream, St, ());
+}
+
+impl<St, Fut, F> Stream for TryTakeWhile<St, Fut, F>
+where
+ St: TryStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ type Item = Result<St::Ok, St::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if *this.done_taking {
+ return Poll::Ready(None);
+ }
+
+ Poll::Ready(loop {
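+ // Run the predicate on each buffered item: while it returns `true` the
+ // items are yielded; the first `false` ends the stream and discards that
+ // item.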
+ if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
+ let res = ready!(fut.try_poll(cx));
+ this.pending_fut.set(None);
+ let take = res?;
+ let item = this.pending_item.take();
+ if take {
+ break item.map(Ok);
+ } else {
+ *this.done_taking = true;
+ break None;
+ }
+ } else if let Some(item) = ready!(this.stream.as_mut().try_poll_next(cx)?) {
+ this.pending_fut.set(Some((this.f)(&item)));
+ *this.pending_item = Some(item);
+ } else {
+ break None;
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.done_taking {
+ return (0, Some(0));
+ }
+
+ let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
+ let (_, upper) = self.stream.size_hint();
+ let upper = match upper {
+ Some(x) => x.checked_add(pending_len),
+ None => None,
+ };
+ (0, upper) // can't know a lower bound, due to the predicate
+ }
+}
+
+impl<St, Fut, F> FusedStream for TryTakeWhile<St, Fut, F>
+where
+ St: TryStream + FusedStream,
+ F: FnMut(&St::Ok) -> Fut,
+ Fut: TryFuture<Ok = bool, Error = St::Error>,
+{
+ fn is_terminated(&self) -> bool {
+ self.done_taking || self.pending_item.is_none() && self.stream.is_terminated()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+#[cfg(feature = "sink")]
+impl<S, Fut, F, Item, E> Sink<Item> for TryTakeWhile<S, Fut, F>
+where
+ S: TryStream + Sink<Item, Error = E>,
+{
+ type Error = E;
+
+ delegate_sink!(stream, Item);
+}
diff --git a/vendor/futures-util/src/stream/try_stream/try_unfold.rs b/vendor/futures-util/src/stream/try_stream/try_unfold.rs
new file mode 100644
index 000000000..fd9cdf1d8
--- /dev/null
+++ b/vendor/futures-util/src/stream/try_stream/try_unfold.rs
@@ -0,0 +1,122 @@
+use super::assert_stream;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::TryFuture;
+use futures_core::ready;
+use futures_core::stream::Stream;
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+/// Creates a `TryStream` from a seed and a closure returning a `TryFuture`.
+///
+/// This function is the dual for the `TryStream::try_fold()` adapter: while
+/// `TryStream::try_fold()` reduces a `TryStream` to one single value,
+/// `try_unfold()` creates a `TryStream` from a seed value.
+///
+/// `try_unfold()` will call the provided closure with the provided seed, then
+/// wait for the returned `TryFuture` to complete with `(a, b)`. It will then
+/// yield the value `a`, and use `b` as the next internal state.
+///
+/// If the closure returns `None` instead of `Some(TryFuture)`, then the
+/// `try_unfold()` will stop producing items and return `Poll::Ready(None)` in
+/// future calls to `poll()`.
+///
+/// In case of error generated by the returned `TryFuture`, the error will be
+/// returned by the `TryStream`. The `TryStream` will then yield
+/// `Poll::Ready(None)` in future calls to `poll()`.
+///
+/// This function can typically be used when wanting to go from the "world of
+/// futures" to the "world of streams": the provided closure can build a
+/// `TryFuture` using other library functions working on futures, and
+/// `try_unfold()` will turn it into a `TryStream` by repeating the operation.
+///
+/// # Example
+///
+/// ```
+/// # #[derive(Debug, PartialEq)]
+/// # struct SomeError;
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, TryStreamExt};
+///
+/// let stream = stream::try_unfold(0, |state| async move {
+/// if state < 0 {
+/// return Err(SomeError);
+/// }
+///
+/// if state <= 2 {
+/// let next_state = state + 1;
+/// let yielded = state * 2;
+/// Ok(Some((yielded, next_state)))
+/// } else {
+/// Ok(None)
+/// }
+/// });
+///
+/// let result: Result<Vec<i32>, _> = stream.try_collect().await;
+/// assert_eq!(result, Ok(vec![0, 2, 4]));
+/// # });
+/// ```
+pub fn try_unfold<T, F, Fut, Item>(init: T, f: F) -> TryUnfold<T, F, Fut>
+where
+ F: FnMut(T) -> Fut,
+ Fut: TryFuture<Ok = Option<(Item, T)>>,
+{
+ assert_stream::<Result<Item, Fut::Error>, _>(TryUnfold { f, state: Some(init), fut: None })
+}
+
+pin_project! {
+ /// Stream for the [`try_unfold`] function.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct TryUnfold<T, F, Fut> {
+ f: F,
+ state: Option<T>,
+ #[pin]
+ fut: Option<Fut>,
+ }
+}
+
+impl<T, F, Fut> fmt::Debug for TryUnfold<T, F, Fut>
+where
+ T: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TryUnfold").field("state", &self.state).field("fut", &self.fut).finish()
+ }
+}
+
+impl<T, F, Fut, Item> Stream for TryUnfold<T, F, Fut>
+where
+ F: FnMut(T) -> Fut,
+ Fut: TryFuture<Ok = Option<(Item, T)>>,
+{
+ type Item = Result<Item, Fut::Error>;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if let Some(state) = this.state.take() {
+ this.fut.set(Some((this.f)(state)));
+ }
+
+ match this.fut.as_mut().as_pin_mut() {
+ None => {
+ // The future previously errored
+ Poll::Ready(None)
+ }
+ Some(future) => {
+ let step = ready!(future.try_poll(cx));
+ this.fut.set(None);
+
+ match step {
+ Ok(Some((item, next_state))) => {
+ *this.state = Some(next_state);
+ Poll::Ready(Some(Ok(item)))
+ }
+ Ok(None) => Poll::Ready(None),
+ Err(e) => Poll::Ready(Some(Err(e))),
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/futures-util/src/stream/unfold.rs b/vendor/futures-util/src/stream/unfold.rs
new file mode 100644
index 000000000..7d8ef6bab
--- /dev/null
+++ b/vendor/futures-util/src/stream/unfold.rs
@@ -0,0 +1,119 @@
+use super::assert_stream;
+use crate::unfold_state::UnfoldState;
+use core::fmt;
+use core::pin::Pin;
+use futures_core::future::Future;
+use futures_core::ready;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+use pin_project_lite::pin_project;
+
+/// Creates a `Stream` from a seed and a closure returning a `Future`.
+///
+/// This function is the dual for the `Stream::fold()` adapter: while
+/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a
+/// `Stream` from a seed value.
+///
+/// `unfold()` will call the provided closure with the provided seed, then wait
+/// for the returned `Future` to complete with `(a, b)`. It will then yield the
+/// value `a`, and use `b` as the next internal state.
+///
+/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()`
+/// will stop producing items and return `Poll::Ready(None)` in future
+/// calls to `poll()`.
+///
+/// This function can typically be used when wanting to go from the "world of
+/// futures" to the "world of streams": the provided closure can build a
+/// `Future` using other library functions working on futures, and `unfold()`
+/// will turn it into a `Stream` by repeating the operation.
+///
+/// # Example
+///
+/// ```
+/// # futures::executor::block_on(async {
+/// use futures::stream::{self, StreamExt};
+///
+/// let stream = stream::unfold(0, |state| async move {
+/// if state <= 2 {
+/// let next_state = state + 1;
+/// let yielded = state * 2;
+/// Some((yielded, next_state))
+/// } else {
+/// None
+/// }
+/// });
+///
+/// let result = stream.collect::<Vec<i32>>().await;
+/// assert_eq!(result, vec![0, 2, 4]);
+/// # });
+/// ```
+pub fn unfold<T, F, Fut, Item>(init: T, f: F) -> Unfold<T, F, Fut>
+where
+ F: FnMut(T) -> Fut,
+ Fut: Future<Output = Option<(Item, T)>>,
+{
+ assert_stream::<Item, _>(Unfold { f, state: UnfoldState::Value { value: init } })
+}
+
+pin_project! {
+ /// Stream for the [`unfold`] function.
+ #[must_use = "streams do nothing unless polled"]
+ pub struct Unfold<T, F, Fut> {
+ f: F,
+ #[pin]
+ state: UnfoldState<T, Fut>,
+ }
+}
+
+impl<T, F, Fut> fmt::Debug for Unfold<T, F, Fut>
+where
+ T: fmt::Debug,
+ Fut: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Unfold").field("state", &self.state).finish()
+ }
+}
+
+impl<T, F, Fut, Item> FusedStream for Unfold<T, F, Fut>
+where
+ F: FnMut(T) -> Fut,
+ Fut: Future<Output = Option<(Item, T)>>,
+{
+ fn is_terminated(&self) -> bool {
+ if let UnfoldState::Empty = self.state {
+ true
+ } else {
+ false
+ }
+ }
+}
+
+impl<T, F, Fut, Item> Stream for Unfold<T, F, Fut>
+where
+ F: FnMut(T) -> Fut,
+ Fut: Future<Output = Option<(Item, T)>>,
+{
+ type Item = Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let mut this = self.project();
+
+ if let Some(state) = this.state.as_mut().take_value() {
+ this.state.set(UnfoldState::Future { future: (this.f)(state) });
+ }
+
+ let step = match this.state.as_mut().project_future() {
+ Some(fut) => ready!(fut.poll(cx)),
+ None => panic!("Unfold must not be polled after it returned `Poll::Ready(None)`"),
+ };
+
+ if let Some((item, next_state)) = step {
+ this.state.set(UnfoldState::Value { value: next_state });
+ Poll::Ready(Some(item))
+ } else {
+ this.state.set(UnfoldState::Empty);
+ Poll::Ready(None)
+ }
+ }
+}
diff --git a/vendor/futures-util/src/task/mod.rs b/vendor/futures-util/src/task/mod.rs
new file mode 100644
index 000000000..0a31eeac1
--- /dev/null
+++ b/vendor/futures-util/src/task/mod.rs
@@ -0,0 +1,37 @@
+//! Tools for working with tasks.
+//!
+//! This module contains:
+//!
+//! - [`Spawn`], a trait for spawning new tasks.
+//! - [`Context`], a context of an asynchronous task,
+//! including a handle for waking up the task.
+//! - [`Waker`], a handle for waking up a task.
+//!
+//! The remaining types and traits in the module are used for implementing
+//! executors or dealing with synchronization issues around task wakeup.
+
+#[doc(no_inline)]
+pub use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+
+pub use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError, UnsafeFutureObj};
+
+pub use futures_task::noop_waker;
+pub use futures_task::noop_waker_ref;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use futures_task::ArcWake;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use futures_task::waker;
+
+#[cfg(not(futures_no_atomic_cas))]
+#[cfg(feature = "alloc")]
+pub use futures_task::{waker_ref, WakerRef};
+
+#[cfg(not(futures_no_atomic_cas))]
+pub use futures_core::task::__internal::AtomicWaker;
+
+mod spawn;
+pub use self::spawn::{LocalSpawnExt, SpawnExt};
diff --git a/vendor/futures-util/src/task/spawn.rs b/vendor/futures-util/src/task/spawn.rs
new file mode 100644
index 000000000..f8779230e
--- /dev/null
+++ b/vendor/futures-util/src/task/spawn.rs
@@ -0,0 +1,163 @@
+use futures_task::{LocalSpawn, Spawn};
+
+#[cfg(feature = "compat")]
+use crate::compat::Compat;
+
+#[cfg(feature = "channel")]
+#[cfg(feature = "std")]
+use crate::future::{FutureExt, RemoteHandle};
+#[cfg(feature = "alloc")]
+use alloc::boxed::Box;
+#[cfg(feature = "alloc")]
+use futures_core::future::Future;
+#[cfg(feature = "alloc")]
+use futures_task::{FutureObj, LocalFutureObj, SpawnError};
+
+impl<Sp: ?Sized> SpawnExt for Sp where Sp: Spawn {}
+impl<Sp: ?Sized> LocalSpawnExt for Sp where Sp: LocalSpawn {}
+
+/// Extension trait for `Spawn`.
+pub trait SpawnExt: Spawn {
+ /// Spawns a task that polls the given future with output `()` to
+ /// completion.
+ ///
+ /// This method returns a [`Result`] that contains a [`SpawnError`] if
+ /// spawning fails.
+ ///
+ /// You can use [`spawn_with_handle`](SpawnExt::spawn_with_handle) if
+ /// you want to spawn a future with output other than `()` or if you want
+ /// to be able to await its completion.
+ ///
+ /// Note this method will eventually be replaced with the upcoming
+ /// `Spawn::spawn` method which will take a `dyn Future` as input.
+ /// Technical limitations prevent `Spawn::spawn` from being implemented
+ /// today. Feel free to use this method in the meantime.
+ ///
+ /// ```
+ /// use futures::executor::ThreadPool;
+ /// use futures::task::SpawnExt;
+ ///
+ /// let executor = ThreadPool::new().unwrap();
+ ///
+ /// let future = async { /* ... */ };
+ /// executor.spawn(future).unwrap();
+ /// ```
+ #[cfg(feature = "alloc")]
+ fn spawn<Fut>(&self, future: Fut) -> Result<(), SpawnError>
+ where
+ Fut: Future<Output = ()> + Send + 'static,
+ {
+ self.spawn_obj(FutureObj::new(Box::new(future)))
+ }
+
+ /// Spawns a task that polls the given future to completion and returns a
+ /// future that resolves to the spawned future's output.
+ ///
+ /// This method returns a [`Result`] that contains a [`RemoteHandle`](crate::future::RemoteHandle), or, if
+ /// spawning fails, a [`SpawnError`]. [`RemoteHandle`](crate::future::RemoteHandle) is a future that
+ /// resolves to the output of the spawned future.
+ ///
+ /// ```
+ /// use futures::executor::{block_on, ThreadPool};
+ /// use futures::future;
+ /// use futures::task::SpawnExt;
+ ///
+ /// let executor = ThreadPool::new().unwrap();
+ ///
+ /// let future = future::ready(1);
+ /// let join_handle_fut = executor.spawn_with_handle(future).unwrap();
+ /// assert_eq!(block_on(join_handle_fut), 1);
+ /// ```
+ #[cfg(feature = "channel")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+ #[cfg(feature = "std")]
+ fn spawn_with_handle<Fut>(&self, future: Fut) -> Result<RemoteHandle<Fut::Output>, SpawnError>
+ where
+ Fut: Future + Send + 'static,
+ Fut::Output: Send,
+ {
+ let (future, handle) = future.remote_handle();
+ self.spawn(future)?;
+ Ok(handle)
+ }
+
+ /// Wraps a [`Spawn`] and makes it usable as a futures 0.1 `Executor`.
+ /// Requires the `compat` feature to be enabled.
+ #[cfg(feature = "compat")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+ fn compat(self) -> Compat<Self>
+ where
+ Self: Sized,
+ {
+ Compat::new(self)
+ }
+}
+
+/// Extension trait for `LocalSpawn`.
+pub trait LocalSpawnExt: LocalSpawn {
+ /// Spawns a task that polls the given future with output `()` to
+ /// completion.
+ ///
+ /// This method returns a [`Result`] that contains a [`SpawnError`] if
+ /// spawning fails.
+ ///
+ /// You can use [`spawn_with_handle`](SpawnExt::spawn_with_handle) if
+ /// you want to spawn a future with output other than `()` or if you want
+ /// to be able to await its completion.
+ ///
+ /// Note this method will eventually be replaced with the upcoming
+ /// `Spawn::spawn` method which will take a `dyn Future` as input.
+ /// Technical limitations prevent `Spawn::spawn` from being implemented
+ /// today. Feel free to use this method in the meantime.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ /// use futures::task::LocalSpawnExt;
+ ///
+ /// let executor = LocalPool::new();
+ /// let spawner = executor.spawner();
+ ///
+ /// let future = async { /* ... */ };
+ /// spawner.spawn_local(future).unwrap();
+ /// ```
+ #[cfg(feature = "alloc")]
+ fn spawn_local<Fut>(&self, future: Fut) -> Result<(), SpawnError>
+ where
+ Fut: Future<Output = ()> + 'static,
+ {
+ self.spawn_local_obj(LocalFutureObj::new(Box::new(future)))
+ }
+
+ /// Spawns a task that polls the given future to completion and returns a
+ /// future that resolves to the spawned future's output.
+ ///
+ /// This method returns a [`Result`] that contains a [`RemoteHandle`](crate::future::RemoteHandle), or, if
+ /// spawning fails, a [`SpawnError`]. [`RemoteHandle`](crate::future::RemoteHandle) is a future that
+ /// resolves to the output of the spawned future.
+ ///
+ /// ```
+ /// use futures::executor::LocalPool;
+ /// use futures::task::LocalSpawnExt;
+ ///
+ /// let mut executor = LocalPool::new();
+ /// let spawner = executor.spawner();
+ ///
+ /// let future = async { 1 };
+ /// let join_handle_fut = spawner.spawn_local_with_handle(future).unwrap();
+ /// assert_eq!(executor.run_until(join_handle_fut), 1);
+ /// ```
+ #[cfg(feature = "channel")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "channel")))]
+ #[cfg(feature = "std")]
+ fn spawn_local_with_handle<Fut>(
+ &self,
+ future: Fut,
+ ) -> Result<RemoteHandle<Fut::Output>, SpawnError>
+ where
+ Fut: Future + 'static,
+ {
+ let (future, handle) = future.remote_handle();
+ self.spawn_local(future)?;
+ Ok(handle)
+ }
+}
diff --git a/vendor/futures-util/src/unfold_state.rs b/vendor/futures-util/src/unfold_state.rs
new file mode 100644
index 000000000..0edc15e43
--- /dev/null
+++ b/vendor/futures-util/src/unfold_state.rs
@@ -0,0 +1,39 @@
+use core::pin::Pin;
+
+use pin_project_lite::pin_project;
+
+pin_project! {
+ /// UnfoldState used for stream and sink unfolds
+ #[project = UnfoldStateProj]
+ #[project_replace = UnfoldStateProjReplace]
+ #[derive(Debug)]
+ pub(crate) enum UnfoldState<T, R> {
+ Value {
+ value: T,
+ },
+ Future {
+ #[pin]
+ future: R,
+ },
+ Empty,
+ }
+}
+
+impl<T, R> UnfoldState<T, R> {
+ pub(crate) fn project_future(self: Pin<&mut Self>) -> Option<Pin<&mut R>> {
+ match self.project() {
+ UnfoldStateProj::Future { future } => Some(future),
+ _ => None,
+ }
+ }
+
+ pub(crate) fn take_value(self: Pin<&mut Self>) -> Option<T> {
+ match &*self {
+ UnfoldState::Value { .. } => match self.project_replace(UnfoldState::Empty) {
+ UnfoldStateProjReplace::Value { value } => Some(value),
+ _ => unreachable!(),
+ },
+ _ => None,
+ }
+ }
+}
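`UnfoldState` is an internal helper for the `unfold`-style stream and sink combinators: the state is either the user's seed (`Value`), the future currently being polled (`Future`), or `Empty` for the brief moment while the seed is taken out and the next future is constructed, which is what `take_value` and `project_future` support. The sketch below is not the crate's code; it re-creates the same three-state cycle with a plain enum and an `Unpin` future so the transition logic is visible without `pin_project` (the names `State` and `poll_step` are hypothetical).

```rust
// Standalone re-creation of the Value -> Future -> Value cycle; the names
// (`State`, `poll_step`) are hypothetical and not futures-util APIs.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

enum State<T, R> {
    Value(T),  // the seed waiting to be turned into a future
    Future(R), // the in-flight future produced from the seed
    Empty,     // transient state while moving between the two
}

// One unfold step: take the seed, build the next future, poll it, and store
// the next seed once the future yields `(item, next_seed)`.
fn poll_step<T, Item, R, F>(
    state: &mut State<T, R>,
    make_future: &mut F,
    cx: &mut Context<'_>,
) -> Poll<Item>
where
    R: Future<Output = (Item, T)> + Unpin, // `Unpin` keeps the sketch free of pin projection
    F: FnMut(T) -> R,
{
    loop {
        match std::mem::replace(state, State::Empty) {
            State::Value(seed) => *state = State::Future(make_future(seed)),
            State::Future(mut fut) => match Pin::new(&mut fut).poll(cx) {
                Poll::Ready((item, next_seed)) => {
                    *state = State::Value(next_seed);
                    return Poll::Ready(item);
                }
                Poll::Pending => {
                    // Put the future back and wait to be woken again.
                    *state = State::Future(fut);
                    return Poll::Pending;
                }
            },
            State::Empty => unreachable!("state is restored before every return"),
        }
    }
}

fn main() {
    use futures::executor::block_on;
    use futures::future::poll_fn;

    // Each step's future yields (seed * 10, seed + 1).
    let mut state = State::Value(0u32);
    let mut step = |seed: u32| std::future::ready((seed * 10, seed + 1));

    let first = block_on(poll_fn(|cx| poll_step(&mut state, &mut step, cx)));
    let second = block_on(poll_fn(|cx| poll_step(&mut state, &mut step, cx)));
    assert_eq!((first, second), (0, 10));
}
```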
diff --git a/vendor/futures/.cargo-checksum.json b/vendor/futures/.cargo-checksum.json
new file mode 100644
index 000000000..04b9549c1
--- /dev/null
+++ b/vendor/futures/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"d5f1e8cd3101583132847daf46805ef6c0576cc5fcf954db0bb06948da552ce2","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","src/lib.rs":"64dde6f72fcb20b91783219bb78cedfeb1558696de0b7aaee92598cf82ab6144","tests/_require_features.rs":"5ad24019430b498addfc1fd853e955c7b646d78d0727a8ca29f586c9aab45cff","tests/async_await_macros.rs":"e171f9f02e7b7b0d9c254ad9b0f777b0282a6742b7c72a0080d9f6e4a6a44a4d","tests/auto_traits.rs":"afd108f67ce7a1549071c0f2316af63be3cb9ef864ff9856c9b2d4e47987c349","tests/compat.rs":"1ab5af07f13fad9b8fbf29c0df89102687b6abe855ce92bac153d5f916b28689","tests/eager_drop.rs":"dc25d067207c06bbe094752d70bf161e206f00e162ffa3219583c8b4eb0816a1","tests/eventual.rs":"4e3db25ac3f5ebb191caf538c460234eb95413b17441372cc3234d2cbecdc551","tests/future_abortable.rs":"4c81607472a85c5d87a5fe8a510a24cf1e8793fedf7f6cd6741ba1efd66615cd","tests/future_basic_combinators.rs":"4508c1250b85a4f749b7261bbd0ba728d3970e7ba277e84a006e76cf068fb54f","tests/future_fuse.rs":"bb63141f1486e755d0cdea1d93e302ad864a2186aa5287f909a0b3a922e82065","tests/future_inspect.rs":"9c03ceb770ce04fe9fd88a3489362642a0e34ae86a7b4958703e89e8b7a1ecf4","tests/future_join_all.rs":"4c7ab90afc4a0ae721e16f92615cd990a7a608de50b88ba06e6f931478ea04cd","tests/future_obj.rs":"a6aae88a194dc7d3bb961c20db78f180a01796cf7ea4bf106da98c40d89ed36d","tests/future_select_all.rs":"4cefc84d6b7ae2cf0007912cd0325fff6b926a4c26310e7b14a21868de61616f","tests/future_select_ok.rs":"1cabd03268641e1ac42b880344528bad73e3aeb6d6a8a141e652f339dd40184b","tests/future_shared.rs":"778e8763dea8df205581ec8dd9bf1453ca9f17065b496cecb6728147a148efeb","tests/future_try_flatten_stream.rs":"aa4542b5d88f62522b736fac4567613081df45ad3eb54b0b659cdadc9409c4db","tests/future_try_join_all.rs":"2bdd2e7d7f6d8b9c28b05e374906e10a914c2ff36762a0fd81ca4d892fad1341","tests/io_buf_reader.rs":"1d60479224d5aa9378d4aed6246362b08a823ee7c9977f6a5e44fce7c40116be","tests/io_buf_writer.rs":"8f7a78ab2955d2beb69d0881321d4191235540aef6448e875e7f76a2ffc55b89","tests/io_cursor.rs":"cba5a7b968b9f816ac33316ce1e4da67cb320aa5a21332c0f9a45694fa445dd7","tests/io_line_writer.rs":"5b1140de776a721a677911496daa4e7956cc52cc08838d593ab300a93e0d7984","tests/io_lines.rs":"72a310c885591793ed724d0aa2158ac2c9d1af22de417044d96b714f78317586","tests/io_read.rs":"e0a8fa9b27e042f03c9fe14e8f0f329a67e24afad1ce40b906a1ab4d2abef23a","tests/io_read_exact.rs":"42049cd67589992dc09764ffb3836c475115b26dee441fd4cc7e847b2d166667","tests/io_read_line.rs":"f360c30c32fc8c73b371281e86c3f1095da7ef23b702debb30d335046dc77dac","tests/io_read_to_end.rs":"ea3e961e39a0b92930bded05e8ba26e4902461ab53818843d40fae8065b1a803","tests/io_read_to_string.rs":"824921601ac49f15b9a0b349c900f9cc9081cf2646e6a86f443166f841f1320e","tests/io_read_until.rs":"36d9a98149b2410894121ccba49e5134e3209826b2225acfc787016cea2bc92a","tests/io_window.rs":"0d18334b1eb35f5e93099e19c0cab22abe5971d8531176b81345fc89d07692a8","tests/io_write.rs":"701032ff3d5a6e6a3d8cb4e373d1c93e4708f2e5ee0a6742fa626f27b6094b4d","tests/lock_mutex.rs":"055ec0365e7ccd3698aa4b02336fd4dd801017aeb2c19345c58b43415d40fa06","tests/macro_comma_support.rs":"627024ccadfe95194469d5bae2cc29b897b0118a664d7222408a2e234a10e939","tests/object_safety.rs":"9d047190387ed8334113687003c23407c80c858411f5ec7d5c505500f9639dfc","tests/oneshot.rs":"2109a8b3b524f4b36be9fb100f9b8c0d38bbd38d51716adcafdb65994b4a81d6","tests/ready_queue.rs":"cf7047cefab12ff0e2e0ca1ff2123ae87b85a2464fa4c2b6a0e2fc8ee5f2
5aa1","tests/recurse.rs":"b01b3d73b69ad90a767d297f974dac435817c39e12556fa6a3e6c725dd84f706","tests/sink.rs":"a96700307d6b2bea87c5567a93e0ac81d9ebc7ed354a35fa1b893b39ac8b3759","tests/sink_fanout.rs":"67ab58422040308353955311f75222e55378e4cc34557c7b34140bd20c259132","tests/stream.rs":"78be652d49845b2562e275293398686079b512d88e12661ea644e0881c97be27","tests/stream_abortable.rs":"60052b83b5eeb2395b77bc213f35098d2d5880529f0d83884582a8bbff78b139","tests/stream_buffer_unordered.rs":"143ee19056b9ee9e480903cf4a1b00da7d4e528c5804569bf8c40869e6ac6eed","tests/stream_catch_unwind.rs":"5cdaaf70436c49d3a7107bdc5547ddb8757c3d2057635aded70e485d0cb9cbfc","tests/stream_futures_ordered.rs":"b6f8beafd37e44e82c1f6de322ecba752f9d833d5520ed3ea63c303ea1979644","tests/stream_futures_unordered.rs":"12a361ac0d3694908127372de8b710acc5ff08a7ad5e493ca795bdcfb9601c86","tests/stream_into_async_read.rs":"00ecb18289ebc8f46ea0cf43e0dce0631d7698bd1303a7bcd84d0addc9d8b645","tests/stream_peekable.rs":"c0addb0c510e13183ba3d6102633b75a9223651ae80a64542e913c712fe69a30","tests/stream_select_all.rs":"3a9045754939da5b30305e78f0571d79a03aaa77030c6ccf82225f076e9843c9","tests/stream_select_next_some.rs":"871edcee3ffc16c697251b29c9ba500aa4e3e503aa738748d7392e3462c82dce","tests/stream_split.rs":"074e9c9b51b6f7ea83d77347b5a0c8d414ca32b90445fec9b85f7f4cd2a6049f","tests/stream_try_stream.rs":"cf9af07a31697a43ab0071d958f71fba6d84b2f3031301fd309821a72f3de5f7","tests/stream_unfold.rs":"7c6fbd10c782828793cbe1eb347ec776d99b185dad498e886f7161da76f76880","tests/task_arc_wake.rs":"5a49d074d1d5d9d5ec383dcd9a3868f636c1d7e34662e2573e467948db126206","tests/task_atomic_waker.rs":"8e85b4bc1360788646a52633dfe896d852773d6b482f81626cf534b97b7d937a","tests/test_macro.rs":"a46a946169c342c576936b60909165a50b94350501280ed9bba89d365af69287","tests/try_join.rs":"65f282f8351bd9a74642f2465c7aaf72ee7097002920989f156d60271652549e","tests_disabled/all.rs":"ddcd8fefb0d4a4a91a78328e7e652c35f93dc3669639d76fa0f56452b51abc23","tests_disabled/bilock.rs":"74e598568403df45460085166b7b90012d40dae8670b1c8dec126322a4ce171f","tests_disabled/stream.rs":"10e701f0eb83bcc6ec74d96529ad7dad5ad38bf5826574049501aeb07c5b76fa"},"package":"28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4"} \ No newline at end of file
diff --git a/vendor/futures/Cargo.toml b/vendor/futures/Cargo.toml
new file mode 100644
index 000000000..ac96b0383
--- /dev/null
+++ b/vendor/futures/Cargo.toml
@@ -0,0 +1,87 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.45"
+name = "futures"
+version = "0.3.19"
+description = "An implementation of futures and streams featuring zero allocations,\ncomposability, and iterator-like interfaces.\n"
+homepage = "https://rust-lang.github.io/futures-rs"
+readme = "../README.md"
+keywords = ["futures", "async", "future"]
+categories = ["asynchronous"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/futures-rs"
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[package.metadata.playground]
+features = ["std", "async-await", "compat", "io-compat", "executor", "thread-pool"]
+[dependencies.futures-channel]
+version = "0.3.19"
+features = ["sink"]
+default-features = false
+
+[dependencies.futures-core]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-executor]
+version = "0.3.19"
+optional = true
+default-features = false
+
+[dependencies.futures-io]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-sink]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-task]
+version = "0.3.19"
+default-features = false
+
+[dependencies.futures-util]
+version = "0.3.19"
+features = ["sink"]
+default-features = false
+[dev-dependencies.assert_matches]
+version = "1.3.0"
+
+[dev-dependencies.pin-project]
+version = "1.0.1"
+
+[dev-dependencies.pin-utils]
+version = "0.1.0"
+
+[dev-dependencies.static_assertions]
+version = "1"
+
+[dev-dependencies.tokio]
+version = "0.1.11"
+
+[features]
+alloc = ["futures-core/alloc", "futures-task/alloc", "futures-sink/alloc", "futures-channel/alloc", "futures-util/alloc"]
+async-await = ["futures-util/async-await", "futures-util/async-await-macro"]
+bilock = ["futures-util/bilock"]
+cfg-target-has-atomic = []
+compat = ["std", "futures-util/compat"]
+default = ["std", "async-await", "executor"]
+executor = ["std", "futures-executor/std"]
+io-compat = ["compat", "futures-util/io-compat"]
+std = ["alloc", "futures-core/std", "futures-task/std", "futures-io/std", "futures-sink/std", "futures-util/std", "futures-util/io", "futures-util/channel"]
+thread-pool = ["executor", "futures-executor/thread-pool"]
+unstable = ["futures-core/unstable", "futures-task/unstable", "futures-channel/unstable", "futures-io/unstable", "futures-util/unstable"]
+write-all-vectored = ["futures-util/write-all-vectored"]
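The `[features]` table above is the knob downstream crates turn; the default set (`std`, `async-await`, `executor`) is what most of the examples in this patch rely on. Here is a minimal sketch of what those defaults unlock, assuming a dependency on the `futures` facade with its default features (this snippet is not part of the vendored sources):

```rust
// Sketch assuming the default features listed above (`std`, `async-await`,
// `executor`); not part of the vendored sources.
use futures::executor::block_on; // pulled in by the `executor` feature
use futures::join;               // pulled in by the `async-await` feature

fn main() {
    let (a, b) = block_on(async { join!(async { 1 }, async { 2 }) });
    assert_eq!((a, b), (1, 2));
}
```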
diff --git a/vendor/futures/LICENSE-APACHE b/vendor/futures/LICENSE-APACHE
new file mode 100644
index 000000000..9eb0b097f
--- /dev/null
+++ b/vendor/futures/LICENSE-APACHE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/futures/LICENSE-MIT b/vendor/futures/LICENSE-MIT
new file mode 100644
index 000000000..8ad082ec4
--- /dev/null
+++ b/vendor/futures/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright (c) 2016 Alex Crichton
+Copyright (c) 2017 The Tokio Authors
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/futures/src/lib.rs b/vendor/futures/src/lib.rs
new file mode 100644
index 000000000..8e21c8ebe
--- /dev/null
+++ b/vendor/futures/src/lib.rs
@@ -0,0 +1,194 @@
+//! Abstractions for asynchronous programming.
+//!
+//! This crate provides a number of core abstractions for writing asynchronous
+//! code:
+//!
+//! - [Futures](crate::future) are single eventual values produced by
+//! asynchronous computations. Some programming languages (e.g. JavaScript)
+//! call this concept "promise".
+//! - [Streams](crate::stream) represent a series of values
+//! produced asynchronously.
+//! - [Sinks](crate::sink) provide support for asynchronous writing of
+//! data.
+//! - [Executors](crate::executor) are responsible for running asynchronous
+//! tasks.
+//!
+//! The crate also contains abstractions for [asynchronous I/O](crate::io) and
+//! [cross-task communication](crate::channel).
+//!
+//! Underlying all of this is the *task system*, which is a form of lightweight
+//! threading. Large asynchronous computations are built up using futures,
+//! streams and sinks, and then spawned as independent tasks that are run to
+//! completion, but *do not block* the thread running them.
+//!
+//! The following example shows how the task system context is built and used
+//! by macros and keywords such as `async` blocks and `.await`.
+//!
+//! ```rust
+//! # use futures::channel::mpsc;
+//! # use futures::executor; ///standard executors to provide a context for futures and streams
+//! # use futures::executor::ThreadPool;
+//! # use futures::StreamExt;
+//! #
+//! fn main() {
+//! let pool = ThreadPool::new().expect("Failed to build pool");
+//! let (tx, rx) = mpsc::unbounded::<i32>();
+//!
+//! // Create a future by an async block, where async is responsible for an
+//! // implementation of Future. At this point no executor has been provided
+//! // to this future, so it will not be running.
+//! let fut_values = async {
+//! // Create another async block, again where the Future implementation
+//! // is generated by async. Since this is inside of a parent async block,
+//! // it will be provided with the executor of the parent block when the parent
+//! // block is executed.
+//! //
+//! // This executor chaining is done by Future::poll whose second argument
+//! // is a std::task::Context. This represents our executor, and the Future
+//! // implemented by this async block can be polled using the parent async
+//! // block's executor.
+//! let fut_tx_result = async move {
+//! (0..100).for_each(|v| {
+//! tx.unbounded_send(v).expect("Failed to send");
+//! })
+//! };
+//!
+//! // Use the provided thread pool to spawn the generated future
+//! // responsible for transmission
+//! pool.spawn_ok(fut_tx_result);
+//!
+//! let fut_values = rx
+//! .map(|v| v * 2)
+//! .collect();
+//!
+//! // Use the executor provided to this async block to wait for the
+//! // future to complete.
+//! fut_values.await
+//! };
+//!
+//! // Actually execute the above future, which will invoke Future::poll and
+//! // subsequently chain appropriate Future::poll and methods needing executors
+//! // to drive all futures. Eventually fut_values will be driven to completion.
+//! let values: Vec<i32> = executor::block_on(fut_values);
+//!
+//! println!("Values={:?}", values);
+//! }
+//! ```
+//!
+//! The majority of examples and code snippets in this crate assume that they are
+//! inside an async block as written above.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+ missing_debug_implementations,
+ missing_docs,
+ rust_2018_idioms,
+ single_use_lifetimes,
+ unreachable_pub
+)]
+#![doc(test(
+ no_crate_inject,
+ attr(
+ deny(warnings, rust_2018_idioms, single_use_lifetimes),
+ allow(dead_code, unused_assignments, unused_variables)
+ )
+))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[cfg(all(feature = "bilock", not(feature = "unstable")))]
+compile_error!("The `bilock` feature requires the `unstable` feature as an explicit opt-in to unstable features");
+
+#[doc(no_inline)]
+pub use futures_core::future::{Future, TryFuture};
+#[doc(no_inline)]
+pub use futures_util::future::{FutureExt, TryFutureExt};
+
+#[doc(no_inline)]
+pub use futures_core::stream::{Stream, TryStream};
+#[doc(no_inline)]
+pub use futures_util::stream::{StreamExt, TryStreamExt};
+
+#[doc(no_inline)]
+pub use futures_sink::Sink;
+#[doc(no_inline)]
+pub use futures_util::sink::SinkExt;
+
+#[cfg(feature = "std")]
+#[doc(no_inline)]
+pub use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite};
+#[cfg(feature = "std")]
+#[doc(no_inline)]
+pub use futures_util::{AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
+
+// Macro reexports
+pub use futures_core::ready; // Readiness propagation
+pub use futures_util::pin_mut;
+#[cfg(feature = "std")]
+#[cfg(feature = "async-await")]
+pub use futures_util::select;
+#[cfg(feature = "async-await")]
+pub use futures_util::{join, pending, poll, select_biased, try_join}; // Async-await
+
+// Module reexports
+#[doc(inline)]
+pub use futures_util::{future, never, sink, stream, task};
+
+#[cfg(feature = "std")]
+#[cfg(feature = "async-await")]
+pub use futures_util::stream_select;
+
+#[cfg(feature = "alloc")]
+#[doc(inline)]
+pub use futures_channel as channel;
+#[cfg(feature = "alloc")]
+#[doc(inline)]
+pub use futures_util::lock;
+
+#[cfg(feature = "std")]
+#[doc(inline)]
+pub use futures_util::io;
+
+#[cfg(feature = "executor")]
+#[cfg_attr(docsrs, doc(cfg(feature = "executor")))]
+#[doc(inline)]
+pub use futures_executor as executor;
+
+#[cfg(feature = "compat")]
+#[cfg_attr(docsrs, doc(cfg(feature = "compat")))]
+#[doc(inline)]
+pub use futures_util::compat;
+
+pub mod prelude {
+ //! A "prelude" for crates using the `futures` crate.
+ //!
+ //! This prelude is similar to the standard library's prelude in that you'll
+ //! almost always want to import its entire contents, but unlike the
+ //! standard library's prelude you'll have to do so manually:
+ //!
+ //! ```
+ //! # #[allow(unused_imports)]
+ //! use futures::prelude::*;
+ //! ```
+ //!
+ //! The prelude may grow over time as additional items see ubiquitous use.
+
+ pub use crate::future::{self, Future, TryFuture};
+ pub use crate::sink::{self, Sink};
+ pub use crate::stream::{self, Stream, TryStream};
+
+ #[doc(no_inline)]
+ pub use crate::future::{FutureExt as _, TryFutureExt as _};
+ #[doc(no_inline)]
+ pub use crate::sink::SinkExt as _;
+ #[doc(no_inline)]
+ pub use crate::stream::{StreamExt as _, TryStreamExt as _};
+
+ #[cfg(feature = "std")]
+ pub use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite};
+
+ #[cfg(feature = "std")]
+ #[doc(no_inline)]
+ pub use crate::io::{
+ AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _,
+ };
+}
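To see how the re-exports and the prelude above are meant to be consumed together, here is a minimal sketch (again assuming the default `std`/`executor` features, and not part of the vendored sources) that pushes values into an `mpsc` channel through `SinkExt` and reads them back through `StreamExt`:

```rust
// Sketch only: exercises the prelude re-exports shown above.
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::prelude::*; // brings the StreamExt/SinkExt methods into scope

fn main() {
    let (mut tx, rx) = mpsc::channel::<i32>(8);

    block_on(async {
        // `SinkExt::send` comes in through the prelude's `SinkExt as _` import.
        for i in 0..3 {
            tx.send(i).await.unwrap();
        }
        drop(tx); // close the channel so the receiving stream terminates

        // `StreamExt::map`/`collect` come in through `StreamExt as _`.
        let doubled: Vec<i32> = rx.map(|v| v * 2).collect().await;
        assert_eq!(doubled, vec![0, 2, 4]);
    });
}
```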
diff --git a/vendor/futures/tests/_require_features.rs b/vendor/futures/tests/_require_features.rs
new file mode 100644
index 000000000..8046cc99a
--- /dev/null
+++ b/vendor/futures/tests/_require_features.rs
@@ -0,0 +1,13 @@
+#[cfg(not(all(
+ feature = "std",
+ feature = "alloc",
+ feature = "async-await",
+ feature = "compat",
+ feature = "io-compat",
+ feature = "executor",
+ feature = "thread-pool",
+)))]
+compile_error!(
+ "`futures` tests must have all stable features activated: \
+ use `--all-features` or `--features default,thread-pool,io-compat`"
+);
diff --git a/vendor/futures/tests/async_await_macros.rs b/vendor/futures/tests/async_await_macros.rs
new file mode 100644
index 000000000..ce1f3a337
--- /dev/null
+++ b/vendor/futures/tests/async_await_macros.rs
@@ -0,0 +1,389 @@
+use futures::channel::{mpsc, oneshot};
+use futures::executor::block_on;
+use futures::future::{self, poll_fn, FutureExt};
+use futures::sink::SinkExt;
+use futures::stream::StreamExt;
+use futures::task::{Context, Poll};
+use futures::{
+ join, pending, pin_mut, poll, select, select_biased, stream, stream_select, try_join,
+};
+use std::mem;
+
+#[test]
+fn poll_and_pending() {
+ let pending_once = async { pending!() };
+ block_on(async {
+ pin_mut!(pending_once);
+ assert_eq!(Poll::Pending, poll!(&mut pending_once));
+ assert_eq!(Poll::Ready(()), poll!(&mut pending_once));
+ });
+}
+
+#[test]
+fn join() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = oneshot::channel::<i32>();
+
+ let fut = async {
+ let res = join!(rx1, rx2);
+ assert_eq!((Ok(1), Ok(2)), res);
+ };
+
+ block_on(async {
+ pin_mut!(fut);
+ assert_eq!(Poll::Pending, poll!(&mut fut));
+ tx1.send(1).unwrap();
+ assert_eq!(Poll::Pending, poll!(&mut fut));
+ tx2.send(2).unwrap();
+ assert_eq!(Poll::Ready(()), poll!(&mut fut));
+ });
+}
+
+#[test]
+fn select() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (_tx2, rx2) = oneshot::channel::<i32>();
+ tx1.send(1).unwrap();
+ let mut ran = false;
+ block_on(async {
+ select! {
+ res = rx1.fuse() => {
+ assert_eq!(Ok(1), res);
+ ran = true;
+ },
+ _ = rx2.fuse() => unreachable!(),
+ }
+ });
+ assert!(ran);
+}
+
+#[test]
+fn select_biased() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (_tx2, rx2) = oneshot::channel::<i32>();
+ tx1.send(1).unwrap();
+ let mut ran = false;
+ block_on(async {
+ select_biased! {
+ res = rx1.fuse() => {
+ assert_eq!(Ok(1), res);
+ ran = true;
+ },
+ _ = rx2.fuse() => unreachable!(),
+ }
+ });
+ assert!(ran);
+}
+
+#[test]
+fn select_streams() {
+ let (mut tx1, rx1) = mpsc::channel::<i32>(1);
+ let (mut tx2, rx2) = mpsc::channel::<i32>(1);
+ let mut rx1 = rx1.fuse();
+ let mut rx2 = rx2.fuse();
+ let mut ran = false;
+ let mut total = 0;
+ block_on(async {
+ let mut tx1_opt;
+ let mut tx2_opt;
+ select! {
+ _ = rx1.next() => panic!(),
+ _ = rx2.next() => panic!(),
+ default => {
+ tx1.send(2).await.unwrap();
+ tx2.send(3).await.unwrap();
+ tx1_opt = Some(tx1);
+ tx2_opt = Some(tx2);
+ }
+ complete => panic!(),
+ }
+ loop {
+ select! {
+ // runs first and again after default
+ x = rx1.next() => if let Some(x) = x { total += x; },
+ // runs second and again after default
+ x = rx2.next() => if let Some(x) = x { total += x; },
+ // runs third
+ default => {
+ assert_eq!(total, 5);
+ ran = true;
+ drop(tx1_opt.take().unwrap());
+ drop(tx2_opt.take().unwrap());
+ },
+ // runs last
+ complete => break,
+ };
+ }
+ });
+ assert!(ran);
+}
+
+#[test]
+fn select_can_move_uncompleted_futures() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = oneshot::channel::<i32>();
+ tx1.send(1).unwrap();
+ tx2.send(2).unwrap();
+ let mut ran = false;
+ let mut rx1 = rx1.fuse();
+ let mut rx2 = rx2.fuse();
+ block_on(async {
+ select! {
+ res = rx1 => {
+ assert_eq!(Ok(1), res);
+ assert_eq!(Ok(2), rx2.await);
+ ran = true;
+ },
+ res = rx2 => {
+ assert_eq!(Ok(2), res);
+ assert_eq!(Ok(1), rx1.await);
+ ran = true;
+ },
+ }
+ });
+ assert!(ran);
+}
+
+#[test]
+fn select_nested() {
+ let mut outer_fut = future::ready(1);
+ let mut inner_fut = future::ready(2);
+ let res = block_on(async {
+ select! {
+ x = outer_fut => {
+ select! {
+ y = inner_fut => x + y,
+ }
+ }
+ }
+ });
+ assert_eq!(res, 3);
+}
+
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+#[test]
+fn select_size() {
+ let fut = async {
+ let mut ready = future::ready(0i32);
+ select! {
+ _ = ready => {},
+ }
+ };
+ assert_eq!(mem::size_of_val(&fut), 24);
+
+ let fut = async {
+ let mut ready1 = future::ready(0i32);
+ let mut ready2 = future::ready(0i32);
+ select! {
+ _ = ready1 => {},
+ _ = ready2 => {},
+ }
+ };
+ assert_eq!(mem::size_of_val(&fut), 40);
+}
+
+#[test]
+fn select_on_non_unpin_expressions() {
+ // The returned Future is !Unpin
+ let make_non_unpin_fut = || async { 5 };
+
+ let res = block_on(async {
+ let select_res;
+ select! {
+ value_1 = make_non_unpin_fut().fuse() => select_res = value_1,
+ value_2 = make_non_unpin_fut().fuse() => select_res = value_2,
+ };
+ select_res
+ });
+ assert_eq!(res, 5);
+}
+
+#[test]
+fn select_on_non_unpin_expressions_with_default() {
+ // The returned Future is !Unpin
+ let make_non_unpin_fut = || async { 5 };
+
+ let res = block_on(async {
+ let select_res;
+ select! {
+ value_1 = make_non_unpin_fut().fuse() => select_res = value_1,
+ value_2 = make_non_unpin_fut().fuse() => select_res = value_2,
+ default => select_res = 7,
+ };
+ select_res
+ });
+ assert_eq!(res, 5);
+}
+
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+#[test]
+fn select_on_non_unpin_size() {
+ // The returned Future is !Unpin
+ let make_non_unpin_fut = || async { 5 };
+
+ let fut = async {
+ let select_res;
+ select! {
+ value_1 = make_non_unpin_fut().fuse() => select_res = value_1,
+ value_2 = make_non_unpin_fut().fuse() => select_res = value_2,
+ };
+ select_res
+ };
+
+ assert_eq!(32, mem::size_of_val(&fut));
+}
+
+#[test]
+fn select_can_be_used_as_expression() {
+ block_on(async {
+ let res = select! {
+ x = future::ready(7) => x,
+ y = future::ready(3) => y + 1,
+ };
+ assert!(res == 7 || res == 4);
+ });
+}
+
+#[test]
+fn select_with_default_can_be_used_as_expression() {
+ fn poll_always_pending<T>(_cx: &mut Context<'_>) -> Poll<T> {
+ Poll::Pending
+ }
+
+ block_on(async {
+ let res = select! {
+ x = poll_fn(poll_always_pending::<i32>).fuse() => x,
+ y = poll_fn(poll_always_pending::<i32>).fuse() => y + 1,
+ default => 99,
+ };
+ assert_eq!(res, 99);
+ });
+}
+
+#[test]
+fn select_with_complete_can_be_used_as_expression() {
+ block_on(async {
+ let res = select! {
+ x = future::pending::<i32>() => x,
+ y = future::pending::<i32>() => y + 1,
+ default => 99,
+ complete => 237,
+ };
+ assert_eq!(res, 237);
+ });
+}
+
+#[test]
+#[allow(unused_assignments)]
+fn select_on_mutable_borrowing_future_with_same_borrow_in_block() {
+ async fn require_mutable(_: &mut i32) {}
+ async fn async_noop() {}
+
+ block_on(async {
+ let mut value = 234;
+ select! {
+ _ = require_mutable(&mut value).fuse() => { },
+ _ = async_noop().fuse() => {
+ value += 5;
+ },
+ }
+ });
+}
+
+#[test]
+#[allow(unused_assignments)]
+fn select_on_mutable_borrowing_future_with_same_borrow_in_block_and_default() {
+ async fn require_mutable(_: &mut i32) {}
+ async fn async_noop() {}
+
+ block_on(async {
+ let mut value = 234;
+ select! {
+ _ = require_mutable(&mut value).fuse() => { },
+ _ = async_noop().fuse() => {
+ value += 5;
+ },
+ default => {
+ value += 27;
+ },
+ }
+ });
+}
+
+#[test]
+#[allow(unused_assignments)]
+fn stream_select() {
+ // stream_select! macro
+ block_on(async {
+ let endless_ints = |i| stream::iter(vec![i].into_iter().cycle());
+
+ let mut endless_ones = stream_select!(endless_ints(1i32), stream::pending());
+ assert_eq!(endless_ones.next().await, Some(1));
+ assert_eq!(endless_ones.next().await, Some(1));
+
+ let mut finite_list =
+ stream_select!(stream::iter(vec![1].into_iter()), stream::iter(vec![1].into_iter()));
+ assert_eq!(finite_list.next().await, Some(1));
+ assert_eq!(finite_list.next().await, Some(1));
+ assert_eq!(finite_list.next().await, None);
+
+ let endless_mixed = stream_select!(endless_ints(1i32), endless_ints(2), endless_ints(3));
+ // Take 1000, and assert a somewhat even distribution of values.
+ // The fairness is randomized, but over 1000 samples we should be pretty close to even.
+ // This test may be a bit flaky. Feel free to adjust the margins as you see fit.
+ let mut count = 0;
+ let results = endless_mixed
+ .take_while(move |_| {
+ count += 1;
+ let ret = count < 1000;
+ async move { ret }
+ })
+ .collect::<Vec<_>>()
+ .await;
+ assert!(results.iter().filter(|x| **x == 1).count() >= 299);
+ assert!(results.iter().filter(|x| **x == 2).count() >= 299);
+ assert!(results.iter().filter(|x| **x == 3).count() >= 299);
+ });
+}
+
+#[test]
+fn join_size() {
+ let fut = async {
+ let ready = future::ready(0i32);
+ join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(0i32);
+ let ready2 = future::ready(0i32);
+ join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
+
+#[test]
+fn try_join_size() {
+ let fut = async {
+ let ready = future::ready(Ok::<i32, i32>(0));
+ try_join!(ready)
+ };
+ assert_eq!(mem::size_of_val(&fut), 16);
+
+ let fut = async {
+ let ready1 = future::ready(Ok::<i32, i32>(0));
+ let ready2 = future::ready(Ok::<i32, i32>(0));
+ try_join!(ready1, ready2)
+ };
+ assert_eq!(mem::size_of_val(&fut), 28);
+}
+
+#[test]
+fn join_doesnt_require_unpin() {
+ let _ = async { join!(async {}, async {}) };
+}
+
+#[test]
+fn try_join_doesnt_require_unpin() {
+ let _ = async { try_join!(async { Ok::<(), ()>(()) }, async { Ok::<(), ()>(()) },) };
+}
diff --git a/vendor/futures/tests/auto_traits.rs b/vendor/futures/tests/auto_traits.rs
new file mode 100644
index 000000000..b3d8b0077
--- /dev/null
+++ b/vendor/futures/tests/auto_traits.rs
@@ -0,0 +1,1891 @@
+#![cfg(feature = "compat")]
+
+//! Assert Send/Sync/Unpin for all public types.
+
+use futures::{
+ future::Future,
+ sink::Sink,
+ stream::Stream,
+ task::{Context, Poll},
+};
+use static_assertions::{assert_impl_all as assert_impl, assert_not_impl_all as assert_not_impl};
+use std::marker::PhantomPinned;
+use std::{marker::PhantomData, pin::Pin};
+
+pub type LocalFuture<T = *const ()> = Pin<Box<dyn Future<Output = T>>>;
+pub type LocalTryFuture<T = *const (), E = *const ()> = LocalFuture<Result<T, E>>;
+pub type SendFuture<T = *const ()> = Pin<Box<dyn Future<Output = T> + Send>>;
+pub type SendTryFuture<T = *const (), E = *const ()> = SendFuture<Result<T, E>>;
+pub type SyncFuture<T = *const ()> = Pin<Box<dyn Future<Output = T> + Sync>>;
+pub type SyncTryFuture<T = *const (), E = *const ()> = SyncFuture<Result<T, E>>;
+pub type UnpinFuture<T = PhantomPinned> = LocalFuture<T>;
+pub type UnpinTryFuture<T = PhantomPinned, E = PhantomPinned> = UnpinFuture<Result<T, E>>;
+pub struct PinnedFuture<T = PhantomPinned>(PhantomPinned, PhantomData<T>);
+impl<T> Future for PinnedFuture<T> {
+ type Output = T;
+ fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
+ unimplemented!()
+ }
+}
+pub type PinnedTryFuture<T = PhantomPinned, E = PhantomPinned> = PinnedFuture<Result<T, E>>;
+
+pub type LocalStream<T = *const ()> = Pin<Box<dyn Stream<Item = T>>>;
+pub type LocalTryStream<T = *const (), E = *const ()> = LocalStream<Result<T, E>>;
+pub type SendStream<T = *const ()> = Pin<Box<dyn Stream<Item = T> + Send>>;
+pub type SendTryStream<T = *const (), E = *const ()> = SendStream<Result<T, E>>;
+pub type SyncStream<T = *const ()> = Pin<Box<dyn Stream<Item = T> + Sync>>;
+pub type SyncTryStream<T = *const (), E = *const ()> = SyncStream<Result<T, E>>;
+pub type UnpinStream<T = PhantomPinned> = LocalStream<T>;
+pub type UnpinTryStream<T = PhantomPinned, E = PhantomPinned> = UnpinStream<Result<T, E>>;
+pub struct PinnedStream<T = PhantomPinned>(PhantomPinned, PhantomData<T>);
+impl<T> Stream for PinnedStream<T> {
+ type Item = T;
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ unimplemented!()
+ }
+}
+pub type PinnedTryStream<T = PhantomPinned, E = PhantomPinned> = PinnedStream<Result<T, E>>;
+
+pub type LocalSink<T = *const (), E = *const ()> = Pin<Box<dyn Sink<T, Error = E>>>;
+pub type SendSink<T = *const (), E = *const ()> = Pin<Box<dyn Sink<T, Error = E> + Send>>;
+pub type SyncSink<T = *const (), E = *const ()> = Pin<Box<dyn Sink<T, Error = E> + Sync>>;
+pub type UnpinSink<T = PhantomPinned, E = PhantomPinned> = LocalSink<T, E>;
+pub struct PinnedSink<T = PhantomPinned, E = PhantomPinned>(PhantomPinned, PhantomData<(T, E)>);
+impl<T, E> Sink<T> for PinnedSink<T, E> {
+ type Error = E;
+ fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ unimplemented!()
+ }
+ fn start_send(self: Pin<&mut Self>, _: T) -> Result<(), Self::Error> {
+ unimplemented!()
+ }
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ unimplemented!()
+ }
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ unimplemented!()
+ }
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::channel`.
+pub mod channel {
+ use super::*;
+ use futures::channel::*;
+
+ assert_impl!(mpsc::Receiver<()>: Send);
+ assert_not_impl!(mpsc::Receiver<*const ()>: Send);
+ assert_impl!(mpsc::Receiver<()>: Sync);
+ assert_not_impl!(mpsc::Receiver<*const ()>: Sync);
+ assert_impl!(mpsc::Receiver<PhantomPinned>: Unpin);
+
+ assert_impl!(mpsc::SendError: Send);
+ assert_impl!(mpsc::SendError: Sync);
+ assert_impl!(mpsc::SendError: Unpin);
+
+ assert_impl!(mpsc::Sender<()>: Send);
+ assert_not_impl!(mpsc::Sender<*const ()>: Send);
+ assert_impl!(mpsc::Sender<()>: Sync);
+ assert_not_impl!(mpsc::Sender<*const ()>: Sync);
+ assert_impl!(mpsc::Sender<PhantomPinned>: Unpin);
+
+ assert_impl!(mpsc::TryRecvError: Send);
+ assert_impl!(mpsc::TryRecvError: Sync);
+ assert_impl!(mpsc::TryRecvError: Unpin);
+
+ assert_impl!(mpsc::TrySendError<()>: Send);
+ assert_not_impl!(mpsc::TrySendError<*const ()>: Send);
+ assert_impl!(mpsc::TrySendError<()>: Sync);
+ assert_not_impl!(mpsc::TrySendError<*const ()>: Sync);
+ assert_impl!(mpsc::TrySendError<()>: Unpin);
+ assert_not_impl!(mpsc::TrySendError<PhantomPinned>: Unpin);
+
+ assert_impl!(mpsc::UnboundedReceiver<()>: Send);
+ assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Send);
+ assert_impl!(mpsc::UnboundedReceiver<()>: Sync);
+ assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Sync);
+ assert_impl!(mpsc::UnboundedReceiver<PhantomPinned>: Unpin);
+
+ assert_impl!(mpsc::UnboundedSender<()>: Send);
+ assert_not_impl!(mpsc::UnboundedSender<*const ()>: Send);
+ assert_impl!(mpsc::UnboundedSender<()>: Sync);
+ assert_not_impl!(mpsc::UnboundedSender<*const ()>: Sync);
+ assert_impl!(mpsc::UnboundedSender<PhantomPinned>: Unpin);
+
+ assert_impl!(oneshot::Canceled: Send);
+ assert_impl!(oneshot::Canceled: Sync);
+ assert_impl!(oneshot::Canceled: Unpin);
+
+ assert_impl!(oneshot::Cancellation<()>: Send);
+ assert_not_impl!(oneshot::Cancellation<*const ()>: Send);
+ assert_impl!(oneshot::Cancellation<()>: Sync);
+ assert_not_impl!(oneshot::Cancellation<*const ()>: Sync);
+ assert_impl!(oneshot::Cancellation<PhantomPinned>: Unpin);
+
+ assert_impl!(oneshot::Receiver<()>: Send);
+ assert_not_impl!(oneshot::Receiver<*const ()>: Send);
+ assert_impl!(oneshot::Receiver<()>: Sync);
+ assert_not_impl!(oneshot::Receiver<*const ()>: Sync);
+ assert_impl!(oneshot::Receiver<PhantomPinned>: Unpin);
+
+ assert_impl!(oneshot::Sender<()>: Send);
+ assert_not_impl!(oneshot::Sender<*const ()>: Send);
+ assert_impl!(oneshot::Sender<()>: Sync);
+ assert_not_impl!(oneshot::Sender<*const ()>: Sync);
+ assert_impl!(oneshot::Sender<PhantomPinned>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::compat`.
+pub mod compat {
+ use super::*;
+ use futures::compat::*;
+
+ assert_impl!(Compat<()>: Send);
+ assert_not_impl!(Compat<*const ()>: Send);
+ assert_impl!(Compat<()>: Sync);
+ assert_not_impl!(Compat<*const ()>: Sync);
+ assert_impl!(Compat<()>: Unpin);
+ assert_not_impl!(Compat<PhantomPinned>: Unpin);
+
+ assert_impl!(Compat01As03<()>: Send);
+ assert_not_impl!(Compat01As03<*const ()>: Send);
+ assert_not_impl!(Compat01As03<()>: Sync);
+ assert_impl!(Compat01As03<PhantomPinned>: Unpin);
+
+ assert_impl!(Compat01As03Sink<(), ()>: Send);
+ assert_not_impl!(Compat01As03Sink<(), *const ()>: Send);
+ assert_not_impl!(Compat01As03Sink<*const (), ()>: Send);
+ assert_not_impl!(Compat01As03Sink<(), ()>: Sync);
+ assert_impl!(Compat01As03Sink<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(CompatSink<(), *const ()>: Send);
+ assert_not_impl!(CompatSink<*const (), ()>: Send);
+ assert_impl!(CompatSink<(), *const ()>: Sync);
+ assert_not_impl!(CompatSink<*const (), ()>: Sync);
+ assert_impl!(CompatSink<(), PhantomPinned>: Unpin);
+ assert_not_impl!(CompatSink<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Executor01As03<()>: Send);
+ assert_not_impl!(Executor01As03<*const ()>: Send);
+ assert_impl!(Executor01As03<()>: Sync);
+ assert_not_impl!(Executor01As03<*const ()>: Sync);
+ assert_impl!(Executor01As03<()>: Unpin);
+ assert_not_impl!(Executor01As03<PhantomPinned>: Unpin);
+
+ assert_impl!(Executor01Future: Send);
+ assert_not_impl!(Executor01Future: Sync);
+ assert_impl!(Executor01Future: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::executor`.
+pub mod executor {
+ use super::*;
+ use futures::executor::*;
+
+ assert_impl!(BlockingStream<SendStream>: Send);
+ assert_not_impl!(BlockingStream<LocalStream>: Send);
+ assert_impl!(BlockingStream<SyncStream>: Sync);
+ assert_not_impl!(BlockingStream<LocalStream>: Sync);
+ assert_impl!(BlockingStream<UnpinStream>: Unpin);
+ // BlockingStream requires `S: Unpin`
+ // assert_not_impl!(BlockingStream<PinnedStream>: Unpin);
+
+ assert_impl!(Enter: Send);
+ assert_impl!(Enter: Sync);
+ assert_impl!(Enter: Unpin);
+
+ assert_impl!(EnterError: Send);
+ assert_impl!(EnterError: Sync);
+ assert_impl!(EnterError: Unpin);
+
+ assert_not_impl!(LocalPool: Send);
+ assert_not_impl!(LocalPool: Sync);
+ assert_impl!(LocalPool: Unpin);
+
+ assert_not_impl!(LocalSpawner: Send);
+ assert_not_impl!(LocalSpawner: Sync);
+ assert_impl!(LocalSpawner: Unpin);
+
+ assert_impl!(ThreadPool: Send);
+ assert_impl!(ThreadPool: Sync);
+ assert_impl!(ThreadPool: Unpin);
+
+ assert_impl!(ThreadPoolBuilder: Send);
+ assert_impl!(ThreadPoolBuilder: Sync);
+ assert_impl!(ThreadPoolBuilder: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::future`.
+pub mod future {
+ use super::*;
+ use futures::future::*;
+
+ assert_impl!(AbortHandle: Send);
+ assert_impl!(AbortHandle: Sync);
+ assert_impl!(AbortHandle: Unpin);
+
+ assert_impl!(AbortRegistration: Send);
+ assert_impl!(AbortRegistration: Sync);
+ assert_impl!(AbortRegistration: Unpin);
+
+ assert_impl!(Abortable<SendFuture>: Send);
+ assert_not_impl!(Abortable<LocalFuture>: Send);
+ assert_impl!(Abortable<SyncFuture>: Sync);
+ assert_not_impl!(Abortable<LocalFuture>: Sync);
+ assert_impl!(Abortable<UnpinFuture>: Unpin);
+ assert_not_impl!(Abortable<PinnedFuture>: Unpin);
+
+ assert_impl!(Aborted: Send);
+ assert_impl!(Aborted: Sync);
+ assert_impl!(Aborted: Unpin);
+
+ assert_impl!(AndThen<SendFuture, SendFuture, ()>: Send);
+ assert_not_impl!(AndThen<SendFuture, LocalFuture, ()>: Send);
+ assert_not_impl!(AndThen<LocalFuture, SendFuture, ()>: Send);
+ assert_not_impl!(AndThen<SendFuture, SendFuture, *const ()>: Send);
+ assert_impl!(AndThen<SyncFuture, SyncFuture, ()>: Sync);
+ assert_not_impl!(AndThen<SyncFuture, LocalFuture, ()>: Sync);
+ assert_not_impl!(AndThen<LocalFuture, SyncFuture, ()>: Sync);
+ assert_not_impl!(AndThen<SyncFuture, SyncFuture, *const ()>: Sync);
+ assert_impl!(AndThen<UnpinFuture, UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(AndThen<PinnedFuture, UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(AndThen<UnpinFuture, PinnedFuture, PhantomPinned>: Unpin);
+
+ assert_impl!(CatchUnwind<SendFuture>: Send);
+ assert_not_impl!(CatchUnwind<LocalFuture>: Send);
+ assert_impl!(CatchUnwind<SyncFuture>: Sync);
+ assert_not_impl!(CatchUnwind<LocalFuture>: Sync);
+ assert_impl!(CatchUnwind<UnpinFuture>: Unpin);
+ assert_not_impl!(CatchUnwind<PinnedFuture>: Unpin);
+
+ assert_impl!(ErrInto<SendTryFuture, *const ()>: Send);
+ assert_not_impl!(ErrInto<LocalTryFuture, ()>: Send);
+ assert_impl!(ErrInto<SyncTryFuture, *const ()>: Sync);
+ assert_not_impl!(ErrInto<LocalTryFuture, ()>: Sync);
+ assert_impl!(ErrInto<UnpinTryFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(ErrInto<PinnedTryFuture, PhantomPinned>: Unpin);
+
+ assert_impl!(Flatten<SendFuture<()>>: Send);
+ assert_not_impl!(Flatten<LocalFuture>: Send);
+ assert_not_impl!(Flatten<SendFuture>: Send);
+ assert_impl!(Flatten<SyncFuture<()>>: Sync);
+ assert_not_impl!(Flatten<LocalFuture>: Sync);
+ assert_not_impl!(Flatten<SyncFuture>: Sync);
+ assert_impl!(Flatten<UnpinFuture<()>>: Unpin);
+ assert_not_impl!(Flatten<PinnedFuture>: Unpin);
+ assert_not_impl!(Flatten<UnpinFuture>: Unpin);
+
+ assert_impl!(FlattenSink<SendFuture, ()>: Send);
+ assert_not_impl!(FlattenSink<SendFuture, *const ()>: Send);
+ assert_not_impl!(FlattenSink<LocalFuture, ()>: Send);
+ assert_impl!(FlattenSink<SyncFuture, ()>: Sync);
+ assert_not_impl!(FlattenSink<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(FlattenSink<LocalFuture, ()>: Sync);
+ assert_impl!(FlattenSink<UnpinFuture, ()>: Unpin);
+ assert_not_impl!(FlattenSink<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(FlattenSink<PinnedFuture, ()>: Unpin);
+
+ assert_impl!(FlattenStream<SendFuture<()>>: Send);
+ assert_not_impl!(FlattenStream<LocalFuture>: Send);
+ assert_not_impl!(FlattenStream<SendFuture>: Send);
+ assert_impl!(FlattenStream<SyncFuture<()>>: Sync);
+ assert_not_impl!(FlattenStream<LocalFuture>: Sync);
+ assert_not_impl!(FlattenStream<SyncFuture>: Sync);
+ assert_impl!(FlattenStream<UnpinFuture<()>>: Unpin);
+ assert_not_impl!(FlattenStream<PinnedFuture>: Unpin);
+ assert_not_impl!(FlattenStream<UnpinFuture>: Unpin);
+
+ assert_impl!(Fuse<SendFuture>: Send);
+ assert_not_impl!(Fuse<LocalFuture>: Send);
+ assert_impl!(Fuse<SyncFuture>: Sync);
+ assert_not_impl!(Fuse<LocalFuture>: Sync);
+ assert_impl!(Fuse<UnpinFuture>: Unpin);
+ assert_not_impl!(Fuse<PinnedFuture>: Unpin);
+
+ assert_impl!(FutureObj<*const ()>: Send);
+ assert_not_impl!(FutureObj<()>: Sync);
+ assert_impl!(FutureObj<PhantomPinned>: Unpin);
+
+ assert_impl!(Inspect<SendFuture, ()>: Send);
+ assert_not_impl!(Inspect<SendFuture, *const ()>: Send);
+ assert_not_impl!(Inspect<LocalFuture, ()>: Send);
+ assert_impl!(Inspect<SyncFuture, ()>: Sync);
+ assert_not_impl!(Inspect<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(Inspect<LocalFuture, ()>: Sync);
+ assert_impl!(Inspect<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(Inspect<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(InspectErr<SendFuture, ()>: Send);
+ assert_not_impl!(InspectErr<SendFuture, *const ()>: Send);
+ assert_not_impl!(InspectErr<LocalFuture, ()>: Send);
+ assert_impl!(InspectErr<SyncFuture, ()>: Sync);
+ assert_not_impl!(InspectErr<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(InspectErr<LocalFuture, ()>: Sync);
+ assert_impl!(InspectErr<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(InspectErr<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(InspectOk<SendFuture, ()>: Send);
+ assert_not_impl!(InspectOk<SendFuture, *const ()>: Send);
+ assert_not_impl!(InspectOk<LocalFuture, ()>: Send);
+ assert_impl!(InspectOk<SyncFuture, ()>: Sync);
+ assert_not_impl!(InspectOk<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(InspectOk<LocalFuture, ()>: Sync);
+ assert_impl!(InspectOk<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(InspectOk<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(IntoFuture<SendFuture>: Send);
+ assert_not_impl!(IntoFuture<LocalFuture>: Send);
+ assert_impl!(IntoFuture<SyncFuture>: Sync);
+ assert_not_impl!(IntoFuture<LocalFuture>: Sync);
+ assert_impl!(IntoFuture<UnpinFuture>: Unpin);
+ assert_not_impl!(IntoFuture<PinnedFuture>: Unpin);
+
+ assert_impl!(IntoStream<SendFuture>: Send);
+ assert_not_impl!(IntoStream<LocalFuture>: Send);
+ assert_impl!(IntoStream<SyncFuture>: Sync);
+ assert_not_impl!(IntoStream<LocalFuture>: Sync);
+ assert_impl!(IntoStream<UnpinFuture>: Unpin);
+ assert_not_impl!(IntoStream<PinnedFuture>: Unpin);
+
+ assert_impl!(Join<SendFuture<()>, SendFuture<()>>: Send);
+ assert_not_impl!(Join<SendFuture<()>, SendFuture>: Send);
+ assert_not_impl!(Join<SendFuture, SendFuture<()>>: Send);
+ assert_not_impl!(Join<SendFuture, LocalFuture>: Send);
+ assert_not_impl!(Join<LocalFuture, SendFuture>: Send);
+ assert_impl!(Join<SyncFuture<()>, SyncFuture<()>>: Sync);
+ assert_not_impl!(Join<SyncFuture<()>, SyncFuture>: Sync);
+ assert_not_impl!(Join<SyncFuture, SyncFuture<()>>: Sync);
+ assert_not_impl!(Join<SyncFuture, LocalFuture>: Sync);
+ assert_not_impl!(Join<LocalFuture, SyncFuture>: Sync);
+ assert_impl!(Join<UnpinFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(Join<PinnedFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(Join<UnpinFuture, PinnedFuture>: Unpin);
+
+ // Join3, Join4, Join5 are the same as Join
+
+ assert_impl!(JoinAll<SendFuture<()>>: Send);
+ assert_not_impl!(JoinAll<LocalFuture>: Send);
+ assert_not_impl!(JoinAll<SendFuture>: Send);
+ assert_impl!(JoinAll<SyncFuture<()>>: Sync);
+ assert_not_impl!(JoinAll<LocalFuture>: Sync);
+ assert_not_impl!(JoinAll<SyncFuture>: Sync);
+ assert_impl!(JoinAll<PinnedFuture>: Unpin);
+
+ assert_impl!(Lazy<()>: Send);
+ assert_not_impl!(Lazy<*const ()>: Send);
+ assert_impl!(Lazy<()>: Sync);
+ assert_not_impl!(Lazy<*const ()>: Sync);
+ assert_impl!(Lazy<PhantomPinned>: Unpin);
+
+ assert_not_impl!(LocalFutureObj<()>: Send);
+ assert_not_impl!(LocalFutureObj<()>: Sync);
+ assert_impl!(LocalFutureObj<PhantomPinned>: Unpin);
+
+ assert_impl!(Map<SendFuture, ()>: Send);
+ assert_not_impl!(Map<SendFuture, *const ()>: Send);
+ assert_not_impl!(Map<LocalFuture, ()>: Send);
+ assert_impl!(Map<SyncFuture, ()>: Sync);
+ assert_not_impl!(Map<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(Map<LocalFuture, ()>: Sync);
+ assert_impl!(Map<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(Map<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapErr<SendFuture, ()>: Send);
+ assert_not_impl!(MapErr<SendFuture, *const ()>: Send);
+ assert_not_impl!(MapErr<LocalFuture, ()>: Send);
+ assert_impl!(MapErr<SyncFuture, ()>: Sync);
+ assert_not_impl!(MapErr<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(MapErr<LocalFuture, ()>: Sync);
+ assert_impl!(MapErr<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(MapErr<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapInto<SendFuture, *const ()>: Send);
+ assert_not_impl!(MapInto<LocalFuture, ()>: Send);
+ assert_impl!(MapInto<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(MapInto<LocalFuture, ()>: Sync);
+ assert_impl!(MapInto<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(MapInto<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapOk<SendFuture, ()>: Send);
+ assert_not_impl!(MapOk<SendFuture, *const ()>: Send);
+ assert_not_impl!(MapOk<LocalFuture, ()>: Send);
+ assert_impl!(MapOk<SyncFuture, ()>: Sync);
+ assert_not_impl!(MapOk<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(MapOk<LocalFuture, ()>: Sync);
+ assert_impl!(MapOk<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(MapOk<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapOkOrElse<SendFuture, (), ()>: Send);
+ assert_not_impl!(MapOkOrElse<SendFuture, (), *const ()>: Send);
+ assert_not_impl!(MapOkOrElse<SendFuture, *const (), ()>: Send);
+ assert_not_impl!(MapOkOrElse<LocalFuture, (), ()>: Send);
+ assert_impl!(MapOkOrElse<SyncFuture, (), ()>: Sync);
+ assert_not_impl!(MapOkOrElse<SyncFuture, (), *const ()>: Sync);
+ assert_not_impl!(MapOkOrElse<SyncFuture, *const (), ()>: Sync);
+ assert_not_impl!(MapOkOrElse<LocalFuture, (), ()>: Sync);
+ assert_impl!(MapOkOrElse<UnpinFuture, PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(MapOkOrElse<PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(NeverError<SendFuture>: Send);
+ assert_not_impl!(NeverError<LocalFuture>: Send);
+ assert_impl!(NeverError<SyncFuture>: Sync);
+ assert_not_impl!(NeverError<LocalFuture>: Sync);
+ assert_impl!(NeverError<UnpinFuture>: Unpin);
+ assert_not_impl!(NeverError<PinnedFuture>: Unpin);
+
+ assert_impl!(OkInto<SendFuture, *const ()>: Send);
+ assert_not_impl!(OkInto<LocalFuture, ()>: Send);
+ assert_impl!(OkInto<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(OkInto<LocalFuture, ()>: Sync);
+ assert_impl!(OkInto<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(OkInto<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(OptionFuture<SendFuture>: Send);
+ assert_not_impl!(OptionFuture<LocalFuture>: Send);
+ assert_impl!(OptionFuture<SyncFuture>: Sync);
+ assert_not_impl!(OptionFuture<LocalFuture>: Sync);
+ assert_impl!(OptionFuture<UnpinFuture>: Unpin);
+ assert_not_impl!(OptionFuture<PinnedFuture>: Unpin);
+
+ assert_impl!(OrElse<SendFuture, SendFuture, ()>: Send);
+ assert_not_impl!(OrElse<SendFuture, LocalFuture, ()>: Send);
+ assert_not_impl!(OrElse<LocalFuture, SendFuture, ()>: Send);
+ assert_not_impl!(OrElse<SendFuture, SendFuture, *const ()>: Send);
+ assert_impl!(OrElse<SyncFuture, SyncFuture, ()>: Sync);
+ assert_not_impl!(OrElse<SyncFuture, LocalFuture, ()>: Sync);
+ assert_not_impl!(OrElse<LocalFuture, SyncFuture, ()>: Sync);
+ assert_not_impl!(OrElse<SyncFuture, SyncFuture, *const ()>: Sync);
+ assert_impl!(OrElse<UnpinFuture, UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(OrElse<PinnedFuture, UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(OrElse<UnpinFuture, PinnedFuture, PhantomPinned>: Unpin);
+
+ assert_impl!(Pending<()>: Send);
+ assert_not_impl!(Pending<*const ()>: Send);
+ assert_impl!(Pending<()>: Sync);
+ assert_not_impl!(Pending<*const ()>: Sync);
+ assert_impl!(Pending<PhantomPinned>: Unpin);
+
+ assert_impl!(PollFn<()>: Send);
+ assert_not_impl!(PollFn<*const ()>: Send);
+ assert_impl!(PollFn<()>: Sync);
+ assert_not_impl!(PollFn<*const ()>: Sync);
+ assert_impl!(PollFn<PhantomPinned>: Unpin);
+
+ assert_impl!(PollImmediate<SendStream>: Send);
+ assert_not_impl!(PollImmediate<LocalStream<()>>: Send);
+ assert_impl!(PollImmediate<SyncStream>: Sync);
+ assert_not_impl!(PollImmediate<LocalStream<()>>: Sync);
+ assert_impl!(PollImmediate<UnpinStream>: Unpin);
+ assert_not_impl!(PollImmediate<PinnedStream>: Unpin);
+
+ assert_impl!(Ready<()>: Send);
+ assert_not_impl!(Ready<*const ()>: Send);
+ assert_impl!(Ready<()>: Sync);
+ assert_not_impl!(Ready<*const ()>: Sync);
+ assert_impl!(Ready<PhantomPinned>: Unpin);
+
+ assert_impl!(Remote<SendFuture<()>>: Send);
+ assert_not_impl!(Remote<LocalFuture>: Send);
+ assert_not_impl!(Remote<SendFuture>: Send);
+ assert_impl!(Remote<SyncFuture<()>>: Sync);
+ assert_not_impl!(Remote<LocalFuture>: Sync);
+ assert_not_impl!(Remote<SyncFuture>: Sync);
+ assert_impl!(Remote<UnpinFuture>: Unpin);
+ assert_not_impl!(Remote<PinnedFuture>: Unpin);
+
+ assert_impl!(RemoteHandle<()>: Send);
+ assert_not_impl!(RemoteHandle<*const ()>: Send);
+ assert_impl!(RemoteHandle<()>: Sync);
+ assert_not_impl!(RemoteHandle<*const ()>: Sync);
+ assert_impl!(RemoteHandle<PhantomPinned>: Unpin);
+
+ assert_impl!(Select<SendFuture, SendFuture>: Send);
+ assert_not_impl!(Select<SendFuture, LocalFuture>: Send);
+ assert_not_impl!(Select<LocalFuture, SendFuture>: Send);
+ assert_impl!(Select<SyncFuture, SyncFuture>: Sync);
+ assert_not_impl!(Select<SyncFuture, LocalFuture>: Sync);
+ assert_not_impl!(Select<LocalFuture, SyncFuture>: Sync);
+ assert_impl!(Select<UnpinFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(Select<PinnedFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(Select<UnpinFuture, PinnedFuture>: Unpin);
+
+ assert_impl!(SelectAll<SendFuture>: Send);
+ assert_not_impl!(SelectAll<LocalFuture>: Send);
+ assert_impl!(SelectAll<SyncFuture>: Sync);
+ assert_not_impl!(SelectAll<LocalFuture>: Sync);
+ assert_impl!(SelectAll<UnpinFuture>: Unpin);
+ assert_not_impl!(SelectAll<PinnedFuture>: Unpin);
+
+ assert_impl!(SelectOk<SendFuture>: Send);
+ assert_not_impl!(SelectOk<LocalFuture>: Send);
+ assert_impl!(SelectOk<SyncFuture>: Sync);
+ assert_not_impl!(SelectOk<LocalFuture>: Sync);
+ assert_impl!(SelectOk<UnpinFuture>: Unpin);
+ assert_not_impl!(SelectOk<PinnedFuture>: Unpin);
+
+ assert_impl!(Shared<SendFuture<()>>: Send);
+ assert_not_impl!(Shared<SendFuture>: Send);
+ assert_not_impl!(Shared<LocalFuture>: Send);
+ assert_not_impl!(Shared<SyncFuture<()>>: Sync);
+ assert_impl!(Shared<PinnedFuture>: Unpin);
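+ // Note: there is no positive Sync assertion for Shared; as checked above,
+ // Shared<Fut> is not Sync even when Fut is, presumably because clones poll
+ // the future through shared interior state.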
+
+ assert_impl!(Then<SendFuture, SendFuture, ()>: Send);
+ assert_not_impl!(Then<SendFuture, SendFuture, *const ()>: Send);
+ assert_not_impl!(Then<SendFuture, LocalFuture, ()>: Send);
+ assert_not_impl!(Then<LocalFuture, SendFuture, ()>: Send);
+ assert_impl!(Then<SyncFuture, SyncFuture, ()>: Sync);
+ assert_not_impl!(Then<SyncFuture, SyncFuture, *const ()>: Sync);
+ assert_not_impl!(Then<SyncFuture, LocalFuture, ()>: Sync);
+ assert_not_impl!(Then<LocalFuture, SyncFuture, ()>: Sync);
+ assert_impl!(Then<UnpinFuture, UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(Then<PinnedFuture, UnpinFuture, ()>: Unpin);
+ assert_not_impl!(Then<UnpinFuture, PinnedFuture, ()>: Unpin);
+
+ assert_impl!(TryFlatten<SendTryFuture<()>, ()>: Send);
+ assert_not_impl!(TryFlatten<LocalTryFuture, ()>: Send);
+ assert_not_impl!(TryFlatten<SendTryFuture, *const ()>: Send);
+ assert_impl!(TryFlatten<SyncTryFuture<()>, ()>: Sync);
+ assert_not_impl!(TryFlatten<LocalTryFuture, ()>: Sync);
+ assert_not_impl!(TryFlatten<SyncTryFuture, *const ()>: Sync);
+ assert_impl!(TryFlatten<UnpinTryFuture<()>, ()>: Unpin);
+ assert_not_impl!(TryFlatten<PinnedTryFuture, ()>: Unpin);
+ assert_not_impl!(TryFlatten<UnpinTryFuture, PhantomPinned>: Unpin);
+
+ assert_impl!(TryFlattenStream<SendTryFuture<()>>: Send);
+ assert_not_impl!(TryFlattenStream<LocalTryFuture>: Send);
+ assert_not_impl!(TryFlattenStream<SendTryFuture>: Send);
+ assert_impl!(TryFlattenStream<SyncTryFuture<()>>: Sync);
+ assert_not_impl!(TryFlattenStream<LocalTryFuture>: Sync);
+ assert_not_impl!(TryFlattenStream<SyncTryFuture>: Sync);
+ assert_impl!(TryFlattenStream<UnpinTryFuture<()>>: Unpin);
+ assert_not_impl!(TryFlattenStream<PinnedTryFuture>: Unpin);
+ assert_not_impl!(TryFlattenStream<UnpinTryFuture>: Unpin);
+
+ assert_impl!(TryJoin<SendTryFuture<()>, SendTryFuture<()>>: Send);
+ assert_not_impl!(TryJoin<SendTryFuture<()>, SendTryFuture>: Send);
+ assert_not_impl!(TryJoin<SendTryFuture, SendTryFuture<()>>: Send);
+ assert_not_impl!(TryJoin<SendTryFuture, LocalTryFuture>: Send);
+ assert_not_impl!(TryJoin<LocalTryFuture, SendTryFuture>: Send);
+ assert_impl!(TryJoin<SyncTryFuture<()>, SyncTryFuture<()>>: Sync);
+ assert_not_impl!(TryJoin<SyncTryFuture<()>, SyncTryFuture>: Sync);
+ assert_not_impl!(TryJoin<SyncTryFuture, SyncTryFuture<()>>: Sync);
+ assert_not_impl!(TryJoin<SyncTryFuture, LocalTryFuture>: Sync);
+ assert_not_impl!(TryJoin<LocalTryFuture, SyncTryFuture>: Sync);
+ assert_impl!(TryJoin<UnpinTryFuture, UnpinTryFuture>: Unpin);
+ assert_not_impl!(TryJoin<PinnedTryFuture, UnpinTryFuture>: Unpin);
+ assert_not_impl!(TryJoin<UnpinTryFuture, PinnedTryFuture>: Unpin);
+
+ // TryJoin3, TryJoin4, TryJoin5 are the same as TryJoin
+
+ assert_impl!(TryJoinAll<SendTryFuture<()>>: Send);
+ assert_not_impl!(TryJoinAll<LocalTryFuture>: Send);
+ assert_not_impl!(TryJoinAll<SendTryFuture>: Send);
+ assert_impl!(TryJoinAll<SyncTryFuture<()>>: Sync);
+ assert_not_impl!(TryJoinAll<LocalTryFuture>: Sync);
+ assert_not_impl!(TryJoinAll<SyncTryFuture>: Sync);
+ assert_impl!(TryJoinAll<PinnedTryFuture>: Unpin);
+
+ assert_impl!(TrySelect<SendFuture, SendFuture>: Send);
+ assert_not_impl!(TrySelect<SendFuture, LocalFuture>: Send);
+ assert_not_impl!(TrySelect<LocalFuture, SendFuture>: Send);
+ assert_impl!(TrySelect<SyncFuture, SyncFuture>: Sync);
+ assert_not_impl!(TrySelect<SyncFuture, LocalFuture>: Sync);
+ assert_not_impl!(TrySelect<LocalFuture, SyncFuture>: Sync);
+ assert_impl!(TrySelect<UnpinFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(TrySelect<PinnedFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(TrySelect<UnpinFuture, PinnedFuture>: Unpin);
+
+ assert_impl!(UnitError<SendFuture>: Send);
+ assert_not_impl!(UnitError<LocalFuture>: Send);
+ assert_impl!(UnitError<SyncFuture>: Sync);
+ assert_not_impl!(UnitError<LocalFuture>: Sync);
+ assert_impl!(UnitError<UnpinFuture>: Unpin);
+ assert_not_impl!(UnitError<PinnedFuture>: Unpin);
+
+ assert_impl!(UnwrapOrElse<SendFuture, ()>: Send);
+ assert_not_impl!(UnwrapOrElse<SendFuture, *const ()>: Send);
+ assert_not_impl!(UnwrapOrElse<LocalFuture, ()>: Send);
+ assert_impl!(UnwrapOrElse<SyncFuture, ()>: Sync);
+ assert_not_impl!(UnwrapOrElse<SyncFuture, *const ()>: Sync);
+ assert_not_impl!(UnwrapOrElse<LocalFuture, ()>: Sync);
+ assert_impl!(UnwrapOrElse<UnpinFuture, PhantomPinned>: Unpin);
+ assert_not_impl!(UnwrapOrElse<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(WeakShared<SendFuture<()>>: Send);
+ assert_not_impl!(WeakShared<SendFuture>: Send);
+ assert_not_impl!(WeakShared<LocalFuture>: Send);
+ assert_not_impl!(WeakShared<SyncFuture<()>>: Sync);
+ assert_impl!(WeakShared<PinnedFuture>: Unpin);
+
+ assert_impl!(Either<SendFuture, SendFuture>: Send);
+ assert_not_impl!(Either<SendFuture, LocalFuture>: Send);
+ assert_not_impl!(Either<LocalFuture, SendFuture>: Send);
+ assert_impl!(Either<SyncFuture, SyncFuture>: Sync);
+ assert_not_impl!(Either<SyncFuture, LocalFuture>: Sync);
+ assert_not_impl!(Either<LocalFuture, SyncFuture>: Sync);
+ assert_impl!(Either<UnpinFuture, UnpinFuture>: Unpin);
+ assert_not_impl!(Either<UnpinFuture, PinnedFuture>: Unpin);
+ assert_not_impl!(Either<PinnedFuture, UnpinFuture>: Unpin);
+
+ assert_impl!(MaybeDone<SendFuture<()>>: Send);
+ assert_not_impl!(MaybeDone<SendFuture>: Send);
+ assert_not_impl!(MaybeDone<LocalFuture>: Send);
+ assert_impl!(MaybeDone<SyncFuture<()>>: Sync);
+ assert_not_impl!(MaybeDone<SyncFuture>: Sync);
+ assert_not_impl!(MaybeDone<LocalFuture>: Sync);
+ assert_impl!(MaybeDone<UnpinFuture>: Unpin);
+ assert_not_impl!(MaybeDone<PinnedFuture>: Unpin);
+
+ assert_impl!(TryMaybeDone<SendTryFuture<()>>: Send);
+ assert_not_impl!(TryMaybeDone<SendTryFuture>: Send);
+ assert_not_impl!(TryMaybeDone<LocalTryFuture>: Send);
+ assert_impl!(TryMaybeDone<SyncTryFuture<()>>: Sync);
+ assert_not_impl!(TryMaybeDone<SyncTryFuture>: Sync);
+ assert_not_impl!(TryMaybeDone<LocalTryFuture>: Sync);
+ assert_impl!(TryMaybeDone<UnpinTryFuture>: Unpin);
+ assert_not_impl!(TryMaybeDone<PinnedTryFuture>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::io`.
+pub mod io {
+ use super::*;
+ use futures::io::{Sink, *};
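+ // Note on the placeholder types used below: `()` is Send + Sync + Unpin,
+ // `*const ()` is neither Send nor Sync, and `PhantomPinned` is !Unpin, so
+ // substituting them into a combinator probes each auto trait independently.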
+
+ assert_impl!(AllowStdIo<()>: Send);
+ assert_not_impl!(AllowStdIo<*const ()>: Send);
+ assert_impl!(AllowStdIo<()>: Sync);
+ assert_not_impl!(AllowStdIo<*const ()>: Sync);
+ assert_impl!(AllowStdIo<PhantomPinned>: Unpin);
+
+ assert_impl!(BufReader<()>: Send);
+ assert_not_impl!(BufReader<*const ()>: Send);
+ assert_impl!(BufReader<()>: Sync);
+ assert_not_impl!(BufReader<*const ()>: Sync);
+ assert_impl!(BufReader<()>: Unpin);
+ assert_not_impl!(BufReader<PhantomPinned>: Unpin);
+
+ assert_impl!(BufWriter<()>: Send);
+ assert_not_impl!(BufWriter<*const ()>: Send);
+ assert_impl!(BufWriter<()>: Sync);
+ assert_not_impl!(BufWriter<*const ()>: Sync);
+ assert_impl!(BufWriter<()>: Unpin);
+ assert_not_impl!(BufWriter<PhantomPinned>: Unpin);
+
+ assert_impl!(Chain<(), ()>: Send);
+ assert_not_impl!(Chain<(), *const ()>: Send);
+ assert_not_impl!(Chain<*const (), ()>: Send);
+ assert_impl!(Chain<(), ()>: Sync);
+ assert_not_impl!(Chain<(), *const ()>: Sync);
+ assert_not_impl!(Chain<*const (), ()>: Sync);
+ assert_impl!(Chain<(), ()>: Unpin);
+ assert_not_impl!(Chain<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Chain<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Close<'_, ()>: Send);
+ assert_not_impl!(Close<'_, *const ()>: Send);
+ assert_impl!(Close<'_, ()>: Sync);
+ assert_not_impl!(Close<'_, *const ()>: Sync);
+ assert_impl!(Close<'_, ()>: Unpin);
+ assert_not_impl!(Close<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(Copy<(), ()>: Send);
+ assert_not_impl!(Copy<(), *const ()>: Send);
+ assert_not_impl!(Copy<*const (), ()>: Send);
+ assert_impl!(Copy<(), ()>: Sync);
+ assert_not_impl!(Copy<(), *const ()>: Sync);
+ assert_not_impl!(Copy<*const (), ()>: Sync);
+ assert_impl!(Copy<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Copy<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(CopyBuf<(), ()>: Send);
+ assert_not_impl!(CopyBuf<(), *const ()>: Send);
+ assert_not_impl!(CopyBuf<*const (), ()>: Send);
+ assert_impl!(CopyBuf<(), ()>: Sync);
+ assert_not_impl!(CopyBuf<(), *const ()>: Sync);
+ assert_not_impl!(CopyBuf<*const (), ()>: Sync);
+ assert_impl!(CopyBuf<(), PhantomPinned>: Unpin);
+ assert_not_impl!(CopyBuf<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Cursor<()>: Send);
+ assert_not_impl!(Cursor<*const ()>: Send);
+ assert_impl!(Cursor<()>: Sync);
+ assert_not_impl!(Cursor<*const ()>: Sync);
+ assert_impl!(Cursor<()>: Unpin);
+ assert_not_impl!(Cursor<PhantomPinned>: Unpin);
+
+ assert_impl!(Empty: Send);
+ assert_impl!(Empty: Sync);
+ assert_impl!(Empty: Unpin);
+
+ assert_impl!(FillBuf<'_, ()>: Send);
+ assert_not_impl!(FillBuf<'_, *const ()>: Send);
+ assert_impl!(FillBuf<'_, ()>: Sync);
+ assert_not_impl!(FillBuf<'_, *const ()>: Sync);
+ assert_impl!(FillBuf<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(Flush<'_, ()>: Send);
+ assert_not_impl!(Flush<'_, *const ()>: Send);
+ assert_impl!(Flush<'_, ()>: Sync);
+ assert_not_impl!(Flush<'_, *const ()>: Sync);
+ assert_impl!(Flush<'_, ()>: Unpin);
+ assert_not_impl!(Flush<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(IntoSink<(), ()>: Send);
+ assert_not_impl!(IntoSink<(), *const ()>: Send);
+ assert_not_impl!(IntoSink<*const (), ()>: Send);
+ assert_impl!(IntoSink<(), ()>: Sync);
+ assert_not_impl!(IntoSink<(), *const ()>: Sync);
+ assert_not_impl!(IntoSink<*const (), ()>: Sync);
+ assert_impl!(IntoSink<(), PhantomPinned>: Unpin);
+ assert_not_impl!(IntoSink<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Lines<()>: Send);
+ assert_not_impl!(Lines<*const ()>: Send);
+ assert_impl!(Lines<()>: Sync);
+ assert_not_impl!(Lines<*const ()>: Sync);
+ assert_impl!(Lines<()>: Unpin);
+ assert_not_impl!(Lines<PhantomPinned>: Unpin);
+
+ assert_impl!(Read<'_, ()>: Send);
+ assert_not_impl!(Read<'_, *const ()>: Send);
+ assert_impl!(Read<'_, ()>: Sync);
+ assert_not_impl!(Read<'_, *const ()>: Sync);
+ assert_impl!(Read<'_, ()>: Unpin);
+ assert_not_impl!(Read<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadExact<'_, ()>: Send);
+ assert_not_impl!(ReadExact<'_, *const ()>: Send);
+ assert_impl!(ReadExact<'_, ()>: Sync);
+ assert_not_impl!(ReadExact<'_, *const ()>: Sync);
+ assert_impl!(ReadExact<'_, ()>: Unpin);
+ assert_not_impl!(ReadExact<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadHalf<()>: Send);
+ assert_not_impl!(ReadHalf<*const ()>: Send);
+ assert_impl!(ReadHalf<()>: Sync);
+ assert_not_impl!(ReadHalf<*const ()>: Sync);
+ assert_impl!(ReadHalf<PhantomPinned>: Unpin);
+
+ assert_impl!(ReadLine<'_, ()>: Send);
+ assert_not_impl!(ReadLine<'_, *const ()>: Send);
+ assert_impl!(ReadLine<'_, ()>: Sync);
+ assert_not_impl!(ReadLine<'_, *const ()>: Sync);
+ assert_impl!(ReadLine<'_, ()>: Unpin);
+ assert_not_impl!(ReadLine<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadToEnd<'_, ()>: Send);
+ assert_not_impl!(ReadToEnd<'_, *const ()>: Send);
+ assert_impl!(ReadToEnd<'_, ()>: Sync);
+ assert_not_impl!(ReadToEnd<'_, *const ()>: Sync);
+ assert_impl!(ReadToEnd<'_, ()>: Unpin);
+ assert_not_impl!(ReadToEnd<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadToString<'_, ()>: Send);
+ assert_not_impl!(ReadToString<'_, *const ()>: Send);
+ assert_impl!(ReadToString<'_, ()>: Sync);
+ assert_not_impl!(ReadToString<'_, *const ()>: Sync);
+ assert_impl!(ReadToString<'_, ()>: Unpin);
+ assert_not_impl!(ReadToString<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadUntil<'_, ()>: Send);
+ assert_not_impl!(ReadUntil<'_, *const ()>: Send);
+ assert_impl!(ReadUntil<'_, ()>: Sync);
+ assert_not_impl!(ReadUntil<'_, *const ()>: Sync);
+ assert_impl!(ReadUntil<'_, ()>: Unpin);
+ assert_not_impl!(ReadUntil<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(ReadVectored<'_, ()>: Send);
+ assert_not_impl!(ReadVectored<'_, *const ()>: Send);
+ assert_impl!(ReadVectored<'_, ()>: Sync);
+ assert_not_impl!(ReadVectored<'_, *const ()>: Sync);
+ assert_impl!(ReadVectored<'_, ()>: Unpin);
+ assert_not_impl!(ReadVectored<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(Repeat: Send);
+ assert_impl!(Repeat: Sync);
+ assert_impl!(Repeat: Unpin);
+
+ assert_impl!(ReuniteError<()>: Send);
+ assert_not_impl!(ReuniteError<*const ()>: Send);
+ assert_impl!(ReuniteError<()>: Sync);
+ assert_not_impl!(ReuniteError<*const ()>: Sync);
+ assert_impl!(ReuniteError<PhantomPinned>: Unpin);
+
+ assert_impl!(Seek<'_, ()>: Send);
+ assert_not_impl!(Seek<'_, *const ()>: Send);
+ assert_impl!(Seek<'_, ()>: Sync);
+ assert_not_impl!(Seek<'_, *const ()>: Sync);
+ assert_impl!(Seek<'_, ()>: Unpin);
+ assert_not_impl!(Seek<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(SeeKRelative<'_, ()>: Send);
+ assert_not_impl!(SeeKRelative<'_, *const ()>: Send);
+ assert_impl!(SeeKRelative<'_, ()>: Sync);
+ assert_not_impl!(SeeKRelative<'_, *const ()>: Sync);
+ assert_impl!(SeeKRelative<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(Sink: Send);
+ assert_impl!(Sink: Sync);
+ assert_impl!(Sink: Unpin);
+
+ assert_impl!(Take<()>: Send);
+ assert_not_impl!(Take<*const ()>: Send);
+ assert_impl!(Take<()>: Sync);
+ assert_not_impl!(Take<*const ()>: Sync);
+ assert_impl!(Take<()>: Unpin);
+ assert_not_impl!(Take<PhantomPinned>: Unpin);
+
+ assert_impl!(Window<()>: Send);
+ assert_not_impl!(Window<*const ()>: Send);
+ assert_impl!(Window<()>: Sync);
+ assert_not_impl!(Window<*const ()>: Sync);
+ assert_impl!(Window<()>: Unpin);
+ assert_not_impl!(Window<PhantomPinned>: Unpin);
+
+ assert_impl!(Write<'_, ()>: Send);
+ assert_not_impl!(Write<'_, *const ()>: Send);
+ assert_impl!(Write<'_, ()>: Sync);
+ assert_not_impl!(Write<'_, *const ()>: Sync);
+ assert_impl!(Write<'_, ()>: Unpin);
+ assert_not_impl!(Write<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(WriteAll<'_, ()>: Send);
+ assert_not_impl!(WriteAll<'_, *const ()>: Send);
+ assert_impl!(WriteAll<'_, ()>: Sync);
+ assert_not_impl!(WriteAll<'_, *const ()>: Sync);
+ assert_impl!(WriteAll<'_, ()>: Unpin);
+ assert_not_impl!(WriteAll<'_, PhantomPinned>: Unpin);
+
+ #[cfg(feature = "write-all-vectored")]
+ assert_impl!(WriteAllVectored<'_, ()>: Send);
+ #[cfg(feature = "write-all-vectored")]
+ assert_not_impl!(WriteAllVectored<'_, *const ()>: Send);
+ #[cfg(feature = "write-all-vectored")]
+ assert_impl!(WriteAllVectored<'_, ()>: Sync);
+ #[cfg(feature = "write-all-vectored")]
+ assert_not_impl!(WriteAllVectored<'_, *const ()>: Sync);
+ #[cfg(feature = "write-all-vectored")]
+ assert_impl!(WriteAllVectored<'_, ()>: Unpin);
+ // WriteAllVectored requires `W: Unpin`
+ // #[cfg(feature = "write-all-vectored")]
+ // assert_not_impl!(WriteAllVectored<'_, PhantomPinned>: Unpin);
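+ // (Given that bound, a `!Unpin` writer such as `PhantomPinned` cannot be used
+ // with the combinator at all, so the negative case stays commented out.)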
+
+ assert_impl!(WriteHalf<()>: Send);
+ assert_not_impl!(WriteHalf<*const ()>: Send);
+ assert_impl!(WriteHalf<()>: Sync);
+ assert_not_impl!(WriteHalf<*const ()>: Sync);
+ assert_impl!(WriteHalf<PhantomPinned>: Unpin);
+
+ assert_impl!(WriteVectored<'_, ()>: Send);
+ assert_not_impl!(WriteVectored<'_, *const ()>: Send);
+ assert_impl!(WriteVectored<'_, ()>: Sync);
+ assert_not_impl!(WriteVectored<'_, *const ()>: Sync);
+ assert_impl!(WriteVectored<'_, ()>: Unpin);
+ assert_not_impl!(WriteVectored<'_, PhantomPinned>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::lock`.
+pub mod lock {
+ use super::*;
+ use futures::lock::*;
+
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLock<()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLock<*const ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLock<()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLock<*const ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLock<PhantomPinned>: Unpin);
+
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockAcquire<'_, ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLockAcquire<'_, *const ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockAcquire<'_, ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLockAcquire<'_, *const ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockAcquire<'_, PhantomPinned>: Unpin);
+
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockGuard<'_, ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLockGuard<'_, *const ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockGuard<'_, ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(BiLockGuard<'_, *const ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_impl!(BiLockGuard<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(MappedMutexGuard<'_, (), ()>: Send);
+ assert_not_impl!(MappedMutexGuard<'_, (), *const ()>: Send);
+ assert_not_impl!(MappedMutexGuard<'_, *const (), ()>: Send);
+ assert_impl!(MappedMutexGuard<'_, (), ()>: Sync);
+ assert_not_impl!(MappedMutexGuard<'_, (), *const ()>: Sync);
+ assert_not_impl!(MappedMutexGuard<'_, *const (), ()>: Sync);
+ assert_impl!(MappedMutexGuard<'_, PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(Mutex<()>: Send);
+ assert_not_impl!(Mutex<*const ()>: Send);
+ assert_impl!(Mutex<()>: Sync);
+ assert_not_impl!(Mutex<*const ()>: Sync);
+ assert_impl!(Mutex<()>: Unpin);
+ assert_not_impl!(Mutex<PhantomPinned>: Unpin);
+
+ assert_impl!(MutexGuard<'_, ()>: Send);
+ assert_not_impl!(MutexGuard<'_, *const ()>: Send);
+ assert_impl!(MutexGuard<'_, ()>: Sync);
+ assert_not_impl!(MutexGuard<'_, *const ()>: Sync);
+ assert_impl!(MutexGuard<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(MutexLockFuture<'_, ()>: Send);
+ assert_not_impl!(MutexLockFuture<'_, *const ()>: Send);
+ assert_impl!(MutexLockFuture<'_, *const ()>: Sync);
+ assert_impl!(MutexLockFuture<'_, PhantomPinned>: Unpin);
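+ // Note: the Sync assertion above holds even for a `*const ()` payload,
+ // presumably because the future only waits for the lock and does not touch
+ // the protected value until it resolves.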
+
+ #[cfg(feature = "bilock")]
+ assert_impl!(ReuniteError<()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(ReuniteError<*const ()>: Send);
+ #[cfg(feature = "bilock")]
+ assert_impl!(ReuniteError<()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_not_impl!(ReuniteError<*const ()>: Sync);
+ #[cfg(feature = "bilock")]
+ assert_impl!(ReuniteError<PhantomPinned>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::sink`.
+pub mod sink {
+ use super::*;
+ use futures::sink::{self, *};
+ use std::marker::Send;
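+ // (The explicit import keeps the bare `Send` in the assertions below referring
+ // to the auto trait rather than the `sink::Send` future from the glob import.)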
+
+ assert_impl!(Buffer<(), ()>: Send);
+ assert_not_impl!(Buffer<(), *const ()>: Send);
+ assert_not_impl!(Buffer<*const (), ()>: Send);
+ assert_impl!(Buffer<(), ()>: Sync);
+ assert_not_impl!(Buffer<(), *const ()>: Sync);
+ assert_not_impl!(Buffer<*const (), ()>: Sync);
+ assert_impl!(Buffer<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Buffer<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Close<'_, (), *const ()>: Send);
+ assert_not_impl!(Close<'_, *const (), ()>: Send);
+ assert_impl!(Close<'_, (), *const ()>: Sync);
+ assert_not_impl!(Close<'_, *const (), ()>: Sync);
+ assert_impl!(Close<'_, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Close<'_, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Drain<()>: Send);
+ assert_not_impl!(Drain<*const ()>: Send);
+ assert_impl!(Drain<()>: Sync);
+ assert_not_impl!(Drain<*const ()>: Sync);
+ assert_impl!(Drain<PhantomPinned>: Unpin);
+
+ assert_impl!(Fanout<(), ()>: Send);
+ assert_not_impl!(Fanout<(), *const ()>: Send);
+ assert_not_impl!(Fanout<*const (), ()>: Send);
+ assert_impl!(Fanout<(), ()>: Sync);
+ assert_not_impl!(Fanout<(), *const ()>: Sync);
+ assert_not_impl!(Fanout<*const (), ()>: Sync);
+ assert_impl!(Fanout<(), ()>: Unpin);
+ assert_not_impl!(Fanout<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Fanout<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Feed<'_, (), ()>: Send);
+ assert_not_impl!(Feed<'_, (), *const ()>: Send);
+ assert_not_impl!(Feed<'_, *const (), ()>: Send);
+ assert_impl!(Feed<'_, (), ()>: Sync);
+ assert_not_impl!(Feed<'_, (), *const ()>: Sync);
+ assert_not_impl!(Feed<'_, *const (), ()>: Sync);
+ assert_impl!(Feed<'_, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Feed<'_, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Flush<'_, (), *const ()>: Send);
+ assert_not_impl!(Flush<'_, *const (), ()>: Send);
+ assert_impl!(Flush<'_, (), *const ()>: Sync);
+ assert_not_impl!(Flush<'_, *const (), ()>: Sync);
+ assert_impl!(Flush<'_, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Flush<'_, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(sink::Send<'_, (), ()>: Send);
+ assert_not_impl!(sink::Send<'_, (), *const ()>: Send);
+ assert_not_impl!(sink::Send<'_, *const (), ()>: Send);
+ assert_impl!(sink::Send<'_, (), ()>: Sync);
+ assert_not_impl!(sink::Send<'_, (), *const ()>: Sync);
+ assert_not_impl!(sink::Send<'_, *const (), ()>: Sync);
+ assert_impl!(sink::Send<'_, (), PhantomPinned>: Unpin);
+ assert_not_impl!(sink::Send<'_, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(SendAll<'_, (), SendTryStream<()>>: Send);
+ assert_not_impl!(SendAll<'_, (), SendTryStream>: Send);
+ assert_not_impl!(SendAll<'_, (), LocalTryStream>: Send);
+ assert_not_impl!(SendAll<'_, *const (), SendTryStream<()>>: Send);
+ assert_impl!(SendAll<'_, (), SyncTryStream<()>>: Sync);
+ assert_not_impl!(SendAll<'_, (), SyncTryStream>: Sync);
+ assert_not_impl!(SendAll<'_, (), LocalTryStream>: Sync);
+ assert_not_impl!(SendAll<'_, *const (), SyncTryStream<()>>: Sync);
+ assert_impl!(SendAll<'_, (), UnpinTryStream>: Unpin);
+ assert_not_impl!(SendAll<'_, PhantomPinned, UnpinTryStream>: Unpin);
+ assert_not_impl!(SendAll<'_, (), PinnedTryStream>: Unpin);
+
+ assert_impl!(SinkErrInto<SendSink, *const (), *const ()>: Send);
+ assert_not_impl!(SinkErrInto<LocalSink<()>, (), ()>: Send);
+ assert_impl!(SinkErrInto<SyncSink, *const (), *const ()>: Sync);
+ assert_not_impl!(SinkErrInto<LocalSink<()>, (), ()>: Sync);
+ assert_impl!(SinkErrInto<UnpinSink, PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(SinkErrInto<PinnedSink<()>, (), ()>: Unpin);
+
+ assert_impl!(SinkMapErr<SendSink, ()>: Send);
+ assert_not_impl!(SinkMapErr<SendSink, *const ()>: Send);
+ assert_not_impl!(SinkMapErr<LocalSink<()>, ()>: Send);
+ assert_impl!(SinkMapErr<SyncSink, ()>: Sync);
+ assert_not_impl!(SinkMapErr<SyncSink, *const ()>: Sync);
+ assert_not_impl!(SinkMapErr<LocalSink<()>, ()>: Sync);
+ assert_impl!(SinkMapErr<UnpinSink, PhantomPinned>: Unpin);
+ assert_not_impl!(SinkMapErr<PinnedSink<()>, ()>: Unpin);
+
+ assert_impl!(Unfold<(), (), ()>: Send);
+ assert_not_impl!(Unfold<*const (), (), ()>: Send);
+ assert_not_impl!(Unfold<(), *const (), ()>: Send);
+ assert_not_impl!(Unfold<(), (), *const ()>: Send);
+ assert_impl!(Unfold<(), (), ()>: Sync);
+ assert_not_impl!(Unfold<*const (), (), ()>: Sync);
+ assert_not_impl!(Unfold<(), *const (), ()>: Sync);
+ assert_not_impl!(Unfold<(), (), *const ()>: Sync);
+ assert_impl!(Unfold<PhantomPinned, PhantomPinned, ()>: Unpin);
+ assert_not_impl!(Unfold<PinnedSink<()>, (), PhantomPinned>: Unpin);
+
+ assert_impl!(With<(), *const (), *const (), (), ()>: Send);
+ assert_not_impl!(With<*const (), (), (), (), ()>: Send);
+ assert_not_impl!(With<(), (), (), *const (), ()>: Send);
+ assert_not_impl!(With<(), (), (), (), *const ()>: Send);
+ assert_impl!(With<(), *const (), *const (), (), ()>: Sync);
+ assert_not_impl!(With<*const (), (), (), (), ()>: Sync);
+ assert_not_impl!(With<(), (), (), *const (), ()>: Sync);
+ assert_not_impl!(With<(), (), (), (), *const ()>: Sync);
+ assert_impl!(With<(), PhantomPinned, PhantomPinned, (), PhantomPinned>: Unpin);
+ assert_not_impl!(With<PhantomPinned, (), (), (), ()>: Unpin);
+ assert_not_impl!(With<(), (), (), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(WithFlatMap<(), (), *const (), (), ()>: Send);
+ assert_not_impl!(WithFlatMap<*const (), (), (), (), ()>: Send);
+ assert_not_impl!(WithFlatMap<(), *const (), (), (), ()>: Send);
+ assert_not_impl!(WithFlatMap<(), (), (), *const (), ()>: Send);
+ assert_not_impl!(WithFlatMap<(), (), (), (), *const ()>: Send);
+ assert_impl!(WithFlatMap<(), (), *const (), (), ()>: Sync);
+ assert_not_impl!(WithFlatMap<*const (), (), (), (), ()>: Sync);
+ assert_not_impl!(WithFlatMap<(), *const (), (), (), ()>: Sync);
+ assert_not_impl!(WithFlatMap<(), (), (), *const (), ()>: Sync);
+ assert_not_impl!(WithFlatMap<(), (), (), (), *const ()>: Sync);
+ assert_impl!(WithFlatMap<(), PhantomPinned, PhantomPinned, (), PhantomPinned>: Unpin);
+ assert_not_impl!(WithFlatMap<PhantomPinned, (), (), (), ()>: Unpin);
+ assert_not_impl!(WithFlatMap<(), (), (), PhantomPinned, ()>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::stream`.
+pub mod stream {
+ use super::*;
+ use futures::{io, stream::*};
+
+ assert_impl!(AndThen<(), (), ()>: Send);
+ assert_not_impl!(AndThen<*const (), (), ()>: Send);
+ assert_not_impl!(AndThen<(), *const (), ()>: Send);
+ assert_not_impl!(AndThen<(), (), *const ()>: Send);
+ assert_impl!(AndThen<(), (), ()>: Sync);
+ assert_not_impl!(AndThen<*const (), (), ()>: Sync);
+ assert_not_impl!(AndThen<(), *const (), ()>: Sync);
+ assert_not_impl!(AndThen<(), (), *const ()>: Sync);
+ assert_impl!(AndThen<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(AndThen<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(AndThen<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(BufferUnordered<SendStream<()>>: Send);
+ assert_not_impl!(BufferUnordered<SendStream>: Send);
+ assert_not_impl!(BufferUnordered<LocalStream>: Send);
+ assert_impl!(BufferUnordered<SyncStream<()>>: Sync);
+ assert_not_impl!(BufferUnordered<SyncStream>: Sync);
+ assert_not_impl!(BufferUnordered<LocalStream>: Sync);
+ assert_impl!(BufferUnordered<UnpinStream>: Unpin);
+ assert_not_impl!(BufferUnordered<PinnedStream>: Unpin);
+
+ assert_impl!(Buffered<SendStream<SendFuture<()>>>: Send);
+ assert_not_impl!(Buffered<SendStream<SendFuture>>: Send);
+ assert_not_impl!(Buffered<SendStream<LocalFuture>>: Send);
+ assert_not_impl!(Buffered<LocalStream<SendFuture<()>>>: Send);
+ assert_impl!(Buffered<SyncStream<SyncFuture<()>>>: Sync);
+ assert_not_impl!(Buffered<SyncStream<SyncFuture>>: Sync);
+ assert_not_impl!(Buffered<SyncStream<LocalFuture>>: Sync);
+ assert_not_impl!(Buffered<LocalStream<SyncFuture<()>>>: Sync);
+ assert_impl!(Buffered<UnpinStream<PinnedFuture>>: Unpin);
+ assert_not_impl!(Buffered<PinnedStream<PinnedFuture>>: Unpin);
+
+ assert_impl!(CatchUnwind<SendStream>: Send);
+ assert_not_impl!(CatchUnwind<LocalStream>: Send);
+ assert_impl!(CatchUnwind<SyncStream>: Sync);
+ assert_not_impl!(CatchUnwind<LocalStream>: Sync);
+ assert_impl!(CatchUnwind<UnpinStream>: Unpin);
+ assert_not_impl!(CatchUnwind<PinnedStream>: Unpin);
+
+ assert_impl!(Chain<(), ()>: Send);
+ assert_not_impl!(Chain<(), *const ()>: Send);
+ assert_not_impl!(Chain<*const (), ()>: Send);
+ assert_impl!(Chain<(), ()>: Sync);
+ assert_not_impl!(Chain<(), *const ()>: Sync);
+ assert_not_impl!(Chain<*const (), ()>: Sync);
+ assert_impl!(Chain<(), ()>: Unpin);
+ assert_not_impl!(Chain<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Chain<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Chunks<SendStream<()>>: Send);
+ assert_not_impl!(Chunks<SendStream>: Send);
+ assert_not_impl!(Chunks<LocalStream>: Send);
+ assert_impl!(Chunks<SyncStream<()>>: Sync);
+ assert_not_impl!(Chunks<SyncStream>: Sync);
+ assert_not_impl!(Chunks<LocalStream>: Sync);
+ assert_impl!(Chunks<UnpinStream>: Unpin);
+ assert_not_impl!(Chunks<PinnedStream>: Unpin);
+
+ assert_impl!(Collect<(), ()>: Send);
+ assert_not_impl!(Collect<*const (), ()>: Send);
+ assert_not_impl!(Collect<(), *const ()>: Send);
+ assert_impl!(Collect<(), ()>: Sync);
+ assert_not_impl!(Collect<*const (), ()>: Sync);
+ assert_not_impl!(Collect<(), *const ()>: Sync);
+ assert_impl!(Collect<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Collect<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Concat<SendStream<()>>: Send);
+ assert_not_impl!(Concat<SendStream>: Send);
+ assert_not_impl!(Concat<LocalStream>: Send);
+ assert_impl!(Concat<SyncStream<()>>: Sync);
+ assert_not_impl!(Concat<SyncStream>: Sync);
+ assert_not_impl!(Concat<LocalStream>: Sync);
+ assert_impl!(Concat<UnpinStream>: Unpin);
+ assert_not_impl!(Concat<PinnedStream>: Unpin);
+
+ assert_impl!(Cycle<()>: Send);
+ assert_not_impl!(Cycle<*const ()>: Send);
+ assert_impl!(Cycle<()>: Sync);
+ assert_not_impl!(Cycle<*const ()>: Sync);
+ assert_impl!(Cycle<()>: Unpin);
+ assert_not_impl!(Cycle<PhantomPinned>: Unpin);
+
+ assert_impl!(Empty<()>: Send);
+ assert_not_impl!(Empty<*const ()>: Send);
+ assert_impl!(Empty<()>: Sync);
+ assert_not_impl!(Empty<*const ()>: Sync);
+ assert_impl!(Empty<PhantomPinned>: Unpin);
+
+ assert_impl!(Enumerate<()>: Send);
+ assert_not_impl!(Enumerate<*const ()>: Send);
+ assert_impl!(Enumerate<()>: Sync);
+ assert_not_impl!(Enumerate<*const ()>: Sync);
+ assert_impl!(Enumerate<()>: Unpin);
+ assert_not_impl!(Enumerate<PhantomPinned>: Unpin);
+
+ assert_impl!(ErrInto<(), *const ()>: Send);
+ assert_not_impl!(ErrInto<*const (), ()>: Send);
+ assert_impl!(ErrInto<(), *const ()>: Sync);
+ assert_not_impl!(ErrInto<*const (), ()>: Sync);
+ assert_impl!(ErrInto<(), PhantomPinned>: Unpin);
+ assert_not_impl!(ErrInto<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Filter<SendStream<()>, (), ()>: Send);
+ assert_not_impl!(Filter<LocalStream<()>, (), ()>: Send);
+ assert_not_impl!(Filter<SendStream, (), ()>: Send);
+ assert_not_impl!(Filter<SendStream<()>, *const (), ()>: Send);
+ assert_not_impl!(Filter<SendStream<()>, (), *const ()>: Send);
+ assert_impl!(Filter<SyncStream<()>, (), ()>: Sync);
+ assert_not_impl!(Filter<LocalStream<()>, (), ()>: Sync);
+ assert_not_impl!(Filter<SyncStream, (), ()>: Sync);
+ assert_not_impl!(Filter<SyncStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(Filter<SyncStream<()>, (), *const ()>: Sync);
+ assert_impl!(Filter<UnpinStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Filter<PinnedStream, (), ()>: Unpin);
+ assert_not_impl!(Filter<UnpinStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(FilterMap<(), (), ()>: Send);
+ assert_not_impl!(FilterMap<*const (), (), ()>: Send);
+ assert_not_impl!(FilterMap<(), *const (), ()>: Send);
+ assert_not_impl!(FilterMap<(), (), *const ()>: Send);
+ assert_impl!(FilterMap<(), (), ()>: Sync);
+ assert_not_impl!(FilterMap<*const (), (), ()>: Sync);
+ assert_not_impl!(FilterMap<(), *const (), ()>: Sync);
+ assert_not_impl!(FilterMap<(), (), *const ()>: Sync);
+ assert_impl!(FilterMap<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(FilterMap<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(FilterMap<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(FlatMap<(), (), ()>: Send);
+ assert_not_impl!(FlatMap<*const (), (), ()>: Send);
+ assert_not_impl!(FlatMap<(), *const (), ()>: Send);
+ assert_not_impl!(FlatMap<(), (), *const ()>: Send);
+ assert_impl!(FlatMap<(), (), ()>: Sync);
+ assert_not_impl!(FlatMap<*const (), (), ()>: Sync);
+ assert_not_impl!(FlatMap<(), *const (), ()>: Sync);
+ assert_not_impl!(FlatMap<(), (), *const ()>: Sync);
+ assert_impl!(FlatMap<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(FlatMap<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(FlatMap<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Flatten<SendStream<()>>: Send);
+ assert_not_impl!(Flatten<SendStream>: Send);
+ assert_not_impl!(Flatten<LocalStream>: Send);
+ assert_impl!(Flatten<SyncStream<()>>: Sync);
+ assert_not_impl!(Flatten<LocalStream<()>>: Sync);
+ assert_not_impl!(Flatten<SyncStream>: Sync);
+ assert_impl!(Flatten<UnpinStream<()>>: Unpin);
+ assert_not_impl!(Flatten<UnpinStream>: Unpin);
+ assert_not_impl!(Flatten<PinnedStream>: Unpin);
+
+ assert_impl!(Fold<(), (), (), ()>: Send);
+ assert_not_impl!(Fold<*const (), (), (), ()>: Send);
+ assert_not_impl!(Fold<(), *const (), (), ()>: Send);
+ assert_not_impl!(Fold<(), (), *const (), ()>: Send);
+ assert_not_impl!(Fold<(), (), (), *const ()>: Send);
+ assert_impl!(Fold<(), (), (), ()>: Sync);
+ assert_not_impl!(Fold<*const (), (), (), ()>: Sync);
+ assert_not_impl!(Fold<(), *const (), (), ()>: Sync);
+ assert_not_impl!(Fold<(), (), *const (), ()>: Sync);
+ assert_not_impl!(Fold<(), (), (), *const ()>: Sync);
+ assert_impl!(Fold<(), (), PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(Fold<PhantomPinned, (), (), ()>: Unpin);
+ assert_not_impl!(Fold<(), PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(ForEach<(), (), ()>: Send);
+ assert_not_impl!(ForEach<*const (), (), ()>: Send);
+ assert_not_impl!(ForEach<(), *const (), ()>: Send);
+ assert_not_impl!(ForEach<(), (), *const ()>: Send);
+ assert_impl!(ForEach<(), (), ()>: Sync);
+ assert_not_impl!(ForEach<*const (), (), ()>: Sync);
+ assert_not_impl!(ForEach<(), *const (), ()>: Sync);
+ assert_not_impl!(ForEach<(), (), *const ()>: Sync);
+ assert_impl!(ForEach<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(ForEach<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(ForEach<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(ForEachConcurrent<(), (), ()>: Send);
+ assert_not_impl!(ForEachConcurrent<*const (), (), ()>: Send);
+ assert_not_impl!(ForEachConcurrent<(), *const (), ()>: Send);
+ assert_not_impl!(ForEachConcurrent<(), (), *const ()>: Send);
+ assert_impl!(ForEachConcurrent<(), (), ()>: Sync);
+ assert_not_impl!(ForEachConcurrent<*const (), (), ()>: Sync);
+ assert_not_impl!(ForEachConcurrent<(), *const (), ()>: Sync);
+ assert_not_impl!(ForEachConcurrent<(), (), *const ()>: Sync);
+ assert_impl!(ForEachConcurrent<(), PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(ForEachConcurrent<PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(Forward<SendTryStream<()>, ()>: Send);
+ assert_not_impl!(Forward<SendTryStream, ()>: Send);
+ assert_not_impl!(Forward<SendTryStream<()>, *const ()>: Send);
+ assert_not_impl!(Forward<LocalTryStream, ()>: Send);
+ assert_impl!(Forward<SyncTryStream<()>, ()>: Sync);
+ assert_not_impl!(Forward<SyncTryStream, ()>: Sync);
+ assert_not_impl!(Forward<SyncTryStream<()>, *const ()>: Sync);
+ assert_not_impl!(Forward<LocalTryStream, ()>: Sync);
+ assert_impl!(Forward<UnpinTryStream, ()>: Unpin);
+ assert_not_impl!(Forward<UnpinTryStream, PhantomPinned>: Unpin);
+ assert_not_impl!(Forward<PinnedTryStream, ()>: Unpin);
+
+ assert_impl!(Fuse<()>: Send);
+ assert_not_impl!(Fuse<*const ()>: Send);
+ assert_impl!(Fuse<()>: Sync);
+ assert_not_impl!(Fuse<*const ()>: Sync);
+ assert_impl!(Fuse<()>: Unpin);
+ assert_not_impl!(Fuse<PhantomPinned>: Unpin);
+
+ assert_impl!(FuturesOrdered<SendFuture<()>>: Send);
+ assert_not_impl!(FuturesOrdered<SendFuture>: Send);
+ assert_not_impl!(FuturesOrdered<LocalFuture>: Send);
+ assert_impl!(FuturesOrdered<SyncFuture<()>>: Sync);
+ assert_not_impl!(FuturesOrdered<LocalFuture<()>>: Sync);
+ assert_not_impl!(FuturesOrdered<SyncFuture>: Sync);
+ assert_impl!(FuturesOrdered<PinnedFuture>: Unpin);
+
+ assert_impl!(FuturesUnordered<()>: Send);
+ assert_not_impl!(FuturesUnordered<*const ()>: Send);
+ assert_impl!(FuturesUnordered<()>: Sync);
+ assert_not_impl!(FuturesUnordered<*const ()>: Sync);
+ assert_impl!(FuturesUnordered<PhantomPinned>: Unpin);
+
+ assert_impl!(Inspect<(), ()>: Send);
+ assert_not_impl!(Inspect<*const (), ()>: Send);
+ assert_not_impl!(Inspect<(), *const ()>: Send);
+ assert_impl!(Inspect<(), ()>: Sync);
+ assert_not_impl!(Inspect<*const (), ()>: Sync);
+ assert_not_impl!(Inspect<(), *const ()>: Sync);
+ assert_impl!(Inspect<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Inspect<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(InspectErr<(), ()>: Send);
+ assert_not_impl!(InspectErr<*const (), ()>: Send);
+ assert_not_impl!(InspectErr<(), *const ()>: Send);
+ assert_impl!(InspectErr<(), ()>: Sync);
+ assert_not_impl!(InspectErr<*const (), ()>: Sync);
+ assert_not_impl!(InspectErr<(), *const ()>: Sync);
+ assert_impl!(InspectErr<(), PhantomPinned>: Unpin);
+ assert_not_impl!(InspectErr<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(InspectOk<(), ()>: Send);
+ assert_not_impl!(InspectOk<*const (), ()>: Send);
+ assert_not_impl!(InspectOk<(), *const ()>: Send);
+ assert_impl!(InspectOk<(), ()>: Sync);
+ assert_not_impl!(InspectOk<*const (), ()>: Sync);
+ assert_not_impl!(InspectOk<(), *const ()>: Sync);
+ assert_impl!(InspectOk<(), PhantomPinned>: Unpin);
+ assert_not_impl!(InspectOk<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(IntoAsyncRead<SendTryStream<Vec<u8>, io::Error>>: Send);
+ assert_not_impl!(IntoAsyncRead<LocalTryStream<Vec<u8>, io::Error>>: Send);
+ assert_impl!(IntoAsyncRead<SyncTryStream<Vec<u8>, io::Error>>: Sync);
+ assert_not_impl!(IntoAsyncRead<LocalTryStream<Vec<u8>, io::Error>>: Sync);
+ assert_impl!(IntoAsyncRead<UnpinTryStream<Vec<u8>, io::Error>>: Unpin);
+ // IntoAsyncRead requires `St: Unpin`
+ // assert_not_impl!(IntoAsyncRead<PinnedTryStream<Vec<u8>, io::Error>>: Unpin);
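+ // (The adapter only accepts `Unpin` streams, so there is no `!Unpin`
+ // instantiation to assert against.)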
+
+ assert_impl!(IntoStream<()>: Send);
+ assert_not_impl!(IntoStream<*const ()>: Send);
+ assert_impl!(IntoStream<()>: Sync);
+ assert_not_impl!(IntoStream<*const ()>: Sync);
+ assert_impl!(IntoStream<()>: Unpin);
+ assert_not_impl!(IntoStream<PhantomPinned>: Unpin);
+
+ assert_impl!(Iter<()>: Send);
+ assert_not_impl!(Iter<*const ()>: Send);
+ assert_impl!(Iter<()>: Sync);
+ assert_not_impl!(Iter<*const ()>: Sync);
+ assert_impl!(Iter<PhantomPinned>: Unpin);
+
+ assert_impl!(Map<(), ()>: Send);
+ assert_not_impl!(Map<*const (), ()>: Send);
+ assert_not_impl!(Map<(), *const ()>: Send);
+ assert_impl!(Map<(), ()>: Sync);
+ assert_not_impl!(Map<*const (), ()>: Sync);
+ assert_not_impl!(Map<(), *const ()>: Sync);
+ assert_impl!(Map<(), PhantomPinned>: Unpin);
+ assert_not_impl!(Map<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapErr<(), ()>: Send);
+ assert_not_impl!(MapErr<*const (), ()>: Send);
+ assert_not_impl!(MapErr<(), *const ()>: Send);
+ assert_impl!(MapErr<(), ()>: Sync);
+ assert_not_impl!(MapErr<*const (), ()>: Sync);
+ assert_not_impl!(MapErr<(), *const ()>: Sync);
+ assert_impl!(MapErr<(), PhantomPinned>: Unpin);
+ assert_not_impl!(MapErr<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(MapOk<(), ()>: Send);
+ assert_not_impl!(MapOk<*const (), ()>: Send);
+ assert_not_impl!(MapOk<(), *const ()>: Send);
+ assert_impl!(MapOk<(), ()>: Sync);
+ assert_not_impl!(MapOk<*const (), ()>: Sync);
+ assert_not_impl!(MapOk<(), *const ()>: Sync);
+ assert_impl!(MapOk<(), PhantomPinned>: Unpin);
+ assert_not_impl!(MapOk<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Next<'_, ()>: Send);
+ assert_not_impl!(Next<'_, *const ()>: Send);
+ assert_impl!(Next<'_, ()>: Sync);
+ assert_not_impl!(Next<'_, *const ()>: Sync);
+ assert_impl!(Next<'_, ()>: Unpin);
+ assert_not_impl!(Next<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(NextIf<'_, SendStream<()>, ()>: Send);
+ assert_not_impl!(NextIf<'_, SendStream<()>, *const ()>: Send);
+ assert_not_impl!(NextIf<'_, SendStream, ()>: Send);
+ assert_not_impl!(NextIf<'_, LocalStream<()>, ()>: Send);
+ assert_impl!(NextIf<'_, SyncStream<()>, ()>: Sync);
+ assert_not_impl!(NextIf<'_, SyncStream<()>, *const ()>: Sync);
+ assert_not_impl!(NextIf<'_, SyncStream, ()>: Sync);
+ assert_not_impl!(NextIf<'_, LocalStream<()>, ()>: Sync);
+ assert_impl!(NextIf<'_, PinnedStream, PhantomPinned>: Unpin);
+
+ assert_impl!(NextIfEq<'_, SendStream<()>, ()>: Send);
+ assert_not_impl!(NextIfEq<'_, SendStream<()>, *const ()>: Send);
+ assert_not_impl!(NextIfEq<'_, SendStream, ()>: Send);
+ assert_not_impl!(NextIfEq<'_, LocalStream<()>, ()>: Send);
+ assert_impl!(NextIfEq<'_, SyncStream<()>, ()>: Sync);
+ assert_not_impl!(NextIfEq<'_, SyncStream<()>, *const ()>: Sync);
+ assert_not_impl!(NextIfEq<'_, SyncStream, ()>: Sync);
+ assert_not_impl!(NextIfEq<'_, LocalStream<()>, ()>: Sync);
+ assert_impl!(NextIfEq<'_, PinnedStream, PhantomPinned>: Unpin);
+
+ assert_impl!(Once<()>: Send);
+ assert_not_impl!(Once<*const ()>: Send);
+ assert_impl!(Once<()>: Sync);
+ assert_not_impl!(Once<*const ()>: Sync);
+ assert_impl!(Once<()>: Unpin);
+ assert_not_impl!(Once<PhantomPinned>: Unpin);
+
+ assert_impl!(OrElse<(), (), ()>: Send);
+ assert_not_impl!(OrElse<*const (), (), ()>: Send);
+ assert_not_impl!(OrElse<(), *const (), ()>: Send);
+ assert_not_impl!(OrElse<(), (), *const ()>: Send);
+ assert_impl!(OrElse<(), (), ()>: Sync);
+ assert_not_impl!(OrElse<*const (), (), ()>: Sync);
+ assert_not_impl!(OrElse<(), *const (), ()>: Sync);
+ assert_not_impl!(OrElse<(), (), *const ()>: Sync);
+ assert_impl!(OrElse<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(OrElse<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(OrElse<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Peek<'_, SendStream<()>>: Send);
+ assert_not_impl!(Peek<'_, SendStream>: Send);
+ assert_not_impl!(Peek<'_, LocalStream<()>>: Send);
+ assert_impl!(Peek<'_, SyncStream<()>>: Sync);
+ assert_not_impl!(Peek<'_, SyncStream>: Sync);
+ assert_not_impl!(Peek<'_, LocalStream<()>>: Sync);
+ assert_impl!(Peek<'_, PinnedStream>: Unpin);
+
+ assert_impl!(PeekMut<'_, SendStream<()>>: Send);
+ assert_not_impl!(PeekMut<'_, SendStream>: Send);
+ assert_not_impl!(PeekMut<'_, LocalStream<()>>: Send);
+ assert_impl!(PeekMut<'_, SyncStream<()>>: Sync);
+ assert_not_impl!(PeekMut<'_, SyncStream>: Sync);
+ assert_not_impl!(PeekMut<'_, LocalStream<()>>: Sync);
+ assert_impl!(PeekMut<'_, PinnedStream>: Unpin);
+
+ assert_impl!(Peekable<SendStream<()>>: Send);
+ assert_not_impl!(Peekable<SendStream>: Send);
+ assert_not_impl!(Peekable<LocalStream>: Send);
+ assert_impl!(Peekable<SyncStream<()>>: Sync);
+ assert_not_impl!(Peekable<SyncStream>: Sync);
+ assert_not_impl!(Peekable<LocalStream>: Sync);
+ assert_impl!(Peekable<UnpinStream>: Unpin);
+ assert_not_impl!(Peekable<PinnedStream>: Unpin);
+
+ assert_impl!(Pending<()>: Send);
+ assert_not_impl!(Pending<*const ()>: Send);
+ assert_impl!(Pending<()>: Sync);
+ assert_not_impl!(Pending<*const ()>: Sync);
+ assert_impl!(Pending<PhantomPinned>: Unpin);
+
+ assert_impl!(PollFn<()>: Send);
+ assert_not_impl!(PollFn<*const ()>: Send);
+ assert_impl!(PollFn<()>: Sync);
+ assert_not_impl!(PollFn<*const ()>: Sync);
+ assert_impl!(PollFn<PhantomPinned>: Unpin);
+
+ assert_impl!(PollImmediate<SendStream>: Send);
+ assert_not_impl!(PollImmediate<LocalStream<()>>: Send);
+ assert_impl!(PollImmediate<SyncStream>: Sync);
+ assert_not_impl!(PollImmediate<LocalStream<()>>: Sync);
+ assert_impl!(PollImmediate<UnpinStream>: Unpin);
+ assert_not_impl!(PollImmediate<PinnedStream>: Unpin);
+
+ assert_impl!(ReadyChunks<SendStream<()>>: Send);
+ assert_not_impl!(ReadyChunks<SendStream>: Send);
+ assert_not_impl!(ReadyChunks<LocalStream>: Send);
+ assert_impl!(ReadyChunks<SyncStream<()>>: Sync);
+ assert_not_impl!(ReadyChunks<SyncStream>: Sync);
+ assert_not_impl!(ReadyChunks<LocalStream>: Sync);
+ assert_impl!(ReadyChunks<UnpinStream>: Unpin);
+ assert_not_impl!(ReadyChunks<PinnedStream>: Unpin);
+
+ assert_impl!(Repeat<()>: Send);
+ assert_not_impl!(Repeat<*const ()>: Send);
+ assert_impl!(Repeat<()>: Sync);
+ assert_not_impl!(Repeat<*const ()>: Sync);
+ assert_impl!(Repeat<PhantomPinned>: Unpin);
+
+ assert_impl!(RepeatWith<()>: Send);
+ assert_not_impl!(RepeatWith<*const ()>: Send);
+ assert_impl!(RepeatWith<()>: Sync);
+ assert_not_impl!(RepeatWith<*const ()>: Sync);
+ // RepeatWith requires `F: FnMut() -> A`
+ assert_impl!(RepeatWith<fn() -> ()>: Unpin);
+ // assert_impl!(RepeatWith<PhantomPinned>: Unpin);
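+ // (`PhantomPinned` does not implement `FnMut()`, so `fn() -> ()` serves as
+ // the `Unpin` witness above instead.)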
+
+ assert_impl!(ReuniteError<(), ()>: Send);
+ assert_not_impl!(ReuniteError<*const (), ()>: Send);
+ assert_not_impl!(ReuniteError<(), *const ()>: Send);
+ assert_impl!(ReuniteError<(), ()>: Sync);
+ assert_not_impl!(ReuniteError<*const (), ()>: Sync);
+ assert_not_impl!(ReuniteError<(), *const ()>: Sync);
+ assert_impl!(ReuniteError<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(Scan<SendStream, (), (), ()>: Send);
+ assert_not_impl!(Scan<LocalStream<()>, (), (), ()>: Send);
+ assert_not_impl!(Scan<SendStream<()>, *const (), (), ()>: Send);
+ assert_not_impl!(Scan<SendStream<()>, (), *const (), ()>: Send);
+ assert_not_impl!(Scan<SendStream<()>, (), (), *const ()>: Send);
+ assert_impl!(Scan<SyncStream, (), (), ()>: Sync);
+ assert_not_impl!(Scan<LocalStream<()>, (), (), ()>: Sync);
+ assert_not_impl!(Scan<SyncStream<()>, *const (), (), ()>: Sync);
+ assert_not_impl!(Scan<SyncStream<()>, (), *const (), ()>: Sync);
+ assert_not_impl!(Scan<SyncStream<()>, (), (), *const ()>: Sync);
+ assert_impl!(Scan<UnpinStream, PhantomPinned, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Scan<PinnedStream, (), (), ()>: Unpin);
+ assert_not_impl!(Scan<UnpinStream, (), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Select<(), ()>: Send);
+ assert_not_impl!(Select<*const (), ()>: Send);
+ assert_not_impl!(Select<(), *const ()>: Send);
+ assert_impl!(Select<(), ()>: Sync);
+ assert_not_impl!(Select<*const (), ()>: Sync);
+ assert_not_impl!(Select<(), *const ()>: Sync);
+ assert_impl!(Select<(), ()>: Unpin);
+ assert_not_impl!(Select<PhantomPinned, ()>: Unpin);
+ assert_not_impl!(Select<(), PhantomPinned>: Unpin);
+
+ assert_impl!(SelectAll<()>: Send);
+ assert_not_impl!(SelectAll<*const ()>: Send);
+ assert_impl!(SelectAll<()>: Sync);
+ assert_not_impl!(SelectAll<*const ()>: Sync);
+ assert_impl!(SelectAll<PhantomPinned>: Unpin);
+
+ assert_impl!(SelectNextSome<'_, ()>: Send);
+ assert_not_impl!(SelectNextSome<'_, *const ()>: Send);
+ assert_impl!(SelectNextSome<'_, ()>: Sync);
+ assert_not_impl!(SelectNextSome<'_, *const ()>: Sync);
+ assert_impl!(SelectNextSome<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(Skip<()>: Send);
+ assert_not_impl!(Skip<*const ()>: Send);
+ assert_impl!(Skip<()>: Sync);
+ assert_not_impl!(Skip<*const ()>: Sync);
+ assert_impl!(Skip<()>: Unpin);
+ assert_not_impl!(Skip<PhantomPinned>: Unpin);
+
+ assert_impl!(SkipWhile<SendStream<()>, (), ()>: Send);
+ assert_not_impl!(SkipWhile<LocalStream<()>, (), ()>: Send);
+ assert_not_impl!(SkipWhile<SendStream, (), ()>: Send);
+ assert_not_impl!(SkipWhile<SendStream<()>, *const (), ()>: Send);
+ assert_not_impl!(SkipWhile<SendStream<()>, (), *const ()>: Send);
+ assert_impl!(SkipWhile<SyncStream<()>, (), ()>: Sync);
+ assert_not_impl!(SkipWhile<LocalStream<()>, (), ()>: Sync);
+ assert_not_impl!(SkipWhile<SyncStream, (), ()>: Sync);
+ assert_not_impl!(SkipWhile<SyncStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(SkipWhile<SyncStream<()>, (), *const ()>: Sync);
+ assert_impl!(SkipWhile<UnpinStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(SkipWhile<PinnedStream, (), ()>: Unpin);
+ assert_not_impl!(SkipWhile<UnpinStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(SplitSink<(), ()>: Send);
+ assert_not_impl!(SplitSink<*const (), ()>: Send);
+ assert_not_impl!(SplitSink<(), *const ()>: Send);
+ assert_impl!(SplitSink<(), ()>: Sync);
+ assert_not_impl!(SplitSink<*const (), ()>: Sync);
+ assert_not_impl!(SplitSink<(), *const ()>: Sync);
+ assert_impl!(SplitSink<PhantomPinned, PhantomPinned>: Unpin);
+
+ assert_impl!(SplitStream<()>: Send);
+ assert_not_impl!(SplitStream<*const ()>: Send);
+ assert_impl!(SplitStream<()>: Sync);
+ assert_not_impl!(SplitStream<*const ()>: Sync);
+ assert_impl!(SplitStream<PhantomPinned>: Unpin);
+
+ assert_impl!(StreamFuture<()>: Send);
+ assert_not_impl!(StreamFuture<*const ()>: Send);
+ assert_impl!(StreamFuture<()>: Sync);
+ assert_not_impl!(StreamFuture<*const ()>: Sync);
+ assert_impl!(StreamFuture<()>: Unpin);
+ assert_not_impl!(StreamFuture<PhantomPinned>: Unpin);
+
+ assert_impl!(Take<()>: Send);
+ assert_not_impl!(Take<*const ()>: Send);
+ assert_impl!(Take<()>: Sync);
+ assert_not_impl!(Take<*const ()>: Sync);
+ assert_impl!(Take<()>: Unpin);
+ assert_not_impl!(Take<PhantomPinned>: Unpin);
+
+ assert_impl!(TakeUntil<SendStream, SendFuture<()>>: Send);
+ assert_not_impl!(TakeUntil<SendStream, SendFuture>: Send);
+ assert_not_impl!(TakeUntil<SendStream, LocalFuture<()>>: Send);
+ assert_not_impl!(TakeUntil<LocalStream, SendFuture<()>>: Send);
+ assert_impl!(TakeUntil<SyncStream, SyncFuture<()>>: Sync);
+ assert_not_impl!(TakeUntil<SyncStream, SyncFuture>: Sync);
+ assert_not_impl!(TakeUntil<SyncStream, LocalFuture<()>>: Sync);
+ assert_not_impl!(TakeUntil<LocalStream, SyncFuture<()>>: Sync);
+ assert_impl!(TakeUntil<UnpinStream, UnpinFuture>: Unpin);
+ assert_not_impl!(TakeUntil<PinnedStream, UnpinFuture>: Unpin);
+ assert_not_impl!(TakeUntil<UnpinStream, PinnedFuture>: Unpin);
+
+ assert_impl!(TakeWhile<SendStream<()>, (), ()>: Send);
+ assert_not_impl!(TakeWhile<LocalStream<()>, (), ()>: Send);
+ assert_not_impl!(TakeWhile<SendStream, (), ()>: Send);
+ assert_not_impl!(TakeWhile<SendStream<()>, *const (), ()>: Send);
+ assert_not_impl!(TakeWhile<SendStream<()>, (), *const ()>: Send);
+ assert_impl!(TakeWhile<SyncStream<()>, (), ()>: Sync);
+ assert_not_impl!(TakeWhile<LocalStream<()>, (), ()>: Sync);
+ assert_not_impl!(TakeWhile<SyncStream, (), ()>: Sync);
+ assert_not_impl!(TakeWhile<SyncStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(TakeWhile<SyncStream<()>, (), *const ()>: Sync);
+ assert_impl!(TakeWhile<UnpinStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(TakeWhile<PinnedStream, (), ()>: Unpin);
+ assert_not_impl!(TakeWhile<UnpinStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(Then<SendStream, (), ()>: Send);
+ assert_not_impl!(Then<LocalStream<()>, (), ()>: Send);
+ assert_not_impl!(Then<SendStream<()>, *const (), ()>: Send);
+ assert_not_impl!(Then<SendStream<()>, (), *const ()>: Send);
+ assert_impl!(Then<SyncStream, (), ()>: Sync);
+ assert_not_impl!(Then<LocalStream<()>, (), ()>: Sync);
+ assert_not_impl!(Then<SyncStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(Then<SyncStream<()>, (), *const ()>: Sync);
+ assert_impl!(Then<UnpinStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(Then<PinnedStream, (), ()>: Unpin);
+ assert_not_impl!(Then<UnpinStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryBufferUnordered<SendTryStream<()>>: Send);
+ assert_not_impl!(TryBufferUnordered<SendTryStream>: Send);
+ assert_not_impl!(TryBufferUnordered<LocalTryStream>: Send);
+ assert_impl!(TryBufferUnordered<SyncTryStream<()>>: Sync);
+ assert_not_impl!(TryBufferUnordered<SyncTryStream>: Sync);
+ assert_not_impl!(TryBufferUnordered<LocalTryStream>: Sync);
+ assert_impl!(TryBufferUnordered<UnpinTryStream>: Unpin);
+ assert_not_impl!(TryBufferUnordered<PinnedTryStream>: Unpin);
+
+ assert_impl!(TryBuffered<SendTryStream<SendTryFuture<(), ()>>>: Send);
+ assert_not_impl!(TryBuffered<SendTryStream<SendTryFuture<*const (), ()>>>: Send);
+ assert_not_impl!(TryBuffered<SendTryStream<SendTryFuture<(), *const ()>>>: Send);
+ assert_not_impl!(TryBuffered<SendTryStream<LocalTryFuture<(), ()>>>: Send);
+ assert_not_impl!(TryBuffered<LocalTryStream<SendTryFuture<(), ()>>>: Send);
+ assert_impl!(TryBuffered<SyncTryStream<SyncTryFuture<(), ()>>>: Sync);
+ assert_not_impl!(TryBuffered<SyncTryStream<SyncTryFuture<*const (), ()>>>: Sync);
+ assert_not_impl!(TryBuffered<SyncTryStream<SyncTryFuture<(), *const ()>>>: Sync);
+ assert_not_impl!(TryBuffered<SyncTryStream<LocalTryFuture<(), ()>>>: Sync);
+ assert_not_impl!(TryBuffered<LocalTryStream<SyncTryFuture<(), ()>>>: Sync);
+ assert_impl!(TryBuffered<UnpinTryStream<PinnedTryFuture>>: Unpin);
+ assert_not_impl!(TryBuffered<PinnedTryStream<UnpinTryFuture>>: Unpin);
+
+ assert_impl!(TryCollect<(), ()>: Send);
+ assert_not_impl!(TryCollect<*const (), ()>: Send);
+ assert_not_impl!(TryCollect<(), *const ()>: Send);
+ assert_impl!(TryCollect<(), ()>: Sync);
+ assert_not_impl!(TryCollect<*const (), ()>: Sync);
+ assert_not_impl!(TryCollect<(), *const ()>: Sync);
+ assert_impl!(TryCollect<(), PhantomPinned>: Unpin);
+ assert_not_impl!(TryCollect<PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryConcat<SendTryStream<()>>: Send);
+ assert_not_impl!(TryConcat<SendTryStream>: Send);
+ assert_not_impl!(TryConcat<LocalTryStream>: Send);
+ assert_impl!(TryConcat<SyncTryStream<()>>: Sync);
+ assert_not_impl!(TryConcat<SyncTryStream>: Sync);
+ assert_not_impl!(TryConcat<LocalTryStream>: Sync);
+ assert_impl!(TryConcat<UnpinTryStream>: Unpin);
+ assert_not_impl!(TryConcat<PinnedTryStream>: Unpin);
+
+ assert_impl!(TryFilter<SendTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TryFilter<LocalTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TryFilter<SendTryStream, (), ()>: Send);
+ assert_not_impl!(TryFilter<SendTryStream<()>, *const (), ()>: Send);
+ assert_not_impl!(TryFilter<SendTryStream<()>, (), *const ()>: Send);
+ assert_impl!(TryFilter<SyncTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TryFilter<LocalTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TryFilter<SyncTryStream, (), ()>: Sync);
+ assert_not_impl!(TryFilter<SyncTryStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(TryFilter<SyncTryStream<()>, (), *const ()>: Sync);
+ assert_impl!(TryFilter<UnpinTryStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(TryFilter<PinnedTryStream, (), ()>: Unpin);
+ assert_not_impl!(TryFilter<UnpinTryStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryFilterMap<(), (), ()>: Send);
+ assert_not_impl!(TryFilterMap<*const (), (), ()>: Send);
+ assert_not_impl!(TryFilterMap<(), *const (), ()>: Send);
+ assert_not_impl!(TryFilterMap<(), (), *const ()>: Send);
+ assert_impl!(TryFilterMap<(), (), ()>: Sync);
+ assert_not_impl!(TryFilterMap<*const (), (), ()>: Sync);
+ assert_not_impl!(TryFilterMap<(), *const (), ()>: Sync);
+ assert_not_impl!(TryFilterMap<(), (), *const ()>: Sync);
+ assert_impl!(TryFilterMap<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(TryFilterMap<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(TryFilterMap<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryFlatten<SendTryStream<()>>: Send);
+    assert_not_impl!(TryFlatten<SendTryStream>: Send);
+    assert_not_impl!(TryFlatten<LocalTryStream>: Send);
+    assert_impl!(TryFlatten<SyncTryStream<()>>: Sync);
+    assert_not_impl!(TryFlatten<SyncTryStream>: Sync);
+    assert_not_impl!(TryFlatten<LocalTryStream>: Sync);
+ assert_impl!(TryFlatten<UnpinTryStream<()>>: Unpin);
+ assert_not_impl!(TryFlatten<UnpinTryStream>: Unpin);
+ assert_not_impl!(TryFlatten<PinnedTryStream>: Unpin);
+
+ assert_impl!(TryFold<(), (), (), ()>: Send);
+ assert_not_impl!(TryFold<*const (), (), (), ()>: Send);
+ assert_not_impl!(TryFold<(), *const (), (), ()>: Send);
+ assert_not_impl!(TryFold<(), (), *const (), ()>: Send);
+ assert_not_impl!(TryFold<(), (), (), *const ()>: Send);
+ assert_impl!(TryFold<(), (), (), ()>: Sync);
+ assert_not_impl!(TryFold<*const (), (), (), ()>: Sync);
+ assert_not_impl!(TryFold<(), *const (), (), ()>: Sync);
+ assert_not_impl!(TryFold<(), (), *const (), ()>: Sync);
+ assert_not_impl!(TryFold<(), (), (), *const ()>: Sync);
+ assert_impl!(TryFold<(), (), PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(TryFold<PhantomPinned, (), (), ()>: Unpin);
+ assert_not_impl!(TryFold<(), PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(TryForEach<(), (), ()>: Send);
+ assert_not_impl!(TryForEach<*const (), (), ()>: Send);
+ assert_not_impl!(TryForEach<(), *const (), ()>: Send);
+ assert_not_impl!(TryForEach<(), (), *const ()>: Send);
+ assert_impl!(TryForEach<(), (), ()>: Sync);
+ assert_not_impl!(TryForEach<*const (), (), ()>: Sync);
+ assert_not_impl!(TryForEach<(), *const (), ()>: Sync);
+ assert_not_impl!(TryForEach<(), (), *const ()>: Sync);
+ assert_impl!(TryForEach<(), (), PhantomPinned>: Unpin);
+ assert_not_impl!(TryForEach<PhantomPinned, (), ()>: Unpin);
+ assert_not_impl!(TryForEach<(), PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryForEachConcurrent<(), (), ()>: Send);
+ assert_not_impl!(TryForEachConcurrent<*const (), (), ()>: Send);
+ assert_not_impl!(TryForEachConcurrent<(), *const (), ()>: Send);
+ assert_not_impl!(TryForEachConcurrent<(), (), *const ()>: Send);
+ assert_impl!(TryForEachConcurrent<(), (), ()>: Sync);
+ assert_not_impl!(TryForEachConcurrent<*const (), (), ()>: Sync);
+ assert_not_impl!(TryForEachConcurrent<(), *const (), ()>: Sync);
+ assert_not_impl!(TryForEachConcurrent<(), (), *const ()>: Sync);
+ assert_impl!(TryForEachConcurrent<(), PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(TryForEachConcurrent<PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(TryNext<'_, ()>: Send);
+ assert_not_impl!(TryNext<'_, *const ()>: Send);
+ assert_impl!(TryNext<'_, ()>: Sync);
+ assert_not_impl!(TryNext<'_, *const ()>: Sync);
+ assert_impl!(TryNext<'_, ()>: Unpin);
+ assert_not_impl!(TryNext<'_, PhantomPinned>: Unpin);
+
+ assert_impl!(TrySkipWhile<SendTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TrySkipWhile<LocalTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TrySkipWhile<SendTryStream, (), ()>: Send);
+ assert_not_impl!(TrySkipWhile<SendTryStream<()>, *const (), ()>: Send);
+ assert_not_impl!(TrySkipWhile<SendTryStream<()>, (), *const ()>: Send);
+ assert_impl!(TrySkipWhile<SyncTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TrySkipWhile<LocalTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TrySkipWhile<SyncTryStream, (), ()>: Sync);
+ assert_not_impl!(TrySkipWhile<SyncTryStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(TrySkipWhile<SyncTryStream<()>, (), *const ()>: Sync);
+ assert_impl!(TrySkipWhile<UnpinTryStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(TrySkipWhile<PinnedTryStream, (), ()>: Unpin);
+ assert_not_impl!(TrySkipWhile<UnpinTryStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryTakeWhile<SendTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TryTakeWhile<LocalTryStream<()>, (), ()>: Send);
+ assert_not_impl!(TryTakeWhile<SendTryStream, (), ()>: Send);
+ assert_not_impl!(TryTakeWhile<SendTryStream<()>, *const (), ()>: Send);
+ assert_not_impl!(TryTakeWhile<SendTryStream<()>, (), *const ()>: Send);
+ assert_impl!(TryTakeWhile<SyncTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TryTakeWhile<LocalTryStream<()>, (), ()>: Sync);
+ assert_not_impl!(TryTakeWhile<SyncTryStream, (), ()>: Sync);
+ assert_not_impl!(TryTakeWhile<SyncTryStream<()>, *const (), ()>: Sync);
+ assert_not_impl!(TryTakeWhile<SyncTryStream<()>, (), *const ()>: Sync);
+ assert_impl!(TryTakeWhile<UnpinTryStream, (), PhantomPinned>: Unpin);
+ assert_not_impl!(TryTakeWhile<PinnedTryStream, (), ()>: Unpin);
+ assert_not_impl!(TryTakeWhile<UnpinTryStream, PhantomPinned, ()>: Unpin);
+
+ assert_impl!(TryUnfold<(), (), ()>: Send);
+ assert_not_impl!(TryUnfold<*const (), (), ()>: Send);
+ assert_not_impl!(TryUnfold<(), *const (), ()>: Send);
+ assert_not_impl!(TryUnfold<(), (), *const ()>: Send);
+ assert_impl!(TryUnfold<(), (), ()>: Sync);
+ assert_not_impl!(TryUnfold<*const (), (), ()>: Sync);
+ assert_not_impl!(TryUnfold<(), *const (), ()>: Sync);
+ assert_not_impl!(TryUnfold<(), (), *const ()>: Sync);
+ assert_impl!(TryUnfold<PhantomPinned, PhantomPinned, ()>: Unpin);
+ assert_not_impl!(TryUnfold<(), (), PhantomPinned>: Unpin);
+
+ assert_impl!(Unfold<(), (), ()>: Send);
+ assert_not_impl!(Unfold<*const (), (), ()>: Send);
+ assert_not_impl!(Unfold<(), *const (), ()>: Send);
+ assert_not_impl!(Unfold<(), (), *const ()>: Send);
+ assert_impl!(Unfold<(), (), ()>: Sync);
+ assert_not_impl!(Unfold<*const (), (), ()>: Sync);
+ assert_not_impl!(Unfold<(), *const (), ()>: Sync);
+ assert_not_impl!(Unfold<(), (), *const ()>: Sync);
+ assert_impl!(Unfold<PhantomPinned, PhantomPinned, ()>: Unpin);
+ assert_not_impl!(Unfold<(), (), PhantomPinned>: Unpin);
+
+ assert_impl!(Unzip<(), (), ()>: Send);
+ assert_not_impl!(Unzip<*const (), (), ()>: Send);
+ assert_not_impl!(Unzip<(), *const (), ()>: Send);
+ assert_not_impl!(Unzip<(), (), *const ()>: Send);
+ assert_impl!(Unzip<(), (), ()>: Sync);
+ assert_not_impl!(Unzip<*const (), (), ()>: Sync);
+ assert_not_impl!(Unzip<(), *const (), ()>: Sync);
+ assert_not_impl!(Unzip<(), (), *const ()>: Sync);
+ assert_impl!(Unzip<(), PhantomPinned, PhantomPinned>: Unpin);
+ assert_not_impl!(Unzip<PhantomPinned, (), ()>: Unpin);
+
+ assert_impl!(Zip<SendStream<()>, SendStream<()>>: Send);
+ assert_not_impl!(Zip<SendStream, SendStream<()>>: Send);
+ assert_not_impl!(Zip<SendStream<()>, SendStream>: Send);
+ assert_not_impl!(Zip<LocalStream, SendStream<()>>: Send);
+ assert_not_impl!(Zip<SendStream<()>, LocalStream>: Send);
+ assert_impl!(Zip<SyncStream<()>, SyncStream<()>>: Sync);
+ assert_not_impl!(Zip<SyncStream, SyncStream<()>>: Sync);
+ assert_not_impl!(Zip<SyncStream<()>, SyncStream>: Sync);
+ assert_not_impl!(Zip<LocalStream, SyncStream<()>>: Sync);
+ assert_not_impl!(Zip<SyncStream<()>, LocalStream>: Sync);
+ assert_impl!(Zip<UnpinStream, UnpinStream>: Unpin);
+ assert_not_impl!(Zip<UnpinStream, PinnedStream>: Unpin);
+ assert_not_impl!(Zip<PinnedStream, UnpinStream>: Unpin);
+
+ assert_impl!(futures_unordered::Iter<()>: Send);
+ assert_not_impl!(futures_unordered::Iter<*const ()>: Send);
+ assert_impl!(futures_unordered::Iter<()>: Sync);
+ assert_not_impl!(futures_unordered::Iter<*const ()>: Sync);
+ assert_impl!(futures_unordered::Iter<()>: Unpin);
+ // The definition of futures_unordered::Iter has `Fut: Unpin` bounds.
+ // assert_not_impl!(futures_unordered::Iter<PhantomPinned>: Unpin);
+
+ assert_impl!(futures_unordered::IterMut<()>: Send);
+ assert_not_impl!(futures_unordered::IterMut<*const ()>: Send);
+ assert_impl!(futures_unordered::IterMut<()>: Sync);
+ assert_not_impl!(futures_unordered::IterMut<*const ()>: Sync);
+ assert_impl!(futures_unordered::IterMut<()>: Unpin);
+ // The definition of futures_unordered::IterMut has `Fut: Unpin` bounds.
+ // assert_not_impl!(futures_unordered::IterMut<PhantomPinned>: Unpin);
+
+ assert_impl!(futures_unordered::IterPinMut<()>: Send);
+ assert_not_impl!(futures_unordered::IterPinMut<*const ()>: Send);
+ assert_impl!(futures_unordered::IterPinMut<()>: Sync);
+ assert_not_impl!(futures_unordered::IterPinMut<*const ()>: Sync);
+ assert_impl!(futures_unordered::IterPinMut<PhantomPinned>: Unpin);
+
+ assert_impl!(futures_unordered::IterPinRef<()>: Send);
+ assert_not_impl!(futures_unordered::IterPinRef<*const ()>: Send);
+ assert_impl!(futures_unordered::IterPinRef<()>: Sync);
+ assert_not_impl!(futures_unordered::IterPinRef<*const ()>: Sync);
+ assert_impl!(futures_unordered::IterPinRef<PhantomPinned>: Unpin);
+
+ assert_impl!(futures_unordered::IntoIter<()>: Send);
+ assert_not_impl!(futures_unordered::IntoIter<*const ()>: Send);
+ assert_impl!(futures_unordered::IntoIter<()>: Sync);
+ assert_not_impl!(futures_unordered::IntoIter<*const ()>: Sync);
+ // The definition of futures_unordered::IntoIter has `Fut: Unpin` bounds.
+ // assert_not_impl!(futures_unordered::IntoIter<PhantomPinned>: Unpin);
+}
+
+/// Assert Send/Sync/Unpin for all public types in `futures::task`.
+pub mod task {
+ use super::*;
+ use futures::task::*;
+
+ assert_impl!(AtomicWaker: Send);
+ assert_impl!(AtomicWaker: Sync);
+ assert_impl!(AtomicWaker: Unpin);
+
+ assert_impl!(FutureObj<*const ()>: Send);
+ assert_not_impl!(FutureObj<()>: Sync);
+ assert_impl!(FutureObj<PhantomPinned>: Unpin);
+
+ assert_not_impl!(LocalFutureObj<()>: Send);
+ assert_not_impl!(LocalFutureObj<()>: Sync);
+ assert_impl!(LocalFutureObj<PhantomPinned>: Unpin);
+
+ assert_impl!(SpawnError: Send);
+ assert_impl!(SpawnError: Sync);
+ assert_impl!(SpawnError: Unpin);
+
+ assert_impl!(WakerRef<'_>: Send);
+ assert_impl!(WakerRef<'_>: Sync);
+ assert_impl!(WakerRef<'_>: Unpin);
+}
diff --git a/vendor/futures/tests/compat.rs b/vendor/futures/tests/compat.rs
new file mode 100644
index 000000000..c4125d895
--- /dev/null
+++ b/vendor/futures/tests/compat.rs
@@ -0,0 +1,15 @@
+#![cfg(feature = "compat")]
+
+use futures::compat::Future01CompatExt;
+use futures::prelude::*;
+use std::time::Instant;
+use tokio::runtime::Runtime;
+use tokio::timer::Delay;
+
+#[test]
+fn can_use_01_futures_in_a_03_future_running_on_a_01_executor() {
+ let f = async { Delay::new(Instant::now()).compat().await };
+
+ let mut runtime = Runtime::new().unwrap();
+ runtime.block_on(f.boxed().compat()).unwrap();
+}
diff --git a/vendor/futures/tests/eager_drop.rs b/vendor/futures/tests/eager_drop.rs
new file mode 100644
index 000000000..992507774
--- /dev/null
+++ b/vendor/futures/tests/eager_drop.rs
@@ -0,0 +1,121 @@
+use futures::channel::oneshot;
+use futures::future::{self, Future, FutureExt, TryFutureExt};
+use futures::task::{Context, Poll};
+use futures_test::future::FutureTestExt;
+use pin_project::pin_project;
+use std::pin::Pin;
+use std::sync::mpsc;
+
+#[test]
+fn map_ok() {
+ // The closure given to `map_ok` should have been dropped by the time `map`
+ // runs.
+ let (tx1, rx1) = mpsc::channel::<()>();
+ let (tx2, rx2) = mpsc::channel::<()>();
+
+ future::ready::<Result<i32, i32>>(Err(1))
+ .map_ok(move |_| {
+ let _tx1 = tx1;
+ panic!("should not run");
+ })
+ .map(move |_| {
+ assert!(rx1.recv().is_err());
+ tx2.send(()).unwrap()
+ })
+ .run_in_background();
+
+ rx2.recv().unwrap();
+}
+
+#[test]
+fn map_err() {
+ // The closure given to `map_err` should have been dropped by the time `map`
+ // runs.
+ let (tx1, rx1) = mpsc::channel::<()>();
+ let (tx2, rx2) = mpsc::channel::<()>();
+
+ future::ready::<Result<i32, i32>>(Ok(1))
+ .map_err(move |_| {
+ let _tx1 = tx1;
+ panic!("should not run");
+ })
+ .map(move |_| {
+ assert!(rx1.recv().is_err());
+ tx2.send(()).unwrap()
+ })
+ .run_in_background();
+
+ rx2.recv().unwrap();
+}
+
+#[pin_project]
+struct FutureData<F, T> {
+ _data: T,
+ #[pin]
+ future: F,
+}
+
+impl<F: Future, T: Send + 'static> Future for FutureData<F, T> {
+ type Output = F::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F::Output> {
+ self.project().future.poll(cx)
+ }
+}
+
+#[test]
+fn then_drops_eagerly() {
+ let (tx0, rx0) = oneshot::channel::<()>();
+ let (tx1, rx1) = mpsc::channel::<()>();
+ let (tx2, rx2) = mpsc::channel::<()>();
+
+ FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) }
+ .then(move |_| {
+ assert!(rx1.recv().is_err()); // tx1 should have been dropped
+ tx2.send(()).unwrap();
+ future::ready(())
+ })
+ .run_in_background();
+
+ assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv());
+ tx0.send(()).unwrap();
+ rx2.recv().unwrap();
+}
+
+#[test]
+fn and_then_drops_eagerly() {
+ let (tx0, rx0) = oneshot::channel::<Result<(), ()>>();
+ let (tx1, rx1) = mpsc::channel::<()>();
+ let (tx2, rx2) = mpsc::channel::<()>();
+
+ FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) }
+ .and_then(move |_| {
+ assert!(rx1.recv().is_err()); // tx1 should have been dropped
+ tx2.send(()).unwrap();
+ future::ready(Ok(()))
+ })
+ .run_in_background();
+
+ assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv());
+ tx0.send(Ok(())).unwrap();
+ rx2.recv().unwrap();
+}
+
+#[test]
+fn or_else_drops_eagerly() {
+ let (tx0, rx0) = oneshot::channel::<Result<(), ()>>();
+ let (tx1, rx1) = mpsc::channel::<()>();
+ let (tx2, rx2) = mpsc::channel::<()>();
+
+ FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) }
+ .or_else(move |_| {
+ assert!(rx1.recv().is_err()); // tx1 should have been dropped
+ tx2.send(()).unwrap();
+ future::ready::<Result<(), ()>>(Ok(()))
+ })
+ .run_in_background();
+
+ assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv());
+ tx0.send(Err(())).unwrap();
+ rx2.recv().unwrap();
+}
diff --git a/vendor/futures/tests/eventual.rs b/vendor/futures/tests/eventual.rs
new file mode 100644
index 000000000..bff000dd0
--- /dev/null
+++ b/vendor/futures/tests/eventual.rs
@@ -0,0 +1,159 @@
+use futures::channel::oneshot;
+use futures::executor::ThreadPool;
+use futures::future::{self, ok, Future, FutureExt, TryFutureExt};
+use futures::task::SpawnExt;
+use std::sync::mpsc;
+use std::thread;
+
+fn run<F: Future + Send + 'static>(future: F) {
+ let tp = ThreadPool::new().unwrap();
+ tp.spawn(future.map(drop)).unwrap();
+}
+
+#[test]
+fn join1() {
+ let (tx, rx) = mpsc::channel();
+ run(future::try_join(ok::<i32, i32>(1), ok(2)).map_ok(move |v| tx.send(v).unwrap()));
+ assert_eq!(rx.recv(), Ok((1, 2)));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn join2() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_join(p1, p2).map_ok(move |v| tx.send(v).unwrap()));
+ assert!(rx.try_recv().is_err());
+ c1.send(1).unwrap();
+ assert!(rx.try_recv().is_err());
+ c2.send(2).unwrap();
+ assert_eq!(rx.recv(), Ok((1, 2)));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn join3() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_join(p1, p2).map_err(move |_v| tx.send(1).unwrap()));
+ assert!(rx.try_recv().is_err());
+ drop(c1);
+ assert_eq!(rx.recv(), Ok(1));
+ assert!(rx.recv().is_err());
+ drop(c2);
+}
+
+#[test]
+fn join4() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_join(p1, p2).map_err(move |v| tx.send(v).unwrap()));
+ assert!(rx.try_recv().is_err());
+ drop(c1);
+ assert!(rx.recv().is_ok());
+ drop(c2);
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn join5() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (c3, p3) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_join(future::try_join(p1, p2), p3).map_ok(move |v| tx.send(v).unwrap()));
+ assert!(rx.try_recv().is_err());
+ c1.send(1).unwrap();
+ assert!(rx.try_recv().is_err());
+ c2.send(2).unwrap();
+ assert!(rx.try_recv().is_err());
+ c3.send(3).unwrap();
+ assert_eq!(rx.recv(), Ok(((1, 2), 3)));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn select1() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_select(p1, p2).map_ok(move |v| tx.send(v).unwrap()));
+ assert!(rx.try_recv().is_err());
+ c1.send(1).unwrap();
+ let (v, p2) = rx.recv().unwrap().into_inner();
+ assert_eq!(v, 1);
+ assert!(rx.recv().is_err());
+
+ let (tx, rx) = mpsc::channel();
+ run(p2.map_ok(move |v| tx.send(v).unwrap()));
+ c2.send(2).unwrap();
+ assert_eq!(rx.recv(), Ok(2));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn select2() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_select(p1, p2).map_err(move |v| tx.send((1, v.into_inner().1)).unwrap()));
+ assert!(rx.try_recv().is_err());
+ drop(c1);
+ let (v, p2) = rx.recv().unwrap();
+ assert_eq!(v, 1);
+ assert!(rx.recv().is_err());
+
+ let (tx, rx) = mpsc::channel();
+ run(p2.map_ok(move |v| tx.send(v).unwrap()));
+ c2.send(2).unwrap();
+ assert_eq!(rx.recv(), Ok(2));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn select3() {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+ let (tx, rx) = mpsc::channel();
+ run(future::try_select(p1, p2).map_err(move |v| tx.send((1, v.into_inner().1)).unwrap()));
+ assert!(rx.try_recv().is_err());
+ drop(c1);
+ let (v, p2) = rx.recv().unwrap();
+ assert_eq!(v, 1);
+ assert!(rx.recv().is_err());
+
+ let (tx, rx) = mpsc::channel();
+ run(p2.map_err(move |_v| tx.send(2).unwrap()));
+ drop(c2);
+ assert_eq!(rx.recv(), Ok(2));
+ assert!(rx.recv().is_err());
+}
+
+#[test]
+fn select4() {
+ let (tx, rx) = mpsc::channel::<oneshot::Sender<i32>>();
+
+ let t = thread::spawn(move || {
+ for c in rx {
+ c.send(1).unwrap();
+ }
+ });
+
+ let (tx2, rx2) = mpsc::channel();
+ for _ in 0..10000 {
+ let (c1, p1) = oneshot::channel::<i32>();
+ let (c2, p2) = oneshot::channel::<i32>();
+
+ let tx3 = tx2.clone();
+ run(future::try_select(p1, p2).map_ok(move |_| tx3.send(()).unwrap()));
+ tx.send(c1).unwrap();
+ rx2.recv().unwrap();
+ drop(c2);
+ }
+ drop(tx);
+
+ t.join().unwrap();
+}
diff --git a/vendor/futures/tests/future_abortable.rs b/vendor/futures/tests/future_abortable.rs
new file mode 100644
index 000000000..e119f0b71
--- /dev/null
+++ b/vendor/futures/tests/future_abortable.rs
@@ -0,0 +1,44 @@
+use futures::channel::oneshot;
+use futures::executor::block_on;
+use futures::future::{abortable, Aborted, FutureExt};
+use futures::task::{Context, Poll};
+use futures_test::task::new_count_waker;
+
+#[test]
+fn abortable_works() {
+ let (_tx, a_rx) = oneshot::channel::<()>();
+ let (abortable_rx, abort_handle) = abortable(a_rx);
+
+ abort_handle.abort();
+ assert!(abortable_rx.is_aborted());
+ assert_eq!(Err(Aborted), block_on(abortable_rx));
+}
+
+#[test]
+fn abortable_awakens() {
+ let (_tx, a_rx) = oneshot::channel::<()>();
+ let (mut abortable_rx, abort_handle) = abortable(a_rx);
+
+ let (waker, counter) = new_count_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ assert_eq!(counter, 0);
+ assert_eq!(Poll::Pending, abortable_rx.poll_unpin(&mut cx));
+ assert_eq!(counter, 0);
+
+ abort_handle.abort();
+ assert_eq!(counter, 1);
+ assert!(abortable_rx.is_aborted());
+ assert_eq!(Poll::Ready(Err(Aborted)), abortable_rx.poll_unpin(&mut cx));
+}
+
+#[test]
+fn abortable_resolves() {
+ let (tx, a_rx) = oneshot::channel::<()>();
+ let (abortable_rx, _abort_handle) = abortable(a_rx);
+
+ tx.send(()).unwrap();
+
+ assert!(!abortable_rx.is_aborted());
+ assert_eq!(Ok(Ok(())), block_on(abortable_rx));
+}
diff --git a/vendor/futures/tests/future_basic_combinators.rs b/vendor/futures/tests/future_basic_combinators.rs
new file mode 100644
index 000000000..372ab48b7
--- /dev/null
+++ b/vendor/futures/tests/future_basic_combinators.rs
@@ -0,0 +1,104 @@
+use futures::future::{self, FutureExt, TryFutureExt};
+use futures_test::future::FutureTestExt;
+use std::sync::mpsc;
+
+#[test]
+fn basic_future_combinators() {
+ let (tx1, rx) = mpsc::channel();
+ let tx2 = tx1.clone();
+ let tx3 = tx1.clone();
+
+ let fut = future::ready(1)
+ .then(move |x| {
+ tx1.send(x).unwrap(); // Send 1
+ tx1.send(2).unwrap(); // Send 2
+ future::ready(3)
+ })
+ .map(move |x| {
+ tx2.send(x).unwrap(); // Send 3
+ tx2.send(4).unwrap(); // Send 4
+ 5
+ })
+ .map(move |x| {
+ tx3.send(x).unwrap(); // Send 5
+ });
+
+ assert!(rx.try_recv().is_err()); // Not started yet
+ fut.run_in_background(); // Start it
+ for i in 1..=5 {
+ assert_eq!(rx.recv(), Ok(i));
+ } // Check it
+ assert!(rx.recv().is_err()); // Should be done
+}
+
+#[test]
+fn basic_try_future_combinators() {
+ let (tx1, rx) = mpsc::channel();
+ let tx2 = tx1.clone();
+ let tx3 = tx1.clone();
+ let tx4 = tx1.clone();
+ let tx5 = tx1.clone();
+ let tx6 = tx1.clone();
+ let tx7 = tx1.clone();
+ let tx8 = tx1.clone();
+ let tx9 = tx1.clone();
+ let tx10 = tx1.clone();
+
+ let fut = future::ready(Ok(1))
+ .and_then(move |x: i32| {
+ tx1.send(x).unwrap(); // Send 1
+ tx1.send(2).unwrap(); // Send 2
+ future::ready(Ok(3))
+ })
+ .or_else(move |x: i32| {
+ tx2.send(x).unwrap(); // Should not run
+ tx2.send(-1).unwrap();
+ future::ready(Ok(-1))
+ })
+ .map_ok(move |x: i32| {
+ tx3.send(x).unwrap(); // Send 3
+ tx3.send(4).unwrap(); // Send 4
+ 5
+ })
+ .map_err(move |x: i32| {
+ tx4.send(x).unwrap(); // Should not run
+ tx4.send(-1).unwrap();
+ -1
+ })
+ .map(move |x: Result<i32, i32>| {
+ tx5.send(x.unwrap()).unwrap(); // Send 5
+ tx5.send(6).unwrap(); // Send 6
+ Err(7) // Now return errors!
+ })
+ .and_then(move |x: i32| {
+ tx6.send(x).unwrap(); // Should not run
+ tx6.send(-1).unwrap();
+ future::ready(Err(-1))
+ })
+ .or_else(move |x: i32| {
+ tx7.send(x).unwrap(); // Send 7
+ tx7.send(8).unwrap(); // Send 8
+ future::ready(Err(9))
+ })
+ .map_ok(move |x: i32| {
+ tx8.send(x).unwrap(); // Should not run
+ tx8.send(-1).unwrap();
+ -1
+ })
+ .map_err(move |x: i32| {
+ tx9.send(x).unwrap(); // Send 9
+ tx9.send(10).unwrap(); // Send 10
+ 11
+ })
+ .map(move |x: Result<i32, i32>| {
+ tx10.send(x.err().unwrap()).unwrap(); // Send 11
+ tx10.send(12).unwrap(); // Send 12
+ });
+
+ assert!(rx.try_recv().is_err()); // Not started yet
+ fut.run_in_background(); // Start it
+ for i in 1..=12 {
+ assert_eq!(rx.recv(), Ok(i));
+ } // Check it
+ assert!(rx.recv().is_err()); // Should be done
+}
diff --git a/vendor/futures/tests/future_fuse.rs b/vendor/futures/tests/future_fuse.rs
new file mode 100644
index 000000000..83f2c1ce9
--- /dev/null
+++ b/vendor/futures/tests/future_fuse.rs
@@ -0,0 +1,12 @@
+use futures::future::{self, FutureExt};
+use futures::task::Context;
+use futures_test::task::panic_waker;
+
+#[test]
+fn fuse() {
+ let mut future = future::ready::<i32>(2).fuse();
+ let waker = panic_waker();
+ let mut cx = Context::from_waker(&waker);
+ assert!(future.poll_unpin(&mut cx).is_ready());
+ assert!(future.poll_unpin(&mut cx).is_pending());
+}
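+
+// Illustrative sketch, not part of the upstream test (the test name and body are
+// ours): `Fuse` also reports completion via the `FusedFuture` trait, which is
+// what `select!` consults before polling a future again.
+#[test]
+fn fuse_reports_termination() {
+    use futures::future::FusedFuture;
+    let mut future = future::ready::<i32>(2).fuse();
+    let waker = panic_waker();
+    let mut cx = Context::from_waker(&waker);
+    assert!(!future.is_terminated());
+    assert!(future.poll_unpin(&mut cx).is_ready());
+    assert!(future.is_terminated());
+}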
diff --git a/vendor/futures/tests/future_inspect.rs b/vendor/futures/tests/future_inspect.rs
new file mode 100644
index 000000000..eacd1f78a
--- /dev/null
+++ b/vendor/futures/tests/future_inspect.rs
@@ -0,0 +1,16 @@
+use futures::executor::block_on;
+use futures::future::{self, FutureExt};
+
+#[test]
+fn smoke() {
+ let mut counter = 0;
+
+ {
+ let work = future::ready::<i32>(40).inspect(|val| {
+ counter += *val;
+ });
+ assert_eq!(block_on(work), 40);
+ }
+
+ assert_eq!(counter, 40);
+}
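+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// `inspect` only observes the value, so the future still resolves to the
+// original output.
+#[test]
+fn inspect_passes_value_through() {
+    let work = future::ready::<i32>(7).inspect(|val| assert_eq!(*val, 7));
+    assert_eq!(block_on(work), 7);
+}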
diff --git a/vendor/futures/tests/future_join_all.rs b/vendor/futures/tests/future_join_all.rs
new file mode 100644
index 000000000..ae05a21b7
--- /dev/null
+++ b/vendor/futures/tests/future_join_all.rs
@@ -0,0 +1,42 @@
+use futures::executor::block_on;
+use futures::future::{join_all, ready, Future, JoinAll};
+use std::fmt::Debug;
+
+fn assert_done<T, F>(actual_fut: F, expected: T)
+where
+ T: PartialEq + Debug,
+ F: FnOnce() -> Box<dyn Future<Output = T> + Unpin>,
+{
+ let output = block_on(actual_fut());
+ assert_eq!(output, expected);
+}
+
+#[test]
+fn collect_collects() {
+ assert_done(|| Box::new(join_all(vec![ready(1), ready(2)])), vec![1, 2]);
+ assert_done(|| Box::new(join_all(vec![ready(1)])), vec![1]);
+ // REVIEW: should this be implemented?
+ // assert_done(|| Box::new(join_all(Vec::<i32>::new())), vec![]);
+
+ // TODO: needs more tests
+}
+
+#[test]
+fn join_all_iter_lifetime() {
+ // In futures-rs version 0.1, this function would fail to typecheck due to an overly
+ // conservative type parameterization of `JoinAll`.
+ fn sizes(bufs: Vec<&[u8]>) -> Box<dyn Future<Output = Vec<usize>> + Unpin> {
+ let iter = bufs.into_iter().map(|b| ready::<usize>(b.len()));
+ Box::new(join_all(iter))
+ }
+
+ assert_done(|| sizes(vec![&[1, 2, 3], &[], &[0]]), vec![3_usize, 0, 1]);
+}
+
+#[test]
+fn join_all_from_iter() {
+ assert_done(
+ || Box::new(vec![ready(1), ready(2)].into_iter().collect::<JoinAll<_>>()),
+ vec![1, 2],
+ )
+}
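+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// `join_all` returns outputs in the order of the input futures, independent of
+// completion order.
+#[test]
+fn join_all_preserves_order() {
+    assert_done(|| Box::new(join_all(vec![ready(3), ready(1), ready(2)])), vec![3, 1, 2]);
+}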
diff --git a/vendor/futures/tests/future_obj.rs b/vendor/futures/tests/future_obj.rs
new file mode 100644
index 000000000..0e5253464
--- /dev/null
+++ b/vendor/futures/tests/future_obj.rs
@@ -0,0 +1,33 @@
+use futures::future::{Future, FutureExt, FutureObj};
+use futures::task::{Context, Poll};
+use std::pin::Pin;
+
+#[test]
+fn dropping_does_not_segfault() {
+ FutureObj::new(async { String::new() }.boxed());
+}
+
+#[test]
+fn dropping_drops_the_future() {
+ let mut times_dropped = 0;
+
+ struct Inc<'a>(&'a mut u32);
+
+ impl Future for Inc<'_> {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<()> {
+ unimplemented!()
+ }
+ }
+
+ impl Drop for Inc<'_> {
+ fn drop(&mut self) {
+ *self.0 += 1;
+ }
+ }
+
+ FutureObj::new(Inc(&mut times_dropped).boxed());
+
+ assert_eq!(times_dropped, 1);
+}
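+
+// Illustrative sketch, not part of the upstream test (the test name is ours): a
+// `FutureObj` is itself a future and can be driven to completion like any other.
+#[test]
+fn future_obj_is_pollable() {
+    let obj = FutureObj::new(async { 40 + 2 }.boxed());
+    assert_eq!(futures::executor::block_on(obj), 42);
+}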
diff --git a/vendor/futures/tests/future_select_all.rs b/vendor/futures/tests/future_select_all.rs
new file mode 100644
index 000000000..299b47904
--- /dev/null
+++ b/vendor/futures/tests/future_select_all.rs
@@ -0,0 +1,25 @@
+use futures::executor::block_on;
+use futures::future::{ready, select_all};
+use std::collections::HashSet;
+
+#[test]
+fn smoke() {
+ let v = vec![ready(1), ready(2), ready(3)];
+
+ let mut c = vec![1, 2, 3].into_iter().collect::<HashSet<_>>();
+
+ let (i, idx, v) = block_on(select_all(v));
+ assert!(c.remove(&i));
+ assert_eq!(idx, 0);
+
+ let (i, idx, v) = block_on(select_all(v));
+ assert!(c.remove(&i));
+ assert_eq!(idx, 0);
+
+ let (i, idx, v) = block_on(select_all(v));
+ assert!(c.remove(&i));
+ assert_eq!(idx, 0);
+
+ assert!(c.is_empty());
+ assert!(v.is_empty());
+}
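+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// the middle element of the returned tuple is the index of the future that
+// completed; here the first future never resolves, so index 1 is reported.
+#[test]
+fn reports_index_of_completed_future() {
+    use futures::future::{pending, FutureExt};
+    let v = vec![pending::<i32>().boxed(), ready(7).boxed()];
+    let (value, idx, rest) = block_on(select_all(v));
+    assert_eq!((value, idx), (7, 1));
+    assert_eq!(rest.len(), 1);
+}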
diff --git a/vendor/futures/tests/future_select_ok.rs b/vendor/futures/tests/future_select_ok.rs
new file mode 100644
index 000000000..8aec00362
--- /dev/null
+++ b/vendor/futures/tests/future_select_ok.rs
@@ -0,0 +1,30 @@
+use futures::executor::block_on;
+use futures::future::{err, ok, select_ok};
+
+#[test]
+fn ignore_err() {
+ let v = vec![err(1), err(2), ok(3), ok(4)];
+
+ let (i, v) = block_on(select_ok(v)).ok().unwrap();
+ assert_eq!(i, 3);
+
+ assert_eq!(v.len(), 1);
+
+ let (i, v) = block_on(select_ok(v)).ok().unwrap();
+ assert_eq!(i, 4);
+
+ assert!(v.is_empty());
+}
+
+#[test]
+fn last_err() {
+ let v = vec![ok(1), err(2), err(3)];
+
+ let (i, v) = block_on(select_ok(v)).ok().unwrap();
+ assert_eq!(i, 1);
+
+ assert_eq!(v.len(), 2);
+
+ let i = block_on(select_ok(v)).err().unwrap();
+ assert_eq!(i, 3);
+}
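+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// when several futures are already `Ok`, `select_ok` yields the first one in
+// iteration order and hands the rest back untouched.
+#[test]
+fn first_ok_wins() {
+    let v = vec![ok::<i32, i32>(1), ok(2)];
+    let (i, rest) = block_on(select_ok(v)).unwrap();
+    assert_eq!(i, 1);
+    assert_eq!(rest.len(), 1);
+}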
diff --git a/vendor/futures/tests/future_shared.rs b/vendor/futures/tests/future_shared.rs
new file mode 100644
index 000000000..718d6c41b
--- /dev/null
+++ b/vendor/futures/tests/future_shared.rs
@@ -0,0 +1,195 @@
+use futures::channel::oneshot;
+use futures::executor::{block_on, LocalPool};
+use futures::future::{self, FutureExt, LocalFutureObj, TryFutureExt};
+use futures::task::LocalSpawn;
+use std::cell::{Cell, RefCell};
+use std::rc::Rc;
+use std::task::Poll;
+use std::thread;
+
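+// Counts how many times the wrapped value is cloned; the tests below use it to
+// check that `Shared` clones its output only when it actually has to.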
+struct CountClone(Rc<Cell<i32>>);
+
+impl Clone for CountClone {
+ fn clone(&self) -> Self {
+ self.0.set(self.0.get() + 1);
+ Self(self.0.clone())
+ }
+}
+
+fn send_shared_oneshot_and_wait_on_multiple_threads(threads_number: u32) {
+ let (tx, rx) = oneshot::channel::<i32>();
+ let f = rx.shared();
+ let join_handles = (0..threads_number)
+ .map(|_| {
+ let cloned_future = f.clone();
+ thread::spawn(move || {
+ assert_eq!(block_on(cloned_future).unwrap(), 6);
+ })
+ })
+ .collect::<Vec<_>>();
+
+ tx.send(6).unwrap();
+
+ assert_eq!(block_on(f).unwrap(), 6);
+ for join_handle in join_handles {
+ join_handle.join().unwrap();
+ }
+}
+
+#[test]
+fn one_thread() {
+ send_shared_oneshot_and_wait_on_multiple_threads(1);
+}
+
+#[test]
+fn two_threads() {
+ send_shared_oneshot_and_wait_on_multiple_threads(2);
+}
+
+#[test]
+fn many_threads() {
+ send_shared_oneshot_and_wait_on_multiple_threads(1000);
+}
+
+#[test]
+fn drop_on_one_task_ok() {
+ let (tx, rx) = oneshot::channel::<u32>();
+ let f1 = rx.shared();
+ let f2 = f1.clone();
+
+ let (tx2, rx2) = oneshot::channel::<u32>();
+
+ let t1 = thread::spawn(|| {
+ let f = future::try_select(f1.map_err(|_| ()), rx2.map_err(|_| ()));
+ drop(block_on(f));
+ });
+
+ let (tx3, rx3) = oneshot::channel::<u32>();
+
+ let t2 = thread::spawn(|| {
+ let _ = block_on(f2.map_ok(|x| tx3.send(x).unwrap()).map_err(|_| ()));
+ });
+
+ tx2.send(11).unwrap(); // cancel `f1`
+ t1.join().unwrap();
+
+ tx.send(42).unwrap(); // Should cause `f2` and then `rx3` to get resolved.
+ let result = block_on(rx3).unwrap();
+ assert_eq!(result, 42);
+ t2.join().unwrap();
+}
+
+#[test]
+fn drop_in_poll() {
+ let slot1 = Rc::new(RefCell::new(None));
+ let slot2 = slot1.clone();
+
+ let future1 = future::lazy(move |_| {
+ slot2.replace(None); // Drop future
+ 1
+ })
+ .shared();
+
+ let future2 = LocalFutureObj::new(Box::new(future1.clone()));
+ slot1.replace(Some(future2));
+
+ assert_eq!(block_on(future1), 1);
+}
+
+#[test]
+fn peek() {
+ let mut local_pool = LocalPool::new();
+ let spawn = &mut local_pool.spawner();
+
+ let (tx0, rx0) = oneshot::channel::<i32>();
+ let f1 = rx0.shared();
+ let f2 = f1.clone();
+
+ // Repeated calls on the original or clone do not change the outcome.
+ for _ in 0..2 {
+ assert!(f1.peek().is_none());
+ assert!(f2.peek().is_none());
+ }
+
+    // Completing the underlying future has no effect yet, because its value has not been
+    // polled into the shared state.
+ tx0.send(42).unwrap();
+ for _ in 0..2 {
+ assert!(f1.peek().is_none());
+ assert!(f2.peek().is_none());
+ }
+
+ // Once the Shared has been polled, the value is peekable on the clone.
+ spawn.spawn_local_obj(LocalFutureObj::new(Box::new(f1.map(|_| ())))).unwrap();
+ local_pool.run();
+ for _ in 0..2 {
+ assert_eq!(*f2.peek().unwrap(), Ok(42));
+ }
+}
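+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// once any clone has been driven to completion, `peek` on another clone
+// observes the shared output.
+#[test]
+fn peek_after_completion() {
+    let f1 = future::ready(5).shared();
+    let f2 = f1.clone();
+    assert!(f2.peek().is_none());
+    assert_eq!(block_on(f1), 5);
+    assert_eq!(f2.peek().copied(), Some(5));
+}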
+
+#[test]
+fn downgrade() {
+ let (tx, rx) = oneshot::channel::<i32>();
+ let shared = rx.shared();
+ // Since there are outstanding `Shared`s, we can get a `WeakShared`.
+ let weak = shared.downgrade().unwrap();
+ // It should upgrade fine right now.
+ let mut shared2 = weak.upgrade().unwrap();
+
+ tx.send(42).unwrap();
+ assert_eq!(block_on(shared).unwrap(), 42);
+
+ // We should still be able to get a new `WeakShared` and upgrade it
+ // because `shared2` is outstanding.
+ assert!(shared2.downgrade().is_some());
+ assert!(weak.upgrade().is_some());
+
+ assert_eq!(block_on(&mut shared2).unwrap(), 42);
+ // Now that all `Shared`s have been exhausted, we should not be able
+ // to get a new `WeakShared` or upgrade an existing one.
+ assert!(weak.upgrade().is_none());
+ assert!(shared2.downgrade().is_none());
+}
+
+#[test]
+fn dont_clone_in_single_owner_shared_future() {
+ let counter = CountClone(Rc::new(Cell::new(0)));
+ let (tx, rx) = oneshot::channel();
+
+ let rx = rx.shared();
+
+ tx.send(counter).ok().unwrap();
+
+ assert_eq!(block_on(rx).unwrap().0.get(), 0);
+}
+
+#[test]
+fn dont_do_unnecessary_clones_on_output() {
+ let counter = CountClone(Rc::new(Cell::new(0)));
+ let (tx, rx) = oneshot::channel();
+
+ let rx = rx.shared();
+
+ tx.send(counter).ok().unwrap();
+
+ assert_eq!(block_on(rx.clone()).unwrap().0.get(), 1);
+ assert_eq!(block_on(rx.clone()).unwrap().0.get(), 2);
+ assert_eq!(block_on(rx).unwrap().0.get(), 2);
+}
+
+#[test]
+fn shared_future_that_wakes_itself_until_pending_is_returned() {
+ let proceed = Cell::new(false);
+ let fut = futures::future::poll_fn(|cx| {
+ if proceed.get() {
+ Poll::Ready(())
+ } else {
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ })
+ .shared();
+
+    // The join future can only complete if the second future gets a chance to run
+    // after the first has returned `Pending`.
+ assert_eq!(block_on(futures::future::join(fut, async { proceed.set(true) })), ((), ()));
+}
diff --git a/vendor/futures/tests/future_try_flatten_stream.rs b/vendor/futures/tests/future_try_flatten_stream.rs
new file mode 100644
index 000000000..82ae1baf2
--- /dev/null
+++ b/vendor/futures/tests/future_try_flatten_stream.rs
@@ -0,0 +1,83 @@
+use futures::executor::block_on_stream;
+use futures::future::{err, ok, TryFutureExt};
+use futures::sink::Sink;
+use futures::stream::Stream;
+use futures::stream::{self, StreamExt};
+use futures::task::{Context, Poll};
+use std::marker::PhantomData;
+use std::pin::Pin;
+
+#[test]
+fn successful_future() {
+ let stream_items = vec![17, 19];
+ let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items).map(Ok));
+
+ let stream = future_of_a_stream.try_flatten_stream();
+
+ let mut iter = block_on_stream(stream);
+ assert_eq!(Ok(17), iter.next().unwrap());
+ assert_eq!(Ok(19), iter.next().unwrap());
+ assert_eq!(None, iter.next());
+}
+
+#[test]
+fn failed_future() {
+ struct PanickingStream<T, E> {
+ _marker: PhantomData<(T, E)>,
+ }
+
+ impl<T, E> Stream for PanickingStream<T, E> {
+ type Item = Result<T, E>;
+
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ panic!()
+ }
+ }
+
+ let future_of_a_stream = err::<PanickingStream<bool, u32>, _>(10);
+ let stream = future_of_a_stream.try_flatten_stream();
+ let mut iter = block_on_stream(stream);
+ assert_eq!(Err(10), iter.next().unwrap());
+ assert_eq!(None, iter.next());
+}
+
+#[test]
+fn assert_impls() {
+ struct StreamSink<T, E, Item>(PhantomData<(T, E, Item)>);
+
+ impl<T, E, Item> Stream for StreamSink<T, E, Item> {
+ type Item = Result<T, E>;
+ fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ panic!()
+ }
+ }
+
+ impl<T, E, Item> Sink<Item> for StreamSink<T, E, Item> {
+ type Error = E;
+ fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ panic!()
+ }
+ fn start_send(self: Pin<&mut Self>, _: Item) -> Result<(), Self::Error> {
+ panic!()
+ }
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ panic!()
+ }
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ panic!()
+ }
+ }
+
+ fn assert_stream<S: Stream>(_: &S) {}
+ fn assert_sink<S: Sink<Item>, Item>(_: &S) {}
+ fn assert_stream_sink<S: Stream + Sink<Item>, Item>(_: &S) {}
+
+ let s = ok(StreamSink::<(), (), ()>(PhantomData)).try_flatten_stream();
+ assert_stream(&s);
+ assert_sink(&s);
+ assert_stream_sink(&s);
+ let s = ok(StreamSink::<(), (), ()>(PhantomData)).flatten_sink();
+ assert_stream(&s);
+ assert_sink(&s);
+ assert_stream_sink(&s);
+}
diff --git a/vendor/futures/tests/future_try_join_all.rs b/vendor/futures/tests/future_try_join_all.rs
new file mode 100644
index 000000000..a4b3bb76a
--- /dev/null
+++ b/vendor/futures/tests/future_try_join_all.rs
@@ -0,0 +1,44 @@
+use futures::executor::block_on;
+use futures_util::future::{err, ok, try_join_all, TryJoinAll};
+use std::fmt::Debug;
+use std::future::Future;
+
+fn assert_done<T, F>(actual_fut: F, expected: T)
+where
+ T: PartialEq + Debug,
+ F: FnOnce() -> Box<dyn Future<Output = T> + Unpin>,
+{
+ let output = block_on(actual_fut());
+ assert_eq!(output, expected);
+}
+
+#[test]
+fn collect_collects() {
+ assert_done(|| Box::new(try_join_all(vec![ok(1), ok(2)])), Ok::<_, usize>(vec![1, 2]));
+ assert_done(|| Box::new(try_join_all(vec![ok(1), err(2)])), Err(2));
+ assert_done(|| Box::new(try_join_all(vec![ok(1)])), Ok::<_, usize>(vec![1]));
+ // REVIEW: should this be implemented?
+ // assert_done(|| Box::new(try_join_all(Vec::<i32>::new())), Ok(vec![]));
+
+ // TODO: needs more tests
+}
+
+#[test]
+fn try_join_all_iter_lifetime() {
+ // In futures-rs version 0.1, this function would fail to typecheck due to an overly
+ // conservative type parameterization of `TryJoinAll`.
+ fn sizes(bufs: Vec<&[u8]>) -> Box<dyn Future<Output = Result<Vec<usize>, ()>> + Unpin> {
+ let iter = bufs.into_iter().map(|b| ok::<usize, ()>(b.len()));
+ Box::new(try_join_all(iter))
+ }
+
+ assert_done(|| sizes(vec![&[1, 2, 3], &[], &[0]]), Ok(vec![3_usize, 0, 1]));
+}
+
+#[test]
+fn try_join_all_from_iter() {
+ assert_done(
+ || Box::new(vec![ok(1), ok(2)].into_iter().collect::<TryJoinAll<_>>()),
+ Ok::<_, usize>(vec![1, 2]),
+ )
+}
diff --git a/vendor/futures/tests/io_buf_reader.rs b/vendor/futures/tests/io_buf_reader.rs
new file mode 100644
index 000000000..717297cce
--- /dev/null
+++ b/vendor/futures/tests/io_buf_reader.rs
@@ -0,0 +1,432 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{
+ AllowStdIo, AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt,
+ BufReader, SeekFrom,
+};
+use futures::pin_mut;
+use futures::task::{Context, Poll};
+use futures_test::task::noop_context;
+use pin_project::pin_project;
+use std::cmp;
+use std::io;
+use std::pin::Pin;
+
+// helper for maybe_pending_* tests
+fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+}
+
+// https://github.com/rust-lang/futures-rs/pull/2489#discussion_r697865719
+#[pin_project(!Unpin)]
+struct Cursor<T> {
+ #[pin]
+ inner: futures::io::Cursor<T>,
+}
+
+impl<T> Cursor<T> {
+ fn new(inner: T) -> Self {
+ Self { inner: futures::io::Cursor::new(inner) }
+ }
+}
+
+impl AsyncRead for Cursor<&[u8]> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_read(cx, buf)
+ }
+}
+
+impl AsyncBufRead for Cursor<&[u8]> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ self.project().inner.poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.project().inner.consume(amt)
+ }
+}
+
+impl AsyncSeek for Cursor<&[u8]> {
+ fn poll_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ self.project().inner.poll_seek(cx, pos)
+ }
+}
+
+struct MaybePending<'a> {
+ inner: &'a [u8],
+ ready_read: bool,
+ ready_fill_buf: bool,
+}
+
+impl<'a> MaybePending<'a> {
+ fn new(inner: &'a [u8]) -> Self {
+ Self { inner, ready_read: false, ready_fill_buf: false }
+ }
+}
+
+impl AsyncRead for MaybePending<'_> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.ready_read {
+ self.ready_read = false;
+ Pin::new(&mut self.inner).poll_read(cx, buf)
+ } else {
+ self.ready_read = true;
+ Poll::Pending
+ }
+ }
+}
+
+impl AsyncBufRead for MaybePending<'_> {
+ fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ if self.ready_fill_buf {
+ self.ready_fill_buf = false;
+ if self.inner.is_empty() {
+ return Poll::Ready(Ok(&[]));
+ }
+ let len = cmp::min(2, self.inner.len());
+ Poll::Ready(Ok(&self.inner[0..len]))
+ } else {
+ self.ready_fill_buf = true;
+ Poll::Pending
+ }
+ }
+
+ fn consume(mut self: Pin<&mut Self>, amt: usize) {
+ self.inner = &self.inner[amt..];
+ }
+}
+
+#[test]
+fn test_buffered_reader() {
+ block_on(async {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, inner);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 3);
+ assert_eq!(buf, [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 2);
+ assert_eq!(buf, [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [0, 0, 0];
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [3, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ let nread = reader.read(&mut buf).await.unwrap();
+ assert_eq!(nread, 1);
+ assert_eq!(buf, [4, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ });
+}
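+
+// Illustrative sketch, not part of the upstream test (the test name is ours):
+// `fill_buf`/`consume` expose the internal buffer directly, pulling at most
+// `capacity` bytes from the underlying reader at a time.
+#[test]
+fn fill_buf_respects_capacity() {
+    block_on(async {
+        let inner: &[u8] = &[5, 6, 7, 0];
+        let mut reader = BufReader::with_capacity(2, inner);
+        assert_eq!(reader.fill_buf().await.unwrap(), &[5, 6][..]);
+        Pin::new(&mut reader).consume(2);
+        assert_eq!(reader.fill_buf().await.unwrap(), &[7, 0][..]);
+    });
+}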
+
+#[test]
+fn test_buffered_reader_seek() {
+ block_on(async {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let reader = BufReader::with_capacity(2, Cursor::new(inner));
+ pin_mut!(reader);
+
+ assert_eq!(reader.seek(SeekFrom::Start(3)).await.unwrap(), 3);
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]);
+ assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]);
+ assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4);
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[1, 2][..]);
+ reader.as_mut().consume(1);
+ assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3);
+ });
+}
+
+#[test]
+fn test_buffered_reader_seek_relative() {
+ block_on(async {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let reader = BufReader::with_capacity(2, Cursor::new(inner));
+ pin_mut!(reader);
+
+ assert!(reader.as_mut().seek_relative(3).await.is_ok());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]);
+ assert!(reader.as_mut().seek_relative(0).await.is_ok());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]);
+ assert!(reader.as_mut().seek_relative(1).await.is_ok());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[1][..]);
+ assert!(reader.as_mut().seek_relative(-1).await.is_ok());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]);
+ assert!(reader.as_mut().seek_relative(2).await.is_ok());
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[2, 3][..]);
+ });
+}
+
+#[test]
+fn test_buffered_reader_invalidated_after_read() {
+ block_on(async {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let reader = BufReader::with_capacity(3, Cursor::new(inner));
+ pin_mut!(reader);
+
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[5, 6, 7][..]);
+ reader.as_mut().consume(3);
+
+ let mut buffer = [0, 0, 0, 0, 0];
+ assert_eq!(reader.read(&mut buffer).await.unwrap(), 5);
+ assert_eq!(buffer, [0, 1, 2, 3, 4]);
+
+ assert!(reader.as_mut().seek_relative(-2).await.is_ok());
+ let mut buffer = [0, 0];
+ assert_eq!(reader.read(&mut buffer).await.unwrap(), 2);
+ assert_eq!(buffer, [3, 4]);
+ });
+}
+
+#[test]
+fn test_buffered_reader_invalidated_after_seek() {
+ block_on(async {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let reader = BufReader::with_capacity(3, Cursor::new(inner));
+ pin_mut!(reader);
+
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[5, 6, 7][..]);
+ reader.as_mut().consume(3);
+
+ assert!(reader.seek(SeekFrom::Current(5)).await.is_ok());
+
+ assert!(reader.as_mut().seek_relative(-2).await.is_ok());
+ let mut buffer = [0, 0];
+ assert_eq!(reader.read(&mut buffer).await.unwrap(), 2);
+ assert_eq!(buffer, [3, 4]);
+ });
+}
+
+#[test]
+fn test_buffered_reader_seek_underflow() {
+ // gimmick reader that yields its position modulo 256 for each byte
+ struct PositionReader {
+ pos: u64,
+ }
+ impl io::Read for PositionReader {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let len = buf.len();
+ for x in buf {
+ *x = self.pos as u8;
+ self.pos = self.pos.wrapping_add(1);
+ }
+ Ok(len)
+ }
+ }
+ impl io::Seek for PositionReader {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ match pos {
+ SeekFrom::Start(n) => {
+ self.pos = n;
+ }
+ SeekFrom::Current(n) => {
+ self.pos = self.pos.wrapping_add(n as u64);
+ }
+ SeekFrom::End(n) => {
+ self.pos = u64::MAX.wrapping_add(n as u64);
+ }
+ }
+ Ok(self.pos)
+ }
+ }
+
+ block_on(async {
+ let reader = BufReader::with_capacity(5, AllowStdIo::new(PositionReader { pos: 0 }));
+ pin_mut!(reader);
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1, 2, 3, 4][..]);
+ assert_eq!(reader.seek(SeekFrom::End(-5)).await.unwrap(), u64::MAX - 5);
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap().len(), 5);
+ // the following seek will require two underlying seeks
+ let expected = 9_223_372_036_854_775_802;
+ assert_eq!(reader.seek(SeekFrom::Current(i64::MIN)).await.unwrap(), expected);
+ assert_eq!(reader.as_mut().fill_buf().await.unwrap().len(), 5);
+ // seeking to 0 should empty the buffer.
+ assert_eq!(reader.seek(SeekFrom::Current(0)).await.unwrap(), expected);
+ assert_eq!(reader.get_ref().get_ref().pos, expected);
+ });
+}
+
+#[test]
+fn test_short_reads() {
+    /// A dummy reader intended for testing short-read propagation.
+ struct ShortReader {
+ lengths: Vec<usize>,
+ }
+
+ impl io::Read for ShortReader {
+ fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
+ if self.lengths.is_empty() {
+ Ok(0)
+ } else {
+ Ok(self.lengths.remove(0))
+ }
+ }
+ }
+
+ block_on(async {
+ let inner = ShortReader { lengths: vec![0, 1, 2, 0, 1, 0] };
+ let mut reader = BufReader::new(AllowStdIo::new(inner));
+ let mut buf = [0, 0];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 2);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ });
+}
+
+#[test]
+fn maybe_pending() {
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let mut reader = BufReader::with_capacity(2, MaybePending::new(inner));
+
+ let mut buf = [0, 0, 0];
+ let nread = run(reader.read(&mut buf));
+ assert_eq!(nread.unwrap(), 3);
+ assert_eq!(buf, [5, 6, 7]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0, 0];
+ let nread = run(reader.read(&mut buf));
+ assert_eq!(nread.unwrap(), 2);
+ assert_eq!(buf, [0, 1]);
+ assert_eq!(reader.buffer(), []);
+
+ let mut buf = [0];
+ let nread = run(reader.read(&mut buf));
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [2]);
+ assert_eq!(reader.buffer(), [3]);
+
+ let mut buf = [0, 0, 0];
+ let nread = run(reader.read(&mut buf));
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [3, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ let nread = run(reader.read(&mut buf));
+ assert_eq!(nread.unwrap(), 1);
+ assert_eq!(buf, [4, 0, 0]);
+ assert_eq!(reader.buffer(), []);
+
+ assert_eq!(run(reader.read(&mut buf)).unwrap(), 0);
+}
+
+#[test]
+fn maybe_pending_buf_read() {
+ let inner = MaybePending::new(&[0, 1, 2, 3, 1, 0]);
+ let mut reader = BufReader::with_capacity(2, inner);
+ let mut v = Vec::new();
+ run(reader.read_until(3, &mut v)).unwrap();
+ assert_eq!(v, [0, 1, 2, 3]);
+ v.clear();
+ run(reader.read_until(1, &mut v)).unwrap();
+ assert_eq!(v, [1]);
+ v.clear();
+ run(reader.read_until(8, &mut v)).unwrap();
+ assert_eq!(v, [0]);
+ v.clear();
+ run(reader.read_until(9, &mut v)).unwrap();
+ assert_eq!(v, []);
+}
+
+// https://github.com/rust-lang/futures-rs/pull/1573#discussion_r281162309
+#[test]
+fn maybe_pending_seek() {
+ #[pin_project]
+ struct MaybePendingSeek<'a> {
+ #[pin]
+ inner: Cursor<&'a [u8]>,
+ ready: bool,
+ }
+
+ impl<'a> MaybePendingSeek<'a> {
+ fn new(inner: &'a [u8]) -> Self {
+ Self { inner: Cursor::new(inner), ready: true }
+ }
+ }
+
+ impl AsyncRead for MaybePendingSeek<'_> {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.project().inner.poll_read(cx, buf)
+ }
+ }
+
+ impl AsyncBufRead for MaybePendingSeek<'_> {
+ fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
+ self.project().inner.poll_fill_buf(cx)
+ }
+
+ fn consume(self: Pin<&mut Self>, amt: usize) {
+ self.project().inner.consume(amt)
+ }
+ }
+
+ impl AsyncSeek for MaybePendingSeek<'_> {
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ if self.ready {
+ *self.as_mut().project().ready = false;
+ self.project().inner.poll_seek(cx, pos)
+ } else {
+ *self.project().ready = true;
+ Poll::Pending
+ }
+ }
+ }
+
+ let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
+ let reader = BufReader::with_capacity(2, MaybePendingSeek::new(inner));
+ pin_mut!(reader);
+
+ assert_eq!(run(reader.seek(SeekFrom::Current(3))).ok(), Some(3));
+ assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[0, 1][..]));
+ assert_eq!(run(reader.seek(SeekFrom::Current(i64::MIN))).ok(), None);
+ assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[0, 1][..]));
+ assert_eq!(run(reader.seek(SeekFrom::Current(1))).ok(), Some(4));
+ assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[1, 2][..]));
+ Pin::new(&mut reader).consume(1);
+ assert_eq!(run(reader.seek(SeekFrom::Current(-2))).ok(), Some(3));
+}
diff --git a/vendor/futures/tests/io_buf_writer.rs b/vendor/futures/tests/io_buf_writer.rs
new file mode 100644
index 000000000..b264cd54c
--- /dev/null
+++ b/vendor/futures/tests/io_buf_writer.rs
@@ -0,0 +1,239 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{
+ AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufWriter, Cursor, SeekFrom,
+};
+use futures::task::{Context, Poll};
+use futures_test::task::noop_context;
+use std::io;
+use std::pin::Pin;
+
+struct MaybePending {
+ inner: Vec<u8>,
+ ready: bool,
+}
+
+impl MaybePending {
+ fn new(inner: Vec<u8>) -> Self {
+ Self { inner, ready: false }
+ }
+}
+
+impl AsyncWrite for MaybePending {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.ready {
+ self.ready = false;
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ } else {
+ self.ready = true;
+ Poll::Pending
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_close(cx)
+ }
+}
+
+fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+}
+
+#[test]
+fn buf_writer() {
+ let mut writer = BufWriter::with_capacity(2, Vec::new());
+
+ block_on(writer.write(&[0, 1])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ block_on(writer.write(&[2])).unwrap();
+ assert_eq!(writer.buffer(), [2]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ block_on(writer.write(&[3])).unwrap();
+ assert_eq!(writer.buffer(), [2, 3]);
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ block_on(writer.flush()).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ block_on(writer.write(&[4])).unwrap();
+ block_on(writer.write(&[5])).unwrap();
+ assert_eq!(writer.buffer(), [4, 5]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
+
+ block_on(writer.write(&[6])).unwrap();
+ assert_eq!(writer.buffer(), [6]);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
+
+ block_on(writer.write(&[7, 8])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ block_on(writer.write(&[9, 10, 11])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+
+ block_on(writer.flush()).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+}
+
+#[test]
+fn buf_writer_inner_flushes() {
+ let mut w = BufWriter::with_capacity(3, Vec::new());
+ block_on(w.write(&[0, 1])).unwrap();
+ assert_eq!(*w.get_ref(), []);
+ block_on(w.flush()).unwrap();
+ let w = w.into_inner();
+ assert_eq!(w, [0, 1]);
+}
+
+#[test]
+fn buf_writer_seek() {
+    // FIXME: when https://github.com/rust-lang/futures-rs/issues/1510 is fixed,
+    // use `Vec::new` instead of `vec![0; 8]`.
+ let mut w = BufWriter::with_capacity(3, Cursor::new(vec![0; 8]));
+ block_on(w.write_all(&[0, 1, 2, 3, 4, 5])).unwrap();
+ block_on(w.write_all(&[6, 7])).unwrap();
+ assert_eq!(block_on(w.seek(SeekFrom::Current(0))).ok(), Some(8));
+ assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
+ assert_eq!(block_on(w.seek(SeekFrom::Start(2))).ok(), Some(2));
+ block_on(w.write_all(&[8, 9])).unwrap();
+ block_on(w.flush()).unwrap();
+ assert_eq!(&w.into_inner().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
+}
+
+#[test]
+fn maybe_pending_buf_writer() {
+ let mut writer = BufWriter::with_capacity(2, MaybePending::new(Vec::new()));
+
+ run(writer.write(&[0, 1])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ run(writer.write(&[2])).unwrap();
+ assert_eq!(writer.buffer(), [2]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ run(writer.write(&[3])).unwrap();
+ assert_eq!(writer.buffer(), [2, 3]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1]);
+
+ run(writer.flush()).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
+
+ run(writer.write(&[4])).unwrap();
+ run(writer.write(&[5])).unwrap();
+ assert_eq!(writer.buffer(), [4, 5]);
+ assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
+
+ run(writer.write(&[6])).unwrap();
+ assert_eq!(writer.buffer(), [6]);
+ assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5]);
+
+ run(writer.write(&[7, 8])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ run(writer.write(&[9, 10, 11])).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+
+ run(writer.flush()).unwrap();
+ assert_eq!(writer.buffer(), []);
+ assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+}
+
+#[test]
+fn maybe_pending_buf_writer_inner_flushes() {
+ let mut w = BufWriter::with_capacity(3, MaybePending::new(Vec::new()));
+ run(w.write(&[0, 1])).unwrap();
+ assert_eq!(&w.get_ref().inner, &[]);
+ run(w.flush()).unwrap();
+ let w = w.into_inner().inner;
+ assert_eq!(w, [0, 1]);
+}
+
+#[test]
+fn maybe_pending_buf_writer_seek() {
+ struct MaybePendingSeek {
+ inner: Cursor<Vec<u8>>,
+ ready_write: bool,
+ ready_seek: bool,
+ }
+
+ impl MaybePendingSeek {
+ fn new(inner: Vec<u8>) -> Self {
+ Self { inner: Cursor::new(inner), ready_write: false, ready_seek: false }
+ }
+ }
+
+ impl AsyncWrite for MaybePendingSeek {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ if self.ready_write {
+ self.ready_write = false;
+ Pin::new(&mut self.inner).poll_write(cx, buf)
+ } else {
+ self.ready_write = true;
+ Poll::Pending
+ }
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_flush(cx)
+ }
+
+ fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ Pin::new(&mut self.inner).poll_close(cx)
+ }
+ }
+
+ impl AsyncSeek for MaybePendingSeek {
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ if self.ready_seek {
+ self.ready_seek = false;
+ Pin::new(&mut self.inner).poll_seek(cx, pos)
+ } else {
+ self.ready_seek = true;
+ Poll::Pending
+ }
+ }
+ }
+
+    // FIXME: when https://github.com/rust-lang/futures-rs/issues/1510 is fixed,
+    // use `Vec::new` instead of `vec![0; 8]`.
+ let mut w = BufWriter::with_capacity(3, MaybePendingSeek::new(vec![0; 8]));
+ run(w.write_all(&[0, 1, 2, 3, 4, 5])).unwrap();
+ run(w.write_all(&[6, 7])).unwrap();
+ assert_eq!(run(w.seek(SeekFrom::Current(0))).ok(), Some(8));
+ assert_eq!(&w.get_ref().inner.get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
+ assert_eq!(run(w.seek(SeekFrom::Start(2))).ok(), Some(2));
+ run(w.write_all(&[8, 9])).unwrap();
+ run(w.flush()).unwrap();
+ assert_eq!(&w.into_inner().inner.into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
+}
diff --git a/vendor/futures/tests/io_cursor.rs b/vendor/futures/tests/io_cursor.rs
new file mode 100644
index 000000000..435ea5a15
--- /dev/null
+++ b/vendor/futures/tests/io_cursor.rs
@@ -0,0 +1,30 @@
+use assert_matches::assert_matches;
+use futures::executor::block_on;
+use futures::future::lazy;
+use futures::io::{AsyncWrite, Cursor};
+use futures::task::Poll;
+use std::pin::Pin;
+
+#[test]
+fn cursor_asyncwrite_vec() {
+ let mut cursor = Cursor::new(vec![0; 5]);
+ block_on(lazy(|cx| {
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[1, 2]), Poll::Ready(Ok(2)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[3, 4]), Poll::Ready(Ok(2)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[5, 6]), Poll::Ready(Ok(2)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[6, 7]), Poll::Ready(Ok(2)));
+ }));
+ assert_eq!(cursor.into_inner(), [1, 2, 3, 4, 5, 6, 6, 7]);
+}
+
+#[test]
+fn cursor_asyncwrite_box() {
+ let mut cursor = Cursor::new(vec![0; 5].into_boxed_slice());
+ block_on(lazy(|cx| {
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[1, 2]), Poll::Ready(Ok(2)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[3, 4]), Poll::Ready(Ok(2)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[5, 6]), Poll::Ready(Ok(1)));
+ assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[6, 7]), Poll::Ready(Ok(0)));
+ }));
+ assert_eq!(&*cursor.into_inner(), [1, 2, 3, 4, 5]);
+}
diff --git a/vendor/futures/tests/io_line_writer.rs b/vendor/futures/tests/io_line_writer.rs
new file mode 100644
index 000000000..b483e0ff7
--- /dev/null
+++ b/vendor/futures/tests/io_line_writer.rs
@@ -0,0 +1,73 @@
+use futures::executor::block_on;
+use futures::io::{AsyncWriteExt, LineWriter};
+use std::io;
+
+#[test]
+fn line_writer() {
+ let mut writer = LineWriter::new(Vec::new());
+
+ block_on(writer.write(&[0])).unwrap();
+ assert_eq!(*writer.get_ref(), []);
+
+ block_on(writer.write(&[1])).unwrap();
+ assert_eq!(*writer.get_ref(), []);
+
+ block_on(writer.flush()).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1]);
+
+ block_on(writer.write(&[0, b'\n', 1, b'\n', 2])).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']);
+
+ block_on(writer.flush()).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]);
+
+ block_on(writer.write(&[3, b'\n'])).unwrap();
+ assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']);
+}
+
+#[test]
+fn line_vectored() {
+ let mut line_writer = LineWriter::new(Vec::new());
+ assert_eq!(
+ block_on(line_writer.write_vectored(&[
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(b"\n"),
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(b"a"),
+ ]))
+ .unwrap(),
+ 2
+ );
+ assert_eq!(line_writer.get_ref(), b"\n");
+
+ assert_eq!(
+ block_on(line_writer.write_vectored(&[
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(b"b"),
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(b"a"),
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(b"c"),
+ ]))
+ .unwrap(),
+ 3
+ );
+ assert_eq!(line_writer.get_ref(), b"\n");
+ block_on(line_writer.flush()).unwrap();
+ assert_eq!(line_writer.get_ref(), b"\nabac");
+ assert_eq!(block_on(line_writer.write_vectored(&[])).unwrap(), 0);
+
+ assert_eq!(
+ block_on(line_writer.write_vectored(&[
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(&[]),
+ io::IoSlice::new(&[]),
+ ]))
+ .unwrap(),
+ 0
+ );
+
+ assert_eq!(block_on(line_writer.write_vectored(&[io::IoSlice::new(b"a\nb")])).unwrap(), 3);
+ assert_eq!(line_writer.get_ref(), b"\nabaca\nb");
+}
diff --git a/vendor/futures/tests/io_lines.rs b/vendor/futures/tests/io_lines.rs
new file mode 100644
index 000000000..5ce01a694
--- /dev/null
+++ b/vendor/futures/tests/io_lines.rs
@@ -0,0 +1,60 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{AsyncBufReadExt, Cursor};
+use futures::stream::{self, StreamExt, TryStreamExt};
+use futures::task::Poll;
+use futures_test::io::AsyncReadTestExt;
+use futures_test::task::noop_context;
+
+fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+}
+
+macro_rules! block_on_next {
+ ($expr:expr) => {
+ block_on($expr.next()).unwrap().unwrap()
+ };
+}
+
+macro_rules! run_next {
+ ($expr:expr) => {
+ run($expr.next()).unwrap().unwrap()
+ };
+}
+
+#[test]
+fn lines() {
+ let buf = Cursor::new(&b"12\r"[..]);
+ let mut s = buf.lines();
+ assert_eq!(block_on_next!(s), "12\r".to_string());
+ assert!(block_on(s.next()).is_none());
+
+ let buf = Cursor::new(&b"12\r\n\n"[..]);
+ let mut s = buf.lines();
+ assert_eq!(block_on_next!(s), "12".to_string());
+ assert_eq!(block_on_next!(s), "".to_string());
+ assert!(block_on(s.next()).is_none());
+}
+
+#[test]
+fn maybe_pending() {
+ let buf =
+ stream::iter(vec![&b"12"[..], &b"\r"[..]]).map(Ok).into_async_read().interleave_pending();
+ let mut s = buf.lines();
+ assert_eq!(run_next!(s), "12\r".to_string());
+ assert!(run(s.next()).is_none());
+
+ let buf = stream::iter(vec![&b"12"[..], &b"\r\n"[..], &b"\n"[..]])
+ .map(Ok)
+ .into_async_read()
+ .interleave_pending();
+ let mut s = buf.lines();
+ assert_eq!(run_next!(s), "12".to_string());
+ assert_eq!(run_next!(s), "".to_string());
+ assert!(run(s.next()).is_none());
+}
diff --git a/vendor/futures/tests/io_read.rs b/vendor/futures/tests/io_read.rs
new file mode 100644
index 000000000..d39a6ea79
--- /dev/null
+++ b/vendor/futures/tests/io_read.rs
@@ -0,0 +1,64 @@
+use futures::io::AsyncRead;
+use futures_test::task::panic_context;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+struct MockReader {
+ fun: Box<dyn FnMut(&mut [u8]) -> Poll<io::Result<usize>>>,
+}
+
+impl MockReader {
+ fn new(fun: impl FnMut(&mut [u8]) -> Poll<io::Result<usize>> + 'static) -> Self {
+ Self { fun: Box::new(fun) }
+ }
+}
+
+impl AsyncRead for MockReader {
+ fn poll_read(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ (self.get_mut().fun)(buf)
+ }
+}
+
+/// Verifies that the default implementation of `poll_read_vectored`
+/// calls `poll_read` with an empty slice if no buffers are provided.
+#[test]
+fn read_vectored_no_buffers() {
+ let mut reader = MockReader::new(|buf| {
+ assert_eq!(buf, b"");
+ Err(io::ErrorKind::BrokenPipe.into()).into()
+ });
+ let cx = &mut panic_context();
+ let bufs = &mut [];
+
+ let res = Pin::new(&mut reader).poll_read_vectored(cx, bufs);
+ let res = res.map_err(|e| e.kind());
+ assert_eq!(res, Poll::Ready(Err(io::ErrorKind::BrokenPipe)))
+}
+
+/// Verifies that the default implementation of `poll_read_vectored`
+/// calls `poll_read` with the first non-empty buffer.
+#[test]
+fn read_vectored_first_non_empty() {
+ let mut reader = MockReader::new(|buf| {
+ assert_eq!(buf.len(), 4);
+ buf.copy_from_slice(b"four");
+ Poll::Ready(Ok(4))
+ });
+ let cx = &mut panic_context();
+ let mut buf = [0; 4];
+ let bufs = &mut [
+ io::IoSliceMut::new(&mut []),
+ io::IoSliceMut::new(&mut []),
+ io::IoSliceMut::new(&mut buf),
+ ];
+
+ let res = Pin::new(&mut reader).poll_read_vectored(cx, bufs);
+ let res = res.map_err(|e| e.kind());
+ assert_eq!(res, Poll::Ready(Ok(4)));
+ assert_eq!(buf, b"four"[..]);
+}
diff --git a/vendor/futures/tests/io_read_exact.rs b/vendor/futures/tests/io_read_exact.rs
new file mode 100644
index 000000000..6582e50b8
--- /dev/null
+++ b/vendor/futures/tests/io_read_exact.rs
@@ -0,0 +1,17 @@
+use futures::executor::block_on;
+use futures::io::AsyncReadExt;
+
+#[test]
+fn read_exact() {
+ let mut reader: &[u8] = &[1, 2, 3, 4, 5];
+ let mut out = [0u8; 3];
+
+ let res = block_on(reader.read_exact(&mut out)); // read 3 bytes out
+ assert!(res.is_ok());
+ assert_eq!(out, [1, 2, 3]);
+ assert_eq!(reader.len(), 2);
+
+ let res = block_on(reader.read_exact(&mut out)); // read another 3 bytes, but only 2 bytes left
+ assert!(res.is_err());
+ assert_eq!(reader.len(), 0);
+}
diff --git a/vendor/futures/tests/io_read_line.rs b/vendor/futures/tests/io_read_line.rs
new file mode 100644
index 000000000..88a877928
--- /dev/null
+++ b/vendor/futures/tests/io_read_line.rs
@@ -0,0 +1,58 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{AsyncBufReadExt, Cursor};
+use futures::stream::{self, StreamExt, TryStreamExt};
+use futures::task::Poll;
+use futures_test::io::AsyncReadTestExt;
+use futures_test::task::noop_context;
+
+fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+}
+
+#[test]
+fn read_line() {
+ let mut buf = Cursor::new(b"12");
+ let mut v = String::new();
+ assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 2);
+ assert_eq!(v, "12");
+
+ let mut buf = Cursor::new(b"12\n\n");
+ let mut v = String::new();
+ assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 3);
+ assert_eq!(v, "12\n");
+ v.clear();
+ assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 1);
+ assert_eq!(v, "\n");
+ v.clear();
+ assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 0);
+ assert_eq!(v, "");
+}
+
+#[test]
+fn maybe_pending() {
+ let mut buf = b"12".interleave_pending();
+ let mut v = String::new();
+ assert_eq!(run(buf.read_line(&mut v)).unwrap(), 2);
+ assert_eq!(v, "12");
+
+ let mut buf =
+ stream::iter(vec![&b"12"[..], &b"\n\n"[..]]).map(Ok).into_async_read().interleave_pending();
+ let mut v = String::new();
+ assert_eq!(run(buf.read_line(&mut v)).unwrap(), 3);
+ assert_eq!(v, "12\n");
+ v.clear();
+ assert_eq!(run(buf.read_line(&mut v)).unwrap(), 1);
+ assert_eq!(v, "\n");
+ v.clear();
+ assert_eq!(run(buf.read_line(&mut v)).unwrap(), 0);
+ assert_eq!(v, "");
+ v.clear();
+ assert_eq!(run(buf.read_line(&mut v)).unwrap(), 0);
+ assert_eq!(v, "");
+}
diff --git a/vendor/futures/tests/io_read_to_end.rs b/vendor/futures/tests/io_read_to_end.rs
new file mode 100644
index 000000000..7122511fc
--- /dev/null
+++ b/vendor/futures/tests/io_read_to_end.rs
@@ -0,0 +1,65 @@
+use futures::{
+ executor::block_on,
+ io::{self, AsyncRead, AsyncReadExt},
+ task::{Context, Poll},
+};
+use std::pin::Pin;
+
+#[test]
+#[should_panic(expected = "assertion failed: n <= buf.len()")]
+fn issue2310() {
+ struct MyRead {
+ first: bool,
+ }
+
+ impl MyRead {
+ fn new() -> Self {
+ MyRead { first: false }
+ }
+ }
+
+ impl AsyncRead for MyRead {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context,
+ _buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ Poll::Ready(if !self.first {
+ self.first = true;
+ // First iteration: return more than the buffer size
+ Ok(64)
+ } else {
+ // Second iteration: indicate that we are done
+ Ok(0)
+ })
+ }
+ }
+
+ struct VecWrapper {
+ inner: Vec<u8>,
+ }
+
+ impl VecWrapper {
+ fn new() -> Self {
+ VecWrapper { inner: Vec::new() }
+ }
+ }
+
+ impl Drop for VecWrapper {
+ fn drop(&mut self) {
+ // Observe uninitialized bytes
+ println!("{:?}", &self.inner);
+ // Overwrite heap contents
+ for b in &mut self.inner {
+ *b = 0x90;
+ }
+ }
+ }
+
+ block_on(async {
+ let mut vec = VecWrapper::new();
+ let mut read = MyRead::new();
+
+ read.read_to_end(&mut vec.inner).await.unwrap();
+ })
+}
diff --git a/vendor/futures/tests/io_read_to_string.rs b/vendor/futures/tests/io_read_to_string.rs
new file mode 100644
index 000000000..ae6aaa21d
--- /dev/null
+++ b/vendor/futures/tests/io_read_to_string.rs
@@ -0,0 +1,44 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{AsyncReadExt, Cursor};
+use futures::stream::{self, StreamExt, TryStreamExt};
+use futures::task::Poll;
+use futures_test::io::AsyncReadTestExt;
+use futures_test::task::noop_context;
+
+#[test]
+fn read_to_string() {
+ let mut c = Cursor::new(&b""[..]);
+ let mut v = String::new();
+ assert_eq!(block_on(c.read_to_string(&mut v)).unwrap(), 0);
+ assert_eq!(v, "");
+
+ let mut c = Cursor::new(&b"1"[..]);
+ let mut v = String::new();
+ assert_eq!(block_on(c.read_to_string(&mut v)).unwrap(), 1);
+ assert_eq!(v, "1");
+
+ let mut c = Cursor::new(&b"\xff"[..]);
+ let mut v = String::new();
+ assert!(block_on(c.read_to_string(&mut v)).is_err());
+}
+
+#[test]
+fn interleave_pending() {
+ fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+ }
+ let mut buf = stream::iter(vec![&b"12"[..], &b"33"[..], &b"3"[..]])
+ .map(Ok)
+ .into_async_read()
+ .interleave_pending();
+
+ let mut v = String::new();
+ assert_eq!(run(buf.read_to_string(&mut v)).unwrap(), 5);
+ assert_eq!(v, "12333");
+}
diff --git a/vendor/futures/tests/io_read_until.rs b/vendor/futures/tests/io_read_until.rs
new file mode 100644
index 000000000..71f857f4b
--- /dev/null
+++ b/vendor/futures/tests/io_read_until.rs
@@ -0,0 +1,60 @@
+use futures::executor::block_on;
+use futures::future::{Future, FutureExt};
+use futures::io::{AsyncBufReadExt, Cursor};
+use futures::stream::{self, StreamExt, TryStreamExt};
+use futures::task::Poll;
+use futures_test::io::AsyncReadTestExt;
+use futures_test::task::noop_context;
+
+fn run<F: Future + Unpin>(mut f: F) -> F::Output {
+ let mut cx = noop_context();
+ loop {
+ if let Poll::Ready(x) = f.poll_unpin(&mut cx) {
+ return x;
+ }
+ }
+}
+
+#[test]
+fn read_until() {
+ let mut buf = Cursor::new(b"12");
+ let mut v = Vec::new();
+ assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 2);
+ assert_eq!(v, b"12");
+
+ let mut buf = Cursor::new(b"1233");
+ let mut v = Vec::new();
+ assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 3);
+ assert_eq!(v, b"123");
+ v.truncate(0);
+ assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 1);
+ assert_eq!(v, b"3");
+ v.truncate(0);
+ assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 0);
+ assert_eq!(v, []);
+}
+
+#[test]
+fn maybe_pending() {
+ let mut buf = b"12".interleave_pending();
+ let mut v = Vec::new();
+ assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 2);
+ assert_eq!(v, b"12");
+
+ let mut buf = stream::iter(vec![&b"12"[..], &b"33"[..], &b"3"[..]])
+ .map(Ok)
+ .into_async_read()
+ .interleave_pending();
+ let mut v = Vec::new();
+ assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 3);
+ assert_eq!(v, b"123");
+ v.clear();
+ assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 1);
+ assert_eq!(v, b"3");
+ v.clear();
+ assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 1);
+ assert_eq!(v, b"3");
+ v.clear();
+ assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 0);
+ assert_eq!(v, []);
+}
diff --git a/vendor/futures/tests/io_window.rs b/vendor/futures/tests/io_window.rs
new file mode 100644
index 000000000..8f0d48bc9
--- /dev/null
+++ b/vendor/futures/tests/io_window.rs
@@ -0,0 +1,30 @@
+#![allow(clippy::reversed_empty_ranges)] // This is intentional.
+
+use futures::io::Window;
+
+#[test]
+fn set() {
+ let mut buffer = Window::new(&[1, 2, 3]);
+ buffer.set(..3);
+ assert_eq!(buffer.as_ref(), &[1, 2, 3]);
+ buffer.set(3..3);
+ assert_eq!(buffer.as_ref(), &[]);
+ buffer.set(3..=2); // == 3..3
+ assert_eq!(buffer.as_ref(), &[]);
+ buffer.set(0..2);
+ assert_eq!(buffer.as_ref(), &[1, 2]);
+}
+
+#[test]
+#[should_panic]
+fn set_panic_out_of_bounds() {
+ let mut buffer = Window::new(&[1, 2, 3]);
+ buffer.set(2..4);
+}
+
+#[test]
+#[should_panic]
+fn set_panic_start_is_greater_than_end() {
+ let mut buffer = Window::new(&[1, 2, 3]);
+ buffer.set(3..2);
+}
diff --git a/vendor/futures/tests/io_write.rs b/vendor/futures/tests/io_write.rs
new file mode 100644
index 000000000..6af27553c
--- /dev/null
+++ b/vendor/futures/tests/io_write.rs
@@ -0,0 +1,65 @@
+use futures::io::AsyncWrite;
+use futures_test::task::panic_context;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+struct MockWriter {
+ fun: Box<dyn FnMut(&[u8]) -> Poll<io::Result<usize>>>,
+}
+
+impl MockWriter {
+ fn new(fun: impl FnMut(&[u8]) -> Poll<io::Result<usize>> + 'static) -> Self {
+ Self { fun: Box::new(fun) }
+ }
+}
+
+impl AsyncWrite for MockWriter {
+ fn poll_write(
+ self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<io::Result<usize>> {
+ (self.get_mut().fun)(buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ panic!()
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+ panic!()
+ }
+}
+
+/// Verifies that the default implementation of `poll_write_vectored`
+/// calls `poll_write` with an empty slice if no buffers are provided.
+#[test]
+fn write_vectored_no_buffers() {
+ let mut writer = MockWriter::new(|buf| {
+ assert_eq!(buf, b"");
+ Err(io::ErrorKind::BrokenPipe.into()).into()
+ });
+ let cx = &mut panic_context();
+ let bufs = &mut [];
+
+ let res = Pin::new(&mut writer).poll_write_vectored(cx, bufs);
+ let res = res.map_err(|e| e.kind());
+ assert_eq!(res, Poll::Ready(Err(io::ErrorKind::BrokenPipe)))
+}
+
+/// Verifies that the default implementation of `poll_write_vectored`
+/// calls `poll_write` with the first non-empty buffer.
+#[test]
+fn write_vectored_first_non_empty() {
+ let mut writer = MockWriter::new(|buf| {
+ assert_eq!(buf, b"four");
+ Poll::Ready(Ok(4))
+ });
+ let cx = &mut panic_context();
+ let bufs = &mut [io::IoSlice::new(&[]), io::IoSlice::new(&[]), io::IoSlice::new(b"four")];
+
+ let res = Pin::new(&mut writer).poll_write_vectored(cx, bufs);
+ let res = res.map_err(|e| e.kind());
+ assert_eq!(res, Poll::Ready(Ok(4)));
+}
diff --git a/vendor/futures/tests/lock_mutex.rs b/vendor/futures/tests/lock_mutex.rs
new file mode 100644
index 000000000..7c33864c7
--- /dev/null
+++ b/vendor/futures/tests/lock_mutex.rs
@@ -0,0 +1,66 @@
+use futures::channel::mpsc;
+use futures::executor::{block_on, ThreadPool};
+use futures::future::{ready, FutureExt};
+use futures::lock::Mutex;
+use futures::stream::StreamExt;
+use futures::task::{Context, SpawnExt};
+use futures_test::future::FutureTestExt;
+use futures_test::task::{new_count_waker, panic_context};
+use std::sync::Arc;
+
+#[test]
+fn mutex_acquire_uncontested() {
+ let mutex = Mutex::new(());
+ for _ in 0..10 {
+ assert!(mutex.lock().poll_unpin(&mut panic_context()).is_ready());
+ }
+}
+
+#[test]
+fn mutex_wakes_waiters() {
+ let mutex = Mutex::new(());
+ let (waker, counter) = new_count_waker();
+ let lock = mutex.lock().poll_unpin(&mut panic_context());
+ assert!(lock.is_ready());
+
+ let mut cx = Context::from_waker(&waker);
+ let mut waiter = mutex.lock();
+ assert!(waiter.poll_unpin(&mut cx).is_pending());
+ assert_eq!(counter, 0);
+
+ drop(lock);
+
+ assert_eq!(counter, 1);
+ assert!(waiter.poll_unpin(&mut panic_context()).is_ready());
+}
+
+#[test]
+fn mutex_contested() {
+ let (tx, mut rx) = mpsc::unbounded();
+ let pool = ThreadPool::builder().pool_size(16).create().unwrap();
+
+ let tx = Arc::new(tx);
+ let mutex = Arc::new(Mutex::new(0));
+
+ let num_tasks = 1000;
+ for _ in 0..num_tasks {
+ let tx = tx.clone();
+ let mutex = mutex.clone();
+ pool.spawn(async move {
+ let mut lock = mutex.lock().await;
+ ready(()).pending_once().await;
+ *lock += 1;
+ tx.unbounded_send(()).unwrap();
+ drop(lock);
+ })
+ .unwrap();
+ }
+
+ block_on(async {
+ for _ in 0..num_tasks {
+ rx.next().await.unwrap();
+ }
+ let lock = mutex.lock().await;
+ assert_eq!(num_tasks, *lock);
+ })
+}
diff --git a/vendor/futures/tests/macro_comma_support.rs b/vendor/futures/tests/macro_comma_support.rs
new file mode 100644
index 000000000..85871e98b
--- /dev/null
+++ b/vendor/futures/tests/macro_comma_support.rs
@@ -0,0 +1,43 @@
+use futures::{
+ executor::block_on,
+ future::{self, FutureExt},
+ join, ready,
+ task::Poll,
+ try_join,
+};
+
+#[test]
+fn ready() {
+ block_on(future::poll_fn(|_| {
+ ready!(Poll::Ready(()),);
+ Poll::Ready(())
+ }))
+}
+
+#[test]
+fn poll() {
+ use futures::poll;
+
+ block_on(async {
+ let _ = poll!(async {}.boxed(),);
+ })
+}
+
+#[test]
+fn join() {
+ block_on(async {
+ let future1 = async { 1 };
+ let future2 = async { 2 };
+ join!(future1, future2,);
+ })
+}
+
+#[test]
+fn try_join() {
+ block_on(async {
+ let future1 = async { 1 }.never_error();
+ let future2 = async { 2 }.never_error();
+ try_join!(future1, future2,)
+ })
+ .unwrap();
+}
diff --git a/vendor/futures/tests/object_safety.rs b/vendor/futures/tests/object_safety.rs
new file mode 100644
index 000000000..30c892f5e
--- /dev/null
+++ b/vendor/futures/tests/object_safety.rs
@@ -0,0 +1,49 @@
+fn assert_is_object_safe<T>() {}
+
+#[test]
+fn future() {
+ // `FutureExt`, `TryFutureExt` and `UnsafeFutureObj` are not object safe.
+ use futures::future::{FusedFuture, Future, TryFuture};
+
+ assert_is_object_safe::<&dyn Future<Output = ()>>();
+ assert_is_object_safe::<&dyn FusedFuture<Output = ()>>();
+ assert_is_object_safe::<&dyn TryFuture<Ok = (), Error = (), Output = Result<(), ()>>>();
+}
+
+#[test]
+fn stream() {
+ // `StreamExt` and `TryStreamExt` are not object safe.
+ use futures::stream::{FusedStream, Stream, TryStream};
+
+ assert_is_object_safe::<&dyn Stream<Item = ()>>();
+ assert_is_object_safe::<&dyn FusedStream<Item = ()>>();
+ assert_is_object_safe::<&dyn TryStream<Ok = (), Error = (), Item = Result<(), ()>>>();
+}
+
+#[test]
+fn sink() {
+ // `SinkExt` is not object safe.
+ use futures::sink::Sink;
+
+ assert_is_object_safe::<&dyn Sink<(), Error = ()>>();
+}
+
+#[test]
+fn io() {
+ // `AsyncReadExt`, `AsyncWriteExt`, `AsyncSeekExt` and `AsyncBufReadExt` are not object safe.
+ use futures::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite};
+
+ assert_is_object_safe::<&dyn AsyncRead>();
+ assert_is_object_safe::<&dyn AsyncWrite>();
+ assert_is_object_safe::<&dyn AsyncSeek>();
+ assert_is_object_safe::<&dyn AsyncBufRead>();
+}
+
+#[test]
+fn task() {
+ // `ArcWake`, `SpawnExt` and `LocalSpawnExt` are not object safe.
+ use futures::task::{LocalSpawn, Spawn};
+
+ assert_is_object_safe::<&dyn Spawn>();
+ assert_is_object_safe::<&dyn LocalSpawn>();
+}
diff --git a/vendor/futures/tests/oneshot.rs b/vendor/futures/tests/oneshot.rs
new file mode 100644
index 000000000..34b78a33f
--- /dev/null
+++ b/vendor/futures/tests/oneshot.rs
@@ -0,0 +1,78 @@
+use futures::channel::oneshot;
+use futures::future::{FutureExt, TryFutureExt};
+use futures_test::future::FutureTestExt;
+use std::sync::mpsc;
+use std::thread;
+
+#[test]
+fn oneshot_send1() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = mpsc::channel();
+
+ let t = thread::spawn(|| tx1.send(1).unwrap());
+ rx1.map_ok(move |x| tx2.send(x)).run_in_background();
+ assert_eq!(1, rx2.recv().unwrap());
+ t.join().unwrap();
+}
+
+#[test]
+fn oneshot_send2() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = mpsc::channel();
+
+ thread::spawn(|| tx1.send(1).unwrap()).join().unwrap();
+ rx1.map_ok(move |x| tx2.send(x).unwrap()).run_in_background();
+ assert_eq!(1, rx2.recv().unwrap());
+}
+
+#[test]
+fn oneshot_send3() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = mpsc::channel();
+
+ rx1.map_ok(move |x| tx2.send(x).unwrap()).run_in_background();
+ thread::spawn(|| tx1.send(1).unwrap()).join().unwrap();
+ assert_eq!(1, rx2.recv().unwrap());
+}
+
+#[test]
+fn oneshot_drop_tx1() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = mpsc::channel();
+
+ drop(tx1);
+ rx1.map(move |result| tx2.send(result).unwrap()).run_in_background();
+
+ assert_eq!(Err(oneshot::Canceled), rx2.recv().unwrap());
+}
+
+#[test]
+fn oneshot_drop_tx2() {
+ let (tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = mpsc::channel();
+
+ let t = thread::spawn(|| drop(tx1));
+ rx1.map(move |result| tx2.send(result).unwrap()).run_in_background();
+ t.join().unwrap();
+
+ assert_eq!(Err(oneshot::Canceled), rx2.recv().unwrap());
+}
+
+#[test]
+fn oneshot_drop_rx() {
+ let (tx, rx) = oneshot::channel::<i32>();
+ drop(rx);
+ assert_eq!(Err(2), tx.send(2));
+}
+
+#[test]
+fn oneshot_debug() {
+ let (tx, rx) = oneshot::channel::<i32>();
+ assert_eq!(format!("{:?}", tx), "Sender { complete: false }");
+ assert_eq!(format!("{:?}", rx), "Receiver { complete: false }");
+ drop(rx);
+ assert_eq!(format!("{:?}", tx), "Sender { complete: true }");
+ let (tx, rx) = oneshot::channel::<i32>();
+ drop(tx);
+ assert_eq!(format!("{:?}", rx), "Receiver { complete: true }");
+}
diff --git a/vendor/futures/tests/ready_queue.rs b/vendor/futures/tests/ready_queue.rs
new file mode 100644
index 000000000..82901327f
--- /dev/null
+++ b/vendor/futures/tests/ready_queue.rs
@@ -0,0 +1,148 @@
+use futures::channel::oneshot;
+use futures::executor::{block_on, block_on_stream};
+use futures::future;
+use futures::stream::{FuturesUnordered, StreamExt};
+use futures::task::Poll;
+use futures_test::task::noop_context;
+use std::panic::{self, AssertUnwindSafe};
+use std::sync::{Arc, Barrier};
+use std::thread;
+
+#[test]
+fn basic_usage() {
+ block_on(future::lazy(move |cx| {
+ let mut queue = FuturesUnordered::new();
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let (tx3, rx3) = oneshot::channel();
+
+ queue.push(rx1);
+ queue.push(rx2);
+ queue.push(rx3);
+
+ assert!(!queue.poll_next_unpin(cx).is_ready());
+
+ tx2.send("hello").unwrap();
+
+ assert_eq!(Poll::Ready(Some(Ok("hello"))), queue.poll_next_unpin(cx));
+ assert!(!queue.poll_next_unpin(cx).is_ready());
+
+ tx1.send("world").unwrap();
+ tx3.send("world2").unwrap();
+
+ assert_eq!(Poll::Ready(Some(Ok("world"))), queue.poll_next_unpin(cx));
+ assert_eq!(Poll::Ready(Some(Ok("world2"))), queue.poll_next_unpin(cx));
+ assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx));
+ }));
+}
+
+#[test]
+fn resolving_errors() {
+ block_on(future::lazy(move |cx| {
+ let mut queue = FuturesUnordered::new();
+ let (tx1, rx1) = oneshot::channel();
+ let (tx2, rx2) = oneshot::channel();
+ let (tx3, rx3) = oneshot::channel();
+
+ queue.push(rx1);
+ queue.push(rx2);
+ queue.push(rx3);
+
+ assert!(!queue.poll_next_unpin(cx).is_ready());
+
+ drop(tx2);
+
+ assert_eq!(Poll::Ready(Some(Err(oneshot::Canceled))), queue.poll_next_unpin(cx));
+ assert!(!queue.poll_next_unpin(cx).is_ready());
+
+ drop(tx1);
+ tx3.send("world2").unwrap();
+
+ assert_eq!(Poll::Ready(Some(Err(oneshot::Canceled))), queue.poll_next_unpin(cx));
+ assert_eq!(Poll::Ready(Some(Ok("world2"))), queue.poll_next_unpin(cx));
+ assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx));
+ }));
+}
+
+#[test]
+fn dropping_ready_queue() {
+ block_on(future::lazy(move |_| {
+ let queue = FuturesUnordered::new();
+ let (mut tx1, rx1) = oneshot::channel::<()>();
+ let (mut tx2, rx2) = oneshot::channel::<()>();
+ let (mut tx3, rx3) = oneshot::channel::<()>();
+
+ queue.push(rx1);
+ queue.push(rx2);
+ queue.push(rx3);
+
+ {
+ let cx = &mut noop_context();
+ assert!(!tx1.poll_canceled(cx).is_ready());
+ assert!(!tx2.poll_canceled(cx).is_ready());
+ assert!(!tx3.poll_canceled(cx).is_ready());
+
+ drop(queue);
+
+ assert!(tx1.poll_canceled(cx).is_ready());
+ assert!(tx2.poll_canceled(cx).is_ready());
+ assert!(tx3.poll_canceled(cx).is_ready());
+ }
+ }));
+}
+
+#[test]
+fn stress() {
+ const ITER: usize = 300;
+
+ for i in 0..ITER {
+ let n = (i % 10) + 1;
+
+ let mut queue = FuturesUnordered::new();
+
+ for _ in 0..5 {
+ let barrier = Arc::new(Barrier::new(n + 1));
+
+ for num in 0..n {
+ let barrier = barrier.clone();
+ let (tx, rx) = oneshot::channel();
+
+ queue.push(rx);
+
+ thread::spawn(move || {
+ barrier.wait();
+ tx.send(num).unwrap();
+ });
+ }
+
+ barrier.wait();
+
+ let mut sync = block_on_stream(queue);
+
+ let mut rx: Vec<_> = (&mut sync).take(n).map(|res| res.unwrap()).collect();
+
+ assert_eq!(rx.len(), n);
+
+ rx.sort_unstable();
+
+ for (i, x) in rx.into_iter().enumerate() {
+ assert_eq!(i, x);
+ }
+
+ queue = sync.into_inner();
+ }
+ }
+}
+
+#[test]
+fn panicking_future_dropped() {
+ block_on(future::lazy(move |cx| {
+ let mut queue = FuturesUnordered::new();
+ queue.push(future::poll_fn(|_| -> Poll<Result<i32, i32>> { panic!() }));
+
+ let r = panic::catch_unwind(AssertUnwindSafe(|| queue.poll_next_unpin(cx)));
+ assert!(r.is_err());
+ assert!(queue.is_empty());
+ assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx));
+ }));
+}
diff --git a/vendor/futures/tests/recurse.rs b/vendor/futures/tests/recurse.rs
new file mode 100644
index 000000000..d81753c9d
--- /dev/null
+++ b/vendor/futures/tests/recurse.rs
@@ -0,0 +1,25 @@
+use futures::executor::block_on;
+use futures::future::{self, BoxFuture, FutureExt};
+use std::sync::mpsc;
+use std::thread;
+
+#[test]
+fn lots() {
+ #[cfg(not(futures_sanitizer))]
+ const N: i32 = 1_000;
+ #[cfg(futures_sanitizer)] // If N is large, asan reports a stack overflow: https://gist.github.com/taiki-e/099446d21cbec69d4acbacf7a9646136
+ const N: i32 = 100;
+
+ fn do_it(input: (i32, i32)) -> BoxFuture<'static, i32> {
+ let (n, x) = input;
+ if n == 0 {
+ future::ready(x).boxed()
+ } else {
+ future::ready((n - 1, x + n)).then(do_it).boxed()
+ }
+ }
+
+ let (tx, rx) = mpsc::channel();
+ thread::spawn(|| block_on(do_it((N, 0)).map(move |x| tx.send(x).unwrap())));
+ assert_eq!((0..=N).sum::<i32>(), rx.recv().unwrap());
+}
diff --git a/vendor/futures/tests/sink.rs b/vendor/futures/tests/sink.rs
new file mode 100644
index 000000000..f3cf11b93
--- /dev/null
+++ b/vendor/futures/tests/sink.rs
@@ -0,0 +1,554 @@
+use futures::channel::{mpsc, oneshot};
+use futures::executor::block_on;
+use futures::future::{self, poll_fn, Future, FutureExt, TryFutureExt};
+use futures::never::Never;
+use futures::ready;
+use futures::sink::{self, Sink, SinkErrInto, SinkExt};
+use futures::stream::{self, Stream, StreamExt};
+use futures::task::{self, ArcWake, Context, Poll, Waker};
+use futures_test::task::panic_context;
+use std::cell::{Cell, RefCell};
+use std::collections::VecDeque;
+use std::fmt;
+use std::mem;
+use std::pin::Pin;
+use std::rc::Rc;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+
+fn sassert_next<S>(s: &mut S, item: S::Item)
+where
+ S: Stream + Unpin,
+ S::Item: Eq + fmt::Debug,
+{
+ match s.poll_next_unpin(&mut panic_context()) {
+ Poll::Ready(None) => panic!("stream is at its end"),
+ Poll::Ready(Some(e)) => assert_eq!(e, item),
+ Poll::Pending => panic!("stream wasn't ready"),
+ }
+}
+
+fn unwrap<T, E: fmt::Debug>(x: Poll<Result<T, E>>) -> T {
+ match x {
+ Poll::Ready(Ok(x)) => x,
+ Poll::Ready(Err(_)) => panic!("Poll::Ready(Err(_))"),
+ Poll::Pending => panic!("Poll::Pending"),
+ }
+}
+
+ // A flag that records wake events for inspection
+struct Flag(AtomicBool);
+
+impl Flag {
+ fn new() -> Arc<Self> {
+ Arc::new(Self(AtomicBool::new(false)))
+ }
+
+ fn take(&self) -> bool {
+ self.0.swap(false, Ordering::SeqCst)
+ }
+
+ fn set(&self, v: bool) {
+ self.0.store(v, Ordering::SeqCst)
+ }
+}
+
+impl ArcWake for Flag {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.set(true)
+ }
+}
+
+fn flag_cx<F, R>(f: F) -> R
+where
+ F: FnOnce(Arc<Flag>, &mut Context<'_>) -> R,
+{
+ let flag = Flag::new();
+ let waker = task::waker_ref(&flag);
+ let cx = &mut Context::from_waker(&waker);
+ f(flag.clone(), cx)
+}
+
+ // Sends a single item into a sink, resolving to the sink once the item has been accepted
+struct StartSendFut<S: Sink<Item> + Unpin, Item: Unpin>(Option<S>, Option<Item>);
+
+impl<S: Sink<Item> + Unpin, Item: Unpin> StartSendFut<S, Item> {
+ fn new(sink: S, item: Item) -> Self {
+ Self(Some(sink), Some(item))
+ }
+}
+
+impl<S: Sink<Item> + Unpin, Item: Unpin> Future for StartSendFut<S, Item> {
+ type Output = Result<S, S::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let Self(inner, item) = self.get_mut();
+ {
+ let mut inner = inner.as_mut().unwrap();
+ ready!(Pin::new(&mut inner).poll_ready(cx))?;
+ Pin::new(&mut inner).start_send(item.take().unwrap())?;
+ }
+ Poll::Ready(Ok(inner.take().unwrap()))
+ }
+}
+
+// Immediately accepts all requests to start pushing, but completion is managed
+// by manually flushing
+struct ManualFlush<T: Unpin> {
+ data: Vec<T>,
+ waiting_tasks: Vec<Waker>,
+}
+
+impl<T: Unpin> Sink<Option<T>> for ManualFlush<T> {
+ type Error = ();
+
+ fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: Option<T>) -> Result<(), Self::Error> {
+ if let Some(item) = item {
+ self.data.push(item);
+ } else {
+ self.force_flush();
+ }
+ Ok(())
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ if self.data.is_empty() {
+ Poll::Ready(Ok(()))
+ } else {
+ self.waiting_tasks.push(cx.waker().clone());
+ Poll::Pending
+ }
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.poll_flush(cx)
+ }
+}
+
+impl<T: Unpin> ManualFlush<T> {
+ fn new() -> Self {
+ Self { data: Vec::new(), waiting_tasks: Vec::new() }
+ }
+
+ fn force_flush(&mut self) -> Vec<T> {
+ for task in self.waiting_tasks.drain(..) {
+ task.wake()
+ }
+ mem::replace(&mut self.data, Vec::new())
+ }
+}
+
+struct ManualAllow<T: Unpin> {
+ data: Vec<T>,
+ allow: Rc<Allow>,
+}
+
+struct Allow {
+ flag: Cell<bool>,
+ tasks: RefCell<Vec<Waker>>,
+}
+
+impl Allow {
+ fn new() -> Self {
+ Self { flag: Cell::new(false), tasks: RefCell::new(Vec::new()) }
+ }
+
+ fn check(&self, cx: &mut Context<'_>) -> bool {
+ if self.flag.get() {
+ true
+ } else {
+ self.tasks.borrow_mut().push(cx.waker().clone());
+ false
+ }
+ }
+
+ fn start(&self) {
+ self.flag.set(true);
+ let mut tasks = self.tasks.borrow_mut();
+ for task in tasks.drain(..) {
+ task.wake();
+ }
+ }
+}
+
+impl<T: Unpin> Sink<T> for ManualAllow<T> {
+ type Error = ();
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ if self.allow.check(cx) {
+ Poll::Ready(Ok(()))
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
+ self.data.push(item);
+ Ok(())
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+}
+
+fn manual_allow<T: Unpin>() -> (ManualAllow<T>, Rc<Allow>) {
+ let allow = Rc::new(Allow::new());
+ let manual_allow = ManualAllow { data: Vec::new(), allow: allow.clone() };
+ (manual_allow, allow)
+}
+
+#[test]
+fn either_sink() {
+ let mut s =
+ if true { Vec::<i32>::new().left_sink() } else { VecDeque::<i32>::new().right_sink() };
+
+ Pin::new(&mut s).start_send(0).unwrap();
+}
+
+#[test]
+fn vec_sink() {
+ let mut v = Vec::new();
+ Pin::new(&mut v).start_send(0).unwrap();
+ Pin::new(&mut v).start_send(1).unwrap();
+ assert_eq!(v, vec![0, 1]);
+ block_on(v.flush()).unwrap();
+ assert_eq!(v, vec![0, 1]);
+}
+
+#[test]
+fn vecdeque_sink() {
+ let mut deque = VecDeque::new();
+ Pin::new(&mut deque).start_send(2).unwrap();
+ Pin::new(&mut deque).start_send(3).unwrap();
+
+ assert_eq!(deque.pop_front(), Some(2));
+ assert_eq!(deque.pop_front(), Some(3));
+ assert_eq!(deque.pop_front(), None);
+}
+
+#[test]
+fn send() {
+ let mut v = Vec::new();
+
+ block_on(v.send(0)).unwrap();
+ assert_eq!(v, vec![0]);
+
+ block_on(v.send(1)).unwrap();
+ assert_eq!(v, vec![0, 1]);
+
+ block_on(v.send(2)).unwrap();
+ assert_eq!(v, vec![0, 1, 2]);
+}
+
+#[test]
+fn send_all() {
+ let mut v = Vec::new();
+
+ block_on(v.send_all(&mut stream::iter(vec![0, 1]).map(Ok))).unwrap();
+ assert_eq!(v, vec![0, 1]);
+
+ block_on(v.send_all(&mut stream::iter(vec![2, 3]).map(Ok))).unwrap();
+ assert_eq!(v, vec![0, 1, 2, 3]);
+
+ block_on(v.send_all(&mut stream::iter(vec![4, 5]).map(Ok))).unwrap();
+ assert_eq!(v, vec![0, 1, 2, 3, 4, 5]);
+}
+
+// Test that `start_send` on an `mpsc` channel does indeed block when the
+// channel is full
+#[test]
+fn mpsc_blocking_start_send() {
+ let (mut tx, mut rx) = mpsc::channel::<i32>(0);
+
+ block_on(future::lazy(|_| {
+ tx.start_send(0).unwrap();
+
+ flag_cx(|flag, cx| {
+ let mut task = StartSendFut::new(tx, 1);
+
+ assert!(task.poll_unpin(cx).is_pending());
+ assert!(!flag.take());
+ sassert_next(&mut rx, 0);
+ assert!(flag.take());
+ unwrap(task.poll_unpin(cx));
+ assert!(!flag.take());
+ sassert_next(&mut rx, 1);
+ })
+ }));
+}
+
+// test `flush` by using `with` to make the first insertion into a sink block
+// until a oneshot is completed
+#[test]
+fn with_flush() {
+ let (tx, rx) = oneshot::channel();
+ let mut block = rx.boxed();
+ let mut sink = Vec::new().with(|elem| {
+ mem::replace(&mut block, future::ok(()).boxed())
+ .map_ok(move |()| elem + 1)
+ .map_err(|_| -> Never { panic!() })
+ });
+
+ assert_eq!(Pin::new(&mut sink).start_send(0).ok(), Some(()));
+
+ flag_cx(|flag, cx| {
+ let mut task = sink.flush();
+ assert!(task.poll_unpin(cx).is_pending());
+ tx.send(()).unwrap();
+ assert!(flag.take());
+
+ unwrap(task.poll_unpin(cx));
+
+ block_on(sink.send(1)).unwrap();
+ assert_eq!(sink.get_ref(), &[1, 2]);
+ })
+}
+
+// test simple use of with to change data
+#[test]
+fn with_as_map() {
+ let mut sink = Vec::new().with(|item| future::ok::<i32, Never>(item * 2));
+ block_on(sink.send(0)).unwrap();
+ block_on(sink.send(1)).unwrap();
+ block_on(sink.send(2)).unwrap();
+ assert_eq!(sink.get_ref(), &[0, 2, 4]);
+}
+
+// test simple use of with_flat_map
+#[test]
+fn with_flat_map() {
+ let mut sink = Vec::new().with_flat_map(|item| stream::iter(vec![item; item]).map(Ok));
+ block_on(sink.send(0)).unwrap();
+ block_on(sink.send(1)).unwrap();
+ block_on(sink.send(2)).unwrap();
+ block_on(sink.send(3)).unwrap();
+ assert_eq!(sink.get_ref(), &[1, 2, 2, 3, 3, 3]);
+}
+
+// Check that `with` propagates `poll_ready` to the inner sink.
+ // Regression test for issue #1834.
+#[test]
+fn with_propagates_poll_ready() {
+ let (tx, mut rx) = mpsc::channel::<i32>(0);
+ let mut tx = tx.with(|item: i32| future::ok::<i32, mpsc::SendError>(item + 10));
+
+ block_on(future::lazy(|_| {
+ flag_cx(|flag, cx| {
+ let mut tx = Pin::new(&mut tx);
+
+ // Should be ready for the first item.
+ assert_eq!(tx.as_mut().poll_ready(cx), Poll::Ready(Ok(())));
+ assert_eq!(tx.as_mut().start_send(0), Ok(()));
+
+ // Should be ready for the second item only after the first one is received.
+ assert_eq!(tx.as_mut().poll_ready(cx), Poll::Pending);
+ assert!(!flag.take());
+ sassert_next(&mut rx, 10);
+ assert!(flag.take());
+ assert_eq!(tx.as_mut().poll_ready(cx), Poll::Ready(Ok(())));
+ assert_eq!(tx.as_mut().start_send(1), Ok(()));
+ })
+ }));
+}
+
+// test that the `with` sink doesn't require the underlying sink to flush,
+// but doesn't claim to be flushed until the underlying sink is
+#[test]
+fn with_flush_propagate() {
+ let mut sink = ManualFlush::new().with(future::ok::<Option<i32>, ()>);
+ flag_cx(|flag, cx| {
+ unwrap(Pin::new(&mut sink).poll_ready(cx));
+ Pin::new(&mut sink).start_send(Some(0)).unwrap();
+ unwrap(Pin::new(&mut sink).poll_ready(cx));
+ Pin::new(&mut sink).start_send(Some(1)).unwrap();
+
+ {
+ let mut task = sink.flush();
+ assert!(task.poll_unpin(cx).is_pending());
+ assert!(!flag.take());
+ }
+ assert_eq!(sink.get_mut().force_flush(), vec![0, 1]);
+ assert!(flag.take());
+ unwrap(sink.flush().poll_unpin(cx));
+ })
+}
+
+// test that `Clone` is implemented on `with` sinks
+#[test]
+fn with_implements_clone() {
+ let (mut tx, rx) = mpsc::channel(5);
+
+ {
+ let mut is_positive = tx.clone().with(|item| future::ok::<bool, mpsc::SendError>(item > 0));
+
+ let mut is_long =
+ tx.clone().with(|item: &str| future::ok::<bool, mpsc::SendError>(item.len() > 5));
+
+ block_on(is_positive.clone().send(-1)).unwrap();
+ block_on(is_long.clone().send("123456")).unwrap();
+ block_on(is_long.send("123")).unwrap();
+ block_on(is_positive.send(1)).unwrap();
+ }
+
+ block_on(tx.send(false)).unwrap();
+
+ block_on(tx.close()).unwrap();
+
+ assert_eq!(block_on(rx.collect::<Vec<_>>()), vec![false, true, false, true, false]);
+}
+
+ // test that a buffer is a no-op around a sink that always accepts sends
+#[test]
+fn buffer_noop() {
+ let mut sink = Vec::new().buffer(0);
+ block_on(sink.send(0)).unwrap();
+ block_on(sink.send(1)).unwrap();
+ assert_eq!(sink.get_ref(), &[0, 1]);
+
+ let mut sink = Vec::new().buffer(1);
+ block_on(sink.send(0)).unwrap();
+ block_on(sink.send(1)).unwrap();
+ assert_eq!(sink.get_ref(), &[0, 1]);
+}
+
+// test basic buffer functionality, including both filling up to capacity,
+// and writing out when the underlying sink is ready
+#[test]
+fn buffer() {
+ let (sink, allow) = manual_allow::<i32>();
+ let sink = sink.buffer(2);
+
+ let sink = block_on(StartSendFut::new(sink, 0)).unwrap();
+ let mut sink = block_on(StartSendFut::new(sink, 1)).unwrap();
+
+ flag_cx(|flag, cx| {
+ let mut task = sink.send(2);
+ assert!(task.poll_unpin(cx).is_pending());
+ assert!(!flag.take());
+ allow.start();
+ assert!(flag.take());
+ unwrap(task.poll_unpin(cx));
+ assert_eq!(sink.get_ref().data, vec![0, 1, 2]);
+ })
+}
+
+#[test]
+fn fanout_smoke() {
+ let sink1 = Vec::new();
+ let sink2 = Vec::new();
+ let mut sink = sink1.fanout(sink2);
+ block_on(sink.send_all(&mut stream::iter(vec![1, 2, 3]).map(Ok))).unwrap();
+ let (sink1, sink2) = sink.into_inner();
+ assert_eq!(sink1, vec![1, 2, 3]);
+ assert_eq!(sink2, vec![1, 2, 3]);
+}
+
+#[test]
+fn fanout_backpressure() {
+ let (left_send, mut left_recv) = mpsc::channel(0);
+ let (right_send, mut right_recv) = mpsc::channel(0);
+ let sink = left_send.fanout(right_send);
+
+ let mut sink = block_on(StartSendFut::new(sink, 0)).unwrap();
+
+ flag_cx(|flag, cx| {
+ let mut task = sink.send(2);
+ assert!(!flag.take());
+ assert!(task.poll_unpin(cx).is_pending());
+ assert_eq!(block_on(left_recv.next()), Some(0));
+ assert!(flag.take());
+ assert!(task.poll_unpin(cx).is_pending());
+ assert_eq!(block_on(right_recv.next()), Some(0));
+ assert!(flag.take());
+
+ assert!(task.poll_unpin(cx).is_pending());
+ assert_eq!(block_on(left_recv.next()), Some(2));
+ assert!(flag.take());
+ assert!(task.poll_unpin(cx).is_pending());
+ assert_eq!(block_on(right_recv.next()), Some(2));
+ assert!(flag.take());
+
+ unwrap(task.poll_unpin(cx));
+ // make sure receivers live until end of test to prevent send errors
+ drop(left_recv);
+ drop(right_recv);
+ })
+}
+
+#[test]
+fn sink_map_err() {
+ {
+ let cx = &mut panic_context();
+ let (tx, _rx) = mpsc::channel(1);
+ let mut tx = tx.sink_map_err(|_| ());
+ assert_eq!(Pin::new(&mut tx).start_send(()), Ok(()));
+ assert_eq!(Pin::new(&mut tx).poll_flush(cx), Poll::Ready(Ok(())));
+ }
+
+ let tx = mpsc::channel(0).0;
+ assert_eq!(Pin::new(&mut tx.sink_map_err(|_| ())).start_send(()), Err(()));
+}
+
+#[test]
+fn sink_unfold() {
+ block_on(poll_fn(|cx| {
+ let (tx, mut rx) = mpsc::channel(1);
+ let unfold = sink::unfold((), |(), i: i32| {
+ let mut tx = tx.clone();
+ async move {
+ tx.send(i).await.unwrap();
+ Ok::<_, String>(())
+ }
+ });
+ futures::pin_mut!(unfold);
+ assert_eq!(unfold.as_mut().start_send(1), Ok(()));
+ assert_eq!(unfold.as_mut().poll_flush(cx), Poll::Ready(Ok(())));
+ assert_eq!(rx.try_next().unwrap(), Some(1));
+
+ assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(())));
+ assert_eq!(unfold.as_mut().start_send(2), Ok(()));
+ assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(())));
+ assert_eq!(unfold.as_mut().start_send(3), Ok(()));
+ assert_eq!(rx.try_next().unwrap(), Some(2));
+ assert!(rx.try_next().is_err());
+ assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(())));
+ assert_eq!(unfold.as_mut().start_send(4), Ok(()));
+ assert_eq!(unfold.as_mut().poll_flush(cx), Poll::Pending); // Channel full
+ assert_eq!(rx.try_next().unwrap(), Some(3));
+ assert_eq!(rx.try_next().unwrap(), Some(4));
+
+ Poll::Ready(())
+ }))
+}
+
+#[test]
+fn err_into() {
+ #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+ struct ErrIntoTest;
+
+ impl From<mpsc::SendError> for ErrIntoTest {
+ fn from(_: mpsc::SendError) -> Self {
+ Self
+ }
+ }
+
+ {
+ let cx = &mut panic_context();
+ let (tx, _rx) = mpsc::channel(1);
+ let mut tx: SinkErrInto<mpsc::Sender<()>, _, ErrIntoTest> = tx.sink_err_into();
+ assert_eq!(Pin::new(&mut tx).start_send(()), Ok(()));
+ assert_eq!(Pin::new(&mut tx).poll_flush(cx), Poll::Ready(Ok(())));
+ }
+
+ let tx = mpsc::channel(0).0;
+ assert_eq!(Pin::new(&mut tx.sink_err_into()).start_send(()), Err(ErrIntoTest));
+}
diff --git a/vendor/futures/tests/sink_fanout.rs b/vendor/futures/tests/sink_fanout.rs
new file mode 100644
index 000000000..e57b2d8c7
--- /dev/null
+++ b/vendor/futures/tests/sink_fanout.rs
@@ -0,0 +1,24 @@
+use futures::channel::mpsc;
+use futures::executor::block_on;
+use futures::future::join3;
+use futures::sink::SinkExt;
+use futures::stream::{self, StreamExt};
+
+#[test]
+fn it_works() {
+ let (tx1, rx1) = mpsc::channel(1);
+ let (tx2, rx2) = mpsc::channel(2);
+ let tx = tx1.fanout(tx2).sink_map_err(|_| ());
+
+ let src = stream::iter((0..10).map(Ok));
+ let fwd = src.forward(tx);
+
+ let collect_fut1 = rx1.collect::<Vec<_>>();
+ let collect_fut2 = rx2.collect::<Vec<_>>();
+ let (_, vec1, vec2) = block_on(join3(fwd, collect_fut1, collect_fut2));
+
+ let expected = (0..10).collect::<Vec<_>>();
+
+ assert_eq!(vec1, expected);
+ assert_eq!(vec2, expected);
+}
diff --git a/vendor/futures/tests/stream.rs b/vendor/futures/tests/stream.rs
new file mode 100644
index 000000000..0d453d175
--- /dev/null
+++ b/vendor/futures/tests/stream.rs
@@ -0,0 +1,151 @@
+use futures::channel::mpsc;
+use futures::executor::block_on;
+use futures::future::{self, Future};
+use futures::sink::SinkExt;
+use futures::stream::{self, StreamExt};
+use futures::task::Poll;
+use futures::FutureExt;
+use futures_test::task::noop_context;
+
+#[test]
+fn select() {
+ fn select_and_compare(a: Vec<u32>, b: Vec<u32>, expected: Vec<u32>) {
+ let a = stream::iter(a);
+ let b = stream::iter(b);
+ let vec = block_on(stream::select(a, b).collect::<Vec<_>>());
+ assert_eq!(vec, expected);
+ }
+
+ select_and_compare(vec![1, 2, 3], vec![4, 5, 6], vec![1, 4, 2, 5, 3, 6]);
+ select_and_compare(vec![1, 2, 3], vec![4, 5], vec![1, 4, 2, 5, 3]);
+ select_and_compare(vec![1, 2], vec![4, 5, 6], vec![1, 4, 2, 5, 6]);
+}
+
+#[test]
+fn flat_map() {
+ block_on(async {
+ let st =
+ stream::iter(vec![stream::iter(0..=4u8), stream::iter(6..=10), stream::iter(0..=2)]);
+
+ let values: Vec<_> =
+ st.flat_map(|s| s.filter(|v| futures::future::ready(v % 2 == 0))).collect().await;
+
+ assert_eq!(values, vec![0, 2, 4, 6, 8, 10, 0, 2]);
+ });
+}
+
+#[test]
+fn scan() {
+ block_on(async {
+ let values = stream::iter(vec![1u8, 2, 3, 4, 6, 8, 2])
+ .scan(1, |state, e| {
+ *state += 1;
+ futures::future::ready(if e < *state { Some(e) } else { None })
+ })
+ .collect::<Vec<_>>()
+ .await;
+
+ assert_eq!(values, vec![1u8, 2, 3, 4]);
+ });
+}
+
+#[test]
+fn take_until() {
+ fn make_stop_fut(stop_on: u32) -> impl Future<Output = ()> {
+ let mut i = 0;
+ future::poll_fn(move |_cx| {
+ i += 1;
+ if i <= stop_on {
+ Poll::Pending
+ } else {
+ Poll::Ready(())
+ }
+ })
+ }
+
+ block_on(async {
+ // Verify stopping works:
+ let stream = stream::iter(1u32..=10);
+ let stop_fut = make_stop_fut(5);
+
+ let stream = stream.take_until(stop_fut);
+ let last = stream.fold(0, |_, i| async move { i }).await;
+ assert_eq!(last, 5);
+
+ // Verify take_future() works:
+ let stream = stream::iter(1..=10);
+ let stop_fut = make_stop_fut(5);
+
+ let mut stream = stream.take_until(stop_fut);
+
+ assert_eq!(stream.next().await, Some(1));
+ assert_eq!(stream.next().await, Some(2));
+
+ stream.take_future();
+
+ let last = stream.fold(0, |_, i| async move { i }).await;
+ assert_eq!(last, 10);
+
+ // Verify take_future() returns None if stream is stopped:
+ let stream = stream::iter(1u32..=10);
+ let stop_fut = make_stop_fut(1);
+ let mut stream = stream.take_until(stop_fut);
+ assert_eq!(stream.next().await, Some(1));
+ assert_eq!(stream.next().await, None);
+ assert!(stream.take_future().is_none());
+
+ // Verify TakeUntil is fused:
+ let mut i = 0;
+ let stream = stream::poll_fn(move |_cx| {
+ i += 1;
+ match i {
+ 1 => Poll::Ready(Some(1)),
+ 2 => Poll::Ready(None),
+ _ => panic!("TakeUntil not fused"),
+ }
+ });
+
+ let stop_fut = make_stop_fut(1);
+ let mut stream = stream.take_until(stop_fut);
+ assert_eq!(stream.next().await, Some(1));
+ assert_eq!(stream.next().await, None);
+ assert_eq!(stream.next().await, None);
+ });
+}
+
+#[test]
+#[should_panic]
+fn chunks_panic_on_cap_zero() {
+ let (_, rx1) = mpsc::channel::<()>(1);
+
+ let _ = rx1.chunks(0);
+}
+
+#[test]
+#[should_panic]
+fn ready_chunks_panic_on_cap_zero() {
+ let (_, rx1) = mpsc::channel::<()>(1);
+
+ let _ = rx1.ready_chunks(0);
+}
+
+#[test]
+fn ready_chunks() {
+ let (mut tx, rx1) = mpsc::channel::<i32>(16);
+
+ let mut s = rx1.ready_chunks(2);
+
+ let mut cx = noop_context();
+ assert!(s.next().poll_unpin(&mut cx).is_pending());
+
+ block_on(async {
+ tx.send(1).await.unwrap();
+
+ assert_eq!(s.next().await.unwrap(), vec![1]);
+ tx.send(2).await.unwrap();
+ tx.send(3).await.unwrap();
+ tx.send(4).await.unwrap();
+ assert_eq!(s.next().await.unwrap(), vec![2, 3]);
+ assert_eq!(s.next().await.unwrap(), vec![4]);
+ });
+}
diff --git a/vendor/futures/tests/stream_abortable.rs b/vendor/futures/tests/stream_abortable.rs
new file mode 100644
index 000000000..2339dd052
--- /dev/null
+++ b/vendor/futures/tests/stream_abortable.rs
@@ -0,0 +1,46 @@
+use futures::channel::mpsc;
+use futures::executor::block_on;
+use futures::stream::{abortable, Stream, StreamExt};
+use futures::task::{Context, Poll};
+use futures::SinkExt;
+use futures_test::task::new_count_waker;
+use std::pin::Pin;
+
+#[test]
+fn abortable_works() {
+ let (_tx, a_rx) = mpsc::channel::<()>(1);
+ let (mut abortable_rx, abort_handle) = abortable(a_rx);
+
+ abort_handle.abort();
+ assert!(abortable_rx.is_aborted());
+ assert_eq!(None, block_on(abortable_rx.next()));
+}
+
+#[test]
+fn abortable_awakens() {
+ let (_tx, a_rx) = mpsc::channel::<()>(1);
+ let (mut abortable_rx, abort_handle) = abortable(a_rx);
+
+ let (waker, counter) = new_count_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ assert_eq!(counter, 0);
+ assert_eq!(Poll::Pending, Pin::new(&mut abortable_rx).poll_next(&mut cx));
+ assert_eq!(counter, 0);
+
+ abort_handle.abort();
+ assert_eq!(counter, 1);
+ assert!(abortable_rx.is_aborted());
+ assert_eq!(Poll::Ready(None), Pin::new(&mut abortable_rx).poll_next(&mut cx));
+}
+
+#[test]
+fn abortable_resolves() {
+ let (mut tx, a_rx) = mpsc::channel::<()>(1);
+ let (mut abortable_rx, _abort_handle) = abortable(a_rx);
+
+ block_on(tx.send(())).unwrap();
+
+ assert!(!abortable_rx.is_aborted());
+ assert_eq!(Some(()), block_on(abortable_rx.next()));
+}
diff --git a/vendor/futures/tests/stream_buffer_unordered.rs b/vendor/futures/tests/stream_buffer_unordered.rs
new file mode 100644
index 000000000..9a2ee174e
--- /dev/null
+++ b/vendor/futures/tests/stream_buffer_unordered.rs
@@ -0,0 +1,73 @@
+use futures::channel::{mpsc, oneshot};
+use futures::executor::{block_on, block_on_stream};
+use futures::sink::SinkExt;
+use futures::stream::StreamExt;
+use std::sync::mpsc as std_mpsc;
+use std::thread;
+
+#[test]
+#[ignore] // FIXME: https://github.com/rust-lang/futures-rs/issues/1790
+fn works() {
+ const N: usize = 4;
+
+ let (mut tx, rx) = mpsc::channel(1);
+
+ let (tx2, rx2) = std_mpsc::channel();
+ let (tx3, rx3) = std_mpsc::channel();
+ let t1 = thread::spawn(move || {
+ for _ in 0..=N {
+ let (mytx, myrx) = oneshot::channel();
+ block_on(tx.send(myrx)).unwrap();
+ tx3.send(mytx).unwrap();
+ }
+ rx2.recv().unwrap();
+ for _ in 0..N {
+ let (mytx, myrx) = oneshot::channel();
+ block_on(tx.send(myrx)).unwrap();
+ tx3.send(mytx).unwrap();
+ }
+ });
+
+ let (tx4, rx4) = std_mpsc::channel();
+ let t2 = thread::spawn(move || {
+ for item in block_on_stream(rx.buffer_unordered(N)) {
+ tx4.send(item.unwrap()).unwrap();
+ }
+ });
+
+ let o1 = rx3.recv().unwrap();
+ let o2 = rx3.recv().unwrap();
+ let o3 = rx3.recv().unwrap();
+ let o4 = rx3.recv().unwrap();
+ assert!(rx4.try_recv().is_err());
+
+ o1.send(1).unwrap();
+ assert_eq!(rx4.recv(), Ok(1));
+ o3.send(3).unwrap();
+ assert_eq!(rx4.recv(), Ok(3));
+ tx2.send(()).unwrap();
+ o2.send(2).unwrap();
+ assert_eq!(rx4.recv(), Ok(2));
+ o4.send(4).unwrap();
+ assert_eq!(rx4.recv(), Ok(4));
+
+ let o5 = rx3.recv().unwrap();
+ let o6 = rx3.recv().unwrap();
+ let o7 = rx3.recv().unwrap();
+ let o8 = rx3.recv().unwrap();
+ let o9 = rx3.recv().unwrap();
+
+ o5.send(5).unwrap();
+ assert_eq!(rx4.recv(), Ok(5));
+ o8.send(8).unwrap();
+ assert_eq!(rx4.recv(), Ok(8));
+ o9.send(9).unwrap();
+ assert_eq!(rx4.recv(), Ok(9));
+ o7.send(7).unwrap();
+ assert_eq!(rx4.recv(), Ok(7));
+ o6.send(6).unwrap();
+ assert_eq!(rx4.recv(), Ok(6));
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+}
diff --git a/vendor/futures/tests/stream_catch_unwind.rs b/vendor/futures/tests/stream_catch_unwind.rs
new file mode 100644
index 000000000..8b23a0a7e
--- /dev/null
+++ b/vendor/futures/tests/stream_catch_unwind.rs
@@ -0,0 +1,27 @@
+use futures::executor::block_on_stream;
+use futures::stream::{self, StreamExt};
+
+#[test]
+fn panic_in_the_middle_of_the_stream() {
+ let stream = stream::iter(vec![Some(10), None, Some(11)]);
+
+ // panic on second element
+ let stream_panicking = stream.map(|o| o.unwrap());
+ let mut iter = block_on_stream(stream_panicking.catch_unwind());
+
+ assert_eq!(10, iter.next().unwrap().ok().unwrap());
+ assert!(iter.next().unwrap().is_err());
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn no_panic() {
+ let stream = stream::iter(vec![10, 11, 12]);
+
+ let mut iter = block_on_stream(stream.catch_unwind());
+
+ assert_eq!(10, iter.next().unwrap().ok().unwrap());
+ assert_eq!(11, iter.next().unwrap().ok().unwrap());
+ assert_eq!(12, iter.next().unwrap().ok().unwrap());
+ assert!(iter.next().is_none());
+}
diff --git a/vendor/futures/tests/stream_futures_ordered.rs b/vendor/futures/tests/stream_futures_ordered.rs
new file mode 100644
index 000000000..7506c65a6
--- /dev/null
+++ b/vendor/futures/tests/stream_futures_ordered.rs
@@ -0,0 +1,84 @@
+use futures::channel::oneshot;
+use futures::executor::{block_on, block_on_stream};
+use futures::future::{self, join, Future, FutureExt, TryFutureExt};
+use futures::stream::{FuturesOrdered, StreamExt};
+use futures_test::task::noop_context;
+use std::any::Any;
+
+#[test]
+fn works_1() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut stream = vec![a_rx, b_rx, c_rx].into_iter().collect::<FuturesOrdered<_>>();
+
+ b_tx.send(99).unwrap();
+ assert!(stream.poll_next_unpin(&mut noop_context()).is_pending());
+
+ a_tx.send(33).unwrap();
+ c_tx.send(33).unwrap();
+
+ let mut iter = block_on_stream(stream);
+ assert_eq!(Some(Ok(33)), iter.next());
+ assert_eq!(Some(Ok(99)), iter.next());
+ assert_eq!(Some(Ok(33)), iter.next());
+ assert_eq!(None, iter.next());
+}
+
+#[test]
+fn works_2() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut stream = vec![a_rx.boxed(), join(b_rx, c_rx).map(|(a, b)| Ok(a? + b?)).boxed()]
+ .into_iter()
+ .collect::<FuturesOrdered<_>>();
+
+ let mut cx = noop_context();
+ a_tx.send(33).unwrap();
+ b_tx.send(33).unwrap();
+ assert!(stream.poll_next_unpin(&mut cx).is_ready());
+ assert!(stream.poll_next_unpin(&mut cx).is_pending());
+ c_tx.send(33).unwrap();
+ assert!(stream.poll_next_unpin(&mut cx).is_ready());
+}
+
+#[test]
+fn from_iterator() {
+ let stream = vec![future::ready::<i32>(1), future::ready::<i32>(2), future::ready::<i32>(3)]
+ .into_iter()
+ .collect::<FuturesOrdered<_>>();
+ assert_eq!(stream.len(), 3);
+ assert_eq!(block_on(stream.collect::<Vec<_>>()), vec![1, 2, 3]);
+}
+
+#[test]
+fn queue_never_unblocked() {
+ let (_a_tx, a_rx) = oneshot::channel::<Box<dyn Any + Send>>();
+ let (b_tx, b_rx) = oneshot::channel::<Box<dyn Any + Send>>();
+ let (c_tx, c_rx) = oneshot::channel::<Box<dyn Any + Send>>();
+
+ let mut stream = vec![
+ Box::new(a_rx) as Box<dyn Future<Output = _> + Unpin>,
+ Box::new(
+ future::try_select(b_rx, c_rx)
+ .map_err(|e| e.factor_first().0)
+ .and_then(|e| future::ok(Box::new(e) as Box<dyn Any + Send>)),
+ ) as _,
+ ]
+ .into_iter()
+ .collect::<FuturesOrdered<_>>();
+
+ let cx = &mut noop_context();
+ for _ in 0..10 {
+ assert!(stream.poll_next_unpin(cx).is_pending());
+ }
+
+ b_tx.send(Box::new(())).unwrap();
+ assert!(stream.poll_next_unpin(cx).is_pending());
+ c_tx.send(Box::new(())).unwrap();
+ assert!(stream.poll_next_unpin(cx).is_pending());
+ assert!(stream.poll_next_unpin(cx).is_pending());
+}
diff --git a/vendor/futures/tests/stream_futures_unordered.rs b/vendor/futures/tests/stream_futures_unordered.rs
new file mode 100644
index 000000000..439c809be
--- /dev/null
+++ b/vendor/futures/tests/stream_futures_unordered.rs
@@ -0,0 +1,369 @@
+use futures::channel::oneshot;
+use futures::executor::{block_on, block_on_stream};
+use futures::future::{self, join, Future, FutureExt};
+use futures::stream::{FusedStream, FuturesUnordered, StreamExt};
+use futures::task::{Context, Poll};
+use futures_test::future::FutureTestExt;
+use futures_test::task::noop_context;
+use futures_test::{assert_stream_done, assert_stream_next, assert_stream_pending};
+use std::iter::FromIterator;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+#[test]
+fn is_terminated() {
+ let mut cx = noop_context();
+ let mut tasks = FuturesUnordered::new();
+
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None));
+ assert_eq!(tasks.is_terminated(), true);
+
+ // Test that the sentinel value doesn't leak
+ assert_eq!(tasks.is_empty(), true);
+ assert_eq!(tasks.len(), 0);
+ assert_eq!(tasks.iter_mut().len(), 0);
+
+ tasks.push(future::ready(1));
+
+ assert_eq!(tasks.is_empty(), false);
+ assert_eq!(tasks.len(), 1);
+ assert_eq!(tasks.iter_mut().len(), 1);
+
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(Some(1)));
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None));
+ assert_eq!(tasks.is_terminated(), true);
+}
+
+#[test]
+fn works_1() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut iter =
+ block_on_stream(vec![a_rx, b_rx, c_rx].into_iter().collect::<FuturesUnordered<_>>());
+
+ b_tx.send(99).unwrap();
+ assert_eq!(Some(Ok(99)), iter.next());
+
+ a_tx.send(33).unwrap();
+ c_tx.send(33).unwrap();
+ assert_eq!(Some(Ok(33)), iter.next());
+ assert_eq!(Some(Ok(33)), iter.next());
+ assert_eq!(None, iter.next());
+}
+
+#[test]
+fn works_2() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut stream = vec![a_rx.boxed(), join(b_rx, c_rx).map(|(a, b)| Ok(a? + b?)).boxed()]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ a_tx.send(9).unwrap();
+ b_tx.send(10).unwrap();
+
+ let mut cx = noop_context();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(9))));
+ c_tx.send(20).unwrap();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(30))));
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(None));
+}
+
+#[test]
+fn from_iterator() {
+ let stream = vec![future::ready::<i32>(1), future::ready::<i32>(2), future::ready::<i32>(3)]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+ assert_eq!(stream.len(), 3);
+ assert_eq!(block_on(stream.collect::<Vec<_>>()), vec![1, 2, 3]);
+}
+
+#[test]
+fn finished_future() {
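+ // Only the `select` future can complete (`a_rx` is never fired), so the set yields
+ // a single item and then stays pending forever.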
+ let (_a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut stream = vec![
+ Box::new(a_rx) as Box<dyn Future<Output = Result<_, _>> + Unpin>,
+ Box::new(future::select(b_rx, c_rx).map(|e| e.factor_first().0)) as _,
+ ]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ let cx = &mut noop_context();
+ for _ in 0..10 {
+ assert!(stream.poll_next_unpin(cx).is_pending());
+ }
+
+ b_tx.send(12).unwrap();
+ c_tx.send(3).unwrap();
+ assert!(stream.poll_next_unpin(cx).is_ready());
+ assert!(stream.poll_next_unpin(cx).is_pending());
+ assert!(stream.poll_next_unpin(cx).is_pending());
+}
+
+#[test]
+fn iter_mut_cancel() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let mut stream = vec![a_rx, b_rx, c_rx].into_iter().collect::<FuturesUnordered<_>>();
+
+ for rx in stream.iter_mut() {
+ rx.close();
+ }
+
+ let mut iter = block_on_stream(stream);
+
+ assert!(a_tx.is_canceled());
+ assert!(b_tx.is_canceled());
+ assert!(c_tx.is_canceled());
+
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn iter_mut_len() {
+ let mut stream =
+ vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ let mut iter_mut = stream.iter_mut();
+ assert_eq!(iter_mut.len(), 3);
+ assert!(iter_mut.next().is_some());
+ assert_eq!(iter_mut.len(), 2);
+ assert!(iter_mut.next().is_some());
+ assert_eq!(iter_mut.len(), 1);
+ assert!(iter_mut.next().is_some());
+ assert_eq!(iter_mut.len(), 0);
+ assert!(iter_mut.next().is_none());
+}
+
+#[test]
+fn iter_cancel() {
+ struct AtomicCancel<F> {
+ future: F,
+ cancel: AtomicBool,
+ }
+
+ impl<F: Future + Unpin> Future for AtomicCancel<F> {
+ type Output = Option<<F as Future>::Output>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ if self.cancel.load(Ordering::Relaxed) {
+ Poll::Ready(None)
+ } else {
+ self.future.poll_unpin(cx).map(Some)
+ }
+ }
+ }
+
+ impl<F: Future + Unpin> AtomicCancel<F> {
+ fn new(future: F) -> Self {
+ Self { future, cancel: AtomicBool::new(false) }
+ }
+ }
+
+ let stream = vec![
+ AtomicCancel::new(future::pending::<()>()),
+ AtomicCancel::new(future::pending::<()>()),
+ AtomicCancel::new(future::pending::<()>()),
+ ]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ for f in stream.iter() {
+ f.cancel.store(true, Ordering::Relaxed);
+ }
+
+ let mut iter = block_on_stream(stream);
+
+ assert_eq!(iter.next(), Some(None));
+ assert_eq!(iter.next(), Some(None));
+ assert_eq!(iter.next(), Some(None));
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn iter_len() {
+ let stream = vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ let mut iter = stream.iter();
+ assert_eq!(iter.len(), 3);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn into_iter_cancel() {
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+
+ let stream = vec![a_rx, b_rx, c_rx].into_iter().collect::<FuturesUnordered<_>>();
+
+ let stream = stream
+ .into_iter()
+ .map(|mut rx| {
+ rx.close();
+ rx
+ })
+ .collect::<FuturesUnordered<_>>();
+
+ let mut iter = block_on_stream(stream);
+
+ assert!(a_tx.is_canceled());
+ assert!(b_tx.is_canceled());
+ assert!(c_tx.is_canceled());
+
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled)));
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn into_iter_len() {
+ let stream = vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()]
+ .into_iter()
+ .collect::<FuturesUnordered<_>>();
+
+ let mut into_iter = stream.into_iter();
+ assert_eq!(into_iter.len(), 3);
+ assert!(into_iter.next().is_some());
+ assert_eq!(into_iter.len(), 2);
+ assert!(into_iter.next().is_some());
+ assert_eq!(into_iter.len(), 1);
+ assert!(into_iter.next().is_some());
+ assert_eq!(into_iter.len(), 0);
+ assert!(into_iter.next().is_none());
+}
+
+#[test]
+fn futures_not_moved_after_poll() {
+ // Each future becomes ready only after it has been polled twice and asserts
+ // that it is not moved between those polls.
+ let fut = future::ready(()).pending_once().assert_unmoved();
+ let mut stream = vec![fut; 3].into_iter().collect::<FuturesUnordered<_>>();
+ assert_stream_pending!(stream);
+ assert_stream_next!(stream, ());
+ assert_stream_next!(stream, ());
+ assert_stream_next!(stream, ());
+ assert_stream_done!(stream);
+}
+
+#[test]
+fn len_valid_during_out_of_order_completion() {
+ // Complete futures out-of-order and add new futures afterwards to ensure
+ // length values remain correct.
+ let (a_tx, a_rx) = oneshot::channel::<i32>();
+ let (b_tx, b_rx) = oneshot::channel::<i32>();
+ let (c_tx, c_rx) = oneshot::channel::<i32>();
+ let (d_tx, d_rx) = oneshot::channel::<i32>();
+
+ let mut cx = noop_context();
+ let mut stream = FuturesUnordered::new();
+ assert_eq!(stream.len(), 0);
+
+ stream.push(a_rx);
+ assert_eq!(stream.len(), 1);
+ stream.push(b_rx);
+ assert_eq!(stream.len(), 2);
+ stream.push(c_rx);
+ assert_eq!(stream.len(), 3);
+
+ b_tx.send(4).unwrap();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(4))));
+ assert_eq!(stream.len(), 2);
+
+ stream.push(d_rx);
+ assert_eq!(stream.len(), 3);
+
+ c_tx.send(5).unwrap();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(5))));
+ assert_eq!(stream.len(), 2);
+
+ d_tx.send(6).unwrap();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(6))));
+ assert_eq!(stream.len(), 1);
+
+ a_tx.send(7).unwrap();
+ assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(7))));
+ assert_eq!(stream.len(), 0);
+}
+
+#[test]
+fn polled_only_once_at_most_per_iteration() {
+ #[derive(Debug, Clone, Copy, Default)]
+ struct F {
+ polled: bool,
+ }
+
+ impl Future for F {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, _: &mut Context) -> Poll<Self::Output> {
+ if self.polled {
+ panic!("polled twice")
+ } else {
+ self.polled = true;
+ Poll::Pending
+ }
+ }
+ }
+
+ let cx = &mut noop_context();
+
+ let mut tasks = FuturesUnordered::from_iter(vec![F::default(); 10]);
+ assert!(tasks.poll_next_unpin(cx).is_pending());
+ assert_eq!(10, tasks.iter().filter(|f| f.polled).count());
+
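+ // FuturesUnordered limits how many inner futures it polls in a single `poll_next`
+ // call; with 33 queued futures only 32 are polled here, and none is polled twice.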
+ let mut tasks = FuturesUnordered::from_iter(vec![F::default(); 33]);
+ assert!(tasks.poll_next_unpin(cx).is_pending());
+ assert_eq!(32, tasks.iter().filter(|f| f.polled).count());
+
+ let mut tasks = FuturesUnordered::<F>::new();
+ assert_eq!(Poll::Ready(None), tasks.poll_next_unpin(cx));
+}
+
+#[test]
+fn clear() {
+ let mut tasks = FuturesUnordered::from_iter(vec![future::ready(1), future::ready(2)]);
+
+ assert_eq!(block_on(tasks.next()), Some(1));
+ assert!(!tasks.is_empty());
+
+ tasks.clear();
+ assert!(tasks.is_empty());
+
+ tasks.push(future::ready(3));
+ assert!(!tasks.is_empty());
+
+ tasks.clear();
+ assert!(tasks.is_empty());
+
+ assert_eq!(block_on(tasks.next()), None);
+ assert!(tasks.is_terminated());
+ tasks.clear();
+ assert!(!tasks.is_terminated());
+}
diff --git a/vendor/futures/tests/stream_into_async_read.rs b/vendor/futures/tests/stream_into_async_read.rs
new file mode 100644
index 000000000..60188d3e5
--- /dev/null
+++ b/vendor/futures/tests/stream_into_async_read.rs
@@ -0,0 +1,94 @@
+use core::pin::Pin;
+use futures::io::{AsyncBufRead, AsyncRead};
+use futures::stream::{self, TryStreamExt};
+use futures::task::Poll;
+use futures_test::{stream::StreamTestExt, task::noop_context};
+
+macro_rules! assert_read {
+ ($reader:expr, $buf:expr, $item:expr) => {
+ let mut cx = noop_context();
+ loop {
+ match Pin::new(&mut $reader).poll_read(&mut cx, $buf) {
+ Poll::Ready(Ok(x)) => {
+ assert_eq!(x, $item);
+ break;
+ }
+ Poll::Ready(Err(err)) => {
+ panic!("assertion failed: expected value but got {}", err);
+ }
+ Poll::Pending => {
+ continue;
+ }
+ }
+ }
+ };
+}
+
+macro_rules! assert_fill_buf {
+ ($reader:expr, $buf:expr) => {
+ let mut cx = noop_context();
+ loop {
+ match Pin::new(&mut $reader).poll_fill_buf(&mut cx) {
+ Poll::Ready(Ok(x)) => {
+ assert_eq!(x, $buf);
+ break;
+ }
+ Poll::Ready(Err(err)) => {
+ panic!("assertion failed: expected value but got {}", err);
+ }
+ Poll::Pending => {
+ continue;
+ }
+ }
+ }
+ };
+}
+
+#[test]
+fn test_into_async_read() {
+ let stream = stream::iter((1..=3).flat_map(|_| vec![Ok(vec![]), Ok(vec![1, 2, 3, 4, 5])]));
+ let mut reader = stream.interleave_pending().into_async_read();
+ let mut buf = vec![0; 3];
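+ // Empty chunks and the interleaved pendings are invisible to the reader; each
+ // non-empty 5-byte chunk is consumed as a 3-byte read followed by a 2-byte read.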
+
+ assert_read!(reader, &mut buf, 3);
+ assert_eq!(&buf, &[1, 2, 3]);
+
+ assert_read!(reader, &mut buf, 2);
+ assert_eq!(&buf[..2], &[4, 5]);
+
+ assert_read!(reader, &mut buf, 3);
+ assert_eq!(&buf, &[1, 2, 3]);
+
+ assert_read!(reader, &mut buf, 2);
+ assert_eq!(&buf[..2], &[4, 5]);
+
+ assert_read!(reader, &mut buf, 3);
+ assert_eq!(&buf, &[1, 2, 3]);
+
+ assert_read!(reader, &mut buf, 2);
+ assert_eq!(&buf[..2], &[4, 5]);
+
+ assert_read!(reader, &mut buf, 0);
+}
+
+#[test]
+fn test_into_async_bufread() {
+ let stream = stream::iter((1..=2).flat_map(|_| vec![Ok(vec![]), Ok(vec![1, 2, 3, 4, 5])]));
+ let mut reader = stream.interleave_pending().into_async_read();
+
+ let mut reader = Pin::new(&mut reader);
+
+ assert_fill_buf!(reader, &[1, 2, 3, 4, 5][..]);
+ reader.as_mut().consume(3);
+
+ assert_fill_buf!(reader, &[4, 5][..]);
+ reader.as_mut().consume(2);
+
+ assert_fill_buf!(reader, &[1, 2, 3, 4, 5][..]);
+ reader.as_mut().consume(2);
+
+ assert_fill_buf!(reader, &[3, 4, 5][..]);
+ reader.as_mut().consume(3);
+
+ assert_fill_buf!(reader, &[][..]);
+}
diff --git a/vendor/futures/tests/stream_peekable.rs b/vendor/futures/tests/stream_peekable.rs
new file mode 100644
index 000000000..153fcc25b
--- /dev/null
+++ b/vendor/futures/tests/stream_peekable.rs
@@ -0,0 +1,58 @@
+use futures::executor::block_on;
+use futures::pin_mut;
+use futures::stream::{self, Peekable, StreamExt};
+
+#[test]
+fn peekable() {
+ block_on(async {
+ let peekable: Peekable<_> = stream::iter(vec![1u8, 2, 3]).peekable();
+ pin_mut!(peekable);
+ assert_eq!(peekable.as_mut().peek().await, Some(&1u8));
+ assert_eq!(peekable.collect::<Vec<u8>>().await, vec![1, 2, 3]);
+
+ let s = stream::once(async { 1 }).peekable();
+ pin_mut!(s);
+ assert_eq!(s.as_mut().peek().await, Some(&1u8));
+ assert_eq!(s.collect::<Vec<u8>>().await, vec![1]);
+ });
+}
+
+#[test]
+fn peekable_mut() {
+ block_on(async {
+ let s = stream::iter(vec![1u8, 2, 3]).peekable();
+ pin_mut!(s);
+ if let Some(p) = s.as_mut().peek_mut().await {
+ if *p == 1 {
+ *p = 5;
+ }
+ }
+ assert_eq!(s.collect::<Vec<_>>().await, vec![5, 2, 3]);
+ });
+}
+
+#[test]
+fn peekable_next_if_eq() {
+ block_on(async {
+ // first, try on references
+ let s = stream::iter(vec!["Heart", "of", "Gold"]).peekable();
+ pin_mut!(s);
+ // try before `peek()`
+ assert_eq!(s.as_mut().next_if_eq(&"trillian").await, None);
+ assert_eq!(s.as_mut().next_if_eq(&"Heart").await, Some("Heart"));
+ // try after `peek()`
+ assert_eq!(s.as_mut().peek().await, Some(&"of"));
+ assert_eq!(s.as_mut().next_if_eq(&"of").await, Some("of"));
+ assert_eq!(s.as_mut().next_if_eq(&"zaphod").await, None);
+ // make sure `next()` still behaves
+ assert_eq!(s.next().await, Some("Gold"));
+
+ // make sure comparison works for owned values
+ let s = stream::iter(vec![String::from("Ludicrous"), "speed".into()]).peekable();
+ pin_mut!(s);
+ // make sure basic functionality works
+ assert_eq!(s.as_mut().next_if_eq("Ludicrous").await, Some("Ludicrous".into()));
+ assert_eq!(s.as_mut().next_if_eq("speed").await, Some("speed".into()));
+ assert_eq!(s.as_mut().next_if_eq("").await, None);
+ });
+}
diff --git a/vendor/futures/tests/stream_select_all.rs b/vendor/futures/tests/stream_select_all.rs
new file mode 100644
index 000000000..4ae073576
--- /dev/null
+++ b/vendor/futures/tests/stream_select_all.rs
@@ -0,0 +1,197 @@
+use futures::channel::mpsc;
+use futures::executor::{block_on, block_on_stream};
+use futures::future::{self, FutureExt};
+use futures::stream::{self, select_all, FusedStream, SelectAll, StreamExt};
+use futures::task::Poll;
+use futures_test::task::noop_context;
+
+#[test]
+fn is_terminated() {
+ let mut cx = noop_context();
+ let mut tasks = SelectAll::new();
+
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None));
+ assert_eq!(tasks.is_terminated(), true);
+
+ // Test that the sentinel value doesn't leak
+ assert_eq!(tasks.is_empty(), true);
+ assert_eq!(tasks.len(), 0);
+
+ tasks.push(future::ready(1).into_stream());
+
+ assert_eq!(tasks.is_empty(), false);
+ assert_eq!(tasks.len(), 1);
+
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(Some(1)));
+ assert_eq!(tasks.is_terminated(), false);
+ assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None));
+ assert_eq!(tasks.is_terminated(), true);
+}
+
+#[test]
+fn issue_1626() {
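+ // Regression test: `select_all` must poll its streams fairly, interleaving items
+ // from `a` and `b` instead of draining one stream before the other.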
+ let a = stream::iter(0..=2);
+ let b = stream::iter(10..=14);
+
+ let mut s = block_on_stream(stream::select_all(vec![a, b]));
+
+ assert_eq!(s.next(), Some(0));
+ assert_eq!(s.next(), Some(10));
+ assert_eq!(s.next(), Some(1));
+ assert_eq!(s.next(), Some(11));
+ assert_eq!(s.next(), Some(2));
+ assert_eq!(s.next(), Some(12));
+ assert_eq!(s.next(), Some(13));
+ assert_eq!(s.next(), Some(14));
+ assert_eq!(s.next(), None);
+}
+
+#[test]
+fn works_1() {
+ let (a_tx, a_rx) = mpsc::unbounded::<u32>();
+ let (b_tx, b_rx) = mpsc::unbounded::<u32>();
+ let (c_tx, c_rx) = mpsc::unbounded::<u32>();
+
+ let streams = vec![a_rx, b_rx, c_rx];
+
+ let mut stream = block_on_stream(select_all(streams));
+
+ b_tx.unbounded_send(99).unwrap();
+ a_tx.unbounded_send(33).unwrap();
+ assert_eq!(Some(33), stream.next());
+ assert_eq!(Some(99), stream.next());
+
+ b_tx.unbounded_send(99).unwrap();
+ a_tx.unbounded_send(33).unwrap();
+ assert_eq!(Some(33), stream.next());
+ assert_eq!(Some(99), stream.next());
+
+ c_tx.unbounded_send(42).unwrap();
+ assert_eq!(Some(42), stream.next());
+ a_tx.unbounded_send(43).unwrap();
+ assert_eq!(Some(43), stream.next());
+
+ drop((a_tx, b_tx, c_tx));
+ assert_eq!(None, stream.next());
+}
+
+#[test]
+fn clear() {
+ let mut tasks =
+ select_all(vec![stream::iter(vec![1].into_iter()), stream::iter(vec![2].into_iter())]);
+
+ assert_eq!(block_on(tasks.next()), Some(1));
+ assert!(!tasks.is_empty());
+
+ tasks.clear();
+ assert!(tasks.is_empty());
+
+ tasks.push(stream::iter(vec![3].into_iter()));
+ assert!(!tasks.is_empty());
+
+ tasks.clear();
+ assert!(tasks.is_empty());
+
+ assert_eq!(block_on(tasks.next()), None);
+ assert!(tasks.is_terminated());
+ tasks.clear();
+ assert!(!tasks.is_terminated());
+}
+
+#[test]
+fn iter_mut() {
+ let mut stream =
+ vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()]
+ .into_iter()
+ .collect::<SelectAll<_>>();
+
+ let mut iter = stream.iter_mut();
+ assert_eq!(iter.len(), 3);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+
+ let mut stream = vec![stream::iter(vec![]), stream::iter(vec![1]), stream::iter(vec![2])]
+ .into_iter()
+ .collect::<SelectAll<_>>();
+
+ assert_eq!(stream.len(), 3);
+ assert_eq!(block_on(stream.next()), Some(1));
+ assert_eq!(stream.len(), 2);
+ let mut iter = stream.iter_mut();
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+
+ assert_eq!(block_on(stream.next()), Some(2));
+ assert_eq!(stream.len(), 2);
+ assert_eq!(block_on(stream.next()), None);
+ let mut iter = stream.iter_mut();
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn iter() {
+ let stream = vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()]
+ .into_iter()
+ .collect::<SelectAll<_>>();
+
+ let mut iter = stream.iter();
+ assert_eq!(iter.len(), 3);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+
+ let mut stream = vec![stream::iter(vec![]), stream::iter(vec![1]), stream::iter(vec![2])]
+ .into_iter()
+ .collect::<SelectAll<_>>();
+
+ assert_eq!(stream.len(), 3);
+ assert_eq!(block_on(stream.next()), Some(1));
+ assert_eq!(stream.len(), 2);
+ let mut iter = stream.iter();
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+
+ assert_eq!(block_on(stream.next()), Some(2));
+ assert_eq!(stream.len(), 2);
+ assert_eq!(block_on(stream.next()), None);
+ let mut iter = stream.iter();
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn into_iter() {
+ let stream = vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()]
+ .into_iter()
+ .collect::<SelectAll<_>>();
+
+ let mut iter = stream.into_iter();
+ assert_eq!(iter.len(), 3);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 2);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 1);
+ assert!(iter.next().is_some());
+ assert_eq!(iter.len(), 0);
+ assert!(iter.next().is_none());
+}
diff --git a/vendor/futures/tests/stream_select_next_some.rs b/vendor/futures/tests/stream_select_next_some.rs
new file mode 100644
index 000000000..8252ad7b5
--- /dev/null
+++ b/vendor/futures/tests/stream_select_next_some.rs
@@ -0,0 +1,86 @@
+use futures::executor::block_on;
+use futures::future::{self, FusedFuture, FutureExt};
+use futures::select;
+use futures::stream::{FuturesUnordered, StreamExt};
+use futures::task::{Context, Poll};
+use futures_test::future::FutureTestExt;
+use futures_test::task::new_count_waker;
+
+#[test]
+fn is_terminated() {
+ let (waker, counter) = new_count_waker();
+ let mut cx = Context::from_waker(&waker);
+
+ let mut tasks = FuturesUnordered::new();
+
+ let mut select_next_some = tasks.select_next_some();
+ assert_eq!(select_next_some.is_terminated(), false);
+ assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Pending);
+ assert_eq!(counter, 1);
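+ // Polling against an empty set returns `Pending`, but the task is woken immediately
+ // (hence the counter) and the future now reports itself as terminated.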
+ assert_eq!(select_next_some.is_terminated(), true);
+ drop(select_next_some);
+
+ tasks.push(future::ready(1));
+
+ let mut select_next_some = tasks.select_next_some();
+ assert_eq!(select_next_some.is_terminated(), false);
+ assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Ready(1));
+ assert_eq!(select_next_some.is_terminated(), false);
+ assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Pending);
+ assert_eq!(select_next_some.is_terminated(), true);
+}
+
+#[test]
+fn select() {
+ // Checks that even though `async_tasks` yields `None` and reports
+ // `is_terminated() == true` on the first poll, it toggles back to having items
+ // once a future is pushed into it during the second poll (after `pending_once`
+ // completes).
+ block_on(async {
+ let mut fut = future::ready(1).pending_once();
+ let mut async_tasks = FuturesUnordered::new();
+ let mut total = 0;
+ loop {
+ select! {
+ num = fut => {
+ total += num;
+ async_tasks.push(async { 5 });
+ },
+ num = async_tasks.select_next_some() => {
+ total += num;
+ }
+ complete => break,
+ }
+ }
+ assert_eq!(total, 6);
+ });
+}
+
+// Check that the `select!` macro also works when imported directly from `futures_util`.
+#[test]
+fn futures_util_select() {
+ use futures_util::select;
+
+ // Checks that even though `async_tasks` yields `None` and reports
+ // `is_terminated() == true` on the first poll, it toggles back to having items
+ // once a future is pushed into it during the second poll (after `pending_once`
+ // completes).
+ block_on(async {
+ let mut fut = future::ready(1).pending_once();
+ let mut async_tasks = FuturesUnordered::new();
+ let mut total = 0;
+ loop {
+ select! {
+ num = fut => {
+ total += num;
+ async_tasks.push(async { 5 });
+ },
+ num = async_tasks.select_next_some() => {
+ total += num;
+ }
+ complete => break,
+ }
+ }
+ assert_eq!(total, 6);
+ });
+}
diff --git a/vendor/futures/tests/stream_split.rs b/vendor/futures/tests/stream_split.rs
new file mode 100644
index 000000000..694c15180
--- /dev/null
+++ b/vendor/futures/tests/stream_split.rs
@@ -0,0 +1,57 @@
+use futures::executor::block_on;
+use futures::sink::{Sink, SinkExt};
+use futures::stream::{self, Stream, StreamExt};
+use futures::task::{Context, Poll};
+use pin_project::pin_project;
+use std::pin::Pin;
+
+#[test]
+fn test_split() {
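+ // `Join` implements both `Stream` and `Sink`, so it can be split into halves and the
+ // halves reunited; the test round-trips through split/reunite/split before forwarding.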
+ #[pin_project]
+ struct Join<T, U> {
+ #[pin]
+ stream: T,
+ #[pin]
+ sink: U,
+ }
+
+ impl<T: Stream, U> Stream for Join<T, U> {
+ type Item = T::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
+ self.project().stream.poll_next(cx)
+ }
+ }
+
+ impl<T, U: Sink<Item>, Item> Sink<Item> for Join<T, U> {
+ type Error = U::Error;
+
+ fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.project().sink.poll_ready(cx)
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
+ self.project().sink.start_send(item)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.project().sink.poll_flush(cx)
+ }
+
+ fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ self.project().sink.poll_close(cx)
+ }
+ }
+
+ let mut dest: Vec<i32> = Vec::new();
+ {
+ let join = Join { stream: stream::iter(vec![10, 20, 30]), sink: &mut dest };
+
+ let (sink, stream) = join.split();
+ let join = sink.reunite(stream).expect("test_split: reunite error");
+ let (mut sink, stream) = join.split();
+ let mut stream = stream.map(Ok);
+ block_on(sink.send_all(&mut stream)).unwrap();
+ }
+ assert_eq!(dest, vec![10, 20, 30]);
+}
diff --git a/vendor/futures/tests/stream_try_stream.rs b/vendor/futures/tests/stream_try_stream.rs
new file mode 100644
index 000000000..194e74db7
--- /dev/null
+++ b/vendor/futures/tests/stream_try_stream.rs
@@ -0,0 +1,38 @@
+use futures::{
+ stream::{self, StreamExt, TryStreamExt},
+ task::Poll,
+};
+use futures_test::task::noop_context;
+
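+// Once the wrapped closure returns an error, each of these `try_` adapters must treat the
+// stream as finished: the next poll yields `None` instead of resuming with the remaining items.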
+#[test]
+fn try_filter_map_after_err() {
+ let cx = &mut noop_context();
+ let mut s = stream::iter(1..=3)
+ .map(Ok)
+ .try_filter_map(|v| async move { Err::<Option<()>, _>(v) })
+ .filter_map(|r| async move { r.ok() })
+ .boxed();
+ assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx));
+}
+
+#[test]
+fn try_skip_while_after_err() {
+ let cx = &mut noop_context();
+ let mut s = stream::iter(1..=3)
+ .map(Ok)
+ .try_skip_while(|_| async move { Err::<_, ()>(()) })
+ .filter_map(|r| async move { r.ok() })
+ .boxed();
+ assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx));
+}
+
+#[test]
+fn try_take_while_after_err() {
+ let cx = &mut noop_context();
+ let mut s = stream::iter(1..=3)
+ .map(Ok)
+ .try_take_while(|_| async move { Err::<_, ()>(()) })
+ .filter_map(|r| async move { r.ok() })
+ .boxed();
+ assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx));
+}
diff --git a/vendor/futures/tests/stream_unfold.rs b/vendor/futures/tests/stream_unfold.rs
new file mode 100644
index 000000000..16b10813b
--- /dev/null
+++ b/vendor/futures/tests/stream_unfold.rs
@@ -0,0 +1,32 @@
+use futures::future;
+use futures::stream;
+use futures_test::future::FutureTestExt;
+use futures_test::{assert_stream_done, assert_stream_next, assert_stream_pending};
+
+#[test]
+fn unfold1() {
+ let mut stream = stream::unfold(0, |state| {
+ if state <= 2 {
+ future::ready(Some((state * 2, state + 1))).pending_once()
+ } else {
+ future::ready(None).pending_once()
+ }
+ });
+
+ // Creates the future with the closure
+ // Not ready (delayed future)
+ assert_stream_pending!(stream);
+ // Future is ready, yields the item
+ assert_stream_next!(stream, 0);
+
+ // Repeat
+ assert_stream_pending!(stream);
+ assert_stream_next!(stream, 2);
+
+ assert_stream_pending!(stream);
+ assert_stream_next!(stream, 4);
+
+ // No more items
+ assert_stream_pending!(stream);
+ assert_stream_done!(stream);
+}
diff --git a/vendor/futures/tests/task_arc_wake.rs b/vendor/futures/tests/task_arc_wake.rs
new file mode 100644
index 000000000..aedc15bcb
--- /dev/null
+++ b/vendor/futures/tests/task_arc_wake.rs
@@ -0,0 +1,79 @@
+use futures::task::{self, ArcWake, Waker};
+use std::panic;
+use std::sync::{Arc, Mutex};
+
+struct CountingWaker {
+ nr_wake: Mutex<i32>,
+}
+
+impl CountingWaker {
+ fn new() -> Self {
+ Self { nr_wake: Mutex::new(0) }
+ }
+
+ fn wakes(&self) -> i32 {
+ *self.nr_wake.lock().unwrap()
+ }
+}
+
+impl ArcWake for CountingWaker {
+ fn wake_by_ref(arc_self: &Arc<Self>) {
+ let mut lock = arc_self.nr_wake.lock().unwrap();
+ *lock += 1;
+ }
+}
+
+#[test]
+fn create_from_arc() {
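+ // Each `Waker` created from the Arc (and each clone of it) holds one strong
+ // reference, and `wake_by_ref` must not consume that reference.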
+ let some_w = Arc::new(CountingWaker::new());
+
+ let w1: Waker = task::waker(some_w.clone());
+ assert_eq!(2, Arc::strong_count(&some_w));
+ w1.wake_by_ref();
+ assert_eq!(1, some_w.wakes());
+
+ let w2 = w1.clone();
+ assert_eq!(3, Arc::strong_count(&some_w));
+
+ w2.wake_by_ref();
+ assert_eq!(2, some_w.wakes());
+
+ drop(w2);
+ assert_eq!(2, Arc::strong_count(&some_w));
+ drop(w1);
+ assert_eq!(1, Arc::strong_count(&some_w));
+}
+
+#[test]
+fn ref_wake_same() {
+ let some_w = Arc::new(CountingWaker::new());
+
+ let w1: Waker = task::waker(some_w.clone());
+ let w2 = task::waker_ref(&some_w);
+ let w3 = w2.clone();
+
+ assert!(w1.will_wake(&w2));
+ assert!(w2.will_wake(&w3));
+}
+
+#[test]
+fn proper_refcount_on_wake_panic() {
+ struct PanicWaker;
+
+ impl ArcWake for PanicWaker {
+ fn wake_by_ref(_arc_self: &Arc<Self>) {
+ panic!("WAKE UP");
+ }
+ }
+
+ let some_w = Arc::new(PanicWaker);
+
+ let w1: Waker = task::waker(some_w.clone());
+ assert_eq!(
+ "WAKE UP",
+ *panic::catch_unwind(|| w1.wake_by_ref()).unwrap_err().downcast::<&str>().unwrap()
+ );
+ assert_eq!(2, Arc::strong_count(&some_w)); // some_w + w1
+ drop(w1);
+ assert_eq!(1, Arc::strong_count(&some_w)); // some_w
+}
diff --git a/vendor/futures/tests/task_atomic_waker.rs b/vendor/futures/tests/task_atomic_waker.rs
new file mode 100644
index 000000000..cec3db287
--- /dev/null
+++ b/vendor/futures/tests/task_atomic_waker.rs
@@ -0,0 +1,48 @@
+use futures::executor::block_on;
+use futures::future::poll_fn;
+use futures::task::{AtomicWaker, Poll};
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+use std::thread;
+
+#[test]
+fn basic() {
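+ // The spawned thread registers its waker with the `AtomicWaker` and parks inside
+ // `block_on`; the main thread then sets the flag and calls `wake()`, which must wake
+ // that task so its second poll observes the flag and completes.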
+ let atomic_waker = Arc::new(AtomicWaker::new());
+ let atomic_waker_copy = atomic_waker.clone();
+
+ let returned_pending = Arc::new(AtomicUsize::new(0));
+ let returned_pending_copy = returned_pending.clone();
+
+ let woken = Arc::new(AtomicUsize::new(0));
+ let woken_copy = woken.clone();
+
+ let t = thread::spawn(move || {
+ let mut pending_count = 0;
+
+ block_on(poll_fn(move |cx| {
+ if woken_copy.load(Ordering::Relaxed) == 1 {
+ Poll::Ready(())
+ } else {
+ // Assert we return pending exactly once
+ assert_eq!(0, pending_count);
+ pending_count += 1;
+ atomic_waker_copy.register(cx.waker());
+
+ returned_pending_copy.store(1, Ordering::Relaxed);
+
+ Poll::Pending
+ }
+ }))
+ });
+
+ while returned_pending.load(Ordering::Relaxed) == 0 {}
+
+ // give spawned thread some time to sleep in `block_on`
+ thread::yield_now();
+
+ woken.store(1, Ordering::Relaxed);
+ atomic_waker.wake();
+
+ t.join().unwrap();
+}
diff --git a/vendor/futures/tests/test_macro.rs b/vendor/futures/tests/test_macro.rs
new file mode 100644
index 000000000..6adf51d8b
--- /dev/null
+++ b/vendor/futures/tests/test_macro.rs
@@ -0,0 +1,20 @@
+#[futures_test::test]
+async fn it_works() {
+ let fut = async { true };
+ assert!(fut.await);
+
+ let fut = async { false };
+ assert!(!fut.await);
+}
+
+#[should_panic]
+#[futures_test::test]
+async fn it_is_being_run() {
+ let fut = async { false };
+ assert!(fut.await);
+}
+
+#[futures_test::test]
+async fn return_ty() -> Result<(), ()> {
+ Ok(())
+}
diff --git a/vendor/futures/tests/try_join.rs b/vendor/futures/tests/try_join.rs
new file mode 100644
index 000000000..0281ab897
--- /dev/null
+++ b/vendor/futures/tests/try_join.rs
@@ -0,0 +1,35 @@
+#![deny(unreachable_code)]
+
+use futures::{executor::block_on, try_join};
+
+// TODO: This abuses https://github.com/rust-lang/rust/issues/58733 in order to
+// test behavior of the `try_join!` macro with the never type before it is
+// stabilized. Once `!` is again stabilized this can be removed and replaced
+// with direct use of `!` below where `Never` is used.
+trait MyTrait {
+ type Output;
+}
+impl<T> MyTrait for fn() -> T {
+ type Output = T;
+}
+type Never = <fn() -> ! as MyTrait>::Output;
+
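+// `try_join_never_error` puts `!` in the error position and `try_join_never_ok` puts it in the
+// ok position, checking that the `try_join!` expansion type-checks and does not trip the
+// `unreachable_code` lint denied at the top of this file.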
+#[test]
+fn try_join_never_error() {
+ block_on(async {
+ let future1 = async { Ok::<(), Never>(()) };
+ let future2 = async { Ok::<(), Never>(()) };
+ try_join!(future1, future2)
+ })
+ .unwrap();
+}
+
+#[test]
+fn try_join_never_ok() {
+ block_on(async {
+ let future1 = async { Err::<Never, ()>(()) };
+ let future2 = async { Err::<Never, ()>(()) };
+ try_join!(future1, future2)
+ })
+ .unwrap_err();
+}
diff --git a/vendor/futures/tests_disabled/all.rs b/vendor/futures/tests_disabled/all.rs
new file mode 100644
index 000000000..a7a571040
--- /dev/null
+++ b/vendor/futures/tests_disabled/all.rs
@@ -0,0 +1,400 @@
+use futures::channel::oneshot::{self, Canceled};
+use futures::executor::block_on;
+use futures::future;
+use std::sync::mpsc::{channel, TryRecvError};
+
+// mod support;
+// use support::*;
+
+fn unselect<T, E, A, B>(r: Result<Either<(T, B), (T, A)>, Either<(E, B), (E, A)>>) -> Result<T, E> {
+ match r {
+ Ok(Either::Left((t, _))) | Ok(Either::Right((t, _))) => Ok(t),
+ Err(Either::Left((e, _))) | Err(Either::Right((e, _))) => Err(e),
+ }
+}
+
+#[test]
+fn result_smoke() {
+ fn is_future_v<A, B, C>(_: C)
+ where
+ A: Send + 'static,
+ B: Send + 'static,
+ C: Future<Item = A, Error = B>,
+ {
+ }
+
+ is_future_v::<i32, u32, _>(f_ok(1).map(|a| a + 1));
+ is_future_v::<i32, u32, _>(f_ok(1).map_err(|a| a + 1));
+ is_future_v::<i32, u32, _>(f_ok(1).and_then(Ok));
+ is_future_v::<i32, u32, _>(f_ok(1).or_else(Err));
+ is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3)));
+ is_future_v::<i32, u32, _>(f_ok(1).map(f_ok).flatten());
+
+ assert_done(|| f_ok(1), r_ok(1));
+ assert_done(|| f_err(1), r_err(1));
+ assert_done(|| result(Ok(1)), r_ok(1));
+ assert_done(|| result(Err(1)), r_err(1));
+ assert_done(|| ok(1), r_ok(1));
+ assert_done(|| err(1), r_err(1));
+ assert_done(|| f_ok(1).map(|a| a + 2), r_ok(3));
+ assert_done(|| f_err(1).map(|a| a + 2), r_err(1));
+ assert_done(|| f_ok(1).map_err(|a| a + 2), r_ok(1));
+ assert_done(|| f_err(1).map_err(|a| a + 2), r_err(3));
+ assert_done(|| f_ok(1).and_then(|a| Ok(a + 2)), r_ok(3));
+ assert_done(|| f_err(1).and_then(|a| Ok(a + 2)), r_err(1));
+ assert_done(|| f_ok(1).and_then(|a| Err(a as u32 + 3)), r_err(4));
+ assert_done(|| f_err(1).and_then(|a| Err(a as u32 + 4)), r_err(1));
+ assert_done(|| f_ok(1).or_else(|a| Ok(a as i32 + 2)), r_ok(1));
+ assert_done(|| f_err(1).or_else(|a| Ok(a as i32 + 2)), r_ok(3));
+ assert_done(|| f_ok(1).or_else(|a| Err(a + 3)), r_ok(1));
+ assert_done(|| f_err(1).or_else(|a| Err(a + 4)), r_err(5));
+ assert_done(|| f_ok(1).select(f_err(2)).then(unselect), r_ok(1));
+ assert_done(|| f_ok(1).select(Ok(2)).then(unselect), r_ok(1));
+ assert_done(|| f_err(1).select(f_ok(1)).then(unselect), r_err(1));
+ assert_done(|| f_ok(1).select(empty()).then(unselect), Ok(1));
+ assert_done(|| empty().select(f_ok(1)).then(unselect), Ok(1));
+ assert_done(|| f_ok(1).join(f_err(1)), Err(1));
+ assert_done(|| f_ok(1).join(Ok(2)), Ok((1, 2)));
+ assert_done(|| f_err(1).join(f_ok(1)), Err(1));
+ assert_done(|| f_ok(1).then(|_| Ok(2)), r_ok(2));
+ assert_done(|| f_ok(1).then(|_| Err(2)), r_err(2));
+ assert_done(|| f_err(1).then(|_| Ok(2)), r_ok(2));
+ assert_done(|| f_err(1).then(|_| Err(2)), r_err(2));
+}
+
+#[test]
+fn test_empty() {
+ fn empty() -> Empty<i32, u32> {
+ future::empty()
+ }
+
+ assert_empty(|| empty());
+ assert_empty(|| empty().select(empty()));
+ assert_empty(|| empty().join(empty()));
+ assert_empty(|| empty().join(f_ok(1)));
+ assert_empty(|| f_ok(1).join(empty()));
+ assert_empty(|| empty().or_else(move |_| empty()));
+ assert_empty(|| empty().and_then(move |_| empty()));
+ assert_empty(|| f_err(1).or_else(move |_| empty()));
+ assert_empty(|| f_ok(1).and_then(move |_| empty()));
+ assert_empty(|| empty().map(|a| a + 1));
+ assert_empty(|| empty().map_err(|a| a + 1));
+ assert_empty(|| empty().then(|a| a));
+}
+
+#[test]
+fn test_ok() {
+ assert_done(|| ok(1), r_ok(1));
+ assert_done(|| err(1), r_err(1));
+}
+
+#[test]
+fn flatten() {
+ fn ok<T: Send + 'static>(a: T) -> FutureResult<T, u32> {
+ future::ok(a)
+ }
+ fn err<E: Send + 'static>(b: E) -> FutureResult<i32, E> {
+ future::err(b)
+ }
+
+ assert_done(|| ok(ok(1)).flatten(), r_ok(1));
+ assert_done(|| ok(err(1)).flatten(), r_err(1));
+ assert_done(|| err(1u32).map(ok).flatten(), r_err(1));
+ assert_done(|| future::ok(future::ok(1)).flatten(), r_ok(1));
+ assert_empty(|| ok(empty::<i32, u32>()).flatten());
+ assert_empty(|| empty::<i32, u32>().map(ok).flatten());
+}
+
+#[test]
+fn smoke_oneshot() {
+ assert_done(
+ || {
+ let (c, p) = oneshot::channel();
+ c.send(1).unwrap();
+ p
+ },
+ Ok(1),
+ );
+ assert_done(
+ || {
+ let (c, p) = oneshot::channel::<i32>();
+ drop(c);
+ p
+ },
+ Err(Canceled),
+ );
+ let mut completes = Vec::new();
+ assert_empty(|| {
+ let (a, b) = oneshot::channel::<i32>();
+ completes.push(a);
+ b
+ });
+
+ let (c, mut p) = oneshot::channel::<i32>();
+ drop(c);
+ let res = panic_waker_lw(|lw| p.poll(lw));
+ assert!(res.is_err());
+ let (c, p) = oneshot::channel::<i32>();
+ drop(c);
+ let (tx, rx) = channel();
+ p.then(move |_| tx.send(())).forget();
+ rx.recv().unwrap();
+}
+
+#[test]
+fn select_cancels() {
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |b| {
+ btx.send(b).unwrap();
+ b
+ });
+ let d = d.map(move |d| {
+ dtx.send(d).unwrap();
+ d
+ });
+
+ let mut f = b.select(d).then(unselect);
+ // assert!(f.poll(&mut Task::new()).is_pending());
+ assert!(brx.try_recv().is_err());
+ assert!(drx.try_recv().is_err());
+ a.send(1).unwrap();
+ noop_waker_lw(|lw| {
+ let res = f.poll(lw);
+ assert!(res.ok().unwrap().is_ready());
+ assert_eq!(brx.recv().unwrap(), 1);
+ drop(c);
+ assert!(drx.recv().is_err());
+
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |b| {
+ btx.send(b).unwrap();
+ b
+ });
+ let d = d.map(move |d| {
+ dtx.send(d).unwrap();
+ d
+ });
+
+ let mut f = b.select(d).then(unselect);
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ a.send(1).unwrap();
+ assert!(f.poll(lw).ok().unwrap().is_ready());
+ drop((c, f));
+ assert!(drx.recv().is_err());
+ })
+}
+
+#[test]
+fn join_cancels() {
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |b| {
+ btx.send(b).unwrap();
+ b
+ });
+ let d = d.map(move |d| {
+ dtx.send(d).unwrap();
+ d
+ });
+
+ let mut f = b.join(d);
+ drop(a);
+ let res = panic_waker_lw(|lw| f.poll(lw));
+ assert!(res.is_err());
+ drop(c);
+ assert!(drx.recv().is_err());
+
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |b| {
+ btx.send(b).unwrap();
+ b
+ });
+ let d = d.map(move |d| {
+ dtx.send(d).unwrap();
+ d
+ });
+
+ let (tx, rx) = channel();
+ let f = b.join(d);
+ f.then(move |_| {
+ tx.send(()).unwrap();
+ let res: Result<(), ()> = Ok(());
+ res
+ })
+ .forget();
+ assert!(rx.try_recv().is_err());
+ drop(a);
+ rx.recv().unwrap();
+ drop(c);
+ assert!(drx.recv().is_err());
+}
+
+#[test]
+fn join_incomplete() {
+ let (a, b) = oneshot::channel::<i32>();
+ let (tx, rx) = channel();
+ noop_waker_lw(|lw| {
+ let mut f = ok(1).join(b).map(move |r| tx.send(r).unwrap());
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ assert!(rx.try_recv().is_err());
+ a.send(2).unwrap();
+ assert!(f.poll(lw).ok().unwrap().is_ready());
+ assert_eq!(rx.recv().unwrap(), (1, 2));
+
+ let (a, b) = oneshot::channel::<i32>();
+ let (tx, rx) = channel();
+ let mut f = b.join(Ok(2)).map(move |r| tx.send(r).unwrap());
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ assert!(rx.try_recv().is_err());
+ a.send(1).unwrap();
+ assert!(f.poll(lw).ok().unwrap().is_ready());
+ assert_eq!(rx.recv().unwrap(), (1, 2));
+
+ let (a, b) = oneshot::channel::<i32>();
+ let (tx, rx) = channel();
+ let mut f = ok(1).join(b).map_err(move |_r| tx.send(2).unwrap());
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ assert!(rx.try_recv().is_err());
+ drop(a);
+ assert!(f.poll(lw).is_err());
+ assert_eq!(rx.recv().unwrap(), 2);
+
+ let (a, b) = oneshot::channel::<i32>();
+ let (tx, rx) = channel();
+ let mut f = b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap());
+ assert!(f.poll(lw).ok().unwrap().is_pending());
+ assert!(rx.try_recv().is_err());
+ drop(a);
+ assert!(f.poll(lw).is_err());
+ assert_eq!(rx.recv().unwrap(), 1);
+ })
+}
+
+#[test]
+fn select2() {
+ assert_done(|| f_ok(2).select(empty()).then(unselect), Ok(2));
+ assert_done(|| empty().select(f_ok(2)).then(unselect), Ok(2));
+ assert_done(|| f_err(2).select(empty()).then(unselect), Err(2));
+ assert_done(|| empty().select(f_err(2)).then(unselect), Err(2));
+
+ assert_done(
+ || {
+ f_ok(1).select(f_ok(2)).map_err(|_| 0).and_then(|either_tup| {
+ let (a, b) = either_tup.into_inner();
+ b.map(move |b| a + b)
+ })
+ },
+ Ok(3),
+ );
+
+ // Finish one half of a select and then fail the second, ensuring that we
+ // get the notification of the second one.
+ {
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let f = b.select(d);
+ let (tx, rx) = channel();
+ f.map(move |r| tx.send(r).unwrap()).forget();
+ a.send(1).unwrap();
+ let (val, next) = rx.recv().unwrap().into_inner();
+ assert_eq!(val, 1);
+ let (tx, rx) = channel();
+ next.map_err(move |_r| tx.send(2).unwrap()).forget();
+ assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty);
+ drop(c);
+ assert_eq!(rx.recv().unwrap(), 2);
+ }
+
+ // Fail the second half and ensure that we see the first one finish
+ {
+ let ((a, b), (c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let f = b.select(d);
+ let (tx, rx) = channel();
+ f.map_err(move |r| tx.send((1, r.into_inner().1)).unwrap()).forget();
+ drop(c);
+ let (val, next) = rx.recv().unwrap();
+ assert_eq!(val, 1);
+ let (tx, rx) = channel();
+ next.map(move |r| tx.send(r).unwrap()).forget();
+ assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty);
+ a.send(2).unwrap();
+ assert_eq!(rx.recv().unwrap(), 2);
+ }
+
+ // Cancelling the first half should cancel the second
+ {
+ let ((_a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |v| {
+ btx.send(v).unwrap();
+ v
+ });
+ let d = d.map(move |v| {
+ dtx.send(v).unwrap();
+ v
+ });
+ let f = b.select(d);
+ drop(f);
+ assert!(drx.recv().is_err());
+ assert!(brx.recv().is_err());
+ }
+
+ // Cancel after a schedule
+ {
+ let ((_a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |v| {
+ btx.send(v).unwrap();
+ v
+ });
+ let d = d.map(move |v| {
+ dtx.send(v).unwrap();
+ v
+ });
+ let mut f = b.select(d);
+ let _res = noop_waker_lw(|lw| f.poll(lw));
+ drop(f);
+ assert!(drx.recv().is_err());
+ assert!(brx.recv().is_err());
+ }
+
+ // Cancel propagates
+ {
+ let ((a, b), (_c, d)) = (oneshot::channel::<i32>(), oneshot::channel::<i32>());
+ let ((btx, brx), (dtx, drx)) = (channel(), channel());
+ let b = b.map(move |v| {
+ btx.send(v).unwrap();
+ v
+ });
+ let d = d.map(move |v| {
+ dtx.send(v).unwrap();
+ v
+ });
+ let (tx, rx) = channel();
+ b.select(d).map(move |_| tx.send(()).unwrap()).forget();
+ drop(a);
+ assert!(drx.recv().is_err());
+ assert!(brx.recv().is_err());
+ assert!(rx.recv().is_err());
+ }
+
+ // Cancel on early drop
+ {
+ let (tx, rx) = channel();
+ let f = f_ok(1).select(empty::<_, ()>().map(move |()| {
+ tx.send(()).unwrap();
+ 1
+ }));
+ drop(f);
+ assert!(rx.recv().is_err());
+ }
+}
+
+#[test]
+fn option() {
+ assert_eq!(Ok(Some(())), block_on(Some(ok::<(), ()>(())).into_future()));
+ assert_eq!(Ok::<_, ()>(None::<()>), block_on(None::<FutureResult<(), ()>>.into_future()));
+}
diff --git a/vendor/futures/tests_disabled/bilock.rs b/vendor/futures/tests_disabled/bilock.rs
new file mode 100644
index 000000000..0166ca48b
--- /dev/null
+++ b/vendor/futures/tests_disabled/bilock.rs
@@ -0,0 +1,102 @@
+use futures::future;
+use futures::stream;
+use futures::task;
+use futures_util::lock::BiLock;
+use std::thread;
+
+// mod support;
+// use support::*;
+
+#[test]
+fn smoke() {
+ let future = future::lazy(|_| {
+ let (a, b) = BiLock::new(1);
+
+ {
+ let mut lock = match a.poll_lock() {
+ Poll::Ready(l) => l,
+ Poll::Pending => panic!("poll not ready"),
+ };
+ assert_eq!(*lock, 1);
+ *lock = 2;
+
+ assert!(b.poll_lock().is_pending());
+ assert!(a.poll_lock().is_pending());
+ }
+
+ assert!(b.poll_lock().is_ready());
+ assert!(a.poll_lock().is_ready());
+
+ {
+ let lock = match b.poll_lock() {
+ Poll::Ready(l) => l,
+ Poll::Pending => panic!("poll not ready"),
+ };
+ assert_eq!(*lock, 2);
+ }
+
+ assert_eq!(a.reunite(b).expect("bilock/smoke: reunite error"), 2);
+
+ Ok::<(), ()>(())
+ });
+
+ assert!(task::spawn(future)
+ .poll_future_notify(&notify_noop(), 0)
+ .expect("failure in poll")
+ .is_ready());
+}
+
+#[test]
+fn concurrent() {
+ const N: usize = 10000;
+ let (a, b) = BiLock::new(0);
+
+ let a = Increment { a: Some(a), remaining: N };
+ let b = stream::iter_ok(0..N).fold(b, |b, _n| {
+ b.lock().map(|mut b| {
+ *b += 1;
+ b.unlock()
+ })
+ });
+
+ let t1 = thread::spawn(move || a.wait());
+ let b = b.wait().expect("b error");
+ let a = t1.join().unwrap().expect("a error");
+
+ match a.poll_lock() {
+ Poll::Ready(l) => assert_eq!(*l, 2 * N),
+ Poll::Pending => panic!("poll not ready"),
+ }
+ match b.poll_lock() {
+ Poll::Ready(l) => assert_eq!(*l, 2 * N),
+ Poll::Pending => panic!("poll not ready"),
+ }
+
+ assert_eq!(a.reunite(b).expect("bilock/concurrent: reunite error"), 2 * N);
+
+ struct Increment {
+ remaining: usize,
+ a: Option<BiLock<usize>>,
+ }
+
+ impl Future for Increment {
+ type Item = BiLock<usize>;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<BiLock<usize>, ()> {
+ loop {
+ if self.remaining == 0 {
+ return Ok(self.a.take().unwrap().into());
+ }
+
+ let a = self.a.as_ref().unwrap();
+ let mut a = match a.poll_lock() {
+ Poll::Ready(l) => l,
+ Poll::Pending => return Ok(Poll::Pending),
+ };
+ self.remaining -= 1;
+ *a += 1;
+ }
+ }
+ }
+}
diff --git a/vendor/futures/tests_disabled/stream.rs b/vendor/futures/tests_disabled/stream.rs
new file mode 100644
index 000000000..854dbad82
--- /dev/null
+++ b/vendor/futures/tests_disabled/stream.rs
@@ -0,0 +1,369 @@
+use futures::channel::mpsc;
+use futures::channel::oneshot;
+use futures::executor::{block_on, block_on_stream};
+use futures::future::{err, ok};
+use futures::stream::{empty, iter_ok, poll_fn, Peekable};
+
+// mod support;
+// use support::*;
+
+pub struct Iter<I> {
+ iter: I,
+}
+
+pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
+where
+ J: IntoIterator<Item = Result<T, E>>,
+{
+ Iter { iter: i.into_iter() }
+}
+
+impl<I, T, E> Stream for Iter<I>
+where
+ I: Iterator<Item = Result<T, E>>,
+{
+ type Item = T;
+ type Error = E;
+
+ fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<T>, E> {
+ match self.iter.next() {
+ Some(Ok(e)) => Ok(Poll::Ready(Some(e))),
+ Some(Err(e)) => Err(e),
+ None => Ok(Poll::Ready(None)),
+ }
+ }
+}
+
+fn list() -> Box<Stream<Item = i32, Error = u32> + Send> {
+ let (tx, rx) = mpsc::channel(1);
+ tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Ok(3))).forget();
+ Box::new(rx.then(|r| r.unwrap()))
+}
+
+fn err_list() -> Box<Stream<Item = i32, Error = u32> + Send> {
+ let (tx, rx) = mpsc::channel(1);
+ tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Err(3))).forget();
+ Box::new(rx.then(|r| r.unwrap()))
+}
+
+#[test]
+fn map() {
+ assert_done(|| list().map(|a| a + 1).collect(), Ok(vec![2, 3, 4]));
+}
+
+#[test]
+fn map_err() {
+ assert_done(|| err_list().map_err(|a| a + 1).collect::<Vec<_>>(), Err(4));
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+struct FromErrTest(u32);
+
+impl From<u32> for FromErrTest {
+ fn from(i: u32) -> Self {
+ Self(i)
+ }
+}
+
+#[test]
+fn from_err() {
+ assert_done(|| err_list().err_into().collect::<Vec<_>>(), Err(FromErrTest(3)));
+}
+
+#[test]
+fn fold() {
+ assert_done(|| list().fold(0, |a, b| ok::<i32, u32>(a + b)), Ok(6));
+ assert_done(|| err_list().fold(0, |a, b| ok::<i32, u32>(a + b)), Err(3));
+}
+
+#[test]
+fn filter() {
+ assert_done(|| list().filter(|a| ok(*a % 2 == 0)).collect(), Ok(vec![2]));
+}
+
+#[test]
+fn filter_map() {
+ assert_done(
+ || list().filter_map(|x| ok(if x % 2 == 0 { Some(x + 10) } else { None })).collect(),
+ Ok(vec![12]),
+ );
+}
+
+#[test]
+fn and_then() {
+ assert_done(|| list().and_then(|a| Ok(a + 1)).collect(), Ok(vec![2, 3, 4]));
+ assert_done(|| list().and_then(|a| err::<i32, u32>(a as u32)).collect::<Vec<_>>(), Err(1));
+}
+
+#[test]
+fn then() {
+ assert_done(|| list().then(|a| a.map(|e| e + 1)).collect(), Ok(vec![2, 3, 4]));
+}
+
+#[test]
+fn or_else() {
+ assert_done(|| err_list().or_else(|a| ok::<i32, u32>(a as i32)).collect(), Ok(vec![1, 2, 3]));
+}
+
+#[test]
+fn flatten() {
+ assert_done(|| list().map(|_| list()).flatten().collect(), Ok(vec![1, 2, 3, 1, 2, 3, 1, 2, 3]));
+}
+
+#[test]
+fn skip() {
+ assert_done(|| list().skip(2).collect(), Ok(vec![3]));
+}
+
+#[test]
+fn skip_passes_errors_through() {
+ let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Ok(5)]).skip(1));
+ assert_eq!(s.next(), Some(Err(1)));
+ assert_eq!(s.next(), Some(Err(2)));
+ assert_eq!(s.next(), Some(Ok(4)));
+ assert_eq!(s.next(), Some(Ok(5)));
+ assert_eq!(s.next(), None);
+}
+
+#[test]
+fn skip_while() {
+ assert_done(|| list().skip_while(|e| Ok(*e % 2 == 1)).collect(), Ok(vec![2, 3]));
+}
+
+#[test]
+fn take() {
+ assert_done(|| list().take(2).collect(), Ok(vec![1, 2]));
+}
+
+#[test]
+fn take_while() {
+ assert_done(|| list().take_while(|e| Ok(*e < 3)).collect(), Ok(vec![1, 2]));
+}
+
+#[test]
+fn take_passes_errors_through() {
+ let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Err(4)]).take(1));
+ assert_eq!(s.next(), Some(Err(1)));
+ assert_eq!(s.next(), Some(Err(2)));
+ assert_eq!(s.next(), Some(Ok(3)));
+ assert_eq!(s.next(), None);
+
+ let mut s = block_on_stream(iter(vec![Ok(1), Err(2)]).take(1));
+ assert_eq!(s.next(), Some(Ok(1)));
+ assert_eq!(s.next(), None);
+}
+
+#[test]
+fn peekable() {
+ assert_done(|| list().peekable().collect(), Ok(vec![1, 2, 3]));
+}
+
+#[test]
+fn fuse() {
+ let mut stream = block_on_stream(list().fuse());
+ assert_eq!(stream.next(), Some(Ok(1)));
+ assert_eq!(stream.next(), Some(Ok(2)));
+ assert_eq!(stream.next(), Some(Ok(3)));
+ assert_eq!(stream.next(), None);
+ assert_eq!(stream.next(), None);
+ assert_eq!(stream.next(), None);
+}
+
+#[test]
+fn buffered() {
+ let (tx, rx) = mpsc::channel(1);
+ let (a, b) = oneshot::channel::<u32>();
+ let (c, d) = oneshot::channel::<u32>();
+
+ tx.send(Box::new(b.recover(|_| panic!())) as Box<Future<Item = _, Error = _> + Send>)
+ .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!()))))
+ .forget();
+
+ let mut rx = rx.buffered(2);
+ sassert_empty(&mut rx);
+ c.send(3).unwrap();
+ sassert_empty(&mut rx);
+ a.send(5).unwrap();
+ let mut rx = block_on_stream(rx);
+ assert_eq!(rx.next(), Some(Ok(5)));
+ assert_eq!(rx.next(), Some(Ok(3)));
+ assert_eq!(rx.next(), None);
+
+ let (tx, rx) = mpsc::channel(1);
+ let (a, b) = oneshot::channel::<u32>();
+ let (c, d) = oneshot::channel::<u32>();
+
+ tx.send(Box::new(b.recover(|_| panic!())) as Box<Future<Item = _, Error = _> + Send>)
+ .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!()))))
+ .forget();
+
+ let mut rx = rx.buffered(1);
+ sassert_empty(&mut rx);
+ c.send(3).unwrap();
+ sassert_empty(&mut rx);
+ a.send(5).unwrap();
+ let mut rx = block_on_stream(rx);
+ assert_eq!(rx.next(), Some(Ok(5)));
+ assert_eq!(rx.next(), Some(Ok(3)));
+ assert_eq!(rx.next(), None);
+}
+
+#[test]
+fn unordered() {
+ let (tx, rx) = mpsc::channel(1);
+ let (a, b) = oneshot::channel::<u32>();
+ let (c, d) = oneshot::channel::<u32>();
+
+ tx.send(Box::new(b.recover(|_| panic!())) as Box<Future<Item = _, Error = _> + Send>)
+ .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!()))))
+ .forget();
+
+ let mut rx = rx.buffer_unordered(2);
+ sassert_empty(&mut rx);
+ let mut rx = block_on_stream(rx);
+ c.send(3).unwrap();
+ assert_eq!(rx.next(), Some(Ok(3)));
+ a.send(5).unwrap();
+ assert_eq!(rx.next(), Some(Ok(5)));
+ assert_eq!(rx.next(), None);
+
+ let (tx, rx) = mpsc::channel(1);
+ let (a, b) = oneshot::channel::<u32>();
+ let (c, d) = oneshot::channel::<u32>();
+
+ tx.send(Box::new(b.recover(|_| panic!())) as Box<Future<Item = _, Error = _> + Send>)
+ .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!()))))
+ .forget();
+
+ // We don't even get to see `c` until `a` completes.
+ let mut rx = rx.buffer_unordered(1);
+ sassert_empty(&mut rx);
+ c.send(3).unwrap();
+ sassert_empty(&mut rx);
+ a.send(5).unwrap();
+ let mut rx = block_on_stream(rx);
+ assert_eq!(rx.next(), Some(Ok(5)));
+ assert_eq!(rx.next(), Some(Ok(3)));
+ assert_eq!(rx.next(), None);
+}
+
+#[test]
+fn zip() {
+ assert_done(|| list().zip(list()).collect(), Ok(vec![(1, 1), (2, 2), (3, 3)]));
+ assert_done(|| list().zip(list().take(2)).collect(), Ok(vec![(1, 1), (2, 2)]));
+ assert_done(|| list().take(2).zip(list()).collect(), Ok(vec![(1, 1), (2, 2)]));
+ assert_done(|| err_list().zip(list()).collect::<Vec<_>>(), Err(3));
+ assert_done(|| list().zip(list().map(|x| x + 1)).collect(), Ok(vec![(1, 2), (2, 3), (3, 4)]));
+}
+
+#[test]
+fn peek() {
+ struct Peek {
+ inner: Peekable<Box<Stream<Item = i32, Error = u32> + Send>>,
+ }
+
+ impl Future for Peek {
+ type Item = ();
+ type Error = u32;
+
+ fn poll(&mut self, cx: &mut Context<'_>) -> Poll<(), u32> {
+ {
+ let res = ready!(self.inner.peek(cx))?;
+ assert_eq!(res, Some(&1));
+ }
+ assert_eq!(self.inner.peek(cx).unwrap(), Some(&1).into());
+ assert_eq!(self.inner.poll_next(cx).unwrap(), Some(1).into());
+ Ok(Poll::Ready(()))
+ }
+ }
+
+ block_on(Peek { inner: list().peekable() }).unwrap()
+}
+
+#[test]
+fn wait() {
+ assert_eq!(block_on_stream(list()).collect::<Result<Vec<_>, _>>(), Ok(vec![1, 2, 3]));
+}
+
+#[test]
+fn chunks() {
+ assert_done(|| list().chunks(3).collect(), Ok(vec![vec![1, 2, 3]]));
+ assert_done(|| list().chunks(1).collect(), Ok(vec![vec![1], vec![2], vec![3]]));
+ assert_done(|| list().chunks(2).collect(), Ok(vec![vec![1, 2], vec![3]]));
+ let mut list = block_on_stream(err_list().chunks(3));
+ let i = list.next().unwrap().unwrap();
+ assert_eq!(i, vec![1, 2]);
+ let i = list.next().unwrap().unwrap_err();
+ assert_eq!(i, 3);
+}
+
+#[test]
+#[should_panic]
+fn chunks_panic_on_cap_zero() {
+ let _ = list().chunks(0);
+}
+
+#[test]
+fn forward() {
+ let v = Vec::new();
+ let v = block_on(iter_ok::<_, Never>(vec![0, 1]).forward(v)).unwrap().1;
+ assert_eq!(v, vec![0, 1]);
+
+ let v = block_on(iter_ok::<_, Never>(vec![2, 3]).forward(v)).unwrap().1;
+ assert_eq!(v, vec![0, 1, 2, 3]);
+
+ assert_done(
+ move || iter_ok::<_, Never>(vec![4, 5]).forward(v).map(|(_, s)| s),
+ Ok(vec![0, 1, 2, 3, 4, 5]),
+ );
+}
+
+#[test]
+#[allow(deprecated)]
+fn concat() {
+ let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
+ assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]));
+
+ let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]);
+ assert_done(move || b.concat(), Err(()));
+}
+
+#[test]
+fn concat2() {
+ let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
+ assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]));
+
+ let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]);
+ assert_done(move || b.concat(), Err(()));
+
+ let c = empty::<Vec<()>, ()>();
+ assert_done(move || c.concat(), Ok(vec![]))
+}
+
+#[test]
+fn stream_poll_fn() {
+ let mut counter = 5usize;
+
+ let read_stream = poll_fn(move |_| -> Poll<Option<usize>, std::io::Error> {
+ if counter == 0 {
+ return Ok(Poll::Ready(None));
+ }
+ counter -= 1;
+ Ok(Poll::Ready(Some(counter)))
+ });
+
+ assert_eq!(block_on_stream(read_stream).count(), 5);
+}
+
+#[test]
+fn inspect() {
+ let mut seen = vec![];
+ assert_done(|| list().inspect(|&a| seen.push(a)).collect(), Ok(vec![1, 2, 3]));
+ assert_eq!(seen, [1, 2, 3]);
+}
+
+#[test]
+fn inspect_err() {
+ let mut seen = vec![];
+ assert_done(|| err_list().inspect_err(|&a| seen.push(a)).collect::<Vec<_>>(), Err(3));
+ assert_eq!(seen, [3]);
+}