summaryrefslogtreecommitdiffstats
path: root/third_party/rust/futures-0.1.31/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 09:22:09 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 09:22:09 +0000
commit43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/rust/futures-0.1.31/src
parentInitial commit. (diff)
downloadfirefox-43a97878ce14b72f0981164f87f2e35e14151312.tar.xz
firefox-43a97878ce14b72f0981164f87f2e35e14151312.zip
Adding upstream version 110.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/futures-0.1.31/src')
-rw-r--r--third_party/rust/futures-0.1.31/src/executor.rs17
-rw-r--r--third_party/rust/futures-0.1.31/src/future/and_then.rs38
-rw-r--r--third_party/rust/futures-0.1.31/src/future/catch_unwind.rs51
-rw-r--r--third_party/rust/futures-0.1.31/src/future/chain.rs48
-rw-r--r--third_party/rust/futures-0.1.31/src/future/either.rs54
-rw-r--r--third_party/rust/futures-0.1.31/src/future/empty.rs31
-rw-r--r--third_party/rust/futures-0.1.31/src/future/flatten.rs49
-rw-r--r--third_party/rust/futures-0.1.31/src/future/flatten_stream.rs99
-rw-r--r--third_party/rust/futures-0.1.31/src/future/from_err.rs35
-rw-r--r--third_party/rust/futures-0.1.31/src/future/fuse.rs49
-rw-r--r--third_party/rust/futures-0.1.31/src/future/inspect.rs40
-rw-r--r--third_party/rust/futures-0.1.31/src/future/into_stream.rs36
-rw-r--r--third_party/rust/futures-0.1.31/src/future/join.rs172
-rw-r--r--third_party/rust/futures-0.1.31/src/future/join_all.rs136
-rw-r--r--third_party/rust/futures-0.1.31/src/future/lazy.rs84
-rw-r--r--third_party/rust/futures-0.1.31/src/future/loop_fn.rs99
-rw-r--r--third_party/rust/futures-0.1.31/src/future/map.rs38
-rw-r--r--third_party/rust/futures-0.1.31/src/future/map_err.rs36
-rw-r--r--third_party/rust/futures-0.1.31/src/future/mod.rs1171
-rw-r--r--third_party/rust/futures-0.1.31/src/future/option.rs15
-rw-r--r--third_party/rust/futures-0.1.31/src/future/or_else.rs39
-rw-r--r--third_party/rust/futures-0.1.31/src/future/poll_fn.rs45
-rw-r--r--third_party/rust/futures-0.1.31/src/future/result.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/future/select.rs86
-rw-r--r--third_party/rust/futures-0.1.31/src/future/select2.rs39
-rw-r--r--third_party/rust/futures-0.1.31/src/future/select_all.rs71
-rw-r--r--third_party/rust/futures-0.1.31/src/future/select_ok.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/future/shared.rs300
-rw-r--r--third_party/rust/futures-0.1.31/src/future/then.rs36
-rw-r--r--third_party/rust/futures-0.1.31/src/lib.rs266
-rw-r--r--third_party/rust/futures-0.1.31/src/lock.rs107
-rw-r--r--third_party/rust/futures-0.1.31/src/poll.rs105
-rw-r--r--third_party/rust/futures-0.1.31/src/resultstream.rs46
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/buffer.rs108
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/fanout.rs135
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/flush.rs46
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/from_err.rs71
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/map_err.rs64
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/mod.rs489
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/send.rs59
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/send_all.rs88
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/wait.rs59
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/with.rs153
-rw-r--r--third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs126
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/and_then.rs106
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs130
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/buffered.rs132
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs71
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/chain.rs57
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/channel.rs114
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/chunks.rs136
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/collect.rs52
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/concat.rs172
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/empty.rs29
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/filter.rs89
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/filter_map.rs89
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/flatten.rs96
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/fold.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/for_each.rs51
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/forward.rs110
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/from_err.rs80
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/fuse.rs89
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/future.rs76
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs219
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs707
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/inspect.rs84
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/inspect_err.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/iter.rs46
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/iter_ok.rs48
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/iter_result.rs51
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/map.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/map_err.rs80
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/merge.rs82
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/mod.rs1146
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/once.rs35
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/or_else.rs80
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/peek.rs74
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/poll_fn.rs49
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/repeat.rs53
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/select.rs64
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/skip.rs84
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/skip_while.rs113
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/split.rs105
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/take.rs86
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/take_while.rs113
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/then.rs81
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/unfold.rs114
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/wait.rs53
-rw-r--r--third_party/rust/futures-0.1.31/src/stream/zip.rs59
-rw-r--r--third_party/rust/futures-0.1.31/src/sync/bilock.rs298
-rw-r--r--third_party/rust/futures-0.1.31/src/sync/mod.rs17
-rw-r--r--third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs1187
-rw-r--r--third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs151
-rw-r--r--third_party/rust/futures-0.1.31/src/sync/oneshot.rs611
-rw-r--r--third_party/rust/futures-0.1.31/src/task.rs46
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs283
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/core.rs186
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/mod.rs733
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/std/data.rs131
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs719
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs129
-rw-r--r--third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs144
-rw-r--r--third_party/rust/futures-0.1.31/src/unsync/mod.rs7
-rw-r--r--third_party/rust/futures-0.1.31/src/unsync/mpsc.rs474
-rw-r--r--third_party/rust/futures-0.1.31/src/unsync/oneshot.rs351
105 files changed, 16013 insertions, 0 deletions
diff --git a/third_party/rust/futures-0.1.31/src/executor.rs b/third_party/rust/futures-0.1.31/src/executor.rs
new file mode 100644
index 0000000000..365642f770
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/executor.rs
@@ -0,0 +1,17 @@
+//! Executors
+//!
+//! This module contains tools for managing the raw execution of futures,
+//! which is needed when building *executors* (places where futures can run).
+//!
+//! More information about executors can be [found online at tokio.rs][online].
+//!
+//! [online]: https://tokio.rs/docs/going-deeper-futures/tasks/
+
+#[allow(deprecated)]
+#[doc(hidden)]
+#[cfg(feature = "use_std")]
+pub use task_impl::{Unpark, Executor, Run};
+
+pub use task_impl::{Spawn, spawn, Notify, with_notify};
+
+pub use task_impl::{UnsafeNotify, NotifyHandle};
diff --git a/third_party/rust/futures-0.1.31/src/future/and_then.rs b/third_party/rust/futures-0.1.31/src/future/and_then.rs
new file mode 100644
index 0000000000..2e5b6aa16e
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/and_then.rs
@@ -0,0 +1,38 @@
use {Future, IntoFuture, Poll};
use super::chain::Chain;

/// Future for the `and_then` combinator, chaining a computation onto the end of
/// another future which completes successfully.
///
/// This is created by the `Future::and_then` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct AndThen<A, B, F> where A: Future, B: IntoFuture {
    // State machine: first polls `A` (keeping the closure `F` on the side),
    // then polls the `B::Future` produced by applying `F` to `A`'s item.
    state: Chain<A, B::Future, F>,
}

/// Creates the `AndThen` combinator wrapping `future`; `f` is deferred until
/// the first future completes successfully.
pub fn new<A, B, F>(future: A, f: F) -> AndThen<A, B, F>
    where A: Future,
          B: IntoFuture,
{
    AndThen {
        state: Chain::new(future, f),
    }
}

impl<A, B, F> Future for AndThen<A, B, F>
    where A: Future,
          B: IntoFuture<Error=A::Error>,
          F: FnOnce(A::Item) -> B,
{
    type Item = B::Item;
    type Error = B::Error;

    fn poll(&mut self) -> Poll<B::Item, B::Error> {
        self.state.poll(|result, f| {
            // `Chain::poll`'s callback contract: an outer `Err` aborts the
            // chain with that error, `Ok(Err(next))` means "continue by
            // polling `next`". `result.map` therefore forwards `A`'s error
            // untouched and, on success, hands the item to `f` to build the
            // second future.
            result.map(|e| {
                Err(f(e).into_future())
            })
        })
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs b/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs
new file mode 100644
index 0000000000..f87f118185
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/catch_unwind.rs
@@ -0,0 +1,51 @@
use std::prelude::v1::*;
use std::any::Any;
use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};

use {Future, Poll, Async};

/// Future for the `catch_unwind` combinator.
///
/// This is created by the `Future::catch_unwind` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct CatchUnwind<F> where F: Future {
    // `None` once the inner future has completed, or once a poll panicked
    // (the future is lost inside the unwinding closure in that case).
    future: Option<F>,
}

/// Wraps `future` so that a panic during `poll` is captured and surfaced as
/// this future's `Err(Box<Any + Send>)` instead of unwinding the caller.
pub fn new<F>(future: F) -> CatchUnwind<F>
    where F: Future + UnwindSafe,
{
    CatchUnwind {
        future: Some(future),
    }
}

impl<F> Future for CatchUnwind<F>
    where F: Future + UnwindSafe,
{
    type Item = Result<F::Item, F::Error>;
    type Error = Box<Any + Send>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        // Move the future out so it can cross the `catch_unwind` boundary by
        // value; polling again after completion (or after a panic) trips the
        // `expect` below.
        let mut future = self.future.take().expect("cannot poll twice");
        // The closure returns the future alongside its poll result so it can
        // be stored back on `NotReady`. If the closure panics, `?` turns the
        // caught payload into this future's error.
        let (res, future) = catch_unwind(|| (future.poll(), future))?;
        match res {
            Ok(Async::NotReady) => {
                self.future = Some(future);
                Ok(Async::NotReady)
            }
            // Both inner success and inner error are *successful* outcomes of
            // the catch: the item type is `Result<F::Item, F::Error>`.
            Ok(Async::Ready(t)) => Ok(Async::Ready(Ok(t))),
            Err(e) => Ok(Async::Ready(Err(e))),
        }
    }
}

// `AssertUnwindSafe<F>` just forwards to the wrapped future; the wrapper
// exists to assert unwind-safety so the future can be used with the
// `catch_unwind` machinery above.
impl<F: Future> Future for AssertUnwindSafe<F> {
    type Item = F::Item;
    type Error = F::Error;

    fn poll(&mut self) -> Poll<F::Item, F::Error> {
        self.0.poll()
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/chain.rs b/third_party/rust/futures-0.1.31/src/future/chain.rs
new file mode 100644
index 0000000000..1bf5cd639c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/chain.rs
@@ -0,0 +1,48 @@
use core::mem;

use {Future, Poll, Async};

/// Shared state machine used by the combinators (`and_then`, `then`,
/// `flatten`, ...) that run one future to completion and then a second one
/// derived from its result.
#[derive(Debug)]
pub enum Chain<A, B, C> where A: Future {
    // First future still pending, plus auxiliary data `C` (typically a
    // closure) consumed when it completes.
    First(A, C),
    // Second future, produced from the first future's result.
    Second(B),
    // Terminal state; polling again is a contract violation.
    Done,
}

impl<A, B, C> Chain<A, B, C>
    where A: Future,
          B: Future,
{
    pub fn new(a: A, c: C) -> Chain<A, B, C> {
        Chain::First(a, c)
    }

    /// Polls the chain. `f` is invoked at most once, with the first future's
    /// result and the stored data, and must return either a final value
    /// (`Ok(Ok(item))`), a second future to keep polling (`Ok(Err(b))`), or
    /// an error (`Err(e)`).
    pub fn poll<F>(&mut self, f: F) -> Poll<B::Item, B::Error>
        where F: FnOnce(Result<A::Item, A::Error>, C)
                        -> Result<Result<B::Item, B>, B::Error>,
    {
        let a_result = match *self {
            Chain::First(ref mut a, _) => {
                match a.poll() {
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    Ok(Async::Ready(t)) => Ok(t),
                    Err(e) => Err(e),
                }
            }
            Chain::Second(ref mut b) => return b.poll(),
            Chain::Done => panic!("cannot poll a chained future twice"),
        };
        // First future finished: take ownership of the auxiliary data by
        // swapping in `Done` (which is also where we stay if `f` errors).
        let data = match mem::replace(self, Chain::Done) {
            Chain::First(_, c) => c,
            _ => panic!(),
        };
        match f(a_result, data)? {
            Ok(e) => Ok(Async::Ready(e)),
            Err(mut b) => {
                // Eagerly poll the new future once before storing it, so an
                // already-ready result is returned without another wakeup.
                let ret = b.poll();
                *self = Chain::Second(b);
                ret
            }
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/either.rs b/third_party/rust/futures-0.1.31/src/future/either.rs
new file mode 100644
index 0000000000..253f26784c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/either.rs
@@ -0,0 +1,54 @@
use {Future, Poll, Stream};

/// Combines two different futures yielding the same item and error
/// types into a single type.
#[derive(Debug)]
pub enum Either<A, B> {
    /// First branch of the type
    A(A),
    /// Second branch of the type
    B(B),
}

impl<T, A, B> Either<(T, A), (T, B)> {
    /// Splits out the homogeneous type from an either of tuples.
    ///
    /// This method is typically useful when combined with the `Future::select2`
    /// combinator.
    pub fn split(self) -> (T, Either<A, B>) {
        match self {
            Either::A((a, b)) => (a, Either::A(b)),
            Either::B((a, b)) => (a, Either::B(b)),
        }
    }
}

// Polling an `Either` future simply delegates to whichever branch it holds;
// both branches are constrained to the same item/error types, so the result
// type is uniform.
impl<A, B> Future for Either<A, B>
    where A: Future,
          B: Future<Item = A::Item, Error = A::Error>
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<A::Item, A::Error> {
        match *self {
            Either::A(ref mut a) => a.poll(),
            Either::B(ref mut b) => b.poll(),
        }
    }
}

// Same delegation for streams: `Either` is a `Stream` whenever both branches
// are streams with matching item/error types.
impl<A, B> Stream for Either<A, B>
    where A: Stream,
          B: Stream<Item = A::Item, Error = A::Error>
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<Option<A::Item>, A::Error> {
        match *self {
            Either::A(ref mut a) => a.poll(),
            Either::B(ref mut b) => b.poll(),
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/empty.rs b/third_party/rust/futures-0.1.31/src/future/empty.rs
new file mode 100644
index 0000000000..fbb56b26fd
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/empty.rs
@@ -0,0 +1,31 @@
//! Definition of the Empty combinator, a future that's never ready.

use core::marker;

use {Future, Poll, Async};

/// A future which is never resolved.
///
/// This future can be created with the `empty` function.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Empty<T, E> {
    // Zero-sized marker tying the otherwise-unused `T`/`E` parameters to the
    // struct so it can implement `Future<Item = T, Error = E>`.
    _data: marker::PhantomData<(T, E)>,
}

/// Creates a future which never resolves, representing a computation that never
/// finishes.
///
/// The returned future will forever return `Async::NotReady`.
pub fn empty<T, E>() -> Empty<T, E> {
    Empty { _data: marker::PhantomData }
}

impl<T, E> Future for Empty<T, E> {
    type Item = T;
    type Error = E;

    fn poll(&mut self) -> Poll<T, E> {
        // Unconditionally pending; nothing is ever arranged to wake the task
        // again.
        Ok(Async::NotReady)
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/flatten.rs b/third_party/rust/futures-0.1.31/src/future/flatten.rs
new file mode 100644
index 0000000000..bfe286975c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/flatten.rs
@@ -0,0 +1,49 @@
use {Future, IntoFuture, Poll};
use core::fmt;
use super::chain::Chain;

/// Future for the `flatten` combinator, flattening a future-of-a-future to get just
/// the result of the final future.
///
/// This is created by the `Future::flatten` method.
#[must_use = "futures do nothing unless polled"]
pub struct Flatten<A> where A: Future, A::Item: IntoFuture {
    // No auxiliary data is needed between the two futures, hence the `()`.
    state: Chain<A, <A::Item as IntoFuture>::Future, ()>,
}

// Manual `Debug` impl (instead of `#[derive]`) so the `Debug` bound can be
// stated on the computed inner future type stored in `state`.
impl<A> fmt::Debug for Flatten<A>
    where A: Future + fmt::Debug,
          A::Item: IntoFuture,
          <<A as IntoFuture>::Item as IntoFuture>::Future: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("Flatten")
            .field("state", &self.state)
            .finish()
    }
}

/// Creates the `Flatten` combinator for a future whose item is itself
/// convertible into a future.
pub fn new<A>(future: A) -> Flatten<A>
    where A: Future,
          A::Item: IntoFuture,
{
    Flatten {
        state: Chain::new(future, ()),
    }
}

impl<A> Future for Flatten<A>
    where A: Future,
          A::Item: IntoFuture,
          <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error>
{
    type Item = <<A as Future>::Item as IntoFuture>::Item;
    type Error = <<A as Future>::Item as IntoFuture>::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        self.state.poll(|a, ()| {
            // `a?` converts the outer future's error through the `From`
            // bound; on success, returning `Ok(Err(..))` tells `Chain` to
            // continue by polling the inner future.
            let future = a?.into_future();
            Ok(Err(future))
        })
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs b/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs
new file mode 100644
index 0000000000..7bf3b9ca79
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/flatten_stream.rs
@@ -0,0 +1,99 @@
use {Async, Future, Poll};
use core::fmt;
use stream::Stream;

/// Future for the `flatten_stream` combinator, flattening a
/// future-of-a-stream to get just the result of the final stream as a stream.
///
/// This is created by the `Future::flatten_stream` method.
#[must_use = "streams do nothing unless polled"]
pub struct FlattenStream<F>
    where F: Future,
          <F as Future>::Item: Stream<Error=F::Error>,
{
    state: State<F>
}

impl<F> fmt::Debug for FlattenStream<F>
    where F: Future + fmt::Debug,
          <F as Future>::Item: Stream<Error=F::Error> + fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("FlattenStream")
            .field("state", &self.state)
            .finish()
    }
}

/// Wraps future `f` as a stream that, once `f` resolves, yields the items of
/// the stream `f` produced.
pub fn new<F>(f: F) -> FlattenStream<F>
    where F: Future,
          <F as Future>::Item: Stream<Error=F::Error>,
{
    FlattenStream {
        state: State::Future(f)
    }
}

#[derive(Debug)]
enum State<F>
    where F: Future,
          <F as Future>::Item: Stream<Error=F::Error>,
{
    // Future is not yet resolved (not polled yet, or polled and not ready).
    Future(F),
    // Future resolved to its stream; all further polls delegate to it.
    Stream(F::Item),
    // Future resolved to an error; that error has been yielded once, and the
    // next poll reports end-of-stream.
    Eof,
    // Terminal: the error and the trailing EOF have both been delivered.
    Done,
}

impl<F> Stream for FlattenStream<F>
    where F: Future,
          <F as Future>::Item: Stream<Error=F::Error>,
{
    type Item = <F::Item as Stream>::Item;
    type Error = <F::Item as Stream>::Error;

    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        loop {
            // Compute the transition first, then commit it after the match:
            // this keeps the mutable borrow of `self.state` short.
            let (next_state, ret_opt) = match self.state {
                State::Future(ref mut f) => {
                    match f.poll() {
                        Ok(Async::NotReady) => {
                            // State is not changed, early return.
                            return Ok(Async::NotReady)
                        },
                        Ok(Async::Ready(stream)) => {
                            // Future resolved to stream.
                            // We do not return, but poll that
                            // stream in the next loop iteration.
                            (State::Stream(stream), None)
                        }
                        Err(e) => {
                            // Yield the error once, then EOF (Eof -> Done).
                            (State::Eof, Some(Err(e)))
                        }
                    }
                }
                State::Stream(ref mut s) => {
                    // Just forward call to the stream,
                    // do not track its state.
                    return s.poll();
                }
                State::Eof => {
                    (State::Done, Some(Ok(Async::Ready(None))))
                }
                State::Done => {
                    panic!("poll called after eof");
                }
            };

            self.state = next_state;
            if let Some(ret) = ret_opt {
                return ret;
            }
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/from_err.rs b/third_party/rust/futures-0.1.31/src/future/from_err.rs
new file mode 100644
index 0000000000..97e35d7cc7
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/from_err.rs
@@ -0,0 +1,35 @@
use core::marker::PhantomData;

use {Future, Poll, Async};

/// Future for the `from_err` combinator, changing the error type of a future.
///
/// This is created by the `Future::from_err` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct FromErr<A, E> where A: Future {
    future: A,
    // Marker pinning the target error type `E`; no value of `E` is stored.
    f: PhantomData<E>
}

/// Wraps `future`, converting its error type into `E` via `From` when polled.
pub fn new<A, E>(future: A) -> FromErr<A, E>
    where A: Future
{
    FromErr {
        future: future,
        f: PhantomData
    }
}

impl<A:Future, E:From<A::Error>> Future for FromErr<A, E> {
    type Item = A::Item;
    type Error = E;

    fn poll(&mut self) -> Poll<A::Item, E> {
        // `NotReady` passes through untouched; only `Ready`/`Err` outcomes
        // reach the `map_err` conversion below.
        let e = match self.future.poll() {
            Ok(Async::NotReady) => return Ok(Async::NotReady),
            other => other,
        };
        e.map_err(From::from)
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/fuse.rs b/third_party/rust/futures-0.1.31/src/future/fuse.rs
new file mode 100644
index 0000000000..05ad3d5afa
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/fuse.rs
@@ -0,0 +1,49 @@
use {Future, Poll, Async};

/// A future which "fuses" a future once it's been resolved.
///
/// Normally futures can behave unpredictable once they're used after a future
/// has been resolved, but `Fuse` is always defined to return `Async::NotReady`
/// from `poll` after it has resolved successfully or returned an error.
///
/// This is created by the `Future::fuse` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Fuse<A: Future> {
    // `None` once the inner future has yielded `Ready(..)` or `Err(..)`.
    future: Option<A>,
}

pub fn new<A: Future>(f: A) -> Fuse<A> {
    Fuse {
        future: Some(f),
    }
}

impl<A: Future> Fuse<A> {
    /// Returns whether the underlying future has finished or not.
    ///
    /// If this method returns `true`, then all future calls to `poll`
    /// are guaranteed to return `Ok(Async::NotReady)`. If this returns
    /// false, then the underlying future has not been driven to
    /// completion.
    pub fn is_done(&self) -> bool {
        self.future.is_none()
    }
}

impl<A: Future> Future for Fuse<A> {
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<A::Item, A::Error> {
        // If the inner future is already gone, behave as perpetually
        // not-ready instead of panicking or re-polling.
        let res = self.future.as_mut().map(|f| f.poll());
        match res.unwrap_or(Ok(Async::NotReady)) {
            res @ Ok(Async::Ready(_)) |
            res @ Err(_) => {
                // Completion (success or error): drop the inner future so it
                // can never be polled again.
                self.future = None;
                res
            }
            Ok(Async::NotReady) => Ok(Async::NotReady)
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/inspect.rs b/third_party/rust/futures-0.1.31/src/future/inspect.rs
new file mode 100644
index 0000000000..59fcd78638
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/inspect.rs
@@ -0,0 +1,40 @@
use {Future, Poll, Async};

/// Do something with the item of a future, passing it on.
///
/// This is created by the `Future::inspect` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Inspect<A, F> where A: Future {
    future: A,
    // Wrapped in `Option` so the `FnOnce` closure can be moved out exactly
    // once when the future resolves.
    f: Option<F>,
}

/// Creates the `Inspect` combinator: `f` is called with a borrow of the item
/// when (and only when) `future` succeeds.
pub fn new<A, F>(future: A, f: F) -> Inspect<A, F>
    where A: Future,
          F: FnOnce(&A::Item),
{
    Inspect {
        future: future,
        f: Some(f),
    }
}

impl<A, F> Future for Inspect<A, F>
    where A: Future,
          F: FnOnce(&A::Item),
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<A::Item, A::Error> {
        match self.future.poll() {
            Ok(Async::NotReady) => Ok(Async::NotReady),
            Ok(Async::Ready(e)) => {
                // Run the observer on a borrow of the item, then forward the
                // item unchanged. Errors bypass the closure entirely.
                (self.f.take().expect("cannot poll Inspect twice"))(&e);
                Ok(Async::Ready(e))
            },
            Err(e) => Err(e),
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/into_stream.rs b/third_party/rust/futures-0.1.31/src/future/into_stream.rs
new file mode 100644
index 0000000000..6e299e6a21
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/into_stream.rs
@@ -0,0 +1,36 @@
use {Async, Poll};
use Future;
use stream::Stream;

/// Future that forwards one element from the underlying future
/// (whether it is success of error) and emits EOF after that.
#[derive(Debug)]
pub struct IntoStream<F: Future> {
    // `None` after the single element (or error) has been emitted.
    future: Option<F>
}

pub fn new<F: Future>(future: F) -> IntoStream<F> {
    IntoStream {
        future: Some(future)
    }
}

impl<F: Future> Stream for IntoStream<F> {
    type Item = F::Item;
    type Error = F::Error;

    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        let ret = match self.future {
            None => return Ok(Async::Ready(None)),
            Some(ref mut future) => {
                match future.poll() {
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    Err(e) => Err(e),
                    Ok(Async::Ready(r)) => Ok(r),
                }
            }
        };
        // The future completed (with an item or an error): clear it so the
        // next poll reports end-of-stream, and emit the single result.
        self.future = None;
        ret.map(|r| Async::Ready(Some(r)))
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/join.rs b/third_party/rust/futures-0.1.31/src/future/join.rs
new file mode 100644
index 0000000000..452121200b
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/join.rs
@@ -0,0 +1,172 @@
#![allow(non_snake_case)]

use core::fmt;
use core::mem;

use {Future, Poll, IntoFuture, Async};

// Generates the `Join`..`Join5` combinators. Each generated struct stores
// every constituent future as a `MaybeDone` slot so already-finished results
// can be buffered until *all* futures complete. The `$B` identifiers double
// as type parameters and field names (hence the `non_snake_case` allowance
// above).
macro_rules! generate {
    ($(
        $(#[$doc:meta])*
        ($Join:ident, $new:ident, <A, $($B:ident),*>),
    )*) => ($(
        $(#[$doc])*
        #[must_use = "futures do nothing unless polled"]
        pub struct $Join<A, $($B),*>
            where A: Future,
                  $($B: Future<Error=A::Error>),*
        {
            a: MaybeDone<A>,
            $($B: MaybeDone<$B>,)*
        }

        impl<A, $($B),*> fmt::Debug for $Join<A, $($B),*>
            where A: Future + fmt::Debug,
                  A::Item: fmt::Debug,
                  $(
                      $B: Future<Error=A::Error> + fmt::Debug,
                      $B::Item: fmt::Debug
                  ),*
        {
            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                fmt.debug_struct(stringify!($Join))
                    .field("a", &self.a)
                    $(.field(stringify!($B), &self.$B))*
                    .finish()
            }
        }

        pub fn $new<A, $($B),*>(a: A, $($B: $B),*) -> $Join<A, $($B),*>
            where A: Future,
                  $($B: Future<Error=A::Error>),*
        {
            $Join {
                a: MaybeDone::NotYet(a),
                $($B: MaybeDone::NotYet($B)),*
            }
        }

        impl<A, $($B),*> $Join<A, $($B),*>
            where A: Future,
                  $($B: Future<Error=A::Error>),*
        {
            // Drops every constituent future/result; used on error so
            // associated resources are released as soon as possible.
            fn erase(&mut self) {
                self.a = MaybeDone::Gone;
                $(self.$B = MaybeDone::Gone;)*
            }
        }

        impl<A, $($B),*> Future for $Join<A, $($B),*>
            where A: Future,
                  $($B: Future<Error=A::Error>),*
        {
            type Item = (A::Item, $($B::Item),*);
            type Error = A::Error;

            fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
                // Poll every not-yet-finished slot on each call; the first
                // error wins and cancels (drops) all the others via `erase`.
                let mut all_done = match self.a.poll() {
                    Ok(done) => done,
                    Err(e) => {
                        self.erase();
                        return Err(e)
                    }
                };
                $(
                    all_done = match self.$B.poll() {
                        Ok(done) => all_done && done,
                        Err(e) => {
                            self.erase();
                            return Err(e)
                        }
                    };
                )*

                if all_done {
                    // Every slot is `Done`: move the buffered items out into
                    // the result tuple (slots become `Gone`).
                    Ok(Async::Ready((self.a.take(), $(self.$B.take()),*)))
                } else {
                    Ok(Async::NotReady)
                }
            }
        }

        // Tuples of `IntoFuture` values are themselves `IntoFuture`, joining
        // all elements.
        impl<A, $($B),*> IntoFuture for (A, $($B),*)
            where A: IntoFuture,
                  $(
                      $B: IntoFuture<Error=A::Error>
                  ),*
        {
            type Future = $Join<A::Future, $($B::Future),*>;
            type Item = (A::Item, $($B::Item),*);
            type Error = A::Error;

            fn into_future(self) -> Self::Future {
                match self {
                    (a, $($B),+) => {
                        $new(
                            IntoFuture::into_future(a),
                            $(IntoFuture::into_future($B)),+
                        )
                    }
                }
            }
        }

    )*)
}

generate! {
    /// Future for the `join` combinator, waiting for two futures to
    /// complete.
    ///
    /// This is created by the `Future::join` method.
    (Join, new, <A, B>),

    /// Future for the `join3` combinator, waiting for three futures to
    /// complete.
    ///
    /// This is created by the `Future::join3` method.
    (Join3, new3, <A, B, C>),

    /// Future for the `join4` combinator, waiting for four futures to
    /// complete.
    ///
    /// This is created by the `Future::join4` method.
    (Join4, new4, <A, B, C, D>),

    /// Future for the `join5` combinator, waiting for five futures to
    /// complete.
    ///
    /// This is created by the `Future::join5` method.
    (Join5, new5, <A, B, C, D, E>),
}

// Per-future slot: still running, finished with its item buffered, or already
// consumed (taken into the result tuple, or erased on error).
#[derive(Debug)]
enum MaybeDone<A: Future> {
    NotYet(A),
    Done(A::Item),
    Gone,
}

impl<A: Future> MaybeDone<A> {
    // Polls the inner future if still pending; `Ok(true)` means an item is
    // buffered. Reaching a `Gone` slot here indicates the `Join` was polled
    // after completion.
    fn poll(&mut self) -> Result<bool, A::Error> {
        let res = match *self {
            MaybeDone::NotYet(ref mut a) => a.poll()?,
            MaybeDone::Done(_) => return Ok(true),
            MaybeDone::Gone => panic!("cannot poll Join twice"),
        };
        match res {
            Async::Ready(res) => {
                *self = MaybeDone::Done(res);
                Ok(true)
            }
            Async::NotReady => Ok(false),
        }
    }

    // Moves the buffered item out, leaving `Gone`; only valid in `Done`.
    fn take(&mut self) -> A::Item {
        match mem::replace(self, MaybeDone::Gone) {
            MaybeDone::Done(a) => a,
            _ => panic!(),
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/join_all.rs b/third_party/rust/futures-0.1.31/src/future/join_all.rs
new file mode 100644
index 0000000000..398a7a4736
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/join_all.rs
@@ -0,0 +1,136 @@
//! Definition of the `JoinAll` combinator, waiting for all of a list of futures
//! to finish.

use std::prelude::v1::*;

use std::fmt;
use std::mem;

use {Future, IntoFuture, Poll, Async};

// Each slot is either a still-running future or its buffered result.
#[derive(Debug)]
enum ElemState<T> where T: Future {
    Pending(T),
    Done(T::Item),
}

/// A future which takes a list of futures and resolves with a vector of the
/// completed values.
///
/// This future is created with the `join_all` method.
#[must_use = "futures do nothing unless polled"]
pub struct JoinAll<I>
    where I: IntoIterator,
          I::Item: IntoFuture,
{
    // Emptied wholesale on error (and drained on success) so remaining
    // futures are dropped promptly.
    elems: Vec<ElemState<<I::Item as IntoFuture>::Future>>,
}

impl<I> fmt::Debug for JoinAll<I>
    where I: IntoIterator,
          I::Item: IntoFuture,
          <<I as IntoIterator>::Item as IntoFuture>::Future: fmt::Debug,
          <<I as IntoIterator>::Item as IntoFuture>::Item: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("JoinAll")
            .field("elems", &self.elems)
            .finish()
    }
}

/// Creates a future which represents a collection of the results of the futures
/// given.
///
/// The returned future will drive execution for all of its underlying futures,
/// collecting the results into a destination `Vec<T>` in the same order as they
/// were provided. If any future returns an error then all other futures will be
/// canceled and an error will be returned immediately. If all futures complete
/// successfully, however, then the returned future will succeed with a `Vec` of
/// all the successful results.
///
/// # Examples
///
/// ```
/// use futures::future::*;
///
/// let f = join_all(vec![
///     ok::<u32, u32>(1),
///     ok::<u32, u32>(2),
///     ok::<u32, u32>(3),
/// ]);
/// let f = f.map(|x| {
///     assert_eq!(x, [1, 2, 3]);
/// });
///
/// let f = join_all(vec![
///     Box::new(ok::<u32, u32>(1)),
///     Box::new(err::<u32, u32>(2)),
///     Box::new(ok::<u32, u32>(3)),
/// ]);
/// let f = f.then(|x| {
///     assert_eq!(x, Err(2));
///     x
/// });
/// ```
pub fn join_all<I>(i: I) -> JoinAll<I>
    where I: IntoIterator,
          I::Item: IntoFuture,
{
    let elems = i.into_iter().map(|f| {
        ElemState::Pending(f.into_future())
    }).collect();
    JoinAll { elems: elems }
}

impl<I> Future for JoinAll<I>
    where I: IntoIterator,
          I::Item: IntoFuture,
{
    type Item = Vec<<I::Item as IntoFuture>::Item>;
    type Error = <I::Item as IntoFuture>::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let mut all_done = true;

        // Poll every still-pending slot on each call; completed results are
        // buffered in place so the output order matches the input order.
        for idx in 0 .. self.elems.len() {
            let done_val = match self.elems[idx] {
                ElemState::Pending(ref mut t) => {
                    match t.poll() {
                        Ok(Async::Ready(v)) => Ok(v),
                        Ok(Async::NotReady) => {
                            all_done = false;
                            continue
                        }
                        Err(e) => Err(e),
                    }
                }
                ElemState::Done(ref mut _v) => continue,
            };

            match done_val {
                Ok(v) => self.elems[idx] = ElemState::Done(v),
                Err(e) => {
                    // On completion drop all our associated resources
                    // ASAP.
                    self.elems = Vec::new();
                    return Err(e)
                }
            }
        }

        if all_done {
            // Every slot is `Done`; drain the buffer into the result vector
            // in the original order.
            let elems = mem::replace(&mut self.elems, Vec::new());
            let result = elems.into_iter().map(|e| {
                match e {
                    ElemState::Done(t) => t,
                    _ => unreachable!(),
                }
            }).collect();
            Ok(Async::Ready(result))
        } else {
            Ok(Async::NotReady)
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/future/lazy.rs b/third_party/rust/futures-0.1.31/src/future/lazy.rs
new file mode 100644
index 0000000000..2f310337b6
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/lazy.rs
@@ -0,0 +1,84 @@
+//! Definition of the Lazy combinator, deferring execution of a function until
+//! the future is polled.
+
+use core::mem;
+
+use {Future, IntoFuture, Poll};
+
+/// A future which defers creation of the actual future until a callback is
+/// scheduled.
+///
+/// This is created by the `lazy` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Lazy<F, R: IntoFuture> {
+ inner: _Lazy<F, R::Future>,
+}
+
+#[derive(Debug)]
+enum _Lazy<F, R> {
+ First(F),
+ Second(R),
+ Moved,
+}
+
+/// Creates a new future which will eventually be the same as the one created
+/// by the closure provided.
+///
+/// The provided closure is only run once the future has a callback scheduled
+/// on it, otherwise the callback never runs. Once run, however, this future is
+/// the same as the one the closure creates.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let a = lazy(|| ok::<u32, u32>(1));
+///
+/// let b = lazy(|| -> FutureResult<u32, u32> {
+/// panic!("oh no!")
+/// });
+/// drop(b); // closure is never run
+/// ```
+pub fn lazy<F, R>(f: F) -> Lazy<F, R>
+ where F: FnOnce() -> R,
+ R: IntoFuture
+{
+ Lazy {
+ inner: _Lazy::First(f),
+ }
+}
+
+impl<F, R> Lazy<F, R>
+ where F: FnOnce() -> R,
+ R: IntoFuture,
+{
+ fn get(&mut self) -> &mut R::Future {
+ match self.inner {
+ _Lazy::First(_) => {}
+ _Lazy::Second(ref mut f) => return f,
+ _Lazy::Moved => panic!(), // can only happen if `f()` panics
+ }
+ match mem::replace(&mut self.inner, _Lazy::Moved) {
+ _Lazy::First(f) => self.inner = _Lazy::Second(f().into_future()),
+ _ => panic!(), // we already found First
+ }
+ match self.inner {
+ _Lazy::Second(ref mut f) => f,
+ _ => panic!(), // we just stored Second
+ }
+ }
+}
+
+impl<F, R> Future for Lazy<F, R>
+ where F: FnOnce() -> R,
+ R: IntoFuture,
+{
+ type Item = R::Item;
+ type Error = R::Error;
+
+ fn poll(&mut self) -> Poll<R::Item, R::Error> {
+ self.get().poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/loop_fn.rs b/third_party/rust/futures-0.1.31/src/future/loop_fn.rs
new file mode 100644
index 0000000000..299a0383c2
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/loop_fn.rs
@@ -0,0 +1,99 @@
+//! Definition of the `LoopFn` combinator, implementing `Future` loops.
+
+use {Async, Future, IntoFuture, Poll};
+
+/// The status of a `loop_fn` loop.
+#[derive(Debug)]
+pub enum Loop<T, S> {
+ /// Indicates that the loop has completed with output `T`.
+ Break(T),
+
+ /// Indicates that the loop function should be called again with input
+ /// state `S`.
+ Continue(S),
+}
+
+/// A future implementing a tail-recursive loop.
+///
+/// Created by the `loop_fn` function.
+#[derive(Debug)]
+pub struct LoopFn<A, F> where A: IntoFuture {
+ future: A::Future,
+ func: F,
+}
+
+/// Creates a new future implementing a tail-recursive loop.
+///
+/// The loop function is immediately called with `initial_state` and should
+/// return a value that can be converted to a future. On successful completion,
+/// this future should output a `Loop<T, S>` to indicate the status of the
+/// loop.
+///
+/// `Loop::Break(T)` halts the loop and completes the future with output `T`.
+///
+/// `Loop::Continue(S)` reinvokes the loop function with state `S`. The returned
+/// future will be subsequently polled for a new `Loop<T, S>` value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::{ok, loop_fn, Future, FutureResult, Loop};
+/// use std::io::Error;
+///
+/// struct Client {
+/// ping_count: u8,
+/// }
+///
+/// impl Client {
+/// fn new() -> Self {
+/// Client { ping_count: 0 }
+/// }
+///
+/// fn send_ping(self) -> FutureResult<Self, Error> {
+/// ok(Client { ping_count: self.ping_count + 1 })
+/// }
+///
+/// fn receive_pong(self) -> FutureResult<(Self, bool), Error> {
+/// let done = self.ping_count >= 5;
+/// ok((self, done))
+/// }
+/// }
+///
+/// let ping_til_done = loop_fn(Client::new(), |client| {
+/// client.send_ping()
+/// .and_then(|client| client.receive_pong())
+/// .and_then(|(client, done)| {
+/// if done {
+/// Ok(Loop::Break(client))
+/// } else {
+/// Ok(Loop::Continue(client))
+/// }
+/// })
+/// });
+/// ```
+pub fn loop_fn<S, T, A, F>(initial_state: S, mut func: F) -> LoopFn<A, F>
+ where F: FnMut(S) -> A,
+ A: IntoFuture<Item = Loop<T, S>>,
+{
+ LoopFn {
+ future: func(initial_state).into_future(),
+ func: func,
+ }
+}
+
+impl<S, T, A, F> Future for LoopFn<A, F>
+ where F: FnMut(S) -> A,
+ A: IntoFuture<Item = Loop<T, S>>,
+{
+ type Item = T;
+ type Error = A::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ loop {
+ match try_ready!(self.future.poll()) {
+ Loop::Break(x) => return Ok(Async::Ready(x)),
+ Loop::Continue(s) => self.future = (self.func)(s).into_future(),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/map.rs b/third_party/rust/futures-0.1.31/src/future/map.rs
new file mode 100644
index 0000000000..4b1f4cd7d4
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/map.rs
@@ -0,0 +1,38 @@
+use {Future, Poll, Async};
+
+/// Future for the `map` combinator, changing the type of a future.
+///
+/// This is created by the `Future::map` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Map<A, F> where A: Future {
+ future: A,
+ f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> Map<A, F>
+ where A: Future,
+{
+ Map {
+ future: future,
+ f: Some(f),
+ }
+}
+
+impl<U, A, F> Future for Map<A, F>
+ where A: Future,
+ F: FnOnce(A::Item) -> U,
+{
+ type Item = U;
+ type Error = A::Error;
+
+ fn poll(&mut self) -> Poll<U, A::Error> {
+ let e = match self.future.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Ok(Async::Ready(e)) => Ok(e),
+ Err(e) => Err(e),
+ };
+ e.map(self.f.take().expect("cannot poll Map twice"))
+ .map(Async::Ready)
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/map_err.rs b/third_party/rust/futures-0.1.31/src/future/map_err.rs
new file mode 100644
index 0000000000..4ea12f4586
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/map_err.rs
@@ -0,0 +1,36 @@
+use {Future, Poll, Async};
+
+/// Future for the `map_err` combinator, changing the error type of a future.
+///
+/// This is created by the `Future::map_err` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct MapErr<A, F> where A: Future {
+ future: A,
+ f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> MapErr<A, F>
+ where A: Future
+{
+ MapErr {
+ future: future,
+ f: Some(f),
+ }
+}
+
+impl<U, A, F> Future for MapErr<A, F>
+ where A: Future,
+ F: FnOnce(A::Error) -> U,
+{
+ type Item = A::Item;
+ type Error = U;
+
+ fn poll(&mut self) -> Poll<A::Item, U> {
+ let e = match self.future.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ other => other,
+ };
+ e.map_err(self.f.take().expect("cannot poll MapErr twice"))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/mod.rs b/third_party/rust/futures-0.1.31/src/future/mod.rs
new file mode 100644
index 0000000000..9867765902
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/mod.rs
@@ -0,0 +1,1171 @@
+//! Futures
+//!
+//! This module contains the `Future` trait and a number of adaptors for this
+//! trait. See the crate docs, and the docs for `Future`, for full detail.
+
+use core::fmt;
+use core::result;
+
+// Primitive futures
+mod empty;
+mod lazy;
+mod poll_fn;
+#[path = "result.rs"]
+mod result_;
+mod loop_fn;
+mod option;
+pub use self::empty::{empty, Empty};
+pub use self::lazy::{lazy, Lazy};
+pub use self::poll_fn::{poll_fn, PollFn};
+pub use self::result_::{result, ok, err, FutureResult};
+pub use self::loop_fn::{loop_fn, Loop, LoopFn};
+
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `ok` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{ok as finished, Ok as Finished};
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `err` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{err as failed, Err as Failed};
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "use `result` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{result as done, FutureResult as Done};
+#[doc(hidden)]
+#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{FutureResult as Ok};
+#[doc(hidden)]
+#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
+#[cfg(feature = "with-deprecated")]
+pub use self::{FutureResult as Err};
+
+// combinators
+mod and_then;
+mod flatten;
+mod flatten_stream;
+mod fuse;
+mod into_stream;
+mod join;
+mod map;
+mod map_err;
+mod from_err;
+mod or_else;
+mod select;
+mod select2;
+mod then;
+mod either;
+mod inspect;
+
+// impl details
+mod chain;
+
+pub use self::and_then::AndThen;
+pub use self::flatten::Flatten;
+pub use self::flatten_stream::FlattenStream;
+pub use self::fuse::Fuse;
+pub use self::into_stream::IntoStream;
+pub use self::join::{Join, Join3, Join4, Join5};
+pub use self::map::Map;
+pub use self::map_err::MapErr;
+pub use self::from_err::FromErr;
+pub use self::or_else::OrElse;
+pub use self::select::{Select, SelectNext};
+pub use self::select2::Select2;
+pub use self::then::Then;
+pub use self::either::Either;
+pub use self::inspect::Inspect;
+
+if_std! {
+ mod catch_unwind;
+ mod join_all;
+ mod select_all;
+ mod select_ok;
+ mod shared;
+ pub use self::catch_unwind::CatchUnwind;
+ pub use self::join_all::{join_all, JoinAll};
+ pub use self::select_all::{SelectAll, SelectAllNext, select_all};
+ pub use self::select_ok::{SelectOk, select_ok};
+ pub use self::shared::{Shared, SharedItem, SharedError};
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use join_all instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use self::join_all::join_all as collect;
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use JoinAll instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use self::join_all::JoinAll as Collect;
+
+ /// A type alias for `Box<Future + Send>`
+ #[doc(hidden)]
+ #[deprecated(note = "removed without replacement, recommended to use a \
+ local extension trait or function if needed, more \
+ details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+ pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>;
+
+ impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
+ type Item = F::Item;
+ type Error = F::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ (**self).poll()
+ }
+ }
+}
+
+use {Poll, stream};
+
+/// Trait for types which are a placeholder of a value that may become
+/// available at some later point in time.
+///
+/// In addition to the documentation here you can also find more information
+/// about futures [online] at [https://tokio.rs](https://tokio.rs)
+///
+/// [online]: https://tokio.rs/docs/getting-started/futures/
+///
+/// Futures are used to provide a sentinel through which a value can be
+/// referenced. They crucially allow chaining and composing operations through
+/// consumption which allows expressing entire trees of computation as one
+/// sentinel value.
+///
+/// The ergonomics and implementation of the `Future` trait are very similar to
+/// the `Iterator` trait in that there is just one methods you need
+/// to implement, but you get a whole lot of others for free as a result.
+///
+/// # The `poll` method
+///
+/// The core method of future, `poll`, is used to attempt to generate the value
+/// of a `Future`. This method *does not block* but is allowed to inform the
+/// caller that the value is not ready yet. Implementations of `poll` may
+/// themselves do work to generate the value, but it's guaranteed that this will
+/// never block the calling thread.
+///
+/// A key aspect of this method is that if the value is not yet available the
+/// current task is scheduled to receive a notification when it's later ready to
+/// be made available. This follows what's typically known as a "readiness" or
+/// "pull" model where values are pulled out of futures on demand, and
+/// otherwise a task is notified when a value might be ready to get pulled out.
+///
+/// The `poll` method is not intended to be called in general, but rather is
+/// typically called in the context of a "task" which drives a future to
+/// completion. For more information on this see the `task` module.
+///
+/// More information about the details of `poll` and the nitty-gritty of tasks
+/// can be [found online at tokio.rs][poll-dox].
+///
+/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
+///
+/// # Combinators
+///
+/// Like iterators, futures provide a large number of combinators to work with
+/// futures to express computations in a much more natural method than
+/// scheduling a number of callbacks. For example the `map` method can change
+/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
+/// create a future after the first one is done and only be resolved when the
+/// second is done.
+///
+/// Combinators act very similarly to the methods on the `Iterator` trait itself
+/// or those on `Option` and `Result`. Like with iterators, the combinators are
+/// zero-cost and don't impose any extra layers of indirection you wouldn't
+/// otherwise have to write down.
+///
+/// More information about combinators can be found [on tokio.rs].
+///
+/// [on tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-mechanics/
+#[must_use = "futures do nothing unless polled"]
+pub trait Future {
+ /// The type of value that this future will resolved with if it is
+ /// successful.
+ type Item;
+
+ /// The type of error that this future will resolve with if it fails in a
+ /// normal fashion.
+ type Error;
+
+ /// Query this future to see if its value has become available, registering
+ /// interest if it is not.
+ ///
+ /// This function will check the internal state of the future and assess
+ /// whether the value is ready to be produced. Implementers of this function
+ /// should ensure that a call to this **never blocks** as event loops may
+ /// not work properly otherwise.
+ ///
+ /// When a future is not ready yet, the `Async::NotReady` value will be
+ /// returned. In this situation the future will *also* register interest of
+ /// the current task in the value being produced. This is done by calling
+ /// `task::park` to retrieve a handle to the current `Task`. When the future
+ /// is then ready to make progress (e.g. it should be `poll`ed again) the
+ /// `unpark` method is called on the `Task`.
+ ///
+ /// More information about the details of `poll` and the nitty-gritty of
+ /// tasks can be [found online at tokio.rs][poll-dox].
+ ///
+ /// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
+ ///
+ /// # Runtime characteristics
+ ///
+ /// This function, `poll`, is the primary method for 'making progress'
+ /// within a tree of futures. For example this method will be called
+ /// repeatedly as the internal state machine makes its various transitions.
+ /// Executors are responsible for ensuring that this function is called in
+ /// the right location (e.g. always on an I/O thread or not). Unless it is
+ /// otherwise arranged to be so, it should be ensured that **implementations
+ /// of this function finish very quickly**.
+ ///
+ /// Returning quickly prevents unnecessarily clogging up threads and/or
+ /// event loops while a `poll` function call, for example, takes up compute
+ /// resources to perform some expensive computation. If it is known ahead
+ /// of time that a call to `poll` may end up taking awhile, the work should
+ /// be offloaded to a thread pool (or something similar) to ensure that
+ /// `poll` can return quickly.
+ ///
+ /// Note that the `poll` function is not called repeatedly in a loop for
+ /// futures typically, but only whenever the future itself is ready. If
+ /// you're familiar with the `poll(2)` or `select(2)` syscalls on Unix
+ /// it's worth noting that futures typically do *not* suffer the same
+ /// problems of "all wakeups must poll all events". Futures have enough
+ /// support for only polling futures which cause a wakeup.
+ ///
+ /// # Return value
+ ///
+ /// This function returns `Async::NotReady` if the future is not ready yet,
+ /// `Err` if the future is finished but resolved to an error, or
+ /// `Async::Ready` with the result of this future if it's finished
+ /// successfully. Once a future has finished it is considered a contract
+ /// error to continue polling the future.
+ ///
+ /// If `NotReady` is returned, then the future will internally register
+ /// interest in the value being produced for the current task (through
+ /// `task::park`). In other words, the current task will receive a
+ /// notification (through the `unpark` method) once the value is ready to be
+ /// produced or the future can make progress.
+ ///
+ /// Note that if `NotReady` is returned it only means that *this* task will
+ /// receive a notification. Historical calls to `poll` with different tasks
+ /// will not receive notifications. In other words, implementers of the
+ /// `Future` trait need not store a queue of tasks to notify, but only the
+ /// last task that called this method. Alternatively callers of this method
+ /// can only rely on the most recent task which call `poll` being notified
+ /// when a future is ready.
+ ///
+ /// # Panics
+ ///
+ /// Once a future has completed (returned `Ready` or `Err` from `poll`),
+ /// then any future calls to `poll` may panic, block forever, or otherwise
+ /// cause wrong behavior. The `Future` trait itself provides no guarantees
+ /// about the behavior of `poll` after a future has completed.
+ ///
+ /// Callers who may call `poll` too many times may want to consider using
+ /// the `fuse` adaptor which defines the behavior of `poll`, but comes with
+ /// a little bit of extra cost.
+ ///
+ /// Additionally, calls to `poll` must always be made from within the
+ /// context of a task. If a current task is not set then this method will
+ /// likely panic.
+ ///
+ /// # Errors
+ ///
+ /// This future may have failed to finish the computation, in which case
+ /// the `Err` variant will be returned with an appropriate payload of an
+ /// error.
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error>;
+
+ /// Block the current thread until this future is resolved.
+ ///
+ /// This method will consume ownership of this future, driving it to
+ /// completion via `poll` and blocking the current thread while it's waiting
+ /// for the value to become available. Once the future is resolved the
+ /// result of this future is returned.
+ ///
+ /// > **Note:** This method is not appropriate to call on event loops or
+ /// > similar I/O situations because it will prevent the event
+ /// > loop from making progress (this blocks the thread). This
+ /// > method should only be called when it's guaranteed that the
+ /// > blocking work associated with this future will be completed
+ /// > by another thread.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Panics
+ ///
+ /// This function does not attempt to catch panics. If the `poll` function
+ /// of this future panics, panics will be propagated to the caller.
+ #[cfg(feature = "use_std")]
+ fn wait(self) -> result::Result<Self::Item, Self::Error>
+ where Self: Sized
+ {
+ ::executor::spawn(self).wait_future()
+ }
+
+ /// Convenience function for turning this future into a trait object which
+ /// is also `Send`.
+ ///
+ /// This simply avoids the need to write `Box::new` and can often help with
+ /// type inference as well by always returning a trait object. Note that
+ /// this method requires the `Send` bound and returns a `BoxFuture`, which
+ /// also encodes this. If you'd like to create a `Box<Future>` without the
+ /// `Send` bound, then the `Box::new` function can be used instead.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future::{BoxFuture, result};
+ ///
+ /// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed();
+ /// ```
+ #[cfg(feature = "use_std")]
+ #[doc(hidden)]
+ #[deprecated(note = "removed without replacement, recommended to use a \
+ local extension trait or function if needed, more \
+ details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+ #[allow(deprecated)]
+ fn boxed(self) -> BoxFuture<Self::Item, Self::Error>
+ where Self: Sized + Send + 'static
+ {
+ ::std::boxed::Box::new(self)
+ }
+
+ /// Map this future's result to a different type, returning a new future of
+ /// the resulting type.
+ ///
+ /// This function is similar to the `Option::map` or `Iterator::map` where
+ /// it will change the type of the underlying future. This is useful to
+ /// chain along a computation once a future has been resolved.
+ ///
+ /// The closure provided will only be called if this future is resolved
+ /// successfully. If this future returns an error, panics, or is dropped,
+ /// then the closure provided will never be invoked.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it, similar to the existing `map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::ok::<u32, u32>(1);
+ /// let new_future = future.map(|x| x + 3);
+ /// assert_eq!(new_future.wait(), Ok(4));
+ /// ```
+ ///
+ /// Calling `map` on an errored `Future` has no effect:
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::err::<u32, u32>(1);
+ /// let new_future = future.map(|x| x + 3);
+ /// assert_eq!(new_future.wait(), Err(1));
+ /// ```
+ fn map<F, U>(self, f: F) -> Map<Self, F>
+ where F: FnOnce(Self::Item) -> U,
+ Self: Sized,
+ {
+ assert_future::<U, Self::Error, _>(map::new(self, f))
+ }
+
+ /// Map this future's error to a different error, returning a new future.
+ ///
+ /// This function is similar to the `Result::map_err` where it will change
+ /// the error type of the underlying future. This is useful for example to
+ /// ensure that futures have the same error type when used with combinators
+ /// like `select` and `join`.
+ ///
+ /// The closure provided will only be called if this future is resolved
+ /// with an error. If this future returns a success, panics, or is
+ /// dropped, then the closure provided will never be invoked.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::future::*;
+ ///
+ /// let future = err::<u32, u32>(1);
+ /// let new_future = future.map_err(|x| x + 3);
+ /// assert_eq!(new_future.wait(), Err(4));
+ /// ```
+ ///
+ /// Calling `map_err` on a successful `Future` has no effect:
+ ///
+ /// ```
+ /// use futures::future::*;
+ ///
+ /// let future = ok::<u32, u32>(1);
+ /// let new_future = future.map_err(|x| x + 3);
+ /// assert_eq!(new_future.wait(), Ok(1));
+ /// ```
+ fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
+ where F: FnOnce(Self::Error) -> E,
+ Self: Sized,
+ {
+ assert_future::<Self::Item, E, _>(map_err::new(self, f))
+ }
+
+
+
+ /// Map this future's error to any error implementing `From` for
+ /// this future's `Error`, returning a new future.
+ ///
+ /// This function does for futures what `try!` does for `Result`,
+ /// by letting the compiler infer the type of the resulting error.
+ /// Just as `map_err` above, this is useful for example to ensure
+ /// that futures have the same error type when used with
+ /// combinators like `select` and `join`.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future_with_err_u8 = future::err::<(), u8>(1);
+ /// let future_with_err_u32 = future_with_err_u8.from_err::<u32>();
+ /// ```
+ fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E>
+ where Self: Sized,
+ {
+ assert_future::<Self::Item, E, _>(from_err::new(self))
+ }
+
+ /// Chain on a computation for when a future finished, passing the result of
+ /// the future to the provided closure `f`.
+ ///
+ /// This function can be used to ensure a computation runs regardless of
+ /// the conclusion of the future. The closure provided will be yielded a
+ /// `Result` once the future is complete.
+ ///
+ /// The returned value of the closure must implement the `IntoFuture` trait
+ /// and can represent some more work to be done before the composed future
+ /// is finished. Note that the `Result` type implements the `IntoFuture`
+ /// trait so it is possible to simply alter the `Result` yielded to the
+ /// closure and return it.
+ ///
+ /// If this future is dropped or panics then the closure `f` will not be
+ /// run.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future_of_1 = future::ok::<u32, u32>(1);
+ /// let future_of_4 = future_of_1.then(|x| {
+ /// x.map(|y| y + 3)
+ /// });
+ ///
+ /// let future_of_err_1 = future::err::<u32, u32>(1);
+ /// let future_of_4 = future_of_err_1.then(|x| {
+ /// match x {
+ /// Ok(_) => panic!("expected an error"),
+ /// Err(y) => future::ok::<u32, u32>(y + 3),
+ /// }
+ /// });
+ /// ```
+ fn then<F, B>(self, f: F) -> Then<Self, B, F>
+ where F: FnOnce(result::Result<Self::Item, Self::Error>) -> B,
+ B: IntoFuture,
+ Self: Sized,
+ {
+ assert_future::<B::Item, B::Error, _>(then::new(self, f))
+ }
+
+ /// Execute another future after this one has resolved successfully.
+ ///
+ /// This function can be used to chain two futures together and ensure that
+ /// the final future isn't resolved until both have finished. The closure
+ /// provided is yielded the successful result of this future and returns
+ /// another value which can be converted into a future.
+ ///
+ /// Note that because `Result` implements the `IntoFuture` trait this method
+ /// can also be useful for chaining fallible and serial computations onto
+ /// the end of one future.
+ ///
+ /// If this future is dropped, panics, or completes with an error then the
+ /// provided closure `f` is never called.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future::{self, FutureResult};
+ ///
+ /// let future_of_1 = future::ok::<u32, u32>(1);
+ /// let future_of_4 = future_of_1.and_then(|x| {
+ /// Ok(x + 3)
+ /// });
+ ///
+ /// let future_of_err_1 = future::err::<u32, u32>(1);
+ /// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> {
+ /// panic!("should not be called in case of an error");
+ /// });
+ /// ```
+ fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
+ where F: FnOnce(Self::Item) -> B,
+ B: IntoFuture<Error = Self::Error>,
+ Self: Sized,
+ {
+ assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
+ }
+
+ /// Execute another future if this one resolves with an error.
+ ///
+ /// Return a future that passes along this future's value if it succeeds,
+ /// and otherwise passes the error to the closure `f` and waits for the
+ /// future it returns. The closure may also simply return a value that can
+ /// be converted into a future.
+ ///
+ /// Note that because `Result` implements the `IntoFuture` trait this method
+ /// can also be useful for chaining together fallback computations, where
+ /// when one fails, the next is attempted.
+ ///
+ /// If this future is dropped, panics, or completes successfully then the
+ /// provided closure `f` is never called.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future::{self, FutureResult};
+ ///
+ /// let future_of_err_1 = future::err::<u32, u32>(1);
+ /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
+ /// Ok(x + 3)
+ /// });
+ ///
+ /// let future_of_1 = future::ok::<u32, u32>(1);
+ /// future_of_1.or_else(|_| -> FutureResult<u32, u32> {
+ /// panic!("should not be called in case of success");
+ /// });
+ /// ```
+ fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
+ where F: FnOnce(Self::Error) -> B,
+ B: IntoFuture<Item = Self::Item>,
+ Self: Sized,
+ {
+ assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
+ }
+
+ /// Waits for either one of two futures to complete.
+ ///
+ /// This function will return a new future which awaits for either this or
+ /// the `other` future to complete. The returned future will finish with
+ /// both the value resolved and a future representing the completion of the
+ /// other work. Both futures must have the same item and error type.
+ ///
+ /// Note that this function consumes the receiving futures and returns a
+ /// wrapped version of them.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use futures::prelude::*;
+ /// use futures::future;
+ /// use std::thread;
+ /// use std::time;
+ ///
+ /// let future1 = future::lazy(|| {
+ /// thread::sleep(time::Duration::from_secs(5));
+ /// future::ok::<char, ()>('a')
+ /// });
+ ///
+ /// let future2 = future::lazy(|| {
+ /// thread::sleep(time::Duration::from_secs(3));
+ /// future::ok::<char, ()>('b')
+ /// });
+ ///
+ /// let (value, last_future) = future1.select(future2).wait().ok().unwrap();
+ /// assert_eq!(value, 'a');
+ /// assert_eq!(last_future.wait().unwrap(), 'b');
+ /// ```
+ ///
+ /// A poor-man's `join` implemented on top of `select`:
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// fn join<A>(a: A, b: A) -> Box<Future<Item=(u32, u32), Error=u32>>
+ /// where A: Future<Item = u32, Error = u32> + 'static,
+ /// {
+ /// Box::new(a.select(b).then(|res| -> Box<Future<Item=_, Error=_>> {
+ /// match res {
+ /// Ok((a, b)) => Box::new(b.map(move |b| (a, b))),
+ /// Err((a, _)) => Box::new(future::err(a)),
+ /// }
+ /// }))
+ /// }
+ /// ```
+ fn select<B>(self, other: B) -> Select<Self, B::Future>
+ where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
+ Self: Sized,
+ {
+ let f = select::new(self, other.into_future());
+ assert_future::<(Self::Item, SelectNext<Self, B::Future>),
+ (Self::Error, SelectNext<Self, B::Future>), _>(f)
+ }
+
+ /// Waits for either one of two differently-typed futures to complete.
+ ///
+ /// This function will return a new future which awaits for either this or
+ /// the `other` future to complete. The returned future will finish with
+ /// both the value resolved and a future representing the completion of the
+ /// other work.
+ ///
+ /// Note that this function consumes the receiving futures and returns a
+ /// wrapped version of them.
+ ///
+ /// Also note that if both this and the second future have the same
+ /// success/error type you can use the `Either::split` method to
+ /// conveniently extract out the value at the end.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future::{self, Either};
+ ///
+ /// // A poor-man's join implemented on top of select2
+ ///
+ /// fn join<A, B, E>(a: A, b: B) -> Box<Future<Item=(A::Item, B::Item), Error=E>>
+ /// where A: Future<Error = E> + 'static,
+ /// B: Future<Error = E> + 'static,
+ /// E: 'static,
+ /// {
+ /// Box::new(a.select2(b).then(|res| -> Box<Future<Item=_, Error=_>> {
+ /// match res {
+ /// Ok(Either::A((x, b))) => Box::new(b.map(move |y| (x, y))),
+ /// Ok(Either::B((y, a))) => Box::new(a.map(move |x| (x, y))),
+ /// Err(Either::A((e, _))) => Box::new(future::err(e)),
+ /// Err(Either::B((e, _))) => Box::new(future::err(e)),
+ /// }
+ /// }))
+ /// }
+ /// ```
+    fn select2<B>(self, other: B) -> Select2<Self, B::Future>
+        where B: IntoFuture, Self: Sized
+    {
+        // Unlike `select`, the two futures may differ in item/error types;
+        // the winner is reported through `Either::A` / `Either::B`.
+        select2::new(self, other.into_future())
+    }
+
+ /// Joins the result of two futures, waiting for them both to complete.
+ ///
+ /// This function will return a new future which awaits both this and the
+ /// `other` future to complete. The returned future will finish with a tuple
+ /// of both results.
+ ///
+ /// Both futures must have the same error type, and if either finishes with
+ /// an error then the other will be dropped and that error will be
+ /// returned.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let a = future::ok::<u32, u32>(1);
+ /// let b = future::ok::<u32, u32>(2);
+ /// let pair = a.join(b);
+ ///
+ /// assert_eq!(pair.wait(), Ok((1, 2)));
+ /// ```
+ ///
+ /// If one or both of the joined `Future`s is errored, the resulting
+ /// `Future` will be errored:
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let a = future::ok::<u32, u32>(1);
+ /// let b = future::err::<u32, u32>(2);
+ /// let pair = a.join(b);
+ ///
+ /// assert_eq!(pair.wait(), Err(2));
+ /// ```
+    fn join<B>(self, other: B) -> Join<Self, B::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        let f = join::new(self, other.into_future());
+        // Compile-time check that the combinator yields `(A::Item, B::Item)`.
+        assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
+    }
+
+    /// Same as `join`, but with more futures: resolves to a triple of items.
+    fn join3<B, C>(self, b: B, c: C) -> Join3<Self, B::Future, C::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new3(self, b.into_future(), c.into_future())
+    }
+
+    /// Same as `join`, but with more futures: resolves to a 4-tuple of items.
+    fn join4<B, C, D>(self, b: B, c: C, d: D)
+             -> Join4<Self, B::Future, C::Future, D::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              D: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new4(self, b.into_future(), c.into_future(), d.into_future())
+    }
+
+    /// Same as `join`, but with more futures: resolves to a 5-tuple of items.
+    fn join5<B, C, D, E>(self, b: B, c: C, d: D, e: E)
+             -> Join5<Self, B::Future, C::Future, D::Future, E::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              C: IntoFuture<Error=Self::Error>,
+              D: IntoFuture<Error=Self::Error>,
+              E: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        join::new5(self, b.into_future(), c.into_future(), d.into_future(),
+                   e.into_future())
+    }
+
+ /// Convert this future into a single element stream.
+ ///
+ /// The returned stream contains single success if this future resolves to
+ /// success or single error if this future resolves into error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::ok::<_, bool>(17);
+ /// let mut stream = future.into_stream();
+ /// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+ /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+ ///
+ /// let future = future::err::<bool, _>(19);
+ /// let mut stream = future.into_stream();
+ /// assert_eq!(Err(19), stream.poll());
+ /// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+ /// ```
+    fn into_stream(self) -> IntoStream<Self>
+        where Self: Sized
+    {
+        // The resulting stream yields the future's single result, then ends.
+        into_stream::new(self)
+    }
+
+ /// Flatten the execution of this future when the successful result of this
+ /// future is itself another future.
+ ///
+ /// This can be useful when combining futures together to flatten the
+ /// computation out the final result. This method can only be called
+ /// when the successful result of this future itself implements the
+ /// `IntoFuture` trait and the error can be created from this future's error
+ /// type.
+ ///
+ /// This method is roughly equivalent to `self.and_then(|x| x)`.
+ ///
+ /// Note that this function consumes the receiving future and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let nested_future = future::ok::<_, u32>(future::ok::<u32, u32>(1));
+ /// let future = nested_future.flatten();
+ /// assert_eq!(future.wait(), Ok(1));
+ /// ```
+ ///
+ /// Calling `flatten` on an errored `Future`, or if the inner `Future` is
+ /// errored, will result in an errored `Future`:
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let nested_future = future::ok::<_, u32>(future::err::<u32, u32>(1));
+ /// let future = nested_future.flatten();
+ /// assert_eq!(future.wait(), Err(1));
+ /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: IntoFuture,
+              <<Self as Future>::Item as IntoFuture>::Error:
+                    From<<Self as Future>::Error>,
+              Self: Sized
+    {
+        let f = flatten::new(self);
+        // Compile-time check: the flattened future has the inner future's
+        // item/error types (the outer error converts via `From`).
+        assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
+                        <<Self as Future>::Item as IntoFuture>::Error,
+                        _>(f)
+    }
+
+ /// Flatten the execution of this future when the successful result of this
+ /// future is a stream.
+ ///
+ /// This can be useful when stream initialization is deferred, and it is
+ /// convenient to work with that stream as if stream was available at the
+ /// call site.
+ ///
+ /// Note that this function consumes this future and returns a wrapped
+ /// version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ /// use futures::stream;
+ ///
+ /// let stream_items = vec![17, 18, 19];
+ /// let future_of_a_stream = future::ok::<_, bool>(stream::iter_ok(stream_items));
+ ///
+ /// let stream = future_of_a_stream.flatten_stream();
+ ///
+ /// let mut iter = stream.wait();
+ /// assert_eq!(Ok(17), iter.next().unwrap());
+ /// assert_eq!(Ok(18), iter.next().unwrap());
+ /// assert_eq!(Ok(19), iter.next().unwrap());
+ /// assert_eq!(None, iter.next());
+ /// ```
+    fn flatten_stream(self) -> FlattenStream<Self>
+        where <Self as Future>::Item: stream::Stream<Error=Self::Error>,
+              Self: Sized
+    {
+        // The future's error type must match the inner stream's error type.
+        flatten_stream::new(self)
+    }
+
+ /// Fuse a future such that `poll` will never again be called once it has
+ /// completed.
+ ///
+ /// Currently once a future has returned `Ready` or `Err` from
+ /// `poll` any further calls could exhibit bad behavior such as blocking
+ /// forever, panicking, never returning, etc. If it is known that `poll`
+ /// may be called too often then this method can be used to ensure that it
+ /// has defined semantics.
+ ///
+ /// Once a future has been `fuse`d and it returns a completion from `poll`,
+ /// then it will forever return `NotReady` from `poll` again (never
+ /// resolve). This, unlike the trait's `poll` method, is guaranteed.
+ ///
+ /// This combinator will drop this future as soon as it's been completed to
+ /// ensure resources are reclaimed as soon as possible.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let mut future = future::ok::<i32, u32>(2);
+ /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
+ ///
+ /// // Normally, a call such as this would panic:
+ /// //future.poll();
+ ///
+ /// // This, however, is guaranteed to not panic
+ /// let mut future = future::ok::<i32, u32>(2).fuse();
+ /// assert_eq!(future.poll(), Ok(Async::Ready(2)));
+ /// assert_eq!(future.poll(), Ok(Async::NotReady));
+ /// ```
+    fn fuse(self) -> Fuse<Self>
+        where Self: Sized
+    {
+        let f = fuse::new(self);
+        // Compile-time check that item/error types are unchanged by fusing.
+        assert_future::<Self::Item, Self::Error, _>(f)
+    }
+
+ /// Do something with the item of a future, passing it on.
+ ///
+ /// When using futures, you'll often chain several of them together.
+ /// While working on such code, you might want to check out what's happening at
+ /// various parts in the pipeline. To do that, insert a call to inspect().
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::ok::<u32, u32>(1);
+ /// let new_future = future.inspect(|&x| println!("about to resolve: {}", x));
+ /// assert_eq!(new_future.wait(), Ok(1));
+ /// ```
+    fn inspect<F>(self, f: F) -> Inspect<Self, F>
+        where F: FnOnce(&Self::Item) -> (),
+              Self: Sized,
+    {
+        // `f` only borrows the item; the value is passed through unchanged.
+        assert_future::<Self::Item, Self::Error, _>(inspect::new(self, f))
+    }
+
+ /// Catches unwinding panics while polling the future.
+ ///
+ /// In general, panics within a future can propagate all the way out to the
+ /// task level. This combinator makes it possible to halt unwinding within
+ /// the future itself. It's most commonly used within task executors. It's
+ /// not recommended to use this for error handling.
+ ///
+ /// Note that this method requires the `UnwindSafe` bound from the standard
+ /// library. This isn't always applied automatically, and the standard
+ /// library provides an `AssertUnwindSafe` wrapper type to apply it
+ /// after-the fact. To assist using this method, the `Future` trait is also
+ /// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use futures::prelude::*;
+ /// use futures::future::{self, FutureResult};
+ ///
+ /// let mut future = future::ok::<i32, u32>(2);
+ /// assert!(future.catch_unwind().wait().is_ok());
+ ///
+ /// let mut future = future::lazy(|| -> FutureResult<i32, u32> {
+ /// panic!();
+ /// future::ok::<i32, u32>(2)
+ /// });
+ /// assert!(future.catch_unwind().wait().is_err());
+ /// ```
+    // Requires `std` for `panic::catch_unwind`, hence the feature gate.
+    #[cfg(feature = "use_std")]
+    fn catch_unwind(self) -> CatchUnwind<Self>
+        where Self: Sized + ::std::panic::UnwindSafe
+    {
+        catch_unwind::new(self)
+    }
+
+ /// Create a cloneable handle to this future where all handles will resolve
+ /// to the same result.
+ ///
+ /// The shared() method provides a method to convert any future into a
+ /// cloneable future. It enables a future to be polled by multiple threads.
+ ///
+ /// The returned `Shared` future resolves successfully with
+ /// `SharedItem<Self::Item>` or erroneously with `SharedError<Self::Error>`.
+ /// Both `SharedItem` and `SharedError` implements `Deref` to allow shared
+ /// access to the underlying result. Ownership of `Self::Item` and
+ /// `Self::Error` cannot currently be reclaimed.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::ok::<_, bool>(6);
+ /// let shared1 = future.shared();
+ /// let shared2 = shared1.clone();
+ /// assert_eq!(6, *shared1.wait().unwrap());
+ /// assert_eq!(6, *shared2.wait().unwrap());
+ /// ```
+ ///
+ /// ```
+ /// use std::thread;
+ /// use futures::prelude::*;
+ /// use futures::future;
+ ///
+ /// let future = future::ok::<_, bool>(6);
+ /// let shared1 = future.shared();
+ /// let shared2 = shared1.clone();
+ /// let join_handle = thread::spawn(move || {
+ /// assert_eq!(6, *shared2.wait().unwrap());
+ /// });
+ /// assert_eq!(6, *shared1.wait().unwrap());
+ /// join_handle.join().unwrap();
+ /// ```
+    // Requires `std` (Arc/Mutex based implementation), hence the feature gate.
+    #[cfg(feature = "use_std")]
+    fn shared(self) -> Shared<Self>
+        where Self: Sized
+    {
+        shared::new(self)
+    }
+}
+
+// A mutable reference to a future is itself a future: polling simply
+// delegates to the referent. `?Sized` also permits `&mut dyn Future`.
+impl<'a, F: ?Sized + Future> Future for &'a mut F {
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        (**self).poll()
+    }
+}
+
+// Just a helper function to ensure the futures we're returning all have the
+// right implementations. It is the identity at runtime; the generic bounds
+// make the compiler verify `F: Future<Item=A, Error=B>` at the call site.
+fn assert_future<A, B, F>(t: F) -> F
+    where F: Future<Item=A, Error=B>,
+{
+    t
+}
+
+/// Class of types which can be converted into a future.
+///
+/// This trait is very similar to the `IntoIterator` trait and is intended to be
+/// used in a very similar fashion.
+pub trait IntoFuture {
+    /// The future that this type can be converted into.
+    // The associated future must agree with this trait's `Item`/`Error`.
+    type Future: Future<Item=Self::Item, Error=Self::Error>;
+
+    /// The item that the future may resolve with.
+    type Item;
+    /// The error that the future may resolve with.
+    type Error;
+
+    /// Consumes this object and produces a future.
+    fn into_future(self) -> Self::Future;
+}
+
+// Every future trivially converts into itself; this lets combinators accept
+// either a `Future` or anything `IntoFuture` interchangeably.
+impl<F: Future> IntoFuture for F {
+    type Future = F;
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn into_future(self) -> F {
+        self
+    }
+}
+
+// A plain `Result` converts into an immediately-resolved future, which is why
+// closures passed to `and_then`/`or_else` etc. may return a bare `Result`.
+impl<T, E> IntoFuture for result::Result<T, E> {
+    type Future = FutureResult<T, E>;
+    type Item = T;
+    type Error = E;
+
+    fn into_future(self) -> FutureResult<T, E> {
+        result(self)
+    }
+}
+
+/// Asynchronous conversion from a type `T`.
+///
+/// This trait is analogous to `std::convert::From`, adapted to asynchronous
+/// computation.
+pub trait FutureFrom<T>: Sized {
+    /// The future for the conversion.
+    type Future: Future<Item=Self, Error=Self::Error>;
+
+    /// Possible errors during conversion.
+    type Error;
+
+    /// Consume the given value, beginning the conversion.
+    // Note: the anonymous trait-method parameter (`T` with no name) is
+    // 2015-edition syntax; this crate predates the 2018 edition.
+    fn future_from(T) -> Self::Future;
+}
+
+/// A trait for types which can spawn fresh futures.
+///
+/// This trait is typically implemented for "executors", or those types which
+/// can execute futures to completion. Futures passed to `Spawn::spawn`
+/// typically get turned into a *task* and are then driven to completion.
+///
+/// On spawn, the executor takes ownership of the future and becomes responsible
+/// to call `Future::poll()` whenever a readiness notification is raised.
+// Only `()`-typed futures can be spawned: the executor has nowhere to send a
+// result or an error, so both must already be handled inside the future.
+pub trait Executor<F: Future<Item = (), Error = ()>> {
+    /// Spawns a future to run on this `Executor`, typically in the
+    /// "background".
+    ///
+    /// This function will return immediately, and schedule the future `future`
+    /// to run on `self`. The details of scheduling and execution are left to
+    /// the implementations of `Executor`, but this is typically a primary point
+    /// for injecting concurrency in a futures-based system. Futures spawned
+    /// through this `execute` function tend to run concurrently while they're
+    /// waiting on events.
+    ///
+    /// # Errors
+    ///
+    /// Implementers of this trait are allowed to reject accepting this future
+    /// as well. This can happen for various reason such as:
+    ///
+    /// * The executor is shut down
+    /// * The executor has run out of capacity to execute futures
+    ///
+    /// The decision is left to the caller how to work with this form of error.
+    /// The error returned transfers ownership of the future back to the caller.
+    fn execute(&self, future: F) -> Result<(), ExecuteError<F>>;
+}
+
+/// Errors returned from the `Spawn::spawn` function.
+// Carries the rejected future back to the caller (see `into_future`) along
+// with the reason for rejection.
+pub struct ExecuteError<F> {
+    future: F,
+    kind: ExecuteErrorKind,
+}
+
+/// Kinds of errors that can be returned from the `Execute::spawn` function.
+///
+/// Executors which may not always be able to accept a future may return one of
+/// these errors, indicating why it was unable to spawn a future.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum ExecuteErrorKind {
+    /// This executor has shut down and will no longer accept new futures to
+    /// spawn.
+    Shutdown,
+
+    /// This executor has no more capacity to run more futures. Other futures
+    /// need to finish before this executor can accept another.
+    NoCapacity,
+
+    // Hidden variant reserving the right to add kinds without a breaking
+    // change (predates the `#[non_exhaustive]` attribute).
+    #[doc(hidden)]
+    __Nonexhaustive,
+}
+
+impl<F> ExecuteError<F> {
+    /// Create a new `ExecuteError`
+    pub fn new(kind: ExecuteErrorKind, future: F) -> ExecuteError<F> {
+        ExecuteError {
+            future: future,
+            kind: kind,
+        }
+    }
+
+    /// Returns the associated reason for the error
+    // `ExecuteErrorKind` is `Copy`, so this returns it by value.
+    pub fn kind(&self) -> ExecuteErrorKind {
+        self.kind
+    }
+
+    /// Consumes self and returns the original future that was spawned.
+    // This is how ownership of a rejected future is handed back to the caller.
+    pub fn into_future(self) -> F {
+        self.future
+    }
+}
+
+// Manual `Debug` impl: `F` need not be `Debug`, so only the kind is printed.
+impl<F> fmt::Debug for ExecuteError<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.kind {
+            ExecuteErrorKind::Shutdown => "executor has shut down".fmt(f),
+            ExecuteErrorKind::NoCapacity => "executor has no more capacity".fmt(f),
+            // `__Nonexhaustive` is never constructed by this crate.
+            ExecuteErrorKind::__Nonexhaustive => panic!(),
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/option.rs b/third_party/rust/futures-0.1.31/src/future/option.rs
new file mode 100644
index 0000000000..1b204d376a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/option.rs
@@ -0,0 +1,15 @@
+//! Definition of the `Option` (optional step) combinator
+
+use {Future, Poll, Async};
+
+// An `Option<F>` is itself a future: `None` resolves immediately with
+// `Ok(Ready(None))`, while `Some(f)` polls `f` and wraps a ready item in
+// `Some`. Errors from the inner future propagate unchanged.
+impl<F, T, E> Future for Option<F> where F: Future<Item=T, Error=E> {
+    type Item = Option<T>;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<T>, E> {
+        match *self {
+            None => Ok(Async::Ready(None)),
+            Some(ref mut x) => x.poll().map(|x| x.map(Some)),
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/or_else.rs b/third_party/rust/futures-0.1.31/src/future/or_else.rs
new file mode 100644
index 0000000000..bc134137af
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/or_else.rs
@@ -0,0 +1,39 @@
+use {Future, IntoFuture, Poll};
+use super::chain::Chain;
+
+/// Future for the `or_else` combinator, chaining a computation onto the end of
+/// a future which fails with an error.
+///
+/// This is created by the `Future::or_else` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct OrElse<A, B, F> where A: Future, B: IntoFuture {
+    // Shared two-phase state machine: first drive `A`, then (on error) `B`.
+    state: Chain<A, B::Future, F>,
+}
+
+// Crate-internal constructor used by `Future::or_else`.
+pub fn new<A, B, F>(future: A, f: F) -> OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+{
+    OrElse {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+          F: FnOnce(A::Error) -> B,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self) -> Poll<B::Item, B::Error> {
+        // The closure tells `Chain` what to do once `A` resolves:
+        // pass a success straight through, or on error run `f` and
+        // continue with the future it produces.
+        self.state.poll(|a, f| {
+            match a {
+                Ok(item) => Ok(Ok(item)),
+                Err(e) => Ok(Err(f(e).into_future()))
+            }
+        })
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/poll_fn.rs b/third_party/rust/futures-0.1.31/src/future/poll_fn.rs
new file mode 100644
index 0000000000..d96bf2f98d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/poll_fn.rs
@@ -0,0 +1,45 @@
+//! Definition of the `PollFn` adapter combinator
+
+use {Future, Poll};
+
+/// A future which adapts a function returning `Poll`.
+///
+/// Created by the `poll_fn` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct PollFn<F> {
+    inner: F,
+}
+
+/// Creates a new future wrapping around a function returning `Poll`.
+///
+/// Polling the returned future delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::poll_fn;
+/// use futures::{Async, Poll};
+///
+/// fn read_line() -> Poll<String, std::io::Error> {
+///     Ok(Async::Ready("Hello, World!".into()))
+/// }
+///
+/// let read_future = poll_fn(read_line);
+/// ```
+// `FnMut` (not `FnOnce`): the function may be polled repeatedly until ready.
+pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
+    where F: FnMut() -> ::Poll<T, E>
+{
+    PollFn { inner: f }
+}
+
+impl<T, E, F> Future for PollFn<F>
+    where F: FnMut() -> Poll<T, E>
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        // Polling is a direct call of the stored closure.
+        (self.inner)()
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/result.rs b/third_party/rust/futures-0.1.31/src/future/result.rs
new file mode 100644
index 0000000000..5c44a63e1f
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/result.rs
@@ -0,0 +1,81 @@
+//! Definition of the `Result` (immediately finished) combinator
+
+use core::result;
+
+use {Future, Poll, Async};
+
+/// A future representing a value that is immediately ready.
+///
+/// Created by the `result` function.
+#[derive(Debug, Clone)]
+#[must_use = "futures do nothing unless polled"]
+// TODO: rename this to `Result` on the next major version
+pub struct FutureResult<T, E> {
+    // `Some` until first polled; `None` afterwards (see `poll`).
+    inner: Option<result::Result<T, E>>,
+}
+
+/// Creates a new "leaf future" which will resolve with the given result.
+///
+/// The returned future represents a computation which is finished immediately.
+/// This can be useful with the `finished` and `failed` base future types to
+/// convert an immediate value to a future to interoperate elsewhere.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_1 = result::<u32, u32>(Ok(1));
+/// let future_of_err_2 = result::<u32, u32>(Err(2));
+/// ```
+pub fn result<T, E>(r: result::Result<T, E>) -> FutureResult<T, E> {
+    FutureResult { inner: Some(r) }
+}
+
+/// Creates a "leaf future" from an immediate value of a finished and
+/// successful computation.
+///
+/// The returned future is similar to `result` where it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_1 = ok::<u32, u32>(1);
+/// ```
+pub fn ok<T, E>(t: T) -> FutureResult<T, E> {
+    result(Ok(t))
+}
+
+/// Creates a "leaf future" from an immediate value of a failed computation.
+///
+/// The returned future is similar to `result` where it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::future::*;
+///
+/// let future_of_err_1 = err::<u32, u32>(1);
+/// ```
+pub fn err<T, E>(e: E) -> FutureResult<T, E> {
+    result(Err(e))
+}
+
+impl<T, E> Future for FutureResult<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        // `take` moves the value out so a second poll panics — enforcing the
+        // "futures are polled at most once after completion" contract.
+        self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
+    }
+}
+
+// Mirrors the `IntoFuture` impl for `Result` in `future/mod.rs`.
+impl<T, E> From<Result<T, E>> for FutureResult<T, E> {
+    fn from(r: Result<T, E>) -> Self {
+        result(r)
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/select.rs b/third_party/rust/futures-0.1.31/src/future/select.rs
new file mode 100644
index 0000000000..c48e1c0a1e
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/select.rs
@@ -0,0 +1,86 @@
+use {Future, Poll, Async};
+
+/// Future for the `select` combinator, waiting for one of two futures to
+/// complete.
+///
+/// This is created by the `Future::select` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Select<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    // `Some` until one side completes; `None` afterwards (see `poll`).
+    inner: Option<(A, B)>,
+}
+
+/// Future yielded as the second result in a `Select` future.
+///
+/// This sentinel future represents the completion of the second future to a
+/// `select` which finished second.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectNext<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    inner: OneOf<A, B>,
+}
+
+// Which of the two futures is still pending after the other finished.
+#[derive(Debug)]
+enum OneOf<A, B> where A: Future, B: Future {
+    A(A),
+    B(B),
+}
+
+// Crate-internal constructor used by `Future::select`.
+pub fn new<A, B>(a: A, b: B) -> Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>
+{
+    Select {
+        inner: Some((a, b)),
+    }
+}
+
+impl<A, B> Future for Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = (A::Item, SelectNext<A, B>);
+    type Error = (A::Error, SelectNext<A, B>);
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // Poll `a` first, then `b`; `is_a` records which side completed so we
+        // know which future is left over for the `SelectNext` sentinel.
+        let (ret, is_a) = match self.inner {
+            Some((ref mut a, ref mut b)) => {
+                match a.poll() {
+                    Err(a) => (Err(a), true),
+                    Ok(Async::Ready(a)) => (Ok(a), true),
+                    Ok(Async::NotReady) => {
+                        match b.poll() {
+                            Err(a) => (Err(a), false),
+                            Ok(Async::Ready(a)) => (Ok(a), false),
+                            Ok(Async::NotReady) => return Ok(Async::NotReady),
+                        }
+                    }
+                }
+            }
+            None => panic!("cannot poll select twice"),
+        };
+
+        // `inner` was only borrowed above, so it is still `Some` here.
+        let (a, b) = self.inner.take().unwrap();
+        let next = if is_a {OneOf::B(b)} else {OneOf::A(a)};
+        let next = SelectNext { inner: next };
+        match ret {
+            Ok(a) => Ok(Async::Ready((a, next))),
+            Err(e) => Err((e, next)),
+        }
+    }
+}
+
+// The leftover future simply forwards polls to whichever side remains.
+impl<A, B> Future for SelectNext<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        match self.inner {
+            OneOf::A(ref mut a) => a.poll(),
+            OneOf::B(ref mut b) => b.poll(),
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/select2.rs b/third_party/rust/futures-0.1.31/src/future/select2.rs
new file mode 100644
index 0000000000..073f67be4a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/select2.rs
@@ -0,0 +1,39 @@
+use {Future, Poll, Async};
+use future::Either;
+
+/// Future for the `select2` combinator, waiting for one of two differently-typed
+/// futures to complete.
+///
+/// This is created by the [`Future::select2`] method.
+///
+/// [`Future::select2`]: trait.Future.html#method.select2
+#[must_use = "futures do nothing unless polled"]
+#[derive(Debug)]
+pub struct Select2<A, B> {
+    // `Some` while both futures are pending; consumed on completion.
+    inner: Option<(A, B)>,
+}
+
+// Crate-internal constructor used by `Future::select2`.
+pub fn new<A, B>(a: A, b: B) -> Select2<A, B> {
+    Select2 { inner: Some((a, b)) }
+}
+
+impl<A, B> Future for Select2<A, B> where A: Future, B: Future {
+    type Item = Either<(A::Item, B), (B::Item, A)>;
+    type Error = Either<(A::Error, B), (B::Error, A)>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // Take ownership of both futures so the loser can be moved into the
+        // `Either` result; it is put back only if neither side is ready.
+        let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice");
+        match a.poll() {
+            Err(e) => Err(Either::A((e, b))),
+            Ok(Async::Ready(x)) => Ok(Async::Ready(Either::A((x, b)))),
+            Ok(Async::NotReady) => match b.poll() {
+                Err(e) => Err(Either::B((e, a))),
+                Ok(Async::Ready(x)) => Ok(Async::Ready(Either::B((x, a)))),
+                Ok(Async::NotReady) => {
+                    // Neither ready: restore state for the next poll.
+                    self.inner = Some((a, b));
+                    Ok(Async::NotReady)
+                }
+            }
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/select_all.rs b/third_party/rust/futures-0.1.31/src/future/select_all.rs
new file mode 100644
index 0000000000..1fbc98693b
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/select_all.rs
@@ -0,0 +1,71 @@
+//! Definition of the `SelectAll`, finding the first future in a list that
+//! finishes.
+
+use std::mem;
+use std::prelude::v1::*;
+
+use {Future, IntoFuture, Poll, Async};
+
+/// Future for the `select_all` combinator, waiting for one of any of a list of
+/// futures to complete.
+///
+/// This is created by the `select_all` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectAll<A> where A: Future {
+    inner: Vec<A>,
+}
+
+#[doc(hidden)]
+pub type SelectAllNext<A> = A;
+
+/// Creates a new future which will select over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready. Upon
+/// completion or failure the item resolved will be returned, along with the
+/// index of the future that was ready and the list of all the remaining
+/// futures.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
+pub fn select_all<I>(iter: I) -> SelectAll<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let ret = SelectAll {
+        inner: iter.into_iter()
+                   .map(|a| a.into_future())
+                   .collect(),
+    };
+    // An empty list could never resolve; reject it up front (see # Panics).
+    assert!(ret.inner.len() > 0);
+    ret
+}
+
+impl<A> Future for SelectAll<A>
+    where A: Future,
+{
+    type Item = (A::Item, usize, Vec<A>);
+    type Error = (A::Error, usize, Vec<A>);
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // Lazily poll each future in order and stop at the first one that is
+        // ready (`.next()` short-circuits the iteration).
+        let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
+            match f.poll() {
+                Ok(Async::NotReady) => None,
+                Ok(Async::Ready(e)) => Some((i, Ok(e))),
+                Err(e) => Some((i, Err(e))),
+            }
+        }).next();
+        match item {
+            Some((idx, res)) => {
+                // Drop the completed future; hand the remainder back to the
+                // caller and leave `self.inner` empty.
+                self.inner.remove(idx);
+                let rest = mem::replace(&mut self.inner, Vec::new());
+                match res {
+                    Ok(e) => Ok(Async::Ready((e, idx, rest))),
+                    Err(e) => Err((e, idx, rest)),
+                }
+            }
+            None => Ok(Async::NotReady),
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/select_ok.rs b/third_party/rust/futures-0.1.31/src/future/select_ok.rs
new file mode 100644
index 0000000000..f122a0ea30
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/select_ok.rs
@@ -0,0 +1,81 @@
+//! Definition of the `SelectOk` combinator, finding the first successful future
+//! in a list.
+
+use std::mem;
+use std::prelude::v1::*;
+
+use {Future, IntoFuture, Poll, Async};
+
+/// Future for the `select_ok` combinator, waiting for one of any of a list of
+/// futures to successfully complete. Unlike `select_all`, this future ignores all
+/// but the last error, if there are any.
+///
+/// This is created by the `select_ok` function.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SelectOk<A> where A: Future {
+    inner: Vec<A>,
+}
+
+/// Creates a new future which will select the first successful future over a list of futures.
+///
+/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike
+/// `select_all`, this will only return the first successful completion, or the last
+/// failure. This is useful in contexts where any success is desired and failures
+/// are ignored, unless all the futures fail.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
+pub fn select_ok<I>(iter: I) -> SelectOk<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let ret = SelectOk {
+        inner: iter.into_iter()
+                   .map(|a| a.into_future())
+                   .collect(),
+    };
+    // An empty list could never resolve; reject it up front (see # Panics).
+    assert!(ret.inner.len() > 0);
+    ret
+}
+
+impl<A> Future for SelectOk<A> where A: Future {
+    type Item = (A::Item, Vec<A>);
+    type Error = A::Error;
+
+    // loop until we've either exhausted all errors, a success was hit, or nothing is ready
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // loop until we've either exhausted all errors, a success was hit, or nothing is ready
+        loop {
+            // Find the first future that is ready (Ok or Err) this pass.
+            let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
+                match f.poll() {
+                    Ok(Async::NotReady) => None,
+                    Ok(Async::Ready(e)) => Some((i, Ok(e))),
+                    Err(e) => Some((i, Err(e))),
+                }
+            }).next();
+
+            match item {
+                Some((idx, res)) => {
+                    // always remove Ok or Err, if it's not the last Err continue looping
+                    drop(self.inner.remove(idx));
+                    match res {
+                        Ok(e) => {
+                            let rest = mem::replace(&mut self.inner, Vec::new());
+                            return Ok(Async::Ready((e, rest)))
+                        },
+                        Err(e) => {
+                            // Only the final error (list exhausted) is reported.
+                            if self.inner.is_empty() {
+                                return Err(e)
+                            }
+                        },
+                    }
+                }
+                None => {
+                    // based on the filter above, nothing is ready, return
+                    return Ok(Async::NotReady)
+                },
+            }
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/shared.rs b/third_party/rust/futures-0.1.31/src/future/shared.rs
new file mode 100644
index 0000000000..e3b6d2fca7
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/shared.rs
@@ -0,0 +1,300 @@
+//! Definition of the Shared combinator, a future that is cloneable,
+//! and can be polled in multiple threads.
+//!
+//! # Examples
+//!
+//! ```
+//! use futures::future::*;
+//!
+//! let future = ok::<_, bool>(6);
+//! let shared1 = future.shared();
+//! let shared2 = shared1.clone();
+//! assert_eq!(6, *shared1.wait().unwrap());
+//! assert_eq!(6, *shared2.wait().unwrap());
+//! ```
+
+use {Future, Poll, Async};
+use task::{self, Task};
+use executor::{self, Notify, Spawn};
+
+use std::{error, fmt, mem, ops};
+use std::cell::UnsafeCell;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::collections::HashMap;
+
+/// A future that is cloneable and can be polled in multiple threads.
+/// Use `Future::shared()` method to convert any future into a `Shared` future.
+#[must_use = "futures do nothing unless polled"]
+pub struct Shared<F: Future> {
+    // Shared state: the spawned future, its result slot, and waiter registry.
+    inner: Arc<Inner<F>>,
+    // Unique key identifying this clone in the waiter registry.
+    waiter: usize,
+}
+
+impl<F> fmt::Debug for Shared<F>
+    where F: Future + fmt::Debug,
+          F::Item: fmt::Debug,
+          F::Error: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("Shared")
+            .field("inner", &self.inner)
+            .field("waiter", &self.waiter)
+            .finish()
+    }
+}
+
+struct Inner<F: Future> {
+    // Monotonic source of waiter keys handed out by `Clone for Shared`.
+    next_clone_id: AtomicUsize,
+    // The spawned future; dropped (set to `None`) once it completes.
+    // Access is serialized by the IDLE/POLLING state machine below.
+    future: UnsafeCell<Option<Spawn<F>>>,
+    // Written exactly once by the completing poller; read after COMPLETE.
+    result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>,
+    notifier: Arc<Notifier>,
+}
+
+struct Notifier {
+    // One of the IDLE/POLLING/COMPLETE/POISONED constants below.
+    state: AtomicUsize,
+    // Tasks to wake on completion, keyed by each clone's waiter id.
+    waiters: Mutex<HashMap<usize, Task>>,
+}
+
+// State machine guarding exclusive access to `Inner::future`:
+// no task currently holds polling rights.
+const IDLE: usize = 0;
+// some task holds exclusive polling rights.
+const POLLING: usize = 1;
+// `result` is set and `future` has been dropped.
+const COMPLETE: usize = 2;
+// the inner future panicked while being polled.
+const POISONED: usize = 3;
+
+// Constructs the shared state with this handle as waiter 0; clone ids
+// start at 1 (see `Clone for Shared`).
+pub fn new<F: Future>(future: F) -> Shared<F> {
+    Shared {
+        inner: Arc::new(Inner {
+            next_clone_id: AtomicUsize::new(1),
+            notifier: Arc::new(Notifier {
+                state: AtomicUsize::new(IDLE),
+                waiters: Mutex::new(HashMap::new()),
+            }),
+            future: UnsafeCell::new(Some(executor::spawn(future))),
+            result: UnsafeCell::new(None),
+        }),
+        waiter: 0,
+    }
+}
+
+impl<F> Shared<F> where F: Future {
+    // TODO: make this private
+    #[deprecated(since = "0.1.12", note = "use `Future::shared` instead")]
+    #[cfg(feature = "with-deprecated")]
+    #[doc(hidden)]
+    pub fn new(future: F) -> Self {
+        new(future)
+    }
+
+    /// If any clone of this `Shared` has completed execution, returns its result immediately
+    /// without blocking. Otherwise, returns None without triggering the work represented by
+    /// this `Shared`.
+    pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> {
+        match self.inner.notifier.state.load(SeqCst) {
+            COMPLETE => {
+                // Safe to read: `result` is written before COMPLETE is stored.
+                Some(unsafe { self.clone_result() })
+            }
+            POISONED => panic!("inner future panicked during poll"),
+            _ => None,
+        }
+    }
+
+    // Registers the current task under this clone's waiter id so it is
+    // woken when the shared future completes.
+    fn set_waiter(&mut self) {
+        let mut waiters = self.inner.notifier.waiters.lock().unwrap();
+        waiters.insert(self.waiter, task::current());
+    }
+
+    // Clones the stored result. Caller must guarantee `result` has been
+    // written (state is COMPLETE, or this thread just wrote it).
+    unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> {
+        match *self.inner.result.get() {
+            Some(Ok(ref item)) => Ok(SharedItem { item: item.item.clone() }),
+            Some(Err(ref e)) => Err(SharedError { error: e.error.clone() }),
+            _ => unreachable!(),
+        }
+    }
+
+    // Drops the inner future, publishes COMPLETE, and wakes all waiters.
+    fn complete(&self) {
+        unsafe { *self.inner.future.get() = None };
+        self.inner.notifier.state.store(COMPLETE, SeqCst);
+        self.inner.notifier.notify(0);
+    }
+}
+
+impl<F> Future for Shared<F>
+    where F: Future
+{
+    type Item = SharedItem<F::Item>;
+    type Error = SharedError<F::Error>;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // Register our task first so a completion racing with the state
+        // check below still wakes us.
+        self.set_waiter();
+
+        // Try to take exclusive polling rights (IDLE -> POLLING).
+        // NOTE(review): `compare_and_swap` is deprecated since Rust 1.50 in
+        // favor of `compare_exchange`; left as-is in this vendored 0.1.x code.
+        match self.inner.notifier.state.compare_and_swap(IDLE, POLLING, SeqCst) {
+            IDLE => {
+                // Lock acquired, fall through
+            }
+            POLLING => {
+                // Another task is currently polling, at this point we just want
+                // to ensure that our task handle is currently registered
+
+                return Ok(Async::NotReady);
+            }
+            COMPLETE => {
+                return unsafe { self.clone_result().map(Async::Ready) };
+            }
+            POISONED => panic!("inner future panicked during poll"),
+            _ => unreachable!(),
+        }
+
+        // Marks the state POISONED if the inner poll panics, so other
+        // handles panic loudly instead of spinning on POLLING forever.
+        struct Reset<'a>(&'a AtomicUsize);
+
+        impl<'a> Drop for Reset<'a> {
+            fn drop(&mut self) {
+                use std::thread;
+
+                if thread::panicking() {
+                    self.0.store(POISONED, SeqCst);
+                }
+            }
+        }
+
+        let _reset = Reset(&self.inner.notifier.state);
+
+        // Poll the future
+        let res = unsafe {
+            // Safe: we hold POLLING, so no other thread touches `future`.
+            (*self.inner.future.get()).as_mut().unwrap()
+                .poll_future_notify(&self.inner.notifier, 0)
+        };
+        match res {
+            Ok(Async::NotReady) => {
+                // Not ready, try to release the handle
+                match self.inner.notifier.state.compare_and_swap(POLLING, IDLE, SeqCst) {
+                    POLLING => {
+                        // Success
+                        return Ok(Async::NotReady);
+                    }
+                    // Only the POLLING holder mutates the state here, so any
+                    // other observed value is a logic error.
+                    _ => unreachable!(),
+                }
+
+            }
+            Ok(Async::Ready(i)) => {
+                unsafe {
+                    // Safe: still POLLING, so we have exclusive access to `result`.
+                    (*self.inner.result.get()) = Some(Ok(SharedItem { item: Arc::new(i) }));
+                }
+            }
+            Err(e) => {
+                unsafe {
+                    (*self.inner.result.get()) = Some(Err(SharedError { error: Arc::new(e) }));
+                }
+            }
+        }
+
+        self.complete();
+        unsafe { self.clone_result().map(Async::Ready) }
+    }
+}
+
+impl<F> Clone for Shared<F> where F: Future {
+    /// Produces a new handle to the same shared computation. Each clone
+    /// receives a fresh waiter key so its task is tracked independently.
+    fn clone(&self) -> Self {
+        Shared {
+            inner: self.inner.clone(),
+            waiter: self.inner.next_clone_id.fetch_add(1, SeqCst),
+        }
+    }
+}
+
+impl<F> Drop for Shared<F> where F: Future {
+    /// Deregisters this handle's task so completion notifications are not
+    /// delivered to a dropped clone.
+    fn drop(&mut self) {
+        self.inner.notifier.waiters.lock().unwrap().remove(&self.waiter);
+    }
+}
+
+impl Notify for Notifier {
+    fn notify(&self, _id: usize) {
+        // Swap the registry out while holding the lock, then wake the tasks
+        // with the lock released so re-registration cannot contend with us.
+        let waiters = {
+            let mut guard = self.waiters.lock().unwrap();
+            mem::replace(&mut *guard, HashMap::new())
+        };
+
+        for (_, task) in waiters {
+            task.notify();
+        }
+    }
+}
+
+// The `F` is synchronized by a lock, so `F` doesn't need
+// to be `Sync`. However, its `Item` or `Error` are exposed
+// through an `Arc` but not lock, so they must be `Send + Sync`.
+// SAFETY: exclusive access to the `UnsafeCell`s is enforced by the
+// IDLE/POLLING state machine, which is the "lock" referred to above.
+unsafe impl<F> Send for Inner<F>
+    where F: Future + Send,
+          F::Item: Send + Sync,
+          F::Error: Send + Sync,
+{}
+
+// SAFETY: see `Send` above; shared references never touch the cells
+// without first winning the POLLING state.
+unsafe impl<F> Sync for Inner<F>
+    where F: Future + Send,
+          F::Item: Send + Sync,
+          F::Error: Send + Sync,
+{}
+
+impl<F> fmt::Debug for Inner<F>
+    where F: Future + fmt::Debug,
+          F::Item: fmt::Debug,
+          F::Error: fmt::Debug,
+{
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        // Fields are omitted: they live behind `UnsafeCell`s and cannot be
+        // read safely from an arbitrary thread here.
+        fmt.debug_struct("Inner")
+            .finish()
+    }
+}
+
+/// A wrapped item of the original future that is cloneable and implements Deref
+/// for ease of use.
+#[derive(Clone, Debug)]
+pub struct SharedItem<T> {
+    // Shared ownership of the item so every `Shared` clone can hold it.
+    item: Arc<T>,
+}
+
+impl<T> ops::Deref for SharedItem<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // `Arc::as_ref` already yields `&T`; the original's extra `&`
+        // produced `&&T` and relied on deref coercion to compile
+        // (clippy: needless_borrow).
+        self.item.as_ref()
+    }
+}
+
+/// A wrapped error of the original future that is cloneable and implements Deref
+/// for ease of use.
+#[derive(Clone, Debug)]
+pub struct SharedError<E> {
+    // Shared ownership of the error so every `Shared` clone can hold it.
+    error: Arc<E>,
+}
+
+impl<E> ops::Deref for SharedError<E> {
+    type Target = E;
+
+    fn deref(&self) -> &E {
+        // `Arc::as_ref` already yields `&E`; the original's extra `&`
+        // produced `&&E` and relied on deref coercion to compile
+        // (clippy: needless_borrow).
+        self.error.as_ref()
+    }
+}
+
+impl<E> fmt::Display for SharedError<E>
+    where E: fmt::Display,
+{
+    /// Forwards to the wrapped error's `Display` implementation.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.error.fmt(f)
+    }
+}
+
+impl<E> error::Error for SharedError<E>
+    where E: error::Error,
+{
+    // `description`/`cause` are deprecated in std but still forwarded here
+    // for compatibility with this crate's 0.1.x-era API surface.
+    #[allow(deprecated)]
+    fn description(&self) -> &str {
+        self.error.description()
+    }
+
+    #[allow(deprecated)]
+    fn cause(&self) -> Option<&error::Error> {
+        self.error.cause()
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/future/then.rs b/third_party/rust/futures-0.1.31/src/future/then.rs
new file mode 100644
index 0000000000..188fb8fa80
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/future/then.rs
@@ -0,0 +1,36 @@
+use {Future, IntoFuture, Poll};
+use super::chain::Chain;
+
+/// Future for the `then` combinator, chaining computations on the end of
+/// another future regardless of its outcome.
+///
+/// This is created by the `Future::then` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Then<A, B, F> where A: Future, B: IntoFuture {
+    // Chain state machine: first runs `A`, then the `B` produced by `F`.
+    state: Chain<A, B::Future, F>,
+}
+
+// Wraps `future` and the continuation `f` into a fresh `Then` combinator.
+pub fn new<A, B, F>(future: A, f: F) -> Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+{
+    Then {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+          F: FnOnce(Result<A::Item, A::Error>) -> B,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self) -> Poll<B::Item, B::Error> {
+        self.state.poll(|a, f| {
+            // `a` is the first future's full `Result`; returning `Ok(Err(fut))`
+            // tells `Chain` to continue with the second future `fut`.
+            // NOTE(review): this Ok(Err(..)) protocol is defined in chain.rs —
+            // confirm against that module before modifying.
+            Ok(Err(f(a).into_future()))
+        })
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/lib.rs b/third_party/rust/futures-0.1.31/src/lib.rs
new file mode 100644
index 0000000000..ccadb6777f
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/lib.rs
@@ -0,0 +1,266 @@
+//! Zero-cost Futures in Rust
+//!
+//! This library is an implementation of futures in Rust which aims to provide
+//! a robust implementation of handling asynchronous computations, ergonomic
+//! composition and usage, and zero-cost abstractions over what would otherwise
+//! be written by hand.
+//!
+//! Futures are a concept for an object which is a proxy for another value that
+//! may not be ready yet. For example issuing an HTTP request may return a
+//! future for the HTTP response, as it probably hasn't arrived yet. With an
+//! object representing a value that will eventually be available, futures allow
+//! for powerful composition of tasks through basic combinators that can perform
+//! operations like chaining computations, changing the types of futures, or
+//! waiting for two futures to complete at the same time.
+//!
+//! You can find extensive tutorials and documentation at [https://tokio.rs]
+//! for both this crate (asynchronous programming in general) as well as the
+//! Tokio stack to perform async I/O with.
+//!
+//! [https://tokio.rs]: https://tokio.rs
+//!
+//! ## Installation
+//!
+//! Add this to your `Cargo.toml`:
+//!
+//! ```toml
+//! [dependencies]
+//! futures = "0.1"
+//! ```
+//!
+//! ## Examples
+//!
+//! Let's take a look at a few examples of how futures might be used:
+//!
+//! ```
+//! extern crate futures;
+//!
+//! use std::io;
+//! use std::time::Duration;
+//! use futures::prelude::*;
+//! use futures::future::Map;
+//!
+//! // A future is actually a trait implementation, so we can generically take a
+//! // future of any integer and return back a future that will resolve to that
+//! // value plus 10 more.
+//! //
+//! // Note here that like iterators, we're returning the `Map` combinator in
+//! // the futures crate, not a boxed abstraction. This is a zero-cost
+//! // construction of a future.
+//! fn add_ten<F>(future: F) -> Map<F, fn(i32) -> i32>
+//! where F: Future<Item=i32>,
+//! {
+//! fn add(a: i32) -> i32 { a + 10 }
+//! future.map(add)
+//! }
+//!
+//! // Not only can we modify one future, but we can even compose them together!
+//! // Here we have a function which takes two futures as input, and returns a
+//! // future that will calculate the sum of their two values.
+//! //
+//! // Above we saw a direct return value of the `Map` combinator, but
+//! // performance isn't always critical and sometimes it's more ergonomic to
+//! // return a trait object like we do here. Note though that there's only one
+//! // allocation here, not any for the intermediate futures.
+//! fn add<'a, A, B>(a: A, b: B) -> Box<Future<Item=i32, Error=A::Error> + 'a>
+//! where A: Future<Item=i32> + 'a,
+//! B: Future<Item=i32, Error=A::Error> + 'a,
+//! {
+//! Box::new(a.join(b).map(|(a, b)| a + b))
+//! }
+//!
+//! // Futures also allow chaining computations together, starting another after
+//! // the previous finishes. Here we wait for the first computation to finish,
+//! // and then decide what to do depending on the result.
+//! fn download_timeout(url: &str,
+//! timeout_dur: Duration)
+//! -> Box<Future<Item=Vec<u8>, Error=io::Error>> {
+//! use std::io;
+//! use std::net::{SocketAddr, TcpStream};
+//!
+//! type IoFuture<T> = Box<Future<Item=T, Error=io::Error>>;
+//!
+//! // First thing to do is we need to resolve our URL to an address. This
+//! // will likely perform a DNS lookup which may take some time.
+//! let addr = resolve(url);
+//!
+//! // After we acquire the address, we next want to open up a TCP
+//! // connection.
+//! let tcp = addr.and_then(|addr| connect(&addr));
+//!
+//! // After the TCP connection is established and ready to go, we're off to
+//! // the races!
+//! let data = tcp.and_then(|conn| download(conn));
+//!
+//!     // That all might take a while, though, so let's not wait too long for it
+//! // to all come back. The `select` combinator here returns a future which
+//! // resolves to the first value that's ready plus the next future.
+//! //
+//! // Note we can also use the `then` combinator which is similar to
+//! // `and_then` above except that it receives the result of the
+//! // computation, not just the successful value.
+//! //
+//! // Again note that all the above calls to `and_then` and the below calls
+//! // to `map` and such require no allocations. We only ever allocate once
+//! // we hit the `Box::new()` call at the end here, which means we've built
+//! // up a relatively involved computation with only one box, and even that
+//! // was optional!
+//!
+//! let data = data.map(Ok);
+//! let timeout = timeout(timeout_dur).map(Err);
+//!
+//! let ret = data.select(timeout).then(|result| {
+//! match result {
+//! // One future succeeded, and it was the one which was
+//! // downloading data from the connection.
+//! Ok((Ok(data), _other_future)) => Ok(data),
+//!
+//! // The timeout fired, and otherwise no error was found, so
+//! // we translate this to an error.
+//! Ok((Err(_timeout), _other_future)) => {
+//! Err(io::Error::new(io::ErrorKind::Other, "timeout"))
+//! }
+//!
+//! // A normal I/O error happened, so we pass that on through.
+//! Err((e, _other_future)) => Err(e),
+//! }
+//! });
+//! return Box::new(ret);
+//!
+//! fn resolve(url: &str) -> IoFuture<SocketAddr> {
+//! // ...
+//! # panic!("unimplemented");
+//! }
+//!
+//! fn connect(hostname: &SocketAddr) -> IoFuture<TcpStream> {
+//! // ...
+//! # panic!("unimplemented");
+//! }
+//!
+//! fn download(stream: TcpStream) -> IoFuture<Vec<u8>> {
+//! // ...
+//! # panic!("unimplemented");
+//! }
+//!
+//! fn timeout(stream: Duration) -> IoFuture<()> {
+//! // ...
+//! # panic!("unimplemented");
+//! }
+//! }
+//! # fn main() {}
+//! ```
+//!
+//! Some more information can also be found in the [README] for now, but
+//! otherwise feel free to jump in to the docs below!
+//!
+//! [README]: https://github.com/rust-lang-nursery/futures-rs#futures-rs
+
+#![no_std]
+#![deny(missing_docs, missing_debug_implementations)]
+#![allow(bare_trait_objects, unknown_lints)]
+#![doc(html_root_url = "https://docs.rs/futures/0.1")]
+
+#[macro_use]
+#[cfg(feature = "use_std")]
+extern crate std;
+
+// Expands each wrapped item only when the "use_std" feature is enabled,
+// keeping the crate buildable under `#![no_std]`.
+macro_rules! if_std {
+    ($($i:item)*) => ($(
+        #[cfg(feature = "use_std")]
+        $i
+    )*)
+}
+
+#[macro_use]
+mod poll;
+pub use poll::{Poll, Async, AsyncSink, StartSend};
+
+pub mod future;
+pub use future::{Future, IntoFuture};
+
+pub mod stream;
+pub use stream::Stream;
+
+pub mod sink;
+pub use sink::Sink;
+
+#[deprecated(since = "0.1.4", note = "import through the future module instead")]
+#[cfg(feature = "with-deprecated")]
+#[doc(hidden)]
+pub use future::{done, empty, failed, finished, lazy};
+
+#[doc(hidden)]
+#[cfg(feature = "with-deprecated")]
+#[deprecated(since = "0.1.4", note = "import through the future module instead")]
+pub use future::{
+ Done, Empty, Failed, Finished, Lazy, AndThen, Flatten, FlattenStream, Fuse, IntoStream,
+ Join, Join3, Join4, Join5, Map, MapErr, OrElse, Select,
+ SelectNext, Then
+};
+
+#[cfg(feature = "use_std")]
+mod lock;
+mod task_impl;
+
+mod resultstream;
+
+pub mod task;
+pub mod executor;
+#[cfg(feature = "use_std")]
+pub mod sync;
+#[cfg(feature = "use_std")]
+pub mod unsync;
+
+
+if_std! {
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use sync::oneshot::channel as oneshot;
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use sync::oneshot::Receiver instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use sync::oneshot::Receiver as Oneshot;
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use sync::oneshot::Sender instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use sync::oneshot::Sender as Complete;
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "use sync::oneshot::Canceled instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use sync::oneshot::Canceled;
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "import through the future module instead")]
+ #[cfg(feature = "with-deprecated")]
+ #[allow(deprecated)]
+ pub use future::{BoxFuture, collect, select_all, select_ok};
+
+ #[doc(hidden)]
+ #[deprecated(since = "0.1.4", note = "import through the future module instead")]
+ #[cfg(feature = "with-deprecated")]
+ pub use future::{SelectAll, SelectAllNext, Collect, SelectOk};
+}
+
+/// A "prelude" for crates using the `futures` crate.
+///
+/// This prelude is similar to the standard library's prelude in that you'll
+/// almost always want to import its entire contents, but unlike the standard
+/// library's prelude you'll have to do so manually. An example of using this is:
+///
+/// ```
+/// use futures::prelude::*;
+/// ```
+///
+/// We may add items to this over time as they become ubiquitous as well, but
+/// otherwise this should help cut down on futures-related imports when you're
+/// working with the `futures` crate!
+pub mod prelude {
+    // Core traits and the poll vocabulary types, re-exported for glob import.
+    #[doc(no_inline)]
+    pub use {Future, Stream, Sink, Async, AsyncSink, Poll, StartSend};
+    #[doc(no_inline)]
+    pub use IntoFuture;
+}
diff --git a/third_party/rust/futures-0.1.31/src/lock.rs b/third_party/rust/futures-0.1.31/src/lock.rs
new file mode 100644
index 0000000000..627c524949
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/lock.rs
@@ -0,0 +1,107 @@
+//! A "mutex" which only supports `try_lock`
+//!
+//! As a futures library the eventual call to an event loop should be the only
+//! thing that ever blocks, so this is assisted with a fast user-space
+//! implementation of a lock that can only have a `try_lock` operation.
+
+extern crate core;
+
+use self::core::cell::UnsafeCell;
+use self::core::ops::{Deref, DerefMut};
+use self::core::sync::atomic::Ordering::SeqCst;
+use self::core::sync::atomic::AtomicBool;
+
+/// A "mutex" around a value, similar to `std::sync::Mutex<T>`.
+///
+/// This lock only supports the `try_lock` operation, however, and does not
+/// implement poisoning.
+#[derive(Debug)]
+pub struct Lock<T> {
+    // `true` while a `TryLock` guard is outstanding.
+    locked: AtomicBool,
+    // The protected value; only accessed through an acquired `TryLock`.
+    data: UnsafeCell<T>,
+}
+
+/// Sentinel representing an acquired lock through which the data can be
+/// accessed.
+pub struct TryLock<'a, T: 'a> {
+    // Back-pointer to the lock; the flag is cleared in `Drop`.
+    __ptr: &'a Lock<T>,
+}
+
+// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are
+// intended to mirror the standard library's corresponding impls for `Mutex<T>`.
+//
+// If a `T` is sendable across threads, so is the lock, and `T` must be sendable
+// across threads to be `Sync` because it allows mutable access from multiple
+// threads.
+unsafe impl<T: Send> Send for Lock<T> {}
+unsafe impl<T: Send> Sync for Lock<T> {}
+
+impl<T> Lock<T> {
+    /// Creates a new, unlocked lock wrapping the given value.
+    pub fn new(t: T) -> Lock<T> {
+        Lock {
+            locked: AtomicBool::new(false),
+            data: UnsafeCell::new(t),
+        }
+    }
+
+    /// Attempts to acquire this lock, returning whether the lock was acquired or
+    /// not.
+    ///
+    /// If `Some` is returned then the data this lock protects can be accessed
+    /// through the sentinel. This sentinel allows both mutable and immutable
+    /// access.
+    ///
+    /// If `None` is returned then the lock is already locked, either elsewhere
+    /// on this thread or on another thread.
+    pub fn try_lock(&self) -> Option<TryLock<T>> {
+        // `swap` returns the previous value: `false` means we just made
+        // the unlocked -> locked transition and now own the lock.
+        match self.locked.swap(true, SeqCst) {
+            false => Some(TryLock { __ptr: self }),
+            true => None,
+        }
+    }
+}
+
+impl<'a, T> Deref for TryLock<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        unsafe { &*self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> DerefMut for TryLock<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        //
+        // Additionally, we're the *only* `TryLock` in existence so mutable
+        // access should be ok.
+        unsafe { &mut *self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> Drop for TryLock<'a, T> {
+    fn drop(&mut self) {
+        // Release the lock; only the guard holder ever clears the flag.
+        self.__ptr.locked.store(false, SeqCst);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Lock;
+
+    // Single-threaded sanity check: mutual exclusion while the guard lives,
+    // mutation through the guard, and release on drop.
+    #[test]
+    fn smoke() {
+        let a = Lock::new(1);
+        let mut a1 = a.try_lock().unwrap();
+        assert!(a.try_lock().is_none());
+        assert_eq!(*a1, 1);
+        *a1 = 2;
+        drop(a1);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/poll.rs b/third_party/rust/futures-0.1.31/src/poll.rs
new file mode 100644
index 0000000000..c568e726c2
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/poll.rs
@@ -0,0 +1,105 @@
+/// A macro for extracting the successful type of a `Poll<T, E>`.
+///
+/// This macro bakes propagation of both errors and `NotReady` signals by
+/// returning early.
+#[macro_export]
+macro_rules! try_ready {
+    // `From::from` lets the error convert into the caller's error type,
+    // mirroring what the `?` operator does for `Result`.
+    ($e:expr) => (match $e {
+        Ok($crate::Async::Ready(t)) => t,
+        Ok($crate::Async::NotReady) => return Ok($crate::Async::NotReady),
+        Err(e) => return Err(From::from(e)),
+    })
+}
+
+/// Return type of the `Future::poll` method, indicates whether a future's value
+/// is ready or not.
+///
+/// * `Ok(Async::Ready(t))` means that a future has successfully resolved
+/// * `Ok(Async::NotReady)` means that a future is not ready to complete yet
+/// * `Err(e)` means that a future has completed with the given failure
+pub type Poll<T, E> = Result<Async<T>, E>;
+
+/// Return type of future, indicating whether a value is ready or not.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Async<T> {
+    /// Represents that a value is immediately ready.
+    Ready(T),
+
+    /// Represents that a value is not ready yet, but may be so later.
+    NotReady,
+}
+
+impl<T> Async<T> {
+    /// Change the success value of this `Async` with the closure provided
+    pub fn map<F, U>(self, f: F) -> Async<U>
+        where F: FnOnce(T) -> U
+    {
+        match self {
+            Async::Ready(t) => Async::Ready(f(t)),
+            Async::NotReady => Async::NotReady,
+        }
+    }
+
+    /// Returns whether this is `Async::Ready`
+    pub fn is_ready(&self) -> bool {
+        match *self {
+            Async::Ready(_) => true,
+            Async::NotReady => false,
+        }
+    }
+
+    /// Returns whether this is `Async::NotReady`
+    pub fn is_not_ready(&self) -> bool {
+        !self.is_ready()
+    }
+}
+
+// Treats any plain value as an immediately-ready `Async`.
+impl<T> From<T> for Async<T> {
+    fn from(t: T) -> Async<T> {
+        Async::Ready(t)
+    }
+}
+
+/// The result of an asynchronous attempt to send a value to a sink.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum AsyncSink<T> {
+    /// The `start_send` attempt succeeded, so the sending process has
+    /// *started*; you must use `Sink::poll_complete` to drive the send
+    /// to completion.
+    Ready,
+
+    /// The `start_send` attempt failed due to the sink being full. The value
+    /// being sent is returned, and the current `Task` will be automatically
+    /// notified again once the sink has room.
+    NotReady(T),
+}
+
+impl<T> AsyncSink<T> {
+    /// Change the NotReady value of this `AsyncSink` with the closure provided
+    pub fn map<F, U>(self, f: F) -> AsyncSink<U>
+        where F: FnOnce(T) -> U,
+    {
+        // Only the rejected item in `NotReady` is transformed; `Ready`
+        // carries no value.
+        match self {
+            AsyncSink::Ready => AsyncSink::Ready,
+            AsyncSink::NotReady(t) => AsyncSink::NotReady(f(t)),
+        }
+    }
+
+    /// Returns whether this is `AsyncSink::Ready`
+    pub fn is_ready(&self) -> bool {
+        match *self {
+            AsyncSink::Ready => true,
+            AsyncSink::NotReady(_) => false,
+        }
+    }
+
+    /// Returns whether this is `AsyncSink::NotReady`
+    pub fn is_not_ready(&self) -> bool {
+        !self.is_ready()
+    }
+}
+
+
+/// Return type of the `Sink::start_send` method, indicating the outcome of a
+/// send attempt. See `AsyncSink` for more details.
+pub type StartSend<T, E> = Result<AsyncSink<T>, E>;
diff --git a/third_party/rust/futures-0.1.31/src/resultstream.rs b/third_party/rust/futures-0.1.31/src/resultstream.rs
new file mode 100644
index 0000000000..23a99819bd
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/resultstream.rs
@@ -0,0 +1,46 @@
+// This should really be in the stream module,
+// but `pub(crate)` isn't available until Rust 1.18,
+// and pre-1.18 there isn't a really good way to have a sub-module
+// available to the crate, but not without it.
+use core::marker::PhantomData;
+
+use {Poll, Async};
+use stream::Stream;
+
+
+/// A stream combinator used to convert a `Stream<Item=T,Error=E>`
+/// to a `Stream<Item=Result<T,E>>`.
+///
+/// A poll on this stream will never return an `Err`. As such the
+/// actual error type is parameterized, so it can match whatever error
+/// type is needed.
+///
+/// This structure is produced by the `Stream::results` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Results<S: Stream, E> {
+    inner: S,
+    // Pins down the caller-chosen (never actually produced) error type `E`.
+    phantom: PhantomData<E>
+}
+
+pub fn new<S, E>(s: S) -> Results<S, E> where S: Stream {
+ Results {
+ inner: s,
+ phantom: PhantomData
+ }
+}
+
+impl<S: Stream, E> Stream for Results<S, E> {
+    type Item = Result<S::Item, S::Error>;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<Option<Result<S::Item, S::Error>>, E> {
+        match self.inner.poll() {
+            Ok(Async::Ready(Some(item))) => Ok(Async::Ready(Some(Ok(item)))),
+            // Errors are folded into the item type; this stream itself
+            // never yields `Err`.
+            Err(e) => Ok(Async::Ready(Some(Err(e)))),
+            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+            Ok(Async::NotReady) => Ok(Async::NotReady)
+        }
+    }
+}
+
diff --git a/third_party/rust/futures-0.1.31/src/sink/buffer.rs b/third_party/rust/futures-0.1.31/src/sink/buffer.rs
new file mode 100644
index 0000000000..419579d9a0
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/buffer.rs
@@ -0,0 +1,108 @@
+use std::collections::VecDeque;
+
+use {Poll, Async};
+use {StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::buffer` combinator, which buffers up to some fixed
+/// number of values when the underlying sink is unable to accept them.
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct Buffer<S: Sink> {
+    sink: S,
+    // Items accepted by `start_send` but not yet pushed into `sink`.
+    buf: VecDeque<S::SinkItem>,
+
+    // Track capacity separately from the `VecDeque`, which may be rounded up
+    cap: usize,
+}
+
+// Capacity is tracked separately because `VecDeque` may round its
+// allocation up past the requested amount.
+pub fn new<S: Sink>(sink: S, amt: usize) -> Buffer<S> {
+    Buffer { sink: sink, buf: VecDeque::with_capacity(amt), cap: amt }
+}
+
+impl<S: Sink> Buffer<S> {
+    /// Get a shared reference to the inner sink.
+    pub fn get_ref(&self) -> &S {
+        &self.sink
+    }
+
+    /// Get a mutable reference to the inner sink.
+    pub fn get_mut(&mut self) -> &mut S {
+        &mut self.sink
+    }
+
+    /// Consumes this combinator, returning the underlying sink.
+    ///
+    /// Note that this may discard intermediate state of this combinator, so
+    /// care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> S {
+        self.sink
+    }
+
+    // Drains as much of `buf` into the inner sink as it will accept.
+    // Returns `NotReady` (with the rejected item pushed back to the front)
+    // if the sink refuses an item, `Ready(())` once the buffer is empty.
+    fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> {
+        while let Some(item) = self.buf.pop_front() {
+            if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
+                self.buf.push_front(item);
+
+                return Ok(Async::NotReady);
+            }
+        }
+
+        Ok(Async::Ready(()))
+    }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S> Stream for Buffer<S> where S: Sink + Stream {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+        self.sink.poll()
+    }
+}
+
+impl<S: Sink> Sink for Buffer<S> {
+    type SinkItem = S::SinkItem;
+    type SinkError = S::SinkError;
+
+    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+        // Zero capacity means "no buffering": behave as a transparent proxy.
+        if self.cap == 0 {
+            return self.sink.start_send(item);
+        }
+
+        self.try_empty_buffer()?;
+        if self.buf.len() == self.cap {
+            // Buffer full: hand the item back to the caller.
+            return Ok(AsyncSink::NotReady(item));
+        }
+        self.buf.push_back(item);
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.poll_complete();
+        }
+
+        // Flush our own buffer first, then defer to the inner sink.
+        try_ready!(self.try_empty_buffer());
+        debug_assert!(self.buf.is_empty());
+        self.sink.poll_complete()
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        if self.cap == 0 {
+            return self.sink.close();
+        }
+
+        // NOTE(review): clippy would prefer `!self.buf.is_empty()` here.
+        if self.buf.len() > 0 {
+            try_ready!(self.try_empty_buffer());
+        }
+        assert_eq!(self.buf.len(), 0);
+        self.sink.close()
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/fanout.rs b/third_party/rust/futures-0.1.31/src/sink/fanout.rs
new file mode 100644
index 0000000000..8d2456e7e8
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/fanout.rs
@@ -0,0 +1,135 @@
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::mem::replace;
+
+use {Async, AsyncSink, Poll, Sink, StartSend};
+
+/// Sink that clones incoming items and forwards them to two sinks at the same time.
+///
+/// Backpressure from any downstream sink propagates up, which means that this sink
+/// can only process items as fast as its _slowest_ downstream sink.
+pub struct Fanout<A: Sink, B: Sink> {
+    // Each `Downstream` pairs a sink with its pending-item state.
+    left: Downstream<A>,
+    right: Downstream<B>
+}
+
+impl<A: Sink, B: Sink> Fanout<A, B> {
+    /// Consumes this combinator, returning the underlying sinks.
+    ///
+    /// Note that this may discard intermediate state of this combinator,
+    /// so care should be taken to avoid losing resources when this is called.
+    pub fn into_inner(self) -> (A, B) {
+        (self.left.sink, self.right.sink)
+    }
+}
+
+impl<A: Sink + Debug, B: Sink + Debug> Debug for Fanout<A, B>
+    where A::SinkItem: Debug,
+          B::SinkItem: Debug
+{
+    fn fmt(&self, f: &mut Formatter) -> FmtResult {
+        f.debug_struct("Fanout")
+            .field("left", &self.left)
+            .field("right", &self.right)
+            .finish()
+    }
+}
+
+// Each downstream starts in the `Ready` state with no buffered item.
+pub fn new<A: Sink, B: Sink>(left: A, right: B) -> Fanout<A, B> {
+    Fanout { left: Downstream::new(left), right: Downstream::new(right) }
+}
+
+impl<A, B> Sink for Fanout<A, B>
+    where A: Sink,
+          A::SinkItem: Clone,
+          B: Sink<SinkItem=A::SinkItem, SinkError=A::SinkError>
+{
+    type SinkItem = A::SinkItem;
+    type SinkError = A::SinkError;
+
+    fn start_send(
+        &mut self,
+        item: Self::SinkItem
+    ) -> StartSend<Self::SinkItem, Self::SinkError> {
+        // Attempt to complete processing any outstanding requests.
+        self.left.keep_flushing()?;
+        self.right.keep_flushing()?;
+        // Only if both downstream sinks are ready, start sending the next item.
+        if self.left.is_ready() && self.right.is_ready() {
+            // Clone for the left sink; the right one takes ownership.
+            self.left.state = self.left.sink.start_send(item.clone())?;
+            self.right.state = self.right.sink.start_send(item)?;
+            Ok(AsyncSink::Ready)
+        } else {
+            Ok(AsyncSink::NotReady(item))
+        }
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.poll_complete()?;
+        let right_async = self.right.poll_complete()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), Self::SinkError> {
+        let left_async = self.left.close()?;
+        let right_async = self.right.close()?;
+        // Only if both downstream sinks are ready, signal readiness.
+        if left_async.is_ready() && right_async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+#[derive(Debug)]
+struct Downstream<S: Sink> {
+    sink: S,
+    // `Ready`, or `NotReady(item)` holding an item the sink rejected which
+    // must be retried before anything else is sent.
+    state: AsyncSink<S::SinkItem>
+}
+
+impl<S: Sink> Downstream<S> {
+    fn new(sink: S) -> Self {
+        Downstream { sink: sink, state: AsyncSink::Ready }
+    }
+
+    // True when no rejected item is pending.
+    fn is_ready(&self) -> bool {
+        self.state.is_ready()
+    }
+
+    // Retries sending the pending item, if any.
+    fn keep_flushing(&mut self) -> Result<(), S::SinkError> {
+        if let AsyncSink::NotReady(item) = replace(&mut self.state, AsyncSink::Ready) {
+            self.state = self.sink.start_send(item)?;
+        }
+        Ok(())
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        // NOTE(review): `async` became a reserved keyword in Rust edition
+        // 2018; this identifier compiles only under edition 2015 and would
+        // need renaming on an edition upgrade.
+        let async = self.sink.poll_complete()?;
+        // Only if all values have been sent _and_ the underlying
+        // sink is completely flushed, signal readiness.
+        if self.state.is_ready() && async.is_ready() {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), S::SinkError> {
+        self.keep_flushing()?;
+        // If all items have been flushed, initiate close.
+        if self.state.is_ready() {
+            self.sink.close()
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/flush.rs b/third_party/rust/futures-0.1.31/src/sink/flush.rs
new file mode 100644
index 0000000000..f66811e03d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/flush.rs
@@ -0,0 +1,46 @@
+use {Poll, Async, Future};
+use sink::Sink;
+
+/// Future for the `Sink::flush` combinator, which polls the sink until all data
+/// has been flushed.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Flush<S> {
+ sink: Option<S>,
+}
+
+pub fn new<S: Sink>(sink: S) -> Flush<S> {
+ Flush { sink: Some(sink) }
+}
+
+impl<S: Sink> Flush<S> {
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ self.sink.as_ref().expect("Attempted `Flush::get_ref` after the flush completed")
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed")
+ }
+
+ /// Consume the `Flush` and return the inner sink.
+ pub fn into_inner(self) -> S {
+ self.sink.expect("Attempted `Flush::into_inner` after the flush completed")
+ }
+}
+
+impl<S: Sink> Future for Flush<S> {
+ type Item = S;
+ type Error = S::SinkError;
+
+ fn poll(&mut self) -> Poll<S, S::SinkError> {
+ let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed");
+ if sink.poll_complete()?.is_ready() {
+ Ok(Async::Ready(sink))
+ } else {
+ self.sink = Some(sink);
+ Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/from_err.rs b/third_party/rust/futures-0.1.31/src/sink/from_err.rs
new file mode 100644
index 0000000000..4880c30ef4
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/from_err.rs
@@ -0,0 +1,71 @@
+use core::marker::PhantomData;
+
+use {Sink, Poll, StartSend};
+
+/// A sink combinator to change the error type of a sink.
+///
+/// This is created by the `Sink::from_err` method.
+#[derive(Clone, Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SinkFromErr<S, E> {
+ sink: S,
+ f: PhantomData<E>
+}
+
+pub fn new<S, E>(sink: S) -> SinkFromErr<S, E>
+ where S: Sink
+{
+ SinkFromErr {
+ sink: sink,
+ f: PhantomData
+ }
+}
+
+impl<S, E> SinkFromErr<S, E> {
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ &self.sink
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.sink
+ }
+
+ /// Consumes this combinator, returning the underlying sink.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.sink
+ }
+}
+
+impl<S, E> Sink for SinkFromErr<S, E>
+ where S: Sink,
+ E: From<S::SinkError>
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = E;
+
+ fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+ self.sink.start_send(item).map_err(|e| e.into())
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ self.sink.poll_complete().map_err(|e| e.into())
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ self.sink.close().map_err(|e| e.into())
+ }
+}
+
+impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.sink.poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/map_err.rs b/third_party/rust/futures-0.1.31/src/sink/map_err.rs
new file mode 100644
index 0000000000..25c168c071
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/map_err.rs
@@ -0,0 +1,64 @@
+use sink::Sink;
+
+use {Poll, StartSend, Stream};
+
+/// Sink for the `Sink::sink_map_err` combinator.
+#[derive(Clone,Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct SinkMapErr<S, F> {
+ sink: S,
+ f: Option<F>,
+}
+
+pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> {
+ SinkMapErr { sink: s, f: Some(f) }
+}
+
+impl<S, E> SinkMapErr<S, E> {
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ &self.sink
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.sink
+ }
+
+ /// Consumes this combinator, returning the underlying sink.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.sink
+ }
+}
+
+impl<S, F, E> Sink for SinkMapErr<S, F>
+ where S: Sink,
+ F: FnOnce(S::SinkError) -> E,
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = E;
+
+ fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+ self.sink.start_send(item).map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ self.sink.poll_complete().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
+ }
+}
+
+impl<S: Stream, F> Stream for SinkMapErr<S, F> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.sink.poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/mod.rs b/third_party/rust/futures-0.1.31/src/sink/mod.rs
new file mode 100644
index 0000000000..e5ea97f92a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/mod.rs
@@ -0,0 +1,489 @@
+//! Asynchronous sinks
+//!
+//! This module contains the `Sink` trait, along with a number of adapter types
+//! for it. An overview is available in the documentation for the trait itself.
+//!
+//! You can find more information/tutorials about streams [online at
+//! https://tokio.rs][online]
+//!
+//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+
+use {IntoFuture, Poll, StartSend};
+use stream::Stream;
+
+mod with;
+mod with_flat_map;
+// mod with_map;
+// mod with_filter;
+// mod with_filter_map;
+mod flush;
+mod from_err;
+mod send;
+mod send_all;
+mod map_err;
+mod fanout;
+
+if_std! {
+ mod buffer;
+ mod wait;
+
+ pub use self::buffer::Buffer;
+ pub use self::wait::Wait;
+
+ // TODO: consider expanding this via e.g. FromIterator
+ impl<T> Sink for ::std::vec::Vec<T> {
+ type SinkItem = T;
+ type SinkError = (); // Change this to ! once it stabilizes
+
+ fn start_send(&mut self, item: Self::SinkItem)
+ -> StartSend<Self::SinkItem, Self::SinkError>
+ {
+ self.push(item);
+ Ok(::AsyncSink::Ready)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ Ok(::Async::Ready(()))
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ Ok(::Async::Ready(()))
+ }
+ }
+
+ /// A type alias for `Box<Sink + Send>`
+ pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> +
+ ::core::marker::Send>;
+
+ impl<S: ?Sized + Sink> Sink for ::std::boxed::Box<S> {
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: Self::SinkItem)
+ -> StartSend<Self::SinkItem, Self::SinkError> {
+ (**self).start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ (**self).poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ (**self).close()
+ }
+ }
+}
+
+pub use self::with::With;
+pub use self::with_flat_map::WithFlatMap;
+pub use self::flush::Flush;
+pub use self::send::Send;
+pub use self::send_all::SendAll;
+pub use self::map_err::SinkMapErr;
+pub use self::from_err::SinkFromErr;
+pub use self::fanout::Fanout;
+
+/// A `Sink` is a value into which other values can be sent, asynchronously.
+///
+/// Basic examples of sinks include the sending side of:
+///
+/// - Channels
+/// - Sockets
+/// - Pipes
+///
+/// In addition to such "primitive" sinks, it's typical to layer additional
+/// functionality, such as buffering, on top of an existing sink.
+///
+/// Sending to a sink is "asynchronous" in the sense that the value may not be
+/// sent in its entirety immediately. Instead, values are sent in a two-phase
+/// way: first by initiating a send, and then by polling for completion. This
+/// two-phase setup is analogous to buffered writing in synchronous code, where
+/// writes often succeed immediately, but internally are buffered and are
+/// *actually* written only upon flushing.
+///
+/// In addition, the `Sink` may be *full*, in which case it is not even possible
+/// to start the sending process.
+///
+/// As with `Future` and `Stream`, the `Sink` trait is built from a few core
+/// required methods, and a host of default methods for working in a
+/// higher-level way. The `Sink::send_all` combinator is of particular
+/// importance: you can use it to send an entire stream to a sink, which is
+/// the simplest way to ultimately consume a sink.
+///
+/// You can find more information/tutorials about streams [online at
+/// https://tokio.rs][online]
+///
+/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+pub trait Sink {
+ /// The type of value that the sink accepts.
+ type SinkItem;
+
+ /// The type of value produced by the sink when an error occurs.
+ type SinkError;
+
+ /// Begin the process of sending a value to the sink.
+ ///
+ /// As the name suggests, this method only *begins* the process of sending
+ /// the item. If the sink employs buffering, the item isn't fully processed
+ /// until the buffer is fully flushed. Since sinks are designed to work with
+ /// asynchronous I/O, the process of actually writing out the data to an
+ /// underlying object takes place asynchronously. **You *must* use
+ /// `poll_complete` in order to drive completion of a send**. In particular,
+ /// `start_send` does not begin the flushing process
+ ///
+ /// # Return value
+ ///
+ /// This method returns `AsyncSink::Ready` if the sink was able to start
+ /// sending `item`. In that case, you *must* ensure that you call
+ /// `poll_complete` to process the sent item to completion. Note, however,
+ /// that several calls to `start_send` can be made prior to calling
+ /// `poll_complete`, which will work on completing all pending items.
+ ///
+ /// The method returns `AsyncSink::NotReady` if the sink was unable to begin
+ /// sending, usually due to being full. The sink must have attempted to
+ /// complete processing any outstanding requests (equivalent to
+ /// `poll_complete`) before yielding this result. The current task will be
+ /// automatically scheduled for notification when the sink may be ready to
+ /// receive new values.
+ ///
+ /// # Errors
+ ///
+ /// If the sink encounters an error other than being temporarily full, it
+ /// uses the `Err` variant to signal that error. In most cases, such errors
+ /// mean that the sink will permanently be unable to receive items.
+ ///
+ /// # Panics
+ ///
+ /// This method may panic in a few situations, depending on the specific
+ /// sink:
+ ///
+ /// - It is called outside of the context of a task.
+ /// - A previous call to `start_send` or `poll_complete` yielded an error.
+ fn start_send(&mut self, item: Self::SinkItem)
+ -> StartSend<Self::SinkItem, Self::SinkError>;
+
+ /// Flush all output from this sink, if necessary.
+ ///
+ /// Some sinks may buffer intermediate data as an optimization to improve
+ /// throughput. In other words, if a sink has a corresponding receiver then
+ /// a successful `start_send` above may not guarantee that the value is
+ /// actually ready to be received by the receiver. This function is intended
+ /// to be used to ensure that values do indeed make their way to the
+ /// receiver.
+ ///
+ /// This function will attempt to process any pending requests on behalf of
+ /// the sink and drive it to completion.
+ ///
+ /// # Return value
+ ///
+ /// Returns `Ok(Async::Ready(()))` when no buffered items remain. If this
+ /// value is returned then it is guaranteed that all previous values sent
+ /// via `start_send` will be guaranteed to be available to a listening
+ /// receiver.
+ ///
+ /// Returns `Ok(Async::NotReady)` if there is more work left to do, in which
+ /// case the current task is scheduled to wake up when more progress may be
+ /// possible.
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the sink encounters an error while processing one of
+ /// its pending requests. Due to the buffered nature of requests, it is not
+ /// generally possible to correlate the error with a particular request. As
+ /// with `start_send`, these errors are generally "fatal" for continued use
+ /// of the sink.
+ ///
+ /// # Panics
+ ///
+ /// This method may panic in a few situations, depending on the specific sink:
+ ///
+ /// - It is called outside of the context of a task.
+ /// - A previous call to `start_send` or `poll_complete` yielded an error.
+ ///
+    /// # Compatibility notes
+ ///
+ /// The name of this method may be slightly misleading as the original
+ /// intention was to have this method be more general than just flushing
+ /// requests. Over time though it was decided to trim back the ambitions of
+ /// this method to what it's always done, just flushing.
+ ///
+ /// In the 0.2 release series of futures this method will be renamed to
+ /// `poll_flush`. For 0.1, however, the breaking change is not happening
+ /// yet.
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError>;
+
+ /// A method to indicate that no more values will ever be pushed into this
+ /// sink.
+ ///
+ /// This method is used to indicate that a sink will no longer even be given
+ /// another value by the caller. That is, the `start_send` method above will
+ /// be called no longer (nor `poll_complete`). This method is intended to
+ /// model "graceful shutdown" in various protocols where the intent to shut
+ /// down is followed by a little more blocking work.
+ ///
+    /// Callers of this function should work with it in a similar fashion to
+ /// `poll_complete`. Once called it may return `NotReady` which indicates
+ /// that more external work needs to happen to make progress. The current
+ /// task will be scheduled to receive a notification in such an event,
+ /// however.
+ ///
+ /// Note that this function will imply `poll_complete` above. That is, if a
+ /// sink has buffered data, then it'll be flushed out during a `close`
+ /// operation. It is not necessary to have `poll_complete` return `Ready`
+ /// before a `close` is called. Once a `close` is called, though,
+ /// `poll_complete` cannot be called.
+ ///
+ /// # Return value
+ ///
+ /// This function, like `poll_complete`, returns a `Poll`. The value is
+ /// `Ready` once the close operation has completed. At that point it should
+ /// be safe to drop the sink and deallocate associated resources.
+ ///
+ /// If the value returned is `NotReady` then the sink is not yet closed and
+ /// work needs to be done to close it. The work has been scheduled and the
+ /// current task will receive a notification when it's next ready to call
+ /// this method again.
+ ///
+ /// Finally, this function may also return an error.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an `Err` if any operation along the way during
+ /// the close operation fails. An error typically is fatal for a sink and is
+ /// unable to be recovered from, but in specific situations this may not
+ /// always be true.
+ ///
+ /// Note that it's also typically an error to call `start_send` or
+ /// `poll_complete` after the `close` function is called. This method will
+ /// *initiate* a close, and continuing to send values after that (or attempt
+ /// to flush) may result in strange behavior, panics, errors, etc. Once this
+ /// method is called, it must be the only method called on this `Sink`.
+ ///
+ /// # Panics
+ ///
+ /// This method may panic or cause panics if:
+ ///
+ /// * It is called outside the context of a future's task
+ /// * It is called and then `start_send` or `poll_complete` is called
+ ///
+ /// # Compatibility notes
+ ///
+ /// Note that this function is currently by default a provided function,
+ /// defaulted to calling `poll_complete` above. This function was added
+ /// in the 0.1 series of the crate as a backwards-compatible addition. It
+ /// is intended that in the 0.2 series the method will no longer be a
+ /// default method.
+ ///
+ /// It is highly recommended to consider this method a required method and
+ /// to implement it whenever you implement `Sink` locally. It is especially
+ /// crucial to be sure to close inner sinks, if applicable.
+ #[cfg(feature = "with-deprecated")]
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ self.poll_complete()
+ }
+
+ /// dox (you should see the above, not this)
+ #[cfg(not(feature = "with-deprecated"))]
+ fn close(&mut self) -> Poll<(), Self::SinkError>;
+
+ /// Creates a new object which will produce a synchronous sink.
+ ///
+ /// The sink returned does **not** implement the `Sink` trait, and instead
+ /// only has two methods: `send` and `flush`. These two methods correspond
+ /// to `start_send` and `poll_complete` above except are executed in a
+ /// blocking fashion.
+ #[cfg(feature = "use_std")]
+ fn wait(self) -> Wait<Self>
+ where Self: Sized
+ {
+ wait::new(self)
+ }
+
+ /// Composes a function *in front of* the sink.
+ ///
+ /// This adapter produces a new sink that passes each value through the
+ /// given function `f` before sending it to `self`.
+ ///
+ /// To process each value, `f` produces a *future*, which is then polled to
+ /// completion before passing its result down to the underlying sink. If the
+ /// future produces an error, that error is returned by the new sink.
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::map`.
+ fn with<U, F, Fut>(self, f: F) -> With<Self, U, F, Fut>
+ where F: FnMut(U) -> Fut,
+ Fut: IntoFuture<Item = Self::SinkItem>,
+ Fut::Error: From<Self::SinkError>,
+ Self: Sized
+ {
+ with::new(self, f)
+ }
+
+ /// Composes a function *in front of* the sink.
+ ///
+ /// This adapter produces a new sink that passes each value through the
+ /// given function `f` before sending it to `self`.
+ ///
+ /// To process each value, `f` produces a *stream*, of which each value
+ /// is passed to the underlying sink. A new value will not be accepted until
+ /// the stream has been drained
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::flat_map`.
+ ///
+ /// # Examples
+ /// ---
+ /// Using this function with an iterator through use of the `stream::iter_ok()`
+ /// function
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::stream;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (tx, rx) = mpsc::channel::<i32>(5);
+ ///
+ /// let tx = tx.with_flat_map(|x| {
+ /// stream::iter_ok(vec![42; x].into_iter().map(|y| y))
+ /// });
+ /// tx.send(5).wait().unwrap();
+ /// assert_eq!(rx.collect().wait(), Ok(vec![42, 42, 42, 42, 42]))
+ /// ```
+ fn with_flat_map<U, F, St>(self, f: F) -> WithFlatMap<Self, U, F, St>
+ where F: FnMut(U) -> St,
+ St: Stream<Item = Self::SinkItem, Error=Self::SinkError>,
+ Self: Sized
+ {
+ with_flat_map::new(self, f)
+ }
+
+ /*
+ fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
+ where F: FnMut(U) -> Self::SinkItem,
+ Self: Sized;
+
+ fn with_filter<F>(self, f: F) -> WithFilter<Self, F>
+ where F: FnMut(Self::SinkItem) -> bool,
+ Self: Sized;
+
+ fn with_filter_map<U, F>(self, f: F) -> WithFilterMap<Self, U, F>
+ where F: FnMut(U) -> Option<Self::SinkItem>,
+ Self: Sized;
+ */
+
+ /// Transforms the error returned by the sink.
+ fn sink_map_err<F, E>(self, f: F) -> SinkMapErr<Self, F>
+ where F: FnOnce(Self::SinkError) -> E,
+ Self: Sized,
+ {
+ map_err::new(self, f)
+ }
+
+ /// Map this sink's error to any error implementing `From` for this sink's
+ /// `Error`, returning a new sink.
+ ///
+ /// If wanting to map errors of a `Sink + Stream`, use `.sink_from_err().from_err()`.
+ fn sink_from_err<E: From<Self::SinkError>>(self) -> from_err::SinkFromErr<Self, E>
+ where Self: Sized,
+ {
+ from_err::new(self)
+ }
+
+
+ /// Adds a fixed-size buffer to the current sink.
+ ///
+ /// The resulting sink will buffer up to `amt` items when the underlying
+ /// sink is unwilling to accept additional items. Calling `poll_complete` on
+ /// the buffered sink will attempt to both empty the buffer and complete
+ /// processing on the underlying sink.
+ ///
+ /// Note that this function consumes the given sink, returning a wrapped
+ /// version, much like `Iterator::map`.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "use_std")]
+ fn buffer(self, amt: usize) -> Buffer<Self>
+ where Self: Sized
+ {
+ buffer::new(self, amt)
+ }
+
+ /// Fanout items to multiple sinks.
+ ///
+ /// This adapter clones each incoming item and forwards it to both this as well as
+ /// the other sink at the same time.
+ fn fanout<S>(self, other: S) -> Fanout<Self, S>
+ where Self: Sized,
+ Self::SinkItem: Clone,
+ S: Sink<SinkItem=Self::SinkItem, SinkError=Self::SinkError>
+ {
+ fanout::new(self, other)
+ }
+
+ /// A future that completes when the sink has finished processing all
+ /// pending requests.
+ ///
+ /// The sink itself is returned after flushing is complete; this adapter is
+ /// intended to be used when you want to stop sending to the sink until
+ /// all current requests are processed.
+ fn flush(self) -> Flush<Self>
+ where Self: Sized
+ {
+ flush::new(self)
+ }
+
+ /// A future that completes after the given item has been fully processed
+ /// into the sink, including flushing.
+ ///
+ /// Note that, **because of the flushing requirement, it is usually better
+ /// to batch together items to send via `send_all`, rather than flushing
+ /// between each item.**
+ ///
+ /// On completion, the sink is returned.
+ fn send(self, item: Self::SinkItem) -> Send<Self>
+ where Self: Sized
+ {
+ send::new(self, item)
+ }
+
+ /// A future that completes after the given stream has been fully processed
+ /// into the sink, including flushing.
+ ///
+ /// This future will drive the stream to keep producing items until it is
+ /// exhausted, sending each item to the sink. It will complete once both the
+ /// stream is exhausted, the sink has received all items, the sink has been
+ /// flushed, and the sink has been closed.
+ ///
+ /// Doing `sink.send_all(stream)` is roughly equivalent to
+ /// `stream.forward(sink)`. The returned future will exhaust all items from
+ /// `stream` and send them to `self`, closing `self` when all items have been
+ /// received.
+ ///
+ /// On completion, the pair `(sink, source)` is returned.
+ fn send_all<S>(self, stream: S) -> SendAll<Self, S>
+ where S: Stream<Item = Self::SinkItem>,
+ Self::SinkError: From<S::Error>,
+ Self: Sized
+ {
+ send_all::new(self, stream)
+ }
+}
+
+impl<'a, S: ?Sized + Sink> Sink for &'a mut S {
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: Self::SinkItem)
+ -> StartSend<Self::SinkItem, Self::SinkError> {
+ (**self).start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ (**self).poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ (**self).close()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/send.rs b/third_party/rust/futures-0.1.31/src/sink/send.rs
new file mode 100644
index 0000000000..71173fa836
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/send.rs
@@ -0,0 +1,59 @@
+use {Poll, Async, Future, AsyncSink};
+use sink::Sink;
+
+/// Future for the `Sink::send` combinator, which sends a value to a sink and
+/// then waits until the sink has fully flushed.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Send<S: Sink> {
+ sink: Option<S>,
+ item: Option<S::SinkItem>,
+}
+
+pub fn new<S: Sink>(sink: S, item: S::SinkItem) -> Send<S> {
+ Send {
+ sink: Some(sink),
+ item: Some(item),
+ }
+}
+
+impl<S: Sink> Send<S> {
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ self.sink.as_ref().take().expect("Attempted Send::get_ref after completion")
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.sink.as_mut().take().expect("Attempted Send::get_mut after completion")
+ }
+
+ fn sink_mut(&mut self) -> &mut S {
+ self.sink.as_mut().take().expect("Attempted to poll Send after completion")
+ }
+
+ fn take_sink(&mut self) -> S {
+ self.sink.take().expect("Attempted to poll Send after completion")
+ }
+}
+
+impl<S: Sink> Future for Send<S> {
+ type Item = S;
+ type Error = S::SinkError;
+
+ fn poll(&mut self) -> Poll<S, S::SinkError> {
+ if let Some(item) = self.item.take() {
+ if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
+ self.item = Some(item);
+ return Ok(Async::NotReady);
+ }
+ }
+
+ // we're done sending the item, but want to block on flushing the
+ // sink
+ try_ready!(self.sink_mut().poll_complete());
+
+ // now everything's emptied, so return the sink for further use
+ Ok(Async::Ready(self.take_sink()))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/send_all.rs b/third_party/rust/futures-0.1.31/src/sink/send_all.rs
new file mode 100644
index 0000000000..a230903d1c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/send_all.rs
@@ -0,0 +1,88 @@
+use {Poll, Async, Future, AsyncSink};
+use stream::{Stream, Fuse};
+use sink::Sink;
+
+/// Future for the `Sink::send_all` combinator, which sends a stream of values
+/// to a sink and then waits until the sink has fully flushed those values.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct SendAll<T, U: Stream> {
+ sink: Option<T>,
+ stream: Option<Fuse<U>>,
+ buffered: Option<U::Item>,
+}
+
+pub fn new<T, U>(sink: T, stream: U) -> SendAll<T, U>
+ where T: Sink,
+ U: Stream<Item = T::SinkItem>,
+ T::SinkError: From<U::Error>,
+{
+ SendAll {
+ sink: Some(sink),
+ stream: Some(stream.fuse()),
+ buffered: None,
+ }
+}
+
+impl<T, U> SendAll<T, U>
+ where T: Sink,
+ U: Stream<Item = T::SinkItem>,
+ T::SinkError: From<U::Error>,
+{
+ fn sink_mut(&mut self) -> &mut T {
+ self.sink.as_mut().take().expect("Attempted to poll SendAll after completion")
+ }
+
+ fn stream_mut(&mut self) -> &mut Fuse<U> {
+ self.stream.as_mut().take()
+ .expect("Attempted to poll SendAll after completion")
+ }
+
+ fn take_result(&mut self) -> (T, U) {
+ let sink = self.sink.take()
+ .expect("Attempted to poll Forward after completion");
+ let fuse = self.stream.take()
+ .expect("Attempted to poll Forward after completion");
+ (sink, fuse.into_inner())
+ }
+
+ fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> {
+ debug_assert!(self.buffered.is_none());
+ if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
+ self.buffered = Some(item);
+ return Ok(Async::NotReady)
+ }
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T, U> Future for SendAll<T, U>
+ where T: Sink,
+ U: Stream<Item = T::SinkItem>,
+ T::SinkError: From<U::Error>,
+{
+ type Item = (T, U);
+ type Error = T::SinkError;
+
+ fn poll(&mut self) -> Poll<(T, U), T::SinkError> {
+ // If we've got an item buffered already, we need to write it to the
+ // sink before we can do anything else
+ if let Some(item) = self.buffered.take() {
+ try_ready!(self.try_start_send(item))
+ }
+
+ loop {
+ match self.stream_mut().poll()? {
+ Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
+ Async::Ready(None) => {
+ try_ready!(self.sink_mut().close());
+ return Ok(Async::Ready(self.take_result()))
+ }
+ Async::NotReady => {
+ try_ready!(self.sink_mut().poll_complete());
+ return Ok(Async::NotReady)
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/wait.rs b/third_party/rust/futures-0.1.31/src/sink/wait.rs
new file mode 100644
index 0000000000..940a58862f
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/wait.rs
@@ -0,0 +1,59 @@
+use sink::Sink;
+use executor;
+
+/// A sink combinator which converts an asynchronous sink to a **blocking
+/// sink**.
+///
+/// Created by the `Sink::wait` method, this function transforms any sink into a
+/// blocking version. This is implemented by blocking the current thread when a
+/// sink is otherwise unable to make progress.
+#[must_use = "sinks do nothing unless used"]
+#[derive(Debug)]
+pub struct Wait<S> {
+ sink: executor::Spawn<S>,
+}
+
+pub fn new<S: Sink>(s: S) -> Wait<S> {
+ Wait {
+ sink: executor::spawn(s),
+ }
+}
+
+impl<S: Sink> Wait<S> {
+ /// Sends a value to this sink, blocking the current thread until it's able
+ /// to do so.
+ ///
+ /// This function will take the `value` provided and call the underlying
+ /// sink's `start_send` function until it's ready to accept the value. If
+ /// the function returns `NotReady` then the current thread is blocked
+ /// until it is otherwise ready to accept the value.
+ ///
+ /// # Return value
+ ///
+ /// If `Ok(())` is returned then the `value` provided was successfully sent
+ /// along the sink, and if `Err(e)` is returned then an error occurred
+ /// which prevented the value from being sent.
+ pub fn send(&mut self, value: S::SinkItem) -> Result<(), S::SinkError> {
+ self.sink.wait_send(value)
+ }
+
+ /// Flushes any buffered data in this sink, blocking the current thread
+ /// until it's entirely flushed.
+ ///
+ /// This function will call the underlying sink's `poll_complete` method
+ /// until it returns that it's ready to proceed. If the method returns
+ /// `NotReady` the current thread will be blocked until it's otherwise
+ /// ready to proceed.
+ pub fn flush(&mut self) -> Result<(), S::SinkError> {
+ self.sink.wait_flush()
+ }
+
+ /// Close this sink, blocking the current thread until it's entirely closed.
+ ///
+ /// This function will call the underlying sink's `close` method
+ /// until it returns that it's closed. If the method returns
+ /// `NotReady` the current thread will be blocked until it's otherwise closed.
+ pub fn close(&mut self) -> Result<(), S::SinkError> {
+ self.sink.wait_close()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/with.rs b/third_party/rust/futures-0.1.31/src/sink/with.rs
new file mode 100644
index 0000000000..3326b6e49c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/with.rs
@@ -0,0 +1,153 @@
+use core::mem;
+use core::marker::PhantomData;
+
+use {IntoFuture, Future, Poll, Async, StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::with` combinator, chaining a computation to run *prior*
+/// to pushing a value into the underlying sink.
+#[derive(Clone, Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct With<S, U, F, Fut>
+ where S: Sink,
+ F: FnMut(U) -> Fut,
+ Fut: IntoFuture,
+{
+ sink: S,
+ f: F,
+ state: State<Fut::Future, S::SinkItem>,
+ _phantom: PhantomData<fn(U)>,
+}
+
+#[derive(Clone, Debug)]
+enum State<Fut, T> {
+ Empty,
+ Process(Fut),
+ Buffered(T),
+}
+
+impl<Fut, T> State<Fut, T> {
+ fn is_empty(&self) -> bool {
+ if let State::Empty = *self {
+ true
+ } else {
+ false
+ }
+ }
+}
+
+pub fn new<S, U, F, Fut>(sink: S, f: F) -> With<S, U, F, Fut>
+ where S: Sink,
+ F: FnMut(U) -> Fut,
+ Fut: IntoFuture<Item = S::SinkItem>,
+ Fut::Error: From<S::SinkError>,
+{
+ With {
+ state: State::Empty,
+ sink: sink,
+ f: f,
+ _phantom: PhantomData,
+ }
+}
+
+// Forwarding impl of Stream from the underlying sink
+impl<S, U, F, Fut> Stream for With<S, U, F, Fut>
+ where S: Stream + Sink,
+ F: FnMut(U) -> Fut,
+ Fut: IntoFuture
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.sink.poll()
+ }
+}
+
+impl<S, U, F, Fut> With<S, U, F, Fut>
+ where S: Sink,
+ F: FnMut(U) -> Fut,
+ Fut: IntoFuture<Item = S::SinkItem>,
+ Fut::Error: From<S::SinkError>,
+{
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ &self.sink
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.sink
+ }
+
+ /// Consumes this combinator, returning the underlying sink.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.sink
+ }
+
+ fn poll(&mut self) -> Poll<(), Fut::Error> {
+ loop {
+ match mem::replace(&mut self.state, State::Empty) {
+ State::Empty => break,
+ State::Process(mut fut) => {
+ match fut.poll()? {
+ Async::Ready(item) => {
+ self.state = State::Buffered(item);
+ }
+ Async::NotReady => {
+ self.state = State::Process(fut);
+ break
+ }
+ }
+ }
+ State::Buffered(item) => {
+ if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
+ self.state = State::Buffered(item);
+ break
+ }
+ }
+ }
+ }
+
+ if self.state.is_empty() {
+ Ok(Async::Ready(()))
+ } else {
+ Ok(Async::NotReady)
+ }
+ }
+}
+
+impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
+ where S: Sink,
+ F: FnMut(U) -> Fut,
+ Fut: IntoFuture<Item = S::SinkItem>,
+ Fut::Error: From<S::SinkError>,
+{
+ type SinkItem = U;
+ type SinkError = Fut::Error;
+
+ fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> {
+ if self.poll()?.is_not_ready() {
+ return Ok(AsyncSink::NotReady(item))
+ }
+ self.state = State::Process((self.f)(item).into_future());
+ Ok(AsyncSink::Ready)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Fut::Error> {
+ // poll ourselves first, to push data downward
+ let me_ready = self.poll()?;
+ // always propagate `poll_complete` downward to attempt to make progress
+ try_ready!(self.sink.poll_complete());
+ Ok(me_ready)
+ }
+
+ fn close(&mut self) -> Poll<(), Fut::Error> {
+ try_ready!(self.poll());
+ Ok(self.sink.close()?)
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs b/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs
new file mode 100644
index 0000000000..80c4f6605a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sink/with_flat_map.rs
@@ -0,0 +1,126 @@
+use core::marker::PhantomData;
+
+use {Poll, Async, StartSend, AsyncSink};
+use sink::Sink;
+use stream::Stream;
+
+/// Sink for the `Sink::with_flat_map` combinator, chaining a computation that returns an iterator
+/// to run prior to pushing a value into the underlying sink
+#[derive(Debug)]
+#[must_use = "sinks do nothing unless polled"]
+pub struct WithFlatMap<S, U, F, St>
+where
+ S: Sink,
+ F: FnMut(U) -> St,
+ St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+ sink: S,
+ f: F,
+ stream: Option<St>,
+ buffer: Option<S::SinkItem>,
+ _phantom: PhantomData<fn(U)>,
+}
+
+pub fn new<S, U, F, St>(sink: S, f: F) -> WithFlatMap<S, U, F, St>
+where
+ S: Sink,
+ F: FnMut(U) -> St,
+ St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+ WithFlatMap {
+ sink: sink,
+ f: f,
+ stream: None,
+ buffer: None,
+ _phantom: PhantomData,
+ }
+}
+
+impl<S, U, F, St> WithFlatMap<S, U, F, St>
+where
+ S: Sink,
+ F: FnMut(U) -> St,
+ St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+ /// Get a shared reference to the inner sink.
+ pub fn get_ref(&self) -> &S {
+ &self.sink
+ }
+
+ /// Get a mutable reference to the inner sink.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.sink
+ }
+
+ /// Consumes this combinator, returning the underlying sink.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.sink
+ }
+
+ fn try_empty_stream(&mut self) -> Poll<(), S::SinkError> {
+ if let Some(x) = self.buffer.take() {
+ if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+ self.buffer = Some(x);
+ return Ok(Async::NotReady);
+ }
+ }
+ if let Some(mut stream) = self.stream.take() {
+ while let Some(x) = try_ready!(stream.poll()) {
+ if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
+ self.stream = Some(stream);
+ self.buffer = Some(x);
+ return Ok(Async::NotReady);
+ }
+ }
+ }
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<S, U, F, St> Stream for WithFlatMap<S, U, F, St>
+where
+ S: Stream + Sink,
+ F: FnMut(U) -> St,
+ St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.sink.poll()
+ }
+}
+
+impl<S, U, F, St> Sink for WithFlatMap<S, U, F, St>
+where
+ S: Sink,
+ F: FnMut(U) -> St,
+ St: Stream<Item = S::SinkItem, Error=S::SinkError>,
+{
+ type SinkItem = U;
+ type SinkError = S::SinkError;
+ fn start_send(&mut self, i: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
+ if self.try_empty_stream()?.is_not_ready() {
+ return Ok(AsyncSink::NotReady(i));
+ }
+ assert!(self.stream.is_none());
+ self.stream = Some((self.f)(i));
+ self.try_empty_stream()?;
+ Ok(AsyncSink::Ready)
+ }
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ if self.try_empty_stream()?.is_not_ready() {
+ return Ok(Async::NotReady);
+ }
+ self.sink.poll_complete()
+ }
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ if self.try_empty_stream()?.is_not_ready() {
+ return Ok(Async::NotReady);
+ }
+ assert!(self.stream.is_none());
+ self.sink.close()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/and_then.rs b/third_party/rust/futures-0.1.31/src/stream/and_then.rs
new file mode 100644
index 0000000000..1fac8b952d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/and_then.rs
@@ -0,0 +1,106 @@
+use {IntoFuture, Future, Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto values produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::and_then` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct AndThen<S, F, U>
+ where U: IntoFuture,
+{
+ stream: S,
+ future: Option<U::Future>,
+ f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+ U: IntoFuture<Error=S::Error>,
+{
+ AndThen {
+ stream: s,
+ future: None,
+ f: f,
+ }
+}
+
+impl<S, F, U> AndThen<S, F, U>
+ where U: IntoFuture,
+{
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U>
+ where S: ::sink::Sink
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F, U> Stream for AndThen<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+ U: IntoFuture<Error=S::Error>,
+{
+ type Item = U::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<U::Item>, S::Error> {
+ if self.future.is_none() {
+ let item = match try_ready!(self.stream.poll()) {
+ None => return Ok(Async::Ready(None)),
+ Some(e) => e,
+ };
+ self.future = Some((self.f)(item).into_future());
+ }
+ assert!(self.future.is_some());
+ match self.future.as_mut().unwrap().poll() {
+ Ok(Async::Ready(e)) => {
+ self.future = None;
+ Ok(Async::Ready(Some(e)))
+ }
+ Err(e) => {
+ self.future = None;
+ Err(e)
+ }
+ Ok(Async::NotReady) => Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs b/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs
new file mode 100644
index 0000000000..3011108cf3
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/buffer_unordered.rs
@@ -0,0 +1,130 @@
+use std::fmt;
+
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesUnordered};
+
+/// An adaptor for a stream of futures to execute the futures concurrently, if
+/// possible, delivering results as they become available.
+///
+/// This adaptor will buffer up a list of pending futures, and then return their
+/// results in the order that they complete. This is created by the
+/// `Stream::buffer_unordered` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct BufferUnordered<S>
+ where S: Stream,
+ S::Item: IntoFuture,
+{
+ stream: Fuse<S>,
+ queue: FuturesUnordered<<S::Item as IntoFuture>::Future>,
+ max: usize,
+}
+
+impl<S> fmt::Debug for BufferUnordered<S>
+ where S: Stream + fmt::Debug,
+ S::Item: IntoFuture,
+ <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("BufferUnordered")
+ .field("stream", &self.stream)
+ .field("queue", &self.queue)
+ .field("max", &self.max)
+ .finish()
+ }
+}
+
+pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ BufferUnordered {
+ stream: super::fuse::new(s),
+ queue: FuturesUnordered::new(),
+ max: amt,
+ }
+}
+
+impl<S> BufferUnordered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ self.stream.get_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.stream.get_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream.into_inner()
+ }
+}
+
+impl<S> Stream for BufferUnordered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ type Item = <S::Item as IntoFuture>::Item;
+ type Error = <S as Stream>::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ // First up, try to spawn off as many futures as possible by filling up
+ // our slab of futures.
+ while self.queue.len() < self.max {
+ let future = match self.stream.poll()? {
+ Async::Ready(Some(s)) => s.into_future(),
+ Async::Ready(None) |
+ Async::NotReady => break,
+ };
+
+ self.queue.push(future);
+ }
+
+ // Try polling a new future
+ if let Some(val) = try_ready!(self.queue.poll()) {
+ return Ok(Async::Ready(Some(val)));
+ }
+
+ // If we've gotten this far, then there are no events for us to process
+ // and nothing was ready, so figure out if we're not done yet or if
+ // we've reached the end.
+ if self.stream.is_done() {
+ Ok(Async::Ready(None))
+ } else {
+ Ok(Async::NotReady)
+ }
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for BufferUnordered<S>
+ where S: ::sink::Sink + Stream,
+ S::Item: IntoFuture,
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/buffered.rs b/third_party/rust/futures-0.1.31/src/stream/buffered.rs
new file mode 100644
index 0000000000..5616b73d7a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/buffered.rs
@@ -0,0 +1,132 @@
+use std::fmt;
+
+use {Async, IntoFuture, Poll};
+use stream::{Stream, Fuse, FuturesOrdered};
+
+/// An adaptor for a stream of futures to execute the futures concurrently, if
+/// possible.
+///
+/// This adaptor will buffer up a list of pending futures, and then return their
+/// results in the order that they were pulled out of the original stream. This
+/// is created by the `Stream::buffered` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct Buffered<S>
+ where S: Stream,
+ S::Item: IntoFuture,
+{
+ stream: Fuse<S>,
+ queue: FuturesOrdered<<S::Item as IntoFuture>::Future>,
+ max: usize,
+}
+
+impl<S> fmt::Debug for Buffered<S>
+ where S: Stream + fmt::Debug,
+ S::Item: IntoFuture,
+ <<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
+ <<S as Stream>::Item as IntoFuture>::Item: fmt::Debug,
+ <<S as Stream>::Item as IntoFuture>::Error: fmt::Debug,
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("Buffered")
+ .field("stream", &self.stream)
+ .field("queue", &self.queue)
+ .field("max", &self.max)
+ .finish()
+ }
+}
+
+pub fn new<S>(s: S, amt: usize) -> Buffered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ Buffered {
+ stream: super::fuse::new(s),
+ queue: FuturesOrdered::new(),
+ max: amt,
+ }
+}
+
+impl<S> Buffered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ self.stream.get_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.stream.get_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream.into_inner()
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Buffered<S>
+ where S: ::sink::Sink + Stream,
+ S::Item: IntoFuture,
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S> Stream for Buffered<S>
+ where S: Stream,
+ S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+ type Item = <S::Item as IntoFuture>::Item;
+ type Error = <S as Stream>::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ // First up, try to spawn off as many futures as possible by filling up
+ // our slab of futures.
+ while self.queue.len() < self.max {
+ let future = match self.stream.poll()? {
+ Async::Ready(Some(s)) => s.into_future(),
+ Async::Ready(None) |
+ Async::NotReady => break,
+ };
+
+ self.queue.push(future);
+ }
+
+ // Try polling a new future
+ if let Some(val) = try_ready!(self.queue.poll()) {
+ return Ok(Async::Ready(Some(val)));
+ }
+
+ // If we've gotten this far, then there are no events for us to process
+ // and nothing was ready, so figure out if we're not done yet or if
+ // we've reached the end.
+ if self.stream.is_done() {
+ Ok(Async::Ready(None))
+ } else {
+ Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs b/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs
new file mode 100644
index 0000000000..d3244946e5
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/catch_unwind.rs
@@ -0,0 +1,71 @@
+use std::prelude::v1::*;
+use std::any::Any;
+use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};
+use std::mem;
+
+use super::super::{Poll, Async};
+use super::Stream;
+
+/// Stream for the `catch_unwind` combinator.
+///
+/// This is created by the `Stream::catch_unwind` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct CatchUnwind<S> where S: Stream {
+ state: CatchUnwindState<S>,
+}
+
+pub fn new<S>(stream: S) -> CatchUnwind<S>
+ where S: Stream + UnwindSafe,
+{
+ CatchUnwind {
+ state: CatchUnwindState::Stream(stream),
+ }
+}
+
+#[derive(Debug)]
+enum CatchUnwindState<S> {
+ Stream(S),
+ Eof,
+ Done,
+}
+
+impl<S> Stream for CatchUnwind<S>
+ where S: Stream + UnwindSafe,
+{
+ type Item = Result<S::Item, S::Error>;
+ type Error = Box<Any + Send>;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ let mut stream = match mem::replace(&mut self.state, CatchUnwindState::Eof) {
+ CatchUnwindState::Done => panic!("cannot poll after eof"),
+ CatchUnwindState::Eof => {
+ self.state = CatchUnwindState::Done;
+ return Ok(Async::Ready(None));
+ }
+ CatchUnwindState::Stream(stream) => stream,
+ };
+ let res = catch_unwind(|| (stream.poll(), stream));
+ match res {
+ Err(e) => Err(e), // and state is already Eof
+ Ok((poll, stream)) => {
+ self.state = CatchUnwindState::Stream(stream);
+ match poll {
+ Err(e) => Ok(Async::Ready(Some(Err(e)))),
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ Ok(Async::Ready(Some(r))) => Ok(Async::Ready(Some(Ok(r)))),
+ Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+ }
+ }
+ }
+ }
+}
+
+impl<S: Stream> Stream for AssertUnwindSafe<S> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.0.poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/chain.rs b/third_party/rust/futures-0.1.31/src/stream/chain.rs
new file mode 100644
index 0000000000..0ff0e5ce6f
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/chain.rs
@@ -0,0 +1,57 @@
+use core::mem;
+
+use stream::Stream;
+use {Async, Poll};
+
+
+/// State of chain stream.
+#[derive(Debug)]
+enum State<S1, S2> {
+ /// Emitting elements of first stream
+ First(S1, S2),
+ /// Emitting elements of second stream
+ Second(S2),
+ /// Temporary value to replace first with second
+ Temp,
+}
+
+/// An adapter for chaining the output of two streams.
+///
+/// The resulting stream produces items from first stream and then
+/// from second stream.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Chain<S1, S2> {
+ state: State<S1, S2>
+}
+
+pub fn new<S1, S2>(s1: S1, s2: S2) -> Chain<S1, S2>
+ where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
+{
+ Chain { state: State::First(s1, s2) }
+}
+
+impl<S1, S2> Stream for Chain<S1, S2>
+ where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
+{
+ type Item = S1::Item;
+ type Error = S1::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ loop {
+ match self.state {
+ State::First(ref mut s1, ref _s2) => match s1.poll() {
+ Ok(Async::Ready(None)) => (), // roll
+ x => return x,
+ },
+ State::Second(ref mut s2) => return s2.poll(),
+ State::Temp => unreachable!(),
+ }
+
+ self.state = match mem::replace(&mut self.state, State::Temp) {
+ State::First(_s1, s2) => State::Second(s2),
+ _ => unreachable!(),
+ };
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/channel.rs b/third_party/rust/futures-0.1.31/src/stream/channel.rs
new file mode 100644
index 0000000000..89a419d150
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/channel.rs
@@ -0,0 +1,114 @@
+#![cfg(feature = "with-deprecated")]
+#![deprecated(since = "0.1.4", note = "use sync::mpsc::channel instead")]
+#![allow(deprecated)]
+
+use std::any::Any;
+use std::error::Error;
+use std::fmt;
+
+use {Poll, Async, Stream, Future, Sink};
+use sink::Send;
+use sync::mpsc;
+
+/// Creates an in-memory channel implementation of the `Stream` trait.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. This
+/// channel is unique in that it implements back pressure to ensure that the
+/// sender never outpaces the receiver. The `Sender::send` method will only
+/// allow sending one message and the next message can only be sent once the
+/// first was consumed.
+///
+/// The `Receiver` returned implements the `Stream` trait and has access to any
+/// number of the associated combinators for transforming the result.
+pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>) {
+ let (tx, rx) = mpsc::channel(0);
+ (Sender { inner: tx }, Receiver { inner: rx })
+}
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `channel` method in the `stream` module.
+#[derive(Debug)]
+pub struct Sender<T, E> {
+ inner: mpsc::Sender<Result<T, E>>,
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `channel` method in the `stream` module.
+#[must_use = "streams do nothing unless polled"]
+#[derive(Debug)]
+pub struct Receiver<T, E> {
+ inner: mpsc::Receiver<Result<T, E>>,
+}
+
+/// Error type for sending, used when the receiving end of the channel is dropped
+pub struct SendError<T, E>(Result<T, E>);
+
+/// Future returned by `Sender::send`.
+#[derive(Debug)]
+pub struct FutureSender<T, E> {
+ inner: Send<mpsc::Sender<Result<T, E>>>,
+}
+
+impl<T, E> fmt::Debug for SendError<T, E> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("SendError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T, E> fmt::Display for SendError<T, E> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "send failed because receiver is gone")
+ }
+}
+
+impl<T, E> Error for SendError<T, E>
+ where T: Any, E: Any
+{
+ fn description(&self) -> &str {
+ "send failed because receiver is gone"
+ }
+}
+
+
+impl<T, E> Stream for Receiver<T, E> {
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<T>, E> {
+ match self.inner.poll().expect("cannot fail") {
+ Async::Ready(Some(Ok(e))) => Ok(Async::Ready(Some(e))),
+ Async::Ready(Some(Err(e))) => Err(e),
+ Async::Ready(None) => Ok(Async::Ready(None)),
+ Async::NotReady => Ok(Async::NotReady),
+ }
+ }
+}
+
+impl<T, E> Sender<T, E> {
+ /// Sends a new value along this channel to the receiver.
+ ///
+ /// This method consumes the sender and returns a future which will resolve
+ /// to the sender again when the value sent has been consumed.
+ pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> {
+ FutureSender { inner: self.inner.send(t) }
+ }
+}
+
+impl<T, E> Future for FutureSender<T, E> {
+ type Item = Sender<T, E>;
+ type Error = SendError<T, E>;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ match self.inner.poll() {
+ Ok(a) => Ok(a.map(|a| Sender { inner: a })),
+ Err(e) => Err(SendError(e.into_inner())),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/chunks.rs b/third_party/rust/futures-0.1.31/src/stream/chunks.rs
new file mode 100644
index 0000000000..dbfaeb89ec
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/chunks.rs
@@ -0,0 +1,136 @@
+use std::mem;
+use std::prelude::v1::*;
+
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// An adaptor that chunks up elements in a vector.
+///
+/// This adaptor will buffer up a list of items in the stream and pass on the
+/// vector used for buffering when a specified capacity has been reached. This
+/// is created by the `Stream::chunks` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Chunks<S>
+ where S: Stream
+{
+ items: Vec<S::Item>,
+ err: Option<S::Error>,
+ stream: Fuse<S>,
+ cap: usize, // https://github.com/rust-lang-nursery/futures-rs/issues/1475
+}
+
+pub fn new<S>(s: S, capacity: usize) -> Chunks<S>
+ where S: Stream
+{
+ assert!(capacity > 0);
+
+ Chunks {
+ items: Vec::with_capacity(capacity),
+ err: None,
+ stream: super::fuse::new(s),
+ cap: capacity,
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Chunks<S>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+
+impl<S> Chunks<S> where S: Stream {
+ fn take(&mut self) -> Vec<S::Item> {
+ let cap = self.cap;
+ mem::replace(&mut self.items, Vec::with_capacity(cap))
+ }
+
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ self.stream.get_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.stream.get_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream.into_inner()
+ }
+}
+
+impl<S> Stream for Chunks<S>
+ where S: Stream
+{
+ type Item = Vec<<S as Stream>::Item>;
+ type Error = <S as Stream>::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ if let Some(err) = self.err.take() {
+ return Err(err)
+ }
+
+ loop {
+ match self.stream.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+
+ // Push the item into the buffer and check whether it is full.
+ // If so, replace our buffer with a new and empty one and return
+ // the full one.
+ Ok(Async::Ready(Some(item))) => {
+ self.items.push(item);
+ if self.items.len() >= self.cap {
+ return Ok(Some(self.take()).into())
+ }
+ }
+
+ // Since the underlying stream ran out of values, return what we
+ // have buffered, if we have anything.
+ Ok(Async::Ready(None)) => {
+ return if self.items.len() > 0 {
+ let full_buf = mem::replace(&mut self.items, Vec::new());
+ Ok(Some(full_buf).into())
+ } else {
+ Ok(Async::Ready(None))
+ }
+ }
+
+ // If we've got buffered items be sure to return them first,
+ // we'll defer our error for later.
+ Err(e) => {
+ if self.items.len() == 0 {
+ return Err(e)
+ } else {
+ self.err = Some(e);
+ return Ok(Some(self.take()).into())
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/collect.rs b/third_party/rust/futures-0.1.31/src/stream/collect.rs
new file mode 100644
index 0000000000..8bd9d0e1dc
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/collect.rs
@@ -0,0 +1,52 @@
+use std::prelude::v1::*;
+
+use std::mem;
+
+use {Future, Poll, Async};
+use stream::Stream;
+
+/// A future which collects all of the values of a stream into a vector.
+///
+/// This future is created by the `Stream::collect` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Collect<S> where S: Stream {
+ stream: S,
+ items: Vec<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Collect<S>
+ where S: Stream,
+{
+ Collect {
+ stream: s,
+ items: Vec::new(),
+ }
+}
+
+impl<S: Stream> Collect<S> {
+ fn finish(&mut self) -> Vec<S::Item> {
+ mem::replace(&mut self.items, Vec::new())
+ }
+}
+
+impl<S> Future for Collect<S>
+ where S: Stream,
+{
+ type Item = Vec<S::Item>;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Vec<S::Item>, S::Error> {
+ loop {
+ match self.stream.poll() {
+ Ok(Async::Ready(Some(e))) => self.items.push(e),
+ Ok(Async::Ready(None)) => return Ok(Async::Ready(self.finish())),
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Err(e) => {
+ self.finish();
+ return Err(e)
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/concat.rs b/third_party/rust/futures-0.1.31/src/stream/concat.rs
new file mode 100644
index 0000000000..a0da71bdd5
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/concat.rs
@@ -0,0 +1,172 @@
+use core::mem;
+use core::fmt::{Debug, Formatter, Result as FmtResult};
+use core::default::Default;
+
+use {Poll, Async};
+use future::Future;
+use stream::Stream;
+
+/// A stream combinator to concatenate the results of a stream into the first
+/// yielded item.
+///
+/// This structure is produced by the `Stream::concat2` method.
+#[must_use = "streams do nothing unless polled"]
+pub struct Concat2<S>
+ where S: Stream,
+{
+ inner: ConcatSafe<S>
+}
+
+impl<S: Debug> Debug for Concat2<S> where S: Stream, S::Item: Debug {
+ fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+ fmt.debug_struct("Concat2")
+ .field("inner", &self.inner)
+ .finish()
+ }
+}
+
+pub fn new2<S>(s: S) -> Concat2<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+{
+ Concat2 {
+ inner: new_safe(s)
+ }
+}
+
+impl<S> Future for Concat2<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ self.inner.poll().map(|a| {
+ match a {
+ Async::NotReady => Async::NotReady,
+ Async::Ready(None) => Async::Ready(Default::default()),
+ Async::Ready(Some(e)) => Async::Ready(e)
+ }
+ })
+ }
+}
+
+
+/// A stream combinator to concatenate the results of a stream into the first
+/// yielded item.
+///
+/// This structure is produced by the `Stream::concat` method.
+#[deprecated(since="0.1.18", note="please use `Stream::Concat2` instead")]
+#[must_use = "streams do nothing unless polled"]
+pub struct Concat<S>
+ where S: Stream,
+{
+ inner: ConcatSafe<S>
+}
+
+#[allow(deprecated)]
+impl<S: Debug> Debug for Concat<S> where S: Stream, S::Item: Debug {
+ fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
+ fmt.debug_struct("Concat")
+ .field("inner", &self.inner)
+ .finish()
+ }
+}
+
+#[allow(deprecated)]
+pub fn new<S>(s: S) -> Concat<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+{
+ Concat {
+ inner: new_safe(s)
+ }
+}
+
+#[allow(deprecated)]
+impl<S> Future for Concat<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ self.inner.poll().map(|a| {
+ match a {
+ Async::NotReady => Async::NotReady,
+ Async::Ready(None) => panic!("attempted concatenation of empty stream"),
+ Async::Ready(Some(e)) => Async::Ready(e)
+ }
+ })
+ }
+}
+
+
+#[derive(Debug)]
+struct ConcatSafe<S>
+ where S: Stream,
+{
+ stream: S,
+ extend: Inner<S::Item>,
+}
+
+fn new_safe<S>(s: S) -> ConcatSafe<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+{
+ ConcatSafe {
+ stream: s,
+ extend: Inner::First,
+ }
+}
+
+impl<S> Future for ConcatSafe<S>
+ where S: Stream,
+ S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+
+{
+ type Item = Option<S::Item>;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ loop {
+ match self.stream.poll() {
+ Ok(Async::Ready(Some(i))) => {
+ match self.extend {
+ Inner::First => {
+ self.extend = Inner::Extending(i);
+ },
+ Inner::Extending(ref mut e) => {
+ e.extend(i);
+ },
+ Inner::Done => unreachable!(),
+ }
+ },
+ Ok(Async::Ready(None)) => {
+ match mem::replace(&mut self.extend, Inner::Done) {
+ Inner::First => return Ok(Async::Ready(None)),
+ Inner::Extending(e) => return Ok(Async::Ready(Some(e))),
+ Inner::Done => panic!("cannot poll Concat again")
+ }
+ },
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Err(e) => {
+ self.extend = Inner::Done;
+ return Err(e)
+ }
+ }
+ }
+ }
+}
+
+
+#[derive(Debug)]
+enum Inner<E> {
+ First,
+ Extending(E),
+ Done,
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/empty.rs b/third_party/rust/futures-0.1.31/src/stream/empty.rs
new file mode 100644
index 0000000000..c53fb80238
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/empty.rs
@@ -0,0 +1,29 @@
+use core::marker;
+
+use stream::Stream;
+use {Poll, Async};
+
+/// A stream which contains no elements.
+///
+/// This stream can be created with the `stream::empty` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Empty<T, E> {
+ _data: marker::PhantomData<(T, E)>,
+}
+
+/// Creates a stream which contains no elements.
+///
+/// The returned stream will always return `Ready(None)` when polled.
+pub fn empty<T, E>() -> Empty<T, E> {
+ Empty { _data: marker::PhantomData }
+}
+
+impl<T, E> Stream for Empty<T, E> {
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ Ok(Async::Ready(None))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/filter.rs b/third_party/rust/futures-0.1.31/src/stream/filter.rs
new file mode 100644
index 0000000000..99c4abd657
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/filter.rs
@@ -0,0 +1,89 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream combinator used to filter the results of a stream and only yield
+/// some values.
+///
+/// This structure is produced by the `Stream::filter` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Filter<S, F> {
+ stream: S,
+ f: F,
+}
+
+pub fn new<S, F>(s: S, f: F) -> Filter<S, F>
+ where S: Stream,
+ F: FnMut(&S::Item) -> bool,
+{
+ Filter {
+ stream: s,
+ f: f,
+ }
+}
+
+impl<S, F> Filter<S, F> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Filter<S, F>
+ where S: ::sink::Sink
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F> Stream for Filter<S, F>
+ where S: Stream,
+ F: FnMut(&S::Item) -> bool,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ loop {
+ match try_ready!(self.stream.poll()) {
+ Some(e) => {
+ if (self.f)(&e) {
+ return Ok(Async::Ready(Some(e)))
+ }
+ }
+ None => return Ok(Async::Ready(None)),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/filter_map.rs b/third_party/rust/futures-0.1.31/src/stream/filter_map.rs
new file mode 100644
index 0000000000..f91d26a45c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/filter_map.rs
@@ -0,0 +1,89 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A combinator used to filter the results of a stream and simultaneously map
+/// them to a different type.
+///
+/// This structure is returned by the `Stream::filter_map` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct FilterMap<S, F> {
+ stream: S,
+ f: F,
+}
+
+pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F>
+ where S: Stream,
+ F: FnMut(S::Item) -> Option<B>,
+{
+ FilterMap {
+ stream: s,
+ f: f,
+ }
+}
+
+impl<S, F> FilterMap<S, F> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for FilterMap<S, F>
+ where S: ::sink::Sink
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F, B> Stream for FilterMap<S, F>
+ where S: Stream,
+ F: FnMut(S::Item) -> Option<B>,
+{
+ type Item = B;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<B>, S::Error> {
+ loop {
+ match try_ready!(self.stream.poll()) {
+ Some(e) => {
+ if let Some(e) = (self.f)(e) {
+ return Ok(Async::Ready(Some(e)))
+ }
+ }
+ None => return Ok(Async::Ready(None)),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/flatten.rs b/third_party/rust/futures-0.1.31/src/stream/flatten.rs
new file mode 100644
index 0000000000..4baf9045a0
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/flatten.rs
@@ -0,0 +1,96 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A combinator used to flatten a stream-of-streams into one long stream of
+/// elements.
+///
+/// This combinator is created by the `Stream::flatten` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Flatten<S>
+ where S: Stream,
+{
+ stream: S,
+ next: Option<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Flatten<S>
+ where S: Stream,
+ S::Item: Stream,
+ <S::Item as Stream>::Error: From<S::Error>,
+{
+ Flatten {
+ stream: s,
+ next: None,
+ }
+}
+
+impl<S: Stream> Flatten<S> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Flatten<S>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S> Stream for Flatten<S>
+ where S: Stream,
+ S::Item: Stream,
+ <S::Item as Stream>::Error: From<S::Error>,
+{
+ type Item = <S::Item as Stream>::Item;
+ type Error = <S::Item as Stream>::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ loop {
+ if self.next.is_none() {
+ match try_ready!(self.stream.poll()) {
+ Some(e) => self.next = Some(e),
+ None => return Ok(Async::Ready(None)),
+ }
+ }
+ assert!(self.next.is_some());
+ match self.next.as_mut().unwrap().poll() {
+ Ok(Async::Ready(None)) => self.next = None,
+ other => return other,
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/fold.rs b/third_party/rust/futures-0.1.31/src/stream/fold.rs
new file mode 100644
index 0000000000..7fa24b449d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/fold.rs
@@ -0,0 +1,81 @@
+use core::mem;
+
+use {Future, Poll, IntoFuture, Async};
+use stream::Stream;
+
+/// A future used to collect all the results of a stream into one generic type.
+///
+/// This future is returned by the `Stream::fold` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Fold<S, F, Fut, T> where Fut: IntoFuture {
+ stream: S,
+ f: F,
+ state: State<T, Fut::Future>,
+}
+
+#[derive(Debug)]
+enum State<T, F> where F: Future {
+ /// Placeholder state when doing work
+ Empty,
+
+ /// Ready to process the next stream item; current accumulator is the `T`
+ Ready(T),
+
+ /// Working on a future to process the previous stream item
+ Processing(F),
+}
+
+pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
+ where S: Stream,
+ F: FnMut(T, S::Item) -> Fut,
+ Fut: IntoFuture<Item = T>,
+ S::Error: From<Fut::Error>,
+{
+ Fold {
+ stream: s,
+ f: f,
+ state: State::Ready(t),
+ }
+}
+
+impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
+ where S: Stream,
+ F: FnMut(T, S::Item) -> Fut,
+ Fut: IntoFuture<Item = T>,
+ S::Error: From<Fut::Error>,
+{
+ type Item = T;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<T, S::Error> {
+ loop {
+ match mem::replace(&mut self.state, State::Empty) {
+ State::Empty => panic!("cannot poll Fold twice"),
+ State::Ready(state) => {
+ match self.stream.poll()? {
+ Async::Ready(Some(e)) => {
+ let future = (self.f)(state, e);
+ let future = future.into_future();
+ self.state = State::Processing(future);
+ }
+ Async::Ready(None) => return Ok(Async::Ready(state)),
+ Async::NotReady => {
+ self.state = State::Ready(state);
+ return Ok(Async::NotReady)
+ }
+ }
+ }
+ State::Processing(mut fut) => {
+ match fut.poll()? {
+ Async::Ready(state) => self.state = State::Ready(state),
+ Async::NotReady => {
+ self.state = State::Processing(fut);
+ return Ok(Async::NotReady)
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/for_each.rs b/third_party/rust/futures-0.1.31/src/stream/for_each.rs
new file mode 100644
index 0000000000..c7e1cde5bb
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/for_each.rs
@@ -0,0 +1,51 @@
+use {Async, Future, IntoFuture, Poll};
+use stream::Stream;
+
+/// A stream combinator which executes a unit closure over each item on a
+/// stream.
+///
+/// This structure is returned by the `Stream::for_each` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct ForEach<S, F, U> where U: IntoFuture {
+ stream: S,
+ f: F,
+ fut: Option<U::Future>,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> ForEach<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+ U: IntoFuture<Item = (), Error = S::Error>,
+{
+ ForEach {
+ stream: s,
+ f: f,
+ fut: None,
+ }
+}
+
+impl<S, F, U> Future for ForEach<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+ U: IntoFuture<Item= (), Error = S::Error>,
+{
+ type Item = ();
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<(), S::Error> {
+ loop {
+ if let Some(mut fut) = self.fut.take() {
+ if fut.poll()?.is_not_ready() {
+ self.fut = Some(fut);
+ return Ok(Async::NotReady);
+ }
+ }
+
+ match try_ready!(self.stream.poll()) {
+ Some(e) => self.fut = Some((self.f)(e).into_future()),
+ None => return Ok(Async::Ready(())),
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/forward.rs b/third_party/rust/futures-0.1.31/src/stream/forward.rs
new file mode 100644
index 0000000000..6722af8c20
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/forward.rs
@@ -0,0 +1,110 @@
+use {Poll, Async, Future, AsyncSink};
+use stream::{Stream, Fuse};
+use sink::Sink;
+
+/// Future for the `Stream::forward` combinator, which sends a stream of values
+/// to a sink and then waits until the sink has fully flushed those values.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Forward<T: Stream, U> {
+ sink: Option<U>,
+ stream: Option<Fuse<T>>,
+ buffered: Option<T::Item>,
+}
+
+
+pub fn new<T, U>(stream: T, sink: U) -> Forward<T, U>
+ where U: Sink<SinkItem=T::Item>,
+ T: Stream,
+ T::Error: From<U::SinkError>,
+{
+ Forward {
+ sink: Some(sink),
+ stream: Some(stream.fuse()),
+ buffered: None,
+ }
+}
+
+impl<T, U> Forward<T, U>
+ where U: Sink<SinkItem=T::Item>,
+ T: Stream,
+ T::Error: From<U::SinkError>,
+{
+ /// Get a shared reference to the inner sink.
+ /// If this combinator has already been polled to completion, None will be returned.
+ pub fn sink_ref(&self) -> Option<&U> {
+ self.sink.as_ref()
+ }
+
+ /// Get a mutable reference to the inner sink.
+ /// If this combinator has already been polled to completion, None will be returned.
+ pub fn sink_mut(&mut self) -> Option<&mut U> {
+ self.sink.as_mut()
+ }
+
+ /// Get a shared reference to the inner stream.
+ /// If this combinator has already been polled to completion, None will be returned.
+ pub fn stream_ref(&self) -> Option<&T> {
+ self.stream.as_ref().map(|x| x.get_ref())
+ }
+
+ /// Get a mutable reference to the inner stream.
+ /// If this combinator has already been polled to completion, None will be returned.
+ pub fn stream_mut(&mut self) -> Option<&mut T> {
+ self.stream.as_mut().map(|x| x.get_mut())
+ }
+
+ fn take_result(&mut self) -> (T, U) {
+ let sink = self.sink.take()
+ .expect("Attempted to poll Forward after completion");
+ let fuse = self.stream.take()
+ .expect("Attempted to poll Forward after completion");
+ (fuse.into_inner(), sink)
+ }
+
+ fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> {
+ debug_assert!(self.buffered.is_none());
+ if let AsyncSink::NotReady(item) = self.sink_mut()
+ .expect("Attempted to poll Forward after completion")
+ .start_send(item)?
+ {
+ self.buffered = Some(item);
+ return Ok(Async::NotReady)
+ }
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T, U> Future for Forward<T, U>
+ where U: Sink<SinkItem=T::Item>,
+ T: Stream,
+ T::Error: From<U::SinkError>,
+{
+ type Item = (T, U);
+ type Error = T::Error;
+
+ fn poll(&mut self) -> Poll<(T, U), T::Error> {
+ // If we've got an item buffered already, we need to write it to the
+ // sink before we can do anything else
+ if let Some(item) = self.buffered.take() {
+ try_ready!(self.try_start_send(item))
+ }
+
+ loop {
+ match self.stream.as_mut()
+ .expect("Attempted to poll Forward after completion")
+ .poll()?
+ {
+ Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
+ Async::Ready(None) => {
+ try_ready!(self.sink_mut().expect("Attempted to poll Forward after completion").close());
+ return Ok(Async::Ready(self.take_result()))
+ }
+ Async::NotReady => {
+ try_ready!(self.sink_mut().expect("Attempted to poll Forward after completion").poll_complete());
+ return Ok(Async::NotReady)
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/from_err.rs b/third_party/rust/futures-0.1.31/src/stream/from_err.rs
new file mode 100644
index 0000000000..4028542dfc
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/from_err.rs
@@ -0,0 +1,80 @@
+use core::marker::PhantomData;
+use poll::Poll;
+use Async;
+use stream::Stream;
+
+/// A stream combinator to change the error type of a stream.
+///
+/// This is created by the `Stream::from_err` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct FromErr<S, E> {
+ stream: S,
+ f: PhantomData<E>
+}
+
+pub fn new<S, E>(stream: S) -> FromErr<S, E>
+ where S: Stream
+{
+ FromErr {
+ stream: stream,
+ f: PhantomData
+ }
+}
+
+impl<S, E> FromErr<S, E> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+
+impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> {
+ type Item = S::Item;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, E> {
+ let e = match self.stream.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ other => other,
+ };
+ e.map_err(From::from)
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S: Stream + ::sink::Sink, E> ::sink::Sink for FromErr<S, E> {
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: Self::SinkItem) -> ::StartSend<Self::SinkItem, Self::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), Self::SinkError> {
+ self.stream.close()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/fuse.rs b/third_party/rust/futures-0.1.31/src/stream/fuse.rs
new file mode 100644
index 0000000000..e39c31f348
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/fuse.rs
@@ -0,0 +1,89 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream which "fuse"s a stream once it's terminated.
+///
+/// Normally streams can behave unpredictably when used after they have already
+/// finished, but `Fuse` continues to return `None` from `poll` forever when
+/// finished.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Fuse<S> {
+ stream: S,
+ done: bool,
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Fuse<S>
+ where S: ::sink::Sink
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+pub fn new<S: Stream>(s: S) -> Fuse<S> {
+ Fuse { stream: s, done: false }
+}
+
+impl<S: Stream> Stream for Fuse<S> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ if self.done {
+ Ok(Async::Ready(None))
+ } else {
+ let r = self.stream.poll();
+ if let Ok(Async::Ready(None)) = r {
+ self.done = true;
+ }
+ r
+ }
+ }
+}
+
+impl<S> Fuse<S> {
+ /// Returns whether the underlying stream has finished or not.
+ ///
+ /// If this method returns `true`, then all future calls to poll are
+ /// guaranteed to return `None`. If this returns `false`, then the
+ /// underlying stream is still in use.
+ pub fn is_done(&self) -> bool {
+ self.done
+ }
+
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/future.rs b/third_party/rust/futures-0.1.31/src/stream/future.rs
new file mode 100644
index 0000000000..5b052ee4d3
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/future.rs
@@ -0,0 +1,76 @@
+use {Future, Poll, Async};
+use stream::Stream;
+
+/// A combinator used to temporarily convert a stream into a future.
+///
+/// This future is returned by the `Stream::into_future` method.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct StreamFuture<S> {
+ stream: Option<S>,
+}
+
+pub fn new<S: Stream>(s: S) -> StreamFuture<S> {
+ StreamFuture { stream: Some(s) }
+}
+
+impl<S> StreamFuture<S> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn get_ref(&self) -> Option<&S> {
+ self.stream.as_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn get_mut(&mut self) -> Option<&mut S> {
+ self.stream.as_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ ///
+ /// This method returns an `Option` to account for the fact that `StreamFuture`'s
+ /// implementation of `Future::poll` consumes the underlying stream during polling
+ /// in order to return it to the caller of `Future::poll` if the stream yielded
+ /// an element.
+ pub fn into_inner(self) -> Option<S> {
+ self.stream
+ }
+}
+
+impl<S: Stream> Future for StreamFuture<S> {
+ type Item = (Option<S::Item>, S);
+ type Error = (S::Error, S);
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ let item = {
+ let s = self.stream.as_mut().expect("polling StreamFuture twice");
+ match s.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Ok(Async::Ready(e)) => Ok(e),
+ Err(e) => Err(e),
+ }
+ };
+ let stream = self.stream.take().unwrap();
+ match item {
+ Ok(e) => Ok(Async::Ready((e, stream))),
+ Err(e) => Err((e, stream)),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs b/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs
new file mode 100644
index 0000000000..561bbb5189
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/futures_ordered.rs
@@ -0,0 +1,219 @@
+use std::cmp::{Eq, PartialEq, PartialOrd, Ord, Ordering};
+use std::collections::BinaryHeap;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+
+use {Async, Future, IntoFuture, Poll, Stream};
+use stream::FuturesUnordered;
+
+#[derive(Debug)]
+struct OrderWrapper<T> {
+ item: T,
+ index: usize,
+}
+
+impl<T> PartialEq for OrderWrapper<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.index == other.index
+ }
+}
+
+impl<T> Eq for OrderWrapper<T> {}
+
+impl<T> PartialOrd for OrderWrapper<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T> Ord for OrderWrapper<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ // BinaryHeap is a max heap, so compare backwards here.
+ other.index.cmp(&self.index)
+ }
+}
+
+impl<T> Future for OrderWrapper<T>
+ where T: Future
+{
+ type Item = OrderWrapper<T::Item>;
+ type Error = T::Error;
+
+ fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+ let result = try_ready!(self.item.poll());
+ Ok(Async::Ready(OrderWrapper {
+ item: result,
+ index: self.index
+ }))
+ }
+}
+
+/// An unbounded queue of futures.
+///
+/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order
+/// on top of the set of futures. While futures in the set will race to
+/// completion in parallel, results will only be returned in the order their
+/// originating futures were added to the queue.
+///
+/// Futures are pushed into this queue and their realized values are yielded in
+/// order. This structure is optimized to manage a large number of futures.
+/// Futures managed by `FuturesOrdered` will only be polled when they generate
+/// notifications. This reduces the required amount of work needed to coordinate
+/// large numbers of futures.
+///
+/// When a `FuturesOrdered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be
+/// returned. Futures are submitted to the queue using `push`; however, the
+/// future will **not** be polled at this point. `FuturesOrdered` will only
+/// poll managed futures when `FuturesOrdered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesOrdered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the queue is currently not managing any futures. A future may be submitted
+/// to the queue at a later time. At that point, a call to
+/// `FuturesOrdered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed. When
+/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will
+/// return `Ok(Async::NotReady)` until the first future completes, even if
+/// some of the later futures have already completed.
+///
+/// Note that you can create a ready-made `FuturesOrdered` via the
+/// `futures_ordered` function in the `stream` module, or you can start with an
+/// empty queue with the `FuturesOrdered::new` constructor.
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesOrdered<T>
+ where T: Future
+{
+ in_progress: FuturesUnordered<OrderWrapper<T>>,
+ queued_results: BinaryHeap<OrderWrapper<T::Item>>,
+ next_incoming_index: usize,
+ next_outgoing_index: usize,
+}
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take an list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that their originating
+/// futures were submitted to the queue. If the futures complete out of order,
+/// items will be stored internally within `FuturesOrdered` until all preceding
+/// items have been yielded.
+///
+/// Note that the returned queue can also be used to dynamically push more
+/// futures into the queue as they become available.
+pub fn futures_ordered<I>(futures: I) -> FuturesOrdered<<I::Item as IntoFuture>::Future>
+ where I: IntoIterator,
+ I::Item: IntoFuture
+{
+ let mut queue = FuturesOrdered::new();
+
+ for future in futures {
+ queue.push(future.into_future());
+ }
+
+ return queue
+}
+
+impl<T> Default for FuturesOrdered<T> where T: Future {
+ fn default() -> Self {
+ FuturesOrdered::new()
+ }
+}
+
+impl<T> FuturesOrdered<T>
+ where T: Future
+{
+ /// Constructs a new, empty `FuturesOrdered`
+ ///
+ /// The returned `FuturesOrdered` does not contain any futures and, in this
+ /// state, `FuturesOrdered::poll` will return `Ok(Async::Ready(None))`.
+ pub fn new() -> FuturesOrdered<T> {
+ FuturesOrdered {
+ in_progress: FuturesUnordered::new(),
+ queued_results: BinaryHeap::new(),
+ next_incoming_index: 0,
+ next_outgoing_index: 0,
+ }
+ }
+
+ /// Returns the number of futures contained in the queue.
+ ///
+ /// This represents the total number of in-flight futures, both
+ /// those currently processing and those that have completed but
+ /// which are waiting for earlier futures to complete.
+ pub fn len(&self) -> usize {
+ self.in_progress.len() + self.queued_results.len()
+ }
+
+ /// Returns `true` if the queue contains no futures
+ pub fn is_empty(&self) -> bool {
+ self.in_progress.is_empty() && self.queued_results.is_empty()
+ }
+
+ /// Push a future into the queue.
+ ///
+ /// This function submits the given future to the internal set for managing.
+ /// This function will not call `poll` on the submitted future. The caller
+ /// must ensure that `FuturesOrdered::poll` is called in order to receive
+ /// task notifications.
+ pub fn push(&mut self, future: T) {
+ let wrapped = OrderWrapper {
+ item: future,
+ index: self.next_incoming_index,
+ };
+ self.next_incoming_index += 1;
+ self.in_progress.push(wrapped);
+ }
+}
+
+impl<T> Stream for FuturesOrdered<T>
+ where T: Future
+{
+ type Item = T::Item;
+ type Error = T::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ // Get any completed futures from the unordered set.
+ loop {
+ match self.in_progress.poll()? {
+ Async::Ready(Some(result)) => self.queued_results.push(result),
+ Async::Ready(None) | Async::NotReady => break,
+ }
+ }
+
+ if let Some(next_result) = self.queued_results.peek() {
+ // PeekMut::pop is not stable yet QQ
+ if next_result.index != self.next_outgoing_index {
+ return Ok(Async::NotReady);
+ }
+ } else if !self.in_progress.is_empty() {
+ return Ok(Async::NotReady);
+ } else {
+ return Ok(Async::Ready(None));
+ }
+
+ let next_result = self.queued_results.pop().unwrap();
+ self.next_outgoing_index += 1;
+ Ok(Async::Ready(Some(next_result.item)))
+ }
+}
+
+impl<T: Debug> Debug for FuturesOrdered<T>
+ where T: Future
+{
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "FuturesOrdered {{ ... }}")
+ }
+}
+
+impl<F: Future> FromIterator<F> for FuturesOrdered<F> {
+ fn from_iter<T>(iter: T) -> Self
+ where T: IntoIterator<Item = F>
+ {
+ let mut new = FuturesOrdered::new();
+ for future in iter.into_iter() {
+ new.push(future);
+ }
+ new
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs b/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs
new file mode 100644
index 0000000000..3f25c86f39
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/futures_unordered.rs
@@ -0,0 +1,707 @@
+//! An unbounded set of futures.
+
+use std::cell::UnsafeCell;
+use std::fmt::{self, Debug};
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel};
+use std::sync::atomic::{AtomicPtr, AtomicBool};
+use std::sync::{Arc, Weak};
+use std::usize;
+
+use {task, Stream, Future, Poll, Async};
+use executor::{Notify, UnsafeNotify, NotifyHandle};
+use task_impl::{self, AtomicTask};
+
+/// An unbounded set of futures.
+///
+/// This "combinator" also serves a special function in this library, providing
+/// the ability to maintain a set of futures and manage driving them all
+/// to completion.
+///
+/// Futures are pushed into this set and their realized values are yielded as
+/// they are ready. This structure is optimized to manage a large number of
+/// futures. Futures managed by `FuturesUnordered` will only be polled when they
+/// generate notifications. This reduces the required amount of work needed to
+/// coordinate large numbers of futures.
+///
+/// When a `FuturesUnordered` is first created, it does not contain any futures.
+/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` to be
+/// returned. Futures are submitted to the set using `push`; however, the
+/// future will **not** be polled at this point. `FuturesUnordered` will only
+/// poll managed futures when `FuturesUnordered::poll` is called. As such, it
+/// is important to call `poll` after pushing new futures.
+///
+/// If `FuturesUnordered::poll` returns `Ok(Async::Ready(None))` this means that
+/// the set is currently not managing any futures. A future may be submitted
+/// to the set at a later time. At that point, a call to
+/// `FuturesUnordered::poll` will either return the future's resolved value
+/// **or** `Ok(Async::NotReady)` if the future has not yet completed.
+///
+/// Note that you can create a ready-made `FuturesUnordered` via the
+/// `futures_unordered` function in the `stream` module, or you can start with an
+/// empty set with the `FuturesUnordered::new` constructor.
+#[must_use = "streams do nothing unless polled"]
+pub struct FuturesUnordered<F> {
+ inner: Arc<Inner<F>>,
+ len: usize,
+ head_all: *const Node<F>,
+}
+
+unsafe impl<T: Send> Send for FuturesUnordered<T> {}
+unsafe impl<T: Sync> Sync for FuturesUnordered<T> {}
+
+// FuturesUnordered is implemented using two linked lists. One which links all
+// futures managed by a `FuturesUnordered` and one that tracks futures that have
+// been scheduled for polling. The first linked list is not thread safe and is
+// only accessed by the thread that owns the `FuturesUnordered` value. The
+// second linked list is an implementation of the intrusive MPSC queue algorithm
+// described by 1024cores.net.
+//
+// When a future is submitted to the set a node is allocated and inserted in
+// both linked lists. The next call to `poll` will (eventually) see this node
+// and call `poll` on the future.
+//
+// Before a managed future is polled, the current task's `Notify` is replaced
+// with one that is aware of the specific future being run. This ensures that
+// task notifications generated by that specific future are visible to
+// `FuturesUnordered`. When a notification is received, the node is scheduled
+// for polling by being inserted into the concurrent linked list.
+//
+// Each node uses an `AtomicBool` (`queued`) to track whether it is currently
+// inserted in the atomic queue, while the reference count (the number of
+// outstanding handles to the node) is managed by the surrounding `Arc`. When
+// the future is notified, it will only insert itself into the linked list if
+// it isn't currently inserted.
+
+#[allow(missing_debug_implementations)]
+struct Inner<T> {
+ // The task using `FuturesUnordered`.
+ parent: AtomicTask,
+
+ // Head/tail of the readiness queue
+ head_readiness: AtomicPtr<Node<T>>,
+ tail_readiness: UnsafeCell<*const Node<T>>,
+ stub: Arc<Node<T>>,
+}
+
+struct Node<T> {
+ // The future
+ future: UnsafeCell<Option<T>>,
+
+ // Next pointer for linked list tracking all active nodes
+ next_all: UnsafeCell<*const Node<T>>,
+
+ // Previous node in linked list tracking all active nodes
+ prev_all: UnsafeCell<*const Node<T>>,
+
+ // Next pointer in readiness queue
+ next_readiness: AtomicPtr<Node<T>>,
+
+ // Queue that we'll be enqueued to when notified
+ queue: Weak<Inner<T>>,
+
+ // Whether or not this node is currently in the mpsc queue.
+ queued: AtomicBool,
+}
+
+enum Dequeue<T> {
+ Data(*const Node<T>),
+ Empty,
+ Inconsistent,
+}
+
+impl<T> Default for FuturesUnordered<T> where T: Future {
+ fn default() -> Self {
+ FuturesUnordered::new()
+ }
+}
+
+impl<T> FuturesUnordered<T>
+ where T: Future,
+{
+ /// Constructs a new, empty `FuturesUnordered`
+ ///
+ /// The returned `FuturesUnordered` does not contain any futures and, in this
+ /// state, `FuturesUnordered::poll` will return `Ok(Async::Ready(None))`.
+ pub fn new() -> FuturesUnordered<T> {
+ let stub = Arc::new(Node {
+ future: UnsafeCell::new(None),
+ next_all: UnsafeCell::new(ptr::null()),
+ prev_all: UnsafeCell::new(ptr::null()),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ queued: AtomicBool::new(true),
+ queue: Weak::new(),
+ });
+ let stub_ptr = &*stub as *const Node<T>;
+ let inner = Arc::new(Inner {
+ parent: AtomicTask::new(),
+ head_readiness: AtomicPtr::new(stub_ptr as *mut _),
+ tail_readiness: UnsafeCell::new(stub_ptr),
+ stub: stub,
+ });
+
+ FuturesUnordered {
+ len: 0,
+ head_all: ptr::null_mut(),
+ inner: inner,
+ }
+ }
+}
+
+impl<T> FuturesUnordered<T> {
+ /// Returns the number of futures contained in the set.
+ ///
+ /// This represents the total number of in-flight futures.
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` if the set contains no futures
+ pub fn is_empty(&self) -> bool {
+ self.len == 0
+ }
+
+ /// Push a future into the set.
+ ///
+ /// This function submits the given future to the set for managing. This
+ /// function will not call `poll` on the submitted future. The caller must
+ /// ensure that `FuturesUnordered::poll` is called in order to receive task
+ /// notifications.
+ pub fn push(&mut self, future: T) {
+ let node = Arc::new(Node {
+ future: UnsafeCell::new(Some(future)),
+ next_all: UnsafeCell::new(ptr::null_mut()),
+ prev_all: UnsafeCell::new(ptr::null_mut()),
+ next_readiness: AtomicPtr::new(ptr::null_mut()),
+ queued: AtomicBool::new(true),
+ queue: Arc::downgrade(&self.inner),
+ });
+
+ // Right now our node has a strong reference count of 1. We transfer
+ // ownership of this reference count to our internal linked list
+ // and we'll reclaim ownership through the `unlink` function below.
+ let ptr = self.link(node);
+
+ // We'll need to get the future "into the system" to start tracking it,
+ // e.g. getting its unpark notifications going to us tracking which
+ // futures are ready. To do that we unconditionally enqueue it for
+ // polling here.
+ self.inner.enqueue(ptr);
+ }
+
+ /// Returns an iterator that allows modifying each future in the set.
+ pub fn iter_mut(&mut self) -> IterMut<T> {
+ IterMut {
+ node: self.head_all,
+ len: self.len,
+ _marker: PhantomData
+ }
+ }
+
+ fn release_node(&mut self, node: Arc<Node<T>>) {
+ // The future is done, try to reset the queued flag. This will prevent
+ // `notify` from doing any work in the future
+ let prev = node.queued.swap(true, SeqCst);
+
+ // Drop the future, even if it hasn't finished yet. This is safe
+ // because we're dropping the future on the thread that owns
+ // `FuturesUnordered`, which correctly tracks T's lifetimes and such.
+ unsafe {
+ drop((*node.future.get()).take());
+ }
+
+ // If the queued flag was previously set then it means that this node
+ // is still in our internal mpsc queue. We then transfer ownership
+ // of our reference count to the mpsc queue, and it'll come along and
+ // free it later, noticing that the future is `None`.
+ //
+ // If, however, the queued flag was *not* set then we're safe to
+ // release our reference count on the internal node. The queued flag
+ // was set above so all future `enqueue` operations will not actually
+ // enqueue the node, so our node will never see the mpsc queue again.
+ // The node itself will be deallocated once all reference counts have
+ // been dropped by the various owning tasks elsewhere.
+ if prev {
+ mem::forget(node);
+ }
+ }
+
+ /// Insert a new node into the internal linked list.
+ fn link(&mut self, node: Arc<Node<T>>) -> *const Node<T> {
+ let ptr = arc2ptr(node);
+ unsafe {
+ *(*ptr).next_all.get() = self.head_all;
+ if !self.head_all.is_null() {
+ *(*self.head_all).prev_all.get() = ptr;
+ }
+ }
+
+ self.head_all = ptr;
+ self.len += 1;
+ return ptr
+ }
+
+ /// Remove the node from the linked list tracking all nodes currently
+ /// managed by `FuturesUnordered`.
+ unsafe fn unlink(&mut self, node: *const Node<T>) -> Arc<Node<T>> {
+ let node = ptr2arc(node);
+ let next = *node.next_all.get();
+ let prev = *node.prev_all.get();
+ *node.next_all.get() = ptr::null_mut();
+ *node.prev_all.get() = ptr::null_mut();
+
+ if !next.is_null() {
+ *(*next).prev_all.get() = prev;
+ }
+
+ if !prev.is_null() {
+ *(*prev).next_all.get() = next;
+ } else {
+ self.head_all = next;
+ }
+ self.len -= 1;
+ return node
+ }
+}
+
+impl<T> Stream for FuturesUnordered<T>
+ where T: Future
+{
+ type Item = T::Item;
+ type Error = T::Error;
+
+ fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> {
+ // Variable to determine how many times it is allowed to poll underlying
+ // futures without yielding.
+ //
+ // A single call to `poll` may potentially do a lot of work before
+ // yielding. This happens in particular if the underlying futures are awoken
+ // frequently but continue to return `NotReady`. This is problematic if other
+ // tasks are waiting on the executor, since they do not get to run. This value
+ // caps the number of calls to `poll` on underlying futures a single call to
+ // `Stream::poll` on this set is allowed to make.
+ //
+ // The value is the length of FuturesUnordered. This ensures that each
+ // future is polled only once at most per iteration.
+ //
+ // See also https://github.com/rust-lang/futures-rs/issues/2047.
+ let yield_every = self.len();
+
+ // Keep track of how many child futures we have polled,
+ // in case we want to forcibly yield.
+ let mut polled = 0;
+
+ // Ensure `parent` is correctly set.
+ self.inner.parent.register();
+
+ loop {
+ let node = match unsafe { self.inner.dequeue() } {
+ Dequeue::Empty => {
+ if self.is_empty() {
+ return Ok(Async::Ready(None));
+ } else {
+ return Ok(Async::NotReady)
+ }
+ }
+ Dequeue::Inconsistent => {
+ // At this point, it may be worth yielding the thread &
+ // spinning a few times... but for now, just yield using the
+ // task system.
+ task::current().notify();
+ return Ok(Async::NotReady);
+ }
+ Dequeue::Data(node) => node,
+ };
+
+ debug_assert!(node != self.inner.stub());
+
+ unsafe {
+ let mut future = match (*(*node).future.get()).take() {
+ Some(future) => future,
+
+ // If the future has already gone away then we're just
+ // cleaning out this node. See the comment in
+ // `release_node` for more information, but we're basically
+ // just taking ownership of our reference count here.
+ None => {
+ let node = ptr2arc(node);
+ assert!((*node.next_all.get()).is_null());
+ assert!((*node.prev_all.get()).is_null());
+ continue
+ }
+ };
+
+ // Unset queued flag... this must be done before
+ // polling. This ensures that the future gets
+ // rescheduled if it is notified **during** a call
+ // to `poll`.
+ let prev = (*node).queued.swap(false, SeqCst);
+ assert!(prev);
+
+ // We're going to need to be very careful if the `poll`
+ // function below panics. We need to (a) not leak memory and
+ // (b) ensure that we still don't have any use-after-frees. To
+ // manage this we do a few things:
+ //
+ // * This "bomb" here will call `release_node` if dropped
+ // abnormally. That way we'll be sure the memory management
+ // of the `node` is managed correctly.
+ // * The future was extracted above (taken ownership). That way
+ // if it panics we're guaranteed that the future is
+ // dropped on this thread and doesn't accidentally get
+ // dropped on a different thread (bad).
+ // * We unlink the node from our internal queue to preemptively
+ // assume it'll panic, in which case we'll want to discard it
+ // regardless.
+ struct Bomb<'a, T: 'a> {
+ queue: &'a mut FuturesUnordered<T>,
+ node: Option<Arc<Node<T>>>,
+ }
+ impl<'a, T> Drop for Bomb<'a, T> {
+ fn drop(&mut self) {
+ if let Some(node) = self.node.take() {
+ self.queue.release_node(node);
+ }
+ }
+ }
+ let mut bomb = Bomb {
+ node: Some(self.unlink(node)),
+ queue: self,
+ };
+
+ // Poll the underlying future with the appropriate `notify`
+ // implementation. This is where a large bit of the unsafety
+ // starts to stem from internally. The `notify` instance itself
+ // is basically just our `Arc<Node<T>>` and tracks the mpsc
+ // queue of ready futures.
+ //
+ // Critically though `Node<T>` won't actually access `T`, the
+ // future, while it's floating around inside of `Task`
+ // instances. These structs will basically just use `T` to size
+ // the internal allocation, appropriately accessing fields and
+ // deallocating the node if need be.
+ let res = {
+ let notify = NodeToHandle(bomb.node.as_ref().unwrap());
+ task_impl::with_notify(&notify, 0, || {
+ future.poll()
+ })
+ };
+ polled += 1;
+
+ let ret = match res {
+ Ok(Async::NotReady) => {
+ let node = bomb.node.take().unwrap();
+ *node.future.get() = Some(future);
+ bomb.queue.link(node);
+
+ if polled == yield_every {
+ // We have polled a large number of futures in a row without yielding.
+ // To ensure we do not starve other tasks waiting on the executor,
+ // we yield here, but immediately wake ourselves up to continue.
+ task_impl::current().notify();
+ return Ok(Async::NotReady);
+ }
+ continue
+ }
+ Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
+ Err(e) => Err(e),
+ };
+ return ret
+ }
+ }
+ }
+}
+
+impl<T: Debug> Debug for FuturesUnordered<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "FuturesUnordered {{ ... }}")
+ }
+}
+
+impl<T> Drop for FuturesUnordered<T> {
+ fn drop(&mut self) {
+ // When a `FuturesUnordered` is dropped we want to drop all futures associated
+ // with it. At the same time though there may be tons of `Task` handles
+ // flying around which contain `Node<T>` references inside them. We'll
+ // let those naturally get deallocated when the `Task` itself goes out
+ // of scope or gets notified.
+ unsafe {
+ while !self.head_all.is_null() {
+ let head = self.head_all;
+ let node = self.unlink(head);
+ self.release_node(node);
+ }
+ }
+
+ // Note that at this point we could still have a bunch of nodes in the
+ // mpsc queue. None of those nodes, however, have futures associated
+ // with them so they're safe to destroy on any thread. At this point
+ // the `FuturesUnordered` struct, the owner of the one strong reference
+ // to `Inner<T>` will drop the strong reference. At that point
+ // whichever thread releases the strong refcount last (be it this
+ // thread or some other thread as part of an `upgrade`) will clear out
+ // the mpsc queue and free all remaining nodes.
+ //
+ // While that freeing operation isn't guaranteed to happen here, it's
+ // guaranteed to happen "promptly" as no more "blocking work" will
+ // happen while there's a strong refcount held.
+ }
+}
+
+impl<F: Future> FromIterator<F> for FuturesUnordered<F> {
+ fn from_iter<T>(iter: T) -> Self
+ where T: IntoIterator<Item = F>
+ {
+ let mut new = FuturesUnordered::new();
+ for future in iter.into_iter() {
+ new.push(future);
+ }
+ new
+ }
+}
+
+#[derive(Debug)]
+/// Mutable iterator over all futures in the unordered set.
+pub struct IterMut<'a, F: 'a> {
+ node: *const Node<F>,
+ len: usize,
+ _marker: PhantomData<&'a mut FuturesUnordered<F>>
+}
+
+impl<'a, F> Iterator for IterMut<'a, F> {
+ type Item = &'a mut F;
+
+ fn next(&mut self) -> Option<&'a mut F> {
+ if self.node.is_null() {
+ return None;
+ }
+ unsafe {
+ let future = (*(*self.node).future.get()).as_mut().unwrap();
+ let next = *(*self.node).next_all.get();
+ self.node = next;
+ self.len -= 1;
+ return Some(future);
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+}
+
+impl<'a, F> ExactSizeIterator for IterMut<'a, F> {}
+
+impl<T> Inner<T> {
+ /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
+ fn enqueue(&self, node: *const Node<T>) {
+ unsafe {
+ debug_assert!((*node).queued.load(Relaxed));
+
+ // This action does not require any coordination
+ (*node).next_readiness.store(ptr::null_mut(), Relaxed);
+
+ // Note that these atomic orderings come from 1024cores
+ let node = node as *mut _;
+ let prev = self.head_readiness.swap(node, AcqRel);
+ (*prev).next_readiness.store(node, Release);
+ }
+ }
+
+ /// The dequeue function from the 1024cores intrusive MPSC queue algorithm
+ ///
+ /// Note that this is unsafe as it requires mutual exclusion (only one thread
+ /// can call this) to be guaranteed elsewhere.
+ unsafe fn dequeue(&self) -> Dequeue<T> {
+ let mut tail = *self.tail_readiness.get();
+ let mut next = (*tail).next_readiness.load(Acquire);
+
+ if tail == self.stub() {
+ if next.is_null() {
+ return Dequeue::Empty;
+ }
+
+ *self.tail_readiness.get() = next;
+ tail = next;
+ next = (*next).next_readiness.load(Acquire);
+ }
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ debug_assert!(tail != self.stub());
+ return Dequeue::Data(tail);
+ }
+
+ if self.head_readiness.load(Acquire) as *const _ != tail {
+ return Dequeue::Inconsistent;
+ }
+
+ self.enqueue(self.stub());
+
+ next = (*tail).next_readiness.load(Acquire);
+
+ if !next.is_null() {
+ *self.tail_readiness.get() = next;
+ return Dequeue::Data(tail);
+ }
+
+ Dequeue::Inconsistent
+ }
+
+ fn stub(&self) -> *const Node<T> {
+ &*self.stub
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ // Once we're in the destructor for `Inner<T>` we need to clear out the
+ // mpsc queue of nodes if there's anything left in there.
+ //
+ // Note that each node has a strong reference count associated with it
+ // which is owned by the mpsc queue. All nodes should have had their
+ // futures dropped already by the `FuturesUnordered` destructor above,
+ // so we're just pulling out nodes and dropping their refcounts.
+ unsafe {
+ loop {
+ match self.dequeue() {
+ Dequeue::Empty => break,
+ Dequeue::Inconsistent => abort("inconsistent in drop"),
+ Dequeue::Data(ptr) => drop(ptr2arc(ptr)),
+ }
+ }
+ }
+ }
+}
+
+#[allow(missing_debug_implementations)]
+struct NodeToHandle<'a, T: 'a>(&'a Arc<Node<T>>);
+
+impl<'a, T> Clone for NodeToHandle<'a, T> {
+ fn clone(&self) -> Self {
+ NodeToHandle(self.0)
+ }
+}
+
+impl<'a, T> From<NodeToHandle<'a, T>> for NotifyHandle {
+ fn from(handle: NodeToHandle<'a, T>) -> NotifyHandle {
+ unsafe {
+ let ptr = handle.0.clone();
+ let ptr = mem::transmute::<Arc<Node<T>>, *mut ArcNode<T>>(ptr);
+ NotifyHandle::new(hide_lt(ptr))
+ }
+ }
+}
+
+struct ArcNode<T>(PhantomData<T>);
+
+// We should never touch `T` on any thread other than the one owning
+// `FuturesUnordered`, so this should be a safe operation.
+unsafe impl<T> Send for ArcNode<T> {}
+unsafe impl<T> Sync for ArcNode<T> {}
+
+impl<T> Notify for ArcNode<T> {
+ fn notify(&self, _id: usize) {
+ unsafe {
+ let me: *const ArcNode<T> = self;
+ let me: *const *const ArcNode<T> = &me;
+ let me = me as *const Arc<Node<T>>;
+ Node::notify(&*me)
+ }
+ }
+}
+
+unsafe impl<T> UnsafeNotify for ArcNode<T> {
+ unsafe fn clone_raw(&self) -> NotifyHandle {
+ let me: *const ArcNode<T> = self;
+ let me: *const *const ArcNode<T> = &me;
+ let me = &*(me as *const Arc<Node<T>>);
+ NodeToHandle(me).into()
+ }
+
+ unsafe fn drop_raw(&self) {
+ let mut me: *const ArcNode<T> = self;
+ let me = &mut me as *mut *const ArcNode<T> as *mut Arc<Node<T>>;
+ ptr::drop_in_place(me);
+ }
+}
+
+unsafe fn hide_lt<T>(p: *mut ArcNode<T>) -> *mut UnsafeNotify {
+ mem::transmute(p as *mut UnsafeNotify)
+}
+
+impl<T> Node<T> {
+ fn notify(me: &Arc<Node<T>>) {
+ let inner = match me.queue.upgrade() {
+ Some(inner) => inner,
+ None => return,
+ };
+
+ // It's our job to notify the node that it's ready to get polled,
+ // meaning that we need to enqueue it into the readiness queue. To
+ // do this we flag that we're ready to be queued, and if successful
+ // we then do the literal queueing operation, ensuring that we're
+ // only queued once.
+ //
+ // Once the node is inserted we make sure to notify the parent task,
+ // as it'll want to come along and pick up our node now.
+ //
+ // Note that we don't change the reference count of the node here,
+ // we're just enqueueing the raw pointer. The `FuturesUnordered`
+ // implementation guarantees that if we set the `queued` flag true that
+ // there's a reference count held by the main `FuturesUnordered` queue
+ // still.
+ let prev = me.queued.swap(true, SeqCst);
+ if !prev {
+ inner.enqueue(&**me);
+ inner.parent.notify();
+ }
+ }
+}
+
+impl<T> Drop for Node<T> {
+ fn drop(&mut self) {
+ // Currently a `Node<T>` is sent across all threads for any lifetime,
+ // regardless of `T`. This means that for memory safety we can't
+ // actually touch `T` at any time except when we have a reference to the
+ // `FuturesUnordered` itself.
+ //
+ // Consequently it *should* be the case that we always drop futures from
+ // the `FuturesUnordered` instance, but this is a bomb in place to catch
+ // any bugs in that logic.
+ unsafe {
+ if (*self.future.get()).is_some() {
+ abort("future still here when dropping");
+ }
+ }
+ }
+}
+
+fn arc2ptr<T>(ptr: Arc<T>) -> *const T {
+ let addr = &*ptr as *const T;
+ mem::forget(ptr);
+ return addr
+}
+
+unsafe fn ptr2arc<T>(ptr: *const T) -> Arc<T> {
+ let anchor = mem::transmute::<usize, Arc<T>>(0x10);
+ let addr = &*anchor as *const T;
+ mem::forget(anchor);
+ let offset = addr as isize - 0x10;
+ mem::transmute::<isize, Arc<T>>(ptr as isize - offset)
+}
+
+fn abort(s: &str) -> ! {
+ struct DoublePanic;
+
+ impl Drop for DoublePanic {
+ fn drop(&mut self) {
+ panic!("panicking twice to abort the program");
+ }
+ }
+
+ let _bomb = DoublePanic;
+ panic!("{}", s);
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/inspect.rs b/third_party/rust/futures-0.1.31/src/stream/inspect.rs
new file mode 100644
index 0000000000..fc8f7f4ea2
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/inspect.rs
@@ -0,0 +1,84 @@
+use {Stream, Poll, Async};
+
+/// Do something with the items of a stream, passing it on.
+///
+/// This is created by the `Stream::inspect` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Inspect<S, F> where S: Stream {
+ stream: S,
+ inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> Inspect<S, F>
+ where S: Stream,
+ F: FnMut(&S::Item) -> (),
+{
+ Inspect {
+ stream: stream,
+ inspect: f,
+ }
+}
+
+impl<S: Stream, F> Inspect<S, F> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for Inspect<S, F>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F> Stream for Inspect<S, F>
+ where S: Stream,
+ F: FnMut(&S::Item),
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ match try_ready!(self.stream.poll()) {
+ Some(e) => {
+ (self.inspect)(&e);
+ Ok(Async::Ready(Some(e)))
+ }
+ None => Ok(Async::Ready(None)),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs b/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs
new file mode 100644
index 0000000000..5c56a217ff
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/inspect_err.rs
@@ -0,0 +1,81 @@
+use {Stream, Poll};
+
+/// Do something with the error of a stream, passing it on.
+///
+/// This is created by the `Stream::inspect_err` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct InspectErr<S, F> where S: Stream {
+ stream: S,
+ inspect: F,
+}
+
+pub fn new<S, F>(stream: S, f: F) -> InspectErr<S, F>
+ where S: Stream,
+ F: FnMut(&S::Error) -> (),
+{
+ InspectErr {
+ stream: stream,
+ inspect: f,
+ }
+}
+
+impl<S: Stream, F> InspectErr<S, F> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F> ::sink::Sink for InspectErr<S, F>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F> Stream for InspectErr<S, F>
+ where S: Stream,
+ F: FnMut(&S::Error),
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ self.stream.poll().map_err(|e| {
+ (self.inspect)(&e);
+ e
+ })
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/iter.rs b/third_party/rust/futures-0.1.31/src/stream/iter.rs
new file mode 100644
index 0000000000..e0b9379353
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/iter.rs
@@ -0,0 +1,46 @@
+#![deprecated(note = "implementation moved to `iter_ok` and `iter_result`")]
+#![allow(deprecated)]
+
+use Poll;
+use stream::{iter_result, IterResult, Stream};
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Iter<I>(IterResult<I>);
+
+/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter simply
+/// always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter(vec![Ok(17), Err(false), Ok(19)]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Err(false), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+#[inline]
+pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
+ where J: IntoIterator<Item=Result<T, E>>,
+{
+ Iter(iter_result(i))
+}
+
+impl<I, T, E> Stream for Iter<I>
+ where I: Iterator<Item=Result<T, E>>,
+{
+ type Item = T;
+ type Error = E;
+
+ #[inline]
+ fn poll(&mut self) -> Poll<Option<T>, E> {
+ self.0.poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs b/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs
new file mode 100644
index 0000000000..9c8d871399
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/iter_ok.rs
@@ -0,0 +1,48 @@
+use core::marker;
+
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterOk<I, E> {
+ iter: I,
+ _marker: marker::PhantomData<fn() -> E>,
+}
+
+/// Converts an `Iterator` into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_ok::<_, ()>(vec![17, 19]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_ok<I, E>(i: I) -> IterOk<I::IntoIter, E>
+ where I: IntoIterator,
+{
+ IterOk {
+ iter: i.into_iter(),
+ _marker: marker::PhantomData,
+ }
+}
+
+impl<I, E> Stream for IterOk<I, E>
+ where I: Iterator,
+{
+ type Item = I::Item;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<I::Item>, E> {
+ Ok(Async::Ready(self.iter.next()))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/iter_result.rs b/third_party/rust/futures-0.1.31/src/stream/iter_result.rs
new file mode 100644
index 0000000000..4eef5da08e
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/iter_result.rs
@@ -0,0 +1,51 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct IterResult<I> {
+ iter: I,
+}
+
+/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
+/// to yield the next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter simply
+/// always calls `iter.next()` and returns that.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::iter_result(vec![Ok(17), Err(false), Ok(19)]);
+/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
+/// assert_eq!(Err(false), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn iter_result<J, T, E>(i: J) -> IterResult<J::IntoIter>
+where
+ J: IntoIterator<Item = Result<T, E>>,
+{
+ IterResult {
+ iter: i.into_iter(),
+ }
+}
+
+impl<I, T, E> Stream for IterResult<I>
+where
+ I: Iterator<Item = Result<T, E>>,
+{
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<T>, E> {
+ match self.iter.next() {
+ Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+ Some(Err(e)) => Err(e),
+ None => Ok(Async::Ready(None)),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/map.rs b/third_party/rust/futures-0.1.31/src/stream/map.rs
new file mode 100644
index 0000000000..702e980b3f
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/map.rs
@@ -0,0 +1,81 @@
+use {Async, Poll};
+use stream::Stream;
+
/// A stream combinator which will change the type of a stream from one
/// type to another.
///
/// This is produced by the `Stream::map` method.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct Map<S, F> {
    // The underlying stream whose items are transformed.
    stream: S,
    // `FnMut(S::Item) -> U` closure applied to every successful item.
    f: F,
}
+
+pub fn new<S, F, U>(s: S, f: F) -> Map<S, F>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+{
+ Map {
+ stream: s,
+ f: f,
+ }
+}
+
// Accessors for the wrapped stream; this trio is mirrored across all of the
// stream combinators in this crate (see `MapErr` below).
impl<S, F> Map<S, F> {
    /// Acquires a reference to the underlying stream that this combinator is
    /// pulling from.
    pub fn get_ref(&self) -> &S {
        &self.stream
    }

    /// Acquires a mutable reference to the underlying stream that this
    /// combinator is pulling from.
    ///
    /// Note that care must be taken to avoid tampering with the state of the
    /// stream which may otherwise confuse this combinator.
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.stream
    }

    /// Consumes this combinator, returning the underlying stream.
    ///
    /// Note that this may discard intermediate state of this combinator, so
    /// care should be taken to avoid losing resources when this is called.
    pub fn into_inner(self) -> S {
        self.stream
    }
}
+
// Forwarding impl of Sink from the underlying stream: when `S` is also a
// sink, `Map<S, F>` can still be used as that sink unchanged (the closure
// only affects the stream side).
impl<S, F> ::sink::Sink for Map<S, F>
    where S: ::sink::Sink
{
    type SinkItem = S::SinkItem;
    type SinkError = S::SinkError;

    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
        self.stream.start_send(item)
    }

    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
        self.stream.poll_complete()
    }

    fn close(&mut self) -> Poll<(), S::SinkError> {
        self.stream.close()
    }
}
+
+impl<S, F, U> Stream for Map<S, F>
+ where S: Stream,
+ F: FnMut(S::Item) -> U,
+{
+ type Item = U;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<U>, S::Error> {
+ let option = try_ready!(self.stream.poll());
+ Ok(Async::Ready(option.map(&mut self.f)))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/map_err.rs b/third_party/rust/futures-0.1.31/src/stream/map_err.rs
new file mode 100644
index 0000000000..8d1c0fc083
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/map_err.rs
@@ -0,0 +1,80 @@
+use Poll;
+use stream::Stream;
+
/// A stream combinator which will change the error type of a stream from one
/// type to another.
///
/// This is produced by the `Stream::map_err` method.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct MapErr<S, F> {
    // The underlying stream whose errors are transformed.
    stream: S,
    // `FnMut(S::Error) -> U` closure applied to every error.
    f: F,
}
+
+pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F>
+ where S: Stream,
+ F: FnMut(S::Error) -> U,
+{
+ MapErr {
+ stream: s,
+ f: f,
+ }
+}
+
// Accessors for the wrapped stream; this trio is mirrored across all of the
// stream combinators in this crate (see `Map` above).
impl<S, F> MapErr<S, F> {
    /// Acquires a reference to the underlying stream that this combinator is
    /// pulling from.
    pub fn get_ref(&self) -> &S {
        &self.stream
    }

    /// Acquires a mutable reference to the underlying stream that this
    /// combinator is pulling from.
    ///
    /// Note that care must be taken to avoid tampering with the state of the
    /// stream which may otherwise confuse this combinator.
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.stream
    }

    /// Consumes this combinator, returning the underlying stream.
    ///
    /// Note that this may discard intermediate state of this combinator, so
    /// care should be taken to avoid losing resources when this is called.
    pub fn into_inner(self) -> S {
        self.stream
    }
}
+
// Forwarding impl of Sink from the underlying stream: when `S` is also a
// sink, `MapErr<S, F>` can still be used as that sink unchanged (the closure
// only affects the stream side's error type).
impl<S, F> ::sink::Sink for MapErr<S, F>
    where S: ::sink::Sink
{
    type SinkItem = S::SinkItem;
    type SinkError = S::SinkError;

    fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
        self.stream.start_send(item)
    }

    fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
        self.stream.poll_complete()
    }

    fn close(&mut self) -> Poll<(), S::SinkError> {
        self.stream.close()
    }
}
+
+impl<S, F, U> Stream for MapErr<S, F>
+ where S: Stream,
+ F: FnMut(S::Error) -> U,
+{
+ type Item = S::Item;
+ type Error = U;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, U> {
+ self.stream.poll().map_err(&mut self.f)
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/merge.rs b/third_party/rust/futures-0.1.31/src/stream/merge.rs
new file mode 100644
index 0000000000..af7505e69a
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/merge.rs
@@ -0,0 +1,82 @@
+#![deprecated(note = "functionality provided by `select` now")]
+#![allow(deprecated)]
+
+use {Poll, Async};
+use stream::{Stream, Fuse};
+
/// An adapter for merging the output of two streams.
///
/// The merged stream produces items from one or both of the underlying
/// streams as they become available. Errors, however, are not merged: you
/// get at most one error at a time.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct Merge<S1, S2: Stream> {
    // Both sides are fused so polling an already-finished stream stays
    // well-defined.
    stream1: Fuse<S1>,
    stream2: Fuse<S2>,
    // An error observed from `stream2` while an item from `stream1` was
    // being returned; it is yielded on the next call to `poll`.
    queued_error: Option<S2::Error>,
}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Merge<S1, S2>
+ where S1: Stream, S2: Stream<Error = S1::Error>
+{
+ Merge {
+ stream1: stream1.fuse(),
+ stream2: stream2.fuse(),
+ queued_error: None,
+ }
+}
+
/// An item returned from a merge stream, which represents an item from one or
/// both of the underlying streams.
#[derive(Debug)]
pub enum MergedItem<I1, I2> {
    /// An item from the first stream
    First(I1),
    /// An item from the second stream
    Second(I2),
    /// Items from both streams, available during the same poll
    Both(I1, I2),
}
+
impl<S1, S2> Stream for Merge<S1, S2>
    where S1: Stream, S2: Stream<Error = S1::Error>
{
    type Item = MergedItem<S1::Item, S2::Item>;
    type Error = S1::Error;

    // Polls `stream1` first, then `stream2`; the combination of the two
    // readiness states decides which `MergedItem` variant (if any) to yield.
    // At most one error is surfaced per poll — see `queued_error` below.
    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        // Deliver an error held over from a previous poll before anything else.
        if let Some(e) = self.queued_error.take() {
            return Err(e)
        }

        match self.stream1.poll()? {
            // First stream pending: an item from the second alone is `Second`;
            // otherwise we are not ready (even if `stream2` finished, `stream1`
            // may still produce).
            Async::NotReady => {
                match try_ready!(self.stream2.poll()) {
                    Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
                    None => Ok(Async::NotReady),
                }
            }
            // First stream finished: only `stream2` can contribute now; the
            // merge ends when it ends too.
            Async::Ready(None) => {
                match try_ready!(self.stream2.poll()) {
                    Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
                    None => Ok(Async::Ready(None)),
                }
            }
            // First stream yielded: still poll `stream2` so both sides make
            // progress in a single poll.
            Async::Ready(Some(item1)) => {
                match self.stream2.poll() {
                    // Don't drop `item1`: stash the error and yield it on the
                    // next poll instead ("at most one error at a time").
                    Err(e) => {
                        self.queued_error = Some(e);
                        Ok(Async::Ready(Some(MergedItem::First(item1))))
                    }
                    Ok(Async::NotReady) | Ok(Async::Ready(None)) => {
                        Ok(Async::Ready(Some(MergedItem::First(item1))))
                    }
                    Ok(Async::Ready(Some(item2))) => {
                        Ok(Async::Ready(Some(MergedItem::Both(item1, item2))))
                    }
                }
            }
        }
    }
}
diff --git a/third_party/rust/futures-0.1.31/src/stream/mod.rs b/third_party/rust/futures-0.1.31/src/stream/mod.rs
new file mode 100644
index 0000000000..2d90362470
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/mod.rs
@@ -0,0 +1,1146 @@
+//! Asynchronous streams
+//!
+//! This module contains the `Stream` trait and a number of adaptors for this
+//! trait. This trait is very similar to the `Iterator` trait in the standard
+//! library except that it expresses the concept of blocking as well. A stream
+//! here is a sequential sequence of values which may take some amount of time
+//! in between to produce.
+//!
+//! A stream may request that it is blocked between values while the next value
+//! is calculated, and provides a way to get notified once the next value is
+//! ready as well.
+//!
+//! You can find more information/tutorials about streams [online at
+//! https://tokio.rs][online]
+//!
+//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+
+use {IntoFuture, Poll};
+
+mod iter;
+#[allow(deprecated)]
+pub use self::iter::{iter, Iter};
+#[cfg(feature = "with-deprecated")]
+#[allow(deprecated)]
+pub use self::Iter as IterStream;
+mod iter_ok;
+pub use self::iter_ok::{iter_ok, IterOk};
+mod iter_result;
+pub use self::iter_result::{iter_result, IterResult};
+
+mod repeat;
+pub use self::repeat::{repeat, Repeat};
+
+mod and_then;
+mod chain;
+mod concat;
+mod empty;
+mod filter;
+mod filter_map;
+mod flatten;
+mod fold;
+mod for_each;
+mod from_err;
+mod fuse;
+mod future;
+mod inspect;
+mod inspect_err;
+mod map;
+mod map_err;
+mod merge;
+mod once;
+mod or_else;
+mod peek;
+mod poll_fn;
+mod select;
+mod skip;
+mod skip_while;
+mod take;
+mod take_while;
+mod then;
+mod unfold;
+mod zip;
+mod forward;
+pub use self::and_then::AndThen;
+pub use self::chain::Chain;
+#[allow(deprecated)]
+pub use self::concat::Concat;
+pub use self::concat::Concat2;
+pub use self::empty::{Empty, empty};
+pub use self::filter::Filter;
+pub use self::filter_map::FilterMap;
+pub use self::flatten::Flatten;
+pub use self::fold::Fold;
+pub use self::for_each::ForEach;
+pub use self::from_err::FromErr;
+pub use self::fuse::Fuse;
+pub use self::future::StreamFuture;
+pub use self::inspect::Inspect;
+pub use self::inspect_err::InspectErr;
+pub use self::map::Map;
+pub use self::map_err::MapErr;
+#[allow(deprecated)]
+pub use self::merge::{Merge, MergedItem};
+pub use self::once::{Once, once};
+pub use self::or_else::OrElse;
+pub use self::peek::Peekable;
+pub use self::poll_fn::{poll_fn, PollFn};
+pub use self::select::Select;
+pub use self::skip::Skip;
+pub use self::skip_while::SkipWhile;
+pub use self::take::Take;
+pub use self::take_while::TakeWhile;
+pub use self::then::Then;
+pub use self::unfold::{Unfold, unfold};
+pub use self::zip::Zip;
+pub use self::forward::Forward;
+use sink::{Sink};
+
+if_std! {
+ use std;
+
+ mod buffered;
+ mod buffer_unordered;
+ mod catch_unwind;
+ mod chunks;
+ mod collect;
+ mod wait;
+ mod channel;
+ mod split;
+ pub mod futures_unordered;
+ mod futures_ordered;
+ pub use self::buffered::Buffered;
+ pub use self::buffer_unordered::BufferUnordered;
+ pub use self::catch_unwind::CatchUnwind;
+ pub use self::chunks::Chunks;
+ pub use self::collect::Collect;
+ pub use self::wait::Wait;
+ pub use self::split::{SplitStream, SplitSink, ReuniteError};
+ pub use self::futures_unordered::FuturesUnordered;
+ pub use self::futures_ordered::{futures_ordered, FuturesOrdered};
+
+ #[doc(hidden)]
+ #[cfg(feature = "with-deprecated")]
+ #[allow(deprecated)]
+ pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError};
+
+ /// A type alias for `Box<Stream + Send>`
+ #[doc(hidden)]
+ #[deprecated(note = "removed without replacement, recommended to use a \
+ local extension trait or function if needed, more \
+ details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+ pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>;
+
    // Delegate `Stream` through `Box` so a boxed stream (including the
    // deprecated `BoxStream` alias above) is itself usable as a stream.
    impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
        type Item = S::Item;
        type Error = S::Error;

        fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
            (**self).poll()
        }
    }
+}
+
+/// A stream of values, not all of which may have been produced yet.
+///
+/// `Stream` is a trait to represent any source of sequential events or items
+/// which acts like an iterator but long periods of time may pass between
+/// items. Like `Future` the methods of `Stream` never block and it is thus
+/// suitable for programming in an asynchronous fashion. This trait is very
+/// similar to the `Iterator` trait in the standard library where `Some` is
+/// used to signal elements of the stream and `None` is used to indicate that
+/// the stream is finished.
+///
+/// Like futures a stream has basic combinators to transform the stream, perform
+/// more work on each item, etc.
+///
+/// You can find more information/tutorials about streams [online at
+/// https://tokio.rs][online]
+///
+/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
+///
+/// # Streams as Futures
+///
+/// Any instance of `Stream` can also be viewed as a `Future` where the resolved
+/// value is the next item in the stream along with the rest of the stream. The
+/// `into_future` adaptor can be used here to convert any stream into a future
+/// for use with other future methods like `join` and `select`.
+///
+/// # Errors
+///
+/// Streams, like futures, can also model errors in their computation. All
+/// streams have an associated `Error` type like with futures. Currently as of
+/// the 0.1 release of this library an error on a stream **does not terminate
+/// the stream**. That is, after one error is received, another error may be
+/// received from the same stream (it's valid to keep polling).
+///
+/// This property of streams, however, is [being considered] for change in 0.2
+/// where an error on a stream is similar to `None`, it terminates the stream
+/// entirely. If one of these use cases suits you perfectly and not the other,
+/// please feel welcome to comment on [the issue][being considered]!
+///
+/// [being considered]: https://github.com/rust-lang-nursery/futures-rs/issues/206
+#[must_use = "streams do nothing unless polled"]
+pub trait Stream {
+ /// The type of item this stream will yield on success.
+ type Item;
+
+ /// The type of error this stream may generate.
+ type Error;
+
+ /// Attempt to pull out the next value of this stream, returning `None` if
+ /// the stream is finished.
+ ///
+ /// This method, like `Future::poll`, is the sole method of pulling out a
+ /// value from a stream. This method must also be run within the context of
+ /// a task typically and implementors of this trait must ensure that
+ /// implementations of this method do not block, as it may cause consumers
+ /// to behave badly.
+ ///
+ /// # Return value
+ ///
+ /// If `NotReady` is returned then this stream's next value is not ready
+ /// yet and implementations will ensure that the current task will be
+ /// notified when the next value may be ready. If `Some` is returned then
+ /// the returned value represents the next value on the stream. `Err`
+ /// indicates an error happened, while `Ok` indicates whether there was a
+ /// new item on the stream or whether the stream has terminated.
+ ///
+ /// # Panics
+ ///
+ /// Once a stream is finished, that is `Ready(None)` has been returned,
+ /// further calls to `poll` may result in a panic or other "bad behavior".
+ /// If this is difficult to guard against then the `fuse` adapter can be
+ /// used to ensure that `poll` always has well-defined semantics.
+ // TODO: more here
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>;
+
+ // TODO: should there also be a method like `poll` but doesn't return an
+ // item? basically just says "please make more progress internally"
+ // seems crucial for buffering to actually make any sense.
+
+ /// Creates an iterator which blocks the current thread until each item of
+ /// this stream is resolved.
+ ///
+ /// This method will consume ownership of this stream, returning an
+ /// implementation of a standard iterator. This iterator will *block the
+ /// current thread* on each call to `next` if the item in the stream isn't
+ /// ready yet.
+ ///
+ /// > **Note:** This method is not appropriate to call on event loops or
+ /// > similar I/O situations because it will prevent the event
+ /// > loop from making progress (this blocks the thread). This
+ /// > method should only be called when it's guaranteed that the
+ /// > blocking work associated with this stream will be completed
+ /// > by another thread.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Panics
+ ///
+ /// The returned iterator does not attempt to catch panics. If the `poll`
+ /// function panics, panics will be propagated to the caller of `next`.
+ #[cfg(feature = "use_std")]
+ fn wait(self) -> Wait<Self>
+ where Self: Sized
+ {
+ wait::new(self)
+ }
+
+ /// Convenience function for turning this stream into a trait object.
+ ///
+ /// This simply avoids the need to write `Box::new` and can often help with
+ /// type inference as well by always returning a trait object. Note that
+ /// this method requires the `Send` bound and returns a `BoxStream`, which
+ /// also encodes this. If you'd like to create a `Box<Stream>` without the
+ /// `Send` bound, then the `Box::new` function can be used instead.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::stream::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel(1);
+ /// let a: BoxStream<i32, ()> = rx.boxed();
+ /// ```
+ #[cfg(feature = "use_std")]
+ #[doc(hidden)]
+ #[deprecated(note = "removed without replacement, recommended to use a \
+ local extension trait or function if needed, more \
+ details in https://github.com/rust-lang-nursery/futures-rs/issues/228")]
+ #[allow(deprecated)]
+ fn boxed(self) -> BoxStream<Self::Item, Self::Error>
+ where Self: Sized + Send + 'static,
+ {
+ ::std::boxed::Box::new(self)
+ }
+
+ /// Converts this stream into a `Future`.
+ ///
+ /// A stream can be viewed as a future which will resolve to a pair containing
+ /// the next element of the stream plus the remaining stream. If the stream
+ /// terminates, then the next element is `None` and the remaining stream is
+ /// still passed back, to allow reclamation of its resources.
+ ///
+ /// The returned future can be used to compose streams and futures together by
+ /// placing everything into the "world of futures".
+ fn into_future(self) -> StreamFuture<Self>
+ where Self: Sized
+ {
+ future::new(self)
+ }
+
+ /// Converts a stream of type `T` to a stream of type `U`.
+ ///
+ /// The provided closure is executed over all elements of this stream as
+ /// they are made available, and the callback will be executed inline with
+ /// calls to `poll`.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it, similar to the existing `map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ /// let rx = rx.map(|x| x + 3);
+ /// ```
+ fn map<U, F>(self, f: F) -> Map<Self, F>
+ where F: FnMut(Self::Item) -> U,
+ Self: Sized
+ {
+ map::new(self, f)
+ }
+
+ /// Converts a stream of error type `T` to a stream of error type `U`.
+ ///
+ /// The provided closure is executed over all errors of this stream as
+ /// they are made available, and the callback will be executed inline with
+ /// calls to `poll`.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it, similar to the existing `map_err` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ /// let rx = rx.map_err(|()| 3);
+ /// ```
+ fn map_err<U, F>(self, f: F) -> MapErr<Self, F>
+ where F: FnMut(Self::Error) -> U,
+ Self: Sized
+ {
+ map_err::new(self, f)
+ }
+
+ /// Filters the values produced by this stream according to the provided
+ /// predicate.
+ ///
+ /// As values of this stream are made available, the provided predicate will
+ /// be run against them. If the predicate returns `true` then the stream
+ /// will yield the value, but if the predicate returns `false` then the
+ /// value will be discarded and the next value will be produced.
+ ///
+ /// All errors are passed through without filtering in this combinator.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it, similar to the existing `filter` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ /// let evens = rx.filter(|x| x % 2 == 0);
+ /// ```
+ fn filter<F>(self, f: F) -> Filter<Self, F>
+ where F: FnMut(&Self::Item) -> bool,
+ Self: Sized
+ {
+ filter::new(self, f)
+ }
+
+ /// Filters the values produced by this stream while simultaneously mapping
+ /// them to a different type.
+ ///
+ /// As values of this stream are made available, the provided function will
+ /// be run on them. If the predicate returns `Some(e)` then the stream will
+ /// yield the value `e`, but if the predicate returns `None` then the next
+ /// value will be produced.
+ ///
+ /// All errors are passed through without filtering in this combinator.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it, similar to the existing `filter_map` methods in the
+ /// standard library.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ /// let evens_plus_one = rx.filter_map(|x| {
+/// if x % 2 == 0 {
+ /// Some(x + 1)
+ /// } else {
+ /// None
+ /// }
+ /// });
+ /// ```
+ fn filter_map<F, B>(self, f: F) -> FilterMap<Self, F>
+ where F: FnMut(Self::Item) -> Option<B>,
+ Self: Sized
+ {
+ filter_map::new(self, f)
+ }
+
+ /// Chain on a computation for when a value is ready, passing the resulting
+ /// item to the provided closure `f`.
+ ///
+ /// This function can be used to ensure a computation runs regardless of
+ /// the next value on the stream. The closure provided will be yielded a
+ /// `Result` once a value is ready, and the returned future will then be run
+ /// to completion to produce the next value on this stream.
+ ///
+ /// The returned value of the closure must implement the `IntoFuture` trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished. Note that the `Result` type implements the `IntoFuture`
+ /// trait so it is possible to simply alter the `Result` yielded to the
+ /// closure and return it.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ ///
+ /// let rx = rx.then(|result| {
+ /// match result {
+ /// Ok(e) => Ok(e + 3),
+ /// Err(()) => Err(4),
+ /// }
+ /// });
+ /// ```
+ fn then<F, U>(self, f: F) -> Then<Self, F, U>
+ where F: FnMut(Result<Self::Item, Self::Error>) -> U,
+ U: IntoFuture,
+ Self: Sized
+ {
+ then::new(self, f)
+ }
+
+ /// Chain on a computation for when a value is ready, passing the successful
+ /// results to the provided closure `f`.
+ ///
+ /// This function can be used to run a unit of work when the next successful
+ /// value on a stream is ready. The closure provided will be yielded a value
+ /// when ready, and the returned future will then be run to completion to
+ /// produce the next value on this stream.
+ ///
+ /// Any errors produced by this stream will not be passed to the closure,
+ /// and will be passed through.
+ ///
+ /// The returned value of the closure must implement the `IntoFuture` trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished. Note that the `Result` type implements the `IntoFuture`
+ /// trait so it is possible to simply alter the `Result` yielded to the
+ /// closure and return it.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ ///
+ /// To process the entire stream and return a single future representing
+ /// success or error, use `for_each` instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (_tx, rx) = mpsc::channel::<i32>(1);
+ ///
+ /// let rx = rx.and_then(|result| {
+ /// if result % 2 == 0 {
+ /// Ok(result)
+ /// } else {
+ /// Err(())
+ /// }
+ /// });
+ /// ```
+ fn and_then<F, U>(self, f: F) -> AndThen<Self, F, U>
+ where F: FnMut(Self::Item) -> U,
+ U: IntoFuture<Error = Self::Error>,
+ Self: Sized
+ {
+ and_then::new(self, f)
+ }
+
+ /// Chain on a computation for when an error happens, passing the
+ /// erroneous result to the provided closure `f`.
+ ///
+ /// This function can be used to run a unit of work and attempt to recover from
+ /// an error if one happens. The closure provided will be yielded an error
+ /// when one appears, and the returned future will then be run to completion
+ /// to produce the next value on this stream.
+ ///
+ /// Any successful values produced by this stream will not be passed to the
+ /// closure, and will be passed through.
+ ///
+ /// The returned value of the closure must implement the `IntoFuture` trait
+ /// and can represent some more work to be done before the composed stream
+ /// is finished. Note that the `Result` type implements the `IntoFuture`
+ /// trait so it is possible to simply alter the `Result` yielded to the
+ /// closure and return it.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ fn or_else<F, U>(self, f: F) -> OrElse<Self, F, U>
+ where F: FnMut(Self::Error) -> U,
+ U: IntoFuture<Item = Self::Item>,
+ Self: Sized
+ {
+ or_else::new(self, f)
+ }
+
+ /// Collect all of the values of this stream into a vector, returning a
+ /// future representing the result of that computation.
+ ///
+ /// This combinator will collect all successful results of this stream and
+ /// collect them into a `Vec<Self::Item>`. If an error happens then all
+ /// collected elements will be dropped and the error will be returned.
+ ///
+ /// The returned future will be resolved whenever an error happens or when
+ /// the stream returns `Ok(None)`.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (mut tx, rx) = mpsc::channel(1);
+ ///
+ /// thread::spawn(|| {
+ /// for i in (0..5).rev() {
+ /// tx = tx.send(i + 1).wait().unwrap();
+ /// }
+ /// });
+ ///
+ /// let mut result = rx.collect();
+ /// assert_eq!(result.wait(), Ok(vec![5, 4, 3, 2, 1]));
+ /// ```
+ #[cfg(feature = "use_std")]
+ fn collect(self) -> Collect<Self>
+ where Self: Sized
+ {
+ collect::new(self)
+ }
+
+ /// Concatenate all results of a stream into a single extendable
+ /// destination, returning a future representing the end result.
+ ///
+ /// This combinator will extend the first item with the contents
+ /// of all the successful results of the stream. If the stream is
+ /// empty, the default value will be returned. If an error occurs,
+ /// all the results will be dropped and the error will be returned.
+ ///
+ /// The name `concat2` is an intermediate measure until the release of
+ /// futures 0.2, at which point it will be renamed back to `concat`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (mut tx, rx) = mpsc::channel(1);
+ ///
+ /// thread::spawn(move || {
+ /// for i in (0..3).rev() {
+ /// let n = i * 3;
+ /// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
+ /// }
+ /// });
+ /// let result = rx.concat2();
+ /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+ /// ```
+ fn concat2(self) -> Concat2<Self>
+ where Self: Sized,
+ Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
+ {
+ concat::new2(self)
+ }
+
+ /// Concatenate all results of a stream into a single extendable
+ /// destination, returning a future representing the end result.
+ ///
+ /// This combinator will extend the first item with the contents
+ /// of all the successful results of the stream. If an error occurs,
+ /// all the results will be dropped and the error will be returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (mut tx, rx) = mpsc::channel(1);
+ ///
+ /// thread::spawn(move || {
+ /// for i in (0..3).rev() {
+ /// let n = i * 3;
+ /// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
+ /// }
+ /// });
+ /// let result = rx.concat();
+ /// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// It's important to note that this function will panic if the stream
+ /// is empty, which is the reason for its deprecation.
+ #[deprecated(since="0.1.14", note="please use `Stream::concat2` instead")]
+ #[allow(deprecated)]
+ fn concat(self) -> Concat<Self>
+ where Self: Sized,
+ Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator,
+ {
+ concat::new(self)
+ }
+
+ /// Execute an accumulating computation over a stream, collecting all the
+ /// values into one final result.
+ ///
+ /// This combinator will collect all successful results of this stream
+ /// according to the closure provided. The initial state is also provided to
+ /// this method and then is returned again by each execution of the closure.
+ /// Once the entire stream has been exhausted the returned future will
+ /// resolve to this value.
+ ///
+ /// If an error happens then collected state will be dropped and the error
+ /// will be returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::stream;
+ /// use futures::future;
+ ///
+ /// let number_stream = stream::iter_ok::<_, ()>(0..6);
+ /// let sum = number_stream.fold(0, |acc, x| future::ok(acc + x));
+ /// assert_eq!(sum.wait(), Ok(15));
+ /// ```
+ fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
+ where F: FnMut(T, Self::Item) -> Fut,
+ Fut: IntoFuture<Item = T>,
+ Self::Error: From<Fut::Error>,
+ Self: Sized
+ {
+ fold::new(self, f, init)
+ }
+
+ /// Flattens a stream of streams into just one continuous stream.
+ ///
+ /// If this stream's elements are themselves streams then this combinator
+ /// will flatten out the entire stream to one long chain of elements. Any
+ /// errors are passed through without looking at them, but otherwise each
+ /// individual stream will get exhausted before moving on to the next.
+ ///
+ /// ```
+ /// use std::thread;
+ ///
+ /// use futures::prelude::*;
+ /// use futures::sync::mpsc;
+ ///
+ /// let (tx1, rx1) = mpsc::channel::<i32>(1);
+ /// let (tx2, rx2) = mpsc::channel::<i32>(1);
+ /// let (tx3, rx3) = mpsc::channel(1);
+ ///
+ /// thread::spawn(|| {
+ /// tx1.send(1).wait().unwrap()
+ /// .send(2).wait().unwrap();
+ /// });
+ /// thread::spawn(|| {
+ /// tx2.send(3).wait().unwrap()
+ /// .send(4).wait().unwrap();
+ /// });
+ /// thread::spawn(|| {
+ /// tx3.send(rx1).wait().unwrap()
+ /// .send(rx2).wait().unwrap();
+ /// });
+ ///
+ /// let mut result = rx3.flatten().collect();
+ /// assert_eq!(result.wait(), Ok(vec![1, 2, 3, 4]));
+ /// ```
+ fn flatten(self) -> Flatten<Self>
+ where Self::Item: Stream,
+ <Self::Item as Stream>::Error: From<Self::Error>,
+ Self: Sized
+ {
+ flatten::new(self)
+ }
+
+ /// Skip elements on this stream while the predicate provided resolves to
+ /// `true`.
+ ///
+ /// This function, like `Iterator::skip_while`, will skip elements on the
+ /// stream until the `predicate` resolves to `false`. Once one element
+ /// returns false all future elements will be returned from the underlying
+ /// stream.
+ fn skip_while<P, R>(self, pred: P) -> SkipWhile<Self, P, R>
+ where P: FnMut(&Self::Item) -> R,
+ R: IntoFuture<Item=bool, Error=Self::Error>,
+ Self: Sized
+ {
+ skip_while::new(self, pred)
+ }
+
+ /// Take elements from this stream while the predicate provided resolves to
+ /// `true`.
+ ///
+ /// This function, like `Iterator::take_while`, will take elements from the
+ /// stream until the `predicate` resolves to `false`. Once one element
+ /// resolves to `false`, it will always return that the stream is done.
+ fn take_while<P, R>(self, pred: P) -> TakeWhile<Self, P, R>
+ where P: FnMut(&Self::Item) -> R,
+ R: IntoFuture<Item=bool, Error=Self::Error>,
+ Self: Sized
+ {
+ take_while::new(self, pred)
+ }
+
+ /// Runs this stream to completion, executing the provided closure for each
+ /// element on the stream.
+ ///
+ /// The closure provided will be called for each item that this stream
+ /// successfully resolves to, producing a future. That future will then be
+ /// executed to completion before moving on to the next item.
+ ///
+ /// The returned value is a `Future` where the `Item` type is `()` and
+ /// errors are otherwise threaded through. Any error on the stream or in the
+ /// closure will cause iteration to be halted immediately and the future
+ /// will resolve to that error.
+ ///
+ /// To process each item in the stream and produce another stream instead
+ /// of a single future, use `and_then` instead.
+ fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U>
+ where F: FnMut(Self::Item) -> U,
+ U: IntoFuture<Item=(), Error = Self::Error>,
+ Self: Sized
+ {
+ for_each::new(self, f)
+ }
+
+ /// Map this stream's error to any error implementing `From` for
+ /// this stream's `Error`, returning a new stream.
+ ///
+ /// This function does for streams what `try!` does for `Result`,
+ /// by letting the compiler infer the type of the resulting error.
+ /// Just like `map_err` above, this is useful for example to ensure
+ /// that streams have the same error type when used with
+ /// combinators.
+ ///
+ /// Note that this function consumes the receiving stream and returns a
+ /// wrapped version of it.
+ fn from_err<E: From<Self::Error>>(self) -> FromErr<Self, E>
+ where Self: Sized,
+ {
+ from_err::new(self)
+ }
+
+ /// Creates a new stream of at most `amt` items of the underlying stream.
+ ///
+ /// Once `amt` items have been yielded from this stream then it will always
+ /// return that the stream is done.
+ ///
+ /// # Errors
+ ///
+ /// Any errors yielded from the underlying stream, before the desired amount
+ /// of items is reached, are passed through and do not affect the total
+ /// number of items taken.
+ fn take(self, amt: u64) -> Take<Self>
+ where Self: Sized
+ {
+ take::new(self, amt)
+ }
+
+ /// Creates a new stream which skips `amt` items of the underlying stream.
+ ///
+ /// Once `amt` items have been skipped from this stream then it will always
+ /// return the remaining items on this stream.
+ ///
+ /// # Errors
+ ///
+ /// All errors yielded from the underlying stream are passed through and do
+ /// not affect the total number of items skipped.
+ fn skip(self, amt: u64) -> Skip<Self>
+ where Self: Sized
+ {
+ skip::new(self, amt)
+ }
+
+ /// Fuse a stream such that `poll` will never again be called once it has
+ /// finished.
+ ///
+ /// Currently once a stream has returned `None` from `poll` any further
+ /// calls could exhibit bad behavior such as blocking forever or panicking.
+ /// If it is known that `poll` may be called after the stream has
+ /// already finished, then this method can be used to ensure that it has
+ /// defined semantics.
+ ///
+ /// Once a stream has been `fuse`d and it finishes, then it will forever
+ /// return `None` from `poll`. This, unlike the trait's `poll` method,
+ /// is guaranteed.
+ ///
+ /// Also note that as soon as this stream returns `None` it will be dropped
+ /// to reclaim resources associated with it.
+ fn fuse(self) -> Fuse<Self>
+ where Self: Sized
+ {
+ fuse::new(self)
+ }
+
+ /// Borrows a stream, rather than consuming it.
+ ///
+ /// This is useful to allow applying stream adaptors while still retaining
+ /// ownership of the original stream.
+ ///
+ /// ```
+ /// use futures::prelude::*;
+ /// use futures::stream;
+ /// use futures::future;
+ ///
+ /// let mut stream = stream::iter_ok::<_, ()>(1..5);
+ ///
+ /// let sum = stream.by_ref().take(2).fold(0, |a, b| future::ok(a + b)).wait();
+ /// assert_eq!(sum, Ok(3));
+ ///
+ /// // You can still use the stream afterwards
+ /// let sum = stream.take(2).fold(0, |a, b| future::ok(a + b)).wait();
+ /// assert_eq!(sum, Ok(7));
+ /// ```
+ fn by_ref(&mut self) -> &mut Self
+ where Self: Sized
+ {
+ self
+ }
+
+ /// Catches unwinding panics while polling the stream.
+ ///
+ /// The caught panic (if any) will be the last element of the resulting stream.
+ ///
+ /// In general, panics within a stream can propagate all the way out to the
+ /// task level. This combinator makes it possible to halt unwinding within
+ /// the stream itself. It's most commonly used within task executors. This
+ /// method should not be used for error handling.
+ ///
+ /// Note that this method requires the `UnwindSafe` bound from the standard
+ /// library. This isn't always applied automatically, and the standard
+ /// library provides an `AssertUnwindSafe` wrapper type to apply it
+ /// after the fact. To assist using this method, the `Stream` trait is also
+ /// implemented for `AssertUnwindSafe<S>` where `S` implements `Stream`.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use futures::prelude::*;
+ /// use futures::stream;
+ ///
+ /// let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]);
+ /// // panic on second element
+ /// let stream_panicking = stream.map(|o| o.unwrap());
+ /// let mut iter = stream_panicking.catch_unwind().wait();
+ ///
+ /// assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
+ /// assert!(iter.next().unwrap().is_err());
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[cfg(feature = "use_std")]
+ fn catch_unwind(self) -> CatchUnwind<Self>
+ where Self: Sized + std::panic::UnwindSafe
+ {
+ catch_unwind::new(self)
+ }
+
+ /// An adaptor for creating a buffered list of pending futures.
+ ///
+ /// If this stream's item can be converted into a future, then this adaptor
+ /// will buffer up to at most `amt` futures and then return results in the
+ /// same order as the underlying stream. No more than `amt` futures will be
+ /// buffered at any point in time, and fewer than `amt` may also be buffered
+ /// depending on the state of each future.
+ ///
+ /// The returned stream will be a stream of each future's result, with
+ /// errors passed through whenever they occur.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "use_std")]
+ fn buffered(self, amt: usize) -> Buffered<Self>
+ where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
+ Self: Sized
+ {
+ buffered::new(self, amt)
+ }
+
+ /// An adaptor for creating a buffered list of pending futures (unordered).
+ ///
+ /// If this stream's item can be converted into a future, then this adaptor
+ /// will buffer up to `amt` futures and then return results in the order
+ /// in which they complete. No more than `amt` futures will be buffered at
+ /// any point in time, and fewer than `amt` may also be buffered depending on
+ /// the state of each future.
+ ///
+ /// The returned stream will be a stream of each future's result, with
+ /// errors passed through whenever they occur.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "use_std")]
+ fn buffer_unordered(self, amt: usize) -> BufferUnordered<Self>
+ where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
+ Self: Sized
+ {
+ buffer_unordered::new(self, amt)
+ }
+
+ /// An adapter for merging the output of two streams.
+ ///
+ /// The merged stream produces items from one or both of the underlying
+ /// streams as they become available. Errors, however, are not merged: you
+ /// get at most one error at a time. Deprecated: use `select` instead.
+ #[deprecated(note = "functionality provided by `select` now")]
+ #[allow(deprecated)]
+ fn merge<S>(self, other: S) -> Merge<Self, S>
+ where S: Stream<Error = Self::Error>,
+ Self: Sized,
+ {
+ merge::new(self, other)
+ }
+
+ /// An adapter for zipping two streams together.
+ ///
+ /// The zipped stream waits for both streams to produce an item and then
+ /// returns that pair. If an error happens, then that error will be returned
+ /// immediately. If either stream ends then the zipped stream will also end.
+ fn zip<S>(self, other: S) -> Zip<Self, S>
+ where S: Stream<Error = Self::Error>,
+ Self: Sized,
+ {
+ zip::new(self, other)
+ }
+
+ /// An adapter for chaining two streams.
+ ///
+ /// The resulting stream emits elements from the first stream, and when
+ /// the first stream reaches the end, emits the elements from the second stream.
+ ///
+ /// ```rust
+ /// use futures::prelude::*;
+ /// use futures::stream;
+ ///
+ /// let stream1 = stream::iter_result(vec![Ok(10), Err(false)]);
+ /// let stream2 = stream::iter_result(vec![Err(true), Ok(20)]);
+ /// let mut chain = stream1.chain(stream2).wait();
+ ///
+ /// assert_eq!(Some(Ok(10)), chain.next());
+ /// assert_eq!(Some(Err(false)), chain.next());
+ /// assert_eq!(Some(Err(true)), chain.next());
+ /// assert_eq!(Some(Ok(20)), chain.next());
+ /// assert_eq!(None, chain.next());
+ /// ```
+ fn chain<S>(self, other: S) -> Chain<Self, S>
+ where S: Stream<Item = Self::Item, Error = Self::Error>,
+ Self: Sized
+ {
+ chain::new(self, other)
+ }
+
+ /// Creates a new stream which exposes a `peek` method.
+ ///
+ /// Calling `peek` returns a reference to the next item in the stream without consuming it.
+ fn peekable(self) -> Peekable<Self>
+ where Self: Sized
+ {
+ peek::new(self)
+ }
+
+ /// An adaptor for chunking up items of the stream inside a vector.
+ ///
+ /// This combinator will attempt to pull items from this stream and buffer
+ /// them into a local vector. At most `capacity` items will get buffered
+ /// before they're yielded from the returned stream.
+ ///
+ /// Note that the vectors returned from this stream may not always have
+ /// `capacity` elements. If the underlying stream ended and only a partial
+ /// vector was created, it'll be returned. Additionally if an error happens
+ /// from the underlying stream then the currently buffered items will be
+ /// yielded.
+ ///
+ /// Errors are passed through the stream unbuffered.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ ///
+ /// # Panics
+ ///
+ /// This method will panic if `capacity` is zero.
+ #[cfg(feature = "use_std")]
+ fn chunks(self, capacity: usize) -> Chunks<Self>
+ where Self: Sized
+ {
+ chunks::new(self, capacity)
+ }
+
+ /// Creates a stream that selects the next element from either this stream
+ /// or the provided one, whichever is ready first.
+ ///
+ /// This combinator will attempt to pull items from both streams. Each
+ /// stream will be polled in a round-robin fashion, and whenever a stream is
+ /// ready to yield an item that item is yielded.
+ ///
+ /// The `select` function is similar to `merge` except that it requires both
+ /// streams to have the same item and error types.
+ ///
+ /// Errors are passed through from either stream.
+ fn select<S>(self, other: S) -> Select<Self, S>
+ where S: Stream<Item = Self::Item, Error = Self::Error>,
+ Self: Sized,
+ {
+ select::new(self, other)
+ }
+
+ /// A future that completes after the given stream has been fully processed
+ /// into the sink, including flushing.
+ ///
+ /// This future will drive the stream to keep producing items until it is
+ /// exhausted, sending each item to the sink. It will complete once both the
+ /// stream is exhausted, and the sink has fully processed every received item,
+ /// flushed successfully, and closed successfully.
+ ///
+ /// Doing `stream.forward(sink)` is roughly equivalent to
+ /// `sink.send_all(stream)`. The returned future will exhaust all items from
+ /// `self`, sending them all to `sink`. Furthermore the `sink` will be
+ /// closed and flushed.
+ ///
+ /// On completion, the pair `(stream, sink)` is returned.
+ fn forward<S>(self, sink: S) -> Forward<Self, S>
+ where S: Sink<SinkItem = Self::Item>,
+ Self::Error: From<S::SinkError>,
+ Self: Sized
+ {
+ forward::new(self, sink)
+ }
+
+ /// Splits this `Stream + Sink` object into separate `Stream` and `Sink`
+ /// objects.
+ ///
+ /// This can be useful when you want to split ownership between tasks, or
+ /// allow direct interaction between the two objects (e.g. via
+ /// `Sink::send_all`). The halves can later be rejoined with `reunite`.
+ ///
+ /// This method is only available when the `use_std` feature of this
+ /// library is activated, and it is activated by default.
+ #[cfg(feature = "use_std")]
+ fn split(self) -> (SplitSink<Self>, SplitStream<Self>)
+ where Self: super::sink::Sink + Sized
+ {
+ split::split(self)
+ }
+
+ /// Do something with each item of this stream, afterwards passing it on.
+ ///
+ /// This is similar to the `Iterator::inspect` method in the standard
+ /// library, where it allows you to easily inspect each value as it passes
+ /// through the stream, for example to debug what's going on.
+ fn inspect<F>(self, f: F) -> Inspect<Self, F>
+ where F: FnMut(&Self::Item),
+ Self: Sized,
+ {
+ inspect::new(self, f)
+ }
+
+ /// Do something with the error of this stream, afterwards passing it on.
+ ///
+ /// This is similar to the `Stream::inspect` method, where it allows
+ /// you to easily inspect the error as it passes through the stream, for
+ /// example to debug what's going on.
+ fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
+ where F: FnMut(&Self::Error),
+ Self: Sized,
+ {
+ inspect_err::new(self, f)
+ }
+}
+
+// A `&mut S` is itself a stream when `S` is: `poll` simply delegates to the
+// referenced stream.
+impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ (**self).poll()
+ }
+}
+
+/// Converts a list of futures into a `Stream` of results from the futures.
+///
+/// This function will take a list of futures (e.g. a vector, an iterator,
+/// etc), and return a stream. The stream will yield items as they become
+/// available on the futures internally, in the order that they become
+/// available. This function is similar to `buffer_unordered` in that it may
+/// return items in a different order than in the list specified.
+///
+/// Note that the returned set can also be used to dynamically push more
+/// futures into it as they become available.
+#[cfg(feature = "use_std")]
+pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
+ where I: IntoIterator,
+ I::Item: IntoFuture
+{
+ let mut set = FuturesUnordered::new();
+
+ for future in futures {
+ set.push(future.into_future());
+ }
+
+ return set
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/once.rs b/third_party/rust/futures-0.1.31/src/stream/once.rs
new file mode 100644
index 0000000000..24fb327bd6
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/once.rs
@@ -0,0 +1,35 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream which emits a single element and then EOF.
+///
+/// This stream will never block and is always ready.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Once<T, E>(Option<Result<T, E>>);
+
+/// Creates a stream which yields a single element
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::once::<(), _>(Err(17));
+/// assert_eq!(Err(17), stream.poll());
+/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
+/// ```
+pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
+ Once(Some(item))
+}
+
+impl<T, E> Stream for Once<T, E> {
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<T>, E> {
+ match self.0.take() {
+ Some(Ok(e)) => Ok(Async::Ready(Some(e))),
+ Some(Err(e)) => Err(e),
+ None => Ok(Async::Ready(None)),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/or_else.rs b/third_party/rust/futures-0.1.31/src/stream/or_else.rs
new file mode 100644
index 0000000000..2d15fa2b70
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/or_else.rs
@@ -0,0 +1,80 @@
+use {IntoFuture, Future, Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto errors produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::or_else` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct OrElse<S, F, U>
+ where U: IntoFuture,
+{
+ stream: S,
+ future: Option<U::Future>,
+ f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> OrElse<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Error) -> U,
+ U: IntoFuture<Item=S::Item>,
+{
+ OrElse {
+ stream: s,
+ future: None,
+ f: f,
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream; `OrElse` only adapts the stream side.
+impl<S, F, U> ::sink::Sink for OrElse<S, F, U>
+ where S: ::sink::Sink, U: IntoFuture
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F, U> Stream for OrElse<S, F, U>
+ where S: Stream,
+ F: FnMut(S::Error) -> U,
+ U: IntoFuture<Item=S::Item>,
+{
+ type Item = S::Item;
+ type Error = U::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, U::Error> {
+ if self.future.is_none() {
+ let item = match self.stream.poll() {
+ Ok(Async::Ready(e)) => return Ok(Async::Ready(e)),
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Err(e) => e,
+ };
+ self.future = Some((self.f)(item).into_future());
+ }
+ assert!(self.future.is_some());
+ match self.future.as_mut().unwrap().poll() {
+ Ok(Async::Ready(e)) => {
+ self.future = None;
+ Ok(Async::Ready(Some(e)))
+ }
+ Err(e) => {
+ self.future = None;
+ Err(e)
+ }
+ Ok(Async::NotReady) => Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/peek.rs b/third_party/rust/futures-0.1.31/src/stream/peek.rs
new file mode 100644
index 0000000000..96e657663b
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/peek.rs
@@ -0,0 +1,74 @@
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// A `Stream` that implements a `peek` method.
+///
+/// The `peek` method can be used to retrieve a reference
+/// to the next `Stream::Item` if available. A subsequent
+/// call to `poll` will return the owned item.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Peekable<S: Stream> {
+ stream: Fuse<S>,
+ peeked: Option<S::Item>,
+}
+
+
+pub fn new<S: Stream>(stream: S) -> Peekable<S> {
+ Peekable {
+ stream: stream.fuse(),
+ peeked: None
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Peekable<S>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S: Stream> Stream for Peekable<S> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ if let Some(item) = self.peeked.take() {
+ return Ok(Async::Ready(Some(item)))
+ }
+ self.stream.poll()
+ }
+}
+
+
+impl<S: Stream> Peekable<S> {
+ /// Peek retrieves a reference to the next item in the stream.
+ ///
+ /// This method polls the underlying stream and returns either a reference
+ /// to the next item, if the stream is ready, or passes through any errors.
+ pub fn peek(&mut self) -> Poll<Option<&S::Item>, S::Error> {
+ if self.peeked.is_some() {
+ return Ok(Async::Ready(self.peeked.as_ref()))
+ }
+ match try_ready!(self.poll()) {
+ None => Ok(Async::Ready(None)),
+ Some(item) => {
+ self.peeked = Some(item);
+ Ok(Async::Ready(self.peeked.as_ref()))
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs b/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs
new file mode 100644
index 0000000000..fbc7df0844
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/poll_fn.rs
@@ -0,0 +1,49 @@
+//! Definition of the `PollFn` combinator
+
+use {Stream, Poll};
+
+/// A stream which adapts a function returning `Poll`.
+///
+/// Created by the `poll_fn` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct PollFn<F> {
+ inner: F,
+}
+
+/// Creates a new stream wrapping a function that returns `Poll`.
+///
+/// Polling the returned stream delegates to the wrapped function.
+///
+/// # Examples
+///
+/// ```
+/// use futures::stream::poll_fn;
+/// use futures::{Async, Poll};
+///
+/// let mut counter = 1usize;
+///
+/// let read_stream = poll_fn(move || -> Poll<Option<String>, std::io::Error> {
+/// if counter == 0 { return Ok(Async::Ready(None)); }
+/// counter -= 1;
+/// Ok(Async::Ready(Some("Hello, World!".to_owned())))
+/// });
+/// ```
+pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
+where
+ F: FnMut() -> Poll<Option<T>, E>,
+{
+ PollFn { inner: f }
+}
+
+impl<T, E, F> Stream for PollFn<F>
+where
+ F: FnMut() -> Poll<Option<T>, E>,
+{
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<T>, E> {
+ (self.inner)()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/repeat.rs b/third_party/rust/futures-0.1.31/src/stream/repeat.rs
new file mode 100644
index 0000000000..e3cb5ff49c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/repeat.rs
@@ -0,0 +1,53 @@
+use core::marker;
+
+
+use stream::Stream;
+
+use {Async, Poll};
+
+
+/// Stream that produces the same element repeatedly.
+///
+/// This structure is created by the `stream::repeat` function.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Repeat<T, E>
+ where T: Clone
+{
+ item: T,
+ error: marker::PhantomData<E>,
+}
+
+/// Create a stream which produces the same item repeatedly.
+///
+/// The stream never produces an error or EOF. Note that you likely want to avoid
+/// usage of `collect` or such on the returned stream as it will exhaust
+/// available memory as it tries to just fill up all RAM.
+///
+/// ```rust
+/// use futures::*;
+///
+/// let mut stream = stream::repeat::<_, bool>(10);
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
+/// ```
+pub fn repeat<T, E>(item: T) -> Repeat<T, E>
+ where T: Clone
+{
+ Repeat {
+ item: item,
+ error: marker::PhantomData,
+ }
+}
+
+impl<T, E> Stream for Repeat<T, E>
+ where T: Clone
+{
+ type Item = T;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ Ok(Async::Ready(Some(self.item.clone())))
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/select.rs b/third_party/rust/futures-0.1.31/src/stream/select.rs
new file mode 100644
index 0000000000..ae6b66cf14
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/select.rs
@@ -0,0 +1,64 @@
+use {Poll, Async};
+use stream::{Stream, Fuse};
+
+/// An adapter for merging the output of two streams.
+///
+/// The merged stream produces items from either of the underlying streams as
+/// they become available, and the streams are polled in a round-robin fashion.
+/// Errors, however, are not merged: you get at most one error at a time.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Select<S1, S2> {
+ stream1: Fuse<S1>,
+ stream2: Fuse<S2>,
+ flag: bool,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Select<S1, S2>
+ where S1: Stream,
+ S2: Stream<Item = S1::Item, Error = S1::Error>
+{
+ Select {
+ stream1: stream1.fuse(),
+ stream2: stream2.fuse(),
+ flag: false,
+ }
+}
+
+impl<S1, S2> Stream for Select<S1, S2>
+ where S1: Stream,
+ S2: Stream<Item = S1::Item, Error = S1::Error>
+{
+ type Item = S1::Item;
+ type Error = S1::Error;
+
+ fn poll(&mut self) -> Poll<Option<S1::Item>, S1::Error> {
+ let (a, b) = if self.flag {
+ (&mut self.stream2 as &mut Stream<Item=_, Error=_>,
+ &mut self.stream1 as &mut Stream<Item=_, Error=_>)
+ } else {
+ (&mut self.stream1 as &mut Stream<Item=_, Error=_>,
+ &mut self.stream2 as &mut Stream<Item=_, Error=_>)
+ };
+ self.flag = !self.flag;
+
+ let a_done = match a.poll()? {
+ Async::Ready(Some(item)) => return Ok(Some(item).into()),
+ Async::Ready(None) => true,
+ Async::NotReady => false,
+ };
+
+ match b.poll()? {
+ Async::Ready(Some(item)) => {
+ // If the other stream isn't finished yet, give it a chance to
+ // go first next time, as we pulled something off `b`.
+ if !a_done {
+ self.flag = !self.flag;
+ }
+ Ok(Some(item).into())
+ }
+ Async::Ready(None) if a_done => Ok(None.into()),
+ Async::Ready(None) | Async::NotReady => Ok(Async::NotReady),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/skip.rs b/third_party/rust/futures-0.1.31/src/stream/skip.rs
new file mode 100644
index 0000000000..a1d7b49797
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/skip.rs
@@ -0,0 +1,84 @@
+use {Poll, Async};
+use stream::Stream;
+
+/// A stream combinator which skips a fixed number of elements before continuing.
+///
+/// This structure is produced by the `Stream::skip` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Skip<S> {
+ stream: S,
+ remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Skip<S>
+ where S: Stream,
+{
+ Skip {
+ stream: s,
+ remaining: amt,
+ }
+}
+
+impl<S> Skip<S> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Skip<S>
+ where S: ::sink::Sink
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S> Stream for Skip<S>
+ where S: Stream,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ while self.remaining > 0 {
+ match try_ready!(self.stream.poll()) {
+ Some(_) => self.remaining -= 1,
+ None => return Ok(Async::Ready(None)),
+ }
+ }
+
+ self.stream.poll()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/skip_while.rs b/third_party/rust/futures-0.1.31/src/stream/skip_while.rs
new file mode 100644
index 0000000000..b571996c24
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/skip_while.rs
@@ -0,0 +1,113 @@
+use {Async, Poll, IntoFuture, Future};
+use stream::Stream;
+
+/// A stream combinator which skips elements of a stream while a predicate
+/// holds.
+///
+/// This structure is produced by the `Stream::skip_while` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+ stream: S,
+ pred: P,
+ pending: Option<(R::Future, S::Item)>,
+ done_skipping: bool,
+}
+
+pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R>
+ where S: Stream,
+ P: FnMut(&S::Item) -> R,
+ R: IntoFuture<Item=bool, Error=S::Error>,
+{
+ SkipWhile {
+ stream: s,
+ pred: p,
+ pending: None,
+ done_skipping: false,
+ }
+}
+
+impl<S, P, R> SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream, which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R>
+ where S: ::sink::Sink + Stream, R: IntoFuture
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, P, R> Stream for SkipWhile<S, P, R>
+ where S: Stream,
+ P: FnMut(&S::Item) -> R,
+ R: IntoFuture<Item=bool, Error=S::Error>,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ if self.done_skipping {
+ return self.stream.poll();
+ }
+
+ loop {
+ if self.pending.is_none() {
+ let item = match try_ready!(self.stream.poll()) {
+ Some(e) => e,
+ None => return Ok(Async::Ready(None)),
+ };
+ self.pending = Some(((self.pred)(&item).into_future(), item));
+ }
+
+ assert!(self.pending.is_some());
+ match self.pending.as_mut().unwrap().0.poll() {
+ Ok(Async::Ready(true)) => self.pending = None,
+ Ok(Async::Ready(false)) => {
+ let (_, item) = self.pending.take().unwrap();
+ self.done_skipping = true;
+ return Ok(Async::Ready(Some(item)))
+ }
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Err(e) => {
+ self.pending = None;
+ return Err(e)
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/split.rs b/third_party/rust/futures-0.1.31/src/stream/split.rs
new file mode 100644
index 0000000000..ddaa52997d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/split.rs
@@ -0,0 +1,105 @@
+use std::any::Any;
+use std::error::Error;
+use std::fmt;
+
+use {StartSend, Sink, Stream, Poll, Async, AsyncSink};
+use sync::BiLock;
+
+/// A `Stream` part of the split pair
+#[derive(Debug)]
+pub struct SplitStream<S>(BiLock<S>);
+
+impl<S> SplitStream<S> {
+ /// Attempts to put the two "halves" of a split `Stream + Sink` back
+ /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+ /// a matching pair originating from the same call to `Stream::split`.
+ pub fn reunite(self, other: SplitSink<S>) -> Result<S, ReuniteError<S>> {
+ other.reunite(self)
+ }
+}
+
+impl<S: Stream> Stream for SplitStream<S> {
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ match self.0.poll_lock() {
+ Async::Ready(mut inner) => inner.poll(),
+ Async::NotReady => Ok(Async::NotReady),
+ }
+ }
+}
+
+/// A `Sink` part of the split pair
+#[derive(Debug)]
+pub struct SplitSink<S>(BiLock<S>);
+
+impl<S> SplitSink<S> {
+ /// Attempts to put the two "halves" of a split `Stream + Sink` back
+ /// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
+ /// a matching pair originating from the same call to `Stream::split`.
+ pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S>> {
+ self.0.reunite(other.0).map_err(|err| {
+ ReuniteError(SplitSink(err.0), SplitStream(err.1))
+ })
+ }
+}
+
+impl<S: Sink> Sink for SplitSink<S> {
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem)
+ -> StartSend<S::SinkItem, S::SinkError>
+ {
+ match self.0.poll_lock() {
+ Async::Ready(mut inner) => inner.start_send(item),
+ Async::NotReady => Ok(AsyncSink::NotReady(item)),
+ }
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ match self.0.poll_lock() {
+ Async::Ready(mut inner) => inner.poll_complete(),
+ Async::NotReady => Ok(Async::NotReady),
+ }
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ match self.0.poll_lock() {
+ Async::Ready(mut inner) => inner.close(),
+ Async::NotReady => Ok(Async::NotReady),
+ }
+ }
+}
+
+pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) {
+ let (a, b) = BiLock::new(s);
+ let read = SplitStream(a);
+ let write = SplitSink(b);
+ (write, read)
+}
+
+/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves
+/// of a `Stream + Split`, and thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub SplitSink<T>, pub SplitStream<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("ReuniteError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "tried to reunite a SplitStream and SplitSink that don't form a pair")
+ }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+ fn description(&self) -> &str {
+ "tried to reunite a SplitStream and SplitSink that don't form a pair"
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/take.rs b/third_party/rust/futures-0.1.31/src/stream/take.rs
new file mode 100644
index 0000000000..0ca68496eb
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/take.rs
@@ -0,0 +1,86 @@
+use {Async, Poll};
+use stream::Stream;
+
+/// A stream combinator which returns a maximum number of elements.
+///
+/// This structure is produced by the `Stream::take` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Take<S> {
+ stream: S,
+ remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Take<S>
+ where S: Stream,
+{
+ Take {
+ stream: s,
+ remaining: amt,
+ }
+}
+
+impl<S> Take<S> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S> ::sink::Sink for Take<S>
+ where S: ::sink::Sink + Stream
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S> Stream for Take<S>
+ where S: Stream,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ if self.remaining == 0 {
+ Ok(Async::Ready(None))
+ } else {
+ let next = try_ready!(self.stream.poll());
+ match next {
+ Some(_) => self.remaining -= 1,
+ None => self.remaining = 0,
+ }
+ Ok(Async::Ready(next))
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/take_while.rs b/third_party/rust/futures-0.1.31/src/stream/take_while.rs
new file mode 100644
index 0000000000..732ae855de
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/take_while.rs
@@ -0,0 +1,113 @@
+use {Async, Poll, IntoFuture, Future};
+use stream::Stream;
+
+/// A stream combinator which takes elements from a stream while a predicate
+/// holds.
+///
+/// This structure is produced by the `Stream::take_while` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
+ stream: S,
+ pred: P,
+ pending: Option<(R::Future, S::Item)>,
+ done_taking: bool,
+}
+
+pub fn new<S, P, R>(s: S, p: P) -> TakeWhile<S, P, R>
+ where S: Stream,
+ P: FnMut(&S::Item) -> R,
+ R: IntoFuture<Item=bool, Error=S::Error>,
+{
+ TakeWhile {
+ stream: s,
+ pred: p,
+ pending: None,
+ done_taking: false,
+ }
+}
+
+impl<S, P, R> TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ &self.stream
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ &mut self.stream
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R>
+ where S: ::sink::Sink + Stream, R: IntoFuture
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, P, R> Stream for TakeWhile<S, P, R>
+ where S: Stream,
+ P: FnMut(&S::Item) -> R,
+ R: IntoFuture<Item=bool, Error=S::Error>,
+{
+ type Item = S::Item;
+ type Error = S::Error;
+
+ fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
+ if self.done_taking {
+ return Ok(Async::Ready(None));
+ }
+
+ if self.pending.is_none() {
+ let item = match try_ready!(self.stream.poll()) {
+ Some(e) => e,
+ None => return Ok(Async::Ready(None)),
+ };
+ self.pending = Some(((self.pred)(&item).into_future(), item));
+ }
+
+ assert!(self.pending.is_some());
+ match self.pending.as_mut().unwrap().0.poll() {
+ Ok(Async::Ready(true)) => {
+ let (_, item) = self.pending.take().unwrap();
+ Ok(Async::Ready(Some(item)))
+ },
+ Ok(Async::Ready(false)) => {
+ self.done_taking = true;
+ Ok(Async::Ready(None))
+ }
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ Err(e) => {
+ self.pending = None;
+ Err(e)
+ }
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/then.rs b/third_party/rust/futures-0.1.31/src/stream/then.rs
new file mode 100644
index 0000000000..cab338e922
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/then.rs
@@ -0,0 +1,81 @@
+use {Async, IntoFuture, Future, Poll};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto each item produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::then` method.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Then<S, F, U>
+ where U: IntoFuture,
+{
+ stream: S,
+ future: Option<U::Future>,
+ f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> Then<S, F, U>
+ where S: Stream,
+ F: FnMut(Result<S::Item, S::Error>) -> U,
+ U: IntoFuture,
+{
+ Then {
+ stream: s,
+ future: None,
+ f: f,
+ }
+}
+
+// Forwarding impl of Sink from the underlying stream
+impl<S, F, U> ::sink::Sink for Then<S, F, U>
+ where S: ::sink::Sink, U: IntoFuture,
+{
+ type SinkItem = S::SinkItem;
+ type SinkError = S::SinkError;
+
+ fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
+ self.stream.start_send(item)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), S::SinkError> {
+ self.stream.close()
+ }
+}
+
+impl<S, F, U> Stream for Then<S, F, U>
+ where S: Stream,
+ F: FnMut(Result<S::Item, S::Error>) -> U,
+ U: IntoFuture,
+{
+ type Item = U::Item;
+ type Error = U::Error;
+
+ fn poll(&mut self) -> Poll<Option<U::Item>, U::Error> {
+ if self.future.is_none() {
+ let item = match self.stream.poll() {
+ Ok(Async::NotReady) => return Ok(Async::NotReady),
+ Ok(Async::Ready(None)) => return Ok(Async::Ready(None)),
+ Ok(Async::Ready(Some(e))) => Ok(e),
+ Err(e) => Err(e),
+ };
+ self.future = Some((self.f)(item).into_future());
+ }
+ assert!(self.future.is_some());
+ match self.future.as_mut().unwrap().poll() {
+ Ok(Async::Ready(e)) => {
+ self.future = None;
+ Ok(Async::Ready(Some(e)))
+ }
+ Err(e) => {
+ self.future = None;
+ Err(e)
+ }
+ Ok(Async::NotReady) => Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/unfold.rs b/third_party/rust/futures-0.1.31/src/stream/unfold.rs
new file mode 100644
index 0000000000..ac427b8c3b
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/unfold.rs
@@ -0,0 +1,114 @@
+use core::mem;
+
+use {Future, IntoFuture, Async, Poll};
+use stream::Stream;
+
+/// Creates a `Stream` from a seed and a closure returning a `Future`.
+///
+/// This function is the dual for the `Stream::fold()` adapter: while
+/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a
+/// `Stream` from a seed value.
+///
+/// `unfold()` will call the provided closure with the provided seed, then wait
+/// for the returned `Future` to complete with `(a, b)`. It will then yield the
+/// value `a`, and use `b` as the next internal state.
+///
+/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()`
+/// will stop producing items and return `Ok(Async::Ready(None))` in future
+/// calls to `poll()`.
+///
+/// In case of error generated by the returned `Future`, the error will be
+/// returned by the `Stream`. The `Stream` will then yield
+/// `Ok(Async::Ready(None))` in future calls to `poll()`.
+///
+/// This function can typically be used when wanting to go from the "world of
+/// futures" to the "world of streams": the provided closure can build a
+/// `Future` using other library functions working on futures, and `unfold()`
+/// will turn it into a `Stream` by repeating the operation.
+///
+/// # Example
+///
+/// ```rust
+/// use futures::stream::{self, Stream};
+/// use futures::future::{self, Future};
+///
+/// let mut stream = stream::unfold(0, |state| {
+/// if state <= 2 {
+/// let next_state = state + 1;
+/// let yielded = state * 2;
+/// let fut = future::ok::<_, u32>((yielded, next_state));
+/// Some(fut)
+/// } else {
+/// None
+/// }
+/// });
+///
+/// let result = stream.collect().wait();
+/// assert_eq!(result, Ok(vec![0, 2, 4]));
+/// ```
+pub fn unfold<T, F, Fut, It>(init: T, f: F) -> Unfold<T, F, Fut>
+ where F: FnMut(T) -> Option<Fut>,
+ Fut: IntoFuture<Item = (It, T)>,
+{
+ Unfold {
+ f: f,
+ state: State::Ready(init),
+ }
+}
+
+/// A stream which creates futures, polls them and return their result
+///
+/// This stream is returned by the `futures::stream::unfold` method
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Unfold<T, F, Fut> where Fut: IntoFuture {
+ f: F,
+ state: State<T, Fut::Future>,
+}
+
+impl<T, F, Fut, It> Stream for Unfold<T, F, Fut>
+ where F: FnMut(T) -> Option<Fut>,
+ Fut: IntoFuture<Item = (It, T)>,
+{
+ type Item = It;
+ type Error = Fut::Error;
+
+ fn poll(&mut self) -> Poll<Option<It>, Fut::Error> {
+ loop {
+ match mem::replace(&mut self.state, State::Empty) {
+ // State::Empty may happen if the future returned an error
+ State::Empty => { return Ok(Async::Ready(None)); }
+ State::Ready(state) => {
+ match (self.f)(state) {
+ Some(fut) => { self.state = State::Processing(fut.into_future()); }
+ None => { return Ok(Async::Ready(None)); }
+ }
+ }
+ State::Processing(mut fut) => {
+ match fut.poll()? {
+                        Async::Ready((item, next_state)) => {
+ self.state = State::Ready(next_state);
+ return Ok(Async::Ready(Some(item)));
+ }
+ Async::NotReady => {
+ self.state = State::Processing(fut);
+ return Ok(Async::NotReady);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+enum State<T, F> where F: Future {
+ /// Placeholder state when doing work, or when the returned Future generated an error
+ Empty,
+
+ /// Ready to generate new future; current internal state is the `T`
+ Ready(T),
+
+ /// Working on a future generated previously
+ Processing(F),
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/wait.rs b/third_party/rust/futures-0.1.31/src/stream/wait.rs
new file mode 100644
index 0000000000..80acb6c2a6
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/wait.rs
@@ -0,0 +1,53 @@
+use stream::Stream;
+use executor;
+
+/// A stream combinator which converts an asynchronous stream to a **blocking
+/// iterator**.
+///
+/// Created by the `Stream::wait` method, this function transforms any stream
+/// into a standard iterator. This is implemented by blocking the current thread
+/// while items on the underlying stream aren't ready yet.
+#[must_use = "iterators do nothing unless advanced"]
+#[derive(Debug)]
+pub struct Wait<S> {
+ stream: executor::Spawn<S>,
+}
+
+impl<S> Wait<S> {
+ /// Acquires a reference to the underlying stream that this combinator is
+ /// pulling from.
+ pub fn get_ref(&self) -> &S {
+ self.stream.get_ref()
+ }
+
+ /// Acquires a mutable reference to the underlying stream that this
+ /// combinator is pulling from.
+ ///
+ /// Note that care must be taken to avoid tampering with the state of the
+ /// stream which may otherwise confuse this combinator.
+ pub fn get_mut(&mut self) -> &mut S {
+ self.stream.get_mut()
+ }
+
+ /// Consumes this combinator, returning the underlying stream.
+ ///
+ /// Note that this may discard intermediate state of this combinator, so
+ /// care should be taken to avoid losing resources when this is called.
+ pub fn into_inner(self) -> S {
+ self.stream.into_inner()
+ }
+}
+
+pub fn new<S: Stream>(s: S) -> Wait<S> {
+ Wait {
+ stream: executor::spawn(s),
+ }
+}
+
+impl<S: Stream> Iterator for Wait<S> {
+ type Item = Result<S::Item, S::Error>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.stream.wait_stream()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/stream/zip.rs b/third_party/rust/futures-0.1.31/src/stream/zip.rs
new file mode 100644
index 0000000000..17e3c69ffe
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/stream/zip.rs
@@ -0,0 +1,59 @@
+use {Async, Poll};
+use stream::{Stream, Fuse};
+
+/// An adapter for zipping two streams together.
+///
+/// The zipped stream waits until both underlying streams have produced an
+/// item, then yields them as a pair; it terminates when either stream is
+/// exhausted. Errors are not paired: you get at most one error at a time.
+#[derive(Debug)]
+#[must_use = "streams do nothing unless polled"]
+pub struct Zip<S1: Stream, S2: Stream> {
+ stream1: Fuse<S1>,
+ stream2: Fuse<S2>,
+ queued1: Option<S1::Item>,
+ queued2: Option<S2::Item>,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Zip<S1, S2>
+ where S1: Stream, S2: Stream<Error = S1::Error>
+{
+ Zip {
+ stream1: stream1.fuse(),
+ stream2: stream2.fuse(),
+ queued1: None,
+ queued2: None,
+ }
+}
+
+impl<S1, S2> Stream for Zip<S1, S2>
+ where S1: Stream, S2: Stream<Error = S1::Error>
+{
+ type Item = (S1::Item, S2::Item);
+ type Error = S1::Error;
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ if self.queued1.is_none() {
+ match self.stream1.poll()? {
+ Async::Ready(Some(item1)) => self.queued1 = Some(item1),
+ Async::Ready(None) | Async::NotReady => {}
+ }
+ }
+ if self.queued2.is_none() {
+ match self.stream2.poll()? {
+ Async::Ready(Some(item2)) => self.queued2 = Some(item2),
+ Async::Ready(None) | Async::NotReady => {}
+ }
+ }
+
+ if self.queued1.is_some() && self.queued2.is_some() {
+ let pair = (self.queued1.take().unwrap(),
+ self.queued2.take().unwrap());
+ Ok(Async::Ready(Some(pair)))
+ } else if self.stream1.is_done() || self.stream2.is_done() {
+ Ok(Async::Ready(None))
+ } else {
+ Ok(Async::NotReady)
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sync/bilock.rs b/third_party/rust/futures-0.1.31/src/sync/bilock.rs
new file mode 100644
index 0000000000..af9e1eeb2c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sync/bilock.rs
@@ -0,0 +1,298 @@
+use std::any::Any;
+use std::boxed::Box;
+use std::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+use std::sync::Arc;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+
+use {Async, Future, Poll};
+use task::{self, Task};
+
+/// A type of futures-powered synchronization primitive which is a mutex between
+/// two possible owners.
+///
+/// This primitive is not as generic as a full-blown mutex but is sufficient for
+/// many use cases where there are only two possible owners of a resource. The
+/// implementation of `BiLock` can be more optimized for just the two possible
+/// owners.
+///
+/// Note that it's possible to use this lock through a poll-style interface with
+/// the `poll_lock` method but you can also use it as a future with the `lock`
+/// method that consumes a `BiLock` and returns a future that will resolve when
+/// it's locked.
+///
+/// A `BiLock` is typically used for "split" operations where data which serves
+/// two purposes wants to be split into two to be worked with separately. For
+/// example a TCP stream could be both a reader and a writer or a framing layer
+/// could be both a stream and a sink for messages. A `BiLock` enables splitting
+/// these two and then using each independently in a futures-powered fashion.
+#[derive(Debug)]
+pub struct BiLock<T> {
+ inner: Arc<Inner<T>>,
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+ state: AtomicUsize,
+ inner: Option<UnsafeCell<T>>,
+}
+
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl<T> BiLock<T> {
+ /// Creates a new `BiLock` protecting the provided data.
+ ///
+ /// Two handles to the lock are returned, and these are the only two handles
+ /// that will ever be available to the lock. These can then be sent to separate
+ /// tasks to be managed there.
+ pub fn new(t: T) -> (BiLock<T>, BiLock<T>) {
+ let inner = Arc::new(Inner {
+ state: AtomicUsize::new(0),
+ inner: Some(UnsafeCell::new(t)),
+ });
+
+ (BiLock { inner: inner.clone() }, BiLock { inner: inner })
+ }
+
+ /// Attempt to acquire this lock, returning `NotReady` if it can't be
+ /// acquired.
+ ///
+ /// This function will acquire the lock in a nonblocking fashion, returning
+ /// immediately if the lock is already held. If the lock is successfully
+ /// acquired then `Async::Ready` is returned with a value that represents
+ /// the locked value (and can be used to access the protected data). The
+ /// lock is unlocked when the returned `BiLockGuard` is dropped.
+ ///
+ /// If the lock is already held then this function will return
+ /// `Async::NotReady`. In this case the current task will also be scheduled
+ /// to receive a notification when the lock would otherwise become
+ /// available.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if called outside the context of a future's
+ /// task.
+ pub fn poll_lock(&self) -> Async<BiLockGuard<T>> {
+ loop {
+ match self.inner.state.swap(1, SeqCst) {
+ // Woohoo, we grabbed the lock!
+ 0 => return Async::Ready(BiLockGuard { inner: self }),
+
+ // Oops, someone else has locked the lock
+ 1 => {}
+
+ // A task was previously blocked on this lock, likely our task,
+ // so we need to update that task.
+ n => unsafe {
+ drop(Box::from_raw(n as *mut Task));
+ }
+ }
+
+ let me = Box::new(task::current());
+ let me = Box::into_raw(me) as usize;
+
+ match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) {
+ // The lock is still locked, but we've now parked ourselves, so
+ // just report that we're scheduled to receive a notification.
+ Ok(_) => return Async::NotReady,
+
+ // Oops, looks like the lock was unlocked after our swap above
+ // and before the compare_exchange. Deallocate what we just
+ // allocated and go through the loop again.
+ Err(0) => unsafe {
+ drop(Box::from_raw(me as *mut Task));
+ },
+
+ // The top of this loop set the previous state to 1, so if we
+ // failed the CAS above then it's because the previous value was
+ // *not* zero or one. This indicates that a task was blocked,
+ // but we're trying to acquire the lock and there's only one
+ // other reference of the lock, so it should be impossible for
+ // that task to ever block itself.
+ Err(n) => panic!("invalid state: {}", n),
+ }
+ }
+ }
+
+ /// Perform a "blocking lock" of this lock, consuming this lock handle and
+ /// returning a future to the acquired lock.
+ ///
+ /// This function consumes the `BiLock<T>` and returns a sentinel future,
+ /// `BiLockAcquire<T>`. The returned future will resolve to
+ /// `BiLockAcquired<T>` which represents a locked lock similarly to
+ /// `BiLockGuard<T>`.
+ ///
+ /// Note that the returned future will never resolve to an error.
+ pub fn lock(self) -> BiLockAcquire<T> {
+ BiLockAcquire {
+ inner: Some(self),
+ }
+ }
+
+ /// Attempts to put the two "halves" of a `BiLock<T>` back together and
+ /// recover the original value. Succeeds only if the two `BiLock<T>`s
+ /// originated from the same call to `BiLock::new`.
+ pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>> {
+ if &*self.inner as *const _ == &*other.inner as *const _ {
+ drop(other);
+ let inner = Arc::try_unwrap(self.inner)
+ .ok()
+ .expect("futures: try_unwrap failed in BiLock<T>::reunite");
+ Ok(unsafe { inner.into_inner() })
+ } else {
+ Err(ReuniteError(self, other))
+ }
+ }
+
+ fn unlock(&self) {
+ match self.inner.state.swap(0, SeqCst) {
+ // we've locked the lock, shouldn't be possible for us to see an
+ // unlocked lock.
+ 0 => panic!("invalid unlocked state"),
+
+ // Ok, no one else tried to get the lock, we're done.
+ 1 => {}
+
+ // Another task has parked themselves on this lock, let's wake them
+ // up as its now their turn.
+ n => unsafe {
+ Box::from_raw(n as *mut Task).notify();
+ }
+ }
+ }
+}
+
+impl<T> Inner<T> {
+ unsafe fn into_inner(mut self) -> T {
+ mem::replace(&mut self.inner, None).unwrap().into_inner()
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ assert_eq!(self.state.load(SeqCst), 0);
+ }
+}
+
+/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
+/// thus could not be `reunite`d.
+pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
+
+impl<T> fmt::Debug for ReuniteError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("ReuniteError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T> fmt::Display for ReuniteError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "tried to reunite two BiLocks that don't form a pair")
+ }
+}
+
+impl<T: Any> Error for ReuniteError<T> {
+ fn description(&self) -> &str {
+ "tried to reunite two BiLocks that don't form a pair"
+ }
+}
+
+/// Returned RAII guard from the `poll_lock` method.
+///
+/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
+/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
+/// unlocked.
+#[derive(Debug)]
+pub struct BiLockGuard<'a, T: 'a> {
+ inner: &'a BiLock<T>,
+}
+
+impl<'a, T> Deref for BiLockGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.inner.inner.inner.as_ref().unwrap().get() }
+ }
+}
+
+impl<'a, T> DerefMut for BiLockGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.inner.inner.inner.as_ref().unwrap().get() }
+ }
+}
+
+impl<'a, T> Drop for BiLockGuard<'a, T> {
+ fn drop(&mut self) {
+ self.inner.unlock();
+ }
+}
+
+/// Future returned by `BiLock::lock` which will resolve when the lock is
+/// acquired.
+#[derive(Debug)]
+pub struct BiLockAcquire<T> {
+ inner: Option<BiLock<T>>,
+}
+
+impl<T> Future for BiLockAcquire<T> {
+ type Item = BiLockAcquired<T>;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> {
+ match self.inner.as_ref().expect("cannot poll after Ready").poll_lock() {
+ Async::Ready(r) => {
+ mem::forget(r);
+ }
+ Async::NotReady => return Ok(Async::NotReady),
+ }
+ Ok(Async::Ready(BiLockAcquired { inner: self.inner.take() }))
+ }
+}
+
+/// Resolved value of the `BiLockAcquire<T>` future.
+///
+/// This value, like `BiLockGuard<T>`, is a sentinel to the value `T` through
+/// implementations of `Deref` and `DerefMut`. When dropped will unlock the
+/// lock, and the original unlocked `BiLock<T>` can be recovered through the
+/// `unlock` method.
+#[derive(Debug)]
+pub struct BiLockAcquired<T> {
+ inner: Option<BiLock<T>>,
+}
+
+impl<T> BiLockAcquired<T> {
+ /// Recovers the original `BiLock<T>`, unlocking this lock.
+ pub fn unlock(mut self) -> BiLock<T> {
+ let bi_lock = self.inner.take().unwrap();
+
+ bi_lock.unlock();
+
+ bi_lock
+ }
+}
+
+impl<T> Deref for BiLockAcquired<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.inner.as_ref().unwrap().inner.inner.as_ref().unwrap().get() }
+ }
+}
+
+impl<T> DerefMut for BiLockAcquired<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.inner.as_mut().unwrap().inner.inner.as_ref().unwrap().get() }
+ }
+}
+
+impl<T> Drop for BiLockAcquired<T> {
+ fn drop(&mut self) {
+ if let Some(ref bi_lock) = self.inner {
+ bi_lock.unlock();
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sync/mod.rs b/third_party/rust/futures-0.1.31/src/sync/mod.rs
new file mode 100644
index 0000000000..0a46e9afbe
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sync/mod.rs
@@ -0,0 +1,17 @@
+//! Future-aware synchronization
+//!
+//! This module, which is modeled after `std::sync`, contains user-space
+//! synchronization tools that work with futures, streams and sinks. In
+//! particular, these synchronizers do *not* block physical OS threads, but
+//! instead work at the task level.
+//!
+//! More information and examples of how to use these synchronization primitives
+//! can be found [online at tokio.rs].
+//!
+//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/synchronization/
+
+pub mod oneshot;
+pub mod mpsc;
+mod bilock;
+
+pub use self::bilock::{BiLock, BiLockGuard, BiLockAcquire, BiLockAcquired};
diff --git a/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs b/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs
new file mode 100644
index 0000000000..31d2320ab6
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sync/mpsc/mod.rs
@@ -0,0 +1,1187 @@
+//! A multi-producer, single-consumer, futures-aware, FIFO queue with back pressure.
+//!
+//! A channel can be used as a communication primitive between tasks running on
+//! `futures-rs` executors. Channel creation provides `Receiver` and `Sender`
+//! handles. `Receiver` implements `Stream` and allows a task to read values
+//! out of the channel. If there is no message to read from the channel, the
+//! current task will be notified when a new value is sent. `Sender` implements
+//! the `Sink` trait and allows a task to send messages into the channel. If
+//! the channel is at capacity, then send will be rejected and the task will be
+//! notified when additional capacity is available.
+//!
+//! # Disconnection
+//!
+//! When all `Sender` handles have been dropped, it is no longer possible to
+//! send values into the channel. This is considered the termination event of
+//! the stream. As such, `Sender::poll` will return `Ok(Ready(None))`.
+//!
+//! If the receiver handle is dropped, then messages can no longer be read out
+//! of the channel. In this case, a `send` will result in an error.
+//!
+//! # Clean Shutdown
+//!
+//! If the `Receiver` is simply dropped, then it is possible for there to be
+//! messages still in the channel that will not be processed. As such, it is
+//! usually desirable to perform a "clean" shutdown. To do this, the receiver
+//! will first call `close`, which will prevent any further messages to be sent
+//! into the channel. Then, the receiver consumes the channel to completion, at
+//! which point the receiver can be dropped.
+
+// At the core, the channel uses an atomic FIFO queue for message passing. This
+// queue is used as the primary coordination primitive. In order to enforce
+// capacity limits and handle back pressure, a secondary FIFO queue is used to
+// send parked task handles.
+//
+// The general idea is that the channel is created with a `buffer` size of `n`.
+// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed"
+// slot to hold a message. This allows `Sender` to know for a fact that a send
+// will succeed *before* starting to do the actual work of sending the value.
+// Since most of this work is lock-free, once the work starts, it is impossible
+// to safely revert.
+//
+// If the sender is unable to process a send operation, then the current
+// task is parked and the handle is sent on the parked task queue.
+//
+// Note that the implementation guarantees that the channel capacity will never
+// exceed the configured limit, however there is no *strict* guarantee that the
+// receiver will wake up a parked task *immediately* when a slot becomes
+// available. However, it will almost always unpark a task when a slot becomes
+// available and it is *guaranteed* that a sender will be unparked when the
+// message that caused the sender to become parked is read out of the channel.
+//
+// The steps for sending a message are roughly:
+//
+// 1) Increment the channel message count
+// 2) If the channel is at capacity, push the task handle onto the wait queue
+// 3) Push the message onto the message queue.
+//
+// The steps for receiving a message are roughly:
+//
+// 1) Pop a message from the message queue
+// 2) Pop a task handle from the wait queue
+// 3) Decrement the channel message count.
+//
+// It's important for the order of operations on lock-free structures to happen
+// in reverse order between the sender and receiver. This makes the message
+// queue the primary coordination structure and establishes the necessary
+// happens-before semantics required for the acquire / release semantics used
+// by the queue structure.
+
+use std::fmt;
+use std::error::Error;
+use std::any::Any;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+use std::sync::{Arc, Mutex};
+use std::thread;
+use std::usize;
+
+use sync::mpsc::queue::{Queue, PopResult};
+use sync::oneshot;
+use task::{self, Task};
+use future::Executor;
+use sink::SendAll;
+use resultstream::{self, Results};
+use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
+
+mod queue;
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `channel` method.
+#[derive(Debug)]
+pub struct Sender<T> {
+ // Channel state shared between the sender and receiver.
+ inner: Arc<Inner<T>>,
+
+ // Handle to the task that is blocked on this sender. This handle is sent
+ // to the receiver half in order to be notified when the sender becomes
+ // unblocked.
+ sender_task: Arc<Mutex<SenderTask>>,
+
+ // True if the sender might be blocked. This is an optimization to avoid
+ // having to lock the mutex most of the time.
+ maybe_parked: bool,
+}
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `unbounded` method.
+#[derive(Debug)]
+pub struct UnboundedSender<T>(Sender<T>);
+
+trait AssertKinds: Send + Sync + Clone {}
+impl AssertKinds for UnboundedSender<u32> {}
+
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `channel` method.
+#[derive(Debug)]
+pub struct Receiver<T> {
+ inner: Arc<Inner<T>>,
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `unbounded` method.
+#[derive(Debug)]
+pub struct UnboundedReceiver<T>(Receiver<T>);
+
+/// Error type for sending, used when the receiving end of a channel is
+/// dropped
+#[derive(Clone, PartialEq, Eq)]
+pub struct SendError<T>(T);
+
+/// Error type returned from `try_send`
+#[derive(Clone, PartialEq, Eq)]
+pub struct TrySendError<T> {
+ kind: TrySendErrorKind<T>,
+}
+
+#[derive(Clone, PartialEq, Eq)]
+enum TrySendErrorKind<T> {
+ Full(T),
+ Disconnected(T),
+}
+
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("SendError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "send failed because receiver is gone")
+ }
+}
+
+impl<T: Any> Error for SendError<T>
+{
+ fn description(&self) -> &str {
+ "send failed because receiver is gone"
+ }
+}
+
+impl<T> SendError<T> {
+ /// Returns the message that was attempted to be sent but failed.
+ pub fn into_inner(self) -> T {
+ self.0
+ }
+}
+
+impl<T> fmt::Debug for TrySendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("TrySendError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T> fmt::Display for TrySendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ if self.is_full() {
+ write!(fmt, "send failed because channel is full")
+ } else {
+ write!(fmt, "send failed because receiver is gone")
+ }
+ }
+}
+
+impl<T: Any> Error for TrySendError<T> {
+ fn description(&self) -> &str {
+ if self.is_full() {
+ "send failed because channel is full"
+ } else {
+ "send failed because receiver is gone"
+ }
+ }
+}
+
+impl<T> TrySendError<T> {
+ /// Returns true if this error is a result of the channel being full
+ pub fn is_full(&self) -> bool {
+ use self::TrySendErrorKind::*;
+
+ match self.kind {
+ Full(_) => true,
+ _ => false,
+ }
+ }
+
+ /// Returns true if this error is a result of the receiver being dropped
+ pub fn is_disconnected(&self) -> bool {
+ use self::TrySendErrorKind::*;
+
+ match self.kind {
+ Disconnected(_) => true,
+ _ => false,
+ }
+ }
+
+ /// Returns the message that was attempted to be sent but failed.
+ pub fn into_inner(self) -> T {
+ use self::TrySendErrorKind::*;
+
+ match self.kind {
+ Full(v) | Disconnected(v) => v,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct Inner<T> {
+ // Max buffer size of the channel. If `None` then the channel is unbounded.
+ buffer: Option<usize>,
+
+ // Internal channel state. Consists of the number of messages stored in the
+ // channel as well as a flag signalling that the channel is closed.
+ state: AtomicUsize,
+
+ // Atomic, FIFO queue used to send messages to the receiver
+ message_queue: Queue<Option<T>>,
+
+ // Atomic, FIFO queue used to send parked task handles to the receiver.
+ parked_queue: Queue<Arc<Mutex<SenderTask>>>,
+
+ // Number of senders in existence
+ num_senders: AtomicUsize,
+
+ // Handle to the receiver's task.
+ recv_task: Mutex<ReceiverTask>,
+}
+
+// Struct representation of `Inner::state`.
+#[derive(Debug, Clone, Copy)]
+struct State {
+ // `true` when the channel is open
+ is_open: bool,
+
+ // Number of messages in the channel
+ num_messages: usize,
+}
+
+#[derive(Debug)]
+struct ReceiverTask {
+ unparked: bool,
+ task: Option<Task>,
+}
+
+// Returned from Receiver::try_park()
+enum TryPark {
+ Parked,
+ Closed,
+ NotEmpty,
+}
+
+// The `is_open` flag is stored in the left-most bit of `Inner::state`
+const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1);
+
+// When a new channel is created, it is created in the open state with no
+// pending messages.
+const INIT_STATE: usize = OPEN_MASK;
+
+// The maximum number of messages that a channel can track is `usize::MAX >> 1`
+const MAX_CAPACITY: usize = !(OPEN_MASK);
+
+// The maximum requested buffer size must be less than the maximum capacity of
+// a channel. This is because each sender gets a guaranteed slot.
+const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
+
+// Sent to the consumer to wake up blocked producers
+#[derive(Debug)]
+struct SenderTask {
+ task: Option<Task>,
+ is_parked: bool,
+}
+
+impl SenderTask {
+ fn new() -> Self {
+ SenderTask {
+ task: None,
+ is_parked: false,
+ }
+ }
+
+ fn notify(&mut self) {
+ self.is_parked = false;
+
+ if let Some(task) = self.task.take() {
+ task.notify();
+ }
+ }
+}
+
+/// Creates an in-memory channel implementation of the `Stream` trait with
+/// bounded capacity.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. This
+/// channel is unique in that it implements back pressure to ensure that the
+/// sender never outpaces the receiver. The channel capacity is equal to
+/// `buffer + num-senders`. In other words, each sender gets a guaranteed slot
+/// in the channel capacity, and on top of that there are `buffer` "first come,
+/// first serve" slots available to all senders.
+///
+/// The `Receiver` returned implements the `Stream` trait and has access to any
+/// number of the associated combinators for transforming the result.
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
+ // Check that the requested buffer size does not exceed the maximum buffer
+ // size permitted by the system.
+ assert!(buffer < MAX_BUFFER, "requested buffer size too large");
+ channel2(Some(buffer))
+}
+
+/// Creates an in-memory channel implementation of the `Stream` trait with
+/// unbounded capacity.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. A `send`
+/// on this channel will always succeed as long as the receive half has not
+/// been closed. If the receiver falls behind, messages will be buffered
+/// internally.
+///
+/// **Note** that the amount of available system memory is an implicit bound to
+/// the channel. Using an `unbounded` channel has the ability of causing the
+/// process to run out of memory. In this case, the process will be aborted.
+pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+ let (tx, rx) = channel2(None);
+ (UnboundedSender(tx), UnboundedReceiver(rx))
+}
+
+fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
+ let inner = Arc::new(Inner {
+ buffer: buffer,
+ state: AtomicUsize::new(INIT_STATE),
+ message_queue: Queue::new(),
+ parked_queue: Queue::new(),
+ num_senders: AtomicUsize::new(1),
+ recv_task: Mutex::new(ReceiverTask {
+ unparked: false,
+ task: None,
+ }),
+ });
+
+ let tx = Sender {
+ inner: inner.clone(),
+ sender_task: Arc::new(Mutex::new(SenderTask::new())),
+ maybe_parked: false,
+ };
+
+ let rx = Receiver {
+ inner: inner,
+ };
+
+ (tx, rx)
+}
+
+/*
+ *
+ * ===== impl Sender =====
+ *
+ */
+
+impl<T> Sender<T> {
+ /// Attempts to send a message on this `Sender<T>` without blocking.
+ ///
+ /// This function, unlike `start_send`, is safe to call whether it's being
+ /// called on a task or not. Note that this function, however, will *not*
+ /// attempt to block the current task if the message cannot be sent.
+ ///
+ /// It is not recommended to call this function from inside of a future,
+ /// only from an external thread where you've otherwise arranged to be
+ /// notified when the channel is no longer full.
+ pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
+ // If the sender is currently blocked, reject the message
+ if !self.poll_unparked(false).is_ready() {
+ return Err(TrySendError {
+ kind: TrySendErrorKind::Full(msg),
+ });
+ }
+
+ // The channel has capacity to accept the message, so send it
+ self.do_send(Some(msg), false)
+ .map_err(|SendError(v)| {
+ TrySendError {
+ kind: TrySendErrorKind::Disconnected(v),
+ }
+ })
+ }
+
+ // Do the send without failing
+ // None means close
+ fn do_send(&mut self, msg: Option<T>, do_park: bool) -> Result<(), SendError<T>> {
+ // First, increment the number of messages contained by the channel.
+ // This operation will also atomically determine if the sender task
+ // should be parked.
+ //
+ // None is returned in the case that the channel has been closed by the
+ // receiver. This happens when `Receiver::close` is called or the
+ // receiver is dropped.
+ let park_self = match self.inc_num_messages(msg.is_none()) {
+ Some(park_self) => park_self,
+ None => {
+ // The receiver has closed the channel. Only abort if actually
+ // sending a message. It is important that the stream
+ // termination (None) is always sent. This technically means
+ // that it is possible for the queue to contain the following
+ // number of messages:
+ //
+ // num-senders + buffer + 1
+ //
+ if let Some(msg) = msg {
+ return Err(SendError(msg));
+ } else {
+ return Ok(());
+ }
+ }
+ };
+
+ // If the channel has reached capacity, then the sender task needs to
+ // be parked. This will send the task handle on the parked task queue.
+ //
+ // However, when `do_send` is called while dropping the `Sender`,
+ // `task::current()` can't be called safely. In this case, in order to
+ // maintain internal consistency, a blank message is pushed onto the
+ // parked task queue.
+ if park_self {
+ self.park(do_park);
+ }
+
+ self.queue_push_and_signal(msg);
+
+ Ok(())
+ }
+
+ // Do the send without parking current task.
+ //
+ // To be called from unbounded sender.
+ fn do_send_nb(&self, msg: T) -> Result<(), SendError<T>> {
+ match self.inc_num_messages(false) {
+ Some(park_self) => assert!(!park_self),
+ None => return Err(SendError(msg)),
+ };
+
+ self.queue_push_and_signal(Some(msg));
+
+ Ok(())
+ }
+
+ // Push message to the queue and signal to the receiver
+ fn queue_push_and_signal(&self, msg: Option<T>) {
+ // Push the message onto the message queue
+ self.inner.message_queue.push(msg);
+
+ // Signal to the receiver that a message has been enqueued. If the
+ // receiver is parked, this will unpark the task.
+ self.signal();
+ }
+
+ // Increment the number of queued messages. Returns if the sender should
+ // block.
+ fn inc_num_messages(&self, close: bool) -> Option<bool> {
+ let mut curr = self.inner.state.load(SeqCst);
+
+ loop {
+ let mut state = decode_state(curr);
+
+ // The receiver end closed the channel.
+ if !state.is_open {
+ return None;
+ }
+
+ // This probably is never hit? Odds are the process will run out of
+ // memory first. It may be worth to return something else in this
+ // case?
+ assert!(state.num_messages < MAX_CAPACITY, "buffer space exhausted; \
+ sending this messages would overflow the state");
+
+ state.num_messages += 1;
+
+ // The channel is closed by all sender handles being dropped.
+ if close {
+ state.is_open = false;
+ }
+
+ let next = encode_state(&state);
+ match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => {
+ // Block if the current number of pending messages has exceeded
+ // the configured buffer size
+ let park_self = match self.inner.buffer {
+ Some(buffer) => state.num_messages > buffer,
+ None => false,
+ };
+
+ return Some(park_self)
+ }
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+
+ // Signal to the receiver task that a message has been enqueued
+ fn signal(&self) {
+ // TODO
+ // This logic can probably be improved by guarding the lock with an
+ // atomic.
+ //
+ // Do this step first so that the lock is dropped when
+ // `unpark` is called
+ let task = {
+ let mut recv_task = self.inner.recv_task.lock().unwrap();
+
+ // If the receiver has already been unparked, then there is nothing
+ // more to do
+ if recv_task.unparked {
+ return;
+ }
+
+ // Setting this flag enables the receiving end to detect that
+ // an unpark event happened in order to avoid unnecessarily
+ // parking.
+ recv_task.unparked = true;
+ recv_task.task.take()
+ };
+
+ if let Some(task) = task {
+ task.notify();
+ }
+ }
+
+ fn park(&mut self, can_park: bool) {
+ // TODO: clean up internal state if the task::current will fail
+
+ let task = if can_park {
+ Some(task::current())
+ } else {
+ None
+ };
+
+ {
+ let mut sender = self.sender_task.lock().unwrap();
+ sender.task = task;
+ sender.is_parked = true;
+ }
+
+ // Send handle over queue
+ let t = self.sender_task.clone();
+ self.inner.parked_queue.push(t);
+
+ // Check to make sure we weren't closed after we sent our task on the
+ // queue
+ let state = decode_state(self.inner.state.load(SeqCst));
+ self.maybe_parked = state.is_open;
+ }
+
+ /// Polls the channel to determine if there is guaranteed to be capacity to send at least one
+ /// item without waiting.
+ ///
+ /// Returns `Ok(Async::Ready(_))` if there is sufficient capacity, or returns
+ /// `Ok(Async::NotReady)` if the channel is not guaranteed to have capacity. Returns
+ /// `Err(SendError(_))` if the receiver has been dropped.
+ ///
+ /// # Panics
+ ///
+ /// This method will panic if called from outside the context of a task or future.
+ pub fn poll_ready(&mut self) -> Poll<(), SendError<()>> {
+ let state = decode_state(self.inner.state.load(SeqCst));
+ if !state.is_open {
+ return Err(SendError(()));
+ }
+
+ Ok(self.poll_unparked(true))
+ }
+
+ /// Returns whether this channel is closed without needing a context.
+ pub fn is_closed(&self) -> bool {
+ !decode_state(self.inner.state.load(SeqCst)).is_open
+ }
+
+ fn poll_unparked(&mut self, do_park: bool) -> Async<()> {
+ // First check the `maybe_parked` variable. This avoids acquiring the
+ // lock in most cases
+ if self.maybe_parked {
+ // Get a lock on the task handle
+ let mut task = self.sender_task.lock().unwrap();
+
+ if !task.is_parked {
+ self.maybe_parked = false;
+ return Async::Ready(())
+ }
+
+ // At this point, an unpark request is pending, so there will be an
+ // unpark sometime in the future. We just need to make sure that
+ // the correct task will be notified.
+ //
+ // Update the task in case the `Sender` has been moved to another
+ // task
+ task.task = if do_park {
+ Some(task::current())
+ } else {
+ None
+ };
+
+ Async::NotReady
+ } else {
+ Async::Ready(())
+ }
+ }
+}
+
+impl<T> Sink for Sender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ // If the sender is currently blocked, reject the message before doing
+ // any work.
+ if !self.poll_unparked(true).is_ready() {
+ return Ok(AsyncSink::NotReady(msg));
+ }
+
+ // The channel has capacity to accept the message, so send it.
+ self.do_send(Some(msg), true)?;
+
+ Ok(AsyncSink::Ready)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ self.poll_ready()
+ // At this point, the value cannot be returned and `SendError`
+ // cannot be created with a `T` without breaking backwards
+ // comptibility. This means we cannot return an error.
+ //
+ // That said, there is also no guarantee that a `poll_complete`
+ // returning `Ok` implies the receiver sees the message.
+ .or_else(|_| Ok(().into()))
+ }
+
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T> UnboundedSender<T> {
+ /// Returns whether this channel is closed without needing a context.
+ pub fn is_closed(&self) -> bool {
+ self.0.is_closed()
+ }
+
+ /// Sends the provided message along this channel.
+ ///
+ /// This is an unbounded sender, so this function differs from `Sink::send`
+ /// by ensuring the return type reflects that the channel is always ready to
+ /// receive messages.
+ #[deprecated(note = "renamed to `unbounded_send`")]
+ #[doc(hidden)]
+ pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+ self.unbounded_send(msg)
+ }
+
+ /// Sends the provided message along this channel.
+ ///
+ /// This is an unbounded sender, so this function differs from `Sink::send`
+ /// by ensuring the return type reflects that the channel is always ready to
+ /// receive messages.
+ pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
+ self.0.do_send_nb(msg)
+ }
+}
+
+impl<T> Sink for UnboundedSender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ self.0.start_send(msg)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ self.0.poll_complete()
+ }
+
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<'a, T> Sink for &'a UnboundedSender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ self.0.do_send_nb(msg)?;
+ Ok(AsyncSink::Ready)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T> Clone for UnboundedSender<T> {
+ fn clone(&self) -> UnboundedSender<T> {
+ UnboundedSender(self.0.clone())
+ }
+}
+
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Sender<T> {
+ // Since this atomic op isn't actually guarding any memory and we don't
+ // care about any orderings besides the ordering on the single atomic
+ // variable, a relaxed ordering is acceptable.
+ let mut curr = self.inner.num_senders.load(SeqCst);
+
+ loop {
+ // If the maximum number of senders has been reached, then fail
+ if curr == self.inner.max_senders() {
+ panic!("cannot clone `Sender` -- too many outstanding senders");
+ }
+
+ debug_assert!(curr < self.inner.max_senders());
+
+ let next = curr + 1;
+ let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst);
+
+ // The ABA problem doesn't matter here. We only care that the
+ // number of senders never exceeds the maximum.
+ if actual == curr {
+ return Sender {
+ inner: self.inner.clone(),
+ sender_task: Arc::new(Mutex::new(SenderTask::new())),
+ maybe_parked: false,
+ };
+ }
+
+ curr = actual;
+ }
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ // Ordering between variables don't matter here
+ let prev = self.inner.num_senders.fetch_sub(1, SeqCst);
+
+ if prev == 1 {
+ let _ = self.do_send(None, false);
+ }
+ }
+}
+
+/*
+ *
+ * ===== impl Receiver =====
+ *
+ */
+
+impl<T> Receiver<T> {
+ /// Closes the receiving half
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ let mut curr = self.inner.state.load(SeqCst);
+
+ loop {
+ let mut state = decode_state(curr);
+
+ if !state.is_open {
+ break
+ }
+
+ state.is_open = false;
+
+ let next = encode_state(&state);
+ match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => break,
+ Err(actual) => curr = actual,
+ }
+ }
+
+ // Wake up any threads waiting as they'll see that we've closed the
+ // channel and will continue on their merry way.
+ loop {
+ match unsafe { self.inner.parked_queue.pop() } {
+ PopResult::Data(task) => {
+ task.lock().unwrap().notify();
+ }
+ PopResult::Empty => break,
+ PopResult::Inconsistent => thread::yield_now(),
+ }
+ }
+ }
+
+ fn next_message(&mut self) -> Async<Option<T>> {
+ // Pop off a message
+ loop {
+ match unsafe { self.inner.message_queue.pop() } {
+ PopResult::Data(msg) => {
+ // If there are any parked task handles in the parked queue,
+ // pop one and unpark it.
+ self.unpark_one();
+ // Decrement number of messages
+ self.dec_num_messages();
+
+ return Async::Ready(msg);
+ }
+ PopResult::Empty => {
+ // The queue is empty, return NotReady
+ return Async::NotReady;
+ }
+ PopResult::Inconsistent => {
+ // Inconsistent means that there will be a message to pop
+ // in a short time. This branch can only be reached if
+ // values are being produced from another thread, so there
+ // are a few ways that we can deal with this:
+ //
+ // 1) Spin
+ // 2) thread::yield_now()
+ // 3) task::current().unwrap() & return NotReady
+ //
+ // For now, thread::yield_now() is used, but it would
+ // probably be better to spin a few times then yield.
+ thread::yield_now();
+ }
+ }
+ }
+ }
+
+ // Unpark a single task handle if there is one pending in the parked queue
+ fn unpark_one(&mut self) {
+ loop {
+ match unsafe { self.inner.parked_queue.pop() } {
+ PopResult::Data(task) => {
+ task.lock().unwrap().notify();
+ return;
+ }
+ PopResult::Empty => {
+ // Queue empty, no task to wake up.
+ return;
+ }
+ PopResult::Inconsistent => {
+ // Same as above
+ thread::yield_now();
+ }
+ }
+ }
+ }
+
+ // Try to park the receiver task
+ fn try_park(&self) -> TryPark {
+ let curr = self.inner.state.load(SeqCst);
+ let state = decode_state(curr);
+
+ // If the channel is closed, then there is no need to park.
+ if state.is_closed() {
+ return TryPark::Closed;
+ }
+
+ // First, track the task in the `recv_task` slot
+ let mut recv_task = self.inner.recv_task.lock().unwrap();
+
+ if recv_task.unparked {
+ // Consume the `unpark` signal without actually parking
+ recv_task.unparked = false;
+ return TryPark::NotEmpty;
+ }
+
+ recv_task.task = Some(task::current());
+ TryPark::Parked
+ }
+
+ fn dec_num_messages(&self) {
+ let mut curr = self.inner.state.load(SeqCst);
+
+ loop {
+ let mut state = decode_state(curr);
+
+ state.num_messages -= 1;
+
+ let next = encode_state(&state);
+ match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
+ Ok(_) => break,
+ Err(actual) => curr = actual,
+ }
+ }
+ }
+}
+
+impl<T> Stream for Receiver<T> {
+ type Item = T;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<Option<T>, ()> {
+ loop {
+ // Try to read a message off of the message queue.
+ match self.next_message() {
+ Async::Ready(msg) => return Ok(Async::Ready(msg)),
+ Async::NotReady => {
+ // There are no messages to read, in this case, attempt to
+ // park. The act of parking will verify that the channel is
+ // still empty after the park operation has completed.
+ match self.try_park() {
+ TryPark::Parked => {
+ // The task was parked, and the channel is still
+ // empty, return NotReady.
+ return Ok(Async::NotReady);
+ }
+ TryPark::Closed => {
+ // The channel is closed, there will be no further
+ // messages.
+ return Ok(Async::Ready(None));
+ }
+ TryPark::NotEmpty => {
+ // A message has been sent while attempting to
+ // park. Loop again, the next iteration is
+ // guaranteed to get the message.
+ continue;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ // Drain the channel of all pending messages
+ self.close();
+
+ loop {
+ match self.next_message() {
+ Async::Ready(_) => {}
+ Async::NotReady => {
+ let curr = self.inner.state.load(SeqCst);
+ let state = decode_state(curr);
+
+ // If the channel is closed, then there is no need to park.
+ if state.is_closed() {
+ return;
+ }
+
+ // TODO: Spinning isn't ideal, it might be worth
+ // investigating using a condvar or some other strategy
+ // here. That said, if this case is hit, then another thread
+ // is about to push the value into the queue and this isn't
+ // the only spinlock in the impl right now.
+ thread::yield_now();
+ }
+ }
+ }
+ }
+}
+
+impl<T> UnboundedReceiver<T> {
+ /// Closes the receiving half
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ self.0.close();
+ }
+}
+
+impl<T> Stream for UnboundedReceiver<T> {
+ type Item = T;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<Option<T>, ()> {
+ self.0.poll()
+ }
+}
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a stream that proxies a stream on a separate `Executor`.
+/// Created through the `mpsc::spawn` function, this handle will produce
+/// the same values as the proxied stream, as they are produced in the executor,
+/// and uses a limited buffer to exert back-pressure on the remote stream.
+///
+/// If this handle is dropped, then the stream will no longer be polled and is
+/// scheduled to be dropped.
+pub struct SpawnHandle<Item, Error> {
+ rx: Receiver<Result<Item, Error>>,
+ _cancel_tx: oneshot::Sender<()>,
+}
+
+/// Type of future which `Executor` instances must be able to execute for `spawn`.
+pub struct Execute<S: Stream> {
+ inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
+ cancel_rx: oneshot::Receiver<()>,
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// At most `buffer + 1` elements will be buffered at a time. If the buffer
+/// is full, then `stream` will stop progressing until more space is available.
+/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
+ where S: Stream,
+ E: Executor<Execute<S>>
+{
+ let (cancel_tx, cancel_rx) = oneshot::channel();
+ let (tx, rx) = channel(buffer);
+ executor.execute(Execute {
+ inner: tx.send_all(resultstream::new(stream)),
+ cancel_rx: cancel_rx,
+ }).expect("failed to spawn stream");
+ SpawnHandle {
+ rx: rx,
+ _cancel_tx: cancel_tx,
+ }
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream, with unbounded buffering.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// An unbounded buffer is used, which means that values will be buffered as
+/// fast as `stream` can produce them, without any backpressure. Therefore, if
+/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
+/// potentially hog CPU resources.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn_unbounded<S, E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
+ where S: Stream,
+ E: Executor<Execute<S>>
+{
+ let (cancel_tx, cancel_rx) = oneshot::channel();
+ let (tx, rx) = channel2(None);
+ executor.execute(Execute {
+ inner: tx.send_all(resultstream::new(stream)),
+ cancel_rx: cancel_rx,
+ }).expect("failed to spawn stream");
+ SpawnHandle {
+ rx: rx,
+ _cancel_tx: cancel_tx,
+ }
+}
+
+impl<I, E> Stream for SpawnHandle<I, E> {
+ type Item = I;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<I>, E> {
+ match self.rx.poll() {
+ Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
+ Ok(Async::Ready(Some(Err(e)))) => Err(e),
+ Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ Err(_) => unreachable!("mpsc::Receiver should never return Err"),
+ }
+ }
+}
+
+impl<I, E> fmt::Debug for SpawnHandle<I, E> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("SpawnHandle")
+ .finish()
+ }
+}
+
+impl<S: Stream> Future for Execute<S> {
+ type Item = ();
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<(), ()> {
+ match self.cancel_rx.poll() {
+ Ok(Async::NotReady) => (),
+ _ => return Ok(Async::Ready(())),
+ }
+ match self.inner.poll() {
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ _ => Ok(Async::Ready(()))
+ }
+ }
+}
+
+impl<S: Stream> fmt::Debug for Execute<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Execute")
+ .finish()
+ }
+}
+
+/*
+ *
+ * ===== impl Inner =====
+ *
+ */
+
+impl<T> Inner<T> {
+ // The return value is such that the total number of messages that can be
+ // enqueued into the channel will never exceed MAX_CAPACITY
+ fn max_senders(&self) -> usize {
+ match self.buffer {
+ Some(buffer) => MAX_CAPACITY - buffer,
+ None => MAX_BUFFER,
+ }
+ }
+}
+
+unsafe impl<T: Send> Send for Inner<T> {}
+unsafe impl<T: Send> Sync for Inner<T> {}
+
+impl State {
+ fn is_closed(&self) -> bool {
+ !self.is_open && self.num_messages == 0
+ }
+}
+
+/*
+ *
+ * ===== Helpers =====
+ *
+ */
+
+fn decode_state(num: usize) -> State {
+ State {
+ is_open: num & OPEN_MASK == OPEN_MASK,
+ num_messages: num & MAX_CAPACITY,
+ }
+}
+
+fn encode_state(state: &State) -> usize {
+ let mut num = state.num_messages;
+
+ if state.is_open {
+ num |= OPEN_MASK;
+ }
+
+ num
+}
diff --git a/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs b/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs
new file mode 100644
index 0000000000..9ff6bcf873
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sync/mpsc/queue.rs
@@ -0,0 +1,151 @@
+/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of Dmitry Vyukov.
+ */
+
+//! A mostly lock-free multi-producer, single consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between threads, and is also used as the
+//! building block of channels in rust.
+//!
+//! Note that the current implementation of this queue has a caveat of the `pop`
+//! method, and see the method for more information about it. Due to this
+//! caveat, this queue may not be appropriate for all use-cases.
+
+// http://www.1024cores.net/home/lock-free-algorithms
+// /queues/non-intrusive-mpsc-node-based-queue
+
+// NOTE: this implementation is lifted from the standard library and only
+// slightly modified
+
+pub use self::PopResult::*;
+use std::prelude::v1::*;
+
+use std::cell::UnsafeCell;
+use std::ptr;
+use std::sync::atomic::{AtomicPtr, Ordering};
+
+/// A result of the `pop` function.
+pub enum PopResult<T> {
+    /// Some data has been popped
+    Data(T),
+    /// The queue is empty
+    Empty,
+    /// The queue is in an inconsistent state. Popping data should succeed, but
+    /// some pushers have yet to make enough progress in order to allow a pop
+    /// to succeed. It is recommended that a pop() occur "in the near future"
+    /// in order to see if the sender has made progress or not
+    Inconsistent,
+}
+
+/// A single heap-allocated link in the queue. The "stub" node (created with
+/// `value: None`) carries no data; real nodes always hold `Some(value)`.
+#[derive(Debug)]
+struct Node<T> {
+    next: AtomicPtr<Node<T>>,
+    value: Option<T>,
+}
+
+/// The multi-producer single-consumer structure. This is not cloneable, but it
+/// may be safely shared so long as it is guaranteed that there is only one
+/// popper at a time (many pushers are allowed).
+#[derive(Debug)]
+pub struct Queue<T> {
+    // Producer end: always points at the most recently pushed node; pushers
+    // claim it with an atomic swap.
+    head: AtomicPtr<Node<T>>,
+    // Consumer end: guarded only by the "single popper" contract rather than
+    // a lock, hence `UnsafeCell` instead of an atomic.
+    tail: UnsafeCell<*mut Node<T>>,
+}
+
+// SAFETY: values of `T` are handed out by value only, producers touch `head`
+// solely through atomics, and `tail` is protected by the documented
+// single-popper rule — so sharing requires only `T: Send`.
+unsafe impl<T: Send> Send for Queue<T> { }
+unsafe impl<T: Send> Sync for Queue<T> { }
+
+impl<T> Node<T> {
+    /// Allocates a node on the heap and returns an owning raw pointer.
+    ///
+    /// # Safety
+    ///
+    /// The allocation itself is safe; the caller assumes responsibility for
+    /// eventually freeing the node (see `Queue::pop` and `Drop for Queue`).
+    unsafe fn new(v: Option<T>) -> *mut Node<T> {
+        Box::into_raw(Box::new(Node {
+            next: AtomicPtr::new(ptr::null_mut()),
+            value: v,
+        }))
+    }
+}
+
+impl<T> Queue<T> {
+    /// Creates a new queue that is safe to share among multiple producers and
+    /// one consumer.
+    pub fn new() -> Queue<T> {
+        // A single empty "stub" node pointed at by both head and tail
+        // encodes the empty queue.
+        let stub = unsafe { Node::new(None) };
+        Queue {
+            head: AtomicPtr::new(stub),
+            tail: UnsafeCell::new(stub),
+        }
+    }
+
+    /// Pushes a new value onto this queue.
+    pub fn push(&self, t: T) {
+        unsafe {
+            let n = Node::new(Some(t));
+            // Publish `n` as the new head first, then link the previous head
+            // to it. Between the swap and the store a consumer can observe
+            // the `Inconsistent` state (old head's `next` still null).
+            let prev = self.head.swap(n, Ordering::AcqRel);
+            (*prev).next.store(n, Ordering::Release);
+        }
+    }
+
+    /// Pops some data from this queue.
+    ///
+    /// Note that the current implementation means that this function cannot
+    /// return `Option<T>`. It is possible for this queue to be in an
+    /// inconsistent state where many pushes have succeeded and completely
+    /// finished, but pops cannot return `Some(t)`. This inconsistent state
+    /// happens when a pusher is preempted at an inopportune moment.
+    ///
+    /// This inconsistent state means that this queue does indeed have data, but
+    /// it does not currently have access to it at this time.
+    ///
+    /// This function is unsafe because only one thread can call it at a time.
+    pub unsafe fn pop(&self) -> PopResult<T> {
+        let tail = *self.tail.get();
+        let next = (*tail).next.load(Ordering::Acquire);
+
+        if !next.is_null() {
+            // Advance the tail, take the value out of `next` (which becomes
+            // the new stub), and free the old stub node.
+            *self.tail.get() = next;
+            assert!((*tail).value.is_none());
+            assert!((*next).value.is_some());
+            let ret = (*next).value.take().unwrap();
+            drop(Box::from_raw(tail));
+            return Data(ret);
+        }
+
+        // No linked successor: either the queue is truly empty (head still
+        // equals tail) or a pusher has swapped `head` but not yet stored the
+        // `next` link, leaving the queue momentarily inconsistent.
+        if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
+    }
+}
+
+impl<T> Drop for Queue<T> {
+    fn drop(&mut self) {
+        unsafe {
+            // Walk the list from the consumer end, freeing every node
+            // (including the stub). `&mut self` guarantees no concurrent
+            // pushers or poppers remain, so `Relaxed` loads suffice.
+            let mut cur = *self.tail.get();
+            while !cur.is_null() {
+                let next = (*cur).next.load(Ordering::Relaxed);
+                drop(Box::from_raw(cur));
+                cur = next;
+            }
+        }
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/sync/oneshot.rs b/third_party/rust/futures-0.1.31/src/sync/oneshot.rs
new file mode 100644
index 0000000000..3a9d8efdca
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/sync/oneshot.rs
@@ -0,0 +1,611 @@
+//! A one-shot, futures-aware channel
+
+use std::sync::Arc;
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering::SeqCst;
+use std::error::Error;
+use std::fmt;
+
+use {Future, Poll, Async};
+use future::{lazy, Lazy, Executor, IntoFuture};
+use lock::Lock;
+use task::{self, Task};
+
+/// A future representing the completion of a computation happening elsewhere in
+/// memory.
+///
+/// This is created by the `oneshot::channel` function.
+#[must_use = "futures do nothing unless polled"]
+#[derive(Debug)]
+pub struct Receiver<T> {
+    // State shared with the `Sender` half; see `Inner` for the protocol.
+    inner: Arc<Inner<T>>,
+}
+
+/// Represents the completion half of a oneshot through which the result of a
+/// computation is signaled.
+///
+/// This is created by the `oneshot::channel` function.
+#[derive(Debug)]
+pub struct Sender<T> {
+    // State shared with the `Receiver` half; see `Inner` for the protocol.
+    inner: Arc<Inner<T>>,
+}
+
+/// Internal state of the `Receiver`/`Sender` pair above. This is all used as
+/// the internal synchronization between the two for send/recv operations.
+#[derive(Debug)]
+struct Inner<T> {
+    /// Indicates whether this oneshot is complete yet. This is filled in both
+    /// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it
+    /// appropriately.
+    ///
+    /// For `Receiver`, if this is `true`, then it's guaranteed that `data` is
+    /// unlocked and ready to be inspected.
+    ///
+    /// For `Sender` if this is `true` then the oneshot has gone away and it
+    /// can return ready from `poll_cancel`.
+    complete: AtomicBool,
+
+    /// The actual data being transferred as part of this `Receiver`. This is
+    /// filled in by `Sender::complete` and read by `Receiver::poll`.
+    ///
+    /// Note that this is protected by `Lock`, but it is in theory safe to
+    /// replace with an `UnsafeCell` as it's actually protected by `complete`
+    /// above. I wouldn't recommend doing this, however, unless someone is
+    /// supremely confident in the various atomic orderings here and there.
+    data: Lock<Option<T>>,
+
+    /// Field to store the task which is blocked in `Receiver::poll`.
+    ///
+    /// This is filled in when a oneshot is polled but not ready yet. Note that
+    /// the `Lock` here, unlike in `data` above, is important to resolve races.
+    /// Both the `Receiver` and the `Sender` halves understand that if they
+    /// can't acquire the lock then some important interference is happening.
+    rx_task: Lock<Option<Task>>,
+
+    /// Like `rx_task` above, except for the task blocked in
+    /// `Sender::poll_cancel`. As with `rx_task` — and unlike `data` — this
+    /// `Lock` cannot be replaced with an `UnsafeCell`, since the lock itself
+    /// is what resolves races between the two halves.
+    tx_task: Lock<Option<Task>>,
+}
+
+/// Creates a new futures-aware, one-shot channel.
+///
+/// This function is similar to Rust's channels found in the standard library.
+/// Two halves are returned, the first of which is a `Sender` handle, used to
+/// signal the end of a computation and provide its value. The second half is a
+/// `Receiver` which implements the `Future` trait, resolving to the value that
+/// was given to the `Sender` handle.
+///
+/// Each half can be separately owned and sent across threads/tasks.
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+/// use futures::sync::oneshot;
+/// use futures::*;
+///
+/// let (p, c) = oneshot::channel::<i32>();
+///
+/// thread::spawn(|| {
+/// c.map(|i| {
+/// println!("got: {}", i);
+/// }).wait();
+/// });
+///
+/// p.send(3).unwrap();
+/// ```
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+    // Both halves share one reference-counted `Inner`, which carries all of
+    // the synchronization state between them.
+    let inner = Arc::new(Inner::new());
+    let sender = Sender { inner: inner.clone() };
+    let receiver = Receiver { inner: inner };
+    (sender, receiver)
+}
+
+impl<T> Inner<T> {
+    // All slots start empty; `complete` stays false until either half drops
+    // or the receiver closes.
+    fn new() -> Inner<T> {
+        Inner {
+            complete: AtomicBool::new(false),
+            data: Lock::new(None),
+            rx_task: Lock::new(None),
+            tx_task: Lock::new(None),
+        }
+    }
+
+    // Attempts to deliver `t` to the receiver; returns it back in `Err` if
+    // the receiving end has already gone away or closed.
+    fn send(&self, t: T) -> Result<(), T> {
+        if self.complete.load(SeqCst) {
+            return Err(t)
+        }
+
+        // Note that this lock acquisition may fail if the receiver
+        // is closed and sets the `complete` flag to true, whereupon
+        // the receiver may call `poll()`.
+        if let Some(mut slot) = self.data.try_lock() {
+            assert!(slot.is_none());
+            *slot = Some(t);
+            drop(slot);
+
+            // If the receiver called `close()` between the check at the
+            // start of the function, and the lock being released, then
+            // the receiver may not be around to receive it, so try to
+            // pull it back out.
+            if self.complete.load(SeqCst) {
+                // If lock acquisition fails, then receiver is actually
+                // receiving it, so we're good.
+                if let Some(mut slot) = self.data.try_lock() {
+                    if let Some(t) = slot.take() {
+                        return Err(t);
+                    }
+                }
+            }
+            Ok(())
+        } else {
+            // Must have been closed
+            Err(t)
+        }
+    }
+
+    fn poll_cancel(&self) -> Poll<(), ()> {
+        // Fast path up first, just read the flag and see if our other half is
+        // gone. This flag is set both in our destructor and the oneshot
+        // destructor, but our destructor hasn't run yet so if it's set then the
+        // oneshot is gone.
+        if self.complete.load(SeqCst) {
+            return Ok(Async::Ready(()))
+        }
+
+        // If our other half is not gone then we need to park our current task
+        // and move it into the `notify_cancel` slot to get notified when it's
+        // actually gone.
+        //
+        // If `try_lock` fails, then the `Receiver` is in the process of using
+        // it, so we can deduce that it's now in the process of going away and
+        // hence we're canceled. If it succeeds then we just store our handle.
+        //
+        // Crucially we then check `oneshot_gone` *again* before we return.
+        // While we were storing our handle inside `notify_cancel` the `Receiver`
+        // may have been dropped. The first thing it does is set the flag, and
+        // if it fails to acquire the lock it assumes that we'll see the flag
+        // later on. So... we then try to see the flag later on!
+        let handle = task::current();
+        match self.tx_task.try_lock() {
+            Some(mut p) => *p = Some(handle),
+            None => return Ok(Async::Ready(())),
+        }
+        if self.complete.load(SeqCst) {
+            Ok(Async::Ready(()))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    // Non-blocking check used by `Sender::is_canceled`.
+    fn is_canceled(&self) -> bool {
+        self.complete.load(SeqCst)
+    }
+
+    fn drop_tx(&self) {
+        // Flag that we're a completed `Sender` and try to wake up a receiver.
+        // Whether or not we actually stored any data will get picked up and
+        // translated to either an item or cancellation.
+        //
+        // Note that if we fail to acquire the `rx_task` lock then that means
+        // we're in one of two situations:
+        //
+        // 1. The receiver is trying to block in `poll`
+        // 2. The receiver is being dropped
+        //
+        // In the first case it'll check the `complete` flag after it's done
+        // blocking to see if it succeeded. In the latter case we don't need to
+        // wake up anyone anyway. So in both cases it's ok to ignore the `None`
+        // case of `try_lock` and bail out.
+        //
+        // The first case crucially depends on `Lock` using `SeqCst` ordering
+        // under the hood. If it instead used `Release` / `Acquire` ordering,
+        // then it would not necessarily synchronize with `inner.complete`
+        // and deadlock might be possible, as was observed in
+        // https://github.com/rust-lang-nursery/futures-rs/pull/219.
+        self.complete.store(true, SeqCst);
+        if let Some(mut slot) = self.rx_task.try_lock() {
+            if let Some(task) = slot.take() {
+                drop(slot);
+                task.notify();
+            }
+        }
+    }
+
+    fn close_rx(&self) {
+        // Flag our completion and then attempt to wake up the sender if it's
+        // blocked. See comments in `drop` below for more info
+        self.complete.store(true, SeqCst);
+        if let Some(mut handle) = self.tx_task.try_lock() {
+            if let Some(task) = handle.take() {
+                drop(handle);
+                task.notify()
+            }
+        }
+    }
+
+    fn try_recv(&self) -> Result<Option<T>, Canceled> {
+        // If we're complete, either `::close_rx` or `::drop_tx` was called.
+        // We can assume a successful send if data is present.
+        if self.complete.load(SeqCst) {
+            if let Some(mut slot) = self.data.try_lock() {
+                if let Some(data) = slot.take() {
+                    // `into()` is effectively a no-op here (`T -> T`).
+                    return Ok(Some(data.into()));
+                }
+            }
+            // Should there be a different error value or a panic in the case
+            // where `self.data.try_lock() == None`?
+            Err(Canceled)
+        } else {
+            Ok(None)
+        }
+    }
+
+    fn recv(&self) -> Poll<T, Canceled> {
+        let mut done = false;
+
+        // Check to see if some data has arrived. If it hasn't then we need to
+        // block our task.
+        //
+        // Note that the acquisition of the `rx_task` lock might fail below, but
+        // the only situation where this can happen is during `Sender::drop`
+        // when we are indeed completed already. If that's happening then we
+        // know we're completed so keep going.
+        if self.complete.load(SeqCst) {
+            done = true;
+        } else {
+            let task = task::current();
+            match self.rx_task.try_lock() {
+                Some(mut slot) => *slot = Some(task),
+                None => done = true,
+            }
+        }
+
+        // If we're `done` via one of the paths above, then look at the data and
+        // figure out what the answer is. If, however, we stored `rx_task`
+        // successfully above we need to check again if we're completed in case
+        // a message was sent while `rx_task` was locked and couldn't notify us
+        // otherwise.
+        //
+        // If we're not done, and we're not complete, though, then we've
+        // successfully blocked our task and we return `NotReady`.
+        if done || self.complete.load(SeqCst) {
+            // If taking the lock fails, the sender will realise that the we're
+            // `done` when it checks the `complete` flag on the way out, and will
+            // treat the send as a failure.
+            if let Some(mut slot) = self.data.try_lock() {
+                if let Some(data) = slot.take() {
+                    // `into()` lifts the value into `Async::Ready(data)`.
+                    return Ok(data.into());
+                }
+            }
+            Err(Canceled)
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn drop_rx(&self) {
+        // Indicate to the `Sender` that we're done, so any future calls to
+        // `poll_cancel` are weeded out.
+        self.complete.store(true, SeqCst);
+
+        // If we've blocked a task then there's no need for it to stick around,
+        // so we need to drop it. If this lock acquisition fails, though, then
+        // it's just because our `Sender` is trying to take the task, so we
+        // let them take care of that.
+        if let Some(mut slot) = self.rx_task.try_lock() {
+            let task = slot.take();
+            drop(slot);
+            drop(task);
+        }
+
+        // Finally, if our `Sender` wants to get notified of us going away, it
+        // would have stored something in `tx_task`. Here we try to peel that
+        // out and unpark it.
+        //
+        // Note that the `try_lock` here may fail, but only if the `Sender` is
+        // in the process of filling in the task. If that happens then we
+        // already flagged `complete` and they'll pick that up above.
+        if let Some(mut handle) = self.tx_task.try_lock() {
+            if let Some(task) = handle.take() {
+                drop(handle);
+                task.notify()
+            }
+        }
+    }
+}
+
+impl<T> Sender<T> {
+    // Old name for `send`, kept behind the `with-deprecated` feature for
+    // backwards compatibility; the `Result` from `send` is deliberately
+    // discarded.
+    #[deprecated(note = "renamed to `send`", since = "0.1.11")]
+    #[doc(hidden)]
+    #[cfg(feature = "with-deprecated")]
+    pub fn complete(self, t: T) {
+        drop(self.send(t));
+    }
+
+    /// Completes this oneshot with a successful result.
+    ///
+    /// This function will consume `self` and indicate to the other end, the
+    /// `Receiver`, that the value provided is the result of the computation this
+    /// represents.
+    ///
+    /// If the value is successfully enqueued for the remote end to receive,
+    /// then `Ok(())` is returned. If the receiving end was deallocated before
+    /// this function was called, however, then `Err` is returned with the value
+    /// provided.
+    pub fn send(self, t: T) -> Result<(), T> {
+        self.inner.send(t)
+    }
+
+    /// Polls this `Sender` half to detect whether the `Receiver` this has
+    /// paired with has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// If `Ready` is returned then it means that the `Receiver` has disappeared
+    /// and the result this `Sender` would otherwise produce should no longer
+    /// be produced.
+    ///
+    /// If `NotReady` is returned then the `Receiver` is still alive and may be
+    /// able to receive a message if sent. The current task, however, is
+    /// scheduled to receive a notification if the corresponding `Receiver` goes
+    /// away.
+    ///
+    /// # Panics
+    ///
+    /// Like `Future::poll`, this function will panic if it's not called from
+    /// within the context of a task. In other words, this should only ever be
+    /// called from inside another future.
+    ///
+    /// If `Ok(Ready)` is returned then the associated `Receiver` has been
+    /// dropped, which means any work required for sending should be canceled.
+    ///
+    /// If you're calling this function from a context that does not have a
+    /// task, then you can use the `is_canceled` API instead.
+    pub fn poll_cancel(&mut self) -> Poll<(), ()> {
+        self.inner.poll_cancel()
+    }
+
+    /// Tests to see whether this `Sender`'s corresponding `Receiver`
+    /// has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// Note that this function is intended to *not* be used in the context of a
+    /// future. If you're implementing a future you probably want to call the
+    /// `poll_cancel` function which will block the current task if the
+    /// cancellation hasn't happened yet. This can be useful when working on a
+    /// non-futures related thread, though, which would otherwise panic if
+    /// `poll_cancel` were called.
+    pub fn is_canceled(&self) -> bool {
+        self.inner.is_canceled()
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    fn drop(&mut self) {
+        // Flag completion and wake a receiver blocked in `poll`, if any.
+        self.inner.drop_tx()
+    }
+}
+
+/// Error returned from a `Receiver<T>` whenever the corresponding `Sender<T>`
+/// is dropped.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Canceled;
+
+impl fmt::Display for Canceled {
+    // Fixed message; no formatting arguments needed.
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.write_str("oneshot canceled")
+    }
+}
+
+impl Error for Canceled {
+    fn description(&self) -> &str {
+        "oneshot canceled"
+    }
+}
+
+impl<T> Receiver<T> {
+    /// Gracefully close this receiver, preventing sending any future messages.
+    ///
+    /// Any `send` operation which happens after this method returns is
+    /// guaranteed to fail. Once this method is called the normal `poll` method
+    /// can be used to determine whether a message was actually sent or not. If
+    /// `Canceled` is returned from `poll` then no message was sent.
+    pub fn close(&mut self) {
+        self.inner.close_rx()
+    }
+
+    /// Attempts to receive a message outside of the context of a task.
+    ///
+    /// Useful when a task context is not available, such as within a
+    /// `Drop` impl.
+    ///
+    /// Does not schedule a task wakeup or have any other side effects.
+    ///
+    /// A return value of `None` must be considered immediately stale (out of
+    /// date) unless `close` has been called first.
+    ///
+    /// Returns an error if the sender was dropped.
+    pub fn try_recv(&mut self) -> Result<Option<T>, Canceled> {
+        self.inner.try_recv()
+    }
+}
+
+impl<T> Future for Receiver<T> {
+    type Item = T;
+    type Error = Canceled;
+
+    // Resolves to the sent value, or `Canceled` if the sender went away
+    // without sending (see `Inner::recv`).
+    fn poll(&mut self) -> Poll<T, Canceled> {
+        self.inner.recv()
+    }
+}
+
+impl<T> Drop for Receiver<T> {
+    fn drop(&mut self) {
+        // Flag completion and wake a sender blocked in `poll_cancel`, if any.
+        self.inner.drop_rx()
+    }
+}
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a future representing the completion of a different future on
+/// a separate executor. Created through the `oneshot::spawn` function this
+/// handle will resolve when the future provided to `spawn` resolves on the
+/// `Executor` instance provided to that function.
+///
+/// If this handle is dropped then the future will automatically no longer be
+/// polled and is scheduled to be dropped. This can be canceled with the
+/// `forget` function, however.
+pub struct SpawnHandle<T, E> {
+    // Receiver side of the oneshot that carries the spawned future's result.
+    rx: Arc<ExecuteInner<Result<T, E>>>,
+}
+
+// State shared between a `SpawnHandle` and its `Execute` task.
+struct ExecuteInner<T> {
+    // Oneshot state used to deliver the spawned future's result.
+    inner: Inner<T>,
+    // Set by `SpawnHandle::forget` so the `Execute` task keeps polling the
+    // future even after the handle is dropped.
+    keep_running: AtomicBool,
+}
+
+/// Type of future which `Execute` instances below must be able to spawn.
+/// Drives the inner future `F` and delivers its result to the paired
+/// `SpawnHandle` (see the `Future` impl below).
+pub struct Execute<F: Future> {
+    // The user's future being driven on the executor.
+    future: F,
+    // Sender side of the result channel shared with the `SpawnHandle`.
+    tx: Arc<ExecuteInner<Result<F::Item, F::Error>>>,
+}
+
+/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the completion of the future.
+///
+/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
+/// When `future` completes on `executor` then the `SpawnHandle` will itself be
+/// resolved. Internally `SpawnHandle` contains a `oneshot` channel and is
+/// thus safe to send across threads.
+///
+/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
+/// not desired then the `SpawnHandle::forget` function can be used to continue
+/// running the future to completion.
+///
+/// # Panics
+///
+/// This function will panic if the instance of `Spawn` provided is unable to
+/// spawn the `future` provided.
+///
+/// If the provided instance of `Spawn` does not actually run `future` to
+/// completion, then the returned handle may panic when polled. Typically this
+/// is not a problem, though, as most instances of `Spawn` will run futures to
+/// completion.
+///
+/// Note that the returned future will likely panic if the `futures` provided
+/// panics. If a future running on an executor panics that typically means that
+/// the executor drops the future, which falls into the above case of not
+/// running the future to completion essentially.
+pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
+    where F: Future,
+          E: Executor<Execute<F>>,
+{
+    // Shared state: a oneshot `Inner` for the result, plus the flag that
+    // `SpawnHandle::forget` sets to keep the future running after drop.
+    let data = Arc::new(ExecuteInner {
+        inner: Inner::new(),
+        keep_running: AtomicBool::new(false),
+    });
+    // Hand the wrapped future to the executor; panics if it refuses.
+    executor.execute(Execute {
+        future: future,
+        tx: data.clone(),
+    }).expect("failed to spawn future");
+    SpawnHandle { rx: data }
+}
+
+/// Spawns a function `f` onto the `Spawn` instance provided `s`.
+///
+/// For more information see the `spawn` function in this module. This function
+/// is just a thin wrapper around `spawn` which will execute the closure on the
+/// executor provided and then complete the future that the closure returns.
+pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+          E: Executor<Execute<Lazy<F, R>>>,
+{
+    // `lazy` defers running `f` until the executor first polls the task.
+    spawn(lazy(f), executor)
+}
+
+impl<T, E> SpawnHandle<T, E> {
+    /// Drop this future without canceling the underlying future.
+    ///
+    /// When `SpawnHandle` is dropped, the spawned future will be canceled as
+    /// well if the future hasn't already resolved. This function can be used
+    /// when you want to drop this future but keep executing the underlying
+    /// future.
+    pub fn forget(self) {
+        // Set the flag, then let `self` drop normally; the `Execute` task
+        // checks `keep_running` before bailing out on cancellation.
+        self.rx.keep_running.store(true, SeqCst);
+    }
+}
+
+impl<T, E> Future for SpawnHandle<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        match self.rx.inner.recv() {
+            // The spawned future succeeded; `t.into()` lifts the value into
+            // `Async::Ready(t)`.
+            Ok(Async::Ready(Ok(t))) => Ok(t.into()),
+            // The spawned future failed; forward its error.
+            Ok(Async::Ready(Err(e))) => Err(e),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            // The executor dropped the future without completing it — see
+            // the panic caveat documented on `spawn`.
+            Err(_) => panic!("future was canceled before completion"),
+        }
+    }
+}
+
+// Opaque Debug: no fields are printed, so `T`/`E` need no `Debug` bounds.
+// Dropping the previous `T: Debug, E: Debug` bounds is backward compatible
+// (strictly more impls) and matches the unbounded `Debug` impl for the
+// mpsc `SpawnHandle`.
+impl<T, E> fmt::Debug for SpawnHandle<T, E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle")
+            .finish()
+    }
+}
+
+impl<T, E> Drop for SpawnHandle<T, E> {
+    fn drop(&mut self) {
+        // Signals cancellation to the `Execute` task; the future stops being
+        // polled unless `forget` set `keep_running` first.
+        self.rx.inner.drop_rx();
+    }
+}
+
+impl<F: Future> Future for Execute<F> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        // If we're canceled then we may want to bail out early.
+        //
+        // If the `forget` function was called, though, then we keep going.
+        if self.tx.inner.poll_cancel().unwrap().is_ready() {
+            if !self.tx.keep_running.load(SeqCst) {
+                return Ok(().into())
+            }
+        }
+
+        // Drive the wrapped future; stay pending while it is pending,
+        // otherwise capture its outcome (success or error) for delivery.
+        let result = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Ok(Async::Ready(t)) => Ok(t),
+            Err(e) => Err(e),
+        };
+        // Ignore send failure: it only means the handle has gone away and
+        // nobody is listening for the result.
+        drop(self.tx.inner.send(result));
+        Ok(().into())
+    }
+}
+
+// Shows the wrapped future, hence the `F: fmt::Debug` bound here (unlike the
+// field-less `SpawnHandle` Debug impl above).
+impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+            .field("future", &self.future)
+            .finish()
+    }
+}
+
+impl<F: Future> Drop for Execute<F> {
+    fn drop(&mut self) {
+        // Ensure a waiting `SpawnHandle` observes cancellation if the
+        // executor drops this task before the future completes.
+        self.tx.inner.drop_tx();
+    }
+}
diff --git a/third_party/rust/futures-0.1.31/src/task.rs b/third_party/rust/futures-0.1.31/src/task.rs
new file mode 100644
index 0000000000..f83f2c4719
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task.rs
@@ -0,0 +1,46 @@
+//! Tasks used to drive a future computation
+//!
+//! It's intended over time a particular operation (such as servicing an HTTP
+//! request) will involve many futures. This entire operation, however, can be
+//! thought of as one unit, as the entire result is essentially just moving
+//! through one large state machine.
+//!
+//! A "task" is the unit of abstraction for what is driving this state machine
+//! and tree of futures forward. A task is used to poll futures and schedule
+//! futures with, and has utilities for sharing data between tasks and handles
+//! for notifying when a future is ready. Each task also has its own set of
+//! task-local data generated by `task_local!`.
+//!
+//! Note that libraries typically should not manage tasks themselves, but rather
+//! leave that to event loops and other "executors" (see the `executor` module),
+//! or by using the `wait` method to create and execute a task directly on the
+//! current thread.
+//!
+//! More information about the task model can be found [online at tokio.rs].
+//!
+//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-model/
+//!
+//! ## Functions
+//!
+//! There is an important bare function in this module: `current`. The
+//! `current` function returns a handle to the currently running task, panicking
+//! if one isn't present. This handle is then used to later notify the task that
+//! it's ready to make progress through the `Task::notify` method.
+
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
+#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
+#[allow(deprecated)]
+pub use task_impl::{Spawn, spawn, Unpark, Executor, Run, park};
+
+pub use task_impl::{Task, AtomicTask, current, init, is_in_task};
+
+#[allow(deprecated)]
+#[cfg(feature = "use_std")]
+pub use task_impl::{LocalKey, with_unpark_event, UnparkEvent, EventSet};
+
+#[doc(hidden)]
+#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
+#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
+#[allow(deprecated)]
+pub use task_impl::TaskRc;
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs b/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs
new file mode 100644
index 0000000000..d73954e617
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/atomic_task.rs
@@ -0,0 +1,283 @@
+use super::Task;
+
+use core::fmt;
+use core::cell::UnsafeCell;
+use core::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering::{Acquire, Release, AcqRel};
+
+/// A synchronization primitive for task notification.
+///
+/// `AtomicTask` will coordinate concurrent notifications with the consumer
+/// potentially "updating" the underlying task to notify. This is useful in
+/// scenarios where a computation completes in another thread and wants to
+/// notify the consumer, but the consumer is in the process of being migrated to
+/// a new logical task.
+///
+/// Consumers should call `register` before checking the result of a computation
+/// and producers should call `notify` after producing the computation (this
+/// differs from the usual `thread::park` pattern). It is also permitted for
+/// `notify` to be called **before** `register`. This results in a no-op.
+///
+/// A single `AtomicTask` may be reused for any number of calls to `register` or
+/// `notify`.
+///
+/// `AtomicTask` does not provide any memory ordering guarantees, as such the
+/// user should use caution and use other synchronization primitives to guard
+/// the result of the underlying computation.
+pub struct AtomicTask {
+ state: AtomicUsize,
+ task: UnsafeCell<Option<Task>>,
+}
+
+// `AtomicTask` is a multi-consumer, single-producer transfer cell. The cell
+// stores a `Task` value produced by calls to `register` and many threads can
+// race to take the task (to notify it) by calling `notify`.
+//
+// If a new `Task` instance is produced by calling `register` before an existing
+// one is consumed, then the existing one is overwritten.
+//
+// While `AtomicTask` is single-producer, the implementation ensures memory
+// safety. In the event of concurrent calls to `register`, there will be a
+// single winner whose task will get stored in the cell. The losers will not
+// have their tasks notified. As such, callers should ensure to add
+// synchronization to calls to `register`.
+//
+// The implementation uses a single `AtomicUsize` value to coordinate access to
+// the `Task` cell. There are two bits that are operated on independently. These
+// are represented by `REGISTERING` and `NOTIFYING`.
+//
+// The `REGISTERING` bit is set when a producer enters the critical section. The
+// `NOTIFYING` bit is set when a consumer enters the critical section. Neither
+// bit being set is represented by `WAITING`.
+//
+// A thread obtains an exclusive lock on the task cell by transitioning the
+// state from `WAITING` to `REGISTERING` or `NOTIFYING`, depending on the
+// operation the thread wishes to perform. When this transition is made, it is
+// guaranteed that no other thread will access the task cell.
+//
+// # Registering
+//
+// On a call to `register`, an attempt to transition the state from WAITING to
+// REGISTERING is made. On success, the caller obtains a lock on the task cell.
+//
+// If the lock is obtained, then the thread sets the task cell to the task
+// provided as an argument. Then it attempts to transition the state back from
+// `REGISTERING` -> `WAITING`.
+//
+// If this transition is successful, then the registering process is complete
+// and the next call to `notify` will observe the task.
+//
+// If the transition fails, then there was a concurrent call to `notify` that
+// was unable to access the task cell (due to the registering thread holding the
+// lock). To handle this, the registering thread removes the task it just set
+// from the cell and calls `notify` on it. This call to notify represents the
+// attempt to notify by the other thread (that set the `NOTIFYING` bit). The
+// state is then transitioned from `REGISTERING | NOTIFYING` back to `WAITING`.
+// This transition must succeed because, at this point, the state cannot be
+// transitioned by another thread.
+//
+// # Notifying
+//
+// On a call to `notify`, an attempt to transition the state from `WAITING` to
+// `NOTIFYING` is made. On success, the caller obtains a lock on the task cell.
+//
+// If the lock is obtained, then the thread takes ownership of the current value
+// in the task cell, and calls `notify` on it. The state is then transitioned
+// back to `WAITING`. This transition must succeed as, at this point, the state
+// cannot be transitioned by another thread.
+//
+// If the thread is unable to obtain the lock, the `NOTIFYING` bit is still set.
+// This is because it has either been set by the current thread but the previous
+// value included the `REGISTERING` bit **or** a concurrent thread is in the
+// `NOTIFYING` critical section. Either way, no action must be taken.
+//
+// If the current thread is the only concurrent call to `notify` and another
+// thread is in the `register` critical section, when the other thread **exits**
+// the `register` critical section, it will observe the `NOTIFYING` bit and
+// handle the notify itself.
+//
+// If another thread is in the `notify` critical section, then it will handle
+// notifying the task.
+//
+// # A potential race (is safely handled).
+//
+// Imagine the following situation:
+//
+// * Thread A obtains the `notify` lock and notifies a task.
+//
+// * Before thread A releases the `notify` lock, the notified task is scheduled.
+//
+// * Thread B attempts to notify the task. In theory this should result in the
+// task being notified, but it cannot because thread A still holds the notify
+// lock.
+//
+// This case is handled by requiring users of `AtomicTask` to call `register`
+// **before** attempting to observe the application state change that resulted
+// in the task being notified. The notifiers also change the application state
+// before calling notify.
+//
+// Because of this, the task will do one of two things.
+//
+// 1) Observe the application state change that Thread B is notifying on. In
+// this case, it is OK for Thread B's notification to be lost.
+//
+// 2) Call register before attempting to observe the application state. Since
+// Thread A still holds the `notify` lock, the call to `register` will result
+// in the task notifying itself and get scheduled again.
+
+/// Idle state
+const WAITING: usize = 0;
+
+/// A new task value is being registered with the `AtomicTask` cell.
+const REGISTERING: usize = 0b01;
+
+/// The task currently registered with the `AtomicTask` cell is being notified.
+const NOTIFYING: usize = 0b10;
+
+impl AtomicTask {
+ /// Create an `AtomicTask` initialized with the given `Task`
+ pub fn new() -> AtomicTask {
+ // Make sure that task is Sync
+ trait AssertSync: Sync {}
+ impl AssertSync for Task {}
+
+ AtomicTask {
+ state: AtomicUsize::new(WAITING),
+ task: UnsafeCell::new(None),
+ }
+ }
+
+ /// Registers the current task to be notified on calls to `notify`.
+ ///
+ /// This is the same as calling `register_task` with `task::current()`.
+ pub fn register(&self) {
+ self.register_task(super::current());
+ }
+
+ /// Registers the provided task to be notified on calls to `notify`.
+ ///
+ /// The new task will take place of any previous tasks that were registered
+ /// by previous calls to `register`. Any calls to `notify` that happen after
+ /// a call to `register` (as defined by the memory ordering rules), will
+ /// notify the `register` caller's task.
+ ///
+ /// It is safe to call `register` with multiple other threads concurrently
+ /// calling `notify`. This will result in the `register` caller's current
+ /// task being notified once.
+ ///
+ /// This function is safe to call concurrently, but this is generally a bad
+ /// idea. Concurrent calls to `register` will attempt to register different
+ /// tasks to be notified. One of the callers will win and have its task set,
+ /// but there is no guarantee as to which caller will succeed.
+ pub fn register_task(&self, task: Task) {
+ match self.state.compare_and_swap(WAITING, REGISTERING, Acquire) {
+ WAITING => {
+ unsafe {
+ // Lock acquired, update the waker cell
+ *self.task.get() = Some(task.clone());
+
+ // Release the lock. If the state transitioned to include
+ // the `NOTIFYING` bit, this means that a notify has been
+ // called concurrently, so we have to remove the task and
+ // notify it.
+ //
+ // Start by assuming that the state is `REGISTERING` as this
+ // is what we just set it to.
+ let res = self.state.compare_exchange(
+ REGISTERING, WAITING, AcqRel, Acquire);
+
+ match res {
+ Ok(_) => {}
+ Err(actual) => {
+ // This branch can only be reached if a
+ // concurrent thread called `notify`. In this
+ // case, `actual` **must** be
+ // `REGISTERING | NOTIFYING`.
+ debug_assert_eq!(actual, REGISTERING | NOTIFYING);
+
+ // Take the task to notify once the atomic operation has
+ // completed.
+ let notify = (*self.task.get()).take().unwrap();
+
+ // Just swap, because no one could change state
+ // while state == `REGISTERING | NOTIFYING`.
+ self.state.swap(WAITING, AcqRel);
+
+ // The atomic swap was complete, now
+ // notify the task and return.
+ notify.notify();
+ }
+ }
+ }
+ }
+ NOTIFYING => {
+ // Currently in the process of notifying the task, i.e.,
+ // `notify` is currently being called on the old task handle.
+ // So, we call notify on the new task handle
+ task.notify();
+ }
+ state => {
+ // In this case, a concurrent thread is holding the
+ // "registering" lock. This probably indicates a bug in the
+ // caller's code as racing to call `register` doesn't make much
+ // sense.
+ //
+ // We just want to maintain memory safety. It is ok to drop the
+ // call to `register`.
+ debug_assert!(
+ state == REGISTERING ||
+ state == REGISTERING | NOTIFYING);
+ }
+ }
+ }
+
+ /// Notifies the task that last called `register`.
+ ///
+ /// If `register` has not been called yet, then this does nothing.
+ pub fn notify(&self) {
+ // AcqRel ordering is used in order to acquire the value of the `task`
+ // cell as well as to establish a `release` ordering with whatever
+ // memory the `AtomicTask` is associated with.
+ match self.state.fetch_or(NOTIFYING, AcqRel) {
+ WAITING => {
+ // The notifying lock has been acquired.
+ let task = unsafe { (*self.task.get()).take() };
+
+ // Release the lock
+ self.state.fetch_and(!NOTIFYING, Release);
+
+ if let Some(task) = task {
+ task.notify();
+ }
+ }
+ state => {
+ // There is a concurrent thread currently updating the
+ // associated task.
+ //
+ // Nothing more to do as the `NOTIFYING` bit has been set. It
+ // doesn't matter if there are concurrent registering threads or
+ // not.
+ //
+ debug_assert!(
+ state == REGISTERING ||
+ state == REGISTERING | NOTIFYING ||
+ state == NOTIFYING);
+ }
+ }
+ }
+}
+
+impl Default for AtomicTask {
+ fn default() -> Self {
+ AtomicTask::new()
+ }
+}
+
+impl fmt::Debug for AtomicTask {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "AtomicTask")
+ }
+}
+
+unsafe impl Send for AtomicTask {}
+unsafe impl Sync for AtomicTask {}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/core.rs b/third_party/rust/futures-0.1.31/src/task_impl/core.rs
new file mode 100644
index 0000000000..d454116012
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/core.rs
@@ -0,0 +1,186 @@
+#![cfg_attr(feature = "use_std", allow(dead_code))]
+
+use core::marker;
+use core::mem;
+use core::sync::atomic::AtomicUsize;
+#[allow(deprecated)]
+use core::sync::atomic::ATOMIC_USIZE_INIT;
+use core::sync::atomic::Ordering::{SeqCst, Relaxed};
+
+use super::{BorrowedTask, NotifyHandle};
+
+pub struct LocalKey;
+pub struct LocalMap;
+pub fn local_map() -> LocalMap { LocalMap }
+
+#[derive(Copy, Clone)]
+pub struct BorrowedEvents<'a>(marker::PhantomData<&'a ()>);
+
+#[derive(Copy, Clone)]
+pub struct BorrowedUnpark<'a> {
+ f: &'a Fn() -> NotifyHandle,
+ id: usize,
+}
+
+pub struct TaskUnpark {
+ handle: NotifyHandle,
+ id: usize,
+}
+
+#[derive(Clone)]
+pub struct UnparkEvents;
+
+impl<'a> BorrowedEvents<'a> {
+ pub fn new() -> BorrowedEvents<'a> {
+ BorrowedEvents(marker::PhantomData)
+ }
+
+ pub fn to_owned(&self) -> UnparkEvents {
+ UnparkEvents
+ }
+}
+
+impl<'a> BorrowedUnpark<'a> {
+ #[inline]
+ pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
+ BorrowedUnpark { f: f, id: id }
+ }
+
+ #[inline]
+ pub fn to_owned(&self) -> TaskUnpark {
+ let handle = (self.f)();
+ let id = handle.clone_id(self.id);
+ TaskUnpark { handle: handle, id: id }
+ }
+}
+
+impl UnparkEvents {
+ pub fn notify(&self) {}
+
+ pub fn will_notify(&self, _other: &BorrowedEvents) -> bool {
+ true
+ }
+}
+
+impl TaskUnpark {
+ pub fn notify(&self) {
+ self.handle.notify(self.id);
+ }
+
+ pub fn will_notify(&self, other: &BorrowedUnpark) -> bool {
+ self.id == other.id && self.handle.inner == (other.f)().inner
+ }
+}
+
+impl Clone for TaskUnpark {
+ fn clone(&self) -> TaskUnpark {
+ let handle = self.handle.clone();
+ let id = handle.clone_id(self.id);
+ TaskUnpark { handle: handle, id: id }
+ }
+}
+
+impl Drop for TaskUnpark {
+ fn drop(&mut self) {
+ self.handle.drop_id(self.id);
+ }
+}
+
+#[allow(deprecated)]
+static GET: AtomicUsize = ATOMIC_USIZE_INIT;
+#[allow(deprecated)]
+static SET: AtomicUsize = ATOMIC_USIZE_INIT;
+
+/// Initialize the `futures` task system.
+///
+/// This function is an unsafe low-level implementation detail typically only
+/// used by crates using `futures` in `no_std` context. Users of this crate
+/// who also use the standard library never need to invoke this function.
+///
+/// The task system in the `futures` crate relies on some notion of "local
+/// storage" for the running thread and/or context. The `task::current` function
+/// can get invoked in any context, for example, and needs to be able to return
+/// a `Task`. Typically with the standard library this is supported with
+/// thread-local-storage, but this is not available in `no_std` contexts!
+///
+/// This function is provided to allow `no_std` contexts to continue to be able
+/// to use the standard task system in this crate. The functions provided here
+/// will be used as-if they were thread-local-storage getters/setters. The `get`
+/// function provided is used to retrieve the current thread-local value of the
+/// task system's pointer, returning null if not initialized. The `set` function
+/// updates the value of the pointer.
+///
+/// # Return value
+///
+/// This function will return whether initialization succeeded or not. This
+/// function can be called concurrently and only the first invocation will
+/// succeed. If `false` is returned then the `get` and `set` pointers provided
+/// were *not* registered for use with the task system, but if `true` was
+/// provided then they will be called when the task system is used.
+///
+/// Note that while safe to call concurrently it's recommended to still perform
+/// external synchronization when calling this function. This task system is
+/// not guaranteed to be ready to go until a call to this function returns
+/// `true`. In other words, if you call this function and see `false`, the
+/// task system may not be ready to go as another thread may still be calling
+/// `init`.
+///
+/// # Unsafety
+///
+/// This function is unsafe due to the requirements on the behavior of the
+/// `get` and `set` functions. The pointers returned from these functions must
+/// reflect the semantics specified above and must also be thread-local,
+/// depending on the definition of a "thread" in the calling context.
+pub unsafe fn init(get: fn() -> *mut u8, set: fn(*mut u8)) -> bool {
+ if GET.compare_exchange(0, get as usize, SeqCst, SeqCst).is_ok() {
+ SET.store(set as usize, SeqCst);
+ true
+ } else {
+ false
+ }
+}
+
+/// Return whether the caller is running in a task (and so can use task_local!).
+pub fn is_in_task() -> bool {
+ if let Some(ptr) = get_ptr() {
+ !ptr.is_null()
+ } else {
+ false
+ }
+}
+
+#[inline]
+pub fn get_ptr() -> Option<*mut u8> {
+ match GET.load(Relaxed) {
+ 0 => None,
+ n => Some(unsafe { mem::transmute::<usize, fn() -> *mut u8>(n)() }),
+ }
+}
+
+#[cfg(feature = "use_std")]
+#[inline]
+pub fn is_get_ptr(f: usize) -> bool {
+ GET.load(Relaxed) == f
+}
+
+pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
+ where F: FnOnce() -> R
+{
+ let set = match SET.load(Relaxed) {
+ 0 => panic!("not initialized"),
+ n => unsafe { mem::transmute::<usize, fn(*mut u8)>(n) },
+ };
+
+ struct Reset(fn(*mut u8), *mut u8);
+
+ impl Drop for Reset {
+ #[inline]
+ fn drop(&mut self) {
+ (self.0)(self.1);
+ }
+ }
+
+ let _reset = Reset(set, get_ptr().unwrap());
+ set(task as *const _ as *mut u8);
+ f()
+}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/mod.rs b/third_party/rust/futures-0.1.31/src/task_impl/mod.rs
new file mode 100644
index 0000000000..6f1cf36c0c
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/mod.rs
@@ -0,0 +1,733 @@
+use core::fmt;
+use core::marker::PhantomData;
+
+use {Poll, Future, Stream, Sink, StartSend};
+
+mod atomic_task;
+pub use self::atomic_task::AtomicTask;
+
+mod core;
+
+#[cfg(feature = "use_std")]
+mod std;
+#[cfg(feature = "use_std")]
+pub use self::std::*;
+#[cfg(not(feature = "use_std"))]
+pub use self::core::*;
+
+pub struct BorrowedTask<'a> {
+ id: usize,
+ unpark: BorrowedUnpark<'a>,
+ events: BorrowedEvents<'a>,
+ // Task-local storage
+ map: &'a LocalMap,
+}
+
+fn fresh_task_id() -> usize {
+ use core::sync::atomic::{AtomicUsize, Ordering};
+ #[allow(deprecated)]
+ use core::sync::atomic::ATOMIC_USIZE_INIT;
+
+ // TODO: this assert is a real bummer, need to figure out how to reuse
+ // old IDs that are no longer in use.
+ //
+ // Note, though, that it is intended that these ids go away entirely
+ // eventually, see the comment on `is_current` below.
+ #[allow(deprecated)]
+ static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+ let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
+ assert!(id < usize::max_value() / 2,
+ "too many previous tasks have been allocated");
+ id
+}
+
+fn with<F: FnOnce(&BorrowedTask) -> R, R>(f: F) -> R {
+ unsafe {
+ let task = get_ptr().expect("no Task is currently running");
+ assert!(!task.is_null(), "no Task is currently running");
+ f(&*(task as *const BorrowedTask))
+ }
+}
+
+/// A handle to a "task", which represents a single lightweight "thread" of
+/// execution driving a future to completion.
+///
+/// In general, futures are composed into large units of work, which are then
+/// spawned as tasks onto an *executor*. The executor is responsible for polling
+/// the future as notifications arrive, until the future terminates.
+///
+/// This is obtained by the `task::current` function.
+///
+/// # FAQ
+///
+/// ### Why does `Task` not implement `Eq` and `Hash`?
+///
+/// A valid use case for `Task` to implement these two traits has not been
+/// encountered.
+///
+/// Usually, this question is asked by someone who wants to store a `Task`
+/// instance in a `HashSet`. This seems like an obvious way to implement a
+/// future aware, multi-handle structure; e.g. a multi-producer channel.
+///
+/// In this case, the idea is that whenever a `start_send` is called on one of
+/// the channel's send handles, if the channel is at capacity, the current task
+/// is stored in a set. Then, when capacity is available, a task is removed from
+/// the set and notified.
+///
+/// The problem with this strategy is that multiple `Sender` handles can be used
+/// on the same task. In this case, when the second handle is used and the task
+/// is stored in a set, there already is an entry. Then, when the first
+/// handle is dropped, this entry is cleared, resulting in a dead lock.
+///
+/// See [here](https://github.com/rust-lang-nursery/futures-rs/issues/670) for
+/// more discussion.
+///
+#[derive(Clone)]
+pub struct Task {
+ id: usize,
+ unpark: TaskUnpark,
+ events: UnparkEvents,
+}
+
+trait AssertSend: Send {}
+impl AssertSend for Task {}
+
+/// Returns a handle to the current task to call `notify` at a later date.
+///
+/// The returned handle implements the `Send` and `'static` bounds and may also
+/// be cheaply cloned. This is useful for squirreling away the handle into a
+/// location which is then later signaled that a future can make progress.
+///
+/// Implementations of the `Future` trait typically use this function if they
+/// would otherwise perform a blocking operation. When something isn't ready
+/// yet, this `current` function is called to acquire a handle to the current
+/// task, and then the future arranges it such that when the blocking operation
+/// otherwise finishes (perhaps in the background) it will `notify` the
+/// returned handle.
+///
+/// It's sometimes necessary to pass extra information to the task when
+/// unparking it, so that the task knows something about *why* it was woken.
+/// See the `FutureQueue` documentation for details on how to do this.
+///
+/// # Panics
+///
+/// This function will panic if a task is not currently being executed. That
+/// is, this method can be dangerous to call outside of an implementation of
+/// `poll`.
+pub fn current() -> Task {
+ with(|borrowed| {
+ let unpark = borrowed.unpark.to_owned();
+ let events = borrowed.events.to_owned();
+
+ Task {
+ id: borrowed.id,
+ unpark: unpark,
+ events: events,
+ }
+ })
+}
+
+#[doc(hidden)]
+#[deprecated(note = "renamed to `current`")]
+pub fn park() -> Task {
+ current()
+}
+
+impl Task {
+ /// Indicate that the task should attempt to poll its future in a timely
+ /// fashion.
+ ///
+ /// It's typically guaranteed that, after calling `notify`, `poll` will
+ /// be called at least once subsequently (unless the future has terminated).
+ /// If the task is currently polling its future when `notify` is called, it
+ /// must poll the future *again* afterwards, ensuring that all relevant
+ /// events are eventually observed by the future.
+ pub fn notify(&self) {
+ self.events.notify();
+ self.unpark.notify();
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "renamed to `notify`")]
+ pub fn unpark(&self) {
+ self.notify()
+ }
+
+ /// Returns `true` when called from within the context of the task.
+ ///
+ /// In other words, the task is currently running on the thread calling the
+ /// function. Note that this is currently, and has historically, been
+ /// implemented by tracking an `id` on every instance of `Spawn` created.
+ /// When a `Spawn` is being polled it stores in thread-local-storage the id
+ /// of the instance, and then `task::current` will return a `Task` that also
+ /// stores this id.
+ ///
+ /// The intention of this function was to answer questions like "if I
+ /// `notify` this task, is it equivalent to `task::current().notify()`?"
+ /// The answer "yes" may be able to avoid some extra work to block the
+ /// current task, such as sending a task along a channel or updating a
+ /// stored `Task` somewhere. An answer of "no" typically results in doing
+ /// the work anyway.
+ ///
+ /// Unfortunately this function has been somewhat buggy in the past and is
+ /// not intended to be supported in the future. By simply matching `id` the
+ /// intended question above isn't accurately taken into account, for
+ /// example, unpark events (now deprecated, but still a feature). Thus many
+ /// old users of this API weren't fully accounting for the question it was
+ /// intended they were asking.
+ ///
+ /// This API continues to be implemented but will in the future, e.g. in the
+ /// 0.1.x series of this crate, eventually return `false` unconditionally.
+ /// It is intended that this function will be removed in the next breaking
+ /// change of this crate. If you'd like to continue to be able to answer the
+ /// example question above, it's recommended you use the
+ /// `will_notify_current` method.
+ ///
+ /// If you've got questions about this though please let us know! We'd like
+ /// to learn about other use cases here that we did not consider.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if no current future is being polled.
+ #[deprecated(note = "intended to be removed, see docs for details")]
+ pub fn is_current(&self) -> bool {
+ with(|current| current.id == self.id)
+ }
+
+ /// This function is intended as a performance optimization for structures
+ /// which store a `Task` internally.
+ ///
+ /// The purpose of this function is to answer the question "if I `notify`
+ /// this task is it equivalent to `task::current().notify()`". An answer
+ /// "yes" may mean that you don't actually need to call `task::current()`
+ /// and store it, but rather you can simply leave a stored task in place. An
+ /// answer of "no" typically means that you need to call `task::current()`
+ /// and store it somewhere.
+ ///
+ /// As this is purely a performance optimization a valid implementation for
+ /// this function is to always return `false`. A best effort is done to
+ /// return `true` where possible, but false negatives may happen. Note that
+ /// this function will not return a false positive, however.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if no current future is being polled.
+ #[allow(deprecated)]
+ pub fn will_notify_current(&self) -> bool {
+ with(|current| {
+ self.unpark.will_notify(&current.unpark) &&
+ self.events.will_notify(&current.events)
+ })
+ }
+}
+
+impl fmt::Debug for Task {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Task")
+ .finish()
+ }
+}
+
+/// Representation of a spawned future/stream.
+///
+/// This object is returned by the `spawn` function in this module. This
+/// represents a "fused task and future", storing all necessary pieces of a task
+/// and owning the top-level future that's being driven as well.
+///
+/// A `Spawn` can be poll'd for completion or execution of the current thread
+/// can be blocked indefinitely until a notification arrives. This can be used
+/// with either futures or streams, with different methods being available on
+/// `Spawn` depending which is used.
+pub struct Spawn<T: ?Sized> {
+ id: usize,
+ data: LocalMap,
+ obj: T,
+}
+
+/// Spawns a future or stream, returning it and the new task responsible for
+/// running it to completion.
+///
+/// This function is the termination endpoint for running futures. This method
+/// will conceptually allocate a new task to run the given object, which is
+/// normally either a `Future` or `Stream`.
+///
+/// This function is similar to the `thread::spawn` function but does not
+/// attempt to run code in the background. The future will not make progress
+/// until the methods on `Spawn` are called in turn.
+pub fn spawn<T>(obj: T) -> Spawn<T> {
+ Spawn {
+ id: fresh_task_id(),
+ obj: obj,
+ data: local_map(),
+ }
+}
+
+impl<T: ?Sized> Spawn<T> {
+ /// Get a shared reference to the object the Spawn is wrapping.
+ pub fn get_ref(&self) -> &T {
+ &self.obj
+ }
+
+ /// Get a mutable reference to the object the Spawn is wrapping.
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.obj
+ }
+
+ /// Consume the Spawn, returning its inner object
+ pub fn into_inner(self) -> T where T: Sized {
+ self.obj
+ }
+
+ /// Calls the provided closure, scheduling notifications to be sent to the
+ /// `notify` argument.
+ pub fn poll_fn_notify<N, F, R>(&mut self,
+ notify: &N,
+ id: usize,
+ f: F) -> R
+ where F: FnOnce(&mut T) -> R,
+ N: Clone + Into<NotifyHandle>,
+ {
+ let mk = || notify.clone().into();
+ self.enter(BorrowedUnpark::new(&mk, id), f)
+ }
+
+ /// Polls the internal future, scheduling notifications to be sent to the
+ /// `notify` argument.
+ ///
+ /// This method will poll the internal future, testing if it's completed
+ /// yet. The `notify` argument is used as a sink for notifications sent to
+ /// this future. That is, while the future is being polled, any call to
+ /// `task::current()` will return a handle that contains the `notify`
+ /// specified.
+ ///
+ /// If this function returns `NotReady`, then the `notify` should have been
+ /// scheduled to receive a notification when poll can be called again.
+ /// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be
+ /// safely destroyed.
+ ///
+ /// Note that `notify` itself is passed as a shared reference, and is itself
+ /// not required to be a `NotifyHandle`. The `Clone` and `Into` trait bounds
+ /// will be used to convert this `notify` to a `NotifyHandle` if necessary.
+ /// This construction can avoid an unnecessary atomic reference count bump
+ /// in some situations.
+ ///
+ /// ## Unsafety and `id`
+ ///
+ /// This function and all other `*_notify` functions on this type will treat
+ /// the `id` specified very carefully, explicitly calling functions like the
+ /// `notify` argument's `clone_id` and `drop_id` functions. It should be
+ /// safe to encode a pointer itself into the `id` specified, such as an
+ /// `Arc<N>` or a `Box<N>`. The `clone_id` and `drop_id` functions are then
+ /// intended to be sufficient for the memory management related to that
+ /// pointer.
+ pub fn poll_future_notify<N>(&mut self,
+ notify: &N,
+ id: usize) -> Poll<T::Item, T::Error>
+ where N: Clone + Into<NotifyHandle>,
+ T: Future,
+ {
+ self.poll_fn_notify(notify, id, |f| f.poll())
+ }
+
+ /// Like `poll_future_notify`, except polls the underlying stream.
+ pub fn poll_stream_notify<N>(&mut self,
+ notify: &N,
+ id: usize)
+ -> Poll<Option<T::Item>, T::Error>
+ where N: Clone + Into<NotifyHandle>,
+ T: Stream,
+ {
+ self.poll_fn_notify(notify, id, |s| s.poll())
+ }
+
+ /// Invokes the underlying `start_send` method with this task in place.
+ ///
+ /// If the underlying operation returns `NotReady` then the `notify` value
+ /// passed in will receive a notification when the operation is ready to be
+ /// attempted again.
+ pub fn start_send_notify<N>(&mut self,
+ value: T::SinkItem,
+ notify: &N,
+ id: usize)
+ -> StartSend<T::SinkItem, T::SinkError>
+ where N: Clone + Into<NotifyHandle>,
+ T: Sink,
+ {
+ self.poll_fn_notify(notify, id, |s| s.start_send(value))
+ }
+
+ /// Invokes the underlying `poll_complete` method with this task in place.
+ ///
+ /// If the underlying operation returns `NotReady` then the `notify` value
+ /// passed in will receive a notification when the operation is ready to be
+ /// attempted again.
+ pub fn poll_flush_notify<N>(&mut self,
+ notify: &N,
+ id: usize)
+ -> Poll<(), T::SinkError>
+ where N: Clone + Into<NotifyHandle>,
+ T: Sink,
+ {
+ self.poll_fn_notify(notify, id, |s| s.poll_complete())
+ }
+
+ /// Invokes the underlying `close` method with this task in place.
+ ///
+ /// If the underlying operation returns `NotReady` then the `notify` value
+ /// passed in will receive a notification when the operation is ready to be
+ /// attempted again.
+ pub fn close_notify<N>(&mut self,
+ notify: &N,
+ id: usize)
+ -> Poll<(), T::SinkError>
+ where N: Clone + Into<NotifyHandle>,
+ T: Sink,
+ {
+ self.poll_fn_notify(notify, id, |s| s.close())
+ }
+
+ fn enter<F, R>(&mut self, unpark: BorrowedUnpark, f: F) -> R
+ where F: FnOnce(&mut T) -> R
+ {
+ let borrowed = BorrowedTask {
+ id: self.id,
+ unpark: unpark,
+ events: BorrowedEvents::new(),
+ map: &self.data,
+ };
+ let obj = &mut self.obj;
+ set(&borrowed, || f(obj))
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Spawn<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Spawn")
+ .field("obj", &&self.obj)
+ .finish()
+ }
+}
+
+/// A trait which represents a sink of notifications that a future is ready to
+/// make progress.
+///
+/// This trait is provided as an argument to the `Spawn::*_notify` family of
+/// functions. It's transitively used as part of the `Task::notify` method to
+/// internally deliver notifications of readiness of a future to move forward.
+///
+/// An instance of `Notify` has one primary method, `notify`, which is given a
+/// contextual argument as to what's being notified. This contextual argument is
+/// *also* provided to the `Spawn::*_notify` family of functions and can be used
+/// to reuse an instance of `Notify` across many futures.
+///
+/// Instances of `Notify` must be safe to share across threads, and the methods
+/// be invoked concurrently. They must also live for the `'static` lifetime,
+/// not containing any stack references.
+pub trait Notify: Send + Sync {
+ /// Indicates that an associated future and/or task are ready to make
+ /// progress.
+ ///
+ /// Typically this means that the receiver of the notification should
+ /// arrange for the future to get poll'd in a prompt fashion.
+ ///
+ /// This method takes an `id` as an argument which was transitively passed
+ /// in from the original call to `Spawn::*_notify`. This id can be used to
+ /// disambiguate which precise future became ready for polling.
+ ///
+ /// # Panics
+ ///
+ /// Since `unpark` may be invoked from arbitrary contexts, it should
+ /// endeavor not to panic and to do as little work as possible. However, it
+ /// is not guaranteed not to panic, and callers should be wary. If a panic
+ /// occurs, that panic may or may not be propagated to the end-user of the
+ /// future that you'd otherwise wake up.
+ fn notify(&self, id: usize);
+
+ /// This function is called whenever a new copy of `id` is needed.
+ ///
+ /// This is called in one of two situations:
+ ///
+ /// * A `Task` is being created through `task::current` while a future is
+ /// being polled. In that case the instance of `Notify` passed in to one
+ /// of the `poll_*` functions is called with the `id` passed into the same
+ /// `poll_*` function.
+ /// * A `Task` is itself being cloned. Each `Task` contains its own id and a
+ /// handle to the `Notify` behind it, and the task's `Notify` is used to
+ /// clone the internal `id` to assign to the new task.
+ ///
+ /// The `id` returned here will be stored in the `Task`-to-be and used later
+ /// to pass to `notify` when the `Task::notify` function is called on that
+ /// `Task`.
+ ///
+ /// Note that typically this is just the identity function, passing through
+ /// the identifier. For more unsafe situations, however, if `id` is itself a
+ /// pointer of some kind this can be used as a hook to "clone" the pointer,
+ /// depending on what that means for the specified pointer.
+ fn clone_id(&self, id: usize) -> usize {
+ id
+ }
+
+ /// All instances of `Task` store an `id` that they're going to internally
+ /// notify with, and this function is called when the `Task` is dropped.
+ ///
+ /// This function provides a hook for schemes which encode pointers in this
+ /// `id` argument to deallocate resources associated with the pointer. It's
+ /// guaranteed that after this function is called the `Task` containing this
+ /// `id` will no longer use the `id`.
+ fn drop_id(&self, id: usize) {
+ drop(id);
+ }
+}
+
+/// Sets the `NotifyHandle` of the current task for the duration of the provided
+/// closure.
+///
+/// This function takes a type that can be converted into a notify handle,
+/// `notify` and `id`, and a closure `f`. The closure `f` will be executed such
+/// that calls to `task::current()` will store a reference to the notify handle
+/// provided, not the one previously in the environment.
+///
+/// Note that calls to `task::current()` in the closure provided *will not* be
+/// equivalent to `task::current()` before this method is called. The two tasks
+/// returned will notify different handles, and the task handles pulled out
+/// during the duration of this closure will not notify the previous task. It's
+/// recommended that you call `task::current()` in some capacity before calling
+/// this function to ensure that calls to `task::current()` inside of this
+/// closure can transitively wake up the outer task.
+///
+/// # Panics
+///
+/// This function will panic if it is called outside the context of a future's
+/// task. This is only valid to call once you've already entered a future via
+/// `Spawn::poll_*` functions.
+pub fn with_notify<F, T, R>(notify: &T, id: usize, f: F) -> R
+ where F: FnOnce() -> R,
+ T: Clone + Into<NotifyHandle>,
+{
+ with(|task| {
+ let mk = || notify.clone().into();
+ let new_task = BorrowedTask {
+ id: task.id,
+ unpark: BorrowedUnpark::new(&mk, id),
+ events: task.events,
+ map: task.map,
+ };
+
+ set(&new_task, f)
+ })
+}
+
+/// An unsafe trait for implementing custom forms of memory management behind a
+/// `Task`.
+///
+/// The `futures` crate critically relies on "notification handles" for
+/// futures to contain and later use to inform that they're ready to make
+/// progress. These handles, however, must be cheap to create and cheap
+/// to clone to ensure that this operation is efficient throughout the
+/// execution of a program.
+///
+/// Typically this sort of memory management is done in the standard library
+/// with the `Arc` type. An `Arc` is relatively cheap to allocate and is
+/// quite cheap to clone and pass around. Plus, it's 100% safe!
+///
+/// When working outside the standard library, however, you don't always have
+/// an `Arc` type available to you. This trait, `UnsafeNotify`, is intended
+/// to be the "unsafe version" of the `Notify` trait. This trait encodes the
+/// memory management operations of a `Task`'s notification handle, allowing
+/// custom implementations for the memory management of a notification handle.
+///
+/// Put another way, the core notification type in this library,
+/// `NotifyHandle`, simply internally contains an instance of
+/// `*mut UnsafeNotify`. This "unsafe trait object" is then used exclusively
+/// to operate with, dynamically dispatching calls to clone, drop, and notify.
+/// Critically though as a raw pointer it doesn't require a particular form
+/// of memory management, allowing external implementations.
+///
+/// A default implementation of the `UnsafeNotify` trait is provided for the
+/// `Arc` type in the standard library. If the `use_std` feature of this crate
+/// is not available however, you'll be required to implement your own
+/// instance of this trait to pass it into `NotifyHandle::new`.
+///
+/// # Unsafety
+///
+/// This trait is manually encoding the memory management of the underlying
+/// handle, and as a result is quite unsafe to implement! Implementors of
+/// this trait must guarantee:
+///
+/// * Calls to `clone_raw` produce uniquely owned handles. It should be safe
+/// to drop the current handle and have the returned handle still be valid.
+/// * Calls to `drop_raw` work with `self` as a raw pointer, deallocating
+/// resources associated with it. This is a pretty unsafe operation as it's
+/// invalidating the `self` pointer, so extreme care needs to be taken.
+///
+/// In general it's recommended to review the trait documentation as well as
+/// the implementation for `Arc` in this crate. When in doubt ping the
+/// `futures` authors to clarify an unsafety question here.
+pub unsafe trait UnsafeNotify: Notify {
+ /// Creates a new `NotifyHandle` from this instance of `UnsafeNotify`.
+ ///
+ /// This function will create a new uniquely owned handle that under the
+ /// hood references the same notification instance. In other words calls
+ /// to `notify` on the returned handle should be equivalent to calls to
+ /// `notify` on this handle.
+ ///
+ /// # Unsafety
+ ///
+ /// This trait is unsafe to implement, as are all these methods. This
+ /// method is also unsafe to call as it's asserting the `UnsafeNotify`
+ /// value is in a consistent state. In general it's recommended to
+ /// review the trait documentation as well as the implementation for `Arc`
+ /// in this crate. When in doubt ping the `futures` authors to clarify
+ /// an unsafety question here.
+ unsafe fn clone_raw(&self) -> NotifyHandle;
+
+ /// Drops this instance of `UnsafeNotify`, deallocating resources
+ /// associated with it.
+ ///
+ /// This method is intended to have a signature such as:
+ ///
+ /// ```ignore
+ /// fn drop_raw(self: *mut Self);
+ /// ```
+ ///
+ /// Unfortunately in Rust today that signature is not object safe.
+ /// Nevertheless it's recommended to implement this function *as if* that
+ /// were its signature. As such it is not safe to call on an invalid
+ /// pointer, nor is the validity of the pointer guaranteed after this
+ /// function returns.
+ ///
+ /// # Unsafety
+ ///
+ /// This trait is unsafe to implement, as are all these methods. This
+ /// method is also unsafe to call as it's asserting the `UnsafeNotify`
+ /// value is in a consistent state. In general it's recommended to
+ /// review the trait documentation as well as the implementation for `Arc`
+ /// in this crate. When in doubt ping the `futures` authors to clarify
+ /// an unsafety question here.
+ unsafe fn drop_raw(&self);
+}
+
+/// A `NotifyHandle` is the core value through which notifications are routed
+/// in the `futures` crate.
+///
+/// All instances of `Task` will contain a `NotifyHandle` handle internally.
+/// This handle itself contains a trait object pointing to an instance of the
+/// `Notify` trait, allowing notifications to get routed through it.
+///
+/// The `NotifyHandle` type internally does not codify any particular memory
+/// management strategy. Internally it contains an instance of `*mut
+/// UnsafeNotify`, and more details about that trait can be found on its own
+/// documentation. Consequently, though, the one constructor of this type,
+/// `NotifyHandle::new`, is `unsafe` to call. It is not recommended to call
+/// this constructor directly.
+///
+/// If you're working with the standard library then it's recommended to
+/// work with the `Arc` type. If you have a struct, `T`, which implements the
+/// `Notify` trait, then you can construct this with
+/// `NotifyHandle::from(t: Arc<T>)`. The coercion to `UnsafeNotify` will
+/// happen automatically and safely for you.
+///
+/// When working externally from the standard library it's recommended to
+/// provide a similar safe constructor for your custom type as opposed to
+/// recommending an invocation of `NotifyHandle::new` directly.
+pub struct NotifyHandle {
+ inner: *mut UnsafeNotify,
+}
+
+unsafe impl Send for NotifyHandle {}
+unsafe impl Sync for NotifyHandle {}
+
+impl NotifyHandle {
+ /// Constructs a new `NotifyHandle` directly.
+ ///
+ /// Note that most code will not need to call this. Implementers of the
+ /// `UnsafeNotify` trait will typically provide a wrapper that calls this
+ /// but you otherwise shouldn't call it directly.
+ ///
+ /// If you're working with the standard library then it's recommended to
+ /// use the `NotifyHandle::from` function instead which works with the safe
+ /// `Arc` type and the safe `Notify` trait.
+ #[inline]
+ pub unsafe fn new(inner: *mut UnsafeNotify) -> NotifyHandle {
+ NotifyHandle { inner: inner }
+ }
+
+ /// Invokes the underlying instance of `Notify` with the provided `id`.
+ pub fn notify(&self, id: usize) {
+ unsafe { (*self.inner).notify(id) }
+ }
+
+ fn clone_id(&self, id: usize) -> usize {
+ unsafe { (*self.inner).clone_id(id) }
+ }
+
+ fn drop_id(&self, id: usize) {
+ unsafe { (*self.inner).drop_id(id) }
+ }
+}
+
+impl Clone for NotifyHandle {
+ #[inline]
+ fn clone(&self) -> Self {
+ unsafe {
+ (*self.inner).clone_raw()
+ }
+ }
+}
+
+impl fmt::Debug for NotifyHandle {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("NotifyHandle")
+ .finish()
+ }
+}
+
+impl Drop for NotifyHandle {
+ fn drop(&mut self) {
+ unsafe {
+ (*self.inner).drop_raw()
+ }
+ }
+}
+
+/// Marker for a `T` that is behind &'static.
+struct StaticRef<T>(PhantomData<T>);
+
+impl<T: Notify> Notify for StaticRef<T> {
+ fn notify(&self, id: usize) {
+ let me = unsafe { &*(self as *const _ as *const T) };
+ me.notify(id);
+ }
+
+ fn clone_id(&self, id: usize) -> usize {
+ let me = unsafe { &*(self as *const _ as *const T) };
+ me.clone_id(id)
+ }
+
+ fn drop_id(&self, id: usize) {
+ let me = unsafe { &*(self as *const _ as *const T) };
+ me.drop_id(id);
+ }
+}
+
+unsafe impl<T: Notify + 'static> UnsafeNotify for StaticRef<T> {
+ unsafe fn clone_raw(&self) -> NotifyHandle {
+ NotifyHandle::new(self as *const _ as *mut StaticRef<T>)
+ }
+
+ unsafe fn drop_raw(&self) {}
+}
+
+impl<T: Notify> From<&'static T> for NotifyHandle {
+ fn from(src : &'static T) -> NotifyHandle {
+ unsafe { NotifyHandle::new(src as *const _ as *mut StaticRef<T>) }
+ }
+}
+
+#[cfg(feature = "nightly")]
+mod nightly {
+ use super::NotifyHandle;
+ use core::marker::Unpin;
+
+ impl Unpin for NotifyHandle {}
+}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs
new file mode 100644
index 0000000000..770912b219
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/std/data.rs
@@ -0,0 +1,131 @@
+use std::prelude::v1::*;
+
+use std::any::TypeId;
+use std::cell::RefCell;
+use std::hash::{BuildHasherDefault, Hasher};
+use std::collections::HashMap;
+
+use task_impl::with;
+
+/// A macro to create a `static` of type `LocalKey`
+///
+/// This macro is intentionally similar to the `thread_local!`, and creates a
+/// `static` which has a `with` method to access the data on a task.
+///
+/// The data associated with each task local is per-task, so different tasks
+/// will contain different values.
+#[macro_export]
+macro_rules! task_local {
+ (static $NAME:ident: $t:ty = $e:expr) => (
+ static $NAME: $crate::task::LocalKey<$t> = {
+ fn __init() -> $t { $e }
+ fn __key() -> ::std::any::TypeId {
+ struct __A;
+ ::std::any::TypeId::of::<__A>()
+ }
+ $crate::task::LocalKey {
+ __init: __init,
+ __key: __key,
+ }
+ };
+ )
+}
+
+pub type LocalMap = RefCell<HashMap<TypeId,
+ Box<Opaque>,
+ BuildHasherDefault<IdHasher>>>;
+
+pub fn local_map() -> LocalMap {
+ RefCell::new(HashMap::default())
+}
+
+pub trait Opaque: Send {}
+impl<T: Send> Opaque for T {}
+
+/// A key for task-local data stored in a future's task.
+///
+/// This type is generated by the `task_local!` macro and performs very
+/// similarly to the `thread_local!` macro and `std::thread::LocalKey` types.
+/// Data associated with a `LocalKey<T>` is stored inside of a future's task,
+/// and the data is destroyed when the future is completed and the task is
+/// destroyed.
+///
+/// Task-local data can migrate between threads and hence requires a `Send`
+/// bound. Additionally, task-local data also requires the `'static` bound to
+/// ensure it lives long enough. When a key is accessed for the first time the
+/// task's data is initialized with the provided initialization expression to
+/// the macro.
+#[derive(Debug)]
+pub struct LocalKey<T> {
+ // "private" fields which have to be public to get around macro hygiene, not
+ // included in the stability story for this type. Can change at any time.
+ #[doc(hidden)]
+ pub __key: fn() -> TypeId,
+ #[doc(hidden)]
+ pub __init: fn() -> T,
+}
+
+pub struct IdHasher {
+ id: u64,
+}
+
+impl Default for IdHasher {
+ fn default() -> IdHasher {
+ IdHasher { id: 0 }
+ }
+}
+
+impl Hasher for IdHasher {
+ fn write(&mut self, _bytes: &[u8]) {
+ // TODO: need to do something sensible
+ panic!("can only hash u64");
+ }
+
+ fn write_u64(&mut self, u: u64) {
+ self.id = u;
+ }
+
+ fn finish(&self) -> u64 {
+ self.id
+ }
+}
+
+impl<T: Send + 'static> LocalKey<T> {
+ /// Access this task-local key, running the provided closure with a
+ /// reference to the value.
+ ///
+ /// This function will access this task-local key to retrieve the data
+ /// associated with the current task and this key. If this is the first time
+ /// this key has been accessed on this task, then the key will be
+ /// initialized with the initialization expression provided at the time the
+ /// `task_local!` macro was called.
+ ///
+ /// The provided closure will be provided a shared reference to the
+ /// underlying data associated with this task-local-key. The data itself is
+ /// stored inside of the current task.
+ ///
+ /// # Panics
+ ///
+ /// This function can possibly panic for a number of reasons:
+ ///
+ /// * If there is not a current task.
+ /// * If the initialization expression is run and it panics
+ /// * If the closure provided panics
+ pub fn with<F, R>(&'static self, f: F) -> R
+ where F: FnOnce(&T) -> R
+ {
+ let key = (self.__key)();
+ with(|task| {
+ let raw_pointer = {
+ let mut data = task.map.borrow_mut();
+ let entry = data.entry(key).or_insert_with(|| {
+ Box::new((self.__init)())
+ });
+ &**entry as *const Opaque as *const T
+ };
+ unsafe {
+ f(&*raw_pointer)
+ }
+ })
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs
new file mode 100644
index 0000000000..e82a23e5d0
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/std/mod.rs
@@ -0,0 +1,719 @@
+use std::prelude::v1::*;
+
+use std::cell::Cell;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::ptr;
+use std::sync::{Arc, Mutex, Condvar, Once};
+#[allow(deprecated)]
+use std::sync::ONCE_INIT;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use {Future, Stream, Sink, Poll, Async, StartSend, AsyncSink};
+use super::core;
+use super::{BorrowedTask, NotifyHandle, Spawn, spawn, Notify, UnsafeNotify};
+
+mod unpark_mutex;
+pub use self::unpark_mutex::UnparkMutex;
+
+mod data;
+pub use self::data::*;
+
+mod task_rc;
+#[allow(deprecated)]
+#[cfg(feature = "with-deprecated")]
+pub use self::task_rc::TaskRc;
+
+pub use task_impl::core::init;
+
+thread_local!(static CURRENT_TASK: Cell<*mut u8> = Cell::new(ptr::null_mut()));
+
+/// Return whether the caller is running in a task (and so can use task_local!).
+pub fn is_in_task() -> bool {
+ CURRENT_TASK.with(|task| !task.get().is_null())
+}
+
+#[allow(deprecated)]
+static INIT: Once = ONCE_INIT;
+
+pub fn get_ptr() -> Option<*mut u8> {
+ // Since this condition will always return true when TLS task storage is
+ // used (the default), the branch predictor will be able to optimize the
+ // branching and a dynamic dispatch will be avoided, which makes the
+ // compiler happier.
+ if core::is_get_ptr(0x1) {
+ Some(CURRENT_TASK.with(|c| c.get()))
+ } else {
+ core::get_ptr()
+ }
+}
+
+fn tls_slot() -> *const Cell<*mut u8> {
+ CURRENT_TASK.with(|c| c as *const _)
+}
+
+pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
+ where F: FnOnce() -> R
+{
+ // Lazily initialize the get / set ptrs
+ //
+ // Note that we won't actually use these functions ever, we'll instead be
+ // testing the pointer's value elsewhere and calling our own functions.
+ INIT.call_once(|| unsafe {
+ let get = mem::transmute::<usize, _>(0x1);
+ let set = mem::transmute::<usize, _>(0x2);
+ init(get, set);
+ });
+
+ // Same as above.
+ if core::is_get_ptr(0x1) {
+ struct Reset(*const Cell<*mut u8>, *mut u8);
+
+ impl Drop for Reset {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ (*self.0).set(self.1);
+ }
+ }
+ }
+
+ unsafe {
+ let slot = tls_slot();
+ let _reset = Reset(slot, (*slot).get());
+ (*slot).set(task as *const _ as *mut u8);
+ f()
+ }
+ } else {
+ core::set(task, f)
+ }
+}
+
+#[derive(Copy, Clone)]
+#[allow(deprecated)]
+pub enum BorrowedUnpark<'a> {
+ Old(&'a Arc<Unpark>),
+ New(core::BorrowedUnpark<'a>),
+}
+
+#[derive(Copy, Clone)]
+#[allow(deprecated)]
+pub enum BorrowedEvents<'a> {
+ None,
+ One(&'a UnparkEvent, &'a BorrowedEvents<'a>),
+}
+
+#[derive(Clone)]
+pub enum TaskUnpark {
+ #[allow(deprecated)]
+ Old(Arc<Unpark>),
+ New(core::TaskUnpark),
+}
+
+#[derive(Clone)]
+#[allow(deprecated)]
+pub enum UnparkEvents {
+ None,
+ One(UnparkEvent),
+ Many(Box<[UnparkEvent]>),
+}
+
+impl<'a> BorrowedUnpark<'a> {
+ #[inline]
+ pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
+ BorrowedUnpark::New(core::BorrowedUnpark::new(f, id))
+ }
+
+ #[inline]
+ pub fn to_owned(&self) -> TaskUnpark {
+ match *self {
+ BorrowedUnpark::Old(old) => TaskUnpark::Old(old.clone()),
+ BorrowedUnpark::New(new) => TaskUnpark::New(new.to_owned()),
+ }
+ }
+}
+
+impl<'a> BorrowedEvents<'a> {
+ #[inline]
+ pub fn new() -> BorrowedEvents<'a> {
+ BorrowedEvents::None
+ }
+
+ #[inline]
+ pub fn to_owned(&self) -> UnparkEvents {
+ let mut one_event = None;
+ let mut list = Vec::new();
+ let mut cur = self;
+ while let BorrowedEvents::One(event, next) = *cur {
+ let event = event.clone();
+ match one_event.take() {
+ None if list.len() == 0 => one_event = Some(event),
+ None => list.push(event),
+ Some(event2) => {
+ list.push(event2);
+ list.push(event);
+ }
+ }
+ cur = next;
+ }
+
+ match one_event {
+ None if list.len() == 0 => UnparkEvents::None,
+ None => UnparkEvents::Many(list.into_boxed_slice()),
+ Some(e) => UnparkEvents::One(e),
+ }
+ }
+}
+
+impl UnparkEvents {
+ pub fn notify(&self) {
+ match *self {
+ UnparkEvents::None => {}
+ UnparkEvents::One(ref e) => e.unpark(),
+ UnparkEvents::Many(ref list) => {
+ for event in list.iter() {
+ event.unpark();
+ }
+ }
+ }
+ }
+
+ pub fn will_notify(&self, events: &BorrowedEvents) -> bool {
+ // Pessimistically assume that any unpark events mean that we're not
+ // equivalent to the current task.
+ match *self {
+ UnparkEvents::None => {}
+ _ => return false,
+ }
+
+ match *events {
+ BorrowedEvents::None => return true,
+ _ => {},
+ }
+
+ return false
+ }
+}
+
+#[allow(deprecated)]
+impl TaskUnpark {
+ pub fn notify(&self) {
+ match *self {
+ TaskUnpark::Old(ref old) => old.unpark(),
+ TaskUnpark::New(ref new) => new.notify(),
+ }
+ }
+
+ pub fn will_notify(&self, unpark: &BorrowedUnpark) -> bool {
+ match (unpark, self) {
+ (&BorrowedUnpark::Old(old1), &TaskUnpark::Old(ref old2)) => {
+ &**old1 as *const Unpark == &**old2 as *const Unpark
+ }
+ (&BorrowedUnpark::New(ref new1), &TaskUnpark::New(ref new2)) => {
+ new2.will_notify(new1)
+ }
+ _ => false,
+ }
+ }
+}
+
+impl<F: Future> Spawn<F> {
+ #[doc(hidden)]
+ #[deprecated(note = "recommended to use `poll_future_notify` instead")]
+ #[allow(deprecated)]
+ pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> {
+ self.enter(BorrowedUnpark::Old(&unpark), |f| f.poll())
+ }
+
+ /// Waits for the internal future to complete, blocking this thread's
+ /// execution until it does.
+ ///
+ /// This function will call `poll_future` in a loop, waiting for the future
+ /// to complete. When a future cannot make progress it will use
+ /// `thread::park` to block the current thread.
+ pub fn wait_future(&mut self) -> Result<F::Item, F::Error> {
+ ThreadNotify::with_current(|notify| {
+
+ loop {
+ match self.poll_future_notify(notify, 0)? {
+ Async::NotReady => notify.park(),
+ Async::Ready(e) => return Ok(e),
+ }
+ }
+ })
+ }
+
+
+ #[doc(hidden)]
+ #[deprecated]
+ #[allow(deprecated)]
+ pub fn execute(self, exec: Arc<Executor>)
+ where F: Future<Item=(), Error=()> + Send + 'static,
+ {
+ exec.clone().execute(Run {
+ // Ideally this method would be defined directly on
+ // `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and
+ // it'd be more explicit, but unfortunately that currently has a
+ // link error on nightly: rust-lang/rust#36155
+ spawn: spawn(Box::new(self.into_inner())),
+ inner: Arc::new(RunInner {
+ exec: exec,
+ mutex: UnparkMutex::new()
+ }),
+ })
+ }
+}
+
+impl<S: Stream> Spawn<S> {
+ #[deprecated(note = "recommended to use `poll_stream_notify` instead")]
+ #[allow(deprecated)]
+ #[doc(hidden)]
+ pub fn poll_stream(&mut self, unpark: Arc<Unpark>)
+ -> Poll<Option<S::Item>, S::Error> {
+ self.enter(BorrowedUnpark::Old(&unpark), |s| s.poll())
+ }
+
+ /// Like `wait_future`, except only waits for the next element to arrive on
+ /// the underlying stream.
+ pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> {
+ ThreadNotify::with_current(|notify| {
+
+ loop {
+ match self.poll_stream_notify(notify, 0) {
+ Ok(Async::NotReady) => notify.park(),
+ Ok(Async::Ready(Some(e))) => return Some(Ok(e)),
+ Ok(Async::Ready(None)) => return None,
+ Err(e) => return Some(Err(e)),
+ }
+ }
+ })
+ }
+}
+
+impl<S: Sink> Spawn<S> {
+ #[doc(hidden)]
+ #[deprecated(note = "recommended to use `start_send_notify` instead")]
+ #[allow(deprecated)]
+ pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>)
+ -> StartSend<S::SinkItem, S::SinkError> {
+ self.enter(BorrowedUnpark::Old(unpark), |s| s.start_send(value))
+ }
+
+ #[deprecated(note = "recommended to use `poll_flush_notify` instead")]
+ #[allow(deprecated)]
+ #[doc(hidden)]
+ pub fn poll_flush(&mut self, unpark: &Arc<Unpark>)
+ -> Poll<(), S::SinkError> {
+ self.enter(BorrowedUnpark::Old(unpark), |s| s.poll_complete())
+ }
+
+ /// Blocks the current thread until it's able to send `value` on this sink.
+ ///
+ /// This function will send the `value` on the sink that this task wraps. If
+ /// the sink is not ready to send the value yet then the current thread will
+ /// be blocked until it's able to send the value.
+ pub fn wait_send(&mut self, mut value: S::SinkItem)
+ -> Result<(), S::SinkError> {
+ ThreadNotify::with_current(|notify| {
+
+ loop {
+ value = match self.start_send_notify(value, notify, 0)? {
+ AsyncSink::NotReady(v) => v,
+ AsyncSink::Ready => return Ok(()),
+ };
+ notify.park();
+ }
+ })
+ }
+
+ /// Blocks the current thread until it's able to flush this sink.
+ ///
+ /// This function will call the underlying sink's `poll_complete` method
+ /// until it returns that it's ready, proxying out errors upwards to the
+ /// caller if one occurs.
+ ///
+ /// The thread will be blocked until `poll_complete` returns that it's
+ /// ready.
+ pub fn wait_flush(&mut self) -> Result<(), S::SinkError> {
+ ThreadNotify::with_current(|notify| {
+
+ loop {
+ if self.poll_flush_notify(notify, 0)?.is_ready() {
+ return Ok(())
+ }
+ notify.park();
+ }
+ })
+ }
+
+ /// Blocks the current thread until it's able to close this sink.
+ ///
+ /// This function will close the sink that this task wraps. If the sink
+    /// is not ready to be closed yet, then the current thread will be blocked
+ /// until it's closed.
+ pub fn wait_close(&mut self) -> Result<(), S::SinkError> {
+ ThreadNotify::with_current(|notify| {
+
+ loop {
+ if self.close_notify(notify, 0)?.is_ready() {
+ return Ok(())
+ }
+ notify.park();
+ }
+ })
+ }
+}
+
+/// A trait which represents a sink of notifications that a future is ready to
+/// make progress.
+///
+/// This trait is provided as an argument to the `Spawn::poll_future` and
+/// `Spawn::poll_stream` functions. It's transitively used as part of the
+/// `Task::unpark` method to internally deliver notifications of readiness of a
+/// future to move forward.
+#[deprecated(note = "recommended to use `Notify` instead")]
+pub trait Unpark: Send + Sync {
+ /// Indicates that an associated future and/or task are ready to make
+ /// progress.
+ ///
+ /// Typically this means that the receiver of the notification should
+ /// arrange for the future to get poll'd in a prompt fashion.
+ fn unpark(&self);
+}
+
+/// A trait representing requests to poll futures.
+///
+/// This trait is an argument to the `Spawn::execute` which is used to run a
+/// future to completion. An executor will receive requests to run a future and
+/// an executor is responsible for ensuring that happens in a timely fashion.
+///
+/// Note that this trait is likely to be deprecated and/or renamed to avoid
+/// clashing with the `future::Executor` trait. If you've got a use case for
+/// this or would like to comment on the name please let us know!
+#[deprecated]
+#[allow(deprecated)]
+pub trait Executor: Send + Sync + 'static {
+ /// Requests that `Run` is executed soon on the given executor.
+ fn execute(&self, r: Run);
+}
+
+/// Units of work submitted to an `Executor`, currently only created
+/// internally.
+#[deprecated]
+pub struct Run {
+ spawn: Spawn<Box<Future<Item = (), Error = ()> + Send>>,
+ inner: Arc<RunInner>,
+}
+
+#[allow(deprecated)]
+struct RunInner {
+ mutex: UnparkMutex<Run>,
+ exec: Arc<Executor>,
+}
+
+#[allow(deprecated)]
+impl Run {
+ /// Actually run the task (invoking `poll` on its future) on the current
+ /// thread.
+ pub fn run(self) {
+ let Run { mut spawn, inner } = self;
+
+ // SAFETY: the ownership of this `Run` object is evidence that
+ // we are in the `POLLING`/`REPOLL` state for the mutex.
+ unsafe {
+ inner.mutex.start_poll();
+
+ loop {
+ match spawn.poll_future_notify(&inner, 0) {
+ Ok(Async::NotReady) => {}
+ Ok(Async::Ready(())) |
+ Err(()) => return inner.mutex.complete(),
+ }
+ let run = Run { spawn: spawn, inner: inner.clone() };
+ match inner.mutex.wait(run) {
+ Ok(()) => return, // we've waited
+ Err(r) => spawn = r.spawn, // someone's notified us
+ }
+ }
+ }
+ }
+}
+
+#[allow(deprecated)]
+impl fmt::Debug for Run {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Run")
+ .field("contents", &"...")
+ .finish()
+ }
+}
+
+#[allow(deprecated)]
+impl Notify for RunInner {
+ fn notify(&self, _id: usize) {
+ match self.mutex.notify() {
+ Ok(run) => self.exec.execute(run),
+ Err(()) => {}
+ }
+ }
+}
+
+// ===== ThreadNotify =====
+
+struct ThreadNotify {
+ state: AtomicUsize,
+ mutex: Mutex<()>,
+ condvar: Condvar,
+}
+
+const IDLE: usize = 0;
+const NOTIFY: usize = 1;
+const SLEEP: usize = 2;
+
+thread_local! {
+ static CURRENT_THREAD_NOTIFY: Arc<ThreadNotify> = Arc::new(ThreadNotify {
+ state: AtomicUsize::new(IDLE),
+ mutex: Mutex::new(()),
+ condvar: Condvar::new(),
+ });
+}
+
+impl ThreadNotify {
+ fn with_current<F, R>(f: F) -> R
+ where F: FnOnce(&Arc<ThreadNotify>) -> R,
+ {
+ CURRENT_THREAD_NOTIFY.with(|notify| f(notify))
+ }
+
+ fn park(&self) {
+ // If currently notified, then we skip sleeping. This is checked outside
+ // of the lock to avoid acquiring a mutex if not necessary.
+ match self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
+ NOTIFY => return,
+ IDLE => {},
+ _ => unreachable!(),
+ }
+
+ // The state is currently idle, so obtain the lock and then try to
+ // transition to a sleeping state.
+ let mut m = self.mutex.lock().unwrap();
+
+ // Transition to sleeping
+ match self.state.compare_and_swap(IDLE, SLEEP, Ordering::SeqCst) {
+ NOTIFY => {
+ // Notified before we could sleep, consume the notification and
+ // exit
+ self.state.store(IDLE, Ordering::SeqCst);
+ return;
+ }
+ IDLE => {},
+ _ => unreachable!(),
+ }
+
+ // Loop until we've been notified
+ loop {
+ m = self.condvar.wait(m).unwrap();
+
+ // Transition back to idle, loop otherwise
+ if NOTIFY == self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
+ return;
+ }
+ }
+ }
+}
+
+impl Notify for ThreadNotify {
+ fn notify(&self, _unpark_id: usize) {
+ // First, try transitioning from IDLE -> NOTIFY, this does not require a
+ // lock.
+ match self.state.compare_and_swap(IDLE, NOTIFY, Ordering::SeqCst) {
+ IDLE | NOTIFY => return,
+ SLEEP => {}
+ _ => unreachable!(),
+ }
+
+ // The other half is sleeping, this requires a lock
+ let _m = self.mutex.lock().unwrap();
+
+ // Transition from SLEEP -> NOTIFY
+ match self.state.compare_and_swap(SLEEP, NOTIFY, Ordering::SeqCst) {
+ SLEEP => {}
+ _ => return,
+ }
+
+ // Wakeup the sleeper
+ self.condvar.notify_one();
+ }
+}
+
+// ===== UnparkEvent =====
+
+/// For the duration of the given callback, add an "unpark event" to be
+/// triggered when the task handle is used to unpark the task.
+///
+/// Unpark events are used to pass information about what event caused a task to
+/// be unparked. In some cases, tasks are waiting on a large number of possible
+/// events, and need precise information about the wakeup to avoid extraneous
+/// polling.
+///
+/// Every `Task` handle comes with a set of unpark events which will fire when
+/// `unpark` is called. When fired, these events insert an identifier into a
+/// concurrent set, which the task can read from to determine what events
+/// occurred.
+///
+/// This function immediately invokes the closure, `f`, but arranges things so
+/// that `task::park` will produce a `Task` handle that includes the given
+/// unpark event.
+///
+/// # Panics
+///
+/// This function will panic if a task is not currently being executed. That
+/// is, this method can be dangerous to call outside of an implementation of
+/// `poll`.
+#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
+#[allow(deprecated)]
+pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R
+ where F: FnOnce() -> R
+{
+ super::with(|task| {
+ let new_task = BorrowedTask {
+ id: task.id,
+ unpark: task.unpark,
+ events: BorrowedEvents::One(&event, &task.events),
+ map: task.map,
+ };
+
+ super::set(&new_task, f)
+ })
+}
+
+/// A set insertion to trigger upon `unpark`.
+///
+/// Unpark events are used to communicate information about *why* an unpark
+/// occurred, in particular populating sets with event identifiers so that the
+/// unparked task can avoid extraneous polling. See `with_unpark_event` for
+/// more.
+#[derive(Clone)]
+#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
+#[allow(deprecated)]
+pub struct UnparkEvent {
+ set: Arc<EventSet>,
+ item: usize,
+}
+
+#[allow(deprecated)]
+impl UnparkEvent {
+ /// Construct an unpark event that will insert `id` into `set` when
+ /// triggered.
+ #[deprecated(note = "recommended to use `FuturesUnordered` instead")]
+ pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent {
+ UnparkEvent {
+ set: set,
+ item: id,
+ }
+ }
+
+ fn unpark(&self) {
+ self.set.insert(self.item);
+ }
+}
+
+#[allow(deprecated)]
+impl fmt::Debug for UnparkEvent {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("UnparkEvent")
+ .field("set", &"...")
+ .field("item", &self.item)
+ .finish()
+ }
+}
+
+/// A concurrent set which allows for the insertion of `usize` values.
+///
+/// `EventSet`s are used to communicate precise information about the event(s)
+/// that triggered a task notification. See `task::with_unpark_event` for details.
+#[deprecated(since="0.1.18", note = "recommended to use `FuturesUnordered` instead")]
+pub trait EventSet: Send + Sync + 'static {
+ /// Insert the given ID into the set
+ fn insert(&self, id: usize);
+}
+
+// Safe implementation of `UnsafeNotify` for `Arc` in the standard library.
+//
+// Note that this is a very unsafe implementation! The crucial pieces is that
+// these two values are considered equivalent:
+//
+// * Arc<T>
+// * *const ArcWrapped<T>
+//
+// We don't actually know the layout of `ArcWrapped<T>` as it's an
+// implementation detail in the standard library. We can make it work, though,
+// by casting it to and from an `Arc<T>`.
+//
+// This also means that you won't actually find `UnsafeNotify for Arc<T>`
+// because it's the wrong level of indirection. These methods are sort of
+// receiving Arc<T>, but not an owned version. It's... complicated. We may be
+// one of the first users of unsafe trait objects!
+
+struct ArcWrapped<T>(PhantomData<T>);
+
+impl<T: Notify + 'static> Notify for ArcWrapped<T> {
+ fn notify(&self, id: usize) {
+ unsafe {
+ let me: *const ArcWrapped<T> = self;
+ T::notify(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
+ id)
+ }
+ }
+
+ fn clone_id(&self, id: usize) -> usize {
+ unsafe {
+ let me: *const ArcWrapped<T> = self;
+ T::clone_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
+ id)
+ }
+ }
+
+ fn drop_id(&self, id: usize) {
+ unsafe {
+ let me: *const ArcWrapped<T> = self;
+ T::drop_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
+ id)
+ }
+ }
+}
+
+unsafe impl<T: Notify + 'static> UnsafeNotify for ArcWrapped<T> {
+ unsafe fn clone_raw(&self) -> NotifyHandle {
+ let me: *const ArcWrapped<T> = self;
+ let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone();
+ NotifyHandle::from(arc)
+ }
+
+ unsafe fn drop_raw(&self) {
+ let mut me: *const ArcWrapped<T> = self;
+ let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>;
+ ptr::drop_in_place(me);
+ }
+}
+
+impl<T> From<Arc<T>> for NotifyHandle
+ where T: Notify + 'static,
+{
+ fn from(rc: Arc<T>) -> NotifyHandle {
+ unsafe {
+ let ptr = mem::transmute::<Arc<T>, *mut ArcWrapped<T>>(rc);
+ NotifyHandle::new(ptr)
+ }
+ }
+}
+
+#[cfg(feature = "nightly")]
+mod nightly {
+ use super::{TaskUnpark, UnparkEvents};
+ use core::marker::Unpin;
+
+ impl Unpin for TaskUnpark {}
+ impl Unpin for UnparkEvents {}
+}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs
new file mode 100644
index 0000000000..51bb44878d
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/std/task_rc.rs
@@ -0,0 +1,129 @@
+#![cfg(feature = "with-deprecated")]
+#![allow(deprecated)]
+#![deprecated(since = "0.1.4",
+ note = "replaced with `BiLock` in many cases, otherwise slated \
+ for removal due to confusion")]
+
+use std::prelude::v1::*;
+use std::sync::Arc;
+use std::cell::UnsafeCell;
+use task_impl;
+
+// One critical piece of this module's contents are the `TaskRc<A>` handles.
+// The purpose of this is to conceptually be able to store data in a task,
+// allowing it to be accessed within multiple futures at once. For example if
+// you have some concurrent futures working, they may all want mutable access to
+// some data. We already know that when the futures are being poll'd that we're
+// entirely synchronized (aka `&mut Task`), so you shouldn't require an
+// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
+//
+// So the idea here is that you insert data into a task via `Task::insert`, and
+// a handle to that data is then returned to you. That handle can later get
+// presented to the task itself to actually retrieve the underlying data. The
+// invariant is that the data can only ever be accessed with the task present,
+// and the lifetime of the actual data returned is connected to the lifetime of
+// the task itself.
+//
+// Conceptually I at least like to think of this as "dynamically adding more
+// struct fields to a `Task`". Each call to insert creates a new "name" for the
+// struct field, a `TaskRc<A>`, and then you can access the fields of a struct
+// with the struct itself (`Task`) as well as the name of the field
+// (`TaskRc<A>`). If that analogy doesn't make sense then oh well, it at least
+// helped me!
+//
+// So anyway, we do some interesting trickery here to actually get it to work.
+// Each `TaskRc<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're
+// not even adding data to the `Task`! Each `TaskRc<A>` contains a reference
+// to this `Arc`, and `TaskRc` handles can be cloned which just bumps the
+// reference count on the `Arc` itself.
+//
+// As before, though, you can present the `Arc` to a `Task` and if they
+// originated from the same place you're allowed safe access to the internals.
+// We allow both shared and mutable access without the `Sync` bound on the data,
+// crucially noting that a `Task` itself is not `Sync`.
+//
+// So hopefully I've convinced you of this point that the `get` and `get_mut`
+// methods below are indeed safe. The data is always valid as it's stored in an
+// `Arc`, and access is only allowed with the proof of the associated `Task`.
+// One thing you might be asking yourself though is what exactly is this "proof
+// of a task"? Right now it's a `usize` corresponding to the `Task`'s
+// `TaskHandle` arc allocation.
+//
+// Wait a minute, isn't that the ABA problem! That is, we create a task A, add
+// some data to it, destroy task A, do some work, create a task B, and then ask
+// to get the data from task B. In this case though the point of the
+// `task_inner` "proof" field is simply that there's some non-`Sync` token
+// proving that you can get access to the data. So while weird, this case should
+// still be safe, as the data's not stored in the task itself.
+
+/// A reference to a piece of data that's accessible only within a specific
+/// `Task`.
+///
+/// This data is `Send` even when `A` is not `Sync`, because the data stored
+/// within is accessed in a single-threaded way. The thread accessing it may
+/// change over time, if the task migrates, so `A` must be `Send`.
+#[derive(Debug)]
+pub struct TaskRc<A> {
+ task: task_impl::Task,
+ ptr: Arc<UnsafeCell<A>>,
+}
+
+// for safety here, see docs at the top of this module
+unsafe impl<A: Send> Send for TaskRc<A> {}
+unsafe impl<A: Sync> Sync for TaskRc<A> {}
+
+impl<A> TaskRc<A> {
+ /// Inserts a new piece of task-local data into this task, returning a
+ /// reference to it.
+ ///
+ /// Ownership of the data will be transferred to the task, and the data will
+ /// be destroyed when the task itself is destroyed. The returned value can
+ /// be passed to the `with` method to get a reference back to the original
+ /// data.
+ ///
+ /// Note that the returned handle is cloneable and copyable and can be sent
+ /// to other futures which will be associated with the same task. All
+ /// futures will then have access to this data when passed the reference
+ /// back.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if a task is not currently running.
+ pub fn new(a: A) -> TaskRc<A> {
+ TaskRc {
+ task: task_impl::park(),
+ ptr: Arc::new(UnsafeCell::new(a)),
+ }
+ }
+
+ /// Operate with a reference to the underlying data.
+ ///
+ /// This method should be passed a handle previously returned by
+ /// `Task::insert`. That handle, when passed back into this method, will
+ /// retrieve a reference to the original data.
+ ///
+ /// # Panics
+ ///
+ /// This method will panic if a task is not currently running or if `self`
+ /// does not belong to the task that is currently running. That is, if
+ /// another task generated the `data` handle passed in, this method will
+ /// panic.
+ pub fn with<F, R>(&self, f: F) -> R
+ where F: FnOnce(&A) -> R
+ {
+ if !self.task.is_current() {
+ panic!("TaskRc being accessed on task it does not belong to");
+ }
+
+ f(unsafe { &*self.ptr.get() })
+ }
+}
+
+impl<A> Clone for TaskRc<A> {
+ fn clone(&self) -> TaskRc<A> {
+ TaskRc {
+ task: self.task.clone(),
+ ptr: self.ptr.clone(),
+ }
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs b/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs
new file mode 100644
index 0000000000..246def2753
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/task_impl/std/unpark_mutex.rs
@@ -0,0 +1,144 @@
+use std::cell::UnsafeCell;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
+
+/// A "lock" around data `D`, which employs a *helping* strategy.
+///
+/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
+/// invoked on only a single thread at a time (2) `poll` being invoked at least
+/// once after each `unpark` (unless the future has completed).
+pub struct UnparkMutex<D> {
+ // The state of task execution (state machine described below)
+ status: AtomicUsize,
+
+ // The actual task data, accessible only in the POLLING state
+ inner: UnsafeCell<Option<D>>,
+}
+
+// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
+// acquisition failure, the current lock holder performs the desired work --
+// re-polling.
+//
+// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
+// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
+// must therefore be `Send`.
+unsafe impl<D: Send> Send for UnparkMutex<D> {}
+unsafe impl<D: Send> Sync for UnparkMutex<D> {}
+
+// There are four possible task states, listed below with their possible
+// transitions:
+
+// The task is blocked, waiting on an event
+const WAITING: usize = 0; // --> POLLING
+
+// The task is actively being polled by a thread; arrival of additional events
+// of interest should move it to the REPOLL state
+const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE
+
+// The task is actively being polled, but will need to be re-polled upon
+// completion to ensure that all events were observed.
+const REPOLL: usize = 2; // --> POLLING
+
+// The task has finished executing (either successfully or with an error/panic)
+const COMPLETE: usize = 3; // No transitions out
+
+impl<D> UnparkMutex<D> {
+ pub fn new() -> UnparkMutex<D> {
+ UnparkMutex {
+ status: AtomicUsize::new(WAITING),
+ inner: UnsafeCell::new(None),
+ }
+ }
+
+ /// Attempt to "notify" the mutex that a poll should occur.
+ ///
+ /// An `Ok` result indicates that the `POLLING` state has been entered, and
+ /// the caller can proceed to poll the future. An `Err` result indicates
+ /// that polling is not necessary (because the task is finished or the
+ /// polling has been delegated).
+ pub fn notify(&self) -> Result<D, ()> {
+ let mut status = self.status.load(SeqCst);
+ loop {
+ match status {
+ // The task is idle, so try to run it immediately.
+ WAITING => {
+ match self.status.compare_exchange(WAITING, POLLING,
+ SeqCst, SeqCst) {
+ Ok(_) => {
+ let data = unsafe {
+ // SAFETY: we've ensured mutual exclusion via
+ // the status protocol; we are the only thread
+ // that has transitioned to the POLLING state,
+                            // and we won't transition back to WAITING until
+ // the lock is "released" by this thread. See
+ // the protocol diagram above.
+ (*self.inner.get()).take().unwrap()
+ };
+ return Ok(data);
+ }
+ Err(cur) => status = cur,
+ }
+ }
+
+ // The task is being polled, so we need to record that it should
+ // be *repolled* when complete.
+ POLLING => {
+ match self.status.compare_exchange(POLLING, REPOLL,
+ SeqCst, SeqCst) {
+ Ok(_) => return Err(()),
+ Err(cur) => status = cur,
+ }
+ }
+
+ // The task is already scheduled for polling, or is complete, so
+ // we've got nothing to do.
+ _ => return Err(()),
+ }
+ }
+ }
+
+ /// Alert the mutex that polling is about to begin, clearing any accumulated
+ /// re-poll requests.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub unsafe fn start_poll(&self) {
+ self.status.store(POLLING, SeqCst);
+ }
+
+ /// Alert the mutex that polling completed with NotReady.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub unsafe fn wait(&self, data: D) -> Result<(), D> {
+ *self.inner.get() = Some(data);
+
+ match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
+ // no unparks came in while we were running
+ Ok(_) => Ok(()),
+
+ // guaranteed to be in REPOLL state; just clobber the
+ // state and run again.
+ Err(status) => {
+ assert_eq!(status, REPOLL);
+ self.status.store(POLLING, SeqCst);
+ Err((*self.inner.get()).take().unwrap())
+ }
+ }
+ }
+
+ /// Alert the mutex that the task has completed execution and should not be
+ /// notified again.
+ ///
+ /// # Safety
+ ///
+ /// Callable only from the `POLLING`/`REPOLL` states, i.e. between
+ /// successful calls to `notify` and `wait`/`complete`.
+ pub unsafe fn complete(&self) {
+ self.status.store(COMPLETE, SeqCst);
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/unsync/mod.rs b/third_party/rust/futures-0.1.31/src/unsync/mod.rs
new file mode 100644
index 0000000000..aaa5a707ba
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/unsync/mod.rs
@@ -0,0 +1,7 @@
+//! Future-aware single-threaded synchronization
+//!
+//! This module contains similar abstractions to `sync`, for communications
+//! between tasks on the same thread only.
+
+pub mod mpsc;
+pub mod oneshot;
diff --git a/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs b/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs
new file mode 100644
index 0000000000..ba0d52dc98
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/unsync/mpsc.rs
@@ -0,0 +1,474 @@
+//! A multi-producer, single-consumer, futures-aware, FIFO queue with back
+//! pressure, for use communicating between tasks on the same thread.
+//!
+//! These queues are the same as those in `futures::sync`, except they're not
+//! intended to be sent across threads.
+
+use std::any::Any;
+use std::cell::RefCell;
+use std::collections::VecDeque;
+use std::error::Error;
+use std::fmt;
+use std::mem;
+use std::rc::{Rc, Weak};
+
+use task::{self, Task};
+use future::Executor;
+use sink::SendAll;
+use resultstream::{self, Results};
+use unsync::oneshot;
+use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
+
+/// Creates a bounded in-memory channel with buffered storage.
+///
+/// This method creates concrete implementations of the `Stream` and `Sink`
+/// traits which can be used to communicate a stream of values between tasks
+/// with backpressure. The channel capacity is exactly `buffer`. On average,
+/// sending a message through this channel performs no dynamic allocation.
+pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
+ channel_(Some(buffer))
+}
+
+fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
+ let shared = Rc::new(RefCell::new(Shared {
+ buffer: VecDeque::new(),
+ capacity: buffer,
+ blocked_senders: VecDeque::new(),
+ blocked_recv: None,
+ }));
+ let sender = Sender { shared: Rc::downgrade(&shared) };
+ let receiver = Receiver { state: State::Open(shared) };
+ (sender, receiver)
+}
+
+#[derive(Debug)]
+struct Shared<T> {
+ buffer: VecDeque<T>,
+ capacity: Option<usize>,
+ blocked_senders: VecDeque<Task>,
+ blocked_recv: Option<Task>,
+}
+
+/// The transmission end of a channel.
+///
+/// This is created by the `channel` function.
+#[derive(Debug)]
+pub struct Sender<T> {
+ shared: Weak<RefCell<Shared<T>>>,
+}
+
+impl<T> Sender<T> {
+ fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
+ let shared = match self.shared.upgrade() {
+ Some(shared) => shared,
+ None => return Err(SendError(msg)), // receiver was dropped
+ };
+ let mut shared = shared.borrow_mut();
+
+ match shared.capacity {
+ Some(capacity) if shared.buffer.len() == capacity => {
+ shared.blocked_senders.push_back(task::current());
+ Ok(AsyncSink::NotReady(msg))
+ }
+ _ => {
+ shared.buffer.push_back(msg);
+ if let Some(task) = shared.blocked_recv.take() {
+ task.notify();
+ }
+ Ok(AsyncSink::Ready)
+ }
+ }
+ }
+}
+
+impl<T> Clone for Sender<T> {
+ fn clone(&self) -> Self {
+ Sender { shared: self.shared.clone() }
+ }
+}
+
+impl<T> Sink for Sender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ self.do_send(msg)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T> Drop for Sender<T> {
+ fn drop(&mut self) {
+ let shared = match self.shared.upgrade() {
+ Some(shared) => shared,
+ None => return,
+ };
+ // The number of existing `Weak` indicates if we are possibly the last
+ // `Sender`. If we are the last, we possibly must notify a blocked
+ // `Receiver`. `self.shared` is always one of the `Weak` to this shared
+ // data. Therefore the smallest possible Rc::weak_count(&shared) is 1.
+ if Rc::weak_count(&shared) == 1 {
+ if let Some(task) = shared.borrow_mut().blocked_recv.take() {
+ // Wake up receiver as its stream has ended
+ task.notify();
+ }
+ }
+ }
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is created by the `channel` function.
+#[derive(Debug)]
+pub struct Receiver<T> {
+ state: State<T>,
+}
+
+/// Possible states of a receiver. We're either Open (can receive more messages)
+/// or we're closed with a list of messages we have left to receive.
+#[derive(Debug)]
+enum State<T> {
+ Open(Rc<RefCell<Shared<T>>>),
+ Closed(VecDeque<T>),
+}
+
+impl<T> Receiver<T> {
+ /// Closes the receiving half
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ let (blockers, items) = match self.state {
+ State::Open(ref state) => {
+ let mut state = state.borrow_mut();
+ let items = mem::replace(&mut state.buffer, VecDeque::new());
+ let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new());
+ (blockers, items)
+ }
+ State::Closed(_) => return,
+ };
+ self.state = State::Closed(items);
+ for task in blockers {
+ task.notify();
+ }
+ }
+}
+
+impl<T> Stream for Receiver<T> {
+ type Item = T;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ let me = match self.state {
+ State::Open(ref mut me) => me,
+ State::Closed(ref mut items) => {
+ return Ok(Async::Ready(items.pop_front()))
+ }
+ };
+
+ if let Some(shared) = Rc::get_mut(me) {
+ // All senders have been dropped, so drain the buffer and end the
+ // stream.
+ return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front()));
+ }
+
+ let mut shared = me.borrow_mut();
+ if let Some(msg) = shared.buffer.pop_front() {
+ if let Some(task) = shared.blocked_senders.pop_front() {
+ drop(shared);
+ task.notify();
+ }
+ Ok(Async::Ready(Some(msg)))
+ } else {
+ shared.blocked_recv = Some(task::current());
+ Ok(Async::NotReady)
+ }
+ }
+}
+
+impl<T> Drop for Receiver<T> {
+ fn drop(&mut self) {
+ self.close();
+ }
+}
+
+/// The transmission end of an unbounded channel.
+///
+/// This is created by the `unbounded` function.
+#[derive(Debug)]
+pub struct UnboundedSender<T>(Sender<T>);
+
+impl<T> Clone for UnboundedSender<T> {
+ fn clone(&self) -> Self {
+ UnboundedSender(self.0.clone())
+ }
+}
+
+impl<T> Sink for UnboundedSender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ self.0.start_send(msg)
+ }
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<'a, T> Sink for &'a UnboundedSender<T> {
+ type SinkItem = T;
+ type SinkError = SendError<T>;
+
+ fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
+ self.0.do_send(msg)
+ }
+
+ fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+
+ fn close(&mut self) -> Poll<(), SendError<T>> {
+ Ok(Async::Ready(()))
+ }
+}
+
+impl<T> UnboundedSender<T> {
+ /// Sends the provided message along this channel.
+ ///
+ /// This is an unbounded sender, so this function differs from `Sink::send`
+ /// by ensuring the return type reflects that the channel is always ready to
+ /// receive messages.
+ #[deprecated(note = "renamed to `unbounded_send`")]
+ #[doc(hidden)]
+ pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
+ self.unbounded_send(msg)
+ }
+
+ /// Sends the provided message along this channel.
+ ///
+ /// This is an unbounded sender, so this function differs from `Sink::send`
+ /// by ensuring the return type reflects that the channel is always ready to
+ /// receive messages.
+ pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
+ let shared = match self.0.shared.upgrade() {
+ Some(shared) => shared,
+ None => return Err(SendError(msg)),
+ };
+ let mut shared = shared.borrow_mut();
+ shared.buffer.push_back(msg);
+ if let Some(task) = shared.blocked_recv.take() {
+ drop(shared);
+ task.notify();
+ }
+ Ok(())
+ }
+}
+
+/// The receiving end of an unbounded channel.
+///
+/// This is created by the `unbounded` function.
+#[derive(Debug)]
+pub struct UnboundedReceiver<T>(Receiver<T>);
+
+impl<T> UnboundedReceiver<T> {
+ /// Closes the receiving half
+ ///
+ /// This prevents any further messages from being sent on the channel while
+ /// still enabling the receiver to drain messages that are buffered.
+ pub fn close(&mut self) {
+ self.0.close();
+ }
+}
+
+impl<T> Stream for UnboundedReceiver<T> {
+ type Item = T;
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
+ self.0.poll()
+ }
+}
+
+/// Creates an unbounded in-memory channel with buffered storage.
+///
+/// Identical semantics to `channel`, except with no limit to buffer size.
+pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
+ let (send, recv) = channel_(None);
+ (UnboundedSender(send), UnboundedReceiver(recv))
+}
+
+/// Error type for sending, used when the receiving end of a channel is
+/// dropped
+pub struct SendError<T>(T);
+
+impl<T> fmt::Debug for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("SendError")
+ .field(&"...")
+ .finish()
+ }
+}
+
+impl<T> fmt::Display for SendError<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "send failed because receiver is gone")
+ }
+}
+
+impl<T: Any> Error for SendError<T> {
+ fn description(&self) -> &str {
+ "send failed because receiver is gone"
+ }
+}
+
+impl<T> SendError<T> {
+ /// Returns the message that was attempted to be sent but failed.
+ pub fn into_inner(self) -> T {
+ self.0
+ }
+}
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a stream that proxies a stream on a separate `Executor`.
+/// Created through the `mpsc::spawn` function, this handle will produce
+/// the same values as the proxied stream, as they are produced in the executor,
+/// and uses a limited buffer to exert back-pressure on the remote stream.
+///
+/// If this handle is dropped, then the stream will no longer be polled and is
+/// scheduled to be dropped.
+pub struct SpawnHandle<Item, Error> {
+ inner: Receiver<Result<Item, Error>>,
+ _cancel_tx: oneshot::Sender<()>,
+}
+
+/// Type of future which `Executor` instances must be able to execute for `spawn`.
+pub struct Execute<S: Stream> {
+ inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
+ cancel_rx: oneshot::Receiver<()>,
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// At most `buffer + 1` elements will be buffered at a time. If the buffer
+/// is full, then `stream` will stop progressing until more space is available.
+/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable to spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
+ where S: Stream,
+ E: Executor<Execute<S>>
+{
+ let (cancel_tx, cancel_rx) = oneshot::channel();
+ let (tx, rx) = channel(buffer);
+ executor.execute(Execute {
+ inner: tx.send_all(resultstream::new(stream)),
+ cancel_rx: cancel_rx,
+ }).expect("failed to spawn stream");
+ SpawnHandle {
+ inner: rx,
+ _cancel_tx: cancel_tx,
+ }
+}
+
+/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the remote stream, with unbounded buffering.
+///
+/// The `stream` will be canceled if the `SpawnHandle` is dropped.
+///
+/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
+/// When `stream` has additional items available, then the `SpawnHandle`
+/// will have those same items available.
+///
+/// An unbounded buffer is used, which means that values will be buffered as
+/// fast as `stream` can produce them, without any backpressure. Therefore, if
+/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
+/// potentially hog CPU resources. In particular, if `stream` is infinite
+/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
+/// will result in an infinite loop.
+///
+/// # Panics
+///
+/// This function will panic if `executor` is unable to spawn a `Future` containing
+/// the entirety of the `stream`.
+pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
+ where S: Stream,
+ E: Executor<Execute<S>>
+{
+ let (cancel_tx, cancel_rx) = oneshot::channel();
+ let (tx, rx) = channel_(None);
+ executor.execute(Execute {
+ inner: tx.send_all(resultstream::new(stream)),
+ cancel_rx: cancel_rx,
+ }).expect("failed to spawn stream");
+ SpawnHandle {
+ inner: rx,
+ _cancel_tx: cancel_tx,
+ }
+}
+
+impl<I, E> Stream for SpawnHandle<I, E> {
+ type Item = I;
+ type Error = E;
+
+ fn poll(&mut self) -> Poll<Option<I>, E> {
+ match self.inner.poll() {
+ Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
+ Ok(Async::Ready(Some(Err(e)))) => Err(e),
+ Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ Err(_) => unreachable!("mpsc::Receiver should never return Err"),
+ }
+ }
+}
+
+impl<I, E> fmt::Debug for SpawnHandle<I, E> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("SpawnHandle")
+ .finish()
+ }
+}
+
+impl<S: Stream> Future for Execute<S> {
+ type Item = ();
+ type Error = ();
+
+ fn poll(&mut self) -> Poll<(), ()> {
+ match self.cancel_rx.poll() {
+ Ok(Async::NotReady) => (),
+ _ => return Ok(Async::Ready(())),
+ }
+ match self.inner.poll() {
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ _ => Ok(Async::Ready(()))
+ }
+ }
+}
+
+impl<S: Stream> fmt::Debug for Execute<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Execute")
+ .finish()
+ }
+}
diff --git a/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs b/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs
new file mode 100644
index 0000000000..7ae2890f9e
--- /dev/null
+++ b/third_party/rust/futures-0.1.31/src/unsync/oneshot.rs
@@ -0,0 +1,351 @@
+//! A one-shot, futures-aware channel
+//!
+//! This channel is similar to that in `sync::oneshot` but cannot be sent across
+//! threads.
+
+use std::cell::{Cell, RefCell};
+use std::fmt;
+use std::rc::{Rc, Weak};
+
+use {Future, Poll, Async};
+use future::{Executor, IntoFuture, Lazy, lazy};
+use task::{self, Task};
+
+/// Creates a new futures-aware, one-shot channel.
+///
+/// This function is the same as `sync::oneshot::channel` except that the
+/// returned values cannot be sent across threads.
+pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
+    // The shared slot starts out empty with no task registered on either side.
+    let shared = Rc::new(RefCell::new(Inner {
+        value: None,
+        tx_task: None,
+        rx_task: None,
+    }));
+    // The sender only holds a weak reference, so dropping the receiver (the
+    // sole strong owner) is observable by the sender as cancellation.
+    let sender = Sender {
+        inner: Rc::downgrade(&shared),
+    };
+    let receiver = Receiver {
+        state: State::Open(shared),
+    };
+    (sender, receiver)
+}
+
+/// Represents the completion half of a oneshot through which the result of a
+/// computation is signaled.
+///
+/// This is created by the `unsync::oneshot::channel` function and is equivalent
+/// in functionality to `sync::oneshot::Sender` except that it cannot be sent
+/// across threads.
+#[derive(Debug)]
+pub struct Sender<T> {
+    // Weak so that the receiver dropping its (only) strong reference is
+    // detectable here as a failed `upgrade`, i.e. cancellation.
+    inner: Weak<RefCell<Inner<T>>>,
+}
+
+/// A future representing the completion of a computation happening elsewhere in
+/// memory.
+///
+/// This is created by the `unsync::oneshot::channel` function and is equivalent
+/// in functionality to `sync::oneshot::Receiver` except that it cannot be sent
+/// across threads.
+#[derive(Debug)]
+#[must_use = "futures do nothing unless polled"]
+pub struct Receiver<T> {
+    // Either still attached to the shared slot (`Open`) or detached with any
+    // already-sent value buffered (`Closed`); see `State`.
+    state: State<T>,
+}
+
+/// Receiver-side view of the channel.
+#[derive(Debug)]
+enum State<T> {
+    // Still connected; this `Rc` is the only strong reference to the slot.
+    Open(Rc<RefCell<Inner<T>>>),
+    // Closed via `Receiver::close`; a value that arrived before closing is
+    // kept here so a later `poll` can still yield it.
+    Closed(Option<T>),
+}
+
+// Reuse the same cancellation error type as the thread-safe `sync` flavor.
+pub use sync::oneshot::Canceled;
+
+/// State shared (via `Rc`) between the `Sender` and `Receiver` halves.
+#[derive(Debug)]
+struct Inner<T> {
+    // The sent value, if any, waiting to be taken by the receiver.
+    value: Option<T>,
+    // Task to notify when the receiver goes away (registered by `poll_cancel`).
+    tx_task: Option<Task>,
+    // Task to notify when a value arrives or the sender goes away.
+    rx_task: Option<Task>,
+}
+
+impl<T> Sender<T> {
+    /// Completes this oneshot with a successful result.
+    ///
+    /// This function will consume `self` and indicate to the other end, the
+    /// `Receiver`, that the value provided is the result of the computation
+    /// this represents.
+    ///
+    /// If the value is successfully enqueued for the remote end to receive,
+    /// then `Ok(())` is returned. If the receiving end was deallocated before
+    /// this function was called, however, then `Err` is returned with the value
+    /// provided.
+    pub fn send(self, val: T) -> Result<(), T> {
+        if let Some(inner) = self.inner.upgrade() {
+            // Stash the value; the receiver's task is woken by our `Drop`
+            // impl, which runs immediately after `send` returns since `send`
+            // consumes `self`.
+            inner.borrow_mut().value = Some(val);
+            Ok(())
+        } else {
+            // Receiver already gone; hand the value back to the caller.
+            Err(val)
+        }
+    }
+
+    /// Polls this `Sender` half to detect whether the `Receiver` this has
+    /// paired with has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `complete`.
+    ///
+    /// Like `Future::poll`, this function will panic if it's not called from
+    /// within the context of a task. In other words, this should only ever be
+    /// called from inside another future.
+    ///
+    /// If `Ready` is returned then it means that the `Receiver` has disappeared
+    /// and the result this `Sender` would otherwise produce should no longer
+    /// be produced.
+    ///
+    /// If `NotReady` is returned then the `Receiver` is still alive and may be
+    /// able to receive a message if sent. The current task, however, is
+    /// scheduled to receive a notification if the corresponding `Receiver` goes
+    /// away.
+    pub fn poll_cancel(&mut self) -> Poll<(), ()> {
+        match self.inner.upgrade() {
+            Some(inner) => {
+                // Receiver still alive: register for a wakeup when it drops.
+                inner.borrow_mut().tx_task = Some(task::current());
+                Ok(Async::NotReady)
+            }
+            None => Ok(().into()),
+        }
+    }
+
+    /// Tests to see whether this `Sender`'s corresponding `Receiver`
+    /// has gone away.
+    ///
+    /// This function can be used to learn about when the `Receiver` (consumer)
+    /// half has gone away and nothing will be able to receive a message sent
+    /// from `send`.
+    ///
+    /// Note that this function is intended to *not* be used in the context of a
+    /// future. If you're implementing a future you probably want to call the
+    /// `poll_cancel` function which will block the current task if the
+    /// cancellation hasn't happened yet. This can be useful when working on a
+    /// non-futures related thread, though, which would otherwise panic if
+    /// `poll_cancel` were called.
+    pub fn is_canceled(&self) -> bool {
+        // The receiver holds the only strong reference, so a failed upgrade
+        // means it has been dropped.
+        self.inner.upgrade().is_none()
+    }
+}
+
+impl<T> Drop for Sender<T> {
+    // Wake the receiver on drop so it can observe either the value stored by
+    // `send` or, if none was stored, the cancellation.
+    fn drop(&mut self) {
+        // If the receiver is already gone there is nobody to notify.
+        let inner = match self.inner.upgrade() {
+            Some(inner) => inner,
+            None => return,
+        };
+        // Clear our own registration and take the receiver's task while the
+        // borrow is held; notify only after the `RefCell` borrow is released
+        // in case the wakeup re-enters this channel's state.
+        let rx_task = {
+            let mut borrow = inner.borrow_mut();
+            borrow.tx_task.take();
+            borrow.rx_task.take()
+        };
+        if let Some(task) = rx_task {
+            task.notify();
+        }
+    }
+}
+
+impl<T> Receiver<T> {
+    /// Gracefully close this receiver, preventing sending any future messages.
+    ///
+    /// Any `send` operation which happens after this method returns is
+    /// guaranteed to fail. Once this method is called the normal `poll` method
+    /// can be used to determine whether a message was actually sent or not. If
+    /// `Canceled` is returned from `poll` then no message was sent.
+    pub fn close(&mut self) {
+        let (item, task) = match self.state {
+            State::Open(ref inner) => {
+                // Detach from the shared slot: drop our task registration,
+                // grab any value that was already sent, and take the sender's
+                // task so it can be told about the cancellation.
+                let mut inner = inner.borrow_mut();
+                drop(inner.rx_task.take());
+                (inner.value.take(), inner.tx_task.take())
+            }
+            // Already closed; nothing to do.
+            State::Closed(_) => return,
+        };
+        // Replacing `State::Open` drops the only strong `Rc`, which is what
+        // makes subsequent `send` calls fail their `Weak::upgrade`.
+        self.state = State::Closed(item);
+        if let Some(task) = task {
+            task.notify();
+        }
+    }
+}
+
+impl<T> Future for Receiver<T> {
+    type Item = T;
+    type Error = Canceled;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        // A closed receiver may still hold a value that arrived before
+        // `close` was called; yield it once, then report cancellation on any
+        // later poll.
+        let inner = match self.state {
+            State::Open(ref mut inner) => inner,
+            State::Closed(ref mut item) => {
+                match item.take() {
+                    Some(item) => return Ok(item.into()),
+                    None => return Err(Canceled),
+                }
+            }
+        };
+
+        // If we've got a value, then skip the logic below as we're done.
+        if let Some(val) = inner.borrow_mut().value.take() {
+            return Ok(Async::Ready(val))
+        }
+
+        // If we can get mutable access, then the sender has gone away. We
+        // didn't see a value above, so we're canceled. Otherwise we park
+        // our task and wait for a value to come in.
+        //
+        // (`Rc::get_mut` succeeding means no `Weak` upgrades are possible
+        // concurrently here because this code is single-threaded, so the
+        // sender's weak reference being dead is the only way it succeeds.)
+        if Rc::get_mut(inner).is_some() {
+            Err(Canceled)
+        } else {
+            inner.borrow_mut().rx_task = Some(task::current());
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+impl<T> Drop for Receiver<T> {
+    fn drop(&mut self) {
+        // Dropping is equivalent to closing: future `send`s fail and the
+        // sender's `poll_cancel` task (if any) gets notified.
+        self.close();
+    }
+}
+
+/// Handle returned from the `spawn` function.
+///
+/// This handle is a future representing the completion of a different future on
+/// a separate executor. Created through the `oneshot::spawn` function this
+/// handle will resolve when the future provided to `spawn` resolves on the
+/// `Executor` instance provided to that function.
+///
+/// If this handle is dropped then the future will automatically no longer be
+/// polled and is scheduled to be dropped. This can be canceled with the
+/// `forget` function, however.
+pub struct SpawnHandle<T, E> {
+    // Receives the spawned future's result from the `Execute` task.
+    rx: Receiver<Result<T, E>>,
+    // Shared with `Execute`; set to `true` by `forget` so the spawned future
+    // keeps running even after this handle is dropped.
+    keep_running: Rc<Cell<bool>>,
+}
+
+/// Type of future which `Spawn` instances below must be able to spawn.
+pub struct Execute<F: Future> {
+    // The future being driven to completion on the executor.
+    future: F,
+    // Channel used to report the result; taken (becomes `None`) once the
+    // result has been sent.
+    tx: Option<Sender<Result<F::Item, F::Error>>>,
+    // Mirrors `SpawnHandle::keep_running`; consulted when cancellation is
+    // observed via `poll_cancel`.
+    keep_running: Rc<Cell<bool>>,
+}
+
+/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
+/// returning a handle representing the completion of the future.
+///
+/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
+/// When `future` completes on `executor` then the `SpawnHandle` will itself be
+/// resolved. Internally `SpawnHandle` contains a `oneshot` channel and is
+/// thus not safe to send across threads.
+///
+/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
+/// not desired then the `SpawnHandle::forget` function can be used to continue
+/// running the future to completion.
+///
+/// # Panics
+///
+/// This function will panic if the instance of `Spawn` provided is unable to
+/// spawn the `future` provided.
+///
+/// If the provided instance of `Spawn` does not actually run `future` to
+/// completion, then the returned handle may panic when polled. Typically this
+/// is not a problem, though, as most instances of `Spawn` will run futures to
+/// completion.
+pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
+    where F: Future,
+          E: Executor<Execute<F>>,
+{
+    // The "keep running on cancellation" flag, shared between the handle and
+    // the executor-side task.
+    let keep_running = Rc::new(Cell::new(false));
+    let (result_tx, result_rx) = channel();
+    let wrapped = Execute {
+        future: future,
+        tx: Some(result_tx),
+        keep_running: keep_running.clone(),
+    };
+    executor.execute(wrapped).expect("failed to spawn future");
+    SpawnHandle {
+        rx: result_rx,
+        keep_running: keep_running,
+    }
+}
+
+/// Spawns a function `f` onto the `Spawn` instance provided `s`.
+///
+/// For more information see the `spawn` function in this module. This function
+/// is just a thin wrapper around `spawn` which will execute the closure on the
+/// executor provided and then complete the future that the closure returns.
+pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
+    where F: FnOnce() -> R,
+          R: IntoFuture,
+          E: Executor<Execute<Lazy<F, R>>>,
+{
+    // `lazy` defers running `f` until the executor first polls the task.
+    spawn(lazy(f), executor)
+}
+
+impl<T, E> SpawnHandle<T, E> {
+    /// Drop this future without canceling the underlying future.
+    ///
+    /// When `SpawnHandle` is dropped, the spawned future will be canceled as
+    /// well if the future hasn't already resolved. This function can be used
+    /// to drop this future while keeping the underlying future executing.
+    pub fn forget(self) {
+        // `Execute::poll` checks this shared flag when it observes the
+        // oneshot sender's cancellation, and keeps polling if it is set.
+        self.keep_running.set(true);
+    }
+}
+
+impl<T, E> Future for SpawnHandle<T, E> {
+    type Item = T;
+    type Error = E;
+
+    /// Resolves with the spawned future's own result.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the executor dropped the `Execute` task without running it
+    /// to completion, since then the oneshot sender is dropped with no value
+    /// ever sent.
+    fn poll(&mut self) -> Poll<T, E> {
+        match self.rx.poll() {
+            Ok(Async::Ready(Ok(t))) => Ok(t.into()),
+            Ok(Async::Ready(Err(e))) => Err(e),
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            Err(_) => panic!("future was canceled before completion"),
+        }
+    }
+}
+
+impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> {
+    /// Formats the handle opaquely; its internal channel state is not shown.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("SpawnHandle").finish()
+    }
+}
+
+impl<F: Future> Future for Execute<F> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        // If we're canceled then we may want to bail out early.
+        //
+        // If the `forget` function was called, though, then we keep going.
+        //
+        // `tx` is only `None` after the result has been sent, at which point
+        // this future has already resolved, so the `unwrap` can't fail while
+        // we're still being polled; `poll_cancel` itself never errors.
+        if self.tx.as_mut().unwrap().poll_cancel().unwrap().is_ready() {
+            if !self.keep_running.get() {
+                return Ok(().into())
+            }
+        }
+
+        // Drive the wrapped future, capturing either outcome as a `Result`
+        // to report back through the oneshot.
+        let result = match self.future.poll() {
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Ok(Async::Ready(t)) => Ok(t),
+            Err(e) => Err(e),
+        };
+        // The receiver may already be gone (e.g. handle dropped after
+        // `forget`); the send failing is fine either way, so ignore it.
+        drop(self.tx.take().unwrap().send(result));
+        Ok(().into())
+    }
+}
+
+impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
+    /// Shows the wrapped future; the channel and flag fields are omitted.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Execute")
+            .field("future", &self.future)
+            .finish()
+    }
+}